i965: Remove now unneeded calls to calculate_cfg().
Now that nothing invalidates the CFG, we can call calculate_cfg() once — immediately after emit_fb_writes()/emit_thread_end() — and never again. Reviewed-by: Topi Pohjolainen <topi.pohjolainen@intel.com>
This commit is contained in:
parent 072ea414d0
commit a4fb8897a2
@ -40,8 +40,6 @@ dead_control_flow_eliminate(backend_visitor *v)
|
|||
{
|
||||
bool progress = false;
|
||||
|
||||
v->calculate_cfg();
|
||||
|
||||
foreach_block_safe (block, v->cfg) {
|
||||
bblock_t *if_block = NULL, *else_block = NULL, *endif_block = block;
|
||||
bool found = false;
|
||||
|
|
|
@ -1951,8 +1951,6 @@ fs_visitor::assign_constant_locations()
|
|||
void
|
||||
fs_visitor::demote_pull_constants()
|
||||
{
|
||||
calculate_cfg();
|
||||
|
||||
foreach_block_and_inst (block, fs_inst, inst, cfg) {
|
||||
for (int i = 0; i < inst->sources; i++) {
|
||||
if (inst->src[i].file != UNIFORM)
|
||||
|
@ -2448,8 +2446,6 @@ fs_visitor::remove_duplicate_mrf_writes()
|
|||
|
||||
memset(last_mrf_move, 0, sizeof(last_mrf_move));
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
|
||||
if (inst->is_control_flow()) {
|
||||
memset(last_mrf_move, 0, sizeof(last_mrf_move));
|
||||
|
@ -2704,8 +2700,6 @@ fs_visitor::insert_gen4_send_dependency_workarounds()
|
|||
* have a .reg_offset of 0.
|
||||
*/
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
foreach_block_and_inst(block, fs_inst, inst, cfg) {
|
||||
if (inst->mlen != 0 && inst->dst.file == GRF) {
|
||||
insert_gen4_pre_send_dependency_workarounds(block, inst);
|
||||
|
@ -2737,8 +2731,6 @@ fs_visitor::insert_gen4_send_dependency_workarounds()
|
|||
void
|
||||
fs_visitor::lower_uniform_pull_constant_loads()
|
||||
{
|
||||
calculate_cfg();
|
||||
|
||||
foreach_block_and_inst (block, fs_inst, inst, cfg) {
|
||||
if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
|
||||
continue;
|
||||
|
@ -2791,8 +2783,6 @@ fs_visitor::lower_load_payload()
|
|||
{
|
||||
bool progress = false;
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
|
||||
if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
|
||||
fs_reg dst = inst->dst;
|
||||
|
@ -3270,6 +3260,8 @@ fs_visitor::run()
|
|||
|
||||
emit_fb_writes();
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
split_virtual_grfs();
|
||||
|
||||
move_uniform_array_access_to_pull_constants();
|
||||
|
@ -3417,8 +3409,6 @@ fs_visitor::run()
|
|||
*/
|
||||
assert(sanity_param_count == prog->Parameters->NumParameters);
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
return !failed;
|
||||
}
|
||||
|
||||
|
|
|
@ -608,8 +608,6 @@ fs_visitor::opt_copy_propagate_local(void *copy_prop_ctx, bblock_t *block,
|
|||
bool
|
||||
fs_visitor::opt_copy_propagate()
|
||||
{
|
||||
calculate_cfg();
|
||||
|
||||
bool progress = false;
|
||||
void *copy_prop_ctx = ralloc_context(NULL);
|
||||
exec_list *out_acp[cfg->num_blocks];
|
||||
|
|
|
@ -319,7 +319,6 @@ fs_visitor::calculate_live_intervals()
|
|||
virtual_grf_end[i] = -1;
|
||||
}
|
||||
|
||||
calculate_cfg();
|
||||
this->live_intervals = new(mem_ctx) fs_live_variables(this, cfg);
|
||||
|
||||
/* Merge the per-component live ranges to whole VGRF live ranges. */
|
||||
|
|
|
@ -45,8 +45,6 @@ fs_visitor::opt_peephole_predicated_break()
|
|||
{
|
||||
bool progress = false;
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
foreach_block (block, cfg) {
|
||||
/* BREAK and CONTINUE instructions, by definition, can only be found at
|
||||
* the ends of basic blocks.
|
||||
|
|
|
@ -670,8 +670,6 @@ fs_visitor::spill_reg(int spill_reg)
|
|||
|
||||
last_scratch += size * reg_size;
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
/* Generate spill/unspill instructions for the objects being
|
||||
* spilled. Right now, we spill or unspill the whole thing to a
|
||||
* virtual grf of the same size. For most instructions, though, we
|
||||
|
|
|
@ -127,8 +127,6 @@ fs_visitor::opt_peephole_sel()
|
|||
{
|
||||
bool progress = false;
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
foreach_block (block, cfg) {
|
||||
/* IF instructions, by definition, can only be found at the ends of
|
||||
* basic blocks.
|
||||
|
|
|
@ -413,7 +413,6 @@ public:
|
|||
this->remaining_grf_uses = NULL;
|
||||
this->grf_active = NULL;
|
||||
}
|
||||
v->calculate_cfg();
|
||||
}
|
||||
|
||||
~instruction_scheduler()
|
||||
|
|
|
@ -700,8 +700,6 @@ vec4_visitor::opt_algebraic()
|
|||
{
|
||||
bool progress = false;
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
|
||||
switch (inst->opcode) {
|
||||
case BRW_OPCODE_ADD:
|
||||
|
@ -807,8 +805,6 @@ vec4_visitor::move_push_constants_to_pull_constants()
|
|||
}
|
||||
}
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
/* Now actually rewrite usage of the things we've moved to pull
|
||||
* constants.
|
||||
*/
|
||||
|
@ -859,8 +855,6 @@ vec4_visitor::opt_set_dependency_control()
|
|||
vec4_instruction *last_mrf_write[BRW_MAX_GRF];
|
||||
uint8_t mrf_channels_written[BRW_MAX_GRF];
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
assert(prog_data->total_grf ||
|
||||
!"Must be called after register allocation");
|
||||
|
||||
|
@ -1747,6 +1741,8 @@ vec4_visitor::run()
|
|||
|
||||
emit_thread_end();
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
/* Before any optimization, push array accesses out to scratch
|
||||
* space where we need them to be. This pass may allocate new
|
||||
* virtual GRFs, so we want to do it early. It also makes sure
|
||||
|
@ -1846,8 +1842,6 @@ vec4_visitor::run()
|
|||
*/
|
||||
assert(sanity_param_count == prog->Parameters->NumParameters);
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
return !failed;
|
||||
}
|
||||
|
||||
|
|
|
@ -247,7 +247,6 @@ vec4_visitor::calculate_live_intervals()
|
|||
* The control flow-aware analysis was done at a channel level, while at
|
||||
* this point we're distilling it down to vgrfs.
|
||||
*/
|
||||
calculate_cfg();
|
||||
vec4_live_variables livevars(this, cfg);
|
||||
|
||||
foreach_block (block, cfg) {
|
||||
|
|
|
@ -327,8 +327,6 @@ vec4_visitor::spill_reg(int spill_reg_nr)
|
|||
assert(virtual_grf_sizes[spill_reg_nr] == 1);
|
||||
unsigned int spill_offset = c->last_scratch++;
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
/* Generate spill/unspill instructions for the objects being spilled. */
|
||||
foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
|
||||
for (unsigned int i = 0; i < 3; i++) {
|
||||
|
|
|
@ -3337,8 +3337,6 @@ vec4_visitor::move_grf_array_access_to_scratch()
|
|||
scratch_loc[i] = -1;
|
||||
}
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
/* First, calculate the set of virtual GRFs that need to be punted
|
||||
* to scratch due to having any array access on them, and where in
|
||||
* scratch.
|
||||
|
@ -3445,8 +3443,6 @@ vec4_visitor::move_uniform_array_access_to_pull_constants()
|
|||
pull_constant_loc[i] = -1;
|
||||
}
|
||||
|
||||
calculate_cfg();
|
||||
|
||||
/* Walk through and find array access of uniforms. Put a copy of that
|
||||
* uniform in the pull constant buffer.
|
||||
*
|
||||
|
|
Loading…
Reference in New Issue