pan/midgard: Break mir_spill_register out into its own function
No functional changes, just breaks out a megamonster function and fixes the indentation.

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
commit e94239b9a4
parent d4bcca19da
@@ -732,39 +732,21 @@ v_load_store_scratch(
         return ins;
 }
 
-void
-schedule_program(compiler_context *ctx)
-{
-        struct ra_graph *g = NULL;
-        bool spilled = false;
-        int iter_count = 1000; /* max iterations */
-
-        /* Number of 128-bit slots in memory we've spilled into */
-        unsigned spill_count = 0;
-
-        midgard_promote_uniforms(ctx, 8);
-
-        mir_foreach_block(ctx, block) {
-                midgard_pair_load_store(ctx, block);
-        }
-
-        /* Must be lowered right before RA */
-        mir_squeeze_index(ctx);
-        mir_lower_special_reads(ctx);
-
-        /* Lowering can introduce some dead moves */
-
-        mir_foreach_block(ctx, block) {
-                midgard_opt_dead_move_eliminate(ctx, block);
-        }
-
-        do {
-                /* If we spill, find the best spill node and spill it */
-
-                unsigned spill_index = ctx->temp_count;
-                if (g && spilled) {
-                        /* All nodes are equal in spill cost, but we can't
-                         * spill nodes written to from an unspill */
-
-                        for (unsigned i = 0; i < ctx->temp_count; ++i) {
-                                ra_set_node_spill_cost(g, i, 1.0);
-                        }
+/* If register allocation fails, find the best spill node and spill it to fix
+ * whatever the issue was. This spill node could be a work register (spilling
+ * to thread local storage), but it could also simply be a special register
+ * that needs to spill to become a work register. */
+
+static void mir_spill_register(
+                compiler_context *ctx,
+                struct ra_graph *g,
+                unsigned *spill_count)
+{
+        unsigned spill_index = ctx->temp_count;
+
+        /* Our first step is to calculate spill cost to figure out the best
+         * spill node. All nodes are equal in spill cost, but we can't spill
+         * nodes written to from an unspill */
+
+        for (unsigned i = 0; i < ctx->temp_count; ++i) {
+                ra_set_node_spill_cost(g, i, 1.0);
+        }
@@ -785,15 +767,16 @@ schedule_program(compiler_context *ctx)
                 assert(0);
         }
 
-                        /* Check the class. Work registers legitimately spill
-                         * to TLS, but special registers just spill to work
-                         * registers */
-
-                        unsigned class = ra_get_node_class(g, spill_node);
-                        bool is_special = (class >> 2) != REG_CLASS_WORK;
-                        bool is_special_w = (class >> 2) == REG_CLASS_TEXW;
-
-                        /* Allocate TLS slot (maybe) */
-                        unsigned spill_slot = !is_special ? spill_count++ : 0;
-                        midgard_instruction *spill_move = NULL;
+        /* We have a spill node, so check the class. Work registers
+         * legitimately spill to TLS, but special registers just spill to work
+         * registers */
+
+        unsigned class = ra_get_node_class(g, spill_node);
+        bool is_special = (class >> 2) != REG_CLASS_WORK;
+        bool is_special_w = (class >> 2) == REG_CLASS_TEXW;
+
+        /* Allocate TLS slot (maybe) */
+        unsigned spill_slot = !is_special ? (*spill_count)++ : 0;
+        midgard_instruction *spill_move = NULL;
 
         /* For TLS, replace all stores to the spilled node. For
@@ -831,7 +814,6 @@ schedule_program(compiler_context *ctx)
-                         * spilling is to use memory to back work registers) */
-
-                        mir_foreach_block(ctx, block) {
-
-                                bool consecutive_skip = false;
-                                unsigned consecutive_index = 0;
+         * spilling is to use memory to back work registers) */
+
+        mir_foreach_block(ctx, block) {
+                bool consecutive_skip = false;
+                unsigned consecutive_index = 0;
@@ -886,6 +868,36 @@ schedule_program(compiler_context *ctx)
                 }
         }
+}
+
+void
+schedule_program(compiler_context *ctx)
+{
+        struct ra_graph *g = NULL;
+        bool spilled = false;
+        int iter_count = 1000; /* max iterations */
+
+        /* Number of 128-bit slots in memory we've spilled into */
+        unsigned spill_count = 0;
+
+        midgard_promote_uniforms(ctx, 8);
+
+        mir_foreach_block(ctx, block) {
+                midgard_pair_load_store(ctx, block);
+        }
+
+        /* Must be lowered right before RA */
+        mir_squeeze_index(ctx);
+        mir_lower_special_reads(ctx);
+
+        /* Lowering can introduce some dead moves */
+
+        mir_foreach_block(ctx, block) {
+                midgard_opt_dead_move_eliminate(ctx, block);
+        }
+
+        do {
+                if (spilled)
+                        mir_spill_register(ctx, g, &spill_count);
+
                 mir_squeeze_index(ctx);
 
                 g = NULL;