agx: Remove nir_register support

We don't use it anymore, now that we can handle SSA form. This gets rid
of the grossest hack in the compiler.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16268>
Alyssa Rosenzweig 2022-05-01 17:51:09 -04:00
parent 3927a9e911
commit 18ef9398de
3 changed files with 5 additions and 66 deletions

--- a/src/asahi/compiler/agx_compile.c
+++ b/src/asahi/compiler/agx_compile.c

@@ -1735,49 +1735,12 @@ agx_compile_shader_nir(nir_shader *nir,
       ctx->indexed_nir_blocks =
          rzalloc_array(ctx, agx_block *, func->impl->num_blocks);
 
-      /* TODO: Handle phi nodes instead of just convert_from_ssa and yolo'ing
-       * the mapping of nir_register to hardware registers and guaranteeing bad
-       * performance and breaking spilling... */
-      ctx->nir_regalloc = rzalloc_array(ctx, unsigned, func->impl->reg_alloc);
-
-      /* Leave the last 4 registers for hacky p-copy lowering */
-      unsigned nir_regalloc = AGX_NUM_REGS - (4 * 2);
-
-      /* Assign backwards so we don't need to guess a size */
-      nir_foreach_register(reg, &func->impl->registers) {
-         /* Ensure alignment */
-         if (reg->bit_size >= 32 && (nir_regalloc & 1))
-            nir_regalloc--;
-
-         unsigned size = DIV_ROUND_UP(reg->bit_size * reg->num_components, 16);
-         nir_regalloc -= size;
-         ctx->nir_regalloc[reg->index] = nir_regalloc;
-      }
-
-      ctx->max_register = nir_regalloc;
       ctx->alloc += func->impl->ssa_alloc;
       emit_cf_list(ctx, &func->impl->body);
       agx_emit_phis_deferred(ctx);
       break; /* TODO: Multi-function shaders */
    }
 
-   /* TODO: Actual RA... this way passes don't need to deal nir_register */
-   agx_foreach_instr_global(ctx, I) {
-      agx_foreach_dest(I, d) {
-         if (I->dest[d].type == AGX_INDEX_NIR_REGISTER) {
-            I->dest[d].type = AGX_INDEX_REGISTER;
-            I->dest[d].value = ctx->nir_regalloc[I->dest[d].value];
-         }
-      }
-
-      agx_foreach_src(I, s) {
-         if (I->src[s].type == AGX_INDEX_NIR_REGISTER) {
-            I->src[s].type = AGX_INDEX_REGISTER;
-            I->src[s].value = ctx->nir_regalloc[I->src[s].value];
-         }
-      }
-   }
-
    /* Terminate the shader after the exit block */
    agx_block *last_block = list_last_entry(&ctx->blocks, agx_block, link);
    agx_builder _b = agx_init_builder(ctx, agx_after_block(last_block));
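
Aside: to make the deleted block concrete, here is a standalone sketch of the
same arithmetic, with simplified stand-in types and an assumed AGX_NUM_REGS of
256 sixteen-bit halves. Each nir_register gets a fixed slice of the register
file, assigned from the top down and never reused:

   #include <stdio.h>

   #define AGX_NUM_REGS 256 /* register file size in 16-bit halves (assumed) */
   #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

   struct fake_reg { unsigned index, bit_size, num_components; };

   int main(void)
   {
      struct fake_reg regs[] = {
         { 0, 32, 4 }, /* 32-bit vec4 -> 8 halves */
         { 1, 16, 2 }, /* 16-bit vec2 -> 2 halves */
      };
      unsigned map[2];

      /* Leave the last 4 registers (8 halves) for hacky p-copy lowering */
      unsigned next = AGX_NUM_REGS - (4 * 2);

      for (unsigned i = 0; i < 2; ++i) {
         /* 32-bit values must start on an even (register-aligned) half */
         if (regs[i].bit_size >= 32 && (next & 1))
            next--;

         next -= DIV_ROUND_UP(regs[i].bit_size * regs[i].num_components, 16);
         map[regs[i].index] = next;
      }

      /* prints: reg0 -> half 240, reg1 -> half 238, RA ceiling 238 */
      printf("reg0 -> half %u, reg1 -> half %u, RA ceiling %u\n",
             map[0], map[1], next);
      return 0;
   }

Every slice stays reserved for the whole shader, which is why the deleted
comment concedes it guarantees bad performance (occupancy drops with every
nir_register) and breaks spilling (the reserved top of the file is never
freed).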

--- a/src/asahi/compiler/agx_compiler.h
+++ b/src/asahi/compiler/agx_compiler.h

@@ -58,7 +58,6 @@ enum agx_index_type {
    AGX_INDEX_IMMEDIATE = 2,
    AGX_INDEX_UNIFORM = 3,
    AGX_INDEX_REGISTER = 4,
-   AGX_INDEX_NIR_REGISTER = 5,
 };
 
 enum agx_size {
@@ -138,16 +137,6 @@ agx_register(uint8_t imm, enum agx_size size)
    };
 }
 
-static inline agx_index
-agx_nir_register(unsigned imm, enum agx_size size)
-{
-   return (agx_index) {
-      .value = imm,
-      .size = size,
-      .type = AGX_INDEX_NIR_REGISTER,
-   };
-}
-
 /* Also in half-words */
 static inline agx_index
 agx_uniform(uint8_t imm, enum agx_size size)
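
For context, agx_index is a tagged descriptor of value/size/type, as the
initializer above shows. The deleted constructor mirrored agx_register() but
carried a NIR-side index under a distinct tag; a hypothetical pair for
comparison (index values made up):

   /* A real hardware register, numbered in 16-bit halves. */
   agx_index hw = agx_register(4, AGX_SIZE_32);

   /* What the deleted helper returned: .value is a nir_register index,
    * meaningless as a hardware number until the (also deleted) fixup
    * loop rewrote the tag to AGX_INDEX_REGISTER. */
   agx_index vreg = (agx_index) {
      .value = 7,
      .size = AGX_SIZE_32,
      .type = AGX_INDEX_NIR_REGISTER,
   };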
@@ -389,13 +378,6 @@ typedef struct {
    /* Remapping table for varyings indexed by driver_location */
    unsigned varyings[AGX_MAX_VARYINGS];
 
-   /* Handling phi nodes is still TODO while we bring up other parts of the
-    * driver. YOLO the mapping of nir_register to fixed hardware registers */
-   unsigned *nir_regalloc;
-
-   /* We reserve the top (XXX: that hurts thread count) */
-   unsigned max_register;
-
    /* Place to start pushing new values */
    unsigned push_base;
 
@@ -460,10 +442,7 @@ agx_size_for_bits(unsigned bits)
 static inline agx_index
 agx_src_index(nir_src *src)
 {
-   if (!src->is_ssa) {
-      return agx_nir_register(src->reg.reg->index,
-                              agx_size_for_bits(nir_src_bit_size(*src)));
-   }
+   assert(src->is_ssa);
 
    return agx_get_index(src->ssa->index,
                         agx_size_for_bits(nir_src_bit_size(*src)));
@@ -472,10 +451,7 @@ agx_src_index(nir_src *src)
 static inline agx_index
 agx_dest_index(nir_dest *dst)
 {
-   if (!dst->is_ssa) {
-      return agx_nir_register(dst->reg.reg->index,
-                              agx_size_for_bits(nir_dest_bit_size(*dst)));
-   }
+   assert(dst->is_ssa);
 
    return agx_get_index(dst->ssa.index,
                         agx_size_for_bits(nir_dest_bit_size(*dst)));
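
The TODO deleted from agx_compile.c described the old flow: run
nir_convert_from_ssa, then YOLO the resulting nir_registers onto hardware
registers. The asserts above replace that fallback with a hard requirement
that shaders reach the backend in SSA form. A hypothetical call site
(instruction and field names assumed, not from this patch):

   /* Both helpers now assert is_ssa rather than silently returning a
    * fixed register; `instr` here is a hypothetical nir_alu_instr. */
   agx_index dst = agx_dest_index(&instr->dest.dest);
   agx_index s0 = agx_src_index(&instr->src[0].src);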

--- a/src/asahi/compiler/agx_register_allocate.c
+++ b/src/asahi/compiler/agx_register_allocate.c

@@ -92,7 +92,7 @@ agx_assign_regs(BITSET_WORD *used_regs, unsigned count, unsigned align, unsigned
 
 /** Assign registers to SSA values in a block. */
 static void
-agx_ra_assign_local(agx_block *block, uint8_t *ssa_to_reg, uint8_t *ncomps, unsigned max_reg)
+agx_ra_assign_local(agx_block *block, uint8_t *ssa_to_reg, uint8_t *ncomps)
 {
    BITSET_DECLARE(used_regs, AGX_NUM_REGS) = { 0 };
 
@@ -164,7 +164,7 @@ agx_ra_assign_local(agx_block *block, uint8_t *ssa_to_reg, uint8_t *ncomps, unsi
       if (I->dest[d].type == AGX_INDEX_NORMAL) {
          unsigned count = agx_write_registers(I, d);
          unsigned align = (I->dest[d].size == AGX_SIZE_16) ? 1 : 2;
-         unsigned reg = agx_assign_regs(used_regs, count, align, max_reg);
+         unsigned reg = agx_assign_regs(used_regs, count, align, AGX_NUM_REGS);
 
          ssa_to_reg[I->dest[d].value] = reg;
       }
@@ -279,7 +279,7 @@ agx_ra(agx_context *ctx)
    * to a NIR invariant, so we do not need special handling for this.
    */
 
    agx_foreach_block(ctx, block) {
-      agx_ra_assign_local(block, ssa_to_reg, ncomps, ctx->max_register);
+      agx_ra_assign_local(block, ssa_to_reg, ncomps);
    }
 
    agx_foreach_instr_global(ctx, ins) {