/*
 * Copyright (C) 2021 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "agx_compiler.h"
#include "agx_builder.h"

/* Trivial register allocator that never frees anything.
 *
 * TODO: Write a real register allocator.
 * TODO: Handle phi nodes.
 */

/** Returns number of registers written by an instruction */
unsigned
agx_write_registers(agx_instr *I, unsigned d)
{
   unsigned size = I->dest[d].size == AGX_SIZE_32 ? 2 : 1;

   switch (I->op) {
   case AGX_OPCODE_LD_VARY:
      assert(1 <= I->channels && I->channels <= 4);
      return I->channels * size;
   case AGX_OPCODE_DEVICE_LOAD:
   case AGX_OPCODE_TEXTURE_SAMPLE:
   case AGX_OPCODE_LD_TILE:
      /* TODO: mask */
      return 4 * size;
   case AGX_OPCODE_LD_VARY_FLAT:
      return 6;
   case AGX_OPCODE_P_COMBINE:
   {
      unsigned components = 0;

      for (unsigned i = 0; i < 4; ++i) {
         if (!agx_is_null(I->src[i]))
            components = i + 1;
      }

      return components * size;
   }
   default:
      return size;
   }
}

static unsigned
agx_assign_regs(BITSET_WORD *used_regs, unsigned count, unsigned align,
                unsigned max)
{
   for (unsigned reg = 0; reg < max; reg += align) {
      bool conflict = false;

      for (unsigned j = 0; j < count; ++j)
         conflict |= BITSET_TEST(used_regs, reg + j);

      if (!conflict) {
         for (unsigned j = 0; j < count; ++j)
            BITSET_SET(used_regs, reg + j);

         return reg;
      }
   }

   /* Couldn't find a free register, dump the state of the register file */
   fprintf(stderr, "Failed to find register of size %u aligned %u max %u.\n",
           count, align, max);

   fprintf(stderr, "Register file:\n");
   for (unsigned i = 0; i < BITSET_WORDS(max); ++i)
      fprintf(stderr, " %08X\n", used_regs[i]);

   unreachable("Could not find a free register");
}

/** Assign registers to SSA values in a block.
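 *
 * The used-register set is seeded with the union of every predecessor's
 * regs_out, so values live into the block keep their assigned registers.
 * Killed sources are freed before each instruction's destinations are
 * assigned, which is always legal in SSA form.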
 */
static void
agx_ra_assign_local(agx_block *block, uint8_t *ssa_to_reg, uint8_t *ncomps,
                    unsigned max_reg)
{
   BITSET_DECLARE(used_regs, AGX_NUM_REGS) = { 0 };

   agx_foreach_predecessor(block, pred) {
      for (unsigned i = 0; i < BITSET_WORDS(AGX_NUM_REGS); ++i)
         used_regs[i] |= pred->regs_out[i];
   }

   BITSET_SET(used_regs, 0); // control flow writes r0l
   BITSET_SET(used_regs, 5*2); // TODO: precolouring, don't overwrite vertex ID
   BITSET_SET(used_regs, (5*2 + 1));
   BITSET_SET(used_regs, (6*2 + 0));
   BITSET_SET(used_regs, (6*2 + 1));

   agx_foreach_instr_in_block(block, I) {
      /* First, free killed sources */
      agx_foreach_src(I, s) {
         if (I->src[s].type == AGX_INDEX_NORMAL && I->src[s].kill) {
            unsigned reg = ssa_to_reg[I->src[s].value];
            unsigned count = ncomps[I->src[s].value];

            for (unsigned i = 0; i < count; ++i)
               BITSET_CLEAR(used_regs, reg + i);
         }
      }

      /* Next, assign destinations. Always legal in SSA form. */
      agx_foreach_dest(I, d) {
         if (I->dest[d].type == AGX_INDEX_NORMAL) {
            unsigned count = agx_write_registers(I, d);
            unsigned align = (I->dest[d].size == AGX_SIZE_16) ? 1 : 2;
            unsigned reg = agx_assign_regs(used_regs, count, align, max_reg);

            ssa_to_reg[I->dest[d].value] = reg;
         }
      }
   }

   STATIC_ASSERT(sizeof(block->regs_out) == sizeof(used_regs));
   memcpy(block->regs_out, used_regs, sizeof(used_regs));
}

/*
 * Resolve an agx_index of type NORMAL or REGISTER to a physical register, once
 * registers have been allocated for all SSA values.
 */
static unsigned
agx_index_to_reg(uint8_t *ssa_to_reg, agx_index idx)
{
   if (idx.type == AGX_INDEX_NORMAL) {
      return ssa_to_reg[idx.value];
   } else {
      assert(idx.type == AGX_INDEX_REGISTER);
      return idx.value;
   }
}

void
agx_ra(agx_context *ctx)
{
   unsigned *alloc = calloc(ctx->alloc, sizeof(unsigned));

   agx_compute_liveness(ctx);

   uint8_t *ssa_to_reg = calloc(ctx->alloc, sizeof(uint8_t));
   uint8_t *ncomps = calloc(ctx->alloc, sizeof(uint8_t));

   agx_foreach_instr_global(ctx, I) {
      agx_foreach_dest(I, d) {
         if (I->dest[d].type != AGX_INDEX_NORMAL) continue;

         unsigned v = I->dest[d].value;
         assert(ncomps[v] == 0 && "broken SSA");
         ncomps[v] = agx_write_registers(I, d);
      }
   }

   /* Assign registers in dominance-order. This coincides with source-order due
    * to a NIR invariant, so we do not need special handling for this.
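    *
    * In particular, every SSA definition is visited before its uses, so the
    * ssa_to_reg entry for a killed source is already valid when it is freed.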
    */
   agx_foreach_block(ctx, block) {
      agx_ra_assign_local(block, ssa_to_reg, ncomps, ctx->max_register);
   }

   /* TODO: Coalesce combines */

   agx_foreach_instr_global_safe(ctx, ins) {
      /* Lower away RA pseudo-instructions */
      if (ins->op == AGX_OPCODE_P_COMBINE) {
         unsigned base = agx_index_to_reg(ssa_to_reg, ins->dest[0]);
         unsigned width = agx_size_align_16(ins->dest[0].size);

         struct agx_copy copies[4];
         unsigned n = 0;

         /* Move the sources */
         for (unsigned i = 0; i < 4; ++i) {
            if (agx_is_null(ins->src[i])) continue;
            assert(ins->src[i].size == ins->dest[0].size);

            copies[n++] = (struct agx_copy) {
               .dest = base + (i * width),
               .src = agx_index_to_reg(ssa_to_reg, ins->src[i]),
               .size = ins->src[i].size
            };
         }

         /* Lower away the combine pseudo-instruction by emitting its copies,
          * then remove it so no pseudo-op survives to encoding */
         agx_builder b = agx_init_builder(ctx, agx_after_instr(ins));
         agx_emit_parallel_copies(&b, copies, n);

         agx_remove_instruction(ins);
         continue;
      } else if (ins->op == AGX_OPCODE_P_EXTRACT) {
         /* Uses the destination size */
         unsigned size = agx_size_align_16(ins->dest[0].size);
         unsigned left = agx_index_to_reg(ssa_to_reg, ins->dest[0]);
         unsigned right = agx_index_to_reg(ssa_to_reg, ins->src[0])
                        + (size * ins->imm);

         if (left != right) {
            agx_builder b = agx_init_builder(ctx, agx_after_instr(ins));
            agx_mov_to(&b, agx_register(left, ins->dest[0].size),
                       agx_register(right, ins->src[0].size));
         }

         agx_remove_instruction(ins);
         continue;
      }

      agx_foreach_src(ins, s) {
         if (ins->src[s].type == AGX_INDEX_NORMAL) {
            unsigned v = ssa_to_reg[ins->src[s].value];
            ins->src[s] = agx_replace_index(ins->src[s],
                                            agx_register(v, ins->src[s].size));
         }
      }

      agx_foreach_dest(ins, d) {
         if (ins->dest[d].type == AGX_INDEX_NORMAL) {
            unsigned v = ssa_to_reg[ins->dest[d].value];
            ins->dest[d] = agx_replace_index(ins->dest[d],
                                             agx_register(v, ins->dest[d].size));
         }
      }
   }

   free(ssa_to_reg);
   free(ncomps);
   free(alloc);
}