diff --git a/src/panfrost/bifrost/bifrost_opts.c b/src/panfrost/bifrost/bifrost_opts.c
new file mode 100644
index 00000000000..58c9888cc38
--- /dev/null
+++ b/src/panfrost/bifrost/bifrost_opts.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2019 Ryan Houdek
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "bifrost_opts.h"
+#include "compiler_defines.h"
+
+bool
+bifrost_opt_branch_fusion(compiler_context *ctx, bifrost_block *block)
+{
+        bool progress = false;
+        mir_foreach_instr_in_block_safe(block, instr) {
+                if (instr->op != op_branch) continue;
+                if (instr->literal_args[0] != BR_COND_EQ) continue;
+
+                unsigned src1 = instr->ssa_args.src0;
+
+                // Only work on SSA values
+                if (src1 >= SSA_FIXED_MINIMUM) continue;
+
+                // Find the source of this conditional branch instruction.
+                // It will be a CSEL instruction.
+                // If its comparison is one of the ops that our conditional branch supports,
+                // then we can merge the two.
+                mir_foreach_instr_in_block_from_rev(block, next_instr, instr) {
+                        if (next_instr->op != op_csel_i32) continue;
+
+                        if (next_instr->ssa_args.dest == src1) {
+                                // We found the CSEL instruction that is the source here.
+                                // Check its condition to make sure it matches what we can fuse.
+                                unsigned cond = next_instr->literal_args[0];
+                                if (cond == CSEL_IEQ) {
+                                        // This CSEL is doing an IEQ for our conditional branch doing EQ,
+                                        // so we can just emit a conditional branch that does the comparison.
+                                        struct bifrost_instruction new_instr = {
+                                                .op = op_branch,
+                                                .dest_components = 0,
+                                                .ssa_args = {
+                                                        .dest = SSA_INVALID_VALUE,
+                                                        .src0 = next_instr->ssa_args.src0,
+                                                        .src1 = next_instr->ssa_args.src1,
+                                                        .src2 = SSA_INVALID_VALUE,
+                                                        .src3 = SSA_INVALID_VALUE,
+                                                },
+                                                .literal_args[0] = BR_COND_EQ,
+                                                .literal_args[1] = instr->literal_args[1],
+                                        };
+                                        mir_insert_instr_before(instr, new_instr);
+                                        mir_remove_instr(instr);
+                                        progress |= true;
+                                        break;
+                                }
+                        }
+                }
+        }
+
+        return progress;
+}
+
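For context, the pass above turns a `csel.ieq dst, a, b` feeding a `branch.eq dst` into a single branch that compares a and b directly, so the CSEL result no longer drives control flow. Below is a minimal standalone sketch of that peephole; it uses hypothetical demo_* names rather than the MIR types from this patch, and is illustrative only, not part of the series.

/* demo_fuse.c -- illustrative only, not part of this patch */
#include <stdbool.h>
#include <stdio.h>

enum demo_op { DEMO_CSEL_IEQ, DEMO_BRANCH_EQ };

struct demo_instr {
        enum demo_op op;
        int dest;          /* -1 when the instruction has no destination */
        int src0, src1;
};

/* Fuse a branch-on-equal with the integer-equality CSEL that produces its
 * condition: afterwards the branch compares the CSEL operands directly. */
static bool demo_fuse(const struct demo_instr *csel, struct demo_instr *branch)
{
        if (csel->op != DEMO_CSEL_IEQ || branch->op != DEMO_BRANCH_EQ)
                return false;
        if (branch->src0 != csel->dest)
                return false;

        branch->src0 = csel->src0;
        branch->src1 = csel->src1;
        return true;
}

int main(void)
{
        struct demo_instr csel   = { DEMO_CSEL_IEQ,  3, 1, 2 };
        struct demo_instr branch = { DEMO_BRANCH_EQ, -1, 3, -1 };

        if (demo_fuse(&csel, &branch))
                printf("fused: branch.eq r%d, r%d\n", branch.src0, branch.src1);
        return 0;
}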
diff --git a/src/panfrost/bifrost/bifrost_opts.h b/src/panfrost/bifrost/bifrost_opts.h
new file mode 100644
index 00000000000..b152cdd09fd
--- /dev/null
+++ b/src/panfrost/bifrost/bifrost_opts.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019 Ryan Houdek
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef bifrost_opts_h
+#define bifrost_opts_h
+#include "compiler_defines.h"
+
+#include <stdbool.h>
+
+bool
+bifrost_opt_branch_fusion(compiler_context *ctx, bifrost_block *block);
+
+#endif /* bifrost_opts_h */
diff --git a/src/panfrost/bifrost/bifrost_sched.c b/src/panfrost/bifrost/bifrost_sched.c
new file mode 100644
index 00000000000..3432cb35241
--- /dev/null
+++ b/src/panfrost/bifrost/bifrost_sched.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2019 Ryan Houdek
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "util/register_allocate.h"
+#include "compiler_defines.h"
+#include "bifrost_sched.h"
+#include "bifrost_compile.h"
+#include "bifrost_print.h"
+
+#define BI_DEBUG
+const unsigned max_primary_reg = 64; // XXX: Not correct since there are special ones in the top end
+const unsigned max_vec2_reg = max_primary_reg / 2;
+const unsigned max_vec3_reg = max_primary_reg / 4; // XXX: Do we need to align vec3 to a vec4 boundary?
+const unsigned max_vec4_reg = max_primary_reg / 4;
+const unsigned max_registers = max_primary_reg +
+                               max_vec2_reg +
+                               max_vec3_reg +
+                               max_vec4_reg;
+const unsigned primary_base = 0;
+const unsigned vec2_base = primary_base + max_primary_reg;
+const unsigned vec3_base = vec2_base + max_vec2_reg;
+const unsigned vec4_base = vec3_base + max_vec3_reg;
+const unsigned vec4_end = vec4_base + max_vec4_reg;
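These constants carve one flat register-allocator index space into aliasing windows: scalar nodes at [0, 64), vec2 at [64, 96), vec3 at [96, 112) and vec4 at [112, 128). The standalone sketch below shows how a class-relative index maps back to its first scalar register, mirroring ra_get_phys_reg() further down; it is illustrative only (not part of the patch) and the demo_* names are hypothetical.

/* demo_regspace.c -- illustrative only, not part of this patch */
#include <stdio.h>

#define MAX_PRIMARY 64u
#define VEC2_BASE   (MAX_PRIMARY)                  /*  64 */
#define VEC3_BASE   (VEC2_BASE + MAX_PRIMARY / 2)  /*  96 */
#define VEC4_BASE   (VEC3_BASE + MAX_PRIMARY / 4)  /* 112 */

/* Map a register in the combined RA space back to the first scalar register
 * it occupies; vec3 values sit in vec4-aligned slots. */
static unsigned demo_phys_base(unsigned ra_reg)
{
        if (ra_reg >= VEC4_BASE) return (ra_reg - VEC4_BASE) * 4;
        if (ra_reg >= VEC3_BASE) return (ra_reg - VEC3_BASE) * 4;
        if (ra_reg >= VEC2_BASE) return (ra_reg - VEC2_BASE) * 2;
        return ra_reg;
}

int main(void)
{
        unsigned v2 = demo_phys_base(VEC2_BASE + 5);
        unsigned v3 = demo_phys_base(VEC3_BASE + 5);
        unsigned v4 = demo_phys_base(VEC4_BASE + 5);
        printf("vec2 #5 -> r%u..r%u\n", v2, v2 + 1);
        printf("vec3 #5 -> r%u..r%u\n", v3, v3 + 2);
        printf("vec4 #5 -> r%u..r%u\n", v4, v4 + 3);
        return 0;
}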
+
+static unsigned
+find_or_allocate_temp(compiler_context *ctx, unsigned hash)
+{
+        if (hash >= SSA_FIXED_MINIMUM)
+                return hash;
+
+        unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(ctx->hash_to_temp, hash + 1);
+
+        if (temp)
+                return temp - 1;
+
+        /* If no temp is found, allocate one */
+        temp = ctx->num_temps++;
+        ctx->max_hash = MAX2(ctx->max_hash, hash);
+
+        _mesa_hash_table_u64_insert(ctx->hash_to_temp, hash + 1, (void *) ((uintptr_t) temp + 1));
+
+        return temp;
+}
+
+static bool
+is_live_in_instr(bifrost_instruction *instr, unsigned temp)
+{
+        if (instr->ssa_args.src0 == temp) return true;
+        if (instr->ssa_args.src1 == temp) return true;
+        if (instr->ssa_args.src2 == temp) return true;
+        if (instr->ssa_args.src3 == temp) return true;
+
+        return false;
+}
+
+static bool
+is_live_after_instr(compiler_context *ctx, bifrost_block *blk, bifrost_instruction *instr, unsigned temp)
+{
+        // Scan forward in the block from this location to see if we are still live.
+
+        mir_foreach_instr_in_block_from(blk, ins, mir_next_instr(instr)) {
+                if (is_live_in_instr(ins, temp))
+                        return true;
+        }
+
+        // XXX: Walk all successor blocks and ensure the value isn't used there
+
+        return false;
+}
+
+static uint32_t
+ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
+{
+        for (int i = primary_base; i < vec4_end; ++i) {
+                if (BITSET_TEST(regs, i)) {
+                        return i;
+                }
+        }
+
+        assert(0);
+        return 0;
+}
+
+static uint32_t
+ra_get_phys_reg(compiler_context *ctx, struct ra_graph *g, unsigned temp, unsigned max_reg)
+{
+        if (temp == SSA_INVALID_VALUE ||
+            temp >= SSA_FIXED_UREG_MINIMUM ||
+            temp == SSA_FIXED_CONST_0)
+                return temp;
+
+        if (temp >= SSA_FIXED_MINIMUM)
+                return SSA_REG_FROM_FIXED(temp);
+
+        assert(temp < max_reg);
+        uint32_t r = ra_get_node_reg(g, temp);
+        if (r >= vec4_base)
+                return (r - vec4_base) * 4;
+        else if (r >= vec3_base)
+                return (r - vec3_base) * 4;
+        else if (r >= vec2_base)
+                return (r - vec2_base) * 2;
+
+        return r;
+}
+
+static void
+allocate_registers(compiler_context *ctx)
+{
+        struct ra_regs *regs = ra_alloc_reg_set(NULL, max_registers, true);
+
+        int primary_class = ra_alloc_reg_class(regs);
+        int vec2_class = ra_alloc_reg_class(regs);
+        int vec3_class = ra_alloc_reg_class(regs);
+        int vec4_class = ra_alloc_reg_class(regs);
+
+        // Allocate our register classes and conflicts
+        {
+                unsigned reg = 0;
+                unsigned primary_base = 0;
+
+                // Add all of our primary scalar registers
+                for (unsigned i = 0; i < max_primary_reg; ++i) {
+                        ra_class_add_reg(regs, primary_class, reg);
+                        reg++;
+                }
+
+                // Add all of our vec2 class registers
+                // These alias with the scalar registers
+                for (unsigned i = 0; i < max_vec2_reg; ++i) {
+                        ra_class_add_reg(regs, vec2_class, reg);
+
+                        // Tell RA that this conflicts with primary class registers
+                        // Make sure to tell the RA utility all conflict slots
+                        ra_add_reg_conflict(regs, reg, primary_base + i*2 + 0);
+                        ra_add_reg_conflict(regs, reg, primary_base + i*2 + 1);
+
+                        reg++;
+                }
+
+                // Add all of our vec3 class registers
+                // These alias with the scalar registers
+                for (unsigned i = 0; i < max_vec3_reg; ++i) {
+                        ra_class_add_reg(regs, vec3_class, reg);
+
+                        // Tell RA that this conflicts with primary class registers
+                        // Make sure to tell the RA utility all conflict slots
+                        // These are aligned to vec4 even though they only conflict with a vec3-wide slot
+                        ra_add_reg_conflict(regs, reg, primary_base + i*4 + 0);
+                        ra_add_reg_conflict(regs, reg, primary_base + i*4 + 1);
+                        ra_add_reg_conflict(regs, reg, primary_base + i*4 + 2);
+
+                        // State that this class conflicts with the vec2 class
+                        ra_add_reg_conflict(regs, reg, vec2_base + i*2 + 0);
+                        ra_add_reg_conflict(regs, reg, vec2_base + i*2 + 1);
+
+                        reg++;
+                }
+
+                // Add all of our vec4 class registers
+                // These alias with the scalar registers
+                for (unsigned i = 0; i < max_vec4_reg; ++i) {
+                        ra_class_add_reg(regs, vec4_class, reg);
+
+                        // Tell RA that this conflicts with primary class registers
+                        // Make sure to tell the RA utility all conflict slots
+                        // These conflict with a full vec4-wide slot
+                        ra_add_reg_conflict(regs, reg, primary_base + i*4 + 0);
+                        ra_add_reg_conflict(regs, reg, primary_base + i*4 + 1);
+                        ra_add_reg_conflict(regs, reg, primary_base + i*4 + 2);
+                        ra_add_reg_conflict(regs, reg, primary_base + i*4 + 3);
+
+                        // State that this class conflicts with the vec2 class
+                        ra_add_reg_conflict(regs, reg, vec2_base + i*2 + 0);
+                        ra_add_reg_conflict(regs, reg, vec2_base + i*2 + 1);
+
+                        // State that this class conflicts with the vec3 class
+                        // They conflict on the exact same location due to alignments
+                        ra_add_reg_conflict(regs, reg, vec3_base + i);
+
+                        reg++;
+                }
+        }
+
+        ra_set_finalize(regs, NULL);
+        mir_foreach_block(ctx, block) {
+                mir_foreach_instr_in_block(block, instr) {
+                        instr->ssa_args.src0 = find_or_allocate_temp(ctx, instr->ssa_args.src0);
+                        instr->ssa_args.src1 = find_or_allocate_temp(ctx, instr->ssa_args.src1);
+                        instr->ssa_args.src2 = find_or_allocate_temp(ctx, instr->ssa_args.src2);
+                        instr->ssa_args.src3 = find_or_allocate_temp(ctx, instr->ssa_args.src3);
+                        instr->ssa_args.dest = find_or_allocate_temp(ctx, instr->ssa_args.dest);
+                }
+        }
+
+        uint32_t nodes = ctx->num_temps;
+        struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);
+
+        mir_foreach_block(ctx, block) {
+                mir_foreach_instr_in_block(block, instr) {
+                        if (instr->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
+                        if (instr->dest_components == 4)
+                                ra_set_node_class(g, instr->ssa_args.dest, vec4_class);
+                        else if (instr->dest_components == 3)
+                                ra_set_node_class(g, instr->ssa_args.dest, vec3_class);
+                        else if (instr->dest_components == 2)
+                                ra_set_node_class(g, instr->ssa_args.dest, vec2_class);
+                        else
+                                ra_set_node_class(g, instr->ssa_args.dest, primary_class);
+                }
+        }
+
+        uint32_t *live_start = malloc(nodes * sizeof(uint32_t));
+        uint32_t *live_end = malloc(nodes * sizeof(uint32_t));
+
+        memset(live_start, 0xFF, nodes * sizeof(uint32_t));
+        memset(live_end, 0xFF, nodes * sizeof(uint32_t));
+
+        uint32_t location = 0;
+        mir_foreach_block(ctx, block) {
+                mir_foreach_instr_in_block(block, instr) {
+                        if (instr->ssa_args.dest < SSA_FIXED_MINIMUM) {
+                                // If the destination isn't yet live before this point
+                                // then this is the point it becomes live since we wrote to it
+                                if (live_start[instr->ssa_args.dest] == ~0U) {
+                                        live_start[instr->ssa_args.dest] = location;
+                                }
+                        }
+
+                        uint32_t sources[4] = {
+                                instr->ssa_args.src0,
+                                instr->ssa_args.src1,
+                                instr->ssa_args.src2,
+                                instr->ssa_args.src3,
+                        };
+
+                        for (unsigned i = 0; i < 4; ++i) {
+                                if (sources[i] >= SSA_FIXED_MINIMUM)
+                                        continue;
+
+                                // If the source is no longer live after this instruction then we can end its liveness
+                                if (!is_live_after_instr(ctx, block, instr, sources[i])) {
+                                        live_end[sources[i]] = location;
+                                }
+                        }
+                        ++location;
+                }
+        }
+
+        // Spin through the nodes quickly and ensure they are all killed by the end of the program
+        for (unsigned i = 0; i < nodes; ++i) {
+                if (live_end[i] == ~0U)
+                        live_end[i] = location;
+        }
+
+        for (int i = 0; i < nodes; ++i) {
+                for (int j = i + 1; j < nodes; ++j) {
+                        if (!(live_start[i] >= live_end[j] || live_start[j] >= live_end[i])) {
+                                ra_add_node_interference(g, i, j);
+                        }
+                }
+        }
+
+        ra_set_select_reg_callback(g, ra_select_callback, NULL);
+
+        if (!ra_allocate(g)) {
+                assert(0);
+        }
+
+        free(live_start);
+        free(live_end);
+
+        mir_foreach_block(ctx, block) {
+                mir_foreach_instr_in_block(block, instr) {
+                        instr->args.src0 = ra_get_phys_reg(ctx, g, instr->ssa_args.src0, nodes);
+                        instr->args.src1 = ra_get_phys_reg(ctx, g, instr->ssa_args.src1, nodes);
+                        instr->args.src2 = ra_get_phys_reg(ctx, g, instr->ssa_args.src2, nodes);
+                        instr->args.src3 = ra_get_phys_reg(ctx, g, instr->ssa_args.src3, nodes);
+                        instr->args.dest = ra_get_phys_reg(ctx, g, instr->ssa_args.dest, nodes);
+                }
+        }
+}
+
+static void
+bundle_block(compiler_context *ctx, bifrost_block *block)
+{
+}
+
+static void
+remove_create_vectors(compiler_context *ctx, bifrost_block *block)
+{
+        mir_foreach_instr_in_block_safe(block, instr) {
+                if (instr->op != op_create_vector) continue;
+
+                uint32_t vector_ssa_sources[4] = {
+                        instr->ssa_args.src0,
+                        instr->ssa_args.src1,
+                        instr->ssa_args.src2,
+                        instr->ssa_args.src3,
+                };
+
+                mir_foreach_instr_in_block_from_rev(block, next_instr, instr) {
+                        // Walk our block backwards and find the creators of this vector creation instruction
+                        for (unsigned i = 0; i < instr->dest_components; ++i) {
+                                // If this instruction is the one that writes this register then forward it to the real register
+                                if (vector_ssa_sources[i] == next_instr->ssa_args.dest) {
+                                        next_instr->ssa_args.dest = vector_ssa_sources[i];
+                                        // The source instruction's destination is a vector register of size dest_components,
+                                        // so dest + i addresses component i of it
+                                        next_instr->args.dest = instr->args.dest + i;
+                                }
+                        }
+                }
+
+                // Remove the instruction now that we have copied over all the sources
+                mir_remove_instr(instr);
+        }
+}
+
+static void
+remove_extract_elements(compiler_context *ctx, bifrost_block *block)
+{
+        mir_foreach_instr_in_block_safe(block, instr) {
+                if (instr->op != op_extract_element) continue;
+
+                mir_foreach_instr_in_block_from(block, next_instr, instr) {
+                        // Walk our block forward to replace uses of this register with a real register
+                        // src0 = vector
+                        // src1 = index into vector
+                        uint32_t vector_ssa_sources[4] = {
+                                next_instr->ssa_args.src0,
+                                next_instr->ssa_args.src1,
+                                next_instr->ssa_args.src2,
+                                next_instr->ssa_args.src3,
+                        };
+                        uint32_t *vector_sources[4] = {
+                                &next_instr->args.src0,
+                                &next_instr->args.src1,
+                                &next_instr->args.src2,
+                                &next_instr->args.src3,
+                        };
+
+                        for (unsigned i = 0; i < 4; ++i) {
+                                if (vector_ssa_sources[i] == instr->ssa_args.dest) {
+                                        // This source uses this vector extraction
+                                        // Replace its usage with the real register
+                                        // src0 is a vector register and src1 is the constant element of the vector
+                                        *vector_sources[i] = instr->args.src0 + instr->literal_args[0];
+                                }
+                        }
+
+                }
+
+                // Remove the instruction now that we have copied over all the sources
+                mir_remove_instr(instr);
+        }
+}
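After register allocation the two passes above lower vectors to plain register arithmetic: op_create_vector retargets each producer to write component i at dest + i, and op_extract_element rewrites every use to read src0 + lane. A tiny standalone illustration of the resulting register math follows (hypothetical values, illustrative only, not part of the patch):

/* demo_vector_lowering.c -- illustrative only, not part of this patch */
#include <stdio.h>

int main(void)
{
        unsigned vec_dest = 8;  /* say RA placed a vec4 value at r8..r11 */
        unsigned lane     = 2;  /* literal lane index of an op_extract_element */

        /* create_vector: the instruction producing component i writes r(dest + i) */
        for (unsigned i = 0; i < 4; ++i)
                printf("component %u is written to r%u\n", i, vec_dest + i);

        /* extract_element: every use of the extracted value reads the scalar directly */
        printf("lane %u is read from r%u\n", lane, vec_dest + lane);
        return 0;
}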
+
+
+void schedule_program(compiler_context *ctx)
+{
+        // XXX: we should move instructions together before RA that can feed into each other and be scheduled in the same clause
+        allocate_registers(ctx);
+
+        mir_foreach_block(ctx, block) {
+                remove_create_vectors(ctx, block);
+                remove_extract_elements(ctx, block);
+        }
+
+        mir_foreach_block(ctx, block) {
+#ifdef BI_DEBUG
+                print_mir_block(block, true);
+#endif
+
+                bundle_block(ctx, block);
+        }
+}
+
diff --git a/src/panfrost/bifrost/bifrost_sched.h b/src/panfrost/bifrost/bifrost_sched.h
new file mode 100644
index 00000000000..cb9d383010a
--- /dev/null
+++ b/src/panfrost/bifrost/bifrost_sched.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2019 Ryan Houdek
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef bifrost_ra_h
+#define bifrost_ra_h
+#include "compiler_defines.h"
+
+void schedule_program(compiler_context *ctx);
+
+#endif /* bifrost_ra_h */
diff --git a/src/panfrost/bifrost/compiler_defines.h b/src/panfrost/bifrost/compiler_defines.h
new file mode 100644
index 00000000000..a853b2d0e60
--- /dev/null
+++ b/src/panfrost/bifrost/compiler_defines.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2019 Ryan Houdek
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __compiler_defines_h__
+#define __compiler_defines_h__
+#include "bifrost.h"
+#include "bifrost_compile.h"
+#include "bifrost_ops.h"
+
+struct nir_builder;
+
+typedef struct ssa_args {
+        uint32_t dest;
+        uint32_t src0, src1, src2, src3;
+} ssa_args;
+
+/**
+ * @brief Singular unpacked instruction that lives outside of the clause bundle
+ */
+typedef struct bifrost_instruction {
+        // Must be first
+        struct list_head link;
+
+        /**
+         * @brief Pre-RA arguments
+         */
+        struct ssa_args ssa_args;
+        uint32_t literal_args[4];
+        uint32_t src_modifiers;
+        unsigned op;
+
+
+        /**
+         * @brief Post-RA arguments
+         */
+        struct ssa_args args;
+
+        /**
+         * @brief The number of components that the destination takes up
+         *
+         * This allows the RA to understand when it needs to allocate registers from different classes
+         */
+        unsigned dest_components;
+
+} bifrost_instruction;
+
+typedef struct bifrost_clause {
+        struct bifrost_header header;
+
+        /* List of bifrost_instructions emitted for the current clause */
+        struct list_head instructions;
+
+} bifrost_clause;
+
+typedef struct bifrost_block {
+        /* Link to next block. Must be first for mir_get_block */
+        struct list_head link;
+
+        /* List of bifrost_instructions emitted for the current block */
+        struct list_head instructions;
+
+        /* List of bifrost clauses to be emitted for the current block */
+        struct util_dynarray clauses;
+
+        /* Maximum number of successors is 2 */
+        struct bifrost_block *successors[2];
+        uint32_t num_successors;
+
+} bifrost_block;
+
+typedef struct compiler_context {
+        nir_shader *nir;
+        gl_shader_stage stage;
+
+        /* Current NIR function */
+        nir_function *func;
+        struct nir_builder *b;
+
+        /* Unordered list of bifrost_blocks */
+        uint32_t block_count;
+        struct list_head blocks;
+
+        /* The current block we are operating on */
+        struct bifrost_block *current_block;
+
+        struct hash_table_u64 *ssa_constants;
+
+        /* Uniform IDs */
+        struct hash_table_u64 *uniform_nir_to_bi;
+        uint32_t uniform_count;
+
+        struct hash_table_u64 *varying_nir_to_bi;
+        uint32_t varying_count;
+
+        struct hash_table_u64 *outputs_nir_to_bi;
+        uint32_t outputs_count;
+
+        /* Count of instructions emitted from NIR overall, across all blocks */
+        uint32_t instruction_count;
+
+        uint32_t mir_temp;
+
+        struct hash_table_u64 *hash_to_temp;
+        uint32_t num_temps;
+
+        uint32_t max_hash;
+
+} compiler_context;
+
+#define mir_foreach_block(ctx, v) list_for_each_entry(struct bifrost_block, v, &ctx->blocks, link)
+#define mir_foreach_block_from(ctx, from, v) list_for_each_entry_from(struct bifrost_block, v, from, &ctx->blocks, link)
+
+#define mir_last_block(ctx) list_last_entry(&ctx->blocks, struct bifrost_block, link)
+
+#define mir_foreach_instr(ctx, v) list_for_each_entry(struct bifrost_instruction, v, &ctx->current_block->instructions, link)
+#define mir_foreach_instr_in_block(block, v) list_for_each_entry(struct bifrost_instruction, v, &block->instructions, link)
+#define mir_foreach_instr_in_block_from(block, v, from) list_for_each_entry_from(struct bifrost_instruction, v, from, &block->instructions, link)
+#define mir_foreach_instr_in_block_safe(block, v) list_for_each_entry_safe(struct bifrost_instruction, v, &block->instructions, link)
+#define mir_last_instr_in_block(block) list_last_entry(&block->instructions, struct bifrost_instruction, link)
+#define mir_foreach_instr_in_block_from_rev(block, v, from) list_for_each_entry_from_rev(struct bifrost_instruction, v, from, &block->instructions, link)
+
+#define mir_next_instr(from) list_first_entry(&(from->link), struct bifrost_instruction, link)
+#define mir_remove_instr(instr) list_del(&instr->link)
+
+#define mir_insert_instr_before(before, ins) list_addtail(&(mir_alloc_ins(ins))->link, &before->link)
+
+#define SSA_INVALID_VALUE ~0U
+#define SSA_TEMP_SHIFT 24
+#define SSA_FIXED_REGISTER_SHIFT 25
+
+#define SSA_FIXED_REGISTER(x) ((1U << SSA_FIXED_REGISTER_SHIFT) + (x))
+#define SSA_REG_FROM_FIXED(x) ((x) & ~(1U << SSA_FIXED_REGISTER_SHIFT))
+
+#define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)
+#define SSA_FIXED_UREG_MINIMUM SSA_FIXED_REGISTER(64)
+#define SSA_FIXED_CONST_0 SSA_FIXED_REGISTER(256 + 64)
+
+#define SSA_FIXED_UREGISTER(x) (SSA_FIXED_REGISTER(x + 64))
+#define SSA_UREG_FROM_FIXED(x) (SSA_REG_FROM_FIXED(x) - 64)
+
+#define SSA_TEMP_VALUE(x) ((1U << SSA_TEMP_SHIFT) + (x))
+#define SSA_TEMP_FROM_VALUE(x) (((x) & ~(1U << SSA_TEMP_SHIFT)))
+#define MIR_TEMP_MINIMUM SSA_TEMP_VALUE(0)
+
+#define SRC_MOD_ABS 1
+#define SRC_MOD_NEG 2
+#define MOD_SIZE 2
+#define SOURCE_MODIFIER(src, mod) (mod << (src * MOD_SIZE))
+
+struct bifrost_instruction *
+mir_alloc_ins(struct bifrost_instruction instr);
+
+#endif
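The SSA_* macros above pack several value kinds into one 32-bit index: bit 25 tags a fixed register (uniform registers start at fixed index 64, the constant-zero slot at 256 + 64) and bit 24 tags a compiler temporary. The standalone round-trip below reuses the same definitions to show the encoding; it is illustrative only and not part of the patch.

/* demo_ssa_encoding.c -- illustrative only, not part of this patch */
#include <stdio.h>

#define SSA_TEMP_SHIFT 24
#define SSA_FIXED_REGISTER_SHIFT 25

#define SSA_FIXED_REGISTER(x) ((1U << SSA_FIXED_REGISTER_SHIFT) + (x))
#define SSA_REG_FROM_FIXED(x) ((x) & ~(1U << SSA_FIXED_REGISTER_SHIFT))
#define SSA_FIXED_UREGISTER(x) (SSA_FIXED_REGISTER((x) + 64))
#define SSA_UREG_FROM_FIXED(x) (SSA_REG_FROM_FIXED(x) - 64)
#define SSA_TEMP_VALUE(x) ((1U << SSA_TEMP_SHIFT) + (x))
#define SSA_TEMP_FROM_VALUE(x) ((x) & ~(1U << SSA_TEMP_SHIFT))

int main(void)
{
        unsigned r5 = SSA_FIXED_REGISTER(5);   /* general-purpose register r5 */
        unsigned u2 = SSA_FIXED_UREGISTER(2);  /* uniform register u2         */
        unsigned t7 = SSA_TEMP_VALUE(7);       /* compiler temporary #7       */

        printf("r5: 0x%08x -> r%u\n", r5, SSA_REG_FROM_FIXED(r5));
        printf("u2: 0x%08x -> u%u\n", u2, SSA_UREG_FROM_FIXED(u2));
        printf("t7: 0x%08x -> temp %u\n", t7, SSA_TEMP_FROM_VALUE(t7));
        return 0;
}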