pan/bi: Use new instruction types

And remove now redundant casts

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/8135>
This commit is contained in:
Alyssa Rosenzweig 2020-12-28 12:12:53 -05:00 committed by Marge Bot
parent cf5b2b8939
commit e904bbb170
6 changed files with 54 additions and 60 deletions

View File

@@ -36,12 +36,11 @@ bi_opt_dead_code_eliminate(bi_context *ctx, bi_block *block)
uint16_t *live = mem_dup(block->base.live_out, temp_count * sizeof(uint16_t));
bi_foreach_instr_in_block_safe_rev(block, _ins) {
bi_instr *ins = (bi_instr *) _ins;
bi_foreach_instr_in_block_safe_rev(block, ins) {
unsigned index = bi_get_node(ins->dest[0]);
if (index < temp_count && !live[index]) {
bi_remove_instruction((bi_instruction *) ins);
bi_remove_instruction(ins);
progress |= true;
}

View File

@@ -173,9 +173,9 @@ bi_assign_fau_idx(bi_clause *clause,
bi_bundle *bundle)
{
bool assigned =
bi_assign_fau_idx_single(&bundle->regs, clause, (bi_instr *) bundle->fma, false, true);
bi_assign_fau_idx_single(&bundle->regs, clause, bundle->fma, false, true);
bi_assign_fau_idx_single(&bundle->regs, clause, (bi_instr *) bundle->add, assigned, false);
bi_assign_fau_idx_single(&bundle->regs, clause, bundle->add, assigned, false);
}
/* Assigns a slot for reading, before anything is written */
@@ -224,21 +224,21 @@ bi_assign_slots(bi_bundle *now, bi_bundle *prev)
* and thus gets skipped over here. */
bool read_dreg = now->add &&
bi_opcode_props[((bi_instr *) now->add)->op].sr_read;
bi_opcode_props[(now->add)->op].sr_read;
bool write_dreg = now->add &&
bi_opcode_props[((bi_instr *) now->add)->op].sr_write;
bi_opcode_props[(now->add)->op].sr_write;
/* First, assign reads */
if (now->fma)
bi_foreach_src(now->fma, src)
bi_assign_slot_read(&now->regs, ((bi_instr *) now->fma)->src[src]);
bi_assign_slot_read(&now->regs, (now->fma)->src[src]);
if (now->add) {
bi_foreach_src(now->add, src) {
if (!(src == 0 && read_dreg))
bi_assign_slot_read(&now->regs, ((bi_instr *) now->add)->src[src]);
bi_assign_slot_read(&now->regs, (now->add)->src[src]);
}
}
@@ -246,8 +246,8 @@ bi_assign_slots(bi_bundle *now, bi_bundle *prev)
* +ATEST wants its destination written to both a staging register
* _and_ a regular write, because it may not generate a message */
if (prev->add && (!write_dreg || ((bi_instr *) prev->add)->op == BI_OPCODE_ATEST)) {
bi_index idx = ((bi_instr *) prev->add)->dest[0];
if (prev->add && (!write_dreg || prev->add->op == BI_OPCODE_ATEST)) {
bi_index idx = prev->add->dest[0];
if (idx.type == BI_INDEX_REGISTER) {
now->regs.slot[3] = idx.value;
@@ -256,7 +256,7 @@ bi_assign_slots(bi_bundle *now, bi_bundle *prev)
}
if (prev->fma) {
bi_index idx = ((bi_instr *) prev->fma)->dest[0];
bi_index idx = (prev->fma)->dest[0];
if (idx.type == BI_INDEX_REGISTER) {
if (now->regs.slot23.slot3) {
@@ -405,7 +405,7 @@ bi_flip_slots(bi_registers *regs)
static void
bi_lower_cubeface2(bi_context *ctx, bi_bundle *bundle)
{
bi_instr *old = (bi_instr *) bundle->add;
bi_instr *old = bundle->add;
/* Filter for +CUBEFACE2 */
if (!old || old->op != BI_OPCODE_CUBEFACE2)
@@ -425,7 +425,7 @@ bi_lower_cubeface2(bi_context *ctx, bi_bundle *bundle)
/* Emit the instruction */
list_addtail(&new->link, &old->link);
bundle->fma = (bi_instruction *) new;
bundle->fma = new;
/* Now replace the sources of the CUBEFACE2 with a single passthrough
* from the CUBEFACE1 (and a side-channel) */
@@ -476,23 +476,23 @@ bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_b
bi_flip_slots(&bundle.regs);
bool sr_read = bundle.add &&
bi_opcode_props[((bi_instr *) bundle.add)->op].sr_read;
bi_opcode_props[(bundle.add)->op].sr_read;
uint64_t reg = bi_pack_registers(bundle.regs);
uint64_t fma = bi_pack_fma((bi_instr *) bundle.fma,
bi_get_src_new((bi_instr *) bundle.fma, &bundle.regs, 0),
bi_get_src_new((bi_instr *) bundle.fma, &bundle.regs, 1),
bi_get_src_new((bi_instr *) bundle.fma, &bundle.regs, 2),
bi_get_src_new((bi_instr *) bundle.fma, &bundle.regs, 3));
uint64_t fma = bi_pack_fma(bundle.fma,
bi_get_src_new(bundle.fma, &bundle.regs, 0),
bi_get_src_new(bundle.fma, &bundle.regs, 1),
bi_get_src_new(bundle.fma, &bundle.regs, 2),
bi_get_src_new(bundle.fma, &bundle.regs, 3));
uint64_t add = bi_pack_add((bi_instr *) bundle.add,
bi_get_src_new((bi_instr *) bundle.add, &bundle.regs, sr_read + 0),
bi_get_src_new((bi_instr *) bundle.add, &bundle.regs, sr_read + 1),
bi_get_src_new((bi_instr *) bundle.add, &bundle.regs, sr_read + 2),
uint64_t add = bi_pack_add(bundle.add,
bi_get_src_new(bundle.add, &bundle.regs, sr_read + 0),
bi_get_src_new(bundle.add, &bundle.regs, sr_read + 1),
bi_get_src_new(bundle.add, &bundle.regs, sr_read + 2),
0);
if (bundle.add) {
bi_instr *add = (bi_instr *) bundle.add;
bi_instr *add = bundle.add;
bool sr_write = bi_opcode_props[add->op].sr_write;
@@ -548,7 +548,7 @@ bi_pack_constants(bi_context *ctx, bi_clause *clause,
/* Compute branch offset instead of a dummy 0 */
if (branches) {
bi_instr *br = (bi_instr *) clause->bundles[clause->bundle_count - 1].add;
bi_instr *br = clause->bundles[clause->bundle_count - 1].add;
assert(br && br->branch_target);
/* Put it in the high place */
@@ -674,7 +674,7 @@ bi_collect_blend_ret_addr(bi_context *ctx, struct util_dynarray *emission,
return;
const bi_bundle *bundle = &clause->bundles[clause->bundle_count - 1];
const bi_instr *ins = (bi_instr *) bundle->add;
const bi_instr *ins = bundle->add;
if (!ins || ins->op != BI_OPCODE_BLEND)
return;

View File

@@ -67,11 +67,11 @@ bi_print_slots(bi_registers *regs, FILE *fp)
void
bi_print_bundle(bi_bundle *bundle, FILE *fp)
{
bi_instruction *ins[2] = { bundle->fma, bundle->add };
bi_instr *ins[2] = { bundle->fma, bundle->add };
for (unsigned i = 0; i < 2; ++i) {
if (ins[i])
bi_print_instr((bi_instr *) ins[i], fp);
bi_print_instr(ins[i], fp);
else
fprintf(fp, "nop\n");
}

View File

@@ -38,11 +38,10 @@ bi_compute_interference(bi_context *ctx, struct lcra_state *l)
bi_block *blk = (bi_block *) _blk;
uint16_t *live = mem_dup(_blk->live_out, l->node_count * sizeof(uint16_t));
bi_foreach_instr_in_block_rev(blk, _ins) {
bi_foreach_instr_in_block_rev(blk, ins) {
/* Mark all registers live after the instruction as
* interfering with the destination */
bi_instr *ins = (bi_instr *) _ins;
for (unsigned d = 0; d < ARRAY_SIZE(ins->dest); ++d) {
if (bi_get_node(ins->dest[d]) >= l->node_count)
continue;
@@ -83,8 +82,7 @@ bi_allocate_registers(bi_context *ctx, bool *success)
l->class_size[BI_REG_CLASS_WORK] = 59 * 4;
}
bi_foreach_instr_global(ctx, _ins) {
bi_instr *ins = (bi_instr *) _ins;
bi_foreach_instr_global(ctx, ins) {
unsigned dest = bi_get_node(ins->dest[0]);
/* Blend shaders expect the src colour to be in r0-r3 */
@@ -147,8 +145,7 @@ bi_reg_from_index(struct lcra_state *l, bi_index index)
static void
bi_install_registers(bi_context *ctx, struct lcra_state *l)
{
bi_foreach_instr_global(ctx, _ins) {
bi_instr *ins = (bi_instr *) _ins;
bi_foreach_instr_global(ctx, ins) {
ins->dest[0] = bi_reg_from_index(l, ins->dest[0]);
bi_foreach_src(ins, s)
@@ -175,14 +172,13 @@ bi_rewrite_index_src_single(bi_instr *ins, bi_index old, bi_index new)
* that bridge when we get to it. For now, just grab the one and only
* instruction in the clause */
static bi_instruction *
static bi_instr *
bi_unwrap_singleton(bi_clause *clause)
{
assert(clause->bundle_count == 1);
assert((clause->bundles[0].fma != NULL) ^ (clause->bundles[0].add != NULL));
return clause->bundles[0].fma ? clause->bundles[0].fma
: clause->bundles[0].add;
return clause->bundles[0].fma ?: clause->bundles[0].add;
}
/* If register allocation fails, find the best spill node */
@@ -192,8 +188,7 @@ bi_choose_spill_node(bi_context *ctx, struct lcra_state *l)
{
/* Pick a node satisfying bi_spill_register's preconditions */
bi_foreach_instr_global(ctx, _ins) {
bi_instr *ins = (bi_instr *) _ins;
bi_foreach_instr_global(ctx, ins) {
if (ins->no_spill || ins->dest[0].offset || !bi_is_null(ins->dest[1])) {
for (unsigned d = 0; d < ARRAY_SIZE(ins->dest); ++d)
lcra_set_node_spill_cost(l, bi_get_node(ins->dest[0]), -1);
@@ -247,7 +242,7 @@ bi_fill_src(bi_builder *b, bi_index index, uint32_t offset, bi_clause *clause,
list_addtail(&singleton->link, &clause->link);
/* Rewrite to use */
bi_rewrite_index_src_single((bi_instr *) ins, index, temp);
bi_rewrite_index_src_single(ins, index, temp);
b->shader->fills++;
}
@@ -267,7 +262,7 @@ bi_spill_register(bi_context *ctx, bi_index index, uint32_t offset)
bi_foreach_block(ctx, _block) {
bi_block *block = (bi_block *) _block;
bi_foreach_clause_in_block_safe(block, clause) {
bi_instr *ins = (bi_instr *) bi_unwrap_singleton(clause);
bi_instr *ins = bi_unwrap_singleton(clause);
if (bi_is_equiv(ins->dest[0], index)) {
bi_spill_dest(&_b, index, offset, clause,
block, ins, &channels);

View File

@@ -88,9 +88,9 @@ bi_singleton(void *memctx, bi_instr *ins,
assert(can_fma || can_add);
if (can_add)
u->bundles[0].add = (bi_instruction *) ins;
u->bundles[0].add = ins;
else
u->bundles[0].fma = (bi_instruction *) ins;
u->bundles[0].fma = ins;
u->scoreboard_id = scoreboard_id;
u->staging_barrier = osrb;
@@ -167,7 +167,7 @@ bi_schedule(bi_context *ctx)
list_inithead(&bblock->clauses);
bi_foreach_instr_in_block(bblock, ins) {
bi_clause *u = bi_singleton(ctx, (bi_instr *) ins,
bi_clause *u = bi_singleton(ctx, ins,
bblock, 0, (1 << 0),
!is_first);

View File

@@ -712,8 +712,8 @@ typedef struct {
typedef struct {
uint8_t fau_idx;
bi_registers regs;
bi_instruction *fma;
bi_instruction *add;
bi_instr *fma;
bi_instr *add;
} bi_bundle;
struct bi_block;
@@ -824,7 +824,7 @@ typedef struct {
} bi_context;
static inline void
bi_remove_instruction(bi_instruction *ins)
bi_remove_instruction(bi_instr *ins)
{
list_del(&ins->link);
}
@@ -969,22 +969,22 @@ bi_node_to_index(unsigned node, unsigned node_count)
list_for_each_entry_from_rev(pan_block, v, from, &ctx->blocks, link)
#define bi_foreach_instr_in_block(block, v) \
list_for_each_entry(bi_instruction, v, &(block)->base.instructions, link)
list_for_each_entry(bi_instr, v, &(block)->base.instructions, link)
#define bi_foreach_instr_in_block_rev(block, v) \
list_for_each_entry_rev(bi_instruction, v, &(block)->base.instructions, link)
list_for_each_entry_rev(bi_instr, v, &(block)->base.instructions, link)
#define bi_foreach_instr_in_block_safe(block, v) \
list_for_each_entry_safe(bi_instruction, v, &(block)->base.instructions, link)
list_for_each_entry_safe(bi_instr, v, &(block)->base.instructions, link)
#define bi_foreach_instr_in_block_safe_rev(block, v) \
list_for_each_entry_safe_rev(bi_instruction, v, &(block)->base.instructions, link)
list_for_each_entry_safe_rev(bi_instr, v, &(block)->base.instructions, link)
#define bi_foreach_instr_in_block_from(block, v, from) \
list_for_each_entry_from(bi_instruction, v, from, &(block)->base.instructions, link)
list_for_each_entry_from(bi_instr, v, from, &(block)->base.instructions, link)
#define bi_foreach_instr_in_block_from_rev(block, v, from) \
list_for_each_entry_from_rev(bi_instruction, v, from, &(block)->base.instructions, link)
list_for_each_entry_from_rev(bi_instr, v, from, &(block)->base.instructions, link)
#define bi_foreach_clause_in_block(block, v) \
list_for_each_entry(bi_clause, v, &(block)->clauses, link)
@@ -1020,16 +1020,16 @@ bi_node_to_index(unsigned node, unsigned node_count)
#define bi_foreach_src(ins, v) \
for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
static inline bi_instruction *
bi_prev_op(bi_instruction *ins)
static inline bi_instr *
bi_prev_op(bi_instr *ins)
{
return list_last_entry(&(ins->link), bi_instruction, link);
return list_last_entry(&(ins->link), bi_instr, link);
}
static inline bi_instruction *
bi_next_op(bi_instruction *ins)
static inline bi_instr *
bi_next_op(bi_instr *ins)
{
return list_first_entry(&(ins->link), bi_instruction, link);
return list_first_entry(&(ins->link), bi_instr, link);
}
static inline pan_block *