pan/midgard: Represent ld/st offset unpacked

This simplifies manipulation of the offsets dramatically, fixing some
UBO access related bugs.

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Alyssa Rosenzweig 2019-11-15 14:19:34 -05:00
parent 1798f6bfc3
commit bc9a7d0699
6 changed files with 14 additions and 47 deletions
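
For context before the diffs: previously, every pass that touched a load/store offset had to split or reassemble it across the varying_parameters and address bitfields of the hardware word; after this change the byte offset is carried unpacked in constants[0] and packed exactly once, at bundle-emit time. Below is a minimal standalone sketch of that late packing, not code from the tree: the struct and helper names are made up, and the 7/3-bit split simply mirrors the constant-offset formula added to emit_binary_bundle further down.

#include <assert.h>
#include <stdio.h>

/* Stand-in for the load/store word; only the two fields the offset
 * touches are modelled here (hypothetical, not the real layout). */
struct fake_load_store {
        unsigned varying_parameters;
        unsigned address;
};

/* Late packing, following the formula added in emit_binary_bundle below:
 * the low 7 bits of the byte offset go into varying_parameters (shifted
 * up by 3), the remaining high bits into address. */
static void
pack_offset(struct fake_load_store *word, unsigned offset)
{
        word->varying_parameters |= (offset & 0x7F) << 3;
        word->address |= offset >> 7;
}

/* Inverse of the split above, used only to check the round trip. */
static unsigned
unpack_offset(const struct fake_load_store *word)
{
        unsigned lo = (word->varying_parameters >> 3) & 0x7F;
        unsigned hi = word->address;
        return (hi << 7) | lo;
}

int
main(void)
{
        struct fake_load_store word = { 0, 0 };
        unsigned offset = 0x1A3; /* arbitrary byte offset */

        pack_offset(&word, offset);
        assert(unpack_offset(&word) == offset);
        printf("varying_parameters=0x%x address=0x%x\n",
               word.varying_parameters, word.address);
        return 0;
}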

@@ -517,7 +517,6 @@ bool mir_special_index(compiler_context *ctx, unsigned idx);
 unsigned mir_use_count(compiler_context *ctx, unsigned value);
 bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
 uint16_t mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node);
-unsigned mir_ubo_shift(midgard_load_store_op op);
 midgard_reg_mode mir_typesize(midgard_instruction *ins);
 midgard_reg_mode mir_srcsize(midgard_instruction *ins, unsigned i);
 unsigned mir_bytes_for_mode(midgard_reg_mode mode);

@@ -1141,11 +1141,8 @@ emit_ubo_read(
 {
         /* TODO: half-floats */
 
-        midgard_instruction ins = m_ld_ubo_int4(dest, offset);
-
-        /* TODO: Don't split */
-        ins.load_store.varying_parameters = (offset & 0x7F) << 3;
-        ins.load_store.address = offset >> 7;
+        midgard_instruction ins = m_ld_ubo_int4(dest, 0);
+        ins.constants[0] = offset;
         mir_set_intr_mask(instr, &ins, true);
 
         if (indirect_offset) {

@@ -388,6 +388,14 @@ emit_binary_bundle(compiler_context *ctx,
                         mir_pack_ldst_mask(bundle->instructions[i]);
                         mir_pack_swizzle_ldst(bundle->instructions[i]);
+
+                        /* Apply a constant offset */
+                        unsigned offset = bundle->instructions[i]->constants[0];
+
+                        if (offset) {
+                                bundle->instructions[i]->load_store.varying_parameters |= (offset & 0x7F) << 3;
+                                bundle->instructions[i]->load_store.address |= (offset >> 7);
+                        }
                 }
 
                 memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

@@ -1169,16 +1169,14 @@ v_load_store_scratch(
                         /* For register spilling - to thread local storage */
                         .arg_1 = 0xEA,
                         .arg_2 = 0x1E,
-
-                        /* Splattered across, TODO combine logically */
-                        .varying_parameters = (byte & 0x1FF) << 1,
-                        .address = (byte >> 9)
                 },
 
                 /* If we spill an unspill, RA goes into an infinite loop */
                 .no_spill = true
         };
 
+        ins.constants[0] = byte;
+
         if (is_store) {
                 /* r0 = r26, r1 = r27 */
                 assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));

@@ -478,25 +478,6 @@ mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
         return mask;
 }
 
-unsigned
-mir_ubo_shift(midgard_load_store_op op)
-{
-        switch (op) {
-        case midgard_op_ld_ubo_char:
-                return 0;
-        case midgard_op_ld_ubo_char2:
-                return 1;
-        case midgard_op_ld_ubo_char4:
-                return 2;
-        case midgard_op_ld_ubo_short4:
-                return 3;
-        case midgard_op_ld_ubo_int4:
-                return 4;
-        default:
-                unreachable("Invalid op");
-        }
-}
-
 /* Register allocation occurs after instruction scheduling, which is fine until
  * we start needing to spill registers and therefore insert instructions into
  * an already-scheduled program. We don't have to be terribly efficient about

@@ -36,22 +36,6 @@
  * program so we allow that many registers through at minimum, to prevent
  * spilling. If we spill anyway, I mean, it's a lose-lose at that point. */
 
-static unsigned
-mir_ubo_offset(midgard_instruction *ins)
-{
-        assert(ins->type == TAG_LOAD_STORE_4);
-        assert(OP_IS_UBO_READ(ins->load_store.op));
-
-        /* Grab the offset as the hw understands it */
-        unsigned lo = ins->load_store.varying_parameters >> 7;
-        unsigned hi = ins->load_store.address;
-        unsigned raw = ((hi << 3) | lo);
-
-        /* Account for the op's shift */
-        unsigned shift = mir_ubo_shift(ins->load_store.op);
-        return (raw << shift);
-}
-
 void
 midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
 {
@@ -59,8 +43,8 @@ midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
                 if (ins->type != TAG_LOAD_STORE_4) continue;
                 if (!OP_IS_UBO_READ(ins->load_store.op)) continue;
 
-                /* Get the offset. TODO: can we promote unaligned access? */
-                unsigned off = mir_ubo_offset(ins);
+                /* TODO: promote unaligned access via swizzle? */
+                unsigned off = ins->constants[0];
 
                 if (off & 0xF) continue;
                 unsigned address = off / 16;
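
To make the promotion rule in the hunk above concrete, here is a small sketch, not code from the tree: with the offset now stored unpacked in constants[0], a UBO read is only promoted when its byte offset is vec4 (16-byte) aligned, and the promoted uniform slot is simply offset / 16. The helper name and sample values are made up for illustration.

#include <assert.h>
#include <stdbool.h>

/* Illustrative helper (hypothetical name): reports whether a byte offset
 * is promotable and, if so, the uniform slot index, mirroring the
 * (off & 0xF) check and the off / 16 conversion shown above. */
static bool
promotable_ubo_offset(unsigned off, unsigned *address)
{
        if (off & 0xF)
                return false; /* unaligned: leave the ld_ubo alone */

        *address = off / 16; /* vec4 slot index */
        return true;
}

int
main(void)
{
        unsigned addr;

        assert(promotable_ubo_offset(48, &addr) && addr == 3); /* byte 48 -> slot 3 */
        assert(!promotable_ubo_offset(20, &addr));             /* byte 20: not aligned */
        return 0;
}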