pan/mdg: Defer modifier packing until emit time

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5151>
Author:  Alyssa Rosenzweig
Date:    2020-05-21 12:38:27 -04:00 (committed by Marge Bot)
Parent:  edf1479bea
Commit:  1cd65353c9
5 changed files with 181 additions and 226 deletions
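
What the change amounts to, sketched as a minimal standalone C program rather than the actual Mesa code: the NIR->MIR translator stops building packed midgard_vector_alu_src values up front; the instruction instead carries plain per-source flags (the src_abs, src_neg and src_shift fields used throughout the hunks below), and the emitter packs them into the hardware encoding only when the bundle is written out. Everything in this sketch other than those field names and the float bit layout is invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder stand-ins for the real enums/structs. */
enum int_mod { INT_ZERO_EXTEND, INT_NORMAL, INT_SIGN_EXTEND, INT_SHIFT };

struct sketch_ins {
        bool is_int;
        bool src_abs[2], src_neg[2], src_shift[2];
};

/* Emit-time packing, analogous to (but simpler than) the mir_pack_mod() added
 * by this commit: integer sources select an int_mod, float sources pack abs
 * into bit 0 and neg into bit 1. */
static unsigned pack_mod(const struct sketch_ins *ins, unsigned i)
{
        if (ins->is_int)
                return ins->src_shift[i] ? INT_SHIFT : INT_NORMAL;

        return (ins->src_abs[i] << 0) | (ins->src_neg[i] << 1);
}

int main(void)
{
        /* A lowered fneg: the flag is just recorded on the instruction... */
        struct sketch_ins fneg = { .src_neg = { false, true } };

        /* ...and only turned into the packed field at emit time. */
        printf("packed mod = 0x%x\n", pack_mod(&fneg, 1)); /* prints 0x2 */
        return 0;
}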


@@ -508,8 +508,7 @@ void mir_print_instruction(midgard_instruction *ins);
void mir_print_bundle(midgard_bundle *ctx);
void mir_print_block(midgard_block *block);
void mir_print_shader(compiler_context *ctx);
bool mir_nontrivial_source2_mod(midgard_instruction *ins);
bool mir_nontrivial_source2_mod_simple(midgard_instruction *ins);
bool mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle);
bool mir_nontrivial_outmod(midgard_instruction *ins);
void mir_insert_instruction_before_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);


@@ -128,44 +128,6 @@ schedule_barrier(compiler_context *ctx)
#define M_LOAD(name, T) M_LOAD_STORE(name, false, T)
#define M_STORE(name, T) M_LOAD_STORE(name, true, T)
/* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
* the corresponding Midgard source */
static midgard_vector_alu_src
vector_alu_modifiers(bool abs, bool neg, bool is_int,
bool half, bool sext)
{
/* Figure out how many components there are so we can adjust.
* Specifically we want to broadcast the last channel so things like
* ball2/3 work.
*/
midgard_vector_alu_src alu_src = {
.rep_low = 0,
.rep_high = 0,
.half = half
};
if (is_int) {
alu_src.mod = midgard_int_normal;
/* Sign/zero-extend if needed */
if (half) {
alu_src.mod = sext ?
midgard_int_sign_extend
: midgard_int_zero_extend;
}
/* These should have been lowered away */
assert(!(abs || neg));
} else {
alu_src.mod = (abs << 0) | (neg << 1);
}
return alu_src;
}
M_LOAD(ld_attr_32, nir_type_uint32);
M_LOAD(ld_vary_32, nir_type_uint32);
M_LOAD(ld_ubo_int4, nir_type_uint32);
@@ -582,10 +544,7 @@ nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
#define ALU_CHECK_CMP(sext) \
if (src_bitsize == 16 && dst_bitsize == 32) { \
half_1 = true; \
half_2 = true; \
sext_1 = sext; \
sext_2 = sext; \
/* inferred */ \
} else { \
assert(src_bitsize == dst_bitsize); \
} \
@@ -766,11 +725,6 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
midgard_dest_override dest_override =
midgard_dest_override_none;
/* Should we use a smaller respective source and sign-extend? */
bool half_1 = false, sext_1 = false;
bool half_2 = false, sext_2 = false;
/* Should we swap arguments? */
bool flip_src12 = false;
@@ -897,11 +851,6 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
case nir_op_i2i16:
case nir_op_i2i32:
case nir_op_i2i64:
/* If we end up upscale, we'll need a sign-extend on the
* operand (the second argument) */
sext_2 = true;
/* fallthrough */
case nir_op_u2u8:
case nir_op_u2u16:
case nir_op_u2u32:
@@ -916,8 +865,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
op = midgard_alu_op_imov;
if (dst_bitsize == (src_bitsize * 2)) {
/* Converting up */
half_2 = true;
/* inferred */
} else if (src_bitsize == (dst_bitsize * 2)) {
/* Converting down */
dest_override = midgard_dest_override_lower;
@@ -972,9 +920,6 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
/* Midgard can perform certain modifiers on output of an ALU op */
unsigned outmod = 0;
bool abs[4] = { false };
bool neg[4] = { false };
bool is_int = midgard_is_integer_op(op);
if (midgard_is_integer_out_op(op)) {
@@ -1030,7 +975,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
if (quirk_flipped_r24) {
ins.src[0] = ~0;
mir_copy_src(&ins, instr, 0, 1, &abs[1], &neg[1], &ins.src_invert[1], is_int, broadcast_swizzle);
mir_copy_src(&ins, instr, 0, 1, &ins.src_abs[1], &ins.src_neg[1], &ins.src_invert[1], is_int, broadcast_swizzle);
} else {
for (unsigned i = 0; i < nr_inputs; ++i) {
unsigned to = i;
@@ -1051,7 +996,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
to = 1 - to;
}
mir_copy_src(&ins, instr, i, to, &abs[to], &neg[to], &ins.src_invert[to], is_int, broadcast_swizzle);
mir_copy_src(&ins, instr, i, to, &ins.src_abs[to], &ins.src_neg[to], &ins.src_invert[to], is_int, broadcast_swizzle);
/* (!c) ? a : b = c ? b : a */
if (instr->op == nir_op_b32csel && ins.src_invert[2]) {
@@ -1064,10 +1009,10 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
/* Lowered to move */
if (instr->op == nir_op_fneg)
neg[1] = !neg[1];
ins.src_neg[1] ^= true;
if (instr->op == nir_op_fabs)
abs[1] = true;
ins.src_abs[1] = true;
}
ins.mask = mask_of(nr_components);
@@ -1077,9 +1022,6 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
.reg_mode = reg_mode,
.dest_override = dest_override,
.outmod = outmod,
.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(abs[0], neg[0], is_int, half_1, sext_1)),
.src2 = vector_alu_srco_unsigned(vector_alu_modifiers(abs[1], neg[1], is_int, half_2, sext_2)),
};
/* Apply writemask if non-SSA, keeping in mind that we can't write to
@@ -2242,12 +2184,9 @@ embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
continue;
}
/* We don't know how to handle these with a constant */
if (mir_nontrivial_source2_mod_simple(ins) || src->rep_low || src->rep_high) {
DBG("Bailing inline constant...\n");
/* Should've been const folded */
if (ins->src_abs[1] || ins->src_neg[1])
continue;
}
/* Make sure that the constant is not itself a vector
* by checking if all accessed values are the same. */


@@ -24,6 +24,38 @@
#include "compiler.h"
#include "midgard_ops.h"
static midgard_int_mod
mir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
{
if (!half) {
assert(!shift);
/* Sign-extension, really... */
return scalar ? 0 : midgard_int_normal;
}
if (shift)
return midgard_int_shift;
if (nir_alu_type_get_base_type(T) == nir_type_int)
return midgard_int_sign_extend;
else
return midgard_int_zero_extend;
}
static unsigned
mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
{
bool integer = midgard_is_integer_op(ins->alu.op);
unsigned base_size = (8 << ins->alu.reg_mode);
unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
bool half = (sz == (base_size >> 1));
return integer ?
mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) :
((ins->src_abs[i] << 0) |
((ins->src_neg[i] << 1)));
}
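
A worked reading of the two helpers above, for the vector case (scalar = false); the values follow directly from the code, with the midgard_int_* names taken from the existing ISA definitions:

/* Integer op, full-width source, no shift      -> midgard_int_normal
 * Integer op, half-width signed source         -> midgard_int_sign_extend
 * Integer op, half-width unsigned source       -> midgard_int_zero_extend
 * Integer op, half-width source with src_shift -> midgard_int_shift
 *   (full-width sources assert !src_shift)
 * Float op: bit 0 = src_abs[i], bit 1 = src_neg[i], so a negate-only
 *   source packs to 0x2. */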
/* Midgard IR only knows vector ALU types, but we sometimes need to actually
* use scalar ALU instructions, for functional or performance reasons. To do
* this, we just demote vector ALU payloads to scalar. */
@@ -41,39 +73,13 @@ component_from_mask(unsigned mask)
}
static unsigned
vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
unsigned component)
mir_pack_scalar_source(unsigned mod, bool is_full, unsigned component)
{
midgard_vector_alu_src v;
memcpy(&v, &u, sizeof(v));
/* TODO: Integers */
midgard_scalar_alu_src s = { 0 };
if (is_full) {
/* For a 32-bit op, just check the source half flag */
s.full = !v.half;
} else if (!v.half) {
/* For a 16-bit op that's not subdivided, never full */
s.full = false;
} else {
/* We can't do 8-bit scalar, abort! */
assert(0);
}
/* Component indexing takes size into account */
if (s.full)
s.component = component << 1;
else
s.component = component;
if (is_int) {
/* TODO */
} else {
s.mod = v.mod;
}
midgard_scalar_alu_src s = {
.mod = mod,
.full = is_full,
.component = component << (is_full ? 1 : 0)
};
unsigned o;
memcpy(&o, &s, sizeof(s));
@@ -84,17 +90,22 @@ vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
bool is_int = midgard_is_integer_op(v.op);
bool is_full = v.reg_mode == midgard_reg_mode_32;
bool is_inline_constant = ins->has_inline_constant;
bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32;
bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;
bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;
unsigned comp = component_from_mask(ins->mask);
unsigned packed_src[2] = {
mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0, ins->swizzle[0][comp]),
mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1, ins->swizzle[1][comp])
};
/* The output component is from the mask */
midgard_scalar_alu s = {
.op = v.op,
.src1 = vector_to_scalar_source(v.src1, is_int, is_full, ins->swizzle[0][comp]),
.src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, ins->swizzle[1][comp]) : 0,
.src1 = packed_src[0],
.src2 = packed_src[1],
.unknown = 0,
.outmod = v.outmod,
.output_full = is_full,
@@ -175,96 +186,125 @@ mir_pack_mask_alu(midgard_instruction *ins)
ins->alu.mask = effective;
}
static void
mir_pack_swizzle_alu(midgard_instruction *ins)
static unsigned
mir_pack_swizzle(unsigned mask, unsigned *swizzle,
nir_alu_type T, midgard_reg_mode reg_mode,
bool op_channeled, bool *rep_low, bool *rep_high)
{
midgard_vector_alu_src src[] = {
vector_alu_from_unsigned(ins->alu.src1),
vector_alu_from_unsigned(ins->alu.src2)
};
unsigned packed = 0;
unsigned sz = nir_alu_type_get_type_size(T);
for (unsigned i = 0; i < 2; ++i) {
unsigned packed = 0;
if (reg_mode == midgard_reg_mode_64) {
unsigned components = 64 / sz;
if (ins->alu.reg_mode == midgard_reg_mode_64) {
unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
unsigned components = 64 / sz;
packed = mir_pack_swizzle_64(swizzle, components);
packed = mir_pack_swizzle_64(ins->swizzle[i], components);
if (sz == 32) {
bool lo = swizzle[0] >= COMPONENT_Z;
bool hi = swizzle[1] >= COMPONENT_Z;
if (sz == 32) {
bool lo = ins->swizzle[i][0] >= COMPONENT_Z;
bool hi = ins->swizzle[i][1] >= COMPONENT_Z;
unsigned mask = mir_bytemask(ins);
if (mask & 0x1) {
/* We can't mix halves... */
if (mask & 2)
assert(lo == hi);
if (mask & 0xFF) {
/* We can't mix halves... */
if (mask & 0xFF00)
assert(lo == hi);
src[i].rep_low |= lo;
} else {
src[i].rep_low |= hi;
}
} else if (sz < 32) {
unreachable("Cannot encode 8/16 swizzle in 64-bit");
}
} else {
/* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
* the strategy is to check whether the nibble we're on is
* upper or lower. We need all components to be on the same
* "side"; that much is enforced by the ISA and should have
* been lowered. TODO: 8-bit packing. TODO: vec8 */
unsigned first = ins->mask ? ffs(ins->mask) - 1 : 0;
bool upper = ins->swizzle[i][first] > 3;
if (upper && ins->mask)
assert(nir_alu_type_get_type_size(ins->src_types[i]) <= 16);
bool dest_up =
GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? false :
(first >= 4);
for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
unsigned v = ins->swizzle[i][c];
bool t_upper = v > 3;
/* Ensure we're doing something sane */
if (ins->mask & (1 << c)) {
assert(t_upper == upper);
assert(v <= 7);
}
/* Use the non upper part */
v &= 0x3;
packed |= v << (2 * (c % 4));
}
/* Replicate for now.. should really pick a side for
* dot products */
if (ins->alu.reg_mode == midgard_reg_mode_16) {
src[i].rep_low = !upper;
src[i].rep_high = upper;
} else if (ins->alu.reg_mode == midgard_reg_mode_32) {
src[i].rep_low = upper;
*rep_low = lo;
} else {
unreachable("Unhandled reg mode");
*rep_low = hi;
}
} else if (sz < 32) {
unreachable("Cannot encode 8/16 swizzle in 64-bit");
}
} else {
/* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
* the strategy is to check whether the nibble we're on is
* upper or lower. We need all components to be on the same
* "side"; that much is enforced by the ISA and should have
* been lowered. TODO: 8-bit packing. TODO: vec8 */
unsigned first = mask ? ffs(mask) - 1 : 0;
bool upper = swizzle[first] > 3;
if (upper && mask)
assert(sz <= 16);
bool dest_up = !op_channeled && (first >= 4);
for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
unsigned v = swizzle[c];
bool t_upper = v > 3;
/* Ensure we're doing something sane */
if (mask & (1 << c)) {
assert(t_upper == upper);
assert(v <= 7);
}
/* Use the non upper part */
v &= 0x3;
packed |= v << (2 * (c % 4));
}
src[i].swizzle = packed;
/* Replicate for now.. should really pick a side for
* dot products */
if (reg_mode == midgard_reg_mode_16) {
*rep_low = !upper;
*rep_high = upper;
} else if (reg_mode == midgard_reg_mode_32) {
*rep_low = upper;
} else {
unreachable("Unhandled reg mode");
}
}
ins->alu.src1 = vector_alu_srco_unsigned(src[0]);
return packed;
}
if (!ins->has_inline_constant)
ins->alu.src2 = vector_alu_srco_unsigned(src[1]);
static void
mir_pack_vector_srcs(midgard_instruction *ins)
{
bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props);
midgard_reg_mode mode = ins->alu.reg_mode;
unsigned base_size = (8 << mode);
for (unsigned i = 0; i < 2; ++i) {
if (ins->has_inline_constant && (i == 1))
continue;
if (ins->src[i] == ~0)
continue;
bool rep_lo = false, rep_hi = false;
unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
bool half = (sz == (base_size >> 1));
assert((sz == base_size) || half);
unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i],
ins->src_types[i], ins->alu.reg_mode,
channeled, &rep_lo, &rep_hi);
midgard_vector_alu_src pack = {
.mod = mir_pack_mod(ins, i, false),
.rep_low = rep_lo,
.rep_high = rep_hi,
.half = half,
.swizzle = swizzle
};
unsigned p = vector_alu_srco_unsigned(pack);
if (i == 0)
ins->alu.src1 = p;
else
ins->alu.src2 = p;
}
}
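
To make the 16-bit strategy in the comment above concrete, here is one illustrative trace through mir_pack_swizzle() (the numbers are examples, not from the commit): a 16-bit reg-mode op with mask = 0x3 and a source swizzle starting { 5, 4, ... } has first = 0 and upper = true (since 5 > 3), so each component keeps only its low two bits:

/* c = 0: v = 5 & 0x3 = 1 -> packed |= 1 << 0
 * c = 1: v = 4 & 0x3 = 0 -> packed |= 0 << 2
 * => packed = 0x1 (".yx" within the upper half), and because reg_mode is
 *    midgard_reg_mode_16, rep_high = 1 and rep_low = 0. */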
static void
@@ -421,7 +461,7 @@ emit_alu_bundle(compiler_context *ctx,
if (ins->unit & UNITS_ANY_VECTOR) {
mir_pack_mask_alu(ins);
mir_pack_swizzle_alu(ins);
mir_pack_vector_srcs(ins);
size = sizeof(midgard_vector_alu);
source = &ins->alu;
} else if (ins->unit == ALU_ENAB_BR_COMPACT) {


@@ -44,7 +44,7 @@ midgard_opt_copy_prop_reg(compiler_context *ctx, midgard_block *block)
if (ins->has_inline_constant) continue;
if (ins->has_constants) continue;
if (mir_nontrivial_source2_mod(ins)) continue;
if (mir_nontrivial_mod(ins, 1, true)) continue;
if (mir_nontrivial_outmod(ins)) continue;
if (!mir_single_use(ctx, from)) continue;
@@ -82,7 +82,7 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
if (ins->has_constants) continue;
/* Modifier propagation is not handled here */
if (mir_nontrivial_source2_mod_simple(ins)) continue;
if (mir_nontrivial_mod(ins, 1, false)) continue;
if (mir_nontrivial_outmod(ins)) continue;
/* Shortened arguments (bias for textures, extra load/store


@@ -106,53 +106,30 @@ mir_single_use(compiler_context *ctx, unsigned value)
return mir_use_count(ctx, value) <= 1;
}
static bool
mir_nontrivial_raw_mod(midgard_vector_alu_src src, bool is_int)
bool
mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
{
if (is_int)
return src.mod == midgard_int_shift;
else
return src.mod;
}
bool is_int = midgard_is_integer_op(ins->alu.op);
static bool
mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask, unsigned *swizzle)
{
if (mir_nontrivial_raw_mod(src, is_int)) return true;
if (is_int) {
if (ins->src_shift[i]) return true;
} else {
if (ins->src_neg[i]) return true;
if (ins->src_abs[i]) return true;
}
/* size-conversion */
if (src.half) return true;
if (ins->dest_type != ins->src_types[i]) return true;
for (unsigned c = 0; c < 16; ++c) {
if (!(mask & (1 << c))) continue;
if (swizzle[c] != c) return true;
if (check_swizzle) {
for (unsigned c = 0; c < 16; ++c) {
if (!(ins->mask & (1 << c))) continue;
if (ins->swizzle[i][c] != c) return true;
}
}
return false;
}
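
Concretely, the check above is what makes the copy-prop passes earlier in this commit bail; both callers ask about source 1 (the second operand). Reasoning from the code:

/* mir_nontrivial_mod(ins, 1, check_swizzle) returns true when:
 *   - an integer op has src_shift[1] set;
 *   - a float op has src_neg[1] or src_abs[1] set;
 *   - src_types[1] != dest_type (any size or base-type mismatch);
 *   - check_swizzle is set and some live component has a non-identity swizzle.
 * midgard_opt_copy_prop_reg passes check_swizzle = true (replacing the old
 * mir_nontrivial_source2_mod), while midgard_opt_copy_prop passes false
 * (replacing the old _simple variant). */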
bool
mir_nontrivial_source2_mod(midgard_instruction *ins)
{
bool is_int = midgard_is_integer_op(ins->alu.op);
midgard_vector_alu_src src2 =
vector_alu_from_unsigned(ins->alu.src2);
return mir_nontrivial_mod(src2, is_int, ins->mask, ins->swizzle[1]);
}
bool
mir_nontrivial_source2_mod_simple(midgard_instruction *ins)
{
bool is_int = midgard_is_integer_op(ins->alu.op);
midgard_vector_alu_src src2 =
vector_alu_from_unsigned(ins->alu.src2);
return mir_nontrivial_raw_mod(src2, is_int) || src2.half;
}
bool
mir_nontrivial_outmod(midgard_instruction *ins)
{