intel/compiler: lower some 16-bit integer operations to 32-bit
These operations are not supported in hardware for 16-bit integers, so we lower them to 32-bit. The lowering pass runs after the optimization loop to ensure that we also lower ALU operations injected by algebraic optimizations. Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
This commit is contained in:
parent
b9a3d8c23e
commit
b11e9425df
|
@ -592,6 +592,25 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
|
|||
return nir;
|
||||
}
|
||||
|
||||
static unsigned
|
||||
lower_bit_size_callback(const nir_alu_instr *alu, void *data)
|
||||
{
|
||||
assert(alu->dest.dest.is_ssa);
|
||||
if (alu->dest.dest.ssa.bit_size != 16)
|
||||
return 0;
|
||||
|
||||
switch (alu->op) {
|
||||
case nir_op_idiv:
|
||||
case nir_op_imod:
|
||||
case nir_op_irem:
|
||||
case nir_op_udiv:
|
||||
case nir_op_umod:
|
||||
return 32;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Does some simple lowering and runs the standard suite of optimizations
|
||||
*
|
||||
* This is intended to be called more-or-less directly after you get the
|
||||
|
@ -645,6 +664,8 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
|
|||
|
||||
nir = brw_nir_optimize(nir, compiler, is_scalar);
|
||||
|
||||
nir_lower_bit_size(nir, lower_bit_size_callback, NULL);
|
||||
|
||||
if (is_scalar) {
|
||||
OPT(nir_lower_load_const_to_scalar);
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue