nir/lower_doubles: Use the new NIR lowering framework

One advantage of this is that we no longer need to run in a loop because
the new framework handles lowering of instructions added during lowering.

Reviewed-by: Eric Anholt <eric@anholt.net>
This commit is contained in:
Jason Ekstrand 2019-07-11 16:54:01 -05:00
parent 197a08dc69
commit d7d35a9522
1 changed file with 63 additions and 70 deletions

View File

@ -424,13 +424,13 @@ lower_mod(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
nir_imm_double(b, 0.0));
}
static bool
static nir_ssa_def *
lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr,
const nir_shader *softfp64,
nir_lower_doubles_options options)
{
if (!(options & nir_lower_fp64_full_software))
return false;
return NULL;
assert(instr->dest.dest.is_ssa);
@ -569,8 +569,6 @@ lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr,
assert(func);
}
b->cursor = nir_before_instr(&instr->instr);
nir_ssa_def *params[4] = { NULL, };
nir_variable *ret_tmp =
@ -586,10 +584,7 @@ lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr,
nir_inline_function_impl(b, func->impl, params);
nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
nir_src_for_ssa(nir_load_deref(b, ret_deref)));
nir_instr_remove(&instr->instr);
return true;
return nir_load_deref(b, ret_deref);
}
nir_lower_doubles_options
@ -609,11 +604,22 @@ nir_lower_doubles_op_to_options_mask(nir_op opcode)
}
}
struct lower_doubles_data {
const nir_shader *softfp64;
nir_lower_doubles_options options;
};
static bool
lower_doubles_instr(nir_builder *b, nir_alu_instr *alu,
const nir_shader *softfp64,
nir_lower_doubles_options options)
should_lower_double_instr(const nir_instr *instr, const void *_data)
{
const struct lower_doubles_data *data = _data;
const nir_lower_doubles_options options = data->options;
if (instr->type != nir_instr_type_alu)
return false;
const nir_alu_instr *alu = nir_instr_as_alu(instr);
assert(alu->dest.dest.is_ssa);
bool is_64 = alu->dest.dest.ssa.bit_size == 64;
@ -625,58 +631,56 @@ lower_doubles_instr(nir_builder *b, nir_alu_instr *alu,
if (!is_64)
return false;
if (lower_doubles_instr_to_soft(b, alu, softfp64, options))
if (options & nir_lower_fp64_full_software)
return true;
if (!(options & nir_lower_doubles_op_to_options_mask(alu->op)))
return false;
return options & nir_lower_doubles_op_to_options_mask(alu->op);
}
b->cursor = nir_before_instr(&alu->instr);
static nir_ssa_def *
lower_doubles_instr(nir_builder *b, nir_instr *instr, void *_data)
{
const struct lower_doubles_data *data = _data;
const nir_lower_doubles_options options = data->options;
nir_alu_instr *alu = nir_instr_as_alu(instr);
nir_ssa_def *soft_def =
lower_doubles_instr_to_soft(b, alu, data->softfp64, options);
if (soft_def)
return soft_def;
if (!(options & nir_lower_doubles_op_to_options_mask(alu->op)))
return NULL;
nir_ssa_def *src = nir_mov_alu(b, alu->src[0],
alu->dest.dest.ssa.num_components);
nir_ssa_def *result;
switch (alu->op) {
case nir_op_frcp:
result = lower_rcp(b, src);
break;
return lower_rcp(b, src);
case nir_op_fsqrt:
result = lower_sqrt_rsq(b, src, true);
break;
return lower_sqrt_rsq(b, src, true);
case nir_op_frsq:
result = lower_sqrt_rsq(b, src, false);
break;
return lower_sqrt_rsq(b, src, false);
case nir_op_ftrunc:
result = lower_trunc(b, src);
break;
return lower_trunc(b, src);
case nir_op_ffloor:
result = lower_floor(b, src);
break;
return lower_floor(b, src);
case nir_op_fceil:
result = lower_ceil(b, src);
break;
return lower_ceil(b, src);
case nir_op_ffract:
result = lower_fract(b, src);
break;
return lower_fract(b, src);
case nir_op_fround_even:
result = lower_round_even(b, src);
break;
return lower_round_even(b, src);
case nir_op_fmod: {
nir_ssa_def *src1 = nir_mov_alu(b, alu->src[1],
alu->dest.dest.ssa.num_components);
result = lower_mod(b, src, src1);
return lower_mod(b, src, src1);
}
break;
default:
unreachable("unhandled opcode");
}
nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(result));
nir_instr_remove(&alu->instr);
return true;
}
static bool
@ -684,41 +688,30 @@ nir_lower_doubles_impl(nir_function_impl *impl,
const nir_shader *softfp64,
nir_lower_doubles_options options)
{
bool progress = false;
struct lower_doubles_data data = {
.softfp64 = softfp64,
.options = options,
};
nir_builder b;
nir_builder_init(&b, impl);
bool progress =
nir_function_impl_lower_instructions(impl,
should_lower_double_instr,
lower_doubles_instr,
&data);
nir_foreach_block_safe(block, impl) {
nir_foreach_instr_safe(instr, block) {
if (instr->type == nir_instr_type_alu)
progress |= lower_doubles_instr(&b, nir_instr_as_alu(instr),
softfp64, options);
}
if (progress && (options & nir_lower_fp64_full_software)) {
/* SSA and register indices are completely messed up now */
nir_index_ssa_defs(impl);
nir_index_local_regs(impl);
nir_metadata_preserve(impl, nir_metadata_none);
/* And we have deref casts we need to clean up thanks to function
* inlining.
*/
nir_opt_deref_impl(impl);
}
if (progress) {
if (options & nir_lower_fp64_full_software) {
/* SSA and register indices are completely messed up now */
nir_index_ssa_defs(impl);
nir_index_local_regs(impl);
nir_metadata_preserve(impl, nir_metadata_none);
/* And we have deref casts we need to clean up thanks to function
* inlining.
*/
nir_opt_deref_impl(impl);
} else {
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
}
} else {
#ifndef NDEBUG
impl->valid_metadata &= ~nir_metadata_not_properly_reset;
#endif
}
return progress;
}