ir3/nir: Add a new pass 'ir3_nir_lower_io_offsets'

This NIR->NIR pass implements offset computations that are currently
done on the IR3 backend compiler, to give NIR a better chance of
optimizing them.

For now, it supports lowering the dword-offset computation for SSBO
instructions. It will take an SSBO intrinsic and replace it with the
new ir3-specific version that adds an extra source. That source will
hold the SSA value that results from dividing the original byte-offset
source (already provided by NIR in one of the intrinsic sources) by 4,
implemented as a right-shift (SHR) by 2.

Note that on a6xx the original byte-offset is not needed, so we could
potentially replace that source instead of adding a new one. But to
keep things simple and consistent we always add the new source and
a6xx will just ignore the original one.

Reviewed-by: Rob Clark <robdclark@gmail.com>
This commit is contained in:
Eduardo Lima Mitev 2019-01-13 20:10:34 +01:00
parent 6ff50a488a
commit 9dd0cfafc9
4 changed files with 217 additions and 0 deletions

View File

@ -35,6 +35,7 @@ ir3_SOURCES := \
ir3/ir3_legalize.c \
ir3/ir3_nir.c \
ir3/ir3_nir.h \
ir3/ir3_nir_lower_io_offsets.c \
ir3/ir3_nir_lower_tg4_to_tex.c \
ir3/ir3_print.c \
ir3/ir3_ra.c \

View File

@ -36,6 +36,7 @@ void ir3_nir_scan_driver_consts(nir_shader *shader, struct ir3_driver_const_layo
bool ir3_nir_apply_trig_workarounds(nir_shader *shader);
bool ir3_nir_lower_tg4_to_tex(nir_shader *shader);
bool ir3_nir_lower_io_offsets(nir_shader *shader);
const nir_shader_compiler_options * ir3_get_compiler_options(struct ir3_compiler *compiler);
bool ir3_key_lowers_nir(const struct ir3_shader_key *key);

View File

@ -0,0 +1,214 @@
/*
* Copyright © 2018-2019 Igalia S.L.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "ir3_nir.h"
#include "compiler/nir/nir_builder.h"
/**
* This pass moves to NIR certain offset computations for different I/O
* ops that are currently implemented on the IR3 backend compiler, to
* give NIR a chance to optimize them:
*
* - Dword-offset for SSBO load, store and atomics: A new, similar intrinsic
* is emitted that replaces the original one, adding a new source that
* holds the result of the original byte-offset source divided by 4.
*/
/* Maps a core SSBO intrinsic opcode to the ir3-specific variant emitted by
 * this pass, and reports (through @offset_src_idx) which source of the
 * original intrinsic carries the byte offset.
 *
 * Returns -1 when @intrinsic is not an SSBO instruction handled here.
 */
static int
get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
                                     uint8_t *offset_src_idx)
{
   debug_assert(offset_src_idx);

   /* Every handled intrinsic keeps its byte offset in src[1], except
    * stores, which carry the value to write in src[0] and therefore
    * keep their offset in src[2].
    */
   *offset_src_idx = (intrinsic == nir_intrinsic_store_ssbo) ? 2 : 1;

   switch (intrinsic) {
   case nir_intrinsic_store_ssbo:
      return nir_intrinsic_store_ssbo_ir3;
   case nir_intrinsic_load_ssbo:
      return nir_intrinsic_load_ssbo_ir3;
   case nir_intrinsic_ssbo_atomic_add:
      return nir_intrinsic_ssbo_atomic_add_ir3;
   case nir_intrinsic_ssbo_atomic_imin:
      return nir_intrinsic_ssbo_atomic_imin_ir3;
   case nir_intrinsic_ssbo_atomic_umin:
      return nir_intrinsic_ssbo_atomic_umin_ir3;
   case nir_intrinsic_ssbo_atomic_imax:
      return nir_intrinsic_ssbo_atomic_imax_ir3;
   case nir_intrinsic_ssbo_atomic_umax:
      return nir_intrinsic_ssbo_atomic_umax_ir3;
   case nir_intrinsic_ssbo_atomic_and:
      return nir_intrinsic_ssbo_atomic_and_ir3;
   case nir_intrinsic_ssbo_atomic_or:
      return nir_intrinsic_ssbo_atomic_or_ir3;
   case nir_intrinsic_ssbo_atomic_xor:
      return nir_intrinsic_ssbo_atomic_xor_ir3;
   case nir_intrinsic_ssbo_atomic_exchange:
      return nir_intrinsic_ssbo_atomic_exchange_ir3;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return nir_intrinsic_ssbo_atomic_comp_swap_ir3;
   default:
      /* Not an SSBO intrinsic (or not one this pass lowers). */
      return -1;
   }
}
/* Replaces @intrinsic with its ir3-specific variant @ir3_ssbo_opcode,
 * copying over all sources, const indices and the destination, and
 * appending one extra source that holds the original byte offset
 * shifted right by 2 (i.e. the dword offset).
 *
 * @offset_src_idx is the index of the byte-offset source in the
 * original intrinsic (as reported by
 * get_ir3_intrinsic_for_ssbo_intrinsic()).
 *
 * The original intrinsic is removed; always returns true.
 *
 * NOTE(review): the statement order here is load-bearing — sources and
 * destination are initialized before the new instruction is inserted,
 * and the appended source is fixed up with nir_instr_rewrite_src()
 * only after insertion, so use/def bookkeeping stays consistent.
 */
static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
                      unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
   /* The ir3 variant has one source more than the original; num_srcs
    * counts only the original sources, so src[num_srcs] is the new slot.
    */
   unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
   bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
   nir_ssa_def *new_dest = NULL;

   /* Here we create a new intrinsic and copy over all contents from the old one. */
   nir_intrinsic_instr *new_intrinsic;
   nir_src *target_src;

   /* 'offset_src_idx' holds the index of the source that represent the offset. */
   new_intrinsic =
      nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);

   debug_assert(intrinsic->src[offset_src_idx].is_ssa);
   nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;

   /* The new source that will hold the dword-offset is always the last
    * one for every intrinsic.
    */
   target_src = &new_intrinsic->src[num_srcs];
   /* Temporarily seed the new slot with the byte offset; it is rewritten
    * to the shifted value after the instruction is inserted (below).
    */
   *target_src = nir_src_for_ssa(offset);

   if (has_dest) {
      debug_assert(intrinsic->dest.is_ssa);
      nir_ssa_def *dest = &intrinsic->dest.ssa;
      /* Mirror the original destination's shape so uses can be rewritten
       * one-for-one.
       */
      nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
                        dest->num_components, dest->bit_size, NULL);
      new_dest = &new_intrinsic->dest.ssa;
   }

   /* Copy all original sources verbatim. */
   for (unsigned i = 0; i < num_srcs; i++)
      new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);

   for (unsigned i = 0; i < NIR_INTRINSIC_MAX_CONST_INDEX; i++)
      new_intrinsic->const_index[i] = intrinsic->const_index[i];

   new_intrinsic->num_components = intrinsic->num_components;

   /* Emit the dword-offset computation (byte offset >> 2, i.e. a divide
    * by 4) right before the old intrinsic.
    */
   b->cursor = nir_before_instr(&intrinsic->instr);
   nir_ssa_def *offset_div_4 = nir_ushr(b, offset, nir_imm_int(b, 2));
   debug_assert(offset_div_4);

   /* Insert the new intrinsic right before the old one. */
   b->cursor = nir_before_instr(&intrinsic->instr);
   nir_builder_instr_insert(b, &new_intrinsic->instr);

   /* Replace the last source of the new intrinsic by the result of
    * the offset divided by 4.
    */
   nir_instr_rewrite_src(&new_intrinsic->instr,
                         target_src,
                         nir_src_for_ssa(offset_div_4));

   if (has_dest) {
      /* Replace the uses of the original destination by that
       * of the new intrinsic.
       */
      nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa,
                               nir_src_for_ssa(new_dest));
   }

   /* Finally remove the original intrinsic. */
   nir_instr_remove(&intrinsic->instr);

   return true;
}
/* Walks one basic block and lowers every SSBO intrinsic handled by this
 * pass. Returns true if at least one instruction was rewritten.
 *
 * @mem_ctx is currently unused; kept for the caller's signature.
 */
static bool
lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      /* SSBO */
      uint8_t offset_src_idx;
      int ir3_op = get_ir3_intrinsic_for_ssbo_intrinsic(intr->intrinsic,
                                                        &offset_src_idx);
      if (ir3_op < 0)
         continue;

      progress |= lower_offset_for_ssbo(intr, b, (unsigned) ir3_op,
                                        offset_src_idx);
   }

   return progress;
}
/* Runs the lowering over every block of @impl, preserving block-index and
 * dominance metadata when anything changed. Returns true on progress.
 */
static bool
lower_io_offsets_func(nir_function_impl *impl)
{
   /* The previous 'void *mem_ctx = ralloc_parent(impl)' was dead code:
    * the value was only forwarded to lower_io_offsets_block(), which
    * never reads it, so we simply pass NULL.
    */
   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_safe(block, impl) {
      progress |= lower_io_offsets_block(block, &b, NULL);
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }

   return progress;
}
/* Pass entry point: lowers SSBO byte offsets to dword offsets across all
 * function implementations of @shader. Returns true if anything changed.
 */
bool
ir3_nir_lower_io_offsets(nir_shader *shader)
{
   bool any_progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;
      any_progress |= lower_io_offsets_func(function->impl);
   }

   return any_progress;
}

View File

@ -50,6 +50,7 @@ libfreedreno_ir3_files = files(
'ir3_legalize.c',
'ir3_nir.c',
'ir3_nir.h',
'ir3_nir_lower_io_offsets.c',
'ir3_nir_lower_tg4_to_tex.c',
'ir3_print.c',
'ir3_ra.c',