freedreno/ir3: Enable PIPE_CAP_PACKED_UNIFORMS

This commit turns on the gallium cap, adds a pass to lower the
load_ubo intrinsics for block 0 back to load_uniform intrinsics, and
adjusts the backend where the cap switches units from vec4s to dwords.

As we stop using ir3_glsl_type_size() for uniform layout, this also
corrects an issue where we would allocate a vec4 slot for samplers in
uniforms, fixing:

  dEQP-GLES3.functional.shaders.struct.uniform.sampler_array_fragment
  dEQP-GLES3.functional.shaders.struct.uniform.sampler_array_vertex
  dEQP-GLES3.functional.shaders.struct.uniform.sampler_nested_fragment
  dEQP-GLES2.functional.shaders.struct.uniform.sampler_nested_vertex
  dEQP-GLES2.functional.shaders.struct.uniform.sampler_nested_fragment

Signed-off-by: Kristian H. Kristensen <hoegsberg@chromium.org>
Reviewed-by: Rob Clark <robdclark@gmail.com>
This commit is contained in:
Kristian H. Kristensen 2019-03-26 10:31:54 -07:00
parent 56b4bc292f
commit 3c8779af32
8 changed files with 120 additions and 13 deletions

View File

@ -35,6 +35,7 @@ ir3_SOURCES := \
ir3/ir3_legalize.c \
ir3/ir3_nir.c \
ir3/ir3_nir.h \
ir3/ir3_nir_analyze_ubo_ranges.c \
ir3/ir3_nir_lower_io_offsets.c \
ir3/ir3_nir_lower_tg4_to_tex.c \
ir3/ir3_print.c \

View File

@ -681,8 +681,10 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
struct ir3_block *b = ctx->block;
struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
nir_const_value *const_offset;
/* UBO addresses are the first driver params: */
unsigned ubo = regid(ctx->so->constbase.ubo, 0);
/* UBO addresses are the first driver params, but subtract 2 here to
* account for nir_lower_uniforms_to_ubo rebasing the UBOs such that UBO 0
* is the uniforms: */
unsigned ubo = regid(ctx->so->constbase.ubo, 0) - 2;
const unsigned ptrsz = ir3_pointer_size(ctx);
int off = 0;
@ -1151,15 +1153,13 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
if (const_offset) {
idx += const_offset->u32[0];
for (int i = 0; i < intr->num_components; i++) {
unsigned n = idx * 4 + i;
dst[i] = create_uniform(b, n);
dst[i] = create_uniform(b, idx + i);
}
} else {
src = ir3_get_src(ctx, &intr->src[0]);
for (int i = 0; i < intr->num_components; i++) {
int n = idx * 4 + i;
dst[i] = create_uniform_indirect(b, n,
ir3_get_addr(ctx, src[0], 4));
dst[i] = create_uniform_indirect(b, idx + i,
ir3_get_addr(ctx, src[0], 1));
}
/* NOTE: if relative addressing is used, we set
* constlen in the compiler (to worst-case value)

View File

@ -224,10 +224,13 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
ir3_optimize_loop(s);
/* do idiv lowering after first opt loop to give a chance for
* divide by immed power-of-two to be caught first:
/* do ubo load and idiv lowering after first opt loop to get a chance to
* propagate constants for divide by immed power-of-two and constant ubo
* block/offsets:
*/
if (OPT(s, nir_lower_idiv))
const bool ubo_progress = OPT(s, ir3_nir_analyze_ubo_ranges, shader);
const bool idiv_progress = OPT(s, nir_lower_idiv);
if (ubo_progress || idiv_progress)
ir3_optimize_loop(s);
OPT_V(s, nir_remove_dead_variables, nir_var_function_temp);

View File

@ -28,6 +28,7 @@
#define IR3_NIR_H_
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/shader_enums.h"
#include "ir3_shader.h"
@ -43,4 +44,9 @@ bool ir3_key_lowers_nir(const struct ir3_shader_key *key);
struct nir_shader * ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
const struct ir3_shader_key *key);
bool ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader *shader);
nir_ssa_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shift);
#endif /* IR3_NIR_H_ */

View File

@ -0,0 +1,95 @@
/*
* Copyright © 2019 Google, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "ir3_nir.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_dynarray.h"
#include "mesa/main/macros.h"
/* Per-shader state accumulated while lowering load_ubo instructions. */
struct ir3_ubo_analysis_state {
	/* Number of load_ubo intrinsics rewritten to load_uniform. */
	unsigned lower_count;
};
/* Rewrite a constant-block-index load_ubo of block 0 into a load_uniform.
 * After nir_lower_uniforms_to_ubo, UBO block 0 holds the default uniform
 * block.  load_uniform offsets are in dwords, so the byte offset carried
 * by the load_ubo is divided by 4 on the way through.
 */
static void
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
			  struct ir3_ubo_analysis_state *state)
{
	/* We don't lower dynamic block index UBO loads to load_uniform, but we
	 * could probably with some effort determine a block stride in number of
	 * registers.
	 */
	if (!nir_src_is_const(instr->src[0]))
		return;

	const uint32_t block = nir_src_as_uint(instr->src[0]);

	/* Only block 0 (the uniforms) is lowered; real UBO blocks keep using
	 * load_ubo.
	 */
	if (block > 0)
		return;

	b->cursor = nir_before_instr(&instr->instr);

	/* Convert the byte offset to a dword offset: try to fold the >>2 into
	 * a producing shift (shift of -2 means "x << -2", i.e. "x >> 2"),
	 * otherwise emit an explicit unsigned shift-right.
	 */
	nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
	nir_ssa_def *uniform_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
	if (uniform_offset == NULL)
		uniform_offset = nir_ushr(b, ubo_offset, nir_imm_int(b, 2));

	/* Build the replacement load_uniform with the same component count and
	 * bit size as the original load_ubo destination.
	 */
	nir_intrinsic_instr *uniform =
		nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
	uniform->num_components = instr->num_components;
	uniform->src[0] = nir_src_for_ssa(uniform_offset);
	nir_ssa_dest_init(&uniform->instr, &uniform->dest,
			  uniform->num_components, instr->dest.ssa.bit_size,
			  instr->dest.ssa.name);
	nir_builder_instr_insert(b, &uniform->instr);

	/* Point all users at the new def, then drop the old load_ubo. */
	nir_ssa_def_rewrite_uses(&instr->dest.ssa,
				 nir_src_for_ssa(&uniform->dest.ssa));

	nir_instr_remove(&instr->instr);

	state->lower_count++;
}
/* Walk every instruction in the shader and lower block-0 load_ubo
 * intrinsics to load_uniform.  Returns true if anything was lowered, so
 * the caller knows to re-run the optimization loop.
 */
bool
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader *shader)
{
	struct ir3_ubo_analysis_state state = { 0 };

	nir_foreach_function(function, nir) {
		if (!function->impl)
			continue;

		nir_builder builder;
		nir_builder_init(&builder, function->impl);

		nir_foreach_block(block, function->impl) {
			nir_foreach_instr_safe(instr, block) {
				if (instr->type != nir_instr_type_intrinsic)
					continue;

				nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
				if (intr->intrinsic != nir_intrinsic_load_ubo)
					continue;

				lower_ubo_load_to_uniform(intr, &builder, &state);
			}
		}

		/* No control flow was added or removed, so block indices and
		 * dominance information remain valid.
		 */
		nir_metadata_preserve(function->impl, nir_metadata_block_index |
				      nir_metadata_dominance);
	}

	return state.lower_count > 0;
}

View File

@ -124,8 +124,8 @@ check_and_propagate_bit_shift32(nir_builder *b, nir_ssa_def *offset,
return shift_ssa;
}
static nir_ssa_def *
try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shift)
nir_ssa_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shift)
{
nir_instr *offset_instr = offset->parent_instr;
if (offset_instr->type != nir_instr_type_alu)
@ -187,7 +187,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
* Here we use the convention that shifting right is negative while shifting
* left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
*/
nir_ssa_def *new_offset = try_propagate_bit_shift(b, offset, -2);
nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -2);
/* The new source that will hold the dword-offset is always the last
* one for every intrinsic.

View File

@ -50,6 +50,7 @@ libfreedreno_ir3_files = files(
'ir3_legalize.c',
'ir3_nir.c',
'ir3_nir.h',
'ir3_nir_analyze_ubo_ranges.c',
'ir3_nir_lower_io_offsets.c',
'ir3_nir_lower_tg4_to_tex.c',
'ir3_print.c',

View File

@ -195,6 +195,7 @@ fd_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
case PIPE_CAP_TEXTURE_BARRIER:
case PIPE_CAP_INVALIDATE_BUFFER:
case PIPE_CAP_PACKED_UNIFORMS:
return 1;
case PIPE_CAP_VERTEXID_NOBASE: