v3dv/pipeline: use nir_shader_instructions_pass when lowering layout

The trigger for this commit was that we found we were not calling
nir_metadata_preserve when lowering the layout code. But then I found
that it would be better to just update the code to use
nir_shader_instructions_pass, so we avoid having to manually:
   * Initialize the nir_builder
   * Call the nir_foreach functions (we pass a callback instead)
   * Call the nir_metadata_preserve functions (which, as mentioned, we were not calling)

We also get a nice cleanup of several functions, since the number of
parameters is reduced (we now pass a single state struct).
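
For context, below is a minimal sketch of the nir_shader_instructions_pass
pattern this commit moves to. The struct and function names (example_state,
lower_example_cb, lower_example) and the include paths are illustrative, not
the driver's real ones; only the NIR types and helpers are actual API.

#include "nir.h"
#include "nir_builder.h"

struct example_state {
   bool needs_default_sampler_state;
};

/* Called once per instruction with an already-initialized nir_builder;
 * the last argument is the user-data pointer handed to the pass below. */
static bool
lower_example_cb(nir_builder *b, nir_instr *instr, void *data)
{
   struct example_state *state = data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   b->cursor = nir_before_instr(instr);
   /* ...rewrite the instruction here, updating state as needed... */
   return true; /* report progress */
}

static bool
lower_example(nir_shader *shader)
{
   struct example_state state = { .needs_default_sampler_state = false };

   /* The helper walks every impl/block/instruction, sets up the builder,
    * and calls nir_metadata_preserve based on the callback's return value. */
   return nir_shader_instructions_pass(shader, lower_example_cb,
                                       nir_metadata_block_index |
                                       nir_metadata_dominance,
                                       &state);
}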

Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17609>
Alejandro Piñeiro 2022-07-18 14:31:21 +02:00 committed by Marge Bot
parent 8d3ce4eb06
commit 22022dfa04
1 changed file with 60 additions and 67 deletions


@@ -533,10 +533,16 @@ descriptor_map_add(struct v3dv_descriptor_map *map,
    return index;
 }
 
+struct lower_pipeline_layout_state {
+   struct v3dv_pipeline *pipeline;
+   const struct v3dv_pipeline_layout *layout;
+   bool needs_default_sampler_state;
+};
+
 static void
 lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
-                         struct v3dv_pipeline *pipeline)
+                         struct lower_pipeline_layout_state *state)
 {
    assert(instr->intrinsic == nir_intrinsic_load_push_constant);
    instr->intrinsic = nir_intrinsic_load_uniform;
@@ -584,9 +590,7 @@ pipeline_get_descriptor_map(struct v3dv_pipeline *pipeline,
 
 static void
 lower_vulkan_resource_index(nir_builder *b,
                             nir_intrinsic_instr *instr,
-                            nir_shader *shader,
-                            struct v3dv_pipeline *pipeline,
-                            const struct v3dv_pipeline_layout *layout)
+                            struct lower_pipeline_layout_state *state)
 {
    assert(instr->intrinsic == nir_intrinsic_vulkan_resource_index);
@@ -594,7 +598,7 @@ lower_vulkan_resource_index(nir_builder *b,
 
    unsigned set = nir_intrinsic_desc_set(instr);
    unsigned binding = nir_intrinsic_binding(instr);
-   struct v3dv_descriptor_set_layout *set_layout = layout->set[set].layout;
+   struct v3dv_descriptor_set_layout *set_layout = state->layout->set[set].layout;
    struct v3dv_descriptor_set_binding_layout *binding_layout =
       &set_layout->binding[binding];
    unsigned index = 0;
@@ -606,8 +610,8 @@ lower_vulkan_resource_index(nir_builder *b,
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK: {
       struct v3dv_descriptor_map *descriptor_map =
-         pipeline_get_descriptor_map(pipeline, binding_layout->type,
-                                     shader->info.stage, false);
+         pipeline_get_descriptor_map(state->pipeline, binding_layout->type,
+                                     b->shader->info.stage, false);
 
       if (!const_val)
          unreachable("non-constant vulkan_resource_index array index");
@@ -661,10 +665,10 @@ lower_vulkan_resource_index(nir_builder *b,
  * sampler object
  */
 static uint8_t
-lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
-                        nir_shader *shader,
-                        struct v3dv_pipeline *pipeline,
-                        const struct v3dv_pipeline_layout *layout)
+lower_tex_src_to_offset(nir_builder *b,
+                        nir_tex_instr *instr,
+                        unsigned src_idx,
+                        struct lower_pipeline_layout_state *state)
 {
    nir_ssa_def *index = NULL;
    unsigned base_index = 0;
@@ -727,7 +731,7 @@ lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
     */
    bool relaxed_precision = deref->var->data.precision == GLSL_PRECISION_MEDIUM ||
                             deref->var->data.precision == GLSL_PRECISION_LOW;
 
-   struct v3dv_descriptor_set_layout *set_layout = layout->set[set].layout;
+   struct v3dv_descriptor_set_layout *set_layout = state->layout->set[set].layout;
    struct v3dv_descriptor_set_binding_layout *binding_layout =
       &set_layout->binding[binding];
@@ -747,8 +751,8 @@ lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
       return_size = relaxed_precision || instr->is_shadow ? 16 : 32;
 
    struct v3dv_descriptor_map *map =
-      pipeline_get_descriptor_map(pipeline, binding_layout->type,
-                                  shader->info.stage, is_sampler);
+      pipeline_get_descriptor_map(state->pipeline, binding_layout->type,
+                                  b->shader->info.stage, is_sampler);
    int desc_index =
       descriptor_map_add(map,
                          deref->var->data.descriptor_set,
@@ -767,11 +771,9 @@ lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
 }
 
 static bool
-lower_sampler(nir_builder *b, nir_tex_instr *instr,
-              nir_shader *shader,
-              struct v3dv_pipeline *pipeline,
-              const struct v3dv_pipeline_layout *layout,
-              bool *needs_default_sampler_state)
+lower_sampler(nir_builder *b,
+              nir_tex_instr *instr,
+              struct lower_pipeline_layout_state *state)
 {
    uint8_t return_size = 0;
 
@@ -779,14 +781,13 @@ lower_sampler(nir_builder *b, nir_tex_instr *instr,
       nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);
 
    if (texture_idx >= 0)
-      return_size = lower_tex_src_to_offset(b, instr, texture_idx, shader,
-                                            pipeline, layout);
+      return_size = lower_tex_src_to_offset(b, instr, texture_idx, state);
 
    int sampler_idx =
       nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);
 
    if (sampler_idx >= 0)
-      lower_tex_src_to_offset(b, instr, sampler_idx, shader, pipeline, layout);
+      lower_tex_src_to_offset(b, instr, sampler_idx, state);
 
    if (texture_idx < 0 && sampler_idx < 0)
       return false;
@@ -795,7 +796,7 @@ lower_sampler(nir_builder *b, nir_tex_instr *instr,
     * case, and we ensure that it is using the correct return size.
     */
    if (sampler_idx < 0) {
-      *needs_default_sampler_state = true;
+      state->needs_default_sampler_state = true;
       instr->sampler_index = return_size == 16 ?
          V3DV_NO_SAMPLER_16BIT_IDX : V3DV_NO_SAMPLER_32BIT_IDX;
    }
@@ -807,9 +808,7 @@ lower_sampler(nir_builder *b, nir_tex_instr *instr,
 static void
 lower_image_deref(nir_builder *b,
                   nir_intrinsic_instr *instr,
-                  nir_shader *shader,
-                  struct v3dv_pipeline *pipeline,
-                  const struct v3dv_pipeline_layout *layout)
+                  struct lower_pipeline_layout_state *state)
 {
    nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
    nir_ssa_def *index = NULL;
@@ -848,7 +847,7 @@ lower_image_deref(nir_builder *b,
    uint32_t set = deref->var->data.descriptor_set;
    uint32_t binding = deref->var->data.binding;
 
-   struct v3dv_descriptor_set_layout *set_layout = layout->set[set].layout;
+   struct v3dv_descriptor_set_layout *set_layout = state->layout->set[set].layout;
    struct v3dv_descriptor_set_binding_layout *binding_layout =
       &set_layout->binding[binding];
@@ -858,8 +857,8 @@ lower_image_deref(nir_builder *b,
           binding_layout->type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
 
    struct v3dv_descriptor_map *map =
-      pipeline_get_descriptor_map(pipeline, binding_layout->type,
-                                  shader->info.stage, false);
+      pipeline_get_descriptor_map(state->pipeline, binding_layout->type,
+                                  b->shader->info.stage, false);
 
    int desc_index =
       descriptor_map_add(map,
@@ -882,18 +881,17 @@ lower_image_deref(nir_builder *b,
 }
 
 static bool
-lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
-                nir_shader *shader,
-                struct v3dv_pipeline *pipeline,
-                const struct v3dv_pipeline_layout *layout)
+lower_intrinsic(nir_builder *b,
+                nir_intrinsic_instr *instr,
+                struct lower_pipeline_layout_state *state)
 {
    switch (instr->intrinsic) {
    case nir_intrinsic_load_push_constant:
-      lower_load_push_constant(b, instr, pipeline);
+      lower_load_push_constant(b, instr, state);
       return true;
 
    case nir_intrinsic_vulkan_resource_index:
-      lower_vulkan_resource_index(b, instr, shader, pipeline, layout);
+      lower_vulkan_resource_index(b, instr, state);
       return true;
 
    case nir_intrinsic_load_vulkan_descriptor: {
@@ -919,7 +917,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
    case nir_intrinsic_image_deref_atomic_comp_swap:
    case nir_intrinsic_image_deref_size:
    case nir_intrinsic_image_deref_samples:
-      lower_image_deref(b, instr, shader, pipeline, layout);
+      lower_image_deref(b, instr, state);
       return true;
 
    default:
@@ -928,35 +926,24 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
 }
 
 static bool
-lower_impl(nir_function_impl *impl,
-           nir_shader *shader,
-           struct v3dv_pipeline *pipeline,
-           const struct v3dv_pipeline_layout *layout,
-           bool *needs_default_sampler_state)
+lower_pipeline_layout_cb(nir_builder *b,
+                         nir_instr *instr,
+                         void *_state)
 {
-   nir_builder b;
-   nir_builder_init(&b, impl);
    bool progress = false;
+   struct lower_pipeline_layout_state *state = _state;
 
-   nir_foreach_block(block, impl) {
-      nir_foreach_instr_safe(instr, block) {
-         b.cursor = nir_before_instr(instr);
-         switch (instr->type) {
-         case nir_instr_type_tex:
-            progress |=
-               lower_sampler(&b, nir_instr_as_tex(instr), shader, pipeline,
-                             layout, needs_default_sampler_state);
-            break;
-         case nir_instr_type_intrinsic:
-            progress |=
-               lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader,
-                               pipeline, layout);
-            break;
-         default:
-            break;
-         }
-      }
-   }
+   b->cursor = nir_before_instr(instr);
+   switch (instr->type) {
+   case nir_instr_type_tex:
+      progress |= lower_sampler(b, nir_instr_as_tex(instr), state);
+      break;
+   case nir_instr_type_intrinsic:
+      progress |= lower_intrinsic(b, nir_instr_as_intrinsic(instr), state);
+      break;
+   default:
+      break;
+   }
 
    return progress;
 }
@@ -968,13 +955,19 @@ lower_pipeline_layout_info(nir_shader *shader,
                            bool *needs_default_sampler_state)
 {
    bool progress = false;
-   *needs_default_sampler_state = false;
 
-   nir_foreach_function(function, shader) {
-      if (function->impl)
-         progress |= lower_impl(function->impl, shader, pipeline, layout,
-                                needs_default_sampler_state);
-   }
+   struct lower_pipeline_layout_state state = {
+      .pipeline = pipeline,
+      .layout = layout,
+      .needs_default_sampler_state = false,
+   };
+
+   progress = nir_shader_instructions_pass(shader, lower_pipeline_layout_cb,
+                                           nir_metadata_block_index |
+                                           nir_metadata_dominance,
+                                           &state);
+
+   *needs_default_sampler_state = state.needs_default_sampler_state;
 
    return progress;
 }