v3d/v3dv: use NIR_PASS(_

Instead of NIR_PASS_V, when possible.

This was done recently on anv (see commit ce60195ec and MR#17014).
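
For context: NIR_PASS(progress, shader, pass, ...) stores the pass's boolean
"made progress" return value into the variable named by its first argument,
while NIR_PASS_V discards it; passing `_` as the first argument keeps the
NIR_PASS form while explicitly ignoring progress. The sketch below only
illustrates that usage pattern; the shader type, the two passes, the `_`
sink and the simplified macro are stand-ins, not Mesa's actual definitions:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { const char *name; } nir_shader;   /* stand-in, not the real type */

    /* Stand-in passes: a NIR pass returns true when it changed the shader. */
    static bool nir_opt_dce(nir_shader *s)            { (void)s; return true;  }
    static bool nir_opt_algebraic_late(nir_shader *s) { (void)s; return false; }

    /* Dummy sink so NIR_PASS(_, ...) compiles in this sketch; Mesa provides
     * its own mechanism for this. */
    static bool _;

    /* Simplified macro: the real one is variadic and can also run shader
     * validation depending on debug options. */
    #define NIR_PASS(progress, shader, pass)   \
       do {                                    \
          if (pass(shader))                    \
             (progress) = true;                \
       } while (0)

    int
    main(void)
    {
       nir_shader s = { "example" };

       /* The style this commit converts to: progress explicitly ignored. */
       NIR_PASS(_, &s, nir_opt_dce);

       /* The same macro capturing progress, as in the late-algebraic loop
        * in v3d_attempt_compile(). */
       bool more_late_algebraic = true;
       while (more_late_algebraic) {
          more_late_algebraic = false;
          NIR_PASS(more_late_algebraic, &s, nir_opt_algebraic_late);
          NIR_PASS(_, &s, nir_opt_dce);
       }

       printf("passes ran on shader '%s'\n", s.name);
       return 0;
    }

A practical upside of the NIR_PASS(_, ...) spelling is that a call site can
later start tracking progress simply by replacing `_` with a bool variable,
without switching macros.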

Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17609>
Author:    Alejandro Piñeiro
Date:      2022-07-18 13:10:24 +02:00
Committed: Marge Bot
Parent:    0a50330c3d
Commit:    efc827ceea
3 changed files with 118 additions and 118 deletions

[File 1 of 3]

@@ -650,15 +650,15 @@ v3d_lower_nir(struct v3d_compile *c)
}
}
NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
NIR_PASS_V(c->s, nir_lower_system_values);
NIR_PASS_V(c->s, nir_lower_compute_system_values, NULL);
NIR_PASS(_, c->s, nir_lower_tex, &tex_options);
NIR_PASS(_, c->s, nir_lower_system_values);
NIR_PASS(_, c->s, nir_lower_compute_system_values, NULL);
NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
nir_var_function_temp,
0,
glsl_get_natural_size_align_bytes);
NIR_PASS_V(c->s, v3d_nir_lower_scratch);
NIR_PASS(_, c->s, nir_lower_vars_to_scratch,
nir_var_function_temp,
0,
glsl_get_natural_size_align_bytes);
NIR_PASS(_, c->s, v3d_nir_lower_scratch);
}
static void
@@ -903,32 +903,32 @@ v3d_nir_lower_vs_early(struct v3d_compile *c)
/* Split our I/O vars and dead code eliminate the unused
* components.
*/
NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
nir_var_shader_in | nir_var_shader_out);
NIR_PASS(_, c->s, nir_lower_io_to_scalar_early,
nir_var_shader_in | nir_var_shader_out);
uint64_t used_outputs[4] = {0};
for (int i = 0; i < c->vs_key->num_used_outputs; i++) {
int slot = v3d_slot_get_slot(c->vs_key->used_outputs[i]);
int comp = v3d_slot_get_component(c->vs_key->used_outputs[i]);
used_outputs[comp] |= 1ull << slot;
}
NIR_PASS_V(c->s, nir_remove_unused_io_vars,
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
NIR_PASS(_, c->s, nir_remove_unused_io_vars,
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
NIR_PASS(_, c->s, nir_lower_global_vars_to_local);
v3d_optimize_nir(c, c->s);
NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
NIR_PASS(_, c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
/* This must go before nir_lower_io */
if (c->vs_key->per_vertex_point_size)
NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
NIR_PASS(_, c->s, nir_lower_point_size, 1.0f, 0.0f);
NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size_vec4,
(nir_lower_io_options)0);
NIR_PASS(_, c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size_vec4,
(nir_lower_io_options)0);
/* clean up nir_lower_io's deref_var remains and do a constant folding pass
* on the code it generated.
*/
NIR_PASS_V(c->s, nir_opt_dce);
NIR_PASS_V(c->s, nir_opt_constant_folding);
NIR_PASS(_, c->s, nir_opt_dce);
NIR_PASS(_, c->s, nir_opt_constant_folding);
}
static void
@@ -937,32 +937,32 @@ v3d_nir_lower_gs_early(struct v3d_compile *c)
/* Split our I/O vars and dead code eliminate the unused
* components.
*/
NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
nir_var_shader_in | nir_var_shader_out);
NIR_PASS(_, c->s, nir_lower_io_to_scalar_early,
nir_var_shader_in | nir_var_shader_out);
uint64_t used_outputs[4] = {0};
for (int i = 0; i < c->gs_key->num_used_outputs; i++) {
int slot = v3d_slot_get_slot(c->gs_key->used_outputs[i]);
int comp = v3d_slot_get_component(c->gs_key->used_outputs[i]);
used_outputs[comp] |= 1ull << slot;
}
NIR_PASS_V(c->s, nir_remove_unused_io_vars,
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
NIR_PASS(_, c->s, nir_remove_unused_io_vars,
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
NIR_PASS(_, c->s, nir_lower_global_vars_to_local);
v3d_optimize_nir(c, c->s);
NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
NIR_PASS(_, c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
/* This must go before nir_lower_io */
if (c->gs_key->per_vertex_point_size)
NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
NIR_PASS(_, c->s, nir_lower_point_size, 1.0f, 0.0f);
NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size_vec4,
(nir_lower_io_options)0);
NIR_PASS(_, c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size_vec4,
(nir_lower_io_options)0);
/* clean up nir_lower_io's deref_var remains and do a constant folding pass
* on the code it generated.
*/
NIR_PASS_V(c->s, nir_opt_dce);
NIR_PASS_V(c->s, nir_opt_constant_folding);
NIR_PASS(_, c->s, nir_opt_dce);
NIR_PASS(_, c->s, nir_opt_constant_folding);
}
static void
@@ -1001,11 +1001,11 @@ v3d_nir_lower_fs_early(struct v3d_compile *c)
if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
v3d_fixup_fs_output_types(c);
NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);
NIR_PASS(_, c->s, v3d_nir_lower_logic_ops, c);
if (c->fs_key->line_smoothing) {
v3d_nir_lower_line_smooth(c->s);
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
NIR_PASS(_, c->s, v3d_nir_lower_line_smooth);
NIR_PASS(_, c->s, nir_lower_global_vars_to_local);
/* The lowering pass can introduce new sysval reads */
nir_shader_gather_info(c->s, nir_shader_get_entrypoint(c->s));
}
@@ -1015,8 +1015,8 @@ static void
v3d_nir_lower_gs_late(struct v3d_compile *c)
{
if (c->key->ucp_enables) {
NIR_PASS_V(c->s, nir_lower_clip_gs, c->key->ucp_enables,
false, NULL);
NIR_PASS(_, c->s, nir_lower_clip_gs, c->key->ucp_enables,
false, NULL);
}
/* Note: GS output scalarizing must happen after nir_lower_clip_gs. */
@@ -1027,8 +1027,8 @@ static void
v3d_nir_lower_vs_late(struct v3d_compile *c)
{
if (c->key->ucp_enables) {
NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
false, false, NULL);
NIR_PASS(_, c->s, nir_lower_clip_vs, c->key->ucp_enables,
false, false, NULL);
NIR_PASS_V(c->s, nir_lower_io_to_scalar,
nir_var_shader_out);
}
@@ -1048,7 +1048,7 @@ v3d_nir_lower_fs_late(struct v3d_compile *c)
* are using.
*/
if (c->key->ucp_enables)
NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables, true);
NIR_PASS(_, c->s, nir_lower_clip_fs, c->key->ucp_enables, true);
NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
}
@@ -1559,32 +1559,32 @@ v3d_attempt_compile(struct v3d_compile *c)
break;
}
NIR_PASS_V(c->s, v3d_nir_lower_io, c);
NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
NIR_PASS(_, c->s, v3d_nir_lower_io, c);
NIR_PASS(_, c->s, v3d_nir_lower_txf_ms, c);
NIR_PASS(_, c->s, v3d_nir_lower_image_load_store);
nir_lower_idiv_options idiv_options = {
.imprecise_32bit_lowering = true,
.allow_fp16 = true,
};
NIR_PASS_V(c->s, nir_lower_idiv, &idiv_options);
NIR_PASS(_, c->s, nir_lower_idiv, &idiv_options);
if (c->key->robust_buffer_access) {
/* v3d_nir_lower_robust_buffer_access assumes constant buffer
* indices on ubo/ssbo intrinsics so run copy propagation and
* constant folding passes before we run the lowering to warrant
* this. We also want to run the lowering before v3d_optimize to
* clean-up redundant get_buffer_size calls produced in the pass.
*/
NIR_PASS_V(c->s, nir_copy_prop);
NIR_PASS_V(c->s, nir_opt_constant_folding);
NIR_PASS_V(c->s, v3d_nir_lower_robust_buffer_access, c);
/* v3d_nir_lower_robust_buffer_access assumes constant buffer
* indices on ubo/ssbo intrinsics so run copy propagation and
* constant folding passes before we run the lowering to warrant
* this. We also want to run the lowering before v3d_optimize to
* clean-up redundant get_buffer_size calls produced in the pass.
*/
NIR_PASS(_, c->s, nir_copy_prop);
NIR_PASS(_, c->s, nir_opt_constant_folding);
NIR_PASS(_, c->s, v3d_nir_lower_robust_buffer_access, c);
}
NIR_PASS_V(c->s, nir_lower_wrmasks, should_split_wrmask, c->s);
NIR_PASS(_, c->s, nir_lower_wrmasks, should_split_wrmask, c->s);
NIR_PASS_V(c->s, v3d_nir_lower_load_store_bitsize, c);
NIR_PASS(_, c->s, v3d_nir_lower_load_store_bitsize, c);
NIR_PASS_V(c->s, v3d_nir_lower_subgroup_intrinsics, c);
NIR_PASS(_, c->s, v3d_nir_lower_subgroup_intrinsics, c);
v3d_optimize_nir(c, c->s);
@@ -1597,16 +1597,16 @@ v3d_attempt_compile(struct v3d_compile *c)
while (more_late_algebraic) {
more_late_algebraic = false;
NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
NIR_PASS_V(c->s, nir_opt_constant_folding);
NIR_PASS_V(c->s, nir_copy_prop);
NIR_PASS_V(c->s, nir_opt_dce);
NIR_PASS_V(c->s, nir_opt_cse);
NIR_PASS(_, c->s, nir_opt_constant_folding);
NIR_PASS(_, c->s, nir_copy_prop);
NIR_PASS(_, c->s, nir_opt_dce);
NIR_PASS(_, c->s, nir_opt_cse);
}
NIR_PASS_V(c->s, nir_lower_bool_to_int32);
NIR_PASS_V(c->s, nir_convert_to_lcssa, true, true);
NIR_PASS(_, c->s, nir_lower_bool_to_int32);
NIR_PASS(_, c->s, nir_convert_to_lcssa, true, true);
NIR_PASS_V(c->s, nir_divergence_analysis);
NIR_PASS_V(c->s, nir_convert_from_ssa, true);
NIR_PASS(_, c->s, nir_convert_from_ssa, true);
struct nir_schedule_options schedule_options = {
/* Schedule for about half our register space, to enable more
@@ -1633,9 +1633,9 @@ v3d_attempt_compile(struct v3d_compile *c)
NIR_PASS_V(c->s, nir_schedule, &schedule_options);
if (!c->disable_constant_ubo_load_sorting)
NIR_PASS_V(c->s, v3d_nir_sort_constant_ubo_loads, c);
NIR_PASS(_, c->s, v3d_nir_sort_constant_ubo_loads, c);
NIR_PASS_V(c->s, nir_opt_move, nir_move_load_uniform |
NIR_PASS(_, c->s, nir_opt_move, nir_move_load_uniform |
nir_move_const_undef);
v3d_nir_to_vir(c);

[File 2 of 3]

@@ -362,7 +362,7 @@ preprocess_nir(nir_shader *nir)
.frag_coord = true,
.point_coord = true,
};
NIR_PASS_V(nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);
NIR_PASS(_, nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);
/* Vulkan uses the separate-shader linking model */
nir->info.separate_shader = true;
@@ -370,12 +370,12 @@ preprocess_nir(nir_shader *nir)
/* Make sure we lower variable initializers on output variables so that
* nir_remove_dead_variables below sees the corresponding stores
*/
NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_shader_out);
NIR_PASS(_, nir, nir_lower_variable_initializers, nir_var_shader_out);
if (nir->info.stage == MESA_SHADER_FRAGMENT)
NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
NIR_PASS(_, nir, nir_lower_io_to_vector, nir_var_shader_out);
if (nir->info.stage == MESA_SHADER_FRAGMENT) {
NIR_PASS_V(nir, nir_lower_input_attachments,
NIR_PASS(_, nir, nir_lower_input_attachments,
&(nir_input_attachment_options) {
.use_fragcoord_sysval = false,
});
@@ -384,47 +384,47 @@ preprocess_nir(nir_shader *nir)
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
nir_shader_get_entrypoint(nir), true, false);
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
NIR_PASS(_, nir, nir_lower_system_values);
NIR_PASS(_, nir, nir_lower_clip_cull_distance_arrays);
NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(_, nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(nir, nir_normalize_cubemap_coords);
NIR_PASS(_, nir, nir_normalize_cubemap_coords);
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
NIR_PASS(_, nir, nir_lower_global_vars_to_local);
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_split_struct_vars, nir_var_function_temp);
NIR_PASS(_, nir, nir_split_var_copies);
NIR_PASS(_, nir, nir_split_struct_vars, nir_var_function_temp);
nir_optimize(nir, true);
NIR_PASS_V(nir, nir_lower_explicit_io,
nir_var_mem_push_const,
nir_address_format_32bit_offset);
NIR_PASS(_, nir, nir_lower_explicit_io,
nir_var_mem_push_const,
nir_address_format_32bit_offset);
NIR_PASS_V(nir, nir_lower_explicit_io,
nir_var_mem_ubo | nir_var_mem_ssbo,
nir_address_format_32bit_index_offset);
NIR_PASS(_, nir, nir_lower_explicit_io,
nir_var_mem_ubo | nir_var_mem_ssbo,
nir_address_format_32bit_index_offset);
NIR_PASS_V(nir, nir_lower_explicit_io,
nir_var_mem_global,
nir_address_format_2x32bit_global);
NIR_PASS(_, nir, nir_lower_explicit_io,
nir_var_mem_global,
nir_address_format_2x32bit_global);
NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
NIR_PASS(_, nir, nir_lower_load_const_to_scalar);
/* Lower a bunch of stuff */
NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS(_, nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in, UINT32_MAX);
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_shader_in, UINT32_MAX);
NIR_PASS_V(nir, nir_lower_indirect_derefs,
nir_var_function_temp, 2);
NIR_PASS(_, nir, nir_lower_indirect_derefs,
nir_var_function_temp, 2);
NIR_PASS_V(nir, nir_lower_array_deref_of_vec,
nir_var_mem_ubo | nir_var_mem_ssbo,
nir_lower_direct_array_deref_of_vec_load);
NIR_PASS(_, nir, nir_lower_array_deref_of_vec,
nir_var_mem_ubo | nir_var_mem_ssbo,
nir_lower_direct_array_deref_of_vec_load);
NIR_PASS_V(nir, nir_lower_frexp);
NIR_PASS(_, nir, nir_lower_frexp);
/* Get rid of split copies */
nir_optimize(nir, false);
@@ -978,7 +978,7 @@ lower_fs_io(nir_shader *nir)
{
/* Our backend doesn't handle array fragment shader outputs */
NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_shader_out, NULL);
NIR_PASS(_, nir, nir_remove_dead_variables, nir_var_shader_out, NULL);
nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs,
MESA_SHADER_FRAGMENT);
@@ -986,8 +986,8 @@ lower_fs_io(nir_shader *nir)
nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs,
MESA_SHADER_FRAGMENT);
NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size_vec4, 0);
NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size_vec4, 0);
}
static void
@@ -1704,8 +1704,8 @@ link_shaders(nir_shader *producer, nir_shader *consumer)
assert(consumer);
if (producer->options->lower_to_scalar) {
NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
}
nir_lower_io_arrays_to_elements(producer, consumer);
@@ -1716,12 +1716,12 @@ link_shaders(nir_shader *producer, nir_shader *consumer)
if (nir_link_opt_varyings(producer, consumer))
nir_optimize(consumer, false);
NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
if (nir_remove_unused_varyings(producer, consumer)) {
NIR_PASS_V(producer, nir_lower_global_vars_to_local);
NIR_PASS_V(consumer, nir_lower_global_vars_to_local);
NIR_PASS(_, producer, nir_lower_global_vars_to_local);
NIR_PASS(_, consumer, nir_lower_global_vars_to_local);
nir_optimize(producer, false);
nir_optimize(consumer, false);
@@ -1730,8 +1730,8 @@ link_shaders(nir_shader *producer, nir_shader *consumer)
* nir_compact_varyings() depends on all dead varyings being removed so
* we need to call nir_remove_dead_variables() again here.
*/
NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
}
}
@@ -1766,8 +1766,8 @@ pipeline_lower_nir(struct v3dv_pipeline *pipeline,
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
bool needs_default_sampler_state = false;
NIR_PASS_V(p_stage->nir, lower_pipeline_layout_info, pipeline, layout,
&needs_default_sampler_state);
NIR_PASS(_, p_stage->nir, lower_pipeline_layout_info, pipeline, layout,
&needs_default_sampler_state);
/* If in the end we didn't need to use the default sampler states and the
* shader doesn't need any other samplers, get rid of them so we can
@@ -3127,10 +3127,10 @@ shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
static void
lower_cs_shared(struct nir_shader *nir)
{
NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
nir_var_mem_shared, shared_type_info);
NIR_PASS_V(nir, nir_lower_explicit_io,
nir_var_mem_shared, nir_address_format_32bit_offset);
NIR_PASS(_, nir, nir_lower_vars_to_explicit_types,
nir_var_mem_shared, shared_type_info);
NIR_PASS(_, nir, nir_lower_explicit_io,
nir_var_mem_shared, nir_address_format_32bit_offset);
}
static VkResult

[File 3 of 3]

@@ -308,19 +308,19 @@ v3d_uncompiled_shader_create(struct pipe_context *pctx,
if (s->info.stage != MESA_SHADER_VERTEX &&
s->info.stage != MESA_SHADER_GEOMETRY) {
NIR_PASS_V(s, nir_lower_io,
nir_var_shader_in | nir_var_shader_out,
type_size, (nir_lower_io_options)0);
NIR_PASS(_, s, nir_lower_io,
nir_var_shader_in | nir_var_shader_out,
type_size, (nir_lower_io_options)0);
}
NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_normalize_cubemap_coords);
NIR_PASS(_, s, nir_lower_regs_to_ssa);
NIR_PASS(_, s, nir_normalize_cubemap_coords);
NIR_PASS_V(s, nir_lower_load_const_to_scalar);
NIR_PASS(_, s, nir_lower_load_const_to_scalar);
v3d_optimize_nir(NULL, s);
NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
NIR_PASS(_, s, nir_remove_dead_variables, nir_var_function_temp, NULL);
/* Garbage collect dead instructions */
nir_sweep(s);