intel, anv: propagate robustness setting to nir_opt_load_store_vectorize

Closes #4309
Fixes dEQP-VK.robustness.robustness2.*.readonly.*

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10147>
Author: Iván Briano, 2021-04-09 14:42:53 -07:00
parent e6c79329dd
commit 8328989130
9 changed files with 56 additions and 20 deletions
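Context for the change, ahead of the hunks: nir_opt_load_store_vectorize merges adjacent UBO/SSBO/global accesses into wider ones, and a merged access can cross the per-access bounds check that robustBufferAccess requires, so the pass has to be told which variable modes must stay robust. A minimal sketch of that wiring follows; the helper name and the exact option set are illustrative only, and the authoritative version is the brw_nir.c hunk further down.

/* Illustrative sketch (hypothetical helper name): the robustness bit that
 * reaches the backend compiler key is turned into robust_modes for the NIR
 * load/store vectorizer, so it will not combine accesses in a way that
 * widens them past what the application's instructions actually touched. */
static void
vectorize_mem_access_sketch(nir_shader *nir, bool robust_buffer_access)
{
   nir_load_store_vectorize_options options = {
      .modes = nir_var_mem_ubo | nir_var_mem_ssbo | nir_var_mem_global,
      .robust_modes = (nir_variable_mode)0,
   };

   if (robust_buffer_access) {
      /* Modes listed as robust make the pass reject merges that could touch
       * bytes outside the original, individually bounds-checked accesses. */
      options.robust_modes = nir_var_mem_ubo | nir_var_mem_ssbo |
                             nir_var_mem_global;
   }

   NIR_PASS_V(nir, nir_opt_load_store_vectorize, &options);
}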


@@ -256,6 +256,8 @@ struct brw_base_prog_key {
    enum brw_subgroup_size_type subgroup_size_type;
 
    struct brw_sampler_prog_key_data tex;
+
+   bool robust_buffer_access;
 };
 
 /**


@@ -9097,7 +9097,8 @@ brw_compile_fs(const struct brw_compiler *compiler,
    if (!key->multisample_fbo)
       NIR_PASS_V(nir, brw_nir_demote_sample_qualifiers);
    NIR_PASS_V(nir, brw_nir_move_interpolation_to_top);
-   brw_postprocess_nir(nir, compiler, true, debug_enabled);
+   brw_postprocess_nir(nir, compiler, true, debug_enabled,
+                       key->base.robust_buffer_access);
 
    brw_nir_populate_wm_prog_data(nir, compiler->devinfo, key, prog_data);
@@ -9427,7 +9428,8 @@ compile_cs_to_nir(const struct brw_compiler *compiler,
    NIR_PASS_V(shader, nir_opt_constant_folding);
    NIR_PASS_V(shader, nir_opt_dce);
 
-   brw_postprocess_nir(shader, compiler, true, debug_enabled);
+   brw_postprocess_nir(shader, compiler, true, debug_enabled,
+                       key->base.robust_buffer_access);
 
    return shader;
 }
@@ -9720,7 +9722,8 @@ brw_compile_bs(const struct brw_compiler *compiler, void *log_data,
    const unsigned max_dispatch_width = 16;
    brw_nir_apply_key(shader, compiler, &key->base, max_dispatch_width, true);
 
-   brw_postprocess_nir(shader, compiler, true, debug_enabled);
+   brw_postprocess_nir(shader, compiler, true, debug_enabled,
+                       key->base.robust_buffer_access);
 
    fs_visitor *v = NULL, *v8 = NULL, *v16 = NULL;
    bool has_spilled = false;


@@ -1027,7 +1027,8 @@ bool combine_all_barriers(nir_intrinsic_instr *a,
 static void
 brw_vectorize_lower_mem_access(nir_shader *nir,
                                const struct brw_compiler *compiler,
-                               bool is_scalar)
+                               bool is_scalar,
+                               bool robust_buffer_access)
 {
    const struct gen_device_info *devinfo = compiler->devinfo;
    bool progress = false;
@@ -1040,6 +1041,11 @@ brw_vectorize_lower_mem_access(nir_shader *nir,
          .robust_modes = (nir_variable_mode)0,
       };
 
+      if (robust_buffer_access) {
+         options.robust_modes = nir_var_mem_ubo | nir_var_mem_ssbo |
+                                nir_var_mem_global;
+      }
+
       OPT(nir_opt_load_store_vectorize, &options);
    }
@@ -1077,7 +1083,8 @@ nir_shader_has_local_variables(const nir_shader *nir)
  */
 void
 brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
-                    bool is_scalar, bool debug_enabled)
+                    bool is_scalar, bool debug_enabled,
+                    bool robust_buffer_access)
 {
    const struct gen_device_info *devinfo = compiler->devinfo;
@@ -1103,7 +1110,8 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
       brw_nir_optimize(nir, compiler, is_scalar, false);
    }
 
-   brw_vectorize_lower_mem_access(nir, compiler, is_scalar);
+   brw_vectorize_lower_mem_access(nir, compiler, is_scalar,
+                                  robust_buffer_access);
 
    if (OPT(nir_lower_int64))
       brw_nir_optimize(nir, compiler, is_scalar, false);


@@ -135,7 +135,8 @@ bool brw_nir_lower_mem_access_bit_sizes(nir_shader *shader,
 void brw_postprocess_nir(nir_shader *nir,
                          const struct brw_compiler *compiler,
                          bool is_scalar,
-                         bool debug_enabled);
+                         bool debug_enabled,
+                         bool robust_buffer_access);
 
 bool brw_nir_clamp_image_1d_2d_array_sizes(nir_shader *shader);


@@ -1324,7 +1324,8 @@ brw_compile_tes(const struct brw_compiler *compiler,
    brw_nir_apply_key(nir, compiler, &key->base, 8, is_scalar);
    brw_nir_lower_tes_inputs(nir, input_vue_map);
    brw_nir_lower_vue_outputs(nir);
-   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled);
+   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled,
+                       key->base.robust_buffer_access);
 
    brw_compute_vue_map(devinfo, &prog_data->base.vue_map,
                        nir->info.outputs_written,


@@ -2867,7 +2867,8 @@ brw_compile_vs(const struct brw_compiler *compiler,
    brw_nir_lower_vs_inputs(nir, key->gl_attrib_wa_flags);
    brw_nir_lower_vue_outputs(nir);
 
-   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled);
+   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled,
+                       key->base.robust_buffer_access);
 
    prog_data->base.clip_distance_mask =
       ((1 << nir->info.clip_distance_array_size) - 1);


@@ -618,7 +618,8 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
    brw_nir_apply_key(nir, compiler, &key->base, 8, is_scalar);
    brw_nir_lower_vue_inputs(nir, &c.input_vue_map);
    brw_nir_lower_vue_outputs(nir);
-   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled);
+   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled,
+                       key->base.robust_buffer_access);
 
    prog_data->base.clip_distance_mask =
       ((1 << nir->info.clip_distance_array_size) - 1);


@@ -390,7 +390,8 @@ brw_compile_tcs(const struct brw_compiler *compiler,
    if (key->quads_workaround)
       brw_nir_apply_tcs_quads_workaround(nir);
 
-   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled);
+   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled,
+                       key->base.robust_buffer_access);
 
    bool has_primitive_id =
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_PRIMITIVE_ID);


@@ -394,6 +394,7 @@ populate_sampler_prog_key(const struct gen_device_info *devinfo,
 static void
 populate_base_prog_key(const struct gen_device_info *devinfo,
                        VkPipelineShaderStageCreateFlags flags,
+                       bool robust_buffer_acccess,
                        struct brw_base_prog_key *key)
 {
    if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
@@ -401,17 +402,20 @@ populate_base_prog_key(const struct gen_device_info *devinfo,
    else
       key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
 
+   key->robust_buffer_access = robust_buffer_acccess;
+
    populate_sampler_prog_key(devinfo, &key->tex);
 }
 
 static void
 populate_vs_prog_key(const struct gen_device_info *devinfo,
                      VkPipelineShaderStageCreateFlags flags,
+                     bool robust_buffer_acccess,
                      struct brw_vs_prog_key *key)
 {
    memset(key, 0, sizeof(*key));
 
-   populate_base_prog_key(devinfo, flags, &key->base);
+   populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
 
    /* XXX: Handle vertex input work-arounds */
@@ -421,12 +425,13 @@ populate_vs_prog_key(const struct gen_device_info *devinfo,
 static void
 populate_tcs_prog_key(const struct gen_device_info *devinfo,
                       VkPipelineShaderStageCreateFlags flags,
+                      bool robust_buffer_acccess,
                       unsigned input_vertices,
                       struct brw_tcs_prog_key *key)
 {
    memset(key, 0, sizeof(*key));
 
-   populate_base_prog_key(devinfo, flags, &key->base);
+   populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
 
    key->input_vertices = input_vertices;
 }
@@ -434,33 +439,36 @@ populate_tcs_prog_key(const struct gen_device_info *devinfo,
 static void
 populate_tes_prog_key(const struct gen_device_info *devinfo,
                       VkPipelineShaderStageCreateFlags flags,
+                      bool robust_buffer_acccess,
                       struct brw_tes_prog_key *key)
 {
    memset(key, 0, sizeof(*key));
 
-   populate_base_prog_key(devinfo, flags, &key->base);
+   populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
 }
 
 static void
 populate_gs_prog_key(const struct gen_device_info *devinfo,
                      VkPipelineShaderStageCreateFlags flags,
+                     bool robust_buffer_acccess,
                      struct brw_gs_prog_key *key)
 {
    memset(key, 0, sizeof(*key));
 
-   populate_base_prog_key(devinfo, flags, &key->base);
+   populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
 }
 
 static void
 populate_wm_prog_key(const struct gen_device_info *devinfo,
                      VkPipelineShaderStageCreateFlags flags,
+                     bool robust_buffer_acccess,
                      const struct anv_subpass *subpass,
                      const VkPipelineMultisampleStateCreateInfo *ms_info,
                      struct brw_wm_prog_key *key)
 {
    memset(key, 0, sizeof(*key));
 
-   populate_base_prog_key(devinfo, flags, &key->base);
+   populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
 
    /* We set this to 0 here and set to the actual value before we call
    * brw_compile_fs.
@@ -510,12 +518,13 @@ populate_wm_prog_key(const struct gen_device_info *devinfo,
 static void
 populate_cs_prog_key(const struct gen_device_info *devinfo,
                      VkPipelineShaderStageCreateFlags flags,
+                     bool robust_buffer_acccess,
                      const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info,
                      struct brw_cs_prog_key *key)
 {
    memset(key, 0, sizeof(*key));
 
-   populate_base_prog_key(devinfo, flags, &key->base);
+   populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
 
    if (rss_info) {
       assert(key->base.subgroup_size_type != BRW_SUBGROUP_SIZE_VARYING);
@@ -1269,23 +1278,31 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
       const struct gen_device_info *devinfo = &pipeline->base.device->info;
 
       switch (stage) {
      case MESA_SHADER_VERTEX:
-         populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
+         populate_vs_prog_key(devinfo, sinfo->flags,
+                              pipeline->base.device->robust_buffer_access,
+                              &stages[stage].key.vs);
         break;
      case MESA_SHADER_TESS_CTRL:
         populate_tcs_prog_key(devinfo, sinfo->flags,
+                              pipeline->base.device->robust_buffer_access,
                               info->pTessellationState->patchControlPoints,
                               &stages[stage].key.tcs);
         break;
      case MESA_SHADER_TESS_EVAL:
-         populate_tes_prog_key(devinfo, sinfo->flags, &stages[stage].key.tes);
+         populate_tes_prog_key(devinfo, sinfo->flags,
+                               pipeline->base.device->robust_buffer_access,
+                               &stages[stage].key.tes);
         break;
      case MESA_SHADER_GEOMETRY:
-         populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
+         populate_gs_prog_key(devinfo, sinfo->flags,
+                              pipeline->base.device->robust_buffer_access,
+                              &stages[stage].key.gs);
         break;
      case MESA_SHADER_FRAGMENT: {
         const bool raster_enabled =
            !info->pRasterizationState->rasterizerDiscardEnable;
         populate_wm_prog_key(devinfo, sinfo->flags,
+                             pipeline->base.device->robust_buffer_access,
                              pipeline->subpass,
                              raster_enabled ? info->pMultisampleState : NULL,
                              &stages[stage].key.wm);
@@ -1662,6 +1679,7 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
                            PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
 
    populate_cs_prog_key(&pipeline->base.device->info, info->stage.flags,
+                        pipeline->base.device->robust_buffer_access,
                         rss_info, &stage.key.cs);
 
    ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
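For completeness: the pipeline->base.device->robust_buffer_access flag consumed above is the per-device robustness setting. A rough sketch of how such a flag can be derived at vkCreateDevice time follows; it is simplified and uses a hypothetical helper name, and the real anv device-creation path also handles features passed through the VkPhysicalDeviceFeatures2 chain.

/* Simplified sketch, hypothetical helper name: the device-level flag that
 * the prog-key population reads is latched from the features the
 * application enabled when the device was created. */
static void
init_robustness_flag_sketch(struct anv_device *device,
                            const VkDeviceCreateInfo *pCreateInfo)
{
   device->robust_buffer_access =
      pCreateInfo->pEnabledFeatures &&
      pCreateInfo->pEnabledFeatures->robustBufferAccess;
}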