mesa: Add GL/GLSL plumbing for INTEL_fragment_shader_ordering
This extension provides the new GLSL built-in function beginFragmentShaderOrderingINTEL() that guarantees (taking wording of the GL_INTEL_fragment_shader_ordering extension) that any memory transactions issued by shader invocations from previous primitives mapped to the same xy window coordinates (and the same sample when per-sample shading is active) complete and are visible to the shader invocation that called beginFragmentShaderOrderingINTEL(). One advantage of INTEL_fragment_shader_ordering over ARB_fragment_shader_interlock is that it provides a function that operates as a memory barrier (instead of defining a critical section) that can be called under arbitrary control flow from any function (in contrast, the begin/end of ARB_fragment_shader_interlock may only be called once, from main(), under no control flow). Signed-off-by: Kevin Rogovin <kevin.rogovin@intel.com> Reviewed-by: Plamena Manolova <plamena.manolova@intel.com>
This commit is contained in:
parent
1b0df8a460
commit
119435c877
|
@ -525,6 +525,12 @@ supports_nv_fragment_shader_interlock(const _mesa_glsl_parse_state *state)
|
|||
return state->NV_fragment_shader_interlock_enable;
|
||||
}
|
||||
|
||||
static bool
|
||||
supports_intel_fragment_shader_ordering(const _mesa_glsl_parse_state *state)
|
||||
{
|
||||
return state->INTEL_fragment_shader_ordering_enable;
|
||||
}
|
||||
|
||||
static bool
|
||||
shader_clock(const _mesa_glsl_parse_state *state)
|
||||
{
|
||||
|
@ -1305,6 +1311,11 @@ builtin_builder::create_intrinsics()
|
|||
supports_arb_fragment_shader_interlock,
|
||||
ir_intrinsic_end_invocation_interlock), NULL);
|
||||
|
||||
add_function("__intrinsic_begin_fragment_shader_ordering",
|
||||
_invocation_interlock_intrinsic(
|
||||
supports_intel_fragment_shader_ordering,
|
||||
ir_intrinsic_begin_fragment_shader_ordering), NULL);
|
||||
|
||||
add_function("__intrinsic_shader_clock",
|
||||
_shader_clock_intrinsic(shader_clock,
|
||||
glsl_type::uvec2_type),
|
||||
|
@ -3419,6 +3430,12 @@ builtin_builder::create_builtins()
|
|||
supports_nv_fragment_shader_interlock),
|
||||
NULL);
|
||||
|
||||
add_function("beginFragmentShaderOrderingINTEL",
|
||||
_invocation_interlock(
|
||||
"__intrinsic_begin_fragment_shader_ordering",
|
||||
supports_intel_fragment_shader_ordering),
|
||||
NULL);
|
||||
|
||||
add_function("anyInvocationARB",
|
||||
_vote("__intrinsic_vote_any", vote),
|
||||
NULL);
|
||||
|
|
|
@ -725,6 +725,7 @@ static const _mesa_glsl_extension _mesa_glsl_supported_extensions[] = {
|
|||
EXT_AEP(EXT_texture_buffer),
|
||||
EXT_AEP(EXT_texture_cube_map_array),
|
||||
EXT(INTEL_conservative_rasterization),
|
||||
EXT(INTEL_fragment_shader_ordering),
|
||||
EXT(INTEL_shader_atomic_float_minmax),
|
||||
EXT(MESA_shader_integer_functions),
|
||||
EXT(NV_fragment_shader_interlock),
|
||||
|
|
|
@ -812,6 +812,8 @@ struct _mesa_glsl_parse_state {
|
|||
bool EXT_texture_cube_map_array_warn;
|
||||
bool INTEL_conservative_rasterization_enable;
|
||||
bool INTEL_conservative_rasterization_warn;
|
||||
bool INTEL_fragment_shader_ordering_enable;
|
||||
bool INTEL_fragment_shader_ordering_warn;
|
||||
bool INTEL_shader_atomic_float_minmax_enable;
|
||||
bool INTEL_shader_atomic_float_minmax_warn;
|
||||
bool MESA_shader_integer_functions_enable;
|
||||
|
|
|
@ -742,6 +742,9 @@ nir_visitor::visit(ir_call *ir)
|
|||
case ir_intrinsic_end_invocation_interlock:
|
||||
op = nir_intrinsic_end_invocation_interlock;
|
||||
break;
|
||||
case ir_intrinsic_begin_fragment_shader_ordering:
|
||||
op = nir_intrinsic_begin_fragment_shader_ordering;
|
||||
break;
|
||||
case ir_intrinsic_group_memory_barrier:
|
||||
op = nir_intrinsic_group_memory_barrier;
|
||||
break;
|
||||
|
@ -975,6 +978,9 @@ nir_visitor::visit(ir_call *ir)
|
|||
case nir_intrinsic_end_invocation_interlock:
|
||||
nir_builder_instr_insert(&b, &instr->instr);
|
||||
break;
|
||||
case nir_intrinsic_begin_fragment_shader_ordering:
|
||||
nir_builder_instr_insert(&b, &instr->instr);
|
||||
break;
|
||||
case nir_intrinsic_store_ssbo: {
|
||||
exec_node *param = ir->actual_parameters.get_head();
|
||||
ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();
|
||||
|
|
|
@ -1122,6 +1122,7 @@ enum ir_intrinsic_id {
|
|||
ir_intrinsic_memory_barrier_shared,
|
||||
ir_intrinsic_begin_invocation_interlock,
|
||||
ir_intrinsic_end_invocation_interlock,
|
||||
ir_intrinsic_begin_fragment_shader_ordering,
|
||||
|
||||
ir_intrinsic_vote_all,
|
||||
ir_intrinsic_vote_any,
|
||||
|
|
|
@ -191,6 +191,7 @@ barrier("memory_barrier_image")
|
|||
barrier("memory_barrier_shared")
|
||||
barrier("begin_invocation_interlock")
|
||||
barrier("end_invocation_interlock")
|
||||
barrier("begin_fragment_shader_ordering")
|
||||
|
||||
# A conditional discard, with a single boolean source.
|
||||
intrinsic("discard_if", src_comp=[1])
|
||||
|
|
|
@ -316,6 +316,7 @@ EXT(IBM_texture_mirrored_repeat , dummy_true
|
|||
EXT(INGR_blend_func_separate , EXT_blend_func_separate , GLL, x , x , x , 1999)
|
||||
|
||||
EXT(INTEL_conservative_rasterization , INTEL_conservative_rasterization , x , GLC, x , 31, 2013)
|
||||
EXT(INTEL_fragment_shader_ordering , INTEL_fragment_shader_ordering , GLL, GLC, x , x , 2013)
|
||||
EXT(INTEL_performance_query , INTEL_performance_query , GLL, GLC, x , ES2, 2013)
|
||||
EXT(INTEL_shader_atomic_float_minmax , INTEL_shader_atomic_float_minmax , GLL, GLC, x , x , 2018)
|
||||
|
||||
|
|
|
@ -4271,6 +4271,7 @@ struct gl_extensions
|
|||
GLboolean ATI_fragment_shader;
|
||||
GLboolean GREMEDY_string_marker;
|
||||
GLboolean INTEL_conservative_rasterization;
|
||||
GLboolean INTEL_fragment_shader_ordering;
|
||||
GLboolean INTEL_performance_query;
|
||||
GLboolean INTEL_shader_atomic_float_minmax;
|
||||
GLboolean KHR_blend_equation_advanced;
|
||||
|
|
Loading…
Reference in New Issue