/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_nir_rt.h"
#include "brw_shader.h"
#include "dev/intel_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"

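/* Remap load/store intrinsics for gl_TessLevelInner/Outer from their
 * VARYING_SLOT_TESS_LEVEL_* locations to the DWords of the patch header,
 * whose layout depends on the tessellation domain.  Returns true if the
 * intrinsic referred to a tess level varying; out-of-bounds components are
 * rewritten to undefs (for loads) and the intrinsic is removed.
 */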
static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                  enum tess_primitive_mode _primitive_mode)
{
   const int location = nir_intrinsic_base(intr);
   const unsigned component = nir_intrinsic_component(intr);
   bool out_of_bounds;

   if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
      switch (_primitive_mode) {
      case TESS_PRIMITIVE_QUADS:
         /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
         nir_intrinsic_set_base(intr, 0);
         nir_intrinsic_set_component(intr, 3 - component);
         out_of_bounds = false;
         break;
      case TESS_PRIMITIVE_TRIANGLES:
         /* gl_TessLevelInner[0] lives at DWord 4. */
         nir_intrinsic_set_base(intr, 1);
         out_of_bounds = component > 0;
         break;
      case TESS_PRIMITIVE_ISOLINES:
         out_of_bounds = true;
         break;
      default:
         unreachable("Bogus tessellation domain");
      }
   } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
      if (_primitive_mode == TESS_PRIMITIVE_ISOLINES) {
         /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
         out_of_bounds = component > 1;
      } else {
         /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
         out_of_bounds = component == 3 &&
                         _primitive_mode == TESS_PRIMITIVE_TRIANGLES;
      }
   } else {
      return false;
   }

   if (out_of_bounds) {
      if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
         b->cursor = nir_before_instr(&intr->instr);
         nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, undef);
      }
      nir_instr_remove(&intr->instr);
   }

   return true;
}

static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}

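/* Remap TCS output / TES input intrinsics from VARYING_SLOT_* locations to
 * VUE map slots, and fold any per-vertex index into the URB offset: at
 * compile time for constant indices, or with ALU instructions for dynamic
 * ones.
 */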
static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map,
                        enum tess_primitive_mode tes_primitive_mode)
{
   const bool is_passthrough_tcs = b->shader->info.name &&
      strcmp(b->shader->info.name, "passthrough TCS") == 0;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = b->shader->info.stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {

         if (!is_passthrough_tcs &&
             remap_tess_levels(b, intrin, tes_primitive_mode))
            continue;

         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_arrayed_index_src(intrin);
         if (vertex) {
            if (nir_src_is_const(*vertex)) {
               intrin->const_index[0] += nir_src_as_uint(*vertex) *
                                         vue_map->num_per_vertex_slots;
            } else {
               b->cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(b,
                           nir_ssa_for_src(b, *vertex, 1),
                           nir_imm_int(b,
                                       vue_map->num_per_vertex_slots));

               /* Add it to the existing offset */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(b, vertex_offset,
                           nir_ssa_for_src(b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
   return true;
}

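/* Lower VS inputs to explicit attribute slots: system values like
 * gl_VertexID are turned into load_input intrinsics reading the trailing
 * vertex elements, and each VERT_ATTRIB_* base is remapped to its position
 * among the enabled attributes.
 */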
void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        bool edgeflag_is_last,
                        const uint8_t *vs_attrib_wa_flags)
{
   /* Start with the location of the variable's base. */
   nir_foreach_shader_in_variable(var, nir)
      var->data.driver_location = var->data.location;

   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
    * loaded as one vec4 or dvec4 per element (or matrix column), depending on
    * whether it is a double-precision type or not.
    */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);

   brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);

   /* The last step is to remap VERT_ATTRIB_* to actual registers */

   /* Whether or not we have any system generated values.  gl_DrawID is not
    * included here as it lives in its own vec4.
    */
   const bool has_sgvs =
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);

   const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_first_vertex:
            case nir_intrinsic_load_base_instance:
            case nir_intrinsic_load_vertex_id_zero_base:
            case nir_intrinsic_load_instance_id:
            case nir_intrinsic_load_is_indexed_draw:
            case nir_intrinsic_load_draw_id: {
               b.cursor = nir_after_instr(&intrin->instr);

               /* gl_VertexID and friends are stored by the VF as the last
                * vertex element.  We convert them to load_input intrinsics at
                * the right location.
                */
               nir_intrinsic_instr *load =
                  nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
               load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));

               nir_intrinsic_set_base(load, num_inputs);
               switch (intrin->intrinsic) {
               case nir_intrinsic_load_first_vertex:
                  nir_intrinsic_set_component(load, 0);
                  break;
               case nir_intrinsic_load_base_instance:
                  nir_intrinsic_set_component(load, 1);
                  break;
               case nir_intrinsic_load_vertex_id_zero_base:
                  nir_intrinsic_set_component(load, 2);
                  break;
               case nir_intrinsic_load_instance_id:
                  nir_intrinsic_set_component(load, 3);
                  break;
               case nir_intrinsic_load_draw_id:
               case nir_intrinsic_load_is_indexed_draw:
                  /* gl_DrawID and IsIndexedDraw are stored right after
                   * gl_VertexID and friends if any of them exist.
                   */
                  nir_intrinsic_set_base(load, num_inputs + has_sgvs);
                  if (intrin->intrinsic == nir_intrinsic_load_draw_id)
                     nir_intrinsic_set_component(load, 0);
                  else
                     nir_intrinsic_set_component(load, 1);
                  break;
               default:
                  unreachable("Invalid system value intrinsic");
               }

               load->num_components = 1;
               nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
               nir_builder_instr_insert(&b, &load->instr);

               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                        &load->dest.ssa);
               nir_instr_remove(&intrin->instr);
               break;
            }

            case nir_intrinsic_load_input: {
               /* Attributes come in a contiguous block, ordered by their
                * gl_vert_attrib value.  That means we can compute the slot
                * number for an attribute by masking out the enabled attributes
                * before it and counting the bits.
                */
               int attr = nir_intrinsic_base(intrin);
               uint64_t inputs_read = nir->info.inputs_read;
               int slot = -1;
               if (edgeflag_is_last) {
                  inputs_read &= ~BITFIELD64_BIT(VERT_ATTRIB_EDGEFLAG);
                  if (attr == VERT_ATTRIB_EDGEFLAG)
                     slot = num_inputs - 1;
               }
               if (slot == -1)
                  slot = util_bitcount64(inputs_read &
                                         BITFIELD64_MASK(attr));
               nir_intrinsic_set_base(intrin, slot);
               break;
            }

            default:
               break; /* Nothing to do */
            }
         }
      }
   }
}

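/* Lower inputs read from a VUE (e.g. geometry shader inputs) to slots in
 * the given VUE map.  gl_PointSize is special-cased since it lives in the
 * .w component of the VUE header at offset 0.
 */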
void
brw_nir_lower_vue_inputs(nir_shader *nir,
                         const struct brw_vue_map *vue_map)
{
   nir_foreach_shader_in_variable(var, nir)
      var->data.driver_location = var->data.location;

   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            if (intrin->intrinsic == nir_intrinsic_load_input ||
                intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
               /* Offset 0 is the VUE header, which contains
                * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
                * VARYING_SLOT_PSIZ [.w].
                */
               int varying = nir_intrinsic_base(intrin);
               int vue_slot;
               switch (varying) {
               case VARYING_SLOT_PSIZ:
                  nir_intrinsic_set_base(intrin, 0);
                  nir_intrinsic_set_component(intrin, 3);
                  break;

               default:
                  vue_slot = vue_map->varying_to_slot[varying];
                  assert(vue_slot != -1);
                  nir_intrinsic_set_base(intrin, vue_slot);
                  break;
               }
            }
         }
      }
   }
}

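/* Lower TES inputs to URB offsets within the patch, using the TCS output
 * VUE map and the tessellation domain to place the tess level varyings.
 */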
void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   nir_foreach_shader_in_variable(var, nir)
      var->data.driver_location = var->data.location;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map,
                                    nir->info.tess._primitive_mode);
         }
      }
   }
}

/**
 * Convert interpolateAtOffset() offsets from [-0.5, +0.5] floating point
 * offsets to integer [-8, +7] offsets (in units of 1/16th of a pixel).
 *
 * We clamp to +7/16 on the upper end of the range, since +0.5 isn't
 * representable in a S0.4 value; a naive conversion would give us -8/16,
 * which is the opposite of what was intended.
 *
 * This is allowed by GL_ARB_gpu_shader5's quantization rules:
 *
 *    "Not all values of <offset> may be supported; x and y offsets may
 *     be rounded to fixed-point values with the number of fraction bits
 *     given by the implementation-dependent constant
 *     FRAGMENT_INTERPOLATION_OFFSET_BITS."
 */
static bool
lower_barycentric_at_offset(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   if (intrin->intrinsic != nir_intrinsic_load_barycentric_at_offset)
      return false;

   b->cursor = nir_before_instr(instr);

   assert(intrin->src[0].ssa);
   nir_ssa_def *offset =
      nir_imin(b, nir_imm_int(b, 7),
               nir_f2i32(b, nir_fmul(b, nir_imm_float(b, 16),
                                     intrin->src[0].ssa)));

   nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(offset));

   return true;
}

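/* Lower FS inputs: resolve default interpolation modes on the variables,
 * force per-sample interpolation when the key requests it, and clamp
 * interpolateAtOffset() offsets via lower_barycentric_at_offset.
 */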
void
brw_nir_lower_fs_inputs(nir_shader *nir,
                        const struct intel_device_info *devinfo,
                        const struct brw_wm_prog_key *key)
{
   nir_foreach_shader_in_variable(var, nir) {
      var->data.driver_location = var->data.location;

      /* Apply default interpolation mode.
       *
       * Everything defaults to smooth except for the legacy GL color
       * built-in variables, which might be flat depending on API state.
       */
      if (var->data.interpolation == INTERP_MODE_NONE) {
         const bool flat = key->flat_shade &&
            (var->data.location == VARYING_SLOT_COL0 ||
             var->data.location == VARYING_SLOT_COL1);

         var->data.interpolation = flat ? INTERP_MODE_FLAT
                                        : INTERP_MODE_SMOOTH;
      }

      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      if (devinfo->ver < 6) {
         var->data.centroid = false;
         var->data.sample = false;
      }
   }

   nir_lower_io_options lower_io_options = nir_lower_io_lower_64bit_to_32;
   if (key->persample_interp)
      lower_io_options |= nir_lower_io_force_sample_interpolation;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
   if (devinfo->ver >= 11)
      nir_lower_interpolation(nir, ~0);

   if (!key->multisample_fbo)
      nir_lower_single_sampled(nir);

   nir_shader_instructions_pass(nir, lower_barycentric_at_offset,
                                nir_metadata_block_index |
                                nir_metadata_dominance,
                                NULL);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
}

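/* Lower VUE outputs to vec4 slots keyed by varying location. */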
void
brw_nir_lower_vue_outputs(nir_shader *nir)
{
   nir_foreach_shader_out_variable(var, nir) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);
}

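/* Lower TCS outputs to patch URB offsets, remapping the tess level varyings
 * according to the TES primitive mode (see remap_patch_urb_offsets).
 */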
void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
                          enum tess_primitive_mode tes_primitive_mode)
{
   nir_foreach_shader_out_variable(var, nir) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_out);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
         }
      }
   }
}

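/* Lower FS outputs; both the output location and the dual-source blend
 * index are packed into driver_location.
 */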
void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_foreach_shader_out_variable(var, nir) {
      var->data.driver_location =
         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}

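/* Run a NIR pass via NIR_PASS, OR its result into the function-local
 * `progress` flag, and evaluate to whether this particular pass made
 * progress.  Requires `nir` and `progress` to be in scope at the call site.
 */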
#define OPT(pass, ...) ({                                  \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   if (this_progress)                                      \
      progress = true;                                     \
   this_progress;                                          \
})

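/* The standard NIR optimization loop: keeps re-running the passes below
 * until an iteration makes no further progress.
 */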
void
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
                 bool is_scalar, bool allow_copies)
{
   bool progress;
   unsigned lower_flrp =
      (nir->options->lower_flrp16 ? 16 : 0) |
      (nir->options->lower_flrp32 ? 32 : 0) |
      (nir->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;
      OPT(nir_split_array_vars, nir_var_function_temp);
      OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
      OPT(nir_opt_deref);
      if (OPT(nir_opt_memcpy))
         OPT(nir_split_var_copies);
      OPT(nir_lower_vars_to_ssa);
      if (allow_copies) {
         /* Only run this pass in the first call to brw_nir_optimize.  Later
          * calls assume that we've lowered away any copy_deref instructions
          * and we don't want to introduce any more.
          */
         OPT(nir_opt_find_array_copies);
      }
      OPT(nir_opt_copy_prop_vars);
      OPT(nir_opt_dead_write_vars);
      OPT(nir_opt_combine_stores, nir_var_all);

      OPT(nir_opt_ray_queries);

      if (is_scalar) {
         OPT(nir_lower_alu_to_scalar, NULL, NULL);
      } else {
         OPT(nir_opt_shrink_stores, true);
         OPT(nir_opt_shrink_vectors);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT(nir_lower_phis_to_scalar, false);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_combine_stores, nir_var_all);

      /* Passing 0 to the peephole select pass causes it to convert
       * if-statements that contain only move instructions in the branches
       * regardless of the count.
       *
       * Passing 1 to the peephole select pass causes it to convert
       * if-statements that contain at most a single ALU instruction (total)
       * in both branches.  Before Gfx6, some math instructions were
       * prohibitively expensive and the results of compare operations needed
       * an extra resolve step.  For these reasons, this pass is more harmful
       * than good on those platforms.
       *
       * For indirect loads of uniforms (push constants), we assume that array
       * indices will nearly always be in bounds and the cost of the load is
       * low.  Therefore there shouldn't be a performance benefit to avoid it.
       * However, in vec4 tessellation shaders, these loads operate by
       * actually pulling from memory.
       */
      const bool is_vec4_tessellation = !is_scalar &&
         (nir->info.stage == MESA_SHADER_TESS_CTRL ||
          nir->info.stage == MESA_SHADER_TESS_EVAL);
      OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
      OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
          compiler->devinfo->ver >= 6);

      OPT(nir_opt_intrinsics);
      OPT(nir_opt_idiv_const, 32);
      OPT(nir_opt_algebraic);
      OPT(nir_lower_constant_convert_alu_types);
      OPT(nir_opt_constant_folding);

      if (lower_flrp != 0) {
         if (OPT(nir_lower_flrp,
                 lower_flrp,
                 false /* always_precise */)) {
            OPT(nir_opt_constant_folding);
         }

         /* Nothing should rematerialize any flrps, so we only need to do this
          * lowering once.
          */
         lower_flrp = 0;
      }

      OPT(nir_opt_dead_cf);
      if (OPT(nir_opt_trivial_continues)) {
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
      }
      OPT(nir_opt_if, false);
      OPT(nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations != 0) {
         OPT(nir_opt_loop_unroll);
      }
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_gcm, false);
      OPT(nir_opt_undef);
      OPT(nir_lower_pack);
   } while (progress);

   /* Work around a Gfxbench unused local sampler variable which would
    * trigger an assert in the opt_large_constants pass.
    */
   OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
}

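/* Callback for nir_lower_bit_size().  Returns the bit size an instruction
 * should be lowered to, or 0 to leave it alone; it mostly promotes 8-bit
 * ALU/subgroup/phi operations to 16 bits and forces math the hardware only
 * supports at full precision to 32 bits.
 */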
static unsigned
lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
{
   const struct brw_compiler *compiler = (const struct brw_compiler *) data;
   const struct intel_device_info *devinfo = compiler->devinfo;

   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      switch (alu->op) {
      case nir_op_bit_count:
      case nir_op_ufind_msb:
      case nir_op_ifind_msb:
      case nir_op_find_lsb:
         /* These are handled specially because the destination is always
          * 32-bit and so the bit size of the instruction is given by the
          * source.
          */
         assert(alu->src[0].src.is_ssa);
         return alu->src[0].src.ssa->bit_size == 32 ? 0 : 32;
      default:
         break;
      }

      assert(alu->dest.dest.is_ssa);
      if (alu->dest.dest.ssa.bit_size >= 32)
         return 0;

      /* Note: nir_op_iabs and nir_op_ineg are not lowered here because the
       * 8-bit ABS or NEG instruction should eventually get copy propagated
       * into the MOV that does the type conversion.  This results in far
       * fewer MOV instructions.
       */
      switch (alu->op) {
      case nir_op_idiv:
      case nir_op_imod:
      case nir_op_irem:
      case nir_op_udiv:
      case nir_op_umod:
      case nir_op_fceil:
      case nir_op_ffloor:
      case nir_op_ffract:
      case nir_op_fround_even:
      case nir_op_ftrunc:
         return 32;
      case nir_op_frcp:
      case nir_op_frsq:
      case nir_op_fsqrt:
      case nir_op_fpow:
      case nir_op_fexp2:
      case nir_op_flog2:
      case nir_op_fsin:
      case nir_op_fcos:
         return devinfo->ver < 9 ? 32 : 0;
      case nir_op_isign:
         assert(!"Should have been lowered by nir_opt_algebraic.");
         return 0;
      default:
         if (nir_op_infos[alu->op].num_inputs >= 2 &&
             alu->dest.dest.ssa.bit_size == 8)
            return 16;

         if (nir_alu_instr_is_comparison(alu) &&
             alu->src[0].src.ssa->bit_size == 8)
            return 16;

         return 0;
      }
      break;
   }

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_read_invocation:
      case nir_intrinsic_read_first_invocation:
      case nir_intrinsic_vote_feq:
      case nir_intrinsic_vote_ieq:
      case nir_intrinsic_shuffle:
      case nir_intrinsic_shuffle_xor:
      case nir_intrinsic_shuffle_up:
      case nir_intrinsic_shuffle_down:
      case nir_intrinsic_quad_broadcast:
      case nir_intrinsic_quad_swap_horizontal:
      case nir_intrinsic_quad_swap_vertical:
      case nir_intrinsic_quad_swap_diagonal:
         if (intrin->src[0].ssa->bit_size == 8)
            return 16;
         return 0;

      case nir_intrinsic_reduce:
      case nir_intrinsic_inclusive_scan:
      case nir_intrinsic_exclusive_scan:
         /* There are a couple of register region issues that make things
          * complicated for 8-bit types:
          *
          * 1. Only raw moves are allowed to write to a packed 8-bit
          *    destination.
          * 2. If we use a strided destination, the efficient way to do
          *    scan operations ends up using strides that are too big to
          *    encode in an instruction.
          *
          * To get around these issues, we just do all 8-bit scan operations
          * in 16 bits.  It's actually fewer instructions than what we'd have
          * to do if we were trying to do it in native 8-bit types and the
          * results are the same once we truncate to 8 bits at the end.
          */
         if (intrin->dest.ssa.bit_size == 8)
            return 16;
         return 0;

      default:
         return 0;
      }
      break;
   }

   case nir_instr_type_phi: {
      nir_phi_instr *phi = nir_instr_as_phi(instr);
      if (phi->dest.ssa.bit_size == 8)
         return 16;
      return 0;
   }

   default:
      return 0;
   }
}

/* On gfx12.5+, if the offsets are not both constant and in the {-8,7} range,
 * we will have nir_lower_tex() lower the source offset by returning true from
 * this filter function.
 */
static bool
lower_xehp_tg4_offset_filter(const nir_instr *instr, UNUSED const void *data)
{
   if (instr->type != nir_instr_type_tex)
      return false;

   nir_tex_instr *tex = nir_instr_as_tex(instr);

   if (tex->op != nir_texop_tg4)
      return false;

   int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
   if (offset_index < 0)
      return false;

   if (!nir_src_is_const(tex->src[offset_index].src))
      return true;

   int64_t offset_x = nir_src_comp_as_int(tex->src[offset_index].src, 0);
   int64_t offset_y = nir_src_comp_as_int(tex->src[offset_index].src, 1);

   return offset_x < -8 || offset_x > 7 || offset_y < -8 || offset_y > 7;
}

/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
void
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
                   const nir_shader *softfp64)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   UNUSED bool progress; /* Written by OPT */

   const bool is_scalar = compiler->scalar_stage[nir->info.stage];

   nir_validate_ssa_dominance(nir, "before brw_preprocess_nir");

   if (is_scalar) {
      OPT(nir_lower_alu_to_scalar, NULL, NULL);
   }

   if (nir->info.stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics, 0);

   /* See also brw_nir_trig_workarounds.py */
   if (compiler->precise_trig &&
       !(devinfo->ver >= 10 || devinfo->platform == INTEL_PLATFORM_KBL))
      OPT(brw_nir_apply_trig_workarounds);

   if (devinfo->ver >= 12)
      OPT(brw_nir_clamp_image_1d_2d_array_sizes);

   const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
      .lower_txf_offset = true,
      .lower_rect_offset = true,
      .lower_txd_cube_map = true,
      .lower_txd_3d = devinfo->verx10 >= 125,    /* Wa_1209978020 */
      .lower_txd_array = devinfo->verx10 >= 125, /* Wa_1209978020 */
      .lower_txb_shadow_clamp = true,
      .lower_txd_shadow_clamp = true,
      .lower_txd_offset_clamp = true,
      .lower_tg4_offsets = true,
      .lower_txs_lod = true, /* Wa_14012320009 */
      .lower_offset_filter =
         devinfo->verx10 >= 125 ? lower_xehp_tg4_offset_filter : NULL,
      .lower_invalid_implicit_lod = true,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);
   OPT(nir_split_struct_vars, nir_var_function_temp);

   brw_nir_optimize(nir, compiler, is_scalar, true);

   OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
   OPT(nir_lower_int64);

   OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);

   if (is_scalar) {
      OPT(nir_lower_load_const_to_scalar);
   }

   /* Lower a bunch of stuff */
   OPT(nir_lower_var_copies);

   /* This needs to be run after the first optimization pass but before we
    * lower indirect derefs away
    */
   if (compiler->supports_shader_constants) {
      OPT(nir_opt_large_constants, NULL, 32);
   }

   OPT(nir_lower_system_values);
   OPT(nir_lower_compute_system_values, NULL);

   const nir_lower_subgroups_options subgroups_options = {
      .ballot_bit_size = 32,
      .ballot_components = 1,
      .lower_to_scalar = true,
      .lower_vote_trivial = !is_scalar,
      .lower_relative_shuffle = true,
      .lower_quad_broadcast_dynamic = true,
      .lower_elect = true,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   nir_variable_mode indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);
   OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);

   /* Even in cases where we can handle indirect temporaries via scratch, it
    * can still be expensive.  Lower indirects on small arrays to
    * conditional load/stores.
    *
    * The threshold of 16 was chosen semi-arbitrarily.  The idea is that an
    * indirect on an array of 16 elements is about 30 instructions, at which
    * point you may be better off doing a send.  With a SIMD8 program, 16
    * floats is 1/8 of the entire register file.  Any array larger than that
    * is likely to cause pressure issues.  Also, this value is sufficiently
    * high that the benchmarks known to suffer from large temporary array
    * issues are helped but nothing else in shader-db is hurt except for maybe
    * that one Kerbal Space Program shader.
    */
   if (is_scalar && !(indirect_mask & nir_var_function_temp))
      OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);

   /* Lower array derefs of vectors for SSBO and UBO loads.  For both UBOs and
    * SSBOs, our back-end is capable of loading an entire vec4 at a time and
    * we would like to take advantage of that whenever possible regardless of
    * whether or not the app gives us full loads.  This should allow the
    * optimizer to combine UBO and SSBO load operations and save us some send
    * messages.
    */
   OPT(nir_lower_array_deref_of_vec,
       nir_var_mem_ubo | nir_var_mem_ssbo,
       nir_lower_direct_array_deref_of_vec_load);

   /* Get rid of split copies */
   brw_nir_optimize(nir, compiler, is_scalar, false);
}

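/* Link a producer/consumer shader pair: dead-code eliminate and compact the
 * varyings between the two stages, then re-optimize both shaders.
 */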
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer)
{
   if (producer->info.stage == MESA_SHADER_MESH &&
       consumer->info.stage == MESA_SHADER_FRAGMENT) {
      /* gl_MeshPerPrimitiveNV[].gl_ViewportIndex, gl_PrimitiveID and gl_Layer
       * are per primitive, but the fragment shader does not have them marked
       * as such.  Add the annotation here.
       */
      nir_foreach_shader_in_variable(var, consumer) {
         switch (var->data.location) {
         case VARYING_SLOT_LAYER:
         case VARYING_SLOT_PRIMITIVE_ID:
         case VARYING_SLOT_VIEWPORT:
            var->data.per_primitive = 1;
            break;
         default:
            continue;
         }
      }
   }

   nir_lower_io_arrays_to_elements(producer, consumer);
   nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
   nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");

   const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
   const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];

   if (p_is_scalar && c_is_scalar) {
      NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
      NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
      brw_nir_optimize(producer, compiler, p_is_scalar, false);
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
   }

   if (nir_link_opt_varyings(producer, consumer))
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);

   NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
   NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);

   if (nir_remove_unused_varyings(producer, consumer)) {
      if (should_print_nir(producer)) {
         printf("nir_remove_unused_varyings\n");
         nir_print_shader(producer, stdout);
      }
      if (should_print_nir(consumer)) {
         printf("nir_remove_unused_varyings\n");
         nir_print_shader(consumer, stdout);
      }

      NIR_PASS(_, producer, nir_lower_global_vars_to_local);
      NIR_PASS(_, consumer, nir_lower_global_vars_to_local);

      /* The backend might not be able to handle indirects on
       * temporaries so we need to lower indirects on any of the
       * varyings we have demoted here.
       */
      NIR_PASS(_, producer, nir_lower_indirect_derefs,
               brw_nir_no_indirect_mask(compiler, producer->info.stage),
               UINT32_MAX);
      NIR_PASS(_, consumer, nir_lower_indirect_derefs,
               brw_nir_no_indirect_mask(compiler, consumer->info.stage),
               UINT32_MAX);

      brw_nir_optimize(producer, compiler, p_is_scalar, false);
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
   }

   NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
   NIR_PASS(_, producer, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS(_, consumer, nir_lower_io_to_vector, nir_var_shader_in);

   if (producer->info.stage != MESA_SHADER_TESS_CTRL &&
       producer->info.stage != MESA_SHADER_MESH &&
       producer->info.stage != MESA_SHADER_TASK) {
      /* Calling lower_io_to_vector creates output variable writes with
       * write-masks.  On non-TCS outputs, the back-end can't handle it and we
       * need to call nir_lower_io_to_temporaries to get rid of them.  This,
       * in turn, creates temporary variables and extra copy_deref intrinsics
       * that we need to clean up.
       *
       * Note that Mesh/Task don't support I/O as temporaries (I/O is shared
       * between the whole workgroup, possibly using multiple HW threads).
       * For those stages, write-masks on outputs are handled by the I/O
       * lowering.
       */
      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(producer), true, false);
      NIR_PASS(_, producer, nir_lower_global_vars_to_local);
      NIR_PASS(_, producer, nir_split_var_copies);
      NIR_PASS(_, producer, nir_lower_var_copies);
   }
}

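/* Callback for nir_opt_load_store_vectorize(); accepts a combined access
 * only if it stays within 32 bits x vec4 and is sufficiently aligned.
 */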
2019-12-14 16:44:39 +00:00
|
|
|
static bool
|
2020-09-08 18:58:49 +01:00
|
|
|
brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
|
|
|
|
unsigned bit_size,
|
2020-09-08 19:12:56 +01:00
|
|
|
unsigned num_components,
|
2019-12-14 16:44:39 +00:00
|
|
|
nir_intrinsic_instr *low,
|
2020-03-13 15:43:16 +00:00
|
|
|
nir_intrinsic_instr *high,
|
|
|
|
void *data)
|
2019-12-14 16:44:39 +00:00
|
|
|
{
|
|
|
|
/* Don't combine things to generate 64-bit loads/stores. We have to split
|
|
|
|
* those back into 32-bit ones anyway and UBO loads aren't split in NIR so
|
|
|
|
* we don't want to make a mess for the back-end.
|
|
|
|
*/
|
|
|
|
if (bit_size > 32)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* We can handle at most a vec4 right now. Anything bigger would get
|
|
|
|
* immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
|
|
|
|
*/
|
|
|
|
if (num_components > 4)
|
|
|
|
return false;
|
|
|
|
|
2020-09-08 18:58:49 +01:00
|
|
|
|
|
|
|
uint32_t align;
|
|
|
|
if (align_offset)
|
|
|
|
align = 1 << (ffs(align_offset) - 1);
|
|
|
|
else
|
|
|
|
align = align_mul;
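
   /* Worked example (illustrative): align_mul = 16 with align_offset = 4
    * means the address is 4 (mod 16); the strongest alignment that
    * guarantees is 1 << (ffs(4) - 1) = 4 bytes, enough for a 32-bit element
    * but not a 64-bit one.  With align_offset = 0 the address is an exact
    * multiple of align_mul, so align_mul itself is the guarantee.
    */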

   if (align < bit_size / 8)
      return false;

   return true;
}

static bool
combine_all_barriers(nir_intrinsic_instr *a,
                     nir_intrinsic_instr *b,
                     void *data)
{
   /* Translation to backend IR will get rid of modes we don't care about,
    * so no harm in always combining them.
    *
    * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
    * scheduling so that it can take advantage of the different semantics.
    */
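   /* For instance (illustrative values): an ACQUIRE barrier on
    * nir_var_mem_ssbo combined with a RELEASE barrier on nir_var_mem_shared
    * yields one barrier covering both modes, with ACQUIRE|RELEASE
    * semantics, at the wider of the two scopes.
    */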
   nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
                                     nir_intrinsic_memory_modes(b));
   nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
                                         nir_intrinsic_memory_semantics(b));
   nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
                                          nir_intrinsic_memory_scope(b)));
   return true;
}

static void
brw_vectorize_lower_mem_access(nir_shader *nir,
                               const struct brw_compiler *compiler,
                               bool is_scalar,
                               bool robust_buffer_access)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   bool progress = false;

   if (is_scalar) {
      nir_load_store_vectorize_options options = {
         .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
                  nir_var_mem_global | nir_var_mem_shared |
                  nir_var_mem_task_payload,
         .callback = brw_nir_should_vectorize_mem,
         .robust_modes = (nir_variable_mode)0,
      };

      if (robust_buffer_access) {
         options.robust_modes = nir_var_mem_ubo | nir_var_mem_ssbo |
                                nir_var_mem_global;
      }

      OPT(nir_opt_load_store_vectorize, &options);
   }
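
   /* Illustrative effect (hypothetical NIR): two adjacent 32-bit SSBO loads
    * at offset and offset+4 become a single two-component load, subject to
    * the bit-size, component-count, and alignment limits enforced by
    * brw_nir_should_vectorize_mem().
    */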

   OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);

   while (progress) {
      progress = false;

      OPT(nir_lower_pack);
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
   }
}

static bool
nir_shader_has_local_variables(const nir_shader *nir)
{
   nir_foreach_function(func, nir) {
      if (func->impl && !exec_list_is_empty(&func->impl->locals))
         return true;
   }

   return false;
}

/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
void
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
                    bool is_scalar, bool debug_enabled,
                    bool robust_buffer_access)
{
   const struct intel_device_info *devinfo = compiler->devinfo;

   UNUSED bool progress; /* Written by OPT */

   OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);

   OPT(brw_nir_lower_scoped_barriers);
   OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);

   do {
      progress = false;
      OPT(nir_opt_algebraic_before_ffma);
   } while (progress);

   if (devinfo->verx10 >= 125) {
      const nir_lower_idiv_options options = {
         .imprecise_32bit_lowering = false,
         .allow_fp16 = false
      };
      OPT(nir_lower_idiv, &options);
   }

   if (gl_shader_stage_can_set_fragment_shading_rate(nir->info.stage))
      brw_nir_lower_shading_rate_output(nir);

   brw_nir_optimize(nir, compiler, is_scalar, false);

   if (is_scalar && nir_shader_has_local_variables(nir)) {
      OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
          glsl_get_natural_size_align_bytes);
      OPT(nir_lower_explicit_io, nir_var_function_temp,
          nir_address_format_32bit_offset);
      brw_nir_optimize(nir, compiler, is_scalar, false);
   }

   brw_vectorize_lower_mem_access(nir, compiler, is_scalar,
                                  robust_buffer_access);

   if (OPT(nir_lower_int64))
      brw_nir_optimize(nir, compiler, is_scalar, false);

   if (devinfo->ver >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }
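
   /* e.g. (illustrative): fadd(fmul(a, b), c) is fused into a single
    * ffma(a, b, c), saving an instruction and an intermediate value.
    */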

   if (OPT(nir_opt_comparison_pre)) {
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);

      /* Do the select peephole again.  nir_opt_comparison_pre (combined
       * with the other optimization passes) will have removed at least one
       * instruction from one of the branches of the if-statement, so now it
       * might be under the threshold of conversion to bcsel.
       *
       * See brw_nir_optimize for the explanation of is_vec4_tessellation.
       */
      const bool is_vec4_tessellation = !is_scalar &&
         (nir->info.stage == MESA_SHADER_TESS_CTRL ||
          nir->info.stage == MESA_SHADER_TESS_EVAL);
      OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
      OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
          compiler->devinfo->ver >= 6);
   }

   do {
      progress = false;
      if (OPT(nir_opt_algebraic_late)) {
         /* At this late stage, anything that makes more constants will
          * wreak havoc on the vec4 backend.  The handling of constants in
          * the vec4 backend is not good.
          */
         if (is_scalar)
            OPT(nir_opt_constant_folding);

         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
         OPT(nir_opt_cse);
      }
   } while (progress);

   OPT(brw_nir_lower_conversions);

   if (is_scalar)
      OPT(nir_lower_alu_to_scalar, NULL, NULL);

   while (OPT(nir_opt_algebraic_distribute_src_mods)) {
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
   }
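
   /* The pass above rewrites, e.g., fneg(fmul(a, b)) into fmul(fneg(a), b)
    * (illustrative), letting the negation fold into a source modifier
    * instead of costing a separate instruction.
    */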

   OPT(nir_copy_prop);
   OPT(nir_opt_dce);
   OPT(nir_opt_move, nir_move_comparisons);
   OPT(nir_opt_dead_cf);

   NIR_PASS_V(nir, nir_convert_to_lcssa, true, true);
   NIR_PASS_V(nir, nir_divergence_analysis);

   /* TODO: Enable nir_opt_uniform_atomics on Gfx7.x too.
    * It currently fails Vulkan tests on Haswell for an unknown reason.
    *
    * TODO: Using this optimization on RT/OpenCL kernels also seems to cause
    * issues.  Until we can understand those issues, disable it.
    */
   bool opt_uniform_atomic_stage_allowed =
      devinfo->ver >= 8 &&
      nir->info.stage != MESA_SHADER_KERNEL &&
      nir->info.stage != MESA_SHADER_RAYGEN &&
      !gl_shader_stage_is_callable(nir->info.stage);

   if (opt_uniform_atomic_stage_allowed && OPT(nir_opt_uniform_atomics)) {
      const nir_lower_subgroups_options subgroups_options = {
         .ballot_bit_size = 32,
         .ballot_components = 1,
         .lower_elect = true,
      };
      OPT(nir_lower_subgroups, &subgroups_options);

      if (OPT(nir_lower_int64))
         brw_nir_optimize(nir, compiler, is_scalar, false);
   }
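
   /* Illustrative transform (hypothetical shader): when every invocation in
    * a subgroup does atomicAdd(counter, 1) on the same address,
    * nir_opt_uniform_atomics replaces the per-channel atomics with a
    * subgroup reduction of the addends plus one atomic issued by a single
    * elected invocation.
    */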

   /* Clean up LCSSA phis */
   OPT(nir_opt_remove_phis);

   OPT(nir_lower_bool_to_int32);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   OPT(nir_lower_locals_to_regs);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function(function, nir) {
         if (function->impl)
            nir_index_ssa_defs(function->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }

   nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa");

   OPT(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs, NULL, NULL);
   }

   OPT(nir_opt_dce);

   if (OPT(nir_opt_rematerialize_compares))
      OPT(nir_opt_dce);

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->ver <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }
}

static bool
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_compiler *compiler,
                          const struct brw_sampler_prog_key_data *key_tex)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   nir_lower_tex_options tex_options = {
      .lower_txd_clamp_bindless_sampler = true,
      .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
      .lower_invalid_implicit_lod = true,
   };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->ver < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->ver < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to lower gradients on shadow samplers */
   tex_options.lower_txd_shadow = devinfo->verx10 <= 70;

   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
   tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
   tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;
   tex_options.lower_xyuv_external = key_tex->xyuv_image_mask;
   tex_options.bt709_external = key_tex->bt709_mask;
   tex_options.bt2020_external = key_tex->bt2020_mask;

   /* Set up the array of scaling factors for each texture. */
   memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
          sizeof(tex_options.scale_factors));

   return nir_lower_tex(nir, &tex_options);
}

static unsigned
get_subgroup_size(const struct shader_info *info, unsigned max_subgroup_size)
{
   switch (info->subgroup_size) {
   case SUBGROUP_SIZE_API_CONSTANT:
      /* We have to use the global constant size. */
      return BRW_SUBGROUP_SIZE;

   case SUBGROUP_SIZE_UNIFORM:
      /* It has to be uniform across all invocations but can vary per stage
       * if we want.  This gives us a bit more freedom.
       *
       * For compute, brw_nir_apply_key is called per-dispatch-width so this
       * is the actual subgroup size and not a maximum.  However, we only
       * invoke one size of any given compute shader so it's still
       * guaranteed to be uniform across invocations.
       */
      return max_subgroup_size;

   case SUBGROUP_SIZE_VARYING:
      /* The subgroup size is allowed to be fully varying.  For geometry
       * stages, we know it's always 8 which is max_subgroup_size so we can
       * return that.  For compute, brw_nir_apply_key is called once per
       * dispatch-width so max_subgroup_size is the real subgroup size.
       *
       * For fragment, we return 0 and let it fall through to the back-end
       * compiler.  This means we can't optimize based on subgroup size but
       * that's a risk the client took when it asked for a varying subgroup
       * size.
       */
      return info->stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;

   case SUBGROUP_SIZE_REQUIRE_8:
   case SUBGROUP_SIZE_REQUIRE_16:
   case SUBGROUP_SIZE_REQUIRE_32:
      assert(gl_shader_stage_uses_workgroup(info->stage));
      /* These enum values are expressly chosen to be equal to the subgroup
       * size that they require.
       */
      return info->subgroup_size;

   case SUBGROUP_SIZE_FULL_SUBGROUPS:
   case SUBGROUP_SIZE_REQUIRE_64:
   case SUBGROUP_SIZE_REQUIRE_128:
      break;
   }

   unreachable("Invalid subgroup size type");
}

void
brw_nir_apply_key(nir_shader *nir,
                  const struct brw_compiler *compiler,
                  const struct brw_base_prog_key *key,
                  unsigned max_subgroup_size,
                  bool is_scalar)
{
   bool progress = false;

   OPT(brw_nir_apply_sampler_key, compiler, &key->tex);

   const nir_lower_subgroups_options subgroups_options = {
      .subgroup_size = get_subgroup_size(&nir->info, max_subgroup_size),
      .ballot_bit_size = 32,
      .ballot_components = 1,
      .lower_subgroup_masks = true,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   if (key->limit_trig_input_range)
      OPT(brw_nir_limit_trig_input_range_workaround);

   if (progress)
      brw_nir_optimize(nir, compiler, is_scalar, false);
}

enum brw_conditional_mod
brw_cmod_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_flt32:
   case nir_op_ilt:
   case nir_op_ilt32:
   case nir_op_ult:
   case nir_op_ult32:
      return BRW_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_fge32:
   case nir_op_ige:
   case nir_op_ige32:
   case nir_op_uge:
   case nir_op_uge32:
      return BRW_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_feq32:
   case nir_op_ieq:
   case nir_op_ieq32:
   case nir_op_b32all_fequal2:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32all_iequal4:
      return BRW_CONDITIONAL_Z;

   case nir_op_fneu:
   case nir_op_fneu32:
   case nir_op_ine:
   case nir_op_ine32:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32any_inequal4:
      return BRW_CONDITIONAL_NZ;

   default:
      unreachable("Unsupported NIR comparison op");
   }
}

uint32_t
brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
{
   switch (atomic->intrinsic) {
#define AOP_CASE(atom)                                      \
   case nir_intrinsic_image_atomic_##atom:                  \
   case nir_intrinsic_bindless_image_atomic_##atom:         \
   case nir_intrinsic_ssbo_atomic_##atom:                   \
   case nir_intrinsic_shared_atomic_##atom:                 \
   case nir_intrinsic_global_atomic_##atom

   AOP_CASE(add): {
      unsigned src_idx;
      switch (atomic->intrinsic) {
      case nir_intrinsic_image_atomic_add:
      case nir_intrinsic_bindless_image_atomic_add:
         src_idx = 3;
         break;
      case nir_intrinsic_ssbo_atomic_add:
         src_idx = 2;
         break;
      case nir_intrinsic_shared_atomic_add:
      case nir_intrinsic_global_atomic_add:
         src_idx = 1;
         break;
      default:
         unreachable("Invalid add atomic opcode");
      }

      if (nir_src_is_const(atomic->src[src_idx])) {
         int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
         if (add_val == 1)
            return BRW_AOP_INC;
         else if (add_val == -1)
            return BRW_AOP_DEC;
      }
      return BRW_AOP_ADD;
   }
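
   /* e.g. (illustrative): an atomicAdd(counter, 1) in the source thus maps
    * to the hardware increment op and atomicAdd(counter, -1) to decrement,
    * neither of which needs the constant operand in its data payload.
    */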

   AOP_CASE(imin):      return BRW_AOP_IMIN;
   AOP_CASE(umin):      return BRW_AOP_UMIN;
   AOP_CASE(imax):      return BRW_AOP_IMAX;
   AOP_CASE(umax):      return BRW_AOP_UMAX;
   AOP_CASE(and):       return BRW_AOP_AND;
   AOP_CASE(or):        return BRW_AOP_OR;
   AOP_CASE(xor):       return BRW_AOP_XOR;
   AOP_CASE(exchange):  return BRW_AOP_MOV;
   AOP_CASE(comp_swap): return BRW_AOP_CMPWR;

#undef AOP_CASE
#define AOP_CASE(atom)                            \
   case nir_intrinsic_ssbo_atomic_##atom:         \
   case nir_intrinsic_shared_atomic_##atom:       \
   case nir_intrinsic_global_atomic_##atom

   AOP_CASE(fmin):       return BRW_AOP_FMIN;
   AOP_CASE(fmax):       return BRW_AOP_FMAX;
   AOP_CASE(fcomp_swap): return BRW_AOP_FCMPWR;
   AOP_CASE(fadd):       return BRW_AOP_FADD;

#undef AOP_CASE

   default:
      unreachable("Unsupported NIR atomic intrinsic");
   }
}

enum brw_reg_type
brw_type_for_nir_type(const struct intel_device_info *devinfo,
                      nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
   case nir_type_bool32:
   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
   case nir_type_float16:
      return BRW_REGISTER_TYPE_HF;
   case nir_type_float64:
      return BRW_REGISTER_TYPE_DF;
   case nir_type_int64:
      return devinfo->ver < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
   case nir_type_uint64:
      return devinfo->ver < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
   case nir_type_int16:
      return BRW_REGISTER_TYPE_W;
   case nir_type_uint16:
      return BRW_REGISTER_TYPE_UW;
   case nir_type_int8:
      return BRW_REGISTER_TYPE_B;
   case nir_type_uint8:
      return BRW_REGISTER_TYPE_UB;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

nir_shader *
brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
                               const nir_shader_compiler_options *options,
                               const struct brw_tcs_prog_key *key)
{
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_TESS_CTRL,
                                                  options, "passthrough TCS");
   ralloc_steal(mem_ctx, b.shader);
   nir_shader *nir = b.shader;
   nir_variable *var;
   nir_ssa_def *load;
   nir_ssa_def *zero = nir_imm_int(&b, 0);
   nir_ssa_def *invoc_id = nir_load_invocation_id(&b);

   nir->info.inputs_read = key->outputs_written &
      ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
   nir->info.outputs_written = key->outputs_written;
   nir->info.tess.tcs_vertices_out = key->input_vertices;
   nir->num_uniforms = 8 * sizeof(uint32_t);

   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
   var->data.location = 0;
   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
   var->data.location = 1;

   /* Write the patch URB header. */
   for (int i = 0; i <= 1; i++) {
      load = nir_load_uniform(&b, 4, 32, zero, .base = i * 4 * sizeof(uint32_t));

      nir_store_output(&b, load, zero,
                       .base = VARYING_SLOT_TESS_LEVEL_INNER - i,
                       .write_mask = WRITEMASK_XYZW);
   }

   /* Copy inputs to outputs. */
   uint64_t varyings = nir->info.inputs_read;

   while (varyings != 0) {
      const int varying = ffsll(varyings) - 1;

      load = nir_load_per_vertex_input(&b, 4, 32, invoc_id, zero, .base = varying);

      nir_store_per_vertex_output(&b, load, invoc_id, zero,
                                  .base = varying,
                                  .write_mask = WRITEMASK_XYZW);

      varyings &= ~BITFIELD64_BIT(varying);
   }

   nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");

   brw_preprocess_nir(compiler, nir, NULL);

   return nir;
}

nir_ssa_def *
brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
                          nir_ssa_def *base_addr, unsigned off)
{
   assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
   assert(load_uniform->dest.is_ssa);
   assert(load_uniform->src[0].is_ssa);

   unsigned bit_size = load_uniform->dest.ssa.bit_size;
   assert(bit_size >= 8 && bit_size % 8 == 0);
   unsigned byte_size = bit_size / 8;
   nir_ssa_def *sysval;

   if (nir_src_is_const(load_uniform->src[0])) {
      uint64_t offset = off +
                        nir_intrinsic_base(load_uniform) +
                        nir_src_as_uint(load_uniform->src[0]);

      /* Things should be component-aligned. */
      assert(offset % byte_size == 0);

      unsigned suboffset = offset % 64;
      uint64_t aligned_offset = offset - suboffset;
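
      /* Worked example (illustrative): offset = 100 gives suboffset = 36
       * and aligned_offset = 64, so the two 64B block loads below cover
       * bytes 64..191 and nir_extract_bits pulls the value out starting at
       * bit 36 * 8 of that window.
       */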

      /* Load two just in case we go over a 64B boundary */
      nir_ssa_def *data[2];
      for (unsigned i = 0; i < 2; i++) {
         nir_ssa_def *addr = nir_iadd_imm(b, base_addr, aligned_offset + i * 64);
         data[i] = nir_load_global_const_block_intel(b, 16, addr,
                                                     nir_imm_true(b));
      }

      sysval = nir_extract_bits(b, data, 2, suboffset * 8,
                                load_uniform->num_components, bit_size);
   } else {
      nir_ssa_def *offset32 =
         nir_iadd_imm(b, load_uniform->src[0].ssa,
                      off + nir_intrinsic_base(load_uniform));
      nir_ssa_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset32));
      sysval = nir_load_global_constant(b, addr, byte_size,
                                        load_uniform->num_components, bit_size);
   }

   return sysval;
}