mesa/src/intel/compiler/brw_nir.c

/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "brw_nir.h"
#include "brw_shader.h"
#include "dev/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
GLenum primitive_mode)
{
const int location = nir_intrinsic_base(intr);
const unsigned component = nir_intrinsic_component(intr);
bool out_of_bounds;
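/* Summary of the patch URB header layout implemented by the cases below
 * (base is in vec4 units, so base 0 covers DWords 0-3 and base 1 covers
 * DWords 4-7):
 *
 *   Quads:     Inner[0..1] -> DWords 3-2 (reversed)
 *              Outer[0..3] -> DWords 7-4 (reversed)
 *   Triangles: Inner[0]    -> DWord  4
 *              Outer[0..2] -> DWords 7-5 (reversed)
 *   Isolines:  Outer[0..1] -> DWords 6-7 (in order)
 *
 * Components that don't exist for a given domain are flagged as
 * out-of-bounds and the access is deleted below.
 */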
if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
switch (primitive_mode) {
case GL_QUADS:
/* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
nir_intrinsic_set_base(intr, 0);
nir_intrinsic_set_component(intr, 3 - component);
out_of_bounds = false;
break;
case GL_TRIANGLES:
/* gl_TessLevelInner[0] lives at DWord 4. */
nir_intrinsic_set_base(intr, 1);
out_of_bounds = component > 0;
break;
case GL_ISOLINES:
out_of_bounds = true;
break;
default:
unreachable("Bogus tessellation domain");
}
} else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
if (primitive_mode == GL_ISOLINES) {
/* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
nir_intrinsic_set_base(intr, 1);
nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
out_of_bounds = component > 1;
} else {
/* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
nir_intrinsic_set_base(intr, 1);
nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES;
}
} else {
return false;
}
if (out_of_bounds) {
if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
b->cursor = nir_before_instr(&intr->instr);
nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(undef));
}
nir_instr_remove(&intr->instr);
}
return true;
}
static bool
is_input(nir_intrinsic_instr *intrin)
{
return intrin->intrinsic == nir_intrinsic_load_input ||
intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
intrin->intrinsic == nir_intrinsic_load_interpolated_input;
}
static bool
is_output(nir_intrinsic_instr *intrin)
{
return intrin->intrinsic == nir_intrinsic_load_output ||
intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
intrin->intrinsic == nir_intrinsic_store_output ||
intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
const struct brw_vue_map *vue_map,
GLenum tes_primitive_mode)
{
const bool is_passthrough_tcs = b->shader->info.name &&
strcmp(b->shader->info.name, "passthrough") == 0;
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
gl_shader_stage stage = b->shader->info.stage;
if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
(stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
if (!is_passthrough_tcs &&
remap_tess_levels(b, intrin, tes_primitive_mode))
continue;
int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
assert(vue_slot != -1);
intrin->const_index[0] = vue_slot;
nir_src *vertex = nir_get_io_vertex_index_src(intrin);
if (vertex) {
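/* Per-vertex data is laid out contiguously in the URB, so the final
 * slot is vertex_index * num_per_vertex_slots + offset; e.g. with 8
 * per-vertex slots, vertex 2 at offset 3 lands in slot 19.
 */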
if (nir_src_is_const(*vertex)) {
intrin->const_index[0] += nir_src_as_uint(*vertex) *
vue_map->num_per_vertex_slots;
} else {
b->cursor = nir_before_instr(&intrin->instr);
/* Multiply by the number of per-vertex slots. */
nir_ssa_def *vertex_offset =
nir_imul(b,
nir_ssa_for_src(b, *vertex, 1),
nir_imm_int(b,
vue_map->num_per_vertex_slots));
/* Add it to the existing offset */
nir_src *offset = nir_get_io_offset_src(intrin);
nir_ssa_def *total_offset =
nir_iadd(b, vertex_offset,
nir_ssa_for_src(b, *offset, 1));
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(total_offset));
}
}
}
}
return true;
}
void
brw_nir_lower_vs_inputs(nir_shader *nir,
const uint8_t *vs_attrib_wa_flags)
{
/* Start with the location of the variable's base. */
foreach_list_typed(nir_variable, var, node, &nir->inputs) {
var->data.driver_location = var->data.location;
}
/* Now use nir_lower_io to walk dereference chains. Attribute arrays are
* loaded as one vec4 or dvec4 per element (or matrix column), depending on
* whether it is a double-precision type or not.
*/
nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
/* The last step is to remap VERT_ATTRIB_* to actual registers */
/* Whether or not we have any system-generated values. gl_DrawID is not
* included here as it lives in its own vec4.
*/
const bool has_sgvs =
nir->info.system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX) |
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID));
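/* The VF stores the system-generated values as up to two extra vertex
 * elements appended after the user inputs, matching the component
 * assignments below:
 *
 *   Element num_inputs:     .x = FirstVertex     .y = BaseInstance
 *                           .z = VertexID (zero-based)
 *                           .w = InstanceID
 *   Element num_inputs + 1: .x = DrawID          .y = IsIndexedDraw
 */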
const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
nir_foreach_function(function, nir) {
if (!function->impl)
continue;
nir_builder b;
nir_builder_init(&b, function->impl);
nir_foreach_block(block, function->impl) {
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
switch (intrin->intrinsic) {
case nir_intrinsic_load_first_vertex:
case nir_intrinsic_load_base_instance:
case nir_intrinsic_load_vertex_id_zero_base:
case nir_intrinsic_load_instance_id:
case nir_intrinsic_load_is_indexed_draw:
case nir_intrinsic_load_draw_id: {
b.cursor = nir_after_instr(&intrin->instr);
/* gl_VertexID and friends are stored by the VF as the last
* vertex element. We convert them to load_input intrinsics at
* the right location.
*/
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
nir_intrinsic_set_base(load, num_inputs);
switch (intrin->intrinsic) {
case nir_intrinsic_load_first_vertex:
nir_intrinsic_set_component(load, 0);
break;
case nir_intrinsic_load_base_instance:
nir_intrinsic_set_component(load, 1);
break;
case nir_intrinsic_load_vertex_id_zero_base:
nir_intrinsic_set_component(load, 2);
break;
case nir_intrinsic_load_instance_id:
nir_intrinsic_set_component(load, 3);
break;
case nir_intrinsic_load_draw_id:
case nir_intrinsic_load_is_indexed_draw:
/* gl_DrawID and IsIndexedDraw are stored right after
* gl_VertexID and friends if any of them exist.
*/
nir_intrinsic_set_base(load, num_inputs + has_sgvs);
if (intrin->intrinsic == nir_intrinsic_load_draw_id)
nir_intrinsic_set_component(load, 0);
else
nir_intrinsic_set_component(load, 1);
break;
default:
unreachable("Invalid system value intrinsic");
}
load->num_components = 1;
nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
nir_builder_instr_insert(&b, &load->instr);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
nir_src_for_ssa(&load->dest.ssa));
nir_instr_remove(&intrin->instr);
break;
}
case nir_intrinsic_load_input: {
/* Attributes come in a contiguous block, ordered by their
* gl_vert_attrib value. That means we can compute the slot
* number for an attribute by masking out the enabled attributes
* before it and counting the bits.
*/
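/* For example, if inputs_read is 0b11001 and this load's base is 4,
 * masking leaves 0b01001; its two set bits give slot 2.
 */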
int attr = nir_intrinsic_base(intrin);
int slot = util_bitcount64(nir->info.inputs_read &
BITFIELD64_MASK(attr));
nir_intrinsic_set_base(intrin, slot);
break;
}
default:
break; /* Nothing to do */
}
}
}
}
}
void
brw_nir_lower_vue_inputs(nir_shader *nir,
const struct brw_vue_map *vue_map)
{
foreach_list_typed(nir_variable, var, node, &nir->inputs) {
var->data.driver_location = var->data.location;
}
/* Inputs are stored in vec4 slots, so use type_size_vec4(). */
nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
nir_foreach_function(function, nir) {
if (!function->impl)
continue;
nir_foreach_block(block, function->impl) {
nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic == nir_intrinsic_load_input ||
intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
/* Offset 0 is the VUE header, which contains
* VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
* VARYING_SLOT_PSIZ [.w].
*/
int varying = nir_intrinsic_base(intrin);
int vue_slot;
switch (varying) {
case VARYING_SLOT_PSIZ:
nir_intrinsic_set_base(intrin, 0);
nir_intrinsic_set_component(intrin, 3);
break;
default:
vue_slot = vue_map->varying_to_slot[varying];
assert(vue_slot != -1);
nir_intrinsic_set_base(intrin, vue_slot);
break;
}
}
}
}
}
}
void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
foreach_list_typed(nir_variable, var, node, &nir->inputs) {
var->data.driver_location = var->data.location;
}
nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
nir_foreach_function(function, nir) {
if (function->impl) {
nir_builder b;
nir_builder_init(&b, function->impl);
nir_foreach_block(block, function->impl) {
remap_patch_urb_offsets(block, &b, vue_map,
nir->info.tess.primitive_mode);
}
}
}
}
void
brw_nir_lower_fs_inputs(nir_shader *nir,
const struct gen_device_info *devinfo,
const struct brw_wm_prog_key *key)
{
foreach_list_typed(nir_variable, var, node, &nir->inputs) {
var->data.driver_location = var->data.location;
/* Apply default interpolation mode.
*
* Everything defaults to smooth except for the legacy GL color
* built-in variables, which might be flat depending on API state.
*/
if (var->data.interpolation == INTERP_MODE_NONE) {
const bool flat = key->flat_shade &&
(var->data.location == VARYING_SLOT_COL0 ||
var->data.location == VARYING_SLOT_COL1);
var->data.interpolation = flat ? INTERP_MODE_FLAT
: INTERP_MODE_SMOOTH;
}
/* On Ironlake and below, there is only one interpolation mode.
* Centroid interpolation doesn't mean anything on this hardware --
* there is no multisampling.
*/
if (devinfo->gen < 6) {
var->data.centroid = false;
var->data.sample = false;
}
}
nir_lower_io_options lower_io_options = 0;
if (key->persample_interp)
lower_io_options |= nir_lower_io_force_sample_interpolation;
nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
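/* Gen11 dropped the PLN instruction, so emit the interpolation math in
 * NIR instead, where it can be optimized and scheduled.
 */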
if (devinfo->gen >= 11)
nir_lower_interpolation(nir, ~0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
}
void
brw_nir_lower_vue_outputs(nir_shader *nir)
{
nir_foreach_variable(var, &nir->outputs) {
var->data.driver_location = var->data.location;
}
nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
}
void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
GLenum tes_primitive_mode)
{
nir_foreach_variable(var, &nir->outputs) {
var->data.driver_location = var->data.location;
}
nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
nir_foreach_function(function, nir) {
if (function->impl) {
nir_builder b;
nir_builder_init(&b, function->impl);
nir_foreach_block(block, function->impl) {
remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
}
}
}
}
void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
nir_foreach_variable(var, &nir->outputs) {
var->data.driver_location =
SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
}
nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}
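/* Run a NIR pass, ORing the result into the local `progress` variable
 * (which the enclosing function must declare) and evaluating to whether
 * this particular pass made progress.
 */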
#define OPT(pass, ...) ({ \
bool this_progress = false; \
NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
if (this_progress) \
progress = true; \
this_progress; \
})
static nir_variable_mode
brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
gl_shader_stage stage)
{
nir_variable_mode indirect_mask = 0;
if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
indirect_mask |= nir_var_shader_in;
if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
indirect_mask |= nir_var_shader_out;
if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
indirect_mask |= nir_var_function_temp;
return indirect_mask;
}
void
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar, bool allow_copies)
{
nir_variable_mode indirect_mask =
brw_nir_no_indirect_mask(compiler, nir->info.stage);
bool progress;
unsigned lower_flrp =
(nir->options->lower_flrp16 ? 16 : 0) |
(nir->options->lower_flrp32 ? 32 : 0) |
(nir->options->lower_flrp64 ? 64 : 0);
do {
progress = false;
OPT(nir_split_array_vars, nir_var_function_temp);
OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
OPT(nir_opt_deref);
OPT(nir_lower_vars_to_ssa);
if (allow_copies) {
/* Only run this pass in the first call to brw_nir_optimize. Later
* calls assume that we've lowered away any copy_deref instructions
* and we don't want to introduce any more.
*/
OPT(nir_opt_find_array_copies);
}
OPT(nir_opt_copy_prop_vars);
OPT(nir_opt_dead_write_vars);
OPT(nir_opt_combine_stores, nir_var_all);
if (is_scalar) {
OPT(nir_lower_alu_to_scalar, NULL);
}
OPT(nir_copy_prop);
if (is_scalar) {
OPT(nir_lower_phis_to_scalar);
}
OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
OPT(nir_opt_combine_stores, nir_var_all);
/* Passing 0 to the peephole select pass causes it to convert
* if-statements that contain only move instructions in the branches
* regardless of the count.
*
* Passing 1 to the peephole select pass causes it to convert
* if-statements that contain at most a single ALU instruction (total)
* in both branches. Before Gen6, some math instructions were
* prohibitively expensive and the results of compare operations need an
* extra resolve step. For these reasons, this pass is more harmful
* than good on those platforms.
*
* For indirect loads of uniforms (push constants), we assume that array
* indices will nearly always be in bounds and the cost of the load is
* low. Therefore there shouldn't be a performance benefit to avoiding it.
* However, in vec4 tessellation shaders, these loads operate by
* actually pulling from memory.
*/
const bool is_vec4_tessellation = !is_scalar &&
(nir->info.stage == MESA_SHADER_TESS_CTRL ||
nir->info.stage == MESA_SHADER_TESS_EVAL);
OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
OPT(nir_opt_peephole_select, 1, !is_vec4_tessellation,
compiler->devinfo->gen >= 6);
OPT(nir_opt_intrinsics);
OPT(nir_opt_idiv_const, 32);
OPT(nir_opt_algebraic);
OPT(nir_opt_constant_folding);
if (lower_flrp != 0) {
if (OPT(nir_lower_flrp,
lower_flrp,
false /* always_precise */,
compiler->devinfo->gen >= 6)) {
OPT(nir_opt_constant_folding);
}
/* Nothing should rematerialize any flrps, so we only need to do this
* lowering once.
*/
lower_flrp = 0;
}
OPT(nir_opt_dead_cf);
if (OPT(nir_opt_trivial_continues)) {
/* If nir_opt_trivial_continues makes progress, then we need to clean
* things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
* to make progress.
*/
OPT(nir_copy_prop);
OPT(nir_opt_dce);
}
OPT(nir_opt_if, false);
OPT(nir_opt_conditional_discard);
if (nir->options->max_unroll_iterations != 0) {
OPT(nir_opt_loop_unroll, indirect_mask);
}
OPT(nir_opt_remove_phis);
OPT(nir_opt_undef);
OPT(nir_lower_pack);
} while (progress);
/* Work around a Gfxbench unused local sampler variable, which would
 * otherwise trigger an assert in the opt_large_constants pass.
 */
OPT(nir_remove_dead_variables, nir_var_function_temp);
}
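/* Callback for nir_lower_bit_size: returns the bit size a small ALU
 * operation should be lowered to, or 0 to leave it alone. Integer
 * division/remainder and float rounding lack native sub-32-bit support,
 * and the transcendental math functions only handle 16-bit sources on
 * Gen9+.
 */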
static unsigned
lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
{
assert(alu->dest.dest.is_ssa);
if (alu->dest.dest.ssa.bit_size >= 32)
return 0;
const struct brw_compiler *compiler = (const struct brw_compiler *) data;
switch (alu->op) {
case nir_op_idiv:
case nir_op_imod:
case nir_op_irem:
case nir_op_udiv:
case nir_op_umod:
case nir_op_fceil:
case nir_op_ffloor:
case nir_op_ffract:
case nir_op_fround_even:
case nir_op_ftrunc:
return 32;
case nir_op_frcp:
case nir_op_frsq:
case nir_op_fsqrt:
case nir_op_fpow:
case nir_op_fexp2:
case nir_op_flog2:
case nir_op_fsin:
case nir_op_fcos:
return compiler->devinfo->gen < 9 ? 32 : 0;
default:
return 0;
}
}
/* Does some simple lowering and runs the standard suite of optimizations
*
* This is intended to be called more-or-less directly after you get the
* shader out of GLSL or some other source. While it is geared towards i965,
* it is not at all generator-specific except for the is_scalar flag. Even
* there, it is safe to call with is_scalar = false for a shader that is
* intended for the FS backend as long as brw_nir_optimize is called again with
* is_scalar = true to scalarize everything prior to code gen.
*/
void
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
const nir_shader *softfp64)
{
const struct gen_device_info *devinfo = compiler->devinfo;
UNUSED bool progress; /* Written by OPT */
const bool is_scalar = compiler->scalar_stage[nir->info.stage];
if (is_scalar) {
OPT(nir_lower_alu_to_scalar, NULL);
}
if (nir->info.stage == MESA_SHADER_GEOMETRY)
OPT(nir_lower_gs_intrinsics);
/* See also brw_nir_trig_workarounds.py */
if (compiler->precise_trig &&
!(devinfo->gen >= 10 || devinfo->is_kabylake))
OPT(brw_nir_apply_trig_workarounds);
static const nir_lower_tex_options tex_options = {
.lower_txp = ~0,
.lower_txf_offset = true,
.lower_rect_offset = true,
.lower_tex_without_implicit_lod = true,
.lower_txd_cube_map = true,
.lower_txb_shadow_clamp = true,
.lower_txd_shadow_clamp = true,
.lower_txd_offset_clamp = true,
.lower_tg4_offsets = true,
};
OPT(nir_lower_tex, &tex_options);
OPT(nir_normalize_cubemap_coords);
OPT(nir_lower_global_vars_to_local);
OPT(nir_split_var_copies);
OPT(nir_split_struct_vars, nir_var_function_temp);
brw_nir_optimize(nir, compiler, is_scalar, true);
OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
OPT(nir_lower_int64, nir->options->lower_int64_options);
/* This needs to be run after the first optimization pass but before we
* lower indirect derefs away.
*/
if (compiler->supports_shader_constants) {
OPT(nir_opt_large_constants, NULL, 32);
}
OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
if (is_scalar) {
OPT(nir_lower_load_const_to_scalar);
}
/* Lower a bunch of stuff */
OPT(nir_lower_var_copies);
OPT(nir_lower_system_values);
const nir_lower_subgroups_options subgroups_options = {
.subgroup_size = BRW_SUBGROUP_SIZE,
.ballot_bit_size = 32,
.lower_to_scalar = true,
.lower_subgroup_masks = true,
.lower_vote_trivial = !is_scalar,
.lower_shuffle = true,
};
OPT(nir_lower_subgroups, &subgroups_options);
OPT(nir_lower_clip_cull_distance_arrays);
nir_variable_mode indirect_mask =
brw_nir_no_indirect_mask(compiler, nir->info.stage);
OPT(nir_lower_indirect_derefs, indirect_mask);
/* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
* SSBOs, our back-end is capable of loading an entire vec4 at a time and
* we would like to take advantage of that whenever possible regardless of
* whether or not the app gives us full loads. This should allow the
* optimizer to combine UBO and SSBO load operations and save us some send
* messages.
*/
OPT(nir_lower_array_deref_of_vec,
nir_var_mem_ubo | nir_var_mem_ssbo,
nir_lower_direct_array_deref_of_vec_load);
/* Get rid of split copies */
brw_nir_optimize(nir, compiler, is_scalar, false);
}
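/* Optimize the interface between two adjacent pipeline stages: split
 * arrays and vectors of varyings, drop the unused ones, re-vectorize
 * what remains, and re-run the optimizer on whatever changed.
 */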
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
nir_shader *producer, nir_shader *consumer)
{
nir_lower_io_arrays_to_elements(producer, consumer);
nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];
if (p_is_scalar && c_is_scalar) {
NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
brw_nir_optimize(producer, compiler, p_is_scalar, false);
brw_nir_optimize(consumer, compiler, c_is_scalar, false);
}
if (nir_link_opt_varyings(producer, consumer))
brw_nir_optimize(consumer, compiler, c_is_scalar, false);
NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
if (nir_remove_unused_varyings(producer, consumer)) {
NIR_PASS_V(producer, nir_lower_global_vars_to_local);
NIR_PASS_V(consumer, nir_lower_global_vars_to_local);
/* The backend might not be able to handle indirects on
* temporaries so we need to lower indirects on any of the
* varyings we have demoted here.
*/
NIR_PASS_V(producer, nir_lower_indirect_derefs,
brw_nir_no_indirect_mask(compiler, producer->info.stage));
NIR_PASS_V(consumer, nir_lower_indirect_derefs,
brw_nir_no_indirect_mask(compiler, consumer->info.stage));
brw_nir_optimize(producer, compiler, p_is_scalar, false);
brw_nir_optimize(consumer, compiler, c_is_scalar, false);
}
NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);
if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
/* Calling lower_io_to_vector creates output variable writes with
* write-masks. On non-TCS outputs, the back-end can't handle it and we
* need to call nir_lower_io_to_temporaries to get rid of them. This,
* in turn, creates temporary variables and extra copy_deref intrinsics
* that we need to clean up.
*/
NIR_PASS_V(producer, nir_lower_io_to_temporaries,
nir_shader_get_entrypoint(producer), true, false);
NIR_PASS_V(producer, nir_lower_global_vars_to_local);
NIR_PASS_V(producer, nir_split_var_copies);
NIR_PASS_V(producer, nir_lower_var_copies);
}
}
/* Prepare the given shader for codegen
*
* This function is intended to be called right before going into the actual
* backend and is highly backend-specific. Also, once this function has been
* called on a shader, it will no longer be in SSA form so most optimizations
* will not work.
*/
void
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar)
{
const struct gen_device_info *devinfo = compiler->devinfo;
bool debug_enabled =
(INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->info.stage));
UNUSED bool progress; /* Written by OPT */
OPT(brw_nir_lower_mem_access_bit_sizes);
do {
progress = false;
OPT(nir_opt_algebraic_before_ffma);
} while (progress);
brw_nir_optimize(nir, compiler, is_scalar, false);
if (OPT(nir_lower_int64, nir->options->lower_int64_options))
brw_nir_optimize(nir, compiler, is_scalar, false);
if (devinfo->gen >= 6) {
/* Try and fuse multiply-adds */
OPT(brw_nir_opt_peephole_ffma);
}
if (OPT(nir_opt_comparison_pre)) {
OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
/* Do the select peephole again. nir_opt_comparison_pre (combined with
* the other optimization passes) will have removed at least one
* instruction from one of the branches of the if-statement, so now it
* might be under the threshold of conversion to bcsel.
*
* See brw_nir_optimize for the explanation of is_vec4_tessellation.
*/
const bool is_vec4_tessellation = !is_scalar &&
(nir->info.stage == MESA_SHADER_TESS_CTRL ||
nir->info.stage == MESA_SHADER_TESS_EVAL);
OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
compiler->devinfo->gen >= 6);
}
do {
progress = false;
if (OPT(nir_opt_algebraic_late)) {
/* At this late stage, anything that makes more constants will wreak
* havoc on the vec4 backend. The handling of constants in the vec4
* backend is not good.
*/
if (is_scalar) {
OPT(nir_opt_constant_folding);
OPT(nir_copy_prop);
}
OPT(nir_opt_dce);
OPT(nir_opt_cse);
}
} while (progress);
OPT(brw_nir_lower_conversions);
if (is_scalar)
OPT(nir_lower_alu_to_scalar, NULL);
OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_move_comparisons);
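/* The backend expects 32-bit booleans (0 / ~0) rather than 1-bit ones,
 * so lower them before leaving SSA form.
 */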
OPT(nir_lower_bool_to_int32);
OPT(nir_lower_locals_to_regs);
if (unlikely(debug_enabled)) {
/* Re-index SSA defs so we print more sensible numbers. */
nir_foreach_function(function, nir) {
if (function->impl)
nir_index_ssa_defs(function->impl);
}
fprintf(stderr, "NIR (SSA form) for %s shader:\n",
_mesa_shader_stage_to_string(nir->info.stage));
nir_print_shader(nir, stderr);
}
OPT(nir_convert_from_ssa, true);
if (!is_scalar) {
OPT(nir_move_vec_src_uses_to_dest);
OPT(nir_lower_vec_to_movs);
}
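   /* Lowering passes such as nir_lower_locals_to_regs can leave dead
    * instructions behind (derefs, for example), so clean them up before the
    * backends see them.
    */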
OPT(nir_opt_dce);
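   /* Rematerializing comparisons near their uses shortens flag live ranges,
    * which mostly pays off as fewer spills and fills.
    */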
if (OPT(nir_opt_rematerialize_compares))
OPT(nir_opt_dce);
/* This is the last pass we run before we start emitting stuff. It
* determines when we need to insert boolean resolves on Gen <= 5. We
* run it last because it stashes data in instr->pass_flags and we don't
* want that to be squashed by other NIR passes.
*/
if (devinfo->gen <= 5)
brw_nir_analyze_boolean_resolves(nir);
nir_sweep(nir);
if (unlikely(debug_enabled)) {
fprintf(stderr, "NIR (final form) for %s shader:\n",
_mesa_shader_stage_to_string(nir->info.stage));
nir_print_shader(nir, stderr);
}
}
void
brw_nir_apply_sampler_key(nir_shader *nir,
const struct brw_compiler *compiler,
const struct brw_sampler_prog_key_data *key_tex,
bool is_scalar)
{
const struct gen_device_info *devinfo = compiler->devinfo;
nir_lower_tex_options tex_options = {
.lower_txd_clamp_bindless_sampler = true,
.lower_txd_clamp_if_sampler_index_not_lt_16 = true,
};
/* Iron Lake and prior require lowering of all rectangle textures */
if (devinfo->gen < 6)
tex_options.lower_rect = true;
/* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
if (devinfo->gen < 8) {
tex_options.saturate_s = key_tex->gl_clamp_mask[0];
tex_options.saturate_t = key_tex->gl_clamp_mask[1];
tex_options.saturate_r = key_tex->gl_clamp_mask[2];
}
/* Prior to Haswell, we have to fake texture swizzle */
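   /* The key only carries non-identity swizzles for samplers where the
    * hardware can't apply them itself, so this loop is a no-op elsewhere.
    */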
for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
if (key_tex->swizzles[s] == SWIZZLE_NOOP)
continue;
tex_options.swizzle_result |= (1 << s);
for (unsigned c = 0; c < 4; c++)
tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
}
/* Prior to Haswell, we have to lower gradients on shadow samplers */
tex_options.lower_txd_shadow = devinfo->gen < 8 && !devinfo->is_haswell;
tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;
tex_options.lower_xyuv_external = key_tex->xyuv_image_mask;
   /* Set up the array of scaling factors for each texture. */
memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
sizeof(tex_options.scale_factors));
if (nir_lower_tex(nir, &tex_options)) {
nir_validate_shader(nir, "after nir_lower_tex");
brw_nir_optimize(nir, compiler, is_scalar, false);
}
}
enum brw_reg_type
brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
{
switch (type) {
case nir_type_uint:
case nir_type_uint32:
return BRW_REGISTER_TYPE_UD;
case nir_type_bool:
case nir_type_int:
case nir_type_bool32:
case nir_type_int32:
return BRW_REGISTER_TYPE_D;
case nir_type_float:
case nir_type_float32:
return BRW_REGISTER_TYPE_F;
case nir_type_float16:
return BRW_REGISTER_TYPE_HF;
case nir_type_float64:
return BRW_REGISTER_TYPE_DF;
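   /* Pre-Gen8 hardware has no Q/UQ register types, so 64-bit integers
    * borrow the DF type for data movement.
    */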
case nir_type_int64:
return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
case nir_type_uint64:
return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
case nir_type_int16:
return BRW_REGISTER_TYPE_W;
case nir_type_uint16:
return BRW_REGISTER_TYPE_UW;
case nir_type_int8:
return BRW_REGISTER_TYPE_B;
case nir_type_uint8:
return BRW_REGISTER_TYPE_UB;
default:
unreachable("unknown type");
}
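   /* Unreachable; this return only quiets missing-return warnings from
    * compilers that can't see that.
    */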
return BRW_REGISTER_TYPE_F;
}
/* Returns the glsl_base_type corresponding to a nir_alu_type.
* This is used by both brw_vec4_nir and brw_fs_nir.
*/
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
switch (type) {
case nir_type_float:
case nir_type_float32:
return GLSL_TYPE_FLOAT;
case nir_type_float16:
return GLSL_TYPE_FLOAT16;
case nir_type_float64:
return GLSL_TYPE_DOUBLE;
case nir_type_int:
case nir_type_int32:
return GLSL_TYPE_INT;
case nir_type_uint:
case nir_type_uint32:
return GLSL_TYPE_UINT;
case nir_type_int16:
return GLSL_TYPE_INT16;
case nir_type_uint16:
return GLSL_TYPE_UINT16;
default:
unreachable("bad type");
}
}
nir_shader *
brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
const nir_shader_compiler_options *options,
const struct brw_tcs_prog_key *key)
{
nir_builder b;
nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_TESS_CTRL,
options);
nir_shader *nir = b.shader;
nir_variable *var;
nir_intrinsic_instr *load;
nir_intrinsic_instr *store;
nir_ssa_def *zero = nir_imm_int(&b, 0);
nir_ssa_def *invoc_id = nir_load_invocation_id(&b);
nir->info.inputs_read = key->outputs_written &
~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
nir->info.outputs_written = key->outputs_written;
nir->info.tess.tcs_vertices_out = key->input_vertices;
nir->info.name = ralloc_strdup(nir, "passthrough");
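   /* Two vec4 push constants (hdr_0 and hdr_1 below) supply the 8 dwords of
    * the patch URB header, including the tessellation levels.
    */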
nir->num_uniforms = 8 * sizeof(uint32_t);
var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
var->data.location = 0;
var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
var->data.location = 1;
/* Write the patch URB header. */
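   /* Slot (TESS_LEVEL_INNER - i) is TESS_LEVEL_INNER for i == 0 and
    * TESS_LEVEL_OUTER for i == 1, since the outer slot immediately precedes
    * the inner one.
    */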
for (int i = 0; i <= 1; i++) {
load = nir_intrinsic_instr_create(nir, nir_intrinsic_load_uniform);
load->num_components = 4;
load->src[0] = nir_src_for_ssa(zero);
nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
nir_intrinsic_set_base(load, i * 4 * sizeof(uint32_t));
nir_builder_instr_insert(&b, &load->instr);
store = nir_intrinsic_instr_create(nir, nir_intrinsic_store_output);
store->num_components = 4;
store->src[0] = nir_src_for_ssa(&load->dest.ssa);
store->src[1] = nir_src_for_ssa(zero);
nir_intrinsic_set_base(store, VARYING_SLOT_TESS_LEVEL_INNER - i);
nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
nir_builder_instr_insert(&b, &store->instr);
}
/* Copy inputs to outputs. */
uint64_t varyings = nir->info.inputs_read;
while (varyings != 0) {
const int varying = ffsll(varyings) - 1;
load = nir_intrinsic_instr_create(nir,
nir_intrinsic_load_per_vertex_input);
load->num_components = 4;
load->src[0] = nir_src_for_ssa(invoc_id);
load->src[1] = nir_src_for_ssa(zero);
nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
nir_intrinsic_set_base(load, varying);
nir_builder_instr_insert(&b, &load->instr);
store = nir_intrinsic_instr_create(nir,
nir_intrinsic_store_per_vertex_output);
store->num_components = 4;
store->src[0] = nir_src_for_ssa(&load->dest.ssa);
store->src[1] = nir_src_for_ssa(invoc_id);
store->src[2] = nir_src_for_ssa(zero);
nir_intrinsic_set_base(store, varying);
nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
nir_builder_instr_insert(&b, &store->instr);
varyings &= ~BITFIELD64_BIT(varying);
}
nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
brw_preprocess_nir(compiler, nir, NULL);
return nir;
}