/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_shader.h"
#include "common/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
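
/* Helpers for classifying the IO intrinsics that the lowering passes below
 * care about.
 */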
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}

/**
 * In many cases, we just add the base and offset together, so there's no
 * reason to keep them separate.  Sometimes, combining them is essential:
 * if a shader only accesses part of a compound variable (such as a matrix
 * or array), the variable's base may not actually exist in the VUE map.
 *
 * This pass adds constant offsets to instr->const_index[0], and resets
 * the offset source to 0.  Non-constant offsets remain unchanged - since
 * we don't know what part of a compound variable is accessed, we allocate
 * storage for the entire thing.
 */
static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);
         nir_const_value *const_offset = nir_src_as_const_value(*offset);

         if (const_offset) {
            intrin->const_index[0] += const_offset->u32[0];
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
         }
      }
   }
   return true;
}
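
/* An illustrative before/after (pseudo-NIR, not exact syntax): given a
 * constant offset,
 *
 *    load_input (base=5, offset=ssa_2)   with ssa_2 = load_const (3)
 *
 * becomes
 *
 *    load_input (base=8, offset=ssa_9)   with ssa_9 = load_const (0)
 *
 * so direct accesses can be identified from const_index[0] alone.
 */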

static void
add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }
}
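
/* The next two helpers rewrite TCS output / TES input intrinsics to match
 * the patch URB layout.  Summarizing the DWord assignments encoded in
 * remap_tess_levels() (vec4 slots 0-1 form the patch header):
 *
 *    quads:     gl_TessLevelOuter[0..3] in DWords 7-4, Inner[0..1] in 3-2
 *    triangles: gl_TessLevelOuter[0..2] in DWords 7-5, Inner[0]    in 4
 *    isolines:  gl_TessLevelOuter[0..1] in DWords 6-7
 *
 * Components that don't exist in a given domain are out of bounds: such
 * stores are deleted and loads are replaced with undefs.
 */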

static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                  GLenum primitive_mode)
{
   const int location = nir_intrinsic_base(intr);
   const unsigned component = nir_intrinsic_component(intr);
   bool out_of_bounds;

   if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
      switch (primitive_mode) {
      case GL_QUADS:
         /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
         nir_intrinsic_set_base(intr, 0);
         nir_intrinsic_set_component(intr, 3 - component);
         out_of_bounds = false;
         break;
      case GL_TRIANGLES:
         /* gl_TessLevelInner[0] lives at DWord 4. */
         nir_intrinsic_set_base(intr, 1);
         out_of_bounds = component > 0;
         break;
      case GL_ISOLINES:
         out_of_bounds = true;
         break;
      default:
         unreachable("Bogus tessellation domain");
      }
   } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
      if (primitive_mode == GL_ISOLINES) {
         /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
         out_of_bounds = component > 1;
      } else {
         /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
         out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES;
      }
   } else {
      return false;
   }

   if (out_of_bounds) {
      if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
         b->cursor = nir_before_instr(&intr->instr);
         nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(undef));
      }
      nir_instr_remove(&intr->instr);
   }

   return true;
}

static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map,
                        GLenum tes_primitive_mode)
{
   const bool is_passthrough_tcs = b->shader->info.name &&
      strcmp(b->shader->info.name, "passthrough") == 0;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = b->shader->info.stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {

         if (!is_passthrough_tcs &&
             remap_tess_levels(b, intrin, tes_primitive_mode))
            continue;

         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
         if (vertex) {
            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
            if (const_vertex) {
               intrin->const_index[0] += const_vertex->u32[0] *
                                         vue_map->num_per_vertex_slots;
            } else {
               b->cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(b,
                           nir_ssa_for_src(b, *vertex, 1),
                           nir_imm_int(b,
                                       vue_map->num_per_vertex_slots));

               /* Add it to the existing offset */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(b, vertex_offset,
                           nir_ssa_for_src(b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
   return true;
}
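
/* A sketch of the VS input layout produced below, assuming both the SGVs
 * and gl_DrawID are read (illustrative; derived from the code that follows):
 *
 *    slots [0, num_inputs)  user attributes, packed by gl_vert_attrib order
 *    slot  num_inputs       <FirstVertex, BaseInstance, VertexID, InstanceID>
 *    slot  num_inputs + 1   <DrawID, IsIndexedDraw, -, ->
 */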

void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        const uint8_t *vs_attrib_wa_flags)
{
   /* Start with the location of the variable's base. */
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
    * loaded as one vec4 or dvec4 per element (or matrix column), depending on
    * whether it is a double-precision type or not.
    */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);

   /* The last step is to remap VERT_ATTRIB_* to actual registers */

   /* Whether or not we have any system generated values.  gl_DrawID is not
    * included here as it lives in its own vec4.
    */
   const bool has_sgvs =
      nir->info.system_values_read &
      (BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX) |
       BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
       BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
       BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID));

   const unsigned num_inputs = _mesa_bitcount_64(nir->info.inputs_read);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_first_vertex:
            case nir_intrinsic_load_base_instance:
            case nir_intrinsic_load_vertex_id_zero_base:
            case nir_intrinsic_load_instance_id:
            case nir_intrinsic_load_is_indexed_draw:
            case nir_intrinsic_load_draw_id: {
               b.cursor = nir_after_instr(&intrin->instr);

               /* gl_VertexID and friends are stored by the VF as the last
                * vertex element.  We convert them to load_input intrinsics at
                * the right location.
                */
               nir_intrinsic_instr *load =
                  nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
               load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));

               nir_intrinsic_set_base(load, num_inputs);
               switch (intrin->intrinsic) {
               case nir_intrinsic_load_first_vertex:
                  nir_intrinsic_set_component(load, 0);
                  break;
               case nir_intrinsic_load_base_instance:
                  nir_intrinsic_set_component(load, 1);
                  break;
               case nir_intrinsic_load_vertex_id_zero_base:
                  nir_intrinsic_set_component(load, 2);
                  break;
               case nir_intrinsic_load_instance_id:
                  nir_intrinsic_set_component(load, 3);
                  break;
               case nir_intrinsic_load_draw_id:
               case nir_intrinsic_load_is_indexed_draw:
                  /* gl_DrawID and IsIndexedDraw are stored right after
                   * gl_VertexID and friends if any of them exist.
                   */
                  nir_intrinsic_set_base(load, num_inputs + has_sgvs);
                  if (intrin->intrinsic == nir_intrinsic_load_draw_id)
                     nir_intrinsic_set_component(load, 0);
                  else
                     nir_intrinsic_set_component(load, 1);
                  break;
               default:
                  unreachable("Invalid system value intrinsic");
               }

               load->num_components = 1;
               nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
               nir_builder_instr_insert(&b, &load->instr);

               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                        nir_src_for_ssa(&load->dest.ssa));
               nir_instr_remove(&intrin->instr);
               break;
            }

            case nir_intrinsic_load_input: {
               /* Attributes come in a contiguous block, ordered by their
                * gl_vert_attrib value.  That means we can compute the slot
                * number for an attribute by masking out the enabled attributes
                * before it and counting the bits.
                */
               int attr = nir_intrinsic_base(intrin);
               int slot = _mesa_bitcount_64(nir->info.inputs_read &
                                            BITFIELD64_MASK(attr));
               nir_intrinsic_set_base(intrin, slot);
               break;
            }

            default:
               break; /* Nothing to do */
            }
         }
      }
   }
}

void
brw_nir_lower_vue_inputs(nir_shader *nir,
                         const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            if (intrin->intrinsic == nir_intrinsic_load_input ||
                intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
               /* Offset 0 is the VUE header, which contains
                * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
                * VARYING_SLOT_PSIZ [.w].
                */
               int varying = nir_intrinsic_base(intrin);
               int vue_slot;
               switch (varying) {
               case VARYING_SLOT_PSIZ:
                  nir_intrinsic_set_base(intrin, 0);
                  nir_intrinsic_set_component(intrin, 3);
                  break;

               default:
                  vue_slot = vue_map->varying_to_slot[varying];
                  assert(vue_slot != -1);
                  nir_intrinsic_set_base(intrin, vue_slot);
                  break;
               }
            }
         }
      }
   }
}
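
/* Example of the remap above: a load of gl_PointSize arrives as a load_input
 * with base == VARYING_SLOT_PSIZ and leaves as a load of vec4 slot 0,
 * component 3 - the .w channel of the VUE header.
 */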

void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map,
                                    nir->info.tess.primitive_mode);
         }
      }
   }
}

void
brw_nir_lower_fs_inputs(nir_shader *nir,
                        const struct gen_device_info *devinfo,
                        const struct brw_wm_prog_key *key)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;

      /* Apply default interpolation mode.
       *
       * Everything defaults to smooth except for the legacy GL color
       * built-in variables, which might be flat depending on API state.
       */
      if (var->data.interpolation == INTERP_MODE_NONE) {
         const bool flat = key->flat_shade &&
            (var->data.location == VARYING_SLOT_COL0 ||
             var->data.location == VARYING_SLOT_COL1);

         var->data.interpolation = flat ? INTERP_MODE_FLAT
                                        : INTERP_MODE_SMOOTH;
      }

      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      if (devinfo->gen < 6) {
         var->data.centroid = false;
         var->data.sample = false;
      }
   }

   nir_lower_io_options lower_io_options = 0;
   if (key->persample_interp)
      lower_io_options |= nir_lower_io_force_sample_interpolation;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);
}

void
brw_nir_lower_vue_outputs(nir_shader *nir)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
}

void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
                          GLenum tes_primitive_mode)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_out);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
         }
      }
   }
}

void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location =
         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}
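
/* Convenience wrapper around NIR_PASS: expects `nir` and `progress` to be in
 * scope, accumulates into `progress`, and evaluates to whether this
 * particular pass made progress.
 */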
#define OPT(pass, ...) ({                                  \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   if (this_progress)                                      \
      progress = true;                                     \
   this_progress;                                          \
})

static nir_variable_mode
brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
                         gl_shader_stage stage)
{
   nir_variable_mode indirect_mask = 0;

   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
      indirect_mask |= nir_var_shader_out;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

   return indirect_mask;
}
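
/* The shared optimization loop.  allow_copies should be true only for the
 * first invocation on a shader (see brw_preprocess_nir); later calls assume
 * copy_deref instructions have already been lowered away.
 */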
nir_shader *
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
                 bool is_scalar, bool allow_copies)
{
   nir_variable_mode indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);

   bool progress;
   do {
      progress = false;
      OPT(nir_split_array_vars, nir_var_local);
      OPT(nir_shrink_vec_array_vars, nir_var_local);
      OPT(nir_lower_vars_to_ssa);
      if (allow_copies) {
         /* Only run this pass in the first call to brw_nir_optimize.  Later
          * calls assume that we've lowered away any copy_deref instructions
          * and we don't want to introduce any more.
          */
         OPT(nir_opt_find_array_copies);
      }
      OPT(nir_opt_copy_prop_vars);

      if (is_scalar) {
         OPT(nir_lower_alu_to_scalar);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_peephole_select, 0);
      OPT(nir_opt_intrinsics);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      if (OPT(nir_opt_trivial_continues)) {
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
      }
      OPT(nir_opt_if);
      if (nir->options->max_unroll_iterations != 0) {
         OPT(nir_opt_loop_unroll, indirect_mask);
      }
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
      OPT(nir_lower_doubles, nir_lower_drcp |
                             nir_lower_dsqrt |
                             nir_lower_drsq |
                             nir_lower_dtrunc |
                             nir_lower_dfloor |
                             nir_lower_dceil |
                             nir_lower_dfract |
                             nir_lower_dround_even |
                             nir_lower_dmod);
      OPT(nir_lower_pack);
   } while (progress);

   /* Workaround Gfxbench unused local sampler variable which will trigger an
    * assert in the opt_large_constants pass.
    */
   OPT(nir_remove_dead_variables, nir_var_local);

   return nir;
}
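
/* Callback for nir_lower_bit_size: promote 16-bit integer division and
 * remainder ALU ops to 32 bits (we assume the hardware has no native 16-bit
 * support for these) and leave everything else alone.
 */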
static unsigned
lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
{
   assert(alu->dest.dest.is_ssa);
   if (alu->dest.dest.ssa.bit_size != 16)
      return 0;

   switch (alu->op) {
   case nir_op_idiv:
   case nir_op_imod:
   case nir_op_irem:
   case nir_op_udiv:
   case nir_op_umod:
      return 32;
   default:
      return 0;
   }
}

/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
nir_shader *
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   UNUSED bool progress; /* Written by OPT */

   const bool is_scalar = compiler->scalar_stage[nir->info.stage];

   if (nir->info.stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);

   /* See also brw_nir_trig_workarounds.py */
   if (compiler->precise_trig &&
       !(devinfo->gen >= 10 || devinfo->is_kabylake))
      OPT(brw_nir_apply_trig_workarounds);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
      .lower_txf_offset = true,
      .lower_rect_offset = true,
      .lower_txd_cube_map = true,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);
   OPT(nir_split_struct_vars, nir_var_local);

   /* Run opt_algebraic before int64 lowering so we can hopefully get rid
    * of some int64 instructions.
    */
   OPT(nir_opt_algebraic);

   /* Lower int64 instructions before nir_optimize so that loop unrolling
    * sees their actual cost.
    */
   nir_lower_int64(nir, nir_lower_imul64 |
                        nir_lower_isign64 |
                        nir_lower_divmod64);

   nir = brw_nir_optimize(nir, compiler, is_scalar, true);

   /* This needs to be run after the first optimization pass but before we
    * lower indirect derefs away
    */
   if (compiler->supports_shader_constants) {
      OPT(nir_opt_large_constants, NULL, 32);
   }

   nir_lower_bit_size(nir, lower_bit_size_callback, NULL);

   if (is_scalar) {
      OPT(nir_lower_load_const_to_scalar);
   }

   /* Lower a bunch of stuff */
   OPT(nir_lower_var_copies);

   OPT(nir_lower_system_values);

   const nir_lower_subgroups_options subgroups_options = {
      .subgroup_size = BRW_SUBGROUP_SIZE,
      .ballot_bit_size = 32,
      .lower_to_scalar = true,
      .lower_subgroup_masks = true,
      .lower_vote_trivial = !is_scalar,
      .lower_shuffle = true,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   OPT(nir_lower_clip_cull_distance_arrays);

   nir_variable_mode indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);
   nir_lower_indirect_derefs(nir, indirect_mask);

   /* Get rid of split copies */
   nir = brw_nir_optimize(nir, compiler, is_scalar, false);

   return nir;
}
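
/* Cross-stage link-time optimization for a producer/consumer pair: split IO
 * arrays into elements, drop varyings the consumer never reads, and
 * re-optimize both shaders afterwards.
 */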
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader **producer, nir_shader **consumer)
{
   nir_lower_io_arrays_to_elements(*producer, *consumer);
   nir_validate_shader(*producer);
   nir_validate_shader(*consumer);

   const bool p_is_scalar =
      compiler->scalar_stage[(*producer)->info.stage];
   const bool c_is_scalar =
      compiler->scalar_stage[(*consumer)->info.stage];

   if (p_is_scalar && c_is_scalar) {
      NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
      NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
      *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
      *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
   }

   NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
   NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);

   if (nir_remove_unused_varyings(*producer, *consumer)) {
      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);

      /* The backend might not be able to handle indirects on
       * temporaries so we need to lower indirects on any of the
       * varyings we have demoted here.
       */
      NIR_PASS_V(*producer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, (*producer)->info.stage));
      NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));

      *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
      *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
   }
}

/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
nir_shader *
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
                    bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->info.stage));

   UNUSED bool progress; /* Written by OPT */

   do {
      progress = false;
      OPT(nir_opt_algebraic_before_ffma);
   } while (progress);

   nir = brw_nir_optimize(nir, compiler, is_scalar, false);

   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_to_source_mods);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);
   OPT(nir_opt_move_comparisons);

   OPT(nir_lower_locals_to_regs);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function(function, nir) {
         if (function->impl)
            nir_index_ssa_defs(function->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }

   OPT(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   OPT(nir_opt_dce);

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}

nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_compiler *compiler,
                          const struct brw_sampler_prog_key_data *key_tex,
                          bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   nir_lower_tex_options tex_options = { 0 };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   /* Prior to Haswell, we have to lower gradients on shadow samplers */
   tex_options.lower_txd_shadow = devinfo->gen < 8 && !devinfo->is_haswell;

   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
   tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;

   if (nir_lower_tex(nir, &tex_options)) {
      nir_validate_shader(nir);
      nir = brw_nir_optimize(nir, compiler, is_scalar, false);
   }

   return nir;
}
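
/* Maps a nir_alu_type to the corresponding brw_reg_type.  Note that 64-bit
 * integer types fall back to BRW_REGISTER_TYPE_DF on Gen < 8, which (as this
 * mapping assumes) has no native Q/UQ register types.
 */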
enum brw_reg_type
brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
   case nir_type_bool32:
   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
   case nir_type_float16:
      return BRW_REGISTER_TYPE_HF;
   case nir_type_float64:
      return BRW_REGISTER_TYPE_DF;
   case nir_type_int64:
      return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
   case nir_type_uint64:
      return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
   case nir_type_int16:
      return BRW_REGISTER_TYPE_W;
   case nir_type_uint16:
      return BRW_REGISTER_TYPE_UW;
   case nir_type_int8:
      return BRW_REGISTER_TYPE_B;
   case nir_type_uint8:
      return BRW_REGISTER_TYPE_UB;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
   case nir_type_float32:
      return GLSL_TYPE_FLOAT;

   case nir_type_float16:
      return GLSL_TYPE_FLOAT16;

   case nir_type_float64:
      return GLSL_TYPE_DOUBLE;

   case nir_type_int:
   case nir_type_int32:
      return GLSL_TYPE_INT;

   case nir_type_uint:
   case nir_type_uint32:
      return GLSL_TYPE_UINT;

   case nir_type_int16:
      return GLSL_TYPE_INT16;

   case nir_type_uint16:
      return GLSL_TYPE_UINT16;

   default:
      unreachable("bad type");
   }
}
|