/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "util/set.h"
#include "util/hash_table.h"

/* This file contains various little helpers for doing simple linking in
 * NIR. Eventually, we'll probably want a full-blown varying packing
 * implementation in here. Right now, it just deletes unused things.
 */

/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}
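
/* Structs and interface blocks are conservatively treated as occupying all
 * four components; everything else reports the vector width of its
 * (possibly array-wrapped) type.
 */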
static uint8_t
get_num_components(nir_variable *var)
{
   if (glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
      return 4;

   return glsl_get_vector_elements(glsl_without_array(var->type));
}
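
/* Accumulates, per output component, the slot masks of TCS outputs that the
 * TCS reads back itself via load_deref.
 */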
static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);
            for (unsigned i = 0; i < get_num_components(var); i++) {
               if (var->data.patch) {
                  patches_read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               } else {
                  read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               }
            }
         }
      }
   }
}

/**
 * Helper for removing unused shader I/O variables, by demoting them to global
 * variables (which may then be dead code eliminated).
 *
 * Example usage is:
 *
 * progress = nir_remove_unused_io_vars(producer,
 *                                      &producer->outputs,
 *                                      read, patches_read) ||
 *                                      progress;
 *
 * The "used" should be an array of 4 uint64_ts (probably of VARYING_BIT_*)
 * representing each .location_frac used. Note that for vector variables,
 * only the first channel (.location_frac) is examined for deciding if the
 * variable is used!
 */
bool
nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                          uint64_t *used_by_other_stage,
                          uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      if (var->data.explicit_xfb_buffer)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_shader_temp;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   if (progress)
      nir_fixup_deref_modes(shader);

   return progress;
}
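
/* Demotes producer outputs that the consumer never reads, and consumer
 * inputs that the producer never writes, to global variables. TCS outputs
 * that are read back by the TCS itself are kept (see tcs_add_output_reads).
 */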
bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_variable(var, &producer->outputs) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         } else {
            written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         }
      }
   }

   nir_foreach_variable(var, &consumer->inputs) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         } else {
            read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         }
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = nir_remove_unused_io_vars(producer, &producer->outputs, read,
                                        patches_read);

   progress = nir_remove_unused_io_vars(consumer, &consumer->inputs, written,
                                        patches_written) || progress;

   return progress;
}
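
/* Returns the effective interpolation mode for a varying: integer types are
 * always flat, an explicitly requested mode is used otherwise, and
 * unspecified modes either default to smooth or stay INTERP_MODE_NONE.
 */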
static uint8_t
get_interp_type(nir_variable *var, const struct glsl_type *type,
                bool default_to_smooth_interp)
{
   if (glsl_type_is_integer(type))
      return INTERP_MODE_FLAT;
   else if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}

#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2

static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}

static bool
is_packing_supported_for_type(const struct glsl_type *type)
{
   /* We ignore complex types such as arrays, matrices, structs and bitsizes
    * other than 32-bit. All other vector types should have been split into
    * scalar variables by the lower_io_to_scalar pass. The only exception
    * should be OpenGL xfb varyings.
    * TODO: add support for more complex types?
    */
   return glsl_type_is_scalar(type) && glsl_type_is_32bit(type);
}

struct assigned_comps
{
   uint8_t comps;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
};

/* Packing arrays and dual slot varyings is difficult so to avoid complex
 * algorithms this function just assigns them their existing location for now.
 * TODO: allow better packing of complex types.
 */
static void
get_unmoveable_components_masks(struct exec_list *var_list,
                                struct assigned_comps *comps,
                                gl_shader_stage stage,
                                bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* If we can pack this varying then don't mark the components as
          * used.
          */
         if (is_packing_supported_for_type(type))
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;

         unsigned elements =
            glsl_type_is_vector_or_scalar(glsl_without_array(type)) ?
            glsl_get_vector_elements(glsl_without_array(type)) : 4;

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            if (dual_slot) {
               if (i & 1) {
                  comps[location + i].comps |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * dmul) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i].comps |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i].comps |=
                  ((1 << (elements * dmul)) - 1) << var->data.location_frac;
            }

            comps[location + i].interp_type =
               get_interp_type(var, type, default_to_smooth_interp);
            comps[location + i].interp_loc = get_interp_loc(var);
            comps[location + i].is_32bit =
               glsl_type_is_32bit(glsl_without_array(type));
         }
      }
   }
}

struct varying_loc
{
   uint8_t component;
   uint32_t location;
};
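
/* ORs the bits of slots_used_mask that cover this variable into the patch or
 * non-patch half of slots_used.
 */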
static void
mark_all_used_slots(nir_variable *var, uint64_t *slots_used,
                    uint64_t slots_used_mask, unsigned num_slots)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |= slots_used_mask &
      BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
}
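
/* Marks a single slot, at the given offset from the variable's location, in
 * the patch or non-patch half of slots_used.
 */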
static void
mark_used_slot(nir_variable *var, uint64_t *slots_used, unsigned offset)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |=
      BITFIELD64_BIT(var->data.location - loc_offset + offset);
}
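
/* Applies the remap table to every variable in the list and rebuilds the
 * slots-used and outputs-read bitmasks to match the new locations.
 */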
static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read,
                           uint32_t *p_slots_used, uint32_t *p_out_slots_read)
{
   uint64_t out_slots_read_tmp[2] = {0};
   uint64_t slots_used_tmp[2] = {0};

   /* We don't touch builtins so just copy the bitmask */
   slots_used_tmp[0] = *slots_used & BITFIELD64_RANGE(0, VARYING_SLOT_VAR0);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc = &remap[location][var->data.location_frac];

         unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
         uint64_t used = var->data.patch ? *p_slots_used : *slots_used;
         uint64_t outs_used =
            var->data.patch ? *p_out_slots_read : *out_slots_read;
         uint64_t slots =
            BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);

         if (slots & used)
            used_across_stages = true;

         if (slots & outs_used)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages)
               mark_all_used_slots(var, slots_used_tmp, used, num_slots);

            if (outputs_read) {
               mark_all_used_slots(var, out_slots_read_tmp, outs_used,
                                   num_slots);
            }
         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  mark_used_slot(var, slots_used_tmp, i);

               if (outputs_read)
                  mark_used_slot(var, out_slots_read_tmp, i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp[0];
   *out_slots_read = out_slots_read_tmp[0];
   *p_slots_used = slots_used_tmp[1];
   *p_out_slots_read = out_slots_read_tmp[1];
}

struct varying_component {
   nir_variable *var;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
   bool is_patch;
   bool is_intra_stage_only;
   bool initialised;
};

static int
cmp_varying_component(const void *comp1_v, const void *comp2_v)
{
   struct varying_component *comp1 = (struct varying_component *) comp1_v;
   struct varying_component *comp2 = (struct varying_component *) comp2_v;

   /* We want patches to be ordered at the end of the array */
   if (comp1->is_patch != comp2->is_patch)
      return comp1->is_patch ? 1 : -1;

   /* We want to try to group together TCS outputs that are only read by other
    * TCS invocations and not consumed by the following stage.
    */
   if (comp1->is_intra_stage_only != comp2->is_intra_stage_only)
      return comp1->is_intra_stage_only ? 1 : -1;

   /* We can only pack varyings with matching interpolation types so group
    * them together.
    */
   if (comp1->interp_type != comp2->interp_type)
      return comp1->interp_type - comp2->interp_type;

   /* Interpolation loc must match also. */
   if (comp1->interp_loc != comp2->interp_loc)
      return comp1->interp_loc - comp2->interp_loc;

   /* If everything else matches just use the original location to sort */
   return comp1->var->data.location - comp2->var->data.location;
}

static void
gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
                              struct varying_component **varying_comp_info,
                              unsigned *varying_comp_info_size,
                              bool default_to_smooth_interp)
{
   unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {{0}};
   unsigned num_of_comps_to_pack = 0;

   /* Count the number of varyings that can be packed and create a mapping
    * of those varyings to the array we will pass to qsort.
    */
   nir_foreach_variable(var, &producer->outputs) {

      /* Only remap things that aren't builtins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, producer->info.stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         if (!is_packing_supported_for_type(type))
            continue;

         unsigned loc = var->data.location - VARYING_SLOT_VAR0;
         store_varying_info_idx[loc][var->data.location_frac] =
            ++num_of_comps_to_pack;
      }
   }

   *varying_comp_info_size = num_of_comps_to_pack;
   *varying_comp_info = rzalloc_array(NULL, struct varying_component,
                                      num_of_comps_to_pack);

   nir_function_impl *impl = nir_shader_get_entrypoint(consumer);

   /* Walk over the shader and populate the varying component info array */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
         if (deref->mode != nir_var_shader_in)
            continue;

         /* We only remap things that aren't builtins. */
         nir_variable *in_var = nir_deref_instr_get_variable(deref);
         if (in_var->data.location < VARYING_SLOT_VAR0)
            continue;

         unsigned location = in_var->data.location - VARYING_SLOT_VAR0;
         if (location >= MAX_VARYINGS_INCL_PATCH)
            continue;

         unsigned var_info_idx =
            store_varying_info_idx[location][in_var->data.location_frac];
         if (!var_info_idx)
            continue;

         struct varying_component *vc_info =
            &(*varying_comp_info)[var_info_idx-1];

         if (!vc_info->initialised) {
            const struct glsl_type *type = in_var->type;
            if (nir_is_per_vertex_io(in_var, consumer->info.stage) ||
                in_var->data.per_view) {
               assert(glsl_type_is_array(type));
               type = glsl_get_array_element(type);
            }

            vc_info->var = in_var;
            vc_info->interp_type =
               get_interp_type(in_var, type, default_to_smooth_interp);
            vc_info->interp_loc = get_interp_loc(in_var);
            vc_info->is_32bit = glsl_type_is_32bit(type);
            vc_info->is_patch = in_var->data.patch;
            vc_info->is_intra_stage_only = false;
            vc_info->initialised = true;
         }
      }
   }

   /* Walk over the shader and populate the varying component info array
    * for varyings which are read by other TCS instances but are not consumed
    * by the TES.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL) {
      impl = nir_shader_get_entrypoint(producer);

      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            /* We only remap things that aren't builtins. */
            nir_variable *out_var = nir_deref_instr_get_variable(deref);
            if (out_var->data.location < VARYING_SLOT_VAR0)
               continue;

            unsigned location = out_var->data.location - VARYING_SLOT_VAR0;
            if (location >= MAX_VARYINGS_INCL_PATCH)
               continue;

            unsigned var_info_idx =
               store_varying_info_idx[location][out_var->data.location_frac];
            if (!var_info_idx) {
               /* Something went wrong, the shader interfaces didn't match, so
                * abandon packing. This can happen for example when the
                * inputs are scalars but the outputs are struct members.
                */
               *varying_comp_info_size = 0;
               break;
            }

            struct varying_component *vc_info =
               &(*varying_comp_info)[var_info_idx-1];

            if (!vc_info->initialised) {
               const struct glsl_type *type = out_var->type;
               if (nir_is_per_vertex_io(out_var, producer->info.stage)) {
                  assert(glsl_type_is_array(type));
                  type = glsl_get_array_element(type);
               }

               vc_info->var = out_var;
               vc_info->interp_type =
                  get_interp_type(out_var, type, default_to_smooth_interp);
               vc_info->interp_loc = get_interp_loc(out_var);
               vc_info->is_32bit = glsl_type_is_32bit(type);
               vc_info->is_patch = out_var->data.patch;
               vc_info->is_intra_stage_only = true;
               vc_info->initialised = true;
            }
         }
      }
   }

   for (unsigned i = 0; i < *varying_comp_info_size; i++ ) {
      struct varying_component *vc_info = &(*varying_comp_info)[i];
      if (!vc_info->initialised) {
         /* Something went wrong, the shader interfaces didn't match, so
          * abandon packing. This can happen for example when the outputs are
          * scalars but the inputs are struct members.
          */
         *varying_comp_info_size = 0;
         break;
      }
   }
}
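
/* Searches assigned_comps from the cursor for the first slot and component
 * that can hold this component (matching interpolation type, interpolation
 * location and bit size) and records the choice in the remap table.
 */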
static void
assign_remap_locations(struct varying_loc (*remap)[4],
                       struct assigned_comps *assigned_comps,
                       struct varying_component *info,
                       unsigned *cursor, unsigned *comp,
                       unsigned max_location)
{
   unsigned tmp_cursor = *cursor;
   unsigned tmp_comp = *comp;

   for (; tmp_cursor < max_location; tmp_cursor++) {

      if (assigned_comps[tmp_cursor].comps) {
         /* We can only pack varyings with matching interpolation types,
          * interpolation loc must match also.
          * TODO: i965 can handle interpolation locations that don't match,
          * but the radeonsi nir backend handles everything as vec4s and so
          * expects this to be the same for all components. We could make this
          * check driver specific or drop it if NIR ever becomes the only
          * radeonsi backend.
          */
         if (assigned_comps[tmp_cursor].interp_type != info->interp_type ||
             assigned_comps[tmp_cursor].interp_loc != info->interp_loc) {
            tmp_comp = 0;
            continue;
         }

         /* We can only pack varyings with matching types, and the current
          * algorithm only supports packing 32-bit.
          */
         if (!assigned_comps[tmp_cursor].is_32bit) {
            tmp_comp = 0;
            continue;
         }

         while (tmp_comp < 4 &&
                (assigned_comps[tmp_cursor].comps & (1 << tmp_comp))) {
            tmp_comp++;
         }
      }

      if (tmp_comp == 4) {
         tmp_comp = 0;
         continue;
      }

      unsigned location = info->var->data.location - VARYING_SLOT_VAR0;

      /* Once we have assigned a location mark it as used */
      assigned_comps[tmp_cursor].comps |= (1 << tmp_comp);
      assigned_comps[tmp_cursor].interp_type = info->interp_type;
      assigned_comps[tmp_cursor].interp_loc = info->interp_loc;
      assigned_comps[tmp_cursor].is_32bit = info->is_32bit;

      /* Assign remap location */
      remap[location][info->var->data.location_frac].component = tmp_comp++;
      remap[location][info->var->data.location_frac].location =
         tmp_cursor + VARYING_SLOT_VAR0;

      break;
   }

   *cursor = tmp_cursor;
   *comp = tmp_comp;
}

/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer,
                   struct assigned_comps *assigned_comps,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = {{{0}, {0}}};
   struct varying_component *varying_comp_info;
   unsigned varying_comp_info_size;

   /* Gather varying component info */
   gather_varying_component_info(producer, consumer, &varying_comp_info,
                                 &varying_comp_info_size,
                                 default_to_smooth_interp);

   /* Sort varying components. */
   qsort(varying_comp_info, varying_comp_info_size,
         sizeof(struct varying_component), cmp_varying_component);

   unsigned cursor = 0;
   unsigned comp = 0;

   /* Set the remap array based on the sorted components */
   for (unsigned i = 0; i < varying_comp_info_size; i++ ) {
      struct varying_component *info = &varying_comp_info[i];

      assert(info->is_patch || cursor < MAX_VARYING);
      if (info->is_patch) {
         /* The list should be sorted with all non-patch inputs first followed
          * by patch inputs. When we hit our first patch input, we need to
          * reset the cursor to MAX_VARYING so we put them in the right slot.
          */
         if (cursor < MAX_VARYING) {
            cursor = MAX_VARYING;
            comp = 0;
         }

         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYINGS_INCL_PATCH);
      } else {
         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYING);

         /* Check if we failed to assign a remap location. This can happen if
          * for example there are a bunch of unmovable components with
          * mismatching interpolation types causing us to skip over locations
          * that would have been useful for packing later components.
          * The solution is to iterate over the locations again (this should
          * happen very rarely in practice).
          */
         if (cursor == MAX_VARYING) {
            cursor = 0;
            comp = 0;
            assign_remap_locations(remap, assigned_comps, info,
                                   &cursor, &comp, MAX_VARYING);
         }
      }
   }

   ralloc_free(varying_comp_info);

   uint64_t zero = 0;
   uint32_t zero32 = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero,
                              &consumer->info.patch_inputs_read, &zero32);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read,
                              &producer->info.patch_outputs_written,
                              &producer->info.patch_outputs_read);
}

/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings. At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this. Therefore, the total set of valid slots is the OR of the two
 * sets of varyings; this accounts for varyings which one side may need
 * to read/write even if the other doesn't. This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {{0}};

   get_unmoveable_components_masks(&producer->outputs, assigned_comps,
                                   producer->info.stage,
                                   default_to_smooth_interp);
   get_unmoveable_components_masks(&consumer->inputs, assigned_comps,
                                   consumer->info.stage,
                                   default_to_smooth_interp);

   compact_components(producer, consumer, assigned_comps,
                      default_to_smooth_interp);
}

/*
 * Mark XFB varyings as always_active_io in the consumer so the linking opts
 * don't touch them.
 */
void
nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
{
   nir_variable *input_vars[MAX_VARYING] = { 0 };

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         input_vars[location] = var;
      }
   }

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         if (!var->data.always_active_io)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         if (input_vars[location]) {
            input_vars[location]->data.always_active_io = true;
         }
      }
   }
}
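
/* Two varyings match when they occupy the same location and start at the
 * same component.
 */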
static bool
does_varying_match(nir_variable *out_var, nir_variable *in_var)
{
   return in_var->data.location == out_var->data.location &&
          in_var->data.location_frac == out_var->data.location_frac;
}

static nir_variable *
get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
{
   nir_foreach_variable(var, &consumer->inputs) {
      if (does_varying_match(out_var, var))
         return var;
   }

   return NULL;
}

static bool
can_replace_varying(nir_variable *out_var)
{
   /* Skip types that require more complex handling.
    * TODO: add support for these types.
    */
   if (glsl_type_is_array(out_var->type) ||
       glsl_type_is_dual_slot(out_var->type) ||
       glsl_type_is_matrix(out_var->type) ||
       glsl_type_is_struct_or_ifc(out_var->type))
      return false;

   /* Limit this pass to scalars for now to keep things simple. Most varyings
    * should have been lowered to scalars at this point anyway.
    */
   if (!glsl_type_is_scalar(out_var->type))
      return false;

   if (out_var->data.location < VARYING_SLOT_VAR0 ||
       out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
      return false;

   return true;
}
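
/* Rewrites every consumer load of the varying written by store_intr with the
 * constant value being stored, so the input can later be removed entirely.
 */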
static bool
replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(out_var, in_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_load_const_instr *out_const =
            nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);

         /* Add new const to replace the input */
         nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
                                             intr->dest.ssa.bit_size,
                                             out_const->value);

         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));

         progress = true;
      }
   }

   return progress;
}
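
/* Rewrites loads of an input that duplicates another varying's value with
 * loads of input_var, the input that already receives that value, provided
 * the interpolation settings match.
 */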
static bool
replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
                        nir_intrinsic_instr *dup_store_intr)
{
   assert(input_var);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *dup_out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(dup_out_var, in_var) ||
             in_var->data.interpolation != input_var->data.interpolation ||
             get_interp_loc(in_var) != get_interp_loc(input_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_ssa_def *load = nir_load_var(&b, input_var);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));

         progress = true;
      }
   }

   return progress;
}
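
/* Replaces fragment shader inputs whose value, as stored in the producer's
 * last block, is either a compile-time constant or a duplicate of another
 * output. Returns true if the consumer was changed.
 */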
bool
nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
{
   /* TODO: Add support for more shader stage combinations */
   if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
       (producer->info.stage != MESA_SHADER_VERTEX &&
        producer->info.stage != MESA_SHADER_TESS_EVAL))
      return false;

   bool progress = false;

   nir_function_impl *impl = nir_shader_get_entrypoint(producer);

   struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);

   /* If we find a store in the last block of the producer we can be sure this
    * is the only possible value for this output.
    */
   nir_block *last_block = nir_impl_last_block(impl);
   nir_foreach_instr_reverse(instr, last_block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      if (intr->intrinsic != nir_intrinsic_store_deref)
         continue;

      nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
      if (out_deref->mode != nir_var_shader_out)
         continue;

      nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
      if (!can_replace_varying(out_var))
         continue;

      if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
         progress |= replace_constant_input(consumer, intr);
      } else {
         struct hash_entry *entry =
            _mesa_hash_table_search(varying_values, intr->src[1].ssa);
         if (entry) {
            progress |= replace_duplicate_input(consumer,
                                                (nir_variable *) entry->data,
                                                intr);
         } else {
            nir_variable *in_var = get_matching_input_var(consumer, out_var);
            if (in_var) {
               _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
                                       in_var);
            }
         }
      }
   }

   _mesa_hash_table_destroy(varying_values, NULL);

   return progress;
}

/* TODO any better helper somewhere to sort a list? */

static void
insert_sorted(struct exec_list *var_list, nir_variable *new_var)
{
   nir_foreach_variable(var, var_list) {
      if (var->data.location > new_var->data.location) {
         exec_node_insert_node_before(&var->node, &new_var->node);
         return;
      }
   }
   exec_list_push_tail(var_list, &new_var->node);
}

static void
sort_varyings(struct exec_list *var_list)
{
   struct exec_list new_list;
   exec_list_make_empty(&new_list);
   nir_foreach_variable_safe(var, var_list) {
      exec_node_remove(&var->node);
      insert_sorted(&new_list, var);
   }
   exec_list_move_nodes_to(&new_list, var_list);
}
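
/* Assigns consecutive driver_location values to the variables in var_list,
 * allowing component-packed and compact varyings to share slots, and stores
 * the total number of slots used in *size.
 */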
void
nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
                            gl_shader_stage stage)
{
   unsigned location = 0;
   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
   uint64_t processed_locs[2] = {0};

   sort_varyings(var_list);

   int UNUSED last_loc = 0;
   bool last_partial = false;
   nir_foreach_variable(var, var_list) {
      const struct glsl_type *type = var->type;
      if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      int base;
      if (var->data.mode == nir_var_shader_in && stage == MESA_SHADER_VERTEX)
         base = VERT_ATTRIB_GENERIC0;
      else if (var->data.mode == nir_var_shader_out &&
               stage == MESA_SHADER_FRAGMENT)
         base = FRAG_RESULT_DATA0;
      else
         base = VARYING_SLOT_VAR0;

      unsigned var_size;
      if (var->data.compact) {
         /* If we are inside a partial compact,
          * don't allow another compact to be in this slot
          * if it starts at component 0.
          */
         if (last_partial && var->data.location_frac == 0) {
            location++;
         }

         /* compact variables must be arrays of scalars */
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
         unsigned start = 4 * location + var->data.location_frac;
         unsigned end = start + glsl_get_length(type);
         var_size = end / 4 - location;
         last_partial = end % 4 != 0;
      } else {
         /* Compact variables bypass the normal varying compacting pass,
          * which means they cannot be in the same vec4 slot as a normal
          * variable. If part of the current slot is taken up by a compact
          * variable, we need to go to the next one.
          */
         if (last_partial) {
            location++;
            last_partial = false;
         }
         var_size = glsl_count_attribute_slots(type, false);
      }

      /* Builtins don't allow component packing so we only need to worry about
       * user defined varyings sharing the same location.
       */
      bool processed = false;
      if (var->data.location >= base) {
         unsigned glsl_location = var->data.location - base;

         for (unsigned i = 0; i < var_size; i++) {
            if (processed_locs[var->data.index] &
                ((uint64_t)1 << (glsl_location + i)))
               processed = true;
            else
               processed_locs[var->data.index] |=
                  ((uint64_t)1 << (glsl_location + i));
         }
      }

      /* Because component packing allows varyings to share the same location
       * we may already have processed this location.
       */
      if (processed) {
         unsigned driver_location = assigned_locations[var->data.location];
         var->data.driver_location = driver_location;

         /* An array may be packed such that it crosses multiple other arrays
          * or variables; we need to make sure we have allocated the elements
          * consecutively if the previously processed var was shorter than
          * the current array we are processing.
          *
          * NOTE: The code below assumes the var list is ordered in ascending
          * location order.
          */
         assert(last_loc <= var->data.location);
         last_loc = var->data.location;
         unsigned last_slot_location = driver_location + var_size;
         if (last_slot_location > location) {
            unsigned num_unallocated_slots = last_slot_location - location;
            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
            for (unsigned i = first_unallocated_slot; i < var_size; i++) {
               assigned_locations[var->data.location + i] = location;
               location++;
            }
         }
         continue;
      }

      for (unsigned i = 0; i < var_size; i++) {
         assigned_locations[var->data.location + i] = location + i;
      }

      var->data.driver_location = location;
      location += var_size;
   }

   if (last_partial)
      location++;

   *size = location;
}