nir: Remove old-school deref chain support

Acked-by: Rob Clark <robdclark@gmail.com>
Acked-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Acked-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Jason Ekstrand 2018-03-26 14:50:38 -07:00
parent 9800b81ffb
commit a331d7d1cd
44 changed files with 6 additions and 1365 deletions


@ -142,8 +142,6 @@ gl_nir_lower_atomics(nir_shader *shader,
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_atomic_counter_derefs);
nir_foreach_function(function, shader) {
if (!function->impl)
continue;


@ -173,8 +173,6 @@ gl_nir_lower_samplers(nir_shader *shader,
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_texture_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress |= lower_impl(function->impl, shader_program);


@ -282,8 +282,6 @@ gl_nir_lower_samplers_as_deref(nir_shader *shader,
bool progress = false;
struct lower_samplers_as_deref_state state;
nir_assert_unlowered_derefs(shader, nir_lower_texture_derefs);
state.shader = shader;
state.shader_program = shader_program;
state.remap_table = _mesa_hash_table_create(NULL, _mesa_key_hash_string,


@ -64,7 +64,6 @@ nir_shader_create(void *mem_ctx,
shader->num_outputs = 0;
shader->num_uniforms = 0;
shader->num_shared = 0;
shader->lowered_derefs = 0;
return shader;
}
@ -540,9 +539,7 @@ nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
instr->texture_index = 0;
instr->texture_array_size = 0;
instr->texture = NULL;
instr->sampler_index = 0;
instr->sampler = NULL;
return instr;
}
@ -620,218 +617,6 @@ nir_ssa_undef_instr_create(nir_shader *shader,
return instr;
}
nir_deref_var *
nir_deref_var_create(void *mem_ctx, nir_variable *var)
{
nir_deref_var *deref = ralloc(mem_ctx, nir_deref_var);
deref->deref.deref_type = nir_deref_type_var;
deref->deref.child = NULL;
deref->deref.type = var->type;
deref->var = var;
return deref;
}
nir_deref_array *
nir_deref_array_create(void *mem_ctx)
{
nir_deref_array *deref = ralloc(mem_ctx, nir_deref_array);
deref->deref.deref_type = nir_deref_type_array;
deref->deref.child = NULL;
deref->deref_array_type = nir_deref_array_type_direct;
src_init(&deref->indirect);
deref->base_offset = 0;
return deref;
}
nir_deref_struct *
nir_deref_struct_create(void *mem_ctx, unsigned field_index)
{
nir_deref_struct *deref = ralloc(mem_ctx, nir_deref_struct);
deref->deref.deref_type = nir_deref_type_struct;
deref->deref.child = NULL;
deref->index = field_index;
return deref;
}
nir_deref_var *
nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx)
{
if (deref == NULL)
return NULL;
nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
ret->deref.type = deref->deref.type;
if (deref->deref.child)
ret->deref.child = nir_deref_clone(deref->deref.child, ret);
return ret;
}
static nir_deref_array *
deref_array_clone(const nir_deref_array *deref, void *mem_ctx)
{
nir_deref_array *ret = nir_deref_array_create(mem_ctx);
ret->base_offset = deref->base_offset;
ret->deref_array_type = deref->deref_array_type;
if (deref->deref_array_type == nir_deref_array_type_indirect) {
nir_src_copy(&ret->indirect, &deref->indirect, mem_ctx);
}
ret->deref.type = deref->deref.type;
if (deref->deref.child)
ret->deref.child = nir_deref_clone(deref->deref.child, ret);
return ret;
}
static nir_deref_struct *
deref_struct_clone(const nir_deref_struct *deref, void *mem_ctx)
{
nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
ret->deref.type = deref->deref.type;
if (deref->deref.child)
ret->deref.child = nir_deref_clone(deref->deref.child, ret);
return ret;
}
nir_deref *
nir_deref_clone(const nir_deref *deref, void *mem_ctx)
{
if (deref == NULL)
return NULL;
switch (deref->deref_type) {
case nir_deref_type_var:
return &nir_deref_var_clone(nir_deref_as_var(deref), mem_ctx)->deref;
case nir_deref_type_array:
return &deref_array_clone(nir_deref_as_array(deref), mem_ctx)->deref;
case nir_deref_type_struct:
return &deref_struct_clone(nir_deref_as_struct(deref), mem_ctx)->deref;
default:
unreachable("Invalid dereference type");
}
return NULL;
}
/* This is the second step in the recursion. We've found the tail and made a
* copy. Now we need to iterate over all possible leaves and call the
* callback on each one.
*/
static bool
deref_foreach_leaf_build_recur(nir_deref_var *deref, nir_deref *tail,
nir_deref_foreach_leaf_cb cb, void *state)
{
unsigned length;
union {
nir_deref_array arr;
nir_deref_struct str;
} tmp;
assert(tail->child == NULL);
switch (glsl_get_base_type(tail->type)) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_UINT16:
case GLSL_TYPE_UINT64:
case GLSL_TYPE_INT:
case GLSL_TYPE_INT16:
case GLSL_TYPE_INT64:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_FLOAT16:
case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_BOOL:
if (glsl_type_is_vector_or_scalar(tail->type))
return cb(deref, state);
/* Fall Through */
case GLSL_TYPE_ARRAY:
tmp.arr.deref.deref_type = nir_deref_type_array;
tmp.arr.deref.type = glsl_get_array_element(tail->type);
tmp.arr.deref_array_type = nir_deref_array_type_direct;
tmp.arr.indirect = NIR_SRC_INIT;
tail->child = &tmp.arr.deref;
length = glsl_get_length(tail->type);
for (unsigned i = 0; i < length; i++) {
tmp.arr.deref.child = NULL;
tmp.arr.base_offset = i;
if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
return false;
}
return true;
case GLSL_TYPE_STRUCT:
tmp.str.deref.deref_type = nir_deref_type_struct;
tail->child = &tmp.str.deref;
length = glsl_get_length(tail->type);
for (unsigned i = 0; i < length; i++) {
tmp.arr.deref.child = NULL;
tmp.str.deref.type = glsl_get_struct_field(tail->type, i);
tmp.str.index = i;
if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
return false;
}
return true;
default:
unreachable("Invalid type for dereference");
}
}
/* This is the first step of the foreach_leaf recursion. In this step we are
* walking to the end of the deref chain and making a copy in the stack as we
* go. This is because we don't want to mutate the deref chain that was
* passed in by the caller. The downside is that this deref chain is on the
* stack and, if the caller wants to do anything with it, they will have to
* make their own copy because this one will go away.
*/
static bool
deref_foreach_leaf_copy_recur(nir_deref_var *deref, nir_deref *tail,
nir_deref_foreach_leaf_cb cb, void *state)
{
union {
nir_deref_array arr;
nir_deref_struct str;
} c;
if (tail->child) {
switch (tail->child->deref_type) {
case nir_deref_type_array:
c.arr = *nir_deref_as_array(tail->child);
tail->child = &c.arr.deref;
return deref_foreach_leaf_copy_recur(deref, &c.arr.deref, cb, state);
case nir_deref_type_struct:
c.str = *nir_deref_as_struct(tail->child);
tail->child = &c.str.deref;
return deref_foreach_leaf_copy_recur(deref, &c.str.deref, cb, state);
case nir_deref_type_var:
default:
unreachable("Invalid deref type for a child");
}
} else {
/* We've gotten to the end of the original deref. Time to start
* building our own derefs.
*/
return deref_foreach_leaf_build_recur(deref, tail, cb, state);
}
}
/**
* This function iterates over all of the possible derefs that can be created
* with the given deref as the head. It then calls the provided callback with
* a full deref for each one.
*
* The deref passed to the callback will be allocated on the stack. You will
* need to make a copy if you want it to hang around.
*/
bool
nir_deref_foreach_leaf(nir_deref_var *deref,
nir_deref_foreach_leaf_cb cb, void *state)
{
nir_deref_var copy = *deref;
return deref_foreach_leaf_copy_recur(&copy, &copy.deref, cb, state);
}
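
As a sketch of how callers used this walker (the callback and counter below are hypothetical, not taken from the tree):

static bool
count_leaf(nir_deref_var *leaf, void *state)
{
   /* "leaf" is stack-allocated by the walker; clone it if it must
    * outlive the callback (see the comment above). */
   unsigned *count = state;
   (*count)++;
   return true; /* returning false stops the iteration early */
}

unsigned count = 0;
nir_deref_foreach_leaf(deref, count_leaf, &count);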
static nir_const_value
const_value_float(double d, unsigned bit_size)
{
@ -1259,31 +1044,6 @@ visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
return true;
}
static bool
visit_deref_array_src(nir_deref_array *deref, nir_foreach_src_cb cb,
void *state)
{
if (deref->deref_array_type == nir_deref_array_type_indirect)
return visit_src(&deref->indirect, cb, state);
return true;
}
static bool
visit_deref_src(nir_deref_var *deref, nir_foreach_src_cb cb, void *state)
{
nir_deref *cur = &deref->deref;
while (cur != NULL) {
if (cur->deref_type == nir_deref_type_array) {
if (!visit_deref_array_src(nir_deref_as_array(cur), cb, state))
return false;
}
cur = cur->child;
}
return true;
}
static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
@ -1319,16 +1079,6 @@ visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
return false;
}
if (instr->texture != NULL) {
if (!visit_deref_src(instr->texture, cb, state))
return false;
}
if (instr->sampler != NULL) {
if (!visit_deref_src(instr->sampler, cb, state))
return false;
}
return true;
}
@ -1342,13 +1092,6 @@ visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
return false;
}
unsigned num_vars =
nir_intrinsic_infos[instr->intrinsic].num_variables;
for (unsigned i = 0; i < num_vars; i++) {
if (!visit_deref_src(instr->variables[i], cb, state))
return false;
}
return true;
}
@ -1588,19 +1331,6 @@ nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
src_add_all_uses(dest->reg.indirect, instr, NULL);
}
void
nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
nir_deref_var *new_deref)
{
if (*deref)
visit_deref_src(*deref, remove_use_cb, NULL);
*deref = new_deref;
if (*deref)
visit_deref_src(*deref, add_use_cb, instr);
}
/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,


@ -915,62 +915,6 @@ typedef enum {
nir_deref_type_cast,
} nir_deref_type;
typedef struct nir_deref {
nir_deref_type deref_type;
struct nir_deref *child;
const struct glsl_type *type;
} nir_deref;
typedef struct {
nir_deref deref;
nir_variable *var;
} nir_deref_var;
/* This enum describes how the array is referenced. If the deref is
* direct then the base_offset is used. If the deref is indirect then
* offset is given by base_offset + indirect. If the deref is a wildcard
* then the deref refers to all of the elements of the array at the same
* time. Wildcard dereferences are only ever allowed in copy_var
* intrinsics and the source and destination derefs must have matching
* wildcards.
*/
typedef enum {
nir_deref_array_type_direct,
nir_deref_array_type_indirect,
nir_deref_array_type_wildcard,
} nir_deref_array_type;
typedef struct {
nir_deref deref;
nir_deref_array_type deref_array_type;
unsigned base_offset;
nir_src indirect;
} nir_deref_array;
typedef struct {
nir_deref deref;
unsigned index;
} nir_deref_struct;
NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref,
deref_type, nir_deref_type_var)
NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref,
deref_type, nir_deref_type_array)
NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref,
deref_type, nir_deref_type_struct)
/* Returns the last deref in the chain. */
static inline nir_deref *
nir_deref_tail(nir_deref *deref)
{
while (deref->child)
deref = deref->child;
return deref;
}
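
Putting these pieces together, a chain for an indirect array access foo[i] was built by hand roughly like this (a sketch; foo_var, index_def and mem_ctx are assumed to exist):

nir_deref_var *head = nir_deref_var_create(mem_ctx, foo_var);
nir_deref_array *arr = nir_deref_array_create(head);
arr->deref.type = glsl_get_array_element(foo_var->type);
arr->deref_array_type = nir_deref_array_type_indirect;
arr->indirect = nir_src_for_ssa(index_def);
head->deref.child = &arr->deref;
/* nir_deref_tail(&head->deref) now returns &arr->deref */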
typedef struct {
nir_instr instr;
@ -1045,9 +989,6 @@ nir_deref_instr_get_variable(const nir_deref_instr *instr)
bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);
nir_deref_var *
nir_deref_instr_to_deref(nir_deref_instr *instr, void *mem_ctx);
typedef struct {
nir_instr instr;
@ -1106,8 +1047,6 @@ typedef struct {
int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];
nir_deref_var *variables[2];
nir_src src[];
} nir_intrinsic_instr;
@ -1236,9 +1175,6 @@ typedef struct {
*/
unsigned dest_components;
/** the number of inputs/outputs that are variables */
unsigned num_variables;
/** the number of constant indices used by the intrinsic */
unsigned num_indices;
@ -1383,12 +1319,6 @@ typedef struct {
/** The size of the texture array or 0 if it's not an array */
unsigned texture_array_size;
/** The texture deref
*
* If this is null, use texture_index instead.
*/
nir_deref_var *texture;
/** The sampler index
*
* The following operations do not require a sampler and, as such, this
@ -1405,12 +1335,6 @@ typedef struct {
* then the sampler index is given by sampler_index + sampler_offset.
*/
unsigned sampler_index;
/** The sampler deref
*
* If this is null, use sampler_index instead.
*/
nir_deref_var *sampler;
} nir_tex_instr;
static inline unsigned
@ -2141,19 +2065,8 @@ typedef struct nir_shader {
* access plus one
*/
unsigned num_inputs, num_uniforms, num_outputs, num_shared;
/* temporary, tracking for which derefs instructions have been lowered
* to deref chains
*/
unsigned lowered_derefs;
} nir_shader;
#define nir_assert_lowered_derefs(shader, mask) \
assert(((shader)->lowered_derefs & (mask)) == (mask))
#define nir_assert_unlowered_derefs(shader, mask) \
assert(!((shader)->lowered_derefs & (mask)))
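
During the transition these macros paired with the bookkeeping in nir_lower_deref_instrs roughly like this (a sketch):

nir_lower_deref_instrs(shader, nir_lower_texture_derefs);
/* texture derefs are chains again, so a chain-based pass may assert: */
nir_assert_lowered_derefs(shader, nir_lower_texture_derefs);
/* ...while an instruction-based pass would now trip on: */
nir_assert_unlowered_derefs(shader, nir_lower_texture_derefs);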
static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
@ -2246,14 +2159,6 @@ nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
unsigned num_components,
unsigned bit_size);
nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
nir_deref_array *nir_deref_array_create(void *mem_ctx);
nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);
typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
bool nir_deref_foreach_leaf(nir_deref_var *deref,
nir_deref_foreach_leaf_cb cb, void *state);
nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
/**
@ -2485,8 +2390,6 @@ void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
nir_dest new_dest);
void nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
nir_deref_var *new_deref);
void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
unsigned num_components, unsigned bit_size,
@ -2581,8 +2484,6 @@ nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
nir_deref *nir_deref_clone(const nir_deref *deref, void *mem_ctx);
nir_deref_var *nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx);
nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);
@ -2690,19 +2591,6 @@ bool nir_inline_functions(nir_shader *shader);
bool nir_propagate_invariant(nir_shader *shader);
enum nir_lower_deref_flags {
nir_lower_load_store_derefs = (1 << 0),
nir_lower_texture_derefs = (1 << 1),
nir_lower_interp_derefs = (1 << 2),
nir_lower_atomic_counter_derefs = (1 << 3),
nir_lower_atomic_derefs = (1 << 4),
nir_lower_image_derefs = (1 << 5),
nir_lower_all_derefs = (1 << 6) - 1,
};
bool nir_lower_deref_instrs(nir_shader *shader,
enum nir_lower_deref_flags flags);
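
Drivers that had not yet been converted to deref instructions called this at the top of their compile, e.g. (a sketch mirroring the callers removed later in this commit):

NIR_PASS_V(shader, nir_lower_deref_instrs, nir_lower_all_derefs);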
void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
void nir_lower_deref_copy_instr(struct nir_builder *b,
nir_intrinsic_instr *copy);


@ -644,29 +644,6 @@ nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
return deref;
}
static inline nir_deref_instr *
nir_build_deref_for_chain(nir_builder *b, nir_deref_var *deref_var)
{
nir_deref_instr *tail = nir_build_deref_var(b, deref_var->var);
for (nir_deref *d = deref_var->deref.child; d; d = d->child) {
if (d->deref_type == nir_deref_type_array) {
nir_deref_array *a = nir_deref_as_array(d);
assert(a->deref_array_type != nir_deref_array_type_wildcard);
nir_ssa_def *index = nir_imm_int(b, a->base_offset);
if (a->deref_array_type == nir_deref_array_type_indirect)
index = nir_iadd(b, index, nir_ssa_for_src(b, a->indirect, 1));
tail = nir_build_deref_array(b, tail, index);
} else {
nir_deref_struct *s = nir_deref_as_struct(d);
tail = nir_build_deref_struct(b, tail, s->index);
}
}
return tail;
}
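
A typical use, when converting a pass from chains to deref instructions, looked something like this (a sketch; "load" is a hypothetical load_var intrinsic):

b->cursor = nir_before_instr(&load->instr);
nir_deref_instr *deref = nir_build_deref_for_chain(b, load->variables[0]);
nir_ssa_def *val = nir_load_deref(b, deref);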
/** Returns a deref that follows another but starting from the given parent
*
* The new deref will be the same type and take the same array or struct index
@ -764,22 +741,6 @@ nir_load_var(nir_builder *build, nir_variable *var)
return nir_load_deref(build, nir_build_deref_var(build, var));
}
static inline nir_ssa_def *
nir_load_deref_var(nir_builder *build, nir_deref_var *deref)
{
const struct glsl_type *type = nir_deref_tail(&deref->deref)->type;
const unsigned num_components = glsl_get_vector_elements(type);
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_var);
load->num_components = num_components;
load->variables[0] = nir_deref_var_clone(deref, load);
nir_ssa_dest_init(&load->instr, &load->dest, num_components,
glsl_get_bit_size(type), NULL);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
}
static inline void
nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
unsigned writemask)
@ -787,35 +748,6 @@ nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
nir_store_deref(build, nir_build_deref_var(build, var), value, writemask);
}
static inline void
nir_store_deref_var(nir_builder *build, nir_deref_var *deref,
nir_ssa_def *value, unsigned writemask)
{
const unsigned num_components =
glsl_get_vector_elements(nir_deref_tail(&deref->deref)->type);
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_var);
store->num_components = num_components;
store->const_index[0] = writemask & ((1 << num_components) - 1);
store->variables[0] = nir_deref_var_clone(deref, store);
store->src[0] = nir_src_for_ssa(value);
nir_builder_instr_insert(build, &store->instr);
}
static inline void
nir_copy_deref_var(nir_builder *build, nir_deref_var *dest, nir_deref_var *src)
{
assert(nir_deref_tail(&dest->deref)->type ==
nir_deref_tail(&src->deref)->type);
nir_intrinsic_instr *copy =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_var);
copy->variables[0] = nir_deref_var_clone(dest, copy);
copy->variables[1] = nir_deref_var_clone(src, copy);
nir_builder_instr_insert(build, &copy->instr);
}
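
For a scalar variable wrapped in a one-link chain, the removed helpers composed like this (a sketch; b and deref are assumed):

nir_ssa_def *val = nir_load_deref_var(b, deref);
nir_store_deref_var(b, deref,
                    nir_iadd(b, val, nir_imm_int(b, 1)), 0x1);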
static inline void
nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
{


@ -266,73 +266,6 @@ __clone_dst(clone_state *state, nir_instr *ninstr,
}
}
static nir_deref *clone_deref(clone_state *state, const nir_deref *deref,
nir_instr *ninstr, nir_deref *parent);
static nir_deref_var *
clone_deref_var(clone_state *state, const nir_deref_var *dvar,
nir_instr *ninstr)
{
nir_variable *nvar = remap_var(state, dvar->var);
nir_deref_var *ndvar = nir_deref_var_create(ninstr, nvar);
if (dvar->deref.child)
ndvar->deref.child = clone_deref(state, dvar->deref.child,
ninstr, &ndvar->deref);
return ndvar;
}
static nir_deref_array *
clone_deref_array(clone_state *state, const nir_deref_array *darr,
nir_instr *ninstr, nir_deref *parent)
{
nir_deref_array *ndarr = nir_deref_array_create(parent);
ndarr->deref.type = darr->deref.type;
if (darr->deref.child)
ndarr->deref.child = clone_deref(state, darr->deref.child,
ninstr, &ndarr->deref);
ndarr->deref_array_type = darr->deref_array_type;
ndarr->base_offset = darr->base_offset;
if (ndarr->deref_array_type == nir_deref_array_type_indirect)
__clone_src(state, ninstr, &ndarr->indirect, &darr->indirect);
return ndarr;
}
static nir_deref_struct *
clone_deref_struct(clone_state *state, const nir_deref_struct *dstr,
nir_instr *ninstr, nir_deref *parent)
{
nir_deref_struct *ndstr = nir_deref_struct_create(parent, dstr->index);
ndstr->deref.type = dstr->deref.type;
if (dstr->deref.child)
ndstr->deref.child = clone_deref(state, dstr->deref.child,
ninstr, &ndstr->deref);
return ndstr;
}
static nir_deref *
clone_deref(clone_state *state, const nir_deref *dref,
nir_instr *ninstr, nir_deref *parent)
{
switch (dref->deref_type) {
case nir_deref_type_array:
return &clone_deref_array(state, nir_deref_as_array(dref),
ninstr, parent)->deref;
case nir_deref_type_struct:
return &clone_deref_struct(state, nir_deref_as_struct(dref),
ninstr, parent)->deref;
default:
unreachable("bad deref type");
return NULL;
}
}
static nir_alu_instr *
clone_alu(clone_state *state, const nir_alu_instr *alu)
{
@ -400,7 +333,6 @@ clone_intrinsic(clone_state *state, const nir_intrinsic_instr *itr)
nir_intrinsic_instr *nitr =
nir_intrinsic_instr_create(state->ns, itr->intrinsic);
unsigned num_variables = nir_intrinsic_infos[itr->intrinsic].num_variables;
unsigned num_srcs = nir_intrinsic_infos[itr->intrinsic].num_srcs;
if (nir_intrinsic_infos[itr->intrinsic].has_dest)
@ -409,11 +341,6 @@ clone_intrinsic(clone_state *state, const nir_intrinsic_instr *itr)
nitr->num_components = itr->num_components;
memcpy(nitr->const_index, itr->const_index, sizeof(nitr->const_index));
for (unsigned i = 0; i < num_variables; i++) {
nitr->variables[i] = clone_deref_var(state, itr->variables[i],
&nitr->instr);
}
for (unsigned i = 0; i < num_srcs; i++)
__clone_src(state, &nitr->instr, &nitr->src[i], &itr->src[i]);
@ -466,13 +393,8 @@ clone_tex(clone_state *state, const nir_tex_instr *tex)
ntex->component = tex->component;
ntex->texture_index = tex->texture_index;
if (tex->texture)
ntex->texture = clone_deref_var(state, tex->texture, &ntex->instr);
ntex->texture_array_size = tex->texture_array_size;
ntex->sampler_index = tex->sampler_index;
if (tex->sampler)
ntex->sampler = clone_deref_var(state, tex->sampler, &ntex->instr);
return ntex;
}
@ -811,7 +733,6 @@ nir_shader_clone(void *mem_ctx, const nir_shader *s)
ns->num_uniforms = s->num_uniforms;
ns->num_outputs = s->num_outputs;
ns->num_shared = s->num_shared;
ns->lowered_derefs = s->lowered_derefs;
free_clone_state(&state);


@ -134,284 +134,6 @@ nir_remove_dead_derefs(nir_shader *shader)
return progress;
}
nir_deref_var *
nir_deref_instr_to_deref(nir_deref_instr *instr, void *mem_ctx)
{
nir_deref *deref = NULL;
while (instr->deref_type != nir_deref_type_var) {
nir_deref *nderef;
switch (instr->deref_type) {
case nir_deref_type_array:
case nir_deref_type_array_wildcard: {
nir_deref_array *deref_arr = nir_deref_array_create(mem_ctx);
if (instr->deref_type == nir_deref_type_array) {
nir_const_value *const_index =
nir_src_as_const_value(instr->arr.index);
if (const_index) {
deref_arr->deref_array_type = nir_deref_array_type_direct;
deref_arr->base_offset = const_index->u32[0];
} else {
deref_arr->deref_array_type = nir_deref_array_type_indirect;
deref_arr->base_offset = 0;
nir_src_copy(&deref_arr->indirect, &instr->arr.index, mem_ctx);
}
} else {
deref_arr->deref_array_type = nir_deref_array_type_wildcard;
}
nderef = &deref_arr->deref;
break;
}
case nir_deref_type_struct:
nderef = &nir_deref_struct_create(mem_ctx, instr->strct.index)->deref;
break;
default:
unreachable("Invalid deref instruction type");
}
nderef->child = deref;
ralloc_steal(nderef, deref);
nderef->type = instr->type;
deref = nderef;
assert(instr->parent.is_ssa);
instr = nir_src_as_deref(instr->parent);
}
assert(instr->deref_type == nir_deref_type_var);
nir_deref_var *deref_var = nir_deref_var_create(mem_ctx, instr->var);
deref_var->deref.child = deref;
ralloc_steal(deref_var, deref);
return deref_var;
}
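
Concretely, this walks the parent links upward and rebuilds the ralloc'ed chain; for example (a sketch, with made-up SSA names):

/* instruction form:              rebuilt chain form:
 *   ssa_1 = deref_var &a           a[3]
 *   ssa_2 = deref_array ssa_1[3]     (deref_array_type_direct,
 *           (constant index)          base_offset = 3)          */
nir_deref_var *chain = nir_deref_instr_to_deref(array_deref_instr, mem_ctx);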
static nir_deref_var *
nir_deref_src_to_deref(nir_src src, void *mem_ctx)
{
return nir_deref_instr_to_deref(nir_src_as_deref(src), mem_ctx);
}
static bool
nir_lower_deref_instrs_tex(nir_tex_instr *tex)
{
bool progress = false;
/* Remove the instruction before we modify it. This way we won't mess up
* use-def chains when we move sources around.
*/
nir_cursor cursor = nir_instr_remove(&tex->instr);
unsigned new_num_srcs = 0;
for (unsigned i = 0; i < tex->num_srcs; i++) {
if (tex->src[i].src_type == nir_tex_src_texture_deref) {
tex->texture = nir_deref_src_to_deref(tex->src[i].src, tex);
progress = true;
continue;
} else if (tex->src[i].src_type == nir_tex_src_sampler_deref) {
tex->sampler = nir_deref_src_to_deref(tex->src[i].src, tex);
progress = true;
continue;
}
/* Compact the sources down to remove the deref sources */
assert(new_num_srcs <= i);
tex->src[new_num_srcs++] = tex->src[i];
}
tex->num_srcs = new_num_srcs;
nir_instr_insert(cursor, &tex->instr);
return progress;
}
static bool
nir_lower_deref_instrs_intrin(nir_intrinsic_instr *intrin,
enum nir_lower_deref_flags flags)
{
nir_intrinsic_op deref_op = intrin->intrinsic;
nir_intrinsic_op var_op;
switch (deref_op) {
#define CASE(a) \
case nir_intrinsic_##a##_deref: \
if (!(flags & nir_lower_load_store_derefs)) \
return false; \
var_op = nir_intrinsic_##a##_var; \
break;
CASE(load)
CASE(store)
CASE(copy)
#undef CASE
#define CASE(a) \
case nir_intrinsic_interp_deref_##a: \
if (!(flags & nir_lower_interp_derefs)) \
return false; \
var_op = nir_intrinsic_interp_var_##a; \
break;
CASE(at_centroid)
CASE(at_sample)
CASE(at_offset)
#undef CASE
#define CASE(a) \
case nir_intrinsic_atomic_counter_##a##_deref: \
if (!(flags & nir_lower_atomic_counter_derefs)) \
return false; \
var_op = nir_intrinsic_atomic_counter_##a##_var; \
break;
CASE(inc)
CASE(dec)
CASE(read)
CASE(add)
CASE(min)
CASE(max)
CASE(and)
CASE(or)
CASE(xor)
CASE(exchange)
CASE(comp_swap)
#undef CASE
#define CASE(a) \
case nir_intrinsic_deref_atomic_##a: \
if (!(flags & nir_lower_atomic_derefs)) \
return false; \
var_op = nir_intrinsic_var_atomic_##a; \
break;
CASE(add)
CASE(imin)
CASE(umin)
CASE(imax)
CASE(umax)
CASE(and)
CASE(or)
CASE(xor)
CASE(exchange)
CASE(comp_swap)
#undef CASE
#define CASE(a) \
case nir_intrinsic_image_deref_##a: \
if (!(flags & nir_lower_image_derefs)) \
return false; \
var_op = nir_intrinsic_image_var_##a; \
break;
CASE(load)
CASE(store)
CASE(atomic_add)
CASE(atomic_min)
CASE(atomic_max)
CASE(atomic_and)
CASE(atomic_or)
CASE(atomic_xor)
CASE(atomic_exchange)
CASE(atomic_comp_swap)
CASE(size)
CASE(samples)
#undef CASE
default:
return false;
}
/* Remove the instruction before we modify it. This way we won't mess up
* use-def chains when we move sources around.
*/
nir_cursor cursor = nir_instr_remove(&intrin->instr);
unsigned num_derefs = nir_intrinsic_infos[var_op].num_variables;
assert(nir_intrinsic_infos[var_op].num_srcs + num_derefs ==
nir_intrinsic_infos[deref_op].num_srcs);
/* Move deref sources to variables */
for (unsigned i = 0; i < num_derefs; i++)
intrin->variables[i] = nir_deref_src_to_deref(intrin->src[i], intrin);
/* Shift all the other sources down */
for (unsigned i = 0; i < nir_intrinsic_infos[var_op].num_srcs; i++)
nir_src_copy(&intrin->src[i], &intrin->src[i + num_derefs], intrin);
/* Rewrite the extra sources to NIR_SRC_INIT just in case */
for (unsigned i = 0; i < num_derefs; i++)
intrin->src[nir_intrinsic_infos[var_op].num_srcs + i] = NIR_SRC_INIT;
/* It's safe to just stomp the intrinsic to var intrinsic since every
* intrinsic has room for some variables and the number of sources only
* shrinks.
*/
intrin->intrinsic = var_op;
nir_instr_insert(cursor, &intrin->instr);
return true;
}
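
Worked through for store, the shuffle above does the following (a sketch):

/* store_deref(src[0] = deref, src[1] = value)  becomes  store_var:
 *   variables[0] <- chain rebuilt from src[0]
 *   src[0]       <- old src[1] (the stored value shifts down)
 *   src[1]       <- NIR_SRC_INIT (the vacated deref slot)       */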
static bool
nir_lower_deref_instrs_impl(nir_function_impl *impl,
enum nir_lower_deref_flags flags)
{
bool progress = false;
/* Walk the instructions in reverse order so that we can safely clean up
* the deref instructions after we clean up their uses.
*/
nir_foreach_block_reverse(block, impl) {
nir_foreach_instr_reverse_safe(instr, block) {
switch (instr->type) {
case nir_instr_type_deref:
if (list_empty(&nir_instr_as_deref(instr)->dest.ssa.uses)) {
nir_instr_remove(instr);
progress = true;
}
break;
case nir_instr_type_tex:
if (flags & nir_lower_texture_derefs)
progress |= nir_lower_deref_instrs_tex(nir_instr_as_tex(instr));
break;
case nir_instr_type_intrinsic:
progress |=
nir_lower_deref_instrs_intrin(nir_instr_as_intrinsic(instr),
flags);
break;
default:
break; /* Nothing to do */
}
}
}
if (progress) {
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
}
return progress;
}
bool
nir_lower_deref_instrs(nir_shader *shader,
enum nir_lower_deref_flags flags)
{
bool progress = false;
nir_foreach_function(function, shader) {
if (!function->impl)
continue;
progress |= nir_lower_deref_instrs_impl(function->impl, flags);
}
shader->lowered_derefs |= flags;
return progress;
}
void
nir_fixup_deref_modes(nir_shader *shader)
{


@ -165,8 +165,6 @@ hash_intrinsic(uint32_t hash, const nir_intrinsic_instr *instr)
hash = HASH(hash, instr->dest.ssa.bit_size);
}
assert(info->num_variables == 0);
hash = _mesa_fnv32_1a_accumulate_block(hash, instr->const_index,
info->num_indices
* sizeof(instr->const_index[0]));
@ -195,8 +193,6 @@ hash_tex(uint32_t hash, const nir_tex_instr *instr)
hash = HASH(hash, instr->texture_array_size);
hash = HASH(hash, instr->sampler_index);
assert(!instr->texture && !instr->sampler);
return hash;
}
@ -391,10 +387,6 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
return false;
}
/* Don't support un-lowered sampler derefs currently. */
assert(!tex1->texture && !tex1->sampler &&
!tex2->texture && !tex2->sampler);
return true;
}
case nir_instr_type_load_const: {
@ -453,8 +445,6 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
return false;
}
assert(info->num_variables == 0);
for (unsigned i = 0; i < info->num_indices; i++) {
if (intrinsic1->const_index[i] != intrinsic2->const_index[i])
return false;
@ -505,24 +495,15 @@ instr_can_rewrite(nir_instr *instr)
switch (instr->type) {
case nir_instr_type_alu:
case nir_instr_type_deref:
case nir_instr_type_tex:
case nir_instr_type_load_const:
case nir_instr_type_phi:
return true;
case nir_instr_type_tex: {
nir_tex_instr *tex = nir_instr_as_tex(instr);
/* Don't support un-lowered sampler derefs currently. */
if (tex->texture || tex->sampler)
return false;
return true;
}
case nir_instr_type_intrinsic: {
const nir_intrinsic_info *info =
&nir_intrinsic_infos[nir_instr_as_intrinsic(instr)->intrinsic];
return (info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
(info->flags & NIR_INTRINSIC_CAN_REORDER) &&
info->num_variables == 0; /* not implemented yet */
(info->flags & NIR_INTRINSIC_CAN_REORDER);
}
case nir_instr_type_call:
case nir_instr_type_jump:


@ -31,7 +31,7 @@ class Intrinsic(object):
"""Class that represents all the information about an intrinsic opcode.
NOTE: this must be kept in sync with nir_intrinsic_info.
"""
def __init__(self, name, src_components, dest_components, num_variables,
def __init__(self, name, src_components, dest_components,
indices, flags, sysval):
"""Parameters:
@ -42,7 +42,6 @@ class Intrinsic(object):
- dest_components: number of destination components, -1 means no
dest, 0 means number of components given in num_components field
in nir_intrinsic_instr.
- num_variables: the number of variables
- indices: list of constant indices
- flags: list of semantic flags
- sysval: is this a system-value intrinsic
@ -52,7 +51,6 @@ class Intrinsic(object):
if src_components:
assert isinstance(src_components[0], int)
assert isinstance(dest_components, int)
assert isinstance(num_variables, int)
assert isinstance(indices, list)
if indices:
assert isinstance(indices[0], str)
@ -66,7 +64,6 @@ class Intrinsic(object):
self.src_components = src_components
self.has_dest = (dest_components >= 0)
self.dest_components = dest_components
self.num_variables = num_variables
self.num_indices = len(indices)
self.indices = indices
self.flags = flags
@ -114,37 +111,20 @@ CAN_REORDER = "NIR_INTRINSIC_CAN_REORDER"
INTR_OPCODES = {}
def intrinsic(name, src_comp=[], dest_comp=-1, num_vars=0, indices=[],
def intrinsic(name, src_comp=[], dest_comp=-1, indices=[],
flags=[], sysval=False):
assert name not in INTR_OPCODES
INTR_OPCODES[name] = Intrinsic(name, src_comp, dest_comp, num_vars,
INTR_OPCODES[name] = Intrinsic(name, src_comp, dest_comp,
indices, flags, sysval)
intrinsic("nop", flags=[CAN_ELIMINATE])
intrinsic("load_param", dest_comp=0, indices=[PARAM_IDX], flags=[CAN_ELIMINATE])
intrinsic("load_var", dest_comp=0, num_vars=1, flags=[CAN_ELIMINATE])
intrinsic("store_var", src_comp=[0], num_vars=1, indices=[WRMASK])
intrinsic("copy_var", num_vars=2)
intrinsic("load_deref", dest_comp=0, src_comp=[1], flags=[CAN_ELIMINATE])
intrinsic("store_deref", src_comp=[1, 0], indices=[WRMASK])
intrinsic("copy_deref", src_comp=[1, 1])
# Interpolation of input. The interp_var_at* intrinsics are similar to the
# load_var intrinsic acting on a shader input except that they interpolate
# the input differently. The at_sample and at_offset intrinsics take an
# additional source that is an integer sample id or a vec2 position offset
# respectively.
intrinsic("interp_var_at_centroid", dest_comp=0, num_vars=1,
flags=[ CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_var_at_sample", src_comp=[1], dest_comp=0, num_vars=1,
flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_var_at_offset", src_comp=[2], dest_comp=0, num_vars=1,
flags=[CAN_ELIMINATE, CAN_REORDER])
# Interpolation of input. The interp_deref_at* intrinsics are similar to the
# load_var intrinsic acting on a shader input except that they interpolate the
# input differently. The at_sample and at_offset intrinsics take an
@ -278,17 +258,14 @@ intrinsic("set_vertex_count", src_comp=[1])
# lowered, variants take a constant buffer index and register offset.
def atomic(name, flags=[]):
intrinsic(name + "_var", dest_comp=1, num_vars=1, flags=flags)
intrinsic(name + "_deref", src_comp=[1], dest_comp=1, flags=flags)
intrinsic(name, src_comp=[1], dest_comp=1, indices=[BASE], flags=flags)
def atomic2(name):
intrinsic(name + "_var", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic(name + "_deref", src_comp=[1, 1], dest_comp=1)
intrinsic(name, src_comp=[1, 1], dest_comp=1, indices=[BASE])
def atomic3(name):
intrinsic(name + "_var", src_comp=[1, 1], dest_comp=1, num_vars=1)
intrinsic(name + "_deref", src_comp=[1, 1, 1], dest_comp=1)
intrinsic(name, src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
@ -304,33 +281,6 @@ atomic2("atomic_counter_xor")
atomic2("atomic_counter_exchange")
atomic3("atomic_counter_comp_swap")
# Image load, store and atomic intrinsics.
#
# All image intrinsics take an image target passed as a nir_variable. Image
# variables contain a number of memory and layout qualifiers that influence
# the semantics of the intrinsic.
#
# All image intrinsics take a four-coordinate vector and a sample index as
# first two sources, determining the location within the image that will be
# accessed by the intrinsic. Components not applicable to the image target
# in use are undefined. Image store takes an additional four-component
# argument with the value to be written, and image atomic operations take
# either one or two additional scalar arguments with the same meaning as in
# the ARB_shader_image_load_store specification.
intrinsic("image_var_load", src_comp=[4, 1], dest_comp=4, num_vars=1,
flags=[CAN_ELIMINATE])
intrinsic("image_var_store", src_comp=[4, 1, 4], num_vars=1)
intrinsic("image_var_atomic_add", src_comp=[4, 1, 1], dest_comp=1, num_vars=1)
intrinsic("image_var_atomic_min", src_comp=[4, 1, 1], dest_comp=1, num_vars=1)
intrinsic("image_var_atomic_max", src_comp=[4, 1, 1], dest_comp=1, num_vars=1)
intrinsic("image_var_atomic_and", src_comp=[4, 1, 1], dest_comp=1, num_vars=1)
intrinsic("image_var_atomic_or", src_comp=[4, 1, 1], dest_comp=1, num_vars=1)
intrinsic("image_var_atomic_xor", src_comp=[4, 1, 1], dest_comp=1, num_vars=1)
intrinsic("image_var_atomic_exchange", src_comp=[4, 1, 1], dest_comp=1, num_vars=1)
intrinsic("image_var_atomic_comp_swap", src_comp=[4, 1, 1, 1], dest_comp=1, num_vars=1)
intrinsic("image_var_size", dest_comp=0, num_vars=1, flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("image_var_samples", dest_comp=1, num_vars=1, flags=[CAN_ELIMINATE, CAN_REORDER])
# Image load, store and atomic intrinsics.
#
# All image intrinsics take an image target passed as a nir_variable. The
@ -383,31 +333,6 @@ intrinsic("vulkan_resource_index", src_comp=[1], dest_comp=1,
intrinsic("vulkan_resource_reindex", src_comp=[1, 1], dest_comp=1,
flags=[CAN_ELIMINATE, CAN_REORDER])
# variable atomic intrinsics
#
# All of these variable atomic memory operations read a value from memory,
# compute a new value using one of the operations below, write the new value
# to memory, and return the original value read.
#
# All operations take 1 source, except CompSwap, which takes 2. These sources
# represent:
#
# 0: The data parameter to the atomic function (i.e. the value to add
# in shared_atomic_add, etc).
# 1: For CompSwap only: the second data parameter.
#
# All operations take 1 variable deref.
intrinsic("var_atomic_add", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic("var_atomic_imin", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic("var_atomic_umin", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic("var_atomic_imax", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic("var_atomic_umax", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic("var_atomic_and", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic("var_atomic_or", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic("var_atomic_xor", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic("var_atomic_exchange", src_comp=[1], dest_comp=1, num_vars=1)
intrinsic("var_atomic_comp_swap", src_comp=[1, 1], dest_comp=1, num_vars=1)
# variable atomic intrinsics
#
# All of these variable atomic memory operations read a value from memory,
@ -484,7 +409,7 @@ intrinsic("shared_atomic_exchange", src_comp=[1, 1], dest_comp=1, indices=[BASE]
intrinsic("shared_atomic_comp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
def system_value(name, dest_comp, indices=[]):
intrinsic("load_" + name, [], dest_comp, 0, indices,
intrinsic("load_" + name, [], dest_comp, indices,
flags=[CAN_ELIMINATE, CAN_REORDER], sysval=True)
system_value("frag_coord", 4)


@ -36,7 +36,6 @@ const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics] = {
% endif
.has_dest = ${"true" if opcode.has_dest else "false"},
.dest_components = ${max(opcode.dest_components, 0)},
.num_variables = ${opcode.num_variables},
.num_indices = ${opcode.num_indices},
% if opcode.indices:
.index_map = {


@ -134,8 +134,6 @@ nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
assert(producer->info.stage != MESA_SHADER_FRAGMENT);
assert(consumer->info.stage != MESA_SHADER_VERTEX);
nir_assert_unlowered_derefs(producer, nir_lower_load_store_derefs);
nir_assert_unlowered_derefs(consumer, nir_lower_load_store_derefs);
uint64_t read[4] = { 0 }, written[4] = { 0 };
uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };


@ -41,8 +41,6 @@ nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
{
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
nir_function_impl *impl = function->impl;
nir_builder b;


@ -135,8 +135,6 @@ nir_lower_clamp_color_outputs(nir_shader *shader)
.shader = shader,
};
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress |= lower_impl(&state, function->impl);


@ -203,8 +203,6 @@ nir_lower_clip_cull_distance_arrays(nir_shader *nir)
{
bool progress = false;
nir_assert_unlowered_derefs(nir, nir_lower_load_store_derefs);
if (nir->info.stage <= MESA_SHADER_GEOMETRY)
progress |= combine_clip_cull(nir, &nir->outputs, true);


@ -253,11 +253,6 @@ nir_lower_drawpixels(nir_shader *shader,
.shader = shader,
};
/* note that this pass assumes texture/sampler derefs have already been
 * lowered to indices
 */
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_foreach_function(function, shader) {


@ -78,9 +78,6 @@ nir_lower_global_vars_to_local(nir_shader *shader)
_mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs | nir_lower_interp_derefs |
nir_lower_atomic_counter_derefs | nir_lower_atomic_derefs | nir_lower_image_derefs);
nir_foreach_function(function, shader) {
if (function->impl) {
nir_foreach_block(block, function->impl)


@ -205,8 +205,6 @@ nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes)
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs | nir_lower_interp_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress = lower_indirects_impl(function->impl, modes) || progress;


@ -510,8 +510,6 @@ nir_lower_io(nir_shader *shader, nir_variable_mode modes,
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs | nir_lower_interp_derefs | nir_lower_atomic_derefs);
nir_foreach_function(function, shader) {
if (function->impl) {
progress |= nir_lower_io_impl(function->impl, modes,


@ -353,8 +353,6 @@ nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
_mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs | nir_lower_interp_derefs);
uint64_t indirects[4] = {0}, patch_indirects[4] = {0};
lower_io_arrays_to_elements(shader, nir_var_shader_out, indirects,
@ -399,9 +397,6 @@ nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer)
_mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
nir_assert_unlowered_derefs(producer, nir_lower_load_store_derefs | nir_lower_interp_derefs);
nir_assert_unlowered_derefs(consumer, nir_lower_load_store_derefs | nir_lower_interp_derefs);
uint64_t indirects[4] = {0}, patch_indirects[4] = {0};
create_indirects_mask(producer, indirects, patch_indirects,
nir_var_shader_out);


@ -286,8 +286,6 @@ nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask)
_mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs | nir_lower_interp_derefs);
nir_foreach_function(function, shader) {
if (function->impl) {
nir_builder b;


@ -280,8 +280,6 @@ nir_lower_locals_to_regs(nir_shader *shader)
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress = nir_lower_locals_to_regs_impl(function->impl) || progress;


@ -301,8 +301,6 @@ nir_lower_phis_to_scalar(nir_shader *shader)
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress = lower_phis_to_scalar_impl(function->impl) || progress;


@ -204,8 +204,6 @@ nir_lower_system_values(nir_shader *shader)
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress = convert_impl(function->impl) || progress;


@ -882,8 +882,6 @@ nir_lower_tex(nir_shader *shader, const nir_lower_tex_options *options)
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_texture_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress |= nir_lower_tex_impl(function->impl, options);


@ -155,8 +155,6 @@ nir_lower_var_copies(nir_shader *shader)
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress |= lower_var_copies_impl(function->impl);


@ -753,8 +753,6 @@ nir_lower_vars_to_ssa(nir_shader *shader)
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress |= nir_lower_vars_to_ssa_impl(function->impl);


@ -107,8 +107,6 @@ nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading)
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl) {
nir_builder_init(&b, function->impl);


@ -349,8 +349,6 @@ nir_lower_wpos_ytransform(nir_shader *shader,
.shader = shader,
};
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_foreach_function(function, shader) {


@ -124,47 +124,11 @@ constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
return true;
}
static bool
constant_fold_deref(nir_instr *instr, nir_deref_var *deref)
{
bool progress = false;
for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
if (tail->deref_type != nir_deref_type_array)
continue;
nir_deref_array *arr = nir_deref_as_array(tail);
if (arr->deref_array_type == nir_deref_array_type_indirect &&
arr->indirect.is_ssa &&
arr->indirect.ssa->parent_instr->type == nir_instr_type_load_const) {
nir_load_const_instr *indirect =
nir_instr_as_load_const(arr->indirect.ssa->parent_instr);
arr->base_offset += indirect->value.u32[0];
/* Clear out the source */
nir_instr_rewrite_src(instr, &arr->indirect, nir_src_for_ssa(NULL));
arr->deref_array_type = nir_deref_array_type_direct;
progress = true;
}
}
return progress;
}
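
In the printed notation used elsewhere in NIR, the fold amounts to (a sketch; ssa_4 is a made-up name):

/* before: arr[2 + ssa_4]   indirect; ssa_4 = load_const 3
 * after:  arr[5]           direct; the indirect src is cleared */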
static bool
constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
{
bool progress = false;
unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
for (unsigned i = 0; i < num_vars; i++) {
progress |= constant_fold_deref(&instr->instr, instr->variables[i]);
}
if (instr->intrinsic == nir_intrinsic_discard_if) {
nir_const_value *src_val = nir_src_as_const_value(instr->src[0]);
if (src_val && src_val->u32[0] == NIR_FALSE) {
@ -191,20 +155,6 @@ constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
return progress;
}
static bool
constant_fold_tex_instr(nir_tex_instr *instr)
{
bool progress = false;
if (instr->texture)
progress |= constant_fold_deref(&instr->instr, instr->texture);
if (instr->sampler)
progress |= constant_fold_deref(&instr->instr, instr->sampler);
return progress;
}
static bool
constant_fold_block(nir_block *block, void *mem_ctx)
{
@ -219,9 +169,6 @@ constant_fold_block(nir_block *block, void *mem_ctx)
progress |=
constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
break;
case nir_instr_type_tex:
progress |= constant_fold_tex_instr(nir_instr_as_tex(instr));
break;
default:
/* Don't know how to constant fold */
break;


@ -744,8 +744,6 @@ nir_opt_copy_prop_vars(nir_shader *shader)
{
struct copy_prop_var_state state;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
state.shader = shader;
state.mem_ctx = ralloc_context(NULL);
list_inithead(&state.copies);


@ -218,28 +218,6 @@ copy_prop_dest(nir_dest *dest, nir_instr *instr)
return false;
}
static bool
copy_prop_deref_var(nir_instr *instr, nir_deref_var *deref_var)
{
if (!deref_var)
return false;
bool progress = false;
for (nir_deref *deref = deref_var->deref.child;
deref; deref = deref->child) {
if (deref->deref_type != nir_deref_type_array)
continue;
nir_deref_array *arr = nir_deref_as_array(deref);
if (arr->deref_array_type != nir_deref_array_type_indirect)
continue;
while (copy_prop_src(&arr->indirect, instr, NULL, 1))
progress = true;
}
return progress;
}
static bool
copy_prop_instr(nir_instr *instr)
{
@ -284,11 +262,6 @@ copy_prop_instr(nir_instr *instr)
progress = true;
}
if (copy_prop_deref_var(instr, tex->texture))
progress = true;
if (copy_prop_deref_var(instr, tex->sampler))
progress = true;
while (copy_prop_dest(&tex->dest, instr))
progress = true;
@ -305,12 +278,6 @@ copy_prop_instr(nir_instr *instr)
progress = true;
}
for (unsigned i = 0;
i < nir_intrinsic_infos[intrin->intrinsic].num_variables; i++) {
if (copy_prop_deref_var(instr, intrin->variables[i]))
progress = true;
}
if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
while (copy_prop_dest(&intrin->dest, instr))
progress = true;


@ -255,8 +255,6 @@ nir_opt_peephole_select(nir_shader *shader, unsigned limit)
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress |= nir_opt_peephole_select_impl(function->impl, limit);


@ -137,8 +137,6 @@ nir_opt_undef(nir_shader *shader)
nir_builder b;
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl) {
nir_builder_init(&b, function->impl);


@ -640,81 +640,6 @@ print_deref_instr(nir_deref_instr *instr, print_state *state)
}
}
static void
print_var(nir_variable *var, print_state *state)
{
FILE *fp = state->fp;
fprintf(fp, "%s", get_var_name(var, state));
}
static void
print_deref_var(nir_deref_var *deref, print_state *state)
{
print_var(deref->var, state);
}
static void
print_deref_array(nir_deref_array *deref, print_state *state)
{
FILE *fp = state->fp;
fprintf(fp, "[");
switch (deref->deref_array_type) {
case nir_deref_array_type_direct:
fprintf(fp, "%u", deref->base_offset);
break;
case nir_deref_array_type_indirect:
if (deref->base_offset != 0)
fprintf(fp, "%u + ", deref->base_offset);
print_src(&deref->indirect, state);
break;
case nir_deref_array_type_wildcard:
fprintf(fp, "*");
break;
}
fprintf(fp, "]");
}
static void
print_deref_struct(nir_deref_struct *deref, const struct glsl_type *parent_type,
print_state *state)
{
FILE *fp = state->fp;
fprintf(fp, ".%s", glsl_get_struct_elem_name(parent_type, deref->index));
}
static void
print_deref(nir_deref_var *deref, print_state *state)
{
nir_deref *tail = &deref->deref;
nir_deref *pretail = NULL;
while (tail != NULL) {
switch (tail->deref_type) {
case nir_deref_type_var:
assert(pretail == NULL);
assert(tail == &deref->deref);
print_deref_var(deref, state);
break;
case nir_deref_type_array:
assert(pretail != NULL);
print_deref_array(nir_deref_as_array(tail), state);
break;
case nir_deref_type_struct:
assert(pretail != NULL);
print_deref_struct(nir_deref_as_struct(tail),
pretail->type, state);
break;
default:
unreachable("Invalid deref type");
}
pretail = tail;
tail = pretail->child;
}
}
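
For reference, the three array-deref flavors rendered as follows (sample output, with made-up names):

color[2]           direct:   base_offset only
color[2 + ssa_7]   indirect: base_offset (if nonzero) plus the source
color[*]           wildcard: every element at once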
static void
print_intrinsic_instr(nir_intrinsic_instr *instr, print_state *state)
{
@ -738,15 +663,6 @@ print_intrinsic_instr(nir_intrinsic_instr *instr, print_state *state)
fprintf(fp, ") (");
for (unsigned i = 0; i < info->num_variables; i++) {
if (i != 0)
fprintf(fp, ", ");
print_deref(instr->variables[i], state);
}
fprintf(fp, ") (");
for (unsigned i = 0; i < info->num_indices; i++) {
if (i != 0)
fprintf(fp, ", ");
@ -942,19 +858,6 @@ print_tex_instr(nir_tex_instr *instr, print_state *state)
if (instr->op == nir_texop_tg4) {
fprintf(fp, "%u (gather_component), ", instr->component);
}
if (instr->texture) {
print_deref(instr->texture, state);
fprintf(fp, " (texture)");
if (instr->sampler) {
print_deref(instr->sampler, state);
fprintf(fp, " (sampler)");
}
} else {
assert(instr->sampler == NULL);
fprintf(fp, "%u (texture) %u (sampler)",
instr->texture_index, instr->sampler_index);
}
}
static void


@ -185,8 +185,6 @@ nir_propagate_invariant(nir_shader *shader)
struct set *invariants = _mesa_set_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
bool progress = false;
nir_foreach_function(function, shader) {
if (function->impl && propagate_invariant_impl(function->impl, invariants))


@ -164,8 +164,6 @@ nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes)
struct set *live =
_mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
nir_assert_unlowered_derefs(shader, nir_lower_all_derefs);
add_var_use_shader(shader, live, modes);
if (modes & nir_var_uniform)


@ -369,81 +369,6 @@ read_dest(read_ctx *ctx, nir_dest *dst, nir_instr *instr)
}
}
static void
write_deref_chain(write_ctx *ctx, const nir_deref_var *deref_var)
{
write_object(ctx, deref_var->var);
uint32_t len = 0;
for (const nir_deref *d = deref_var->deref.child; d; d = d->child)
len++;
blob_write_uint32(ctx->blob, len);
for (const nir_deref *d = deref_var->deref.child; d; d = d->child) {
blob_write_uint32(ctx->blob, d->deref_type);
switch (d->deref_type) {
case nir_deref_type_array: {
const nir_deref_array *deref_array = nir_deref_as_array(d);
blob_write_uint32(ctx->blob, deref_array->deref_array_type);
blob_write_uint32(ctx->blob, deref_array->base_offset);
if (deref_array->deref_array_type == nir_deref_array_type_indirect)
write_src(ctx, &deref_array->indirect);
break;
}
case nir_deref_type_struct: {
const nir_deref_struct *deref_struct = nir_deref_as_struct(d);
blob_write_uint32(ctx->blob, deref_struct->index);
break;
}
case nir_deref_type_var:
unreachable("Invalid deref type");
}
encode_type_to_blob(ctx->blob, d->type);
}
}
static nir_deref_var *
read_deref_chain(read_ctx *ctx, void *mem_ctx)
{
nir_variable *var = read_object(ctx);
nir_deref_var *deref_var = nir_deref_var_create(mem_ctx, var);
uint32_t len = blob_read_uint32(ctx->blob);
nir_deref *tail = &deref_var->deref;
for (uint32_t i = 0; i < len; i++) {
nir_deref_type deref_type = blob_read_uint32(ctx->blob);
nir_deref *deref = NULL;
switch (deref_type) {
case nir_deref_type_array: {
nir_deref_array *deref_array = nir_deref_array_create(tail);
deref_array->deref_array_type = blob_read_uint32(ctx->blob);
deref_array->base_offset = blob_read_uint32(ctx->blob);
if (deref_array->deref_array_type == nir_deref_array_type_indirect)
read_src(ctx, &deref_array->indirect, mem_ctx);
deref = &deref_array->deref;
break;
}
case nir_deref_type_struct: {
uint32_t index = blob_read_uint32(ctx->blob);
nir_deref_struct *deref_struct = nir_deref_struct_create(tail, index);
deref = &deref_struct->deref;
break;
}
case nir_deref_type_var:
unreachable("Invalid deref type");
}
deref->type = decode_type_from_blob(ctx->blob);
tail->child = deref;
tail = deref;
}
return deref_var;
}
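
The blob layout these two functions agree on, reconstructed from the code above, is:

object id of the nir_variable
u32: number of links in the chain
per link:
  u32: deref_type
  array links:  u32 deref_array_type, u32 base_offset,
                then the indirect src only when indirect
  struct links: u32 field index
  followed by the encoded glsl_type of the link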
static void
write_alu(write_ctx *ctx, const nir_alu_instr *alu)
{
@ -570,7 +495,6 @@ write_intrinsic(write_ctx *ctx, const nir_intrinsic_instr *intrin)
{
blob_write_uint32(ctx->blob, intrin->intrinsic);
unsigned num_variables = nir_intrinsic_infos[intrin->intrinsic].num_variables;
unsigned num_srcs = nir_intrinsic_infos[intrin->intrinsic].num_srcs;
unsigned num_indices = nir_intrinsic_infos[intrin->intrinsic].num_indices;
@ -579,9 +503,6 @@ write_intrinsic(write_ctx *ctx, const nir_intrinsic_instr *intrin)
if (nir_intrinsic_infos[intrin->intrinsic].has_dest)
write_dest(ctx, &intrin->dest);
for (unsigned i = 0; i < num_variables; i++)
write_deref_chain(ctx, intrin->variables[i]);
for (unsigned i = 0; i < num_srcs; i++)
write_src(ctx, &intrin->src[i]);
@ -596,7 +517,6 @@ read_intrinsic(read_ctx *ctx)
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(ctx->nir, op);
unsigned num_variables = nir_intrinsic_infos[op].num_variables;
unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
unsigned num_indices = nir_intrinsic_infos[op].num_indices;
@ -605,9 +525,6 @@ read_intrinsic(read_ctx *ctx)
if (nir_intrinsic_infos[op].has_dest)
read_dest(ctx, &intrin->dest, &intrin->instr);
for (unsigned i = 0; i < num_variables; i++)
intrin->variables[i] = read_deref_chain(ctx, &intrin->instr);
for (unsigned i = 0; i < num_srcs; i++)
read_src(ctx, &intrin->src[i], &intrin->instr);
@ -671,8 +588,6 @@ union packed_tex_data {
unsigned is_shadow:1;
unsigned is_new_style_shadow:1;
unsigned component:2;
unsigned has_texture_deref:1;
unsigned has_sampler_deref:1;
unsigned unused:10; /* Mark unused for valgrind. */
} u;
};
@ -695,8 +610,6 @@ write_tex(write_ctx *ctx, const nir_tex_instr *tex)
.u.is_shadow = tex->is_shadow,
.u.is_new_style_shadow = tex->is_new_style_shadow,
.u.component = tex->component,
.u.has_texture_deref = tex->texture != NULL,
.u.has_sampler_deref = tex->sampler != NULL,
};
blob_write_uint32(ctx->blob, packed.u32);
@ -705,11 +618,6 @@ write_tex(write_ctx *ctx, const nir_tex_instr *tex)
blob_write_uint32(ctx->blob, tex->src[i].src_type);
write_src(ctx, &tex->src[i].src);
}
if (tex->texture)
write_deref_chain(ctx, tex->texture);
if (tex->sampler)
write_deref_chain(ctx, tex->sampler);
}
static nir_tex_instr *
@ -739,11 +647,6 @@ read_tex(read_ctx *ctx)
read_src(ctx, &tex->src[i].src, &tex->instr);
}
tex->texture = packed.u.has_texture_deref ?
read_deref_chain(ctx, &tex->instr) : NULL;
tex->sampler = packed.u.has_sampler_deref ?
read_deref_chain(ctx, &tex->instr) : NULL;
return tex;
}
@ -1203,7 +1106,6 @@ nir_serialize(struct blob *blob, const nir_shader *nir)
blob_write_uint32(blob, nir->num_uniforms);
blob_write_uint32(blob, nir->num_outputs);
blob_write_uint32(blob, nir->num_shared);
blob_write_uint32(blob, nir->lowered_derefs);
blob_write_uint32(blob, exec_list_length(&nir->functions));
nir_foreach_function(fxn, nir) {
@ -1259,7 +1161,6 @@ nir_deserialize(void *mem_ctx,
ctx.nir->num_uniforms = blob_read_uint32(blob);
ctx.nir->num_outputs = blob_read_uint32(blob);
ctx.nir->num_shared = blob_read_uint32(blob);
ctx.nir->lowered_derefs = blob_read_uint32(blob);
unsigned num_functions = blob_read_uint32(blob);
for (unsigned i = 0; i < num_functions; i++)


@ -177,8 +177,6 @@ nir_split_per_member_structs(nir_shader *shader)
_mesa_hash_table_create(dead_ctx, _mesa_hash_pointer,
_mesa_key_pointer_equal);
nir_assert_unlowered_derefs(shader, nir_lower_all_derefs);
progress |= split_variables_in_list(&shader->inputs, shader,
var_to_member_map, dead_ctx);
progress |= split_variables_in_list(&shader->outputs, shader,


@ -123,8 +123,6 @@ nir_split_var_copies(nir_shader *shader)
{
bool progress = false;
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
nir_foreach_function(function, shader) {
if (function->impl)
progress = split_var_copies_impl(function->impl) || progress;


@ -392,55 +392,6 @@ validate_alu_instr(nir_alu_instr *instr, validate_state *state)
validate_alu_dest(instr, state);
}
static void
validate_deref_chain(nir_deref *deref, nir_variable_mode mode,
validate_state *state)
{
validate_assert(state, deref->child == NULL || ralloc_parent(deref->child) == deref);
nir_deref *parent = NULL;
while (deref != NULL) {
switch (deref->deref_type) {
case nir_deref_type_array:
if (mode == nir_var_shared) {
/* Shared variables have a bit more relaxed rules because we need
* to be able to handle array derefs on vectors. Fortunately,
* nir_lower_io handles these just fine.
*/
validate_assert(state, glsl_type_is_array(parent->type) ||
glsl_type_is_matrix(parent->type) ||
glsl_type_is_vector(parent->type));
} else {
/* Most of NIR cannot handle array derefs on vectors */
validate_assert(state, glsl_type_is_array(parent->type) ||
glsl_type_is_matrix(parent->type));
}
validate_assert(state, deref->type == glsl_get_array_element(parent->type));
if (nir_deref_as_array(deref)->deref_array_type ==
nir_deref_array_type_indirect)
validate_src(&nir_deref_as_array(deref)->indirect, state, 32, 1);
break;
case nir_deref_type_struct:
assume(parent); /* cannot happen: deref chain starts w/ nir_deref_var */
validate_assert(state, deref->type ==
glsl_get_struct_field(parent->type,
nir_deref_as_struct(deref)->index));
break;
case nir_deref_type_var:
break;
default:
validate_assert(state, !"Invalid deref type");
break;
}
parent = deref;
deref = deref->child;
}
}
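
The mode-dependent array rule above accepts and rejects, for example (a sketch):

/* float a[4];      a[i]  valid in any mode (array element)
 * mat4 m;          m[i]  valid in any mode (matrix column)
 * shared vec4 v;   v[i]  valid only for nir_var_shared
 * vec4 v;          v[i]  otherwise invalid: most of NIR cannot
 *                        handle array derefs on vectors        */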
static void
validate_var_use(nir_variable *var, validate_state *state)
{
@ -450,18 +401,6 @@ validate_var_use(nir_variable *var, validate_state *state)
validate_assert(state, (nir_function_impl *) entry->data == state->impl);
}
static void
validate_deref_var(void *parent_mem_ctx, nir_deref_var *deref, validate_state *state)
{
validate_assert(state, deref != NULL);
validate_assert(state, ralloc_parent(deref) == parent_mem_ctx);
validate_assert(state, deref->deref.type == deref->var->type);
validate_var_use(deref->var, state);
validate_deref_chain(&deref->deref, deref->var->data.mode, state);
}
static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
@ -590,41 +529,6 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
break;
}
case nir_intrinsic_load_var: {
const struct glsl_type *type =
nir_deref_tail(&instr->variables[0]->deref)->type;
validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
(instr->variables[0]->var->data.mode == nir_var_uniform &&
glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
validate_assert(state, instr->num_components ==
glsl_get_vector_elements(type));
dest_bit_size = glsl_get_bit_size(type);
break;
}
case nir_intrinsic_store_var: {
const struct glsl_type *type =
nir_deref_tail(&instr->variables[0]->deref)->type;
validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
(instr->variables[0]->var->data.mode == nir_var_uniform &&
glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
validate_assert(state, instr->num_components == glsl_get_vector_elements(type));
src_bit_sizes[0] = glsl_get_bit_size(type);
validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
instr->variables[0]->var->data.mode != nir_var_uniform &&
instr->variables[0]->var->data.mode != nir_var_shader_storage);
validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
break;
}
case nir_intrinsic_copy_var:
validate_assert(state, nir_deref_tail(&instr->variables[0]->deref)->type ==
nir_deref_tail(&instr->variables[1]->deref)->type);
validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
instr->variables[0]->var->data.mode != nir_var_uniform &&
instr->variables[0]->var->data.mode != nir_var_shader_storage);
break;
default:
break;
}
@ -638,11 +542,6 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
}
unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
for (unsigned i = 0; i < num_vars; i++) {
validate_deref_var(instr, instr->variables[i], state);
}
if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
unsigned components_written = nir_intrinsic_dest_components(instr);
@ -666,12 +565,6 @@ validate_tex_instr(nir_tex_instr *instr, validate_state *state)
0, nir_tex_instr_src_size(instr, i));
}
if (instr->texture != NULL)
validate_deref_var(instr, instr->texture, state);
if (instr->sampler != NULL)
validate_deref_var(instr, instr->sampler, state);
validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));
}


@ -113,7 +113,6 @@ load_glsl(unsigned num_files, char* const* files, gl_shader_stage stage)
errx(1, "couldn't parse `%s'", files[0]);
nir_shader *nir = glsl_to_nir(prog, stage, ir3_get_compiler_options(compiler));
nir_lower_deref_instrs(nir, ~0);
/* required NIR passes: */
/* TODO cmdline args for some of the conditional lowering passes? */
@ -232,8 +231,6 @@ load_spirv(const char *filename, const char *entry, gl_shader_stage stage)
&spirv_options,
ir3_get_compiler_options(compiler));
NIR_PASS_V(entry_point->shader, nir_lower_deref_instrs, ~0);
nir_print_shader(entry_point->shader, stdout);
return entry_point->shader;


@ -109,7 +109,6 @@ create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
}
nir_validate_shader(nir);
nir_lower_deref_instrs(nir, ~0);
nir = brw_preprocess_nir(compiler, nir);


@ -253,7 +253,6 @@ lower_builtin_impl(lower_builtin_state *state, nir_function_impl *impl)
void
st_nir_lower_builtin(nir_shader *shader)
{
nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);
lower_builtin_state state;
state.shader = shader;
nir_foreach_function(function, shader) {