nir: make nir_const_value scalar

v2: remove & operator in a couple of memsets
    add some memsets
v3: fixup lima

Signed-off-by: Karol Herbst <kherbst@redhat.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net> (v2)
Karol Herbst, 2019-03-27 00:59:03 +01:00
commit 14531d676b (parent 73d883037d)
43 changed files with 470 additions and 416 deletions
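
The change this diff makes everywhere below: nir_const_value stops being a union of per-component arrays and becomes a single scalar union, so every consumer indexes the component first and picks the typed member second. A condensed sketch of the old and new shapes (member list abbreviated; the full definition is in the nir.h hunk further down):

/* before: fixed-size arrays inside one union */
typedef union {
    float    f32[NIR_MAX_VEC_COMPONENTS];
    uint32_t u32[NIR_MAX_VEC_COMPONENTS];
    /* ...i8/u8, i16/u16, i64/u64, f64, b... */
} nir_const_value;

uint32_t x = instr->value.u32[i];   /* component index inside the member */

/* after: one scalar union per component; callers hold an array of these */
typedef union {
    float    f32;
    uint32_t u32;
    /* ...i8/u8, i16/u16, i64/u64, f64, b... */
} nir_const_value;

uint32_t x = instr->value[i].u32;   /* index the array, then pick the member */

Storage that used to be a single nir_const_value (for example the immediates built in nir_builder.h) becomes a small array of them, which is why several memsets below switch from &v to v.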


@ -1126,19 +1126,19 @@ static void visit_load_const(struct ac_nir_context *ctx,
switch (instr->def.bit_size) {
case 8:
values[i] = LLVMConstInt(element_type,
instr->value.u8[i], false);
instr->value[i].u8, false);
break;
case 16:
values[i] = LLVMConstInt(element_type,
instr->value.u16[i], false);
instr->value[i].u16, false);
break;
case 32:
values[i] = LLVMConstInt(element_type,
instr->value.u32[i], false);
instr->value[i].u32, false);
break;
case 64:
values[i] = LLVMConstInt(element_type,
instr->value.u64[i], false);
instr->value[i].u64, false);
break;
default:
fprintf(stderr,


@ -1541,7 +1541,7 @@ ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
*/
struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
for (int i = 0; i < instr->def.num_components; i++)
qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);
qregs[i] = vir_uniform_ui(c, instr->value[i].u32);
_mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}


@ -124,25 +124,25 @@ copy_constant_to_storage(union gl_constant_value *storage,
for (unsigned int row = 0; row < n_rows; row++) {
switch (base_type) {
case GLSL_TYPE_UINT:
storage[i].u = val->values[column].u32[row];
storage[i].u = val->values[column][row].u32;
break;
case GLSL_TYPE_INT:
case GLSL_TYPE_SAMPLER:
storage[i].i = val->values[column].i32[row];
storage[i].i = val->values[column][row].i32;
break;
case GLSL_TYPE_FLOAT:
storage[i].f = val->values[column].f32[row];
storage[i].f = val->values[column][row].f32;
break;
case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_UINT64:
case GLSL_TYPE_INT64:
/* XXX need to check on big-endian */
memcpy(&storage[i * 2].u,
&val->values[column].f64[row],
&val->values[column][row].f64,
sizeof(double));
break;
case GLSL_TYPE_BOOL:
storage[i].b = val->values[column].u32[row] ? boolean_true : 0;
storage[i].b = val->values[column][row].u32 ? boolean_true : 0;
break;
case GLSL_TYPE_ARRAY:
case GLSL_TYPE_STRUCT:


@ -308,9 +308,9 @@ nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
for (unsigned r = 0; r < rows; r++)
if (supports_ints)
ret->values[0].u32[r] = ir->value.u[r];
ret->values[0][r].u32 = ir->value.u[r];
else
ret->values[0].f32[r] = ir->value.u[r];
ret->values[0][r].f32 = ir->value.u[r];
break;
@ -320,23 +320,23 @@ nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
for (unsigned r = 0; r < rows; r++)
if (supports_ints)
ret->values[0].i32[r] = ir->value.i[r];
ret->values[0][r].i32 = ir->value.i[r];
else
ret->values[0].f32[r] = ir->value.i[r];
ret->values[0][r].f32 = ir->value.i[r];
break;
case GLSL_TYPE_FLOAT:
for (unsigned c = 0; c < cols; c++) {
for (unsigned r = 0; r < rows; r++)
ret->values[c].f32[r] = ir->value.f[c * rows + r];
ret->values[c][r].f32 = ir->value.f[c * rows + r];
}
break;
case GLSL_TYPE_DOUBLE:
for (unsigned c = 0; c < cols; c++) {
for (unsigned r = 0; r < rows; r++)
ret->values[c].f64[r] = ir->value.d[c * rows + r];
ret->values[c][r].f64 = ir->value.d[c * rows + r];
}
break;
@ -345,7 +345,7 @@ nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
assert(cols == 1);
for (unsigned r = 0; r < rows; r++)
ret->values[0].u64[r] = ir->value.u64[r];
ret->values[0][r].u64 = ir->value.u64[r];
break;
case GLSL_TYPE_INT64:
@ -353,7 +353,7 @@ nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
assert(cols == 1);
for (unsigned r = 0; r < rows; r++)
ret->values[0].i64[r] = ir->value.i64[r];
ret->values[0][r].i64 = ir->value.i64[r];
break;
case GLSL_TYPE_BOOL:
@ -361,7 +361,7 @@ nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
assert(cols == 1);
for (unsigned r = 0; r < rows; r++)
ret->values[0].b[r] = ir->value.b[r];
ret->values[0][r].b = ir->value.b[r];
break;


@ -473,7 +473,8 @@ nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
unsigned bit_size)
{
nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
nir_load_const_instr *instr =
rzalloc_size(shader, sizeof(*instr) + num_components * sizeof(*instr->value));
instr_init(&instr->instr, nir_instr_type_load_const);
nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);
@ -634,10 +635,11 @@ static nir_const_value
const_value_float(double d, unsigned bit_size)
{
nir_const_value v;
memset(&v, 0, sizeof(v));
switch (bit_size) {
case 16: v.u16[0] = _mesa_float_to_half(d); break;
case 32: v.f32[0] = d; break;
case 64: v.f64[0] = d; break;
case 16: v.u16 = _mesa_float_to_half(d); break;
case 32: v.f32 = d; break;
case 64: v.f64 = d; break;
default:
unreachable("Invalid bit size");
}
@ -648,12 +650,13 @@ static nir_const_value
const_value_int(int64_t i, unsigned bit_size)
{
nir_const_value v;
memset(&v, 0, sizeof(v));
switch (bit_size) {
case 1: v.b[0] = i & 1; break;
case 8: v.i8[0] = i; break;
case 16: v.i16[0] = i; break;
case 32: v.i32[0] = i; break;
case 64: v.i64[0] = i; break;
case 1: v.b = i & 1; break;
case 8: v.i8 = i; break;
case 16: v.i16 = i; break;
case 32: v.i32 = i; break;
case 64: v.i64 = i; break;
default:
unreachable("Invalid bit size");
}
@ -1210,11 +1213,11 @@ nir_src_comp_as_int(nir_src src, unsigned comp)
assert(comp < load->def.num_components);
switch (load->def.bit_size) {
/* int1_t uses 0/-1 convention */
case 1: return -(int)load->value.b[comp];
case 8: return load->value.i8[comp];
case 16: return load->value.i16[comp];
case 32: return load->value.i32[comp];
case 64: return load->value.i64[comp];
case 1: return -(int)load->value[comp].b;
case 8: return load->value[comp].i8;
case 16: return load->value[comp].i16;
case 32: return load->value[comp].i32;
case 64: return load->value[comp].i64;
default:
unreachable("Invalid bit size");
}
@ -1228,11 +1231,11 @@ nir_src_comp_as_uint(nir_src src, unsigned comp)
assert(comp < load->def.num_components);
switch (load->def.bit_size) {
case 1: return load->value.b[comp];
case 8: return load->value.u8[comp];
case 16: return load->value.u16[comp];
case 32: return load->value.u32[comp];
case 64: return load->value.u64[comp];
case 1: return load->value[comp].b;
case 8: return load->value[comp].u8;
case 16: return load->value[comp].u16;
case 32: return load->value[comp].u32;
case 64: return load->value[comp].u64;
default:
unreachable("Invalid bit size");
}
@ -1257,9 +1260,9 @@ nir_src_comp_as_float(nir_src src, unsigned comp)
assert(comp < load->def.num_components);
switch (load->def.bit_size) {
case 16: return _mesa_half_to_float(load->value.u16[comp]);
case 32: return load->value.f32[comp];
case 64: return load->value.f64[comp];
case 16: return _mesa_half_to_float(load->value[comp].u16);
case 32: return load->value[comp].f32;
case 64: return load->value[comp].f64;
default:
unreachable("Invalid bit size");
}
@ -1304,7 +1307,7 @@ nir_src_as_const_value(nir_src src)
nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
return &load->value;
return load->value;
}
/**


@ -121,19 +121,25 @@ typedef enum {
} nir_rounding_mode;
typedef union {
bool b[NIR_MAX_VEC_COMPONENTS];
float f32[NIR_MAX_VEC_COMPONENTS];
double f64[NIR_MAX_VEC_COMPONENTS];
int8_t i8[NIR_MAX_VEC_COMPONENTS];
uint8_t u8[NIR_MAX_VEC_COMPONENTS];
int16_t i16[NIR_MAX_VEC_COMPONENTS];
uint16_t u16[NIR_MAX_VEC_COMPONENTS];
int32_t i32[NIR_MAX_VEC_COMPONENTS];
uint32_t u32[NIR_MAX_VEC_COMPONENTS];
int64_t i64[NIR_MAX_VEC_COMPONENTS];
uint64_t u64[NIR_MAX_VEC_COMPONENTS];
bool b;
float f32;
double f64;
int8_t i8;
uint8_t u8;
int16_t i16;
uint16_t u16;
int32_t i32;
uint32_t u32;
int64_t i64;
uint64_t u64;
} nir_const_value;
#define nir_const_value_to_array(arr, c, components, m) \
{ \
for (unsigned i = 0; i < components; ++i) \
arr[i] = c[i].m; \
} while (false)
typedef struct nir_constant {
/**
* Value of the constant.
@ -142,7 +148,7 @@ typedef struct nir_constant {
* by the type associated with the \c nir_variable. Constants may be
* scalars, vectors, or matrices.
*/
nir_const_value values[NIR_MAX_MATRIX_COLUMNS];
nir_const_value values[NIR_MAX_MATRIX_COLUMNS][NIR_MAX_VEC_COMPONENTS];
/* we could get this from the var->type but makes clone *much* easier to
* not have to care about the type.
@ -1715,11 +1721,16 @@ bool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex);
typedef struct {
nir_instr instr;
nir_const_value value;
nir_ssa_def def;
nir_const_value value[];
} nir_load_const_instr;
#define nir_const_load_to_arr(arr, l, m) \
{ \
nir_const_value_to_array(arr, l->value, l->def.num_components, m); \
} while (false);
typedef enum {
nir_jump_return,
nir_jump_break,

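The two macros added above restore a flat "array of one member" view for consumers that still want it. A hypothetical use, assuming a nir_load_const_instr *load_const holding float data (the ir2 hunk at the end of this commit uses the same pattern):

float comps[NIR_MAX_VEC_COMPONENTS];

/* copy every component's f32 member out of the load_const */
nir_const_load_to_arr(comps, load_const, f32);

/* equivalent, spelled out via the lower-level helper */
nir_const_value_to_array(comps, load_const->value, load_const->def.num_components, f32);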

@ -194,14 +194,14 @@ nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
static inline nir_ssa_def *
nir_build_imm(nir_builder *build, unsigned num_components,
unsigned bit_size, nir_const_value value)
unsigned bit_size, const nir_const_value *value)
{
nir_load_const_instr *load_const =
nir_load_const_instr_create(build->shader, num_components, bit_size);
if (!load_const)
return NULL;
load_const->value = value;
memcpy(load_const->value, value, sizeof(nir_const_value) * num_components);
nir_builder_instr_insert(build, &load_const->instr);
@ -227,9 +227,9 @@ nir_imm_bool(nir_builder *build, bool x)
nir_const_value v;
memset(&v, 0, sizeof(v));
v.b[0] = x;
v.b = x;
return nir_build_imm(build, 1, 1, v);
return nir_build_imm(build, 1, 1, &v);
}
static inline nir_ssa_def *
@ -250,9 +250,9 @@ nir_imm_float16(nir_builder *build, float x)
nir_const_value v;
memset(&v, 0, sizeof(v));
v.u16[0] = _mesa_float_to_half(x);
v.u16 = _mesa_float_to_half(x);
return nir_build_imm(build, 1, 16, v);
return nir_build_imm(build, 1, 16, &v);
}
static inline nir_ssa_def *
@ -261,9 +261,9 @@ nir_imm_float(nir_builder *build, float x)
nir_const_value v;
memset(&v, 0, sizeof(v));
v.f32[0] = x;
v.f32 = x;
return nir_build_imm(build, 1, 32, v);
return nir_build_imm(build, 1, 32, &v);
}
static inline nir_ssa_def *
@ -272,9 +272,9 @@ nir_imm_double(nir_builder *build, double x)
nir_const_value v;
memset(&v, 0, sizeof(v));
v.f64[0] = x;
v.f64 = x;
return nir_build_imm(build, 1, 64, v);
return nir_build_imm(build, 1, 64, &v);
}
static inline nir_ssa_def *
@ -295,11 +295,11 @@ nir_imm_floatN_t(nir_builder *build, double x, unsigned bit_size)
static inline nir_ssa_def *
nir_imm_vec2(nir_builder *build, float x, float y)
{
nir_const_value v;
nir_const_value v[2];
memset(&v, 0, sizeof(v));
v.f32[0] = x;
v.f32[1] = y;
memset(v, 0, sizeof(v));
v[0].f32 = x;
v[1].f32 = y;
return nir_build_imm(build, 2, 32, v);
}
@ -307,13 +307,13 @@ nir_imm_vec2(nir_builder *build, float x, float y)
static inline nir_ssa_def *
nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
{
nir_const_value v;
nir_const_value v[4];
memset(&v, 0, sizeof(v));
v.f32[0] = x;
v.f32[1] = y;
v.f32[2] = z;
v.f32[3] = w;
memset(v, 0, sizeof(v));
v[0].f32 = x;
v[1].f32 = y;
v[2].f32 = z;
v[3].f32 = w;
return nir_build_imm(build, 4, 32, v);
}
@ -321,11 +321,11 @@ nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
static inline nir_ssa_def *
nir_imm_ivec2(nir_builder *build, int x, int y)
{
nir_const_value v;
nir_const_value v[2];
memset(&v, 0, sizeof(v));
v.i32[0] = x;
v.i32[1] = y;
memset(v, 0, sizeof(v));
v[0].i32 = x;
v[1].i32 = y;
return nir_build_imm(build, 2, 32, v);
}
@ -336,9 +336,9 @@ nir_imm_int(nir_builder *build, int x)
nir_const_value v;
memset(&v, 0, sizeof(v));
v.i32[0] = x;
v.i32 = x;
return nir_build_imm(build, 1, 32, v);
return nir_build_imm(build, 1, 32, &v);
}
static inline nir_ssa_def *
@ -347,9 +347,9 @@ nir_imm_int64(nir_builder *build, int64_t x)
nir_const_value v;
memset(&v, 0, sizeof(v));
v.i64[0] = x;
v.i64 = x;
return nir_build_imm(build, 1, 64, v);
return nir_build_imm(build, 1, 64, &v);
}
static inline nir_ssa_def *
@ -360,23 +360,23 @@ nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
memset(&v, 0, sizeof(v));
assert(bit_size <= 64);
if (bit_size == 1)
v.b[0] = x & 1;
v.b = x & 1;
else
v.i64[0] = x & (~0ull >> (64 - bit_size));
v.i64 = x & (~0ull >> (64 - bit_size));
return nir_build_imm(build, 1, bit_size, v);
return nir_build_imm(build, 1, bit_size, &v);
}
static inline nir_ssa_def *
nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
{
nir_const_value v;
nir_const_value v[4];
memset(&v, 0, sizeof(v));
v.i32[0] = x;
v.i32[1] = y;
v.i32[2] = z;
v.i32[3] = w;
memset(v, 0, sizeof(v));
v[0].i32 = x;
v[1].i32 = y;
v[2].i32 = z;
v[3].i32 = w;
return nir_build_imm(build, 4, 32, v);
}


@ -355,7 +355,7 @@ clone_load_const(clone_state *state, const nir_load_const_instr *lc)
nir_load_const_instr_create(state->ns, lc->def.num_components,
lc->def.bit_size);
memcpy(&nlc->value, &lc->value, sizeof(nlc->value));
memcpy(&nlc->value, &lc->value, sizeof(*nlc->value) * lc->def.num_components);
add_remap(state, &nlc->def, &lc->def);


@ -30,7 +30,8 @@
#include "nir.h"
nir_const_value nir_eval_const_opcode(nir_op op, unsigned num_components,
unsigned bit_size, nir_const_value *src);
void nir_eval_const_opcode(nir_op op, nir_const_value *dest,
unsigned num_components, unsigned bit_size,
nir_const_value **src);
#endif /* NIR_CONSTANT_EXPRESSIONS_H */
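
nir_eval_const_opcode now writes into a caller-provided destination array and takes an array of pointers, one per source, instead of returning a single nir_const_value. A minimal calling sketch under the new signature (opcode, values and component count are illustrative; the real call sites are in nir_opt_constant_folding.c and nir_loop_analyze.c below):

nir_const_value a, b;
memset(&a, 0, sizeof(a));
memset(&b, 0, sizeof(b));
a.i32 = 3;
b.i32 = 4;

nir_const_value *srcs[2] = { &a, &b };
nir_const_value dest[NIR_MAX_VEC_COMPONENTS];
memset(dest, 0, sizeof(dest));

/* single-component 32-bit integer add: dest[0].i32 becomes 7 */
nir_eval_const_opcode(nir_op_iadd, dest, 1, 32, srcs);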


@ -281,11 +281,11 @@ struct ${type}${width}_vec {
% for k in range(op.input_sizes[j]):
% if input_types[j] == "int1":
/* 1-bit integers use a 0/-1 convention */
-(int1_t)_src[${j}].b[${k}],
-(int1_t)_src[${j}][${k}].b,
% elif input_types[j] == "float16":
_mesa_half_to_float(_src[${j}].u16[${k}]),
_mesa_half_to_float(_src[${j}][${k}].u16),
% else:
_src[${j}].${get_const_field(input_types[j])}[${k}],
_src[${j}][${k}].${get_const_field(input_types[j])},
% endif
% endfor
% for k in range(op.input_sizes[j], 4):
@ -309,13 +309,13 @@ struct ${type}${width}_vec {
<% continue %>
% elif input_types[j] == "int1":
/* 1-bit integers use a 0/-1 convention */
const int1_t src${j} = -(int1_t)_src[${j}].b[_i];
const int1_t src${j} = -(int1_t)_src[${j}][_i].b;
% elif input_types[j] == "float16":
const float src${j} =
_mesa_half_to_float(_src[${j}].u16[_i]);
_mesa_half_to_float(_src[${j}][_i].u16);
% else:
const ${input_types[j]}_t src${j} =
_src[${j}].${get_const_field(input_types[j])}[_i];
_src[${j}][_i].${get_const_field(input_types[j])};
% endif
% endfor
@ -334,14 +334,14 @@ struct ${type}${width}_vec {
## value of dst.
% if output_type == "int1" or output_type == "uint1":
/* 1-bit integers get truncated */
_dst_val.b[_i] = dst & 1;
_dst_val[_i].b = dst & 1;
% elif output_type.startswith("bool"):
## Sanitize the C value to a proper NIR 0/-1 bool
_dst_val.${get_const_field(output_type)}[_i] = -(int)dst;
_dst_val[_i].${get_const_field(output_type)} = -(int)dst;
% elif output_type == "float16":
_dst_val.u16[_i] = _mesa_float_to_half(dst);
_dst_val[_i].u16 = _mesa_float_to_half(dst);
% else:
_dst_val.${get_const_field(output_type)}[_i] = dst;
_dst_val[_i].${get_const_field(output_type)} = dst;
% endif
}
% else:
@ -366,27 +366,26 @@ struct ${type}${width}_vec {
% for k in range(op.output_size):
% if output_type == "int1" or output_type == "uint1":
/* 1-bit integers get truncated */
_dst_val.b[${k}] = dst.${"xyzw"[k]} & 1;
_dst_val[${k}].b = dst.${"xyzw"[k]} & 1;
% elif output_type.startswith("bool"):
## Sanitize the C value to a proper NIR 0/-1 bool
_dst_val.${get_const_field(output_type)}[${k}] = -(int)dst.${"xyzw"[k]};
_dst_val[${k}].${get_const_field(output_type)} = -(int)dst.${"xyzw"[k]};
% elif output_type == "float16":
_dst_val.u16[${k}] = _mesa_float_to_half(dst.${"xyzw"[k]});
_dst_val[${k}].u16 = _mesa_float_to_half(dst.${"xyzw"[k]});
% else:
_dst_val.${get_const_field(output_type)}[${k}] = dst.${"xyzw"[k]};
_dst_val[${k}].${get_const_field(output_type)} = dst.${"xyzw"[k]};
% endif
% endfor
% endif
</%def>
% for name, op in sorted(opcodes.items()):
static nir_const_value
evaluate_${name}(MAYBE_UNUSED unsigned num_components,
static void
evaluate_${name}(nir_const_value *_dst_val,
MAYBE_UNUSED unsigned num_components,
${"UNUSED" if op_bit_sizes(op) is None else ""} unsigned bit_size,
MAYBE_UNUSED nir_const_value *_src)
MAYBE_UNUSED nir_const_value **_src)
{
nir_const_value _dst_val = { {0, } };
% if op_bit_sizes(op) is not None:
switch (bit_size) {
% for bit_size in op_bit_sizes(op):
@ -402,19 +401,18 @@ evaluate_${name}(MAYBE_UNUSED unsigned num_components,
% else:
${evaluate_op(op, 0)}
% endif
return _dst_val;
}
% endfor
nir_const_value
nir_eval_const_opcode(nir_op op, unsigned num_components,
unsigned bit_width, nir_const_value *src)
void
nir_eval_const_opcode(nir_op op, nir_const_value *dest,
unsigned num_components, unsigned bit_width,
nir_const_value **src)
{
switch (op) {
% for name in sorted(opcodes.keys()):
case nir_op_${name}:
return evaluate_${name}(num_components, bit_width, src);
return evaluate_${name}(dest, num_components, bit_width, src);
% endfor
default:
unreachable("shouldn't get here");


@ -53,10 +53,11 @@ nir_mask_shift_or(struct nir_builder *b, nir_ssa_def *dst, nir_ssa_def *src,
static inline nir_ssa_def *
nir_format_mask_uvec(nir_builder *b, nir_ssa_def *src, const unsigned *bits)
{
nir_const_value mask;
nir_const_value mask[NIR_MAX_VEC_COMPONENTS];
memset(mask, 0, sizeof(mask));
for (unsigned i = 0; i < src->num_components; i++) {
assert(bits[i] < 32);
mask.u32[i] = (1u << bits[i]) - 1;
mask[i].u32 = (1u << bits[i]) - 1;
}
return nir_iand(b, src, nir_build_imm(b, src->num_components, 32, mask));
}
@ -210,10 +211,11 @@ _nir_format_norm_factor(nir_builder *b, const unsigned *bits,
unsigned num_components,
bool is_signed)
{
nir_const_value factor;
nir_const_value factor[NIR_MAX_VEC_COMPONENTS];
memset(factor, 0, sizeof(factor));
for (unsigned i = 0; i < num_components; i++) {
assert(bits[i] < 32);
factor.f32[i] = (1ul << (bits[i] - is_signed)) - 1;
factor[i].f32 = (1ul << (bits[i] - is_signed)) - 1;
}
return nir_build_imm(b, num_components, 32, factor);
}
@ -309,10 +311,11 @@ nir_format_clamp_uint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
if (bits[0] == 32)
return f;
nir_const_value max;
nir_const_value max[NIR_MAX_VEC_COMPONENTS];
memset(max, 0, sizeof(max));
for (unsigned i = 0; i < f->num_components; i++) {
assert(bits[i] < 32);
max.u32[i] = (1 << bits[i]) - 1;
max[i].u32 = (1 << bits[i]) - 1;
}
return nir_umin(b, f, nir_build_imm(b, f->num_components, 32, max));
}
@ -326,11 +329,13 @@ nir_format_clamp_sint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
if (bits[0] == 32)
return f;
nir_const_value min, max;
nir_const_value min[NIR_MAX_VEC_COMPONENTS], max[NIR_MAX_VEC_COMPONENTS];
memset(min, 0, sizeof(min));
memset(max, 0, sizeof(max));
for (unsigned i = 0; i < f->num_components; i++) {
assert(bits[i] < 32);
max.i32[i] = (1 << (bits[i] - 1)) - 1;
min.i32[i] = -(1 << (bits[i] - 1));
max[i].i32 = (1 << (bits[i] - 1)) - 1;
min[i].i32 = -(1 << (bits[i] - 1));
}
f = nir_imin(b, f, nir_build_imm(b, f->num_components, 32, max));
f = nir_imax(b, f, nir_build_imm(b, f->num_components, 32, min));


@ -124,12 +124,12 @@ hash_load_const(uint32_t hash, const nir_load_const_instr *instr)
if (instr->def.bit_size == 1) {
for (unsigned i = 0; i < instr->def.num_components; i++) {
uint8_t b = instr->value.b[i];
uint8_t b = instr->value[i].b;
hash = HASH(hash, b);
}
} else {
unsigned size = instr->def.num_components * (instr->def.bit_size / 8);
hash = _mesa_fnv32_1a_accumulate_block(hash, instr->value.f32, size);
unsigned size = instr->def.num_components * sizeof(*instr->value);
hash = _mesa_fnv32_1a_accumulate_block(hash, instr->value, size);
}
return hash;
@ -309,8 +309,8 @@ nir_const_value_negative_equal(const nir_const_value *c1,
switch (bits) {
case 16:
for (unsigned i = 0; i < components; i++) {
if (_mesa_half_to_float(c1->u16[i]) !=
-_mesa_half_to_float(c2->u16[i])) {
if (_mesa_half_to_float(c1[i].u16) !=
-_mesa_half_to_float(c2[i].u16)) {
return false;
}
}
@ -319,7 +319,7 @@ nir_const_value_negative_equal(const nir_const_value *c1,
case 32:
for (unsigned i = 0; i < components; i++) {
if (c1->f32[i] != -c2->f32[i])
if (c1[i].f32 != -c2[i].f32)
return false;
}
@ -327,7 +327,7 @@ nir_const_value_negative_equal(const nir_const_value *c1,
case 64:
for (unsigned i = 0; i < components; i++) {
if (c1->f64[i] != -c2->f64[i])
if (c1[i].f64 != -c2[i].f64)
return false;
}
@ -344,7 +344,7 @@ nir_const_value_negative_equal(const nir_const_value *c1,
switch (bits) {
case 8:
for (unsigned i = 0; i < components; i++) {
if (c1->i8[i] != -c2->i8[i])
if (c1[i].i8 != -c2[i].i8)
return false;
}
@ -352,7 +352,7 @@ nir_const_value_negative_equal(const nir_const_value *c1,
case 16:
for (unsigned i = 0; i < components; i++) {
if (c1->i16[i] != -c2->i16[i])
if (c1[i].i16 != -c2[i].i16)
return false;
}
@ -361,7 +361,7 @@ nir_const_value_negative_equal(const nir_const_value *c1,
case 32:
for (unsigned i = 0; i < components; i++) {
if (c1->i32[i] != -c2->i32[i])
if (c1[i].i32 != -c2[i].i32)
return false;
}
@ -369,7 +369,7 @@ nir_const_value_negative_equal(const nir_const_value *c1,
case 64:
for (unsigned i = 0; i < components; i++) {
if (c1->i64[i] != -c2->i64[i])
if (c1[i].i64 != -c2[i].i64)
return false;
}
@ -628,13 +628,31 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
if (load1->def.bit_size != load2->def.bit_size)
return false;
if (load1->def.bit_size == 1) {
unsigned size = load1->def.num_components * sizeof(bool);
return memcmp(load1->value.b, load2->value.b, size) == 0;
} else {
unsigned size = load1->def.num_components * (load1->def.bit_size / 8);
return memcmp(load1->value.f32, load2->value.f32, size) == 0;
for (unsigned i = 0; i < load1->def.num_components; ++i) {
switch (load1->def.bit_size) {
case 1:
if (load1->value[i].b != load2->value[i].b)
return false;
break;
case 8:
if (load1->value[i].u8 != load2->value[i].u8)
return false;
break;
case 16:
if (load1->value[i].u16 != load2->value[i].u16)
return false;
break;
case 32:
if (load1->value[i].u32 != load2->value[i].u32)
return false;
break;
case 64:
if (load1->value[i].u64 != load2->value[i].u64)
return false;
break;
}
}
return true;
}
case nir_instr_type_phi: {
nir_phi_instr *phi1 = nir_instr_as_phi(instr1);


@ -535,7 +535,7 @@ guess_loop_limit(loop_info_state *state, nir_const_value *limit_val,
}
if (min_array_size) {
limit_val->i32[0] = min_array_size;
limit_val->i32 = min_array_size;
return true;
}
@ -561,7 +561,7 @@ try_find_limit_of_alu(nir_loop_variable *limit, nir_const_value *limit_val,
if (!is_var_constant(limit))
return false;
*limit_val = nir_instr_as_load_const(limit->def->parent_instr)->value;
*limit_val = nir_instr_as_load_const(limit->def->parent_instr)->value[0];
terminator->exact_trip_count_unknown = true;
@ -582,25 +582,25 @@ get_iteration(nir_op cond_op, nir_const_value *initial, nir_const_value *step,
case nir_op_ilt:
case nir_op_ieq:
case nir_op_ine: {
int32_t initial_val = initial->i32[0];
int32_t span = limit->i32[0] - initial_val;
iter = span / step->i32[0];
int32_t initial_val = initial->i32;
int32_t span = limit->i32 - initial_val;
iter = span / step->i32;
break;
}
case nir_op_uge:
case nir_op_ult: {
uint32_t initial_val = initial->u32[0];
uint32_t span = limit->u32[0] - initial_val;
iter = span / step->u32[0];
uint32_t initial_val = initial->u32;
uint32_t span = limit->u32 - initial_val;
iter = span / step->u32;
break;
}
case nir_op_fge:
case nir_op_flt:
case nir_op_feq:
case nir_op_fne: {
float initial_val = initial->f32[0];
float span = limit->f32[0] - initial_val;
iter = span / step->f32[0];
float initial_val = initial->f32;
float span = limit->f32 - initial_val;
iter = span / step->f32;
break;
}
default:
@ -618,18 +618,18 @@ test_iterations(int32_t iter_int, nir_const_value *step,
{
assert(nir_op_infos[cond_op].num_inputs == 2);
nir_const_value iter_src = { {0, } };
nir_const_value iter_src = {0, };
nir_op mul_op;
nir_op add_op;
switch (induction_base_type) {
case nir_type_float:
iter_src.f32[0] = (float) iter_int;
iter_src.f32 = (float) iter_int;
mul_op = nir_op_fmul;
add_op = nir_op_fadd;
break;
case nir_type_int:
case nir_type_uint:
iter_src.i32[0] = iter_int;
iter_src.i32 = iter_int;
mul_op = nir_op_imul;
add_op = nir_op_iadd;
break;
@ -640,23 +640,24 @@ test_iterations(int32_t iter_int, nir_const_value *step,
/* Multiple the iteration count we are testing by the number of times we
* step the induction variable each iteration.
*/
nir_const_value mul_src[2] = { iter_src, *step };
nir_const_value mul_result =
nir_eval_const_opcode(mul_op, 1, bit_size, mul_src);
nir_const_value *mul_src[2] = { &iter_src, step };
nir_const_value mul_result;
nir_eval_const_opcode(mul_op, &mul_result, 1, bit_size, mul_src);
/* Add the initial value to the accumulated induction variable total */
nir_const_value add_src[2] = { mul_result, *initial };
nir_const_value add_result =
nir_eval_const_opcode(add_op, 1, bit_size, add_src);
nir_const_value *add_src[2] = { &mul_result, initial };
nir_const_value add_result;
nir_eval_const_opcode(add_op, &add_result, 1, bit_size, add_src);
nir_const_value src[2] = { { {0, } }, { {0, } } };
src[limit_rhs ? 0 : 1] = add_result;
src[limit_rhs ? 1 : 0] = *limit;
nir_const_value *src[2];
src[limit_rhs ? 0 : 1] = &add_result;
src[limit_rhs ? 1 : 0] = limit;
/* Evaluate the loop exit condition */
nir_const_value result = nir_eval_const_opcode(cond_op, 1, bit_size, src);
nir_const_value result;
nir_eval_const_opcode(cond_op, &result, 1, bit_size, src);
return invert_cond ? !result.b[0] : result.b[0];
return invert_cond ? !result.b : result.b;
}
static int
@ -822,9 +823,9 @@ try_find_trip_count_vars_in_iand(nir_alu_instr **alu,
}
/* If the loop is not breaking on (x && y) == 0 then return */
nir_const_value zero =
nir_const_value *zero =
nir_instr_as_load_const(zero_def->parent_instr)->value;
if (zero.i32[0] != 0)
if (zero[0].i32 != 0)
return;
}
@ -932,7 +933,7 @@ find_trip_count(loop_info_state *state)
nir_const_value limit_val;
if (is_var_constant(limit)) {
limit_val =
nir_instr_as_load_const(limit->def->parent_instr)->value;
nir_instr_as_load_const(limit->def->parent_instr)->value[0];
} else {
trip_count_known = false;
@ -954,15 +955,15 @@ find_trip_count(loop_info_state *state)
* Thats all thats needed to calculate the trip-count
*/
nir_const_value initial_val =
nir_const_value *initial_val =
nir_instr_as_load_const(basic_ind->ind->def_outside_loop->
def->parent_instr)->value;
nir_const_value step_val =
nir_const_value *step_val =
nir_instr_as_load_const(basic_ind->ind->invariant->def->
parent_instr)->value;
int iterations = calculate_iterations(&initial_val, &step_val,
int iterations = calculate_iterations(initial_val, step_val,
&limit_val,
basic_ind->ind->alu_def, alu,
alu_op, limit_rhs,


@ -139,9 +139,9 @@ nir_lower_bool_to_float_impl(nir_function_impl *impl)
case nir_instr_type_load_const: {
nir_load_const_instr *load = nir_instr_as_load_const(instr);
if (load->def.bit_size == 1) {
nir_const_value value = load->value;
nir_const_value *value = load->value;
for (unsigned i = 0; i < load->def.num_components; i++)
load->value.f32[i] = value.b[i] ? 1.0 : 0.0;
load->value[i].f32 = value[i].b ? 1.0 : 0.0;
load->def.bit_size = 32;
progress = true;
}


@ -117,9 +117,9 @@ nir_lower_bool_to_int32_impl(nir_function_impl *impl)
case nir_instr_type_load_const: {
nir_load_const_instr *load = nir_instr_as_load_const(instr);
if (load->def.bit_size == 1) {
nir_const_value value = load->value;
nir_const_value *value = load->value;
for (unsigned i = 0; i < load->def.num_components; i++)
load->value.u32[i] = value.b[i] ? NIR_TRUE : NIR_FALSE;
load->value[i].u32 = value[i].b ? NIR_TRUE : NIR_FALSE;
load->def.bit_size = 32;
progress = true;
}


@ -32,7 +32,7 @@ build_constant_load(nir_builder *b, nir_deref_instr *deref, nir_constant *c)
nir_load_const_instr_create(b->shader,
glsl_get_vector_elements(deref->type),
glsl_get_bit_size(deref->type));
load->value = c->values[0];
memcpy(load->value, c->values[0], sizeof(*load->value) * load->def.num_components);
nir_builder_instr_insert(b, &load->instr);
nir_store_deref(b, deref, &load->def, ~0);
} else if (glsl_type_is_matrix(deref->type)) {
@ -42,7 +42,7 @@ build_constant_load(nir_builder *b, nir_deref_instr *deref, nir_constant *c)
for (unsigned i = 0; i < cols; i++) {
nir_load_const_instr *load =
nir_load_const_instr_create(b->shader, rows, bit_size);
load->value = c->values[i];
memcpy(load->value, c->values[i], sizeof(*load->value) * load->def.num_components);
nir_builder_instr_insert(b, &load->instr);
nir_store_deref(b, nir_build_deref_array_imm(b, deref, i),
&load->def, ~0);


@ -52,19 +52,19 @@ lower_load_const_instr_scalar(nir_load_const_instr *lower)
nir_load_const_instr_create(b.shader, 1, lower->def.bit_size);
switch (lower->def.bit_size) {
case 64:
load_comp->value.u64[0] = lower->value.u64[i];
load_comp->value[0].u64 = lower->value[i].u64;
break;
case 32:
load_comp->value.u32[0] = lower->value.u32[i];
load_comp->value[0].u32 = lower->value[i].u32;
break;
case 16:
load_comp->value.u16[0] = lower->value.u16[i];
load_comp->value[0].u16 = lower->value[i].u16;
break;
case 8:
load_comp->value.u8[0] = lower->value.u8[i];
load_comp->value[0].u8 = lower->value[i].u8;
break;
case 1:
load_comp->value.b[0] = lower->value.b[i];
load_comp->value[0].b = lower->value[i].b;
break;
default:
assert(!"invalid bit size");


@ -42,11 +42,11 @@ build_local_group_size(nir_builder *b, unsigned bit_size)
} else {
/* using a 32 bit constant is safe here as no device/driver needs more
* than 32 bits for the local size */
nir_const_value local_size_const;
memset(&local_size_const, 0, sizeof(local_size_const));
local_size_const.u32[0] = b->shader->info.cs.local_size[0];
local_size_const.u32[1] = b->shader->info.cs.local_size[1];
local_size_const.u32[2] = b->shader->info.cs.local_size[2];
nir_const_value local_size_const[3];
memset(local_size_const, 0, sizeof(local_size_const));
local_size_const[0].u32 = b->shader->info.cs.local_size[0];
local_size_const[1].u32 = b->shader->info.cs.local_size[1];
local_size_const[2].u32 = b->shader->info.cs.local_size[2];
local_size = nir_build_imm(b, 3, 32, local_size_const);
}


@ -356,10 +356,10 @@ convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
nir_ssa_def *y, nir_ssa_def *u, nir_ssa_def *v,
nir_ssa_def *a)
{
nir_const_value m[3] = {
{ .f32 = { 1.0f, 0.0f, 1.59602678f, 0.0f } },
{ .f32 = { 1.0f, -0.39176229f, -0.81296764f, 0.0f } },
{ .f32 = { 1.0f, 2.01723214f, 0.0f, 0.0f } }
nir_const_value m[3][4] = {
{ { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 1.59602678f }, { .f32 = 0.0f } },
{ { .f32 = 1.0f }, { .f32 = -0.39176229f }, { .f32 = -0.81296764f }, { .f32 = 0.0f } },
{ { .f32 = 1.0f }, { .f32 = 2.01723214f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
};
nir_ssa_def *yuv =
@ -755,18 +755,18 @@ saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
static nir_ssa_def *
get_zero_or_one(nir_builder *b, nir_alu_type type, uint8_t swizzle_val)
{
nir_const_value v;
nir_const_value v[4];
memset(&v, 0, sizeof(v));
if (swizzle_val == 4) {
v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 0;
v[0].u32 = v[1].u32 = v[2].u32 = v[3].u32 = 0;
} else {
assert(swizzle_val == 5);
if (type == nir_type_float)
v.f32[0] = v.f32[1] = v.f32[2] = v.f32[3] = 1.0;
v[0].f32 = v[1].f32 = v[2].f32 = v[3].f32 = 1.0;
else
v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 1;
v[0].u32 = v[1].u32 = v[2].u32 = v[3].u32 = 1;
}
return nir_build_imm(b, 4, 32, v);


@ -41,7 +41,7 @@ struct constant_fold_state {
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
nir_const_value src[NIR_MAX_VEC_COMPONENTS];
nir_const_value src[NIR_MAX_VEC_COMPONENTS][NIR_MAX_VEC_COMPONENTS];
if (!instr->dest.dest.is_ssa)
return false;
@ -77,19 +77,19 @@ constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
j++) {
switch(load_const->def.bit_size) {
case 64:
src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
src[i][j].u64 = load_const->value[instr->src[i].swizzle[j]].u64;
break;
case 32:
src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
src[i][j].u32 = load_const->value[instr->src[i].swizzle[j]].u32;
break;
case 16:
src[i].u16[j] = load_const->value.u16[instr->src[i].swizzle[j]];
src[i][j].u16 = load_const->value[instr->src[i].swizzle[j]].u16;
break;
case 8:
src[i].u8[j] = load_const->value.u8[instr->src[i].swizzle[j]];
src[i][j].u8 = load_const->value[instr->src[i].swizzle[j]].u8;
break;
case 1:
src[i].b[j] = load_const->value.b[instr->src[i].swizzle[j]];
src[i][j].b = load_const->value[instr->src[i].swizzle[j]].b;
break;
default:
unreachable("Invalid bit size");
@ -106,16 +106,20 @@ constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
/* We shouldn't have any saturate modifiers in the optimization loop. */
assert(!instr->dest.saturate);
nir_const_value dest =
nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
bit_size, src);
nir_const_value dest[NIR_MAX_VEC_COMPONENTS];
nir_const_value *srcs[NIR_MAX_VEC_COMPONENTS];
memset(dest, 0, sizeof(dest));
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; ++i)
srcs[i] = src[i];
nir_eval_const_opcode(instr->op, dest, instr->dest.dest.ssa.num_components,
bit_size, srcs);
nir_load_const_instr *new_instr =
nir_load_const_instr_create(mem_ctx,
instr->dest.dest.ssa.num_components,
instr->dest.dest.ssa.bit_size);
new_instr->value = dest;
memcpy(new_instr->value, dest, sizeof(*new_instr->value) * new_instr->def.num_components);
nir_instr_insert_before(&instr->instr, &new_instr->instr);


@ -119,16 +119,16 @@ nir_opt_idiv_const_instr(nir_builder *b, nir_alu_instr *alu)
int64_t d;
switch (bit_size) {
case 8:
d = const_denom->i8[alu->src[1].swizzle[comp]];
d = const_denom[alu->src[1].swizzle[comp]].i8;
break;
case 16:
d = const_denom->i16[alu->src[1].swizzle[comp]];
d = const_denom[alu->src[1].swizzle[comp]].i16;
break;
case 32:
d = const_denom->i32[alu->src[1].swizzle[comp]];
d = const_denom[alu->src[1].swizzle[comp]].i32;
break;
case 64:
d = const_denom->i64[alu->src[1].swizzle[comp]];
d = const_denom[alu->src[1].swizzle[comp]].i64;
break;
default:
unreachable("Invalid bit size");


@ -74,9 +74,9 @@ phi_has_constant_from_outside_and_one_from_inside_loop(nir_phi_instr *phi,
return false;
if (src->pred != entry_block) {
*continue_val = const_src->u32[0];
*continue_val = const_src[0].u32;
} else {
*entry_val = const_src->u32[0];
*entry_val = const_src[0].u32;
}
}


@ -93,27 +93,27 @@ handle_constant_store(nir_builder *b, nir_intrinsic_instr *store,
case 1:
/* Booleans are special-cased to be 32-bit */
for (unsigned i = 0; i < num_components; i++)
((int32_t *)dst)[i] = -(int)val->b[i];
((int32_t *)dst)[i] = -(int)val[i].b;
break;
case 8:
for (unsigned i = 0; i < num_components; i++)
((uint8_t *)dst)[i] = val->u8[i];
((uint8_t *)dst)[i] = val[i].u8;
break;
case 16:
for (unsigned i = 0; i < num_components; i++)
((uint16_t *)dst)[i] = val->u16[i];
((uint16_t *)dst)[i] = val[i].u16;
break;
case 32:
for (unsigned i = 0; i < num_components; i++)
((uint32_t *)dst)[i] = val->u32[i];
((uint32_t *)dst)[i] = val[i].u32;
break;
case 64:
for (unsigned i = 0; i < num_components; i++)
((uint64_t *)dst)[i] = val->u64[i];
((uint64_t *)dst)[i] = val[i].u64;
break;
default:


@ -300,7 +300,7 @@ print_constant(nir_constant *c, const struct glsl_type *type, print_state *state
for (i = 0; i < rows; i++) {
if (i > 0) fprintf(fp, ", ");
fprintf(fp, "%s", c->values[0].b[i] ? "true" : "false");
fprintf(fp, "%s", c->values[0][i].b ? "true" : "false");
}
break;
@ -311,7 +311,7 @@ print_constant(nir_constant *c, const struct glsl_type *type, print_state *state
for (i = 0; i < rows; i++) {
if (i > 0) fprintf(fp, ", ");
fprintf(fp, "0x%02x", c->values[0].u8[i]);
fprintf(fp, "0x%02x", c->values[0][i].u8);
}
break;
@ -322,7 +322,7 @@ print_constant(nir_constant *c, const struct glsl_type *type, print_state *state
for (i = 0; i < rows; i++) {
if (i > 0) fprintf(fp, ", ");
fprintf(fp, "0x%04x", c->values[0].u16[i]);
fprintf(fp, "0x%04x", c->values[0][i].u16);
}
break;
@ -333,7 +333,7 @@ print_constant(nir_constant *c, const struct glsl_type *type, print_state *state
for (i = 0; i < rows; i++) {
if (i > 0) fprintf(fp, ", ");
fprintf(fp, "0x%08x", c->values[0].u32[i]);
fprintf(fp, "0x%08x", c->values[0][i].u32);
}
break;
@ -341,7 +341,7 @@ print_constant(nir_constant *c, const struct glsl_type *type, print_state *state
for (i = 0; i < cols; i++) {
for (j = 0; j < rows; j++) {
if (i + j > 0) fprintf(fp, ", ");
fprintf(fp, "%f", _mesa_half_to_float(c->values[i].u16[j]));
fprintf(fp, "%f", _mesa_half_to_float(c->values[i][j].u16));
}
}
break;
@ -350,7 +350,7 @@ print_constant(nir_constant *c, const struct glsl_type *type, print_state *state
for (i = 0; i < cols; i++) {
for (j = 0; j < rows; j++) {
if (i + j > 0) fprintf(fp, ", ");
fprintf(fp, "%f", c->values[i].f32[j]);
fprintf(fp, "%f", c->values[i][j].f32);
}
}
break;
@ -359,7 +359,7 @@ print_constant(nir_constant *c, const struct glsl_type *type, print_state *state
for (i = 0; i < cols; i++) {
for (j = 0; j < rows; j++) {
if (i + j > 0) fprintf(fp, ", ");
fprintf(fp, "%f", c->values[i].f64[j]);
fprintf(fp, "%f", c->values[i][j].f64);
}
}
break;
@ -371,7 +371,7 @@ print_constant(nir_constant *c, const struct glsl_type *type, print_state *state
for (i = 0; i < cols; i++) {
if (i > 0) fprintf(fp, ", ");
fprintf(fp, "0x%08" PRIx64, c->values[0].u64[i]);
fprintf(fp, "0x%08" PRIx64, c->values[0][i].u64);
}
break;
@ -1038,21 +1038,21 @@ print_load_const_instr(nir_load_const_instr *instr, print_state *state)
switch (instr->def.bit_size) {
case 64:
fprintf(fp, "0x%16" PRIx64 " /* %f */", instr->value.u64[i],
instr->value.f64[i]);
fprintf(fp, "0x%16" PRIx64 " /* %f */", instr->value[i].u64,
instr->value[i].f64);
break;
case 32:
fprintf(fp, "0x%08x /* %f */", instr->value.u32[i], instr->value.f32[i]);
fprintf(fp, "0x%08x /* %f */", instr->value[i].u32, instr->value[i].f32);
break;
case 16:
fprintf(fp, "0x%04x /* %f */", instr->value.u16[i],
_mesa_half_to_float(instr->value.u16[i]));
fprintf(fp, "0x%04x /* %f */", instr->value[i].u16,
_mesa_half_to_float(instr->value[i].u16));
break;
case 8:
fprintf(fp, "0x%02x", instr->value.u8[i]);
fprintf(fp, "0x%02x", instr->value[i].u8);
break;
case 1:
fprintf(fp, "%s", instr->value.b[i] ? "true" : "false");
fprintf(fp, "%s", instr->value[i].b ? "true" : "false");
break;
}
}


@ -553,7 +553,7 @@ write_load_const(write_ctx *ctx, const nir_load_const_instr *lc)
uint32_t val = lc->def.num_components;
val |= lc->def.bit_size << 3;
blob_write_uint32(ctx->blob, val);
blob_write_bytes(ctx->blob, (uint8_t *) &lc->value, sizeof(lc->value));
blob_write_bytes(ctx->blob, lc->value, sizeof(*lc->value) * lc->def.num_components);
write_add_object(ctx, &lc->def);
}
@ -565,7 +565,7 @@ read_load_const(read_ctx *ctx)
nir_load_const_instr *lc =
nir_load_const_instr_create(ctx->nir, val & 0x7, val >> 3);
blob_copy_bytes(ctx->blob, (uint8_t *) &lc->value, sizeof(lc->value));
blob_copy_bytes(ctx->blob, lc->value, sizeof(*lc->value) * lc->def.num_components);
read_add_object(ctx, &lc->def);
return lc;
}


@ -25,18 +25,18 @@
#include "nir_builder.h"
#include "util/half_float.h"
static nir_const_value count_sequence(nir_alu_type base_type, unsigned bits,
int first);
static nir_const_value negate(const nir_const_value &src,
nir_alu_type base_type, unsigned bits,
unsigned components);
static void count_sequence(nir_const_value c[NIR_MAX_VEC_COMPONENTS],
nir_alu_type base_type, unsigned bits, int first);
static void negate(nir_const_value dst[NIR_MAX_VEC_COMPONENTS],
const nir_const_value src[NIR_MAX_VEC_COMPONENTS],
nir_alu_type base_type, unsigned bits, unsigned components);
class const_value_negative_equal_test : public ::testing::Test {
protected:
const_value_negative_equal_test()
{
memset(&c1, 0, sizeof(c1));
memset(&c2, 0, sizeof(c2));
memset(c1, 0, sizeof(c1));
memset(c2, 0, sizeof(c2));
}
~const_value_negative_equal_test()
@ -44,8 +44,8 @@ protected:
/* empty */
}
nir_const_value c1;
nir_const_value c2;
nir_const_value c1[NIR_MAX_VEC_COMPONENTS];
nir_const_value c2[NIR_MAX_VEC_COMPONENTS];
};
class alu_srcs_negative_equal_test : public ::testing::Test {
@ -67,15 +67,15 @@ protected:
TEST_F(const_value_negative_equal_test, float32_zero)
{
/* Verify that 0.0 negative-equals 0.0. */
EXPECT_TRUE(nir_const_value_negative_equal(&c1, &c1,
4, nir_type_float, 32));
EXPECT_TRUE(nir_const_value_negative_equal(c1, c1, NIR_MAX_VEC_COMPONENTS,
nir_type_float, 32));
}
TEST_F(const_value_negative_equal_test, float64_zero)
{
/* Verify that 0.0 negative-equals 0.0. */
EXPECT_TRUE(nir_const_value_negative_equal(&c1, &c1,
4, nir_type_float, 64));
EXPECT_TRUE(nir_const_value_negative_equal(c1, c1, NIR_MAX_VEC_COMPONENTS,
nir_type_float, 64));
}
/* Compare an object with non-zero values to itself. This should always be
@ -84,8 +84,10 @@ TEST_F(const_value_negative_equal_test, float64_zero)
#define compare_with_self(base_type, bits) \
TEST_F(const_value_negative_equal_test, base_type ## bits ## _self) \
{ \
c1 = count_sequence(base_type, bits, 1); \
EXPECT_FALSE(nir_const_value_negative_equal(&c1, &c1, 4, base_type, bits)); \
count_sequence(c1, base_type, bits, 1); \
EXPECT_FALSE(nir_const_value_negative_equal(c1, c1, \
NIR_MAX_VEC_COMPONENTS, \
base_type, bits)); \
}
compare_with_self(nir_type_float, 16)
@ -105,9 +107,11 @@ compare_with_self(nir_type_uint, 64)
#define compare_with_negation(base_type, bits) \
TEST_F(const_value_negative_equal_test, base_type ## bits ## _trivially_true) \
{ \
c1 = count_sequence(base_type, bits, 1); \
c2 = negate(c1, base_type, bits, 4); \
EXPECT_TRUE(nir_const_value_negative_equal(&c1, &c2, 4, base_type, bits)); \
count_sequence(c1, base_type, bits, 1); \
negate(c2, c1, base_type, bits, NIR_MAX_VEC_COMPONENTS); \
EXPECT_TRUE(nir_const_value_negative_equal(c1, c2, \
NIR_MAX_VEC_COMPONENTS, \
base_type, bits)); \
}
compare_with_negation(nir_type_float, 16)
@ -128,10 +132,12 @@ compare_with_negation(nir_type_uint, 64)
#define compare_fewer_components(base_type, bits) \
TEST_F(const_value_negative_equal_test, base_type ## bits ## _fewer_components) \
{ \
c1 = count_sequence(base_type, bits, 1); \
c2 = negate(c1, base_type, bits, 3); \
EXPECT_TRUE(nir_const_value_negative_equal(&c1, &c2, 3, base_type, bits)); \
EXPECT_FALSE(nir_const_value_negative_equal(&c1, &c2, 4, base_type, bits)); \
count_sequence(c1, base_type, bits, 1); \
negate(c2, c1, base_type, bits, 3); \
EXPECT_TRUE(nir_const_value_negative_equal(c1, c2, 3, base_type, bits)); \
EXPECT_FALSE(nir_const_value_negative_equal(c1, c2, \
NIR_MAX_VEC_COMPONENTS, \
base_type, bits)); \
}
compare_fewer_components(nir_type_float, 16)
@ -214,29 +220,27 @@ TEST_F(alu_srcs_negative_equal_test, trivial_negation_int)
EXPECT_FALSE(nir_alu_srcs_negative_equal(instr, instr, 1, 1));
}
static nir_const_value
count_sequence(nir_alu_type base_type, unsigned bits, int first)
static void
count_sequence(nir_const_value c[NIR_MAX_VEC_COMPONENTS], nir_alu_type base_type, unsigned bits, int first)
{
nir_const_value c;
switch (base_type) {
case nir_type_float:
switch (bits) {
case 16:
for (unsigned i = 0; i < ARRAY_SIZE(c.u16); i++)
c.u16[i] = _mesa_float_to_half(float(i + first));
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
c[i].u16 = _mesa_float_to_half(float(i + first));
break;
case 32:
for (unsigned i = 0; i < ARRAY_SIZE(c.f32); i++)
c.f32[i] = float(i + first);
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
c[i].f32 = float(i + first);
break;
case 64:
for (unsigned i = 0; i < ARRAY_SIZE(c.f64); i++)
c.f64[i] = double(i + first);
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
c[i].f64 = double(i + first);
break;
@ -250,26 +254,26 @@ count_sequence(nir_alu_type base_type, unsigned bits, int first)
case nir_type_uint:
switch (bits) {
case 8:
for (unsigned i = 0; i < ARRAY_SIZE(c.i8); i++)
c.i8[i] = i + first;
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
c[i].i8 = i + first;
break;
case 16:
for (unsigned i = 0; i < ARRAY_SIZE(c.i16); i++)
c.i16[i] = i + first;
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
c[i].i16 = i + first;
break;
case 32:
for (unsigned i = 0; i < ARRAY_SIZE(c.i32); i++)
c.i32[i] = i + first;
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
c[i].i32 = i + first;
break;
case 64:
for (unsigned i = 0; i < ARRAY_SIZE(c.i64); i++)
c.i64[i] = i + first;
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
c[i].i64 = i + first;
break;
@ -283,34 +287,31 @@ count_sequence(nir_alu_type base_type, unsigned bits, int first)
default:
unreachable("invalid base type");
}
return c;
}
static nir_const_value
negate(const nir_const_value &src, nir_alu_type base_type, unsigned bits,
unsigned components)
static void
negate(nir_const_value dst[NIR_MAX_VEC_COMPONENTS],
const nir_const_value src[NIR_MAX_VEC_COMPONENTS],
nir_alu_type base_type, unsigned bits, unsigned components)
{
nir_const_value c = src;
switch (base_type) {
case nir_type_float:
switch (bits) {
case 16:
for (unsigned i = 0; i < components; i++)
c.u16[i] = _mesa_float_to_half(-_mesa_half_to_float(c.u16[i]));
dst[i].u16 = _mesa_float_to_half(-_mesa_half_to_float(src[i].u16));
break;
case 32:
for (unsigned i = 0; i < components; i++)
c.f32[i] = -c.f32[i];
dst[i].f32 = -src[i].f32;
break;
case 64:
for (unsigned i = 0; i < components; i++)
c.f64[i] = -c.f64[i];
dst[i].f64 = -src[i].f64;
break;
@ -325,25 +326,25 @@ negate(const nir_const_value &src, nir_alu_type base_type, unsigned bits,
switch (bits) {
case 8:
for (unsigned i = 0; i < components; i++)
c.i8[i] = -c.i8[i];
dst[i].i8 = -src[i].i8;
break;
case 16:
for (unsigned i = 0; i < components; i++)
c.i16[i] = -c.i16[i];
dst[i].i16 = -src[i].i16;
break;
case 32:
for (unsigned i = 0; i < components; i++)
c.i32[i] = -c.i32[i];
dst[i].i32 = -src[i].i32;
break;
case 64:
for (unsigned i = 0; i < components; i++)
c.i64[i] = -c.i64[i];
dst[i].i64 = -src[i].i64;
break;
@ -357,6 +358,4 @@ negate(const nir_const_value &src, nir_alu_type base_type, unsigned bits,
default:
unreachable("invalid base type");
}
return c;
}


@ -236,7 +236,8 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
nir_load_const_instr *load =
nir_load_const_instr_create(b->shader, num_components, bit_size);
load->value = constant->values[0];
memcpy(load->value, constant->values[0],
sizeof(nir_const_value) * load->def.num_components);
nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
val->def = &load->def;
@ -252,7 +253,8 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
nir_load_const_instr *load =
nir_load_const_instr_create(b->shader, rows, bit_size);
load->value = constant->values[i];
memcpy(load->value, constant->values[i],
sizeof(nir_const_value) * load->def.num_components);
nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
col_val->def = &load->def;
@ -1254,7 +1256,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
val->type->length = 0;
} else {
val->type->length =
vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0][0].u32;
}
val->type->base_type = vtn_base_type_array;
@ -1668,7 +1670,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
opcode == SpvOpSpecConstantFalse)
int_val = get_specialization(b, val, int_val);
val->constant->values[0].b[0] = int_val != 0;
val->constant->values[0][0].b = int_val != 0;
break;
}
@ -1679,16 +1681,16 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
int bit_size = glsl_get_bit_size(val->type->type);
switch (bit_size) {
case 64:
val->constant->values->u64[0] = vtn_u64_literal(&w[3]);
val->constant->values[0][0].u64 = vtn_u64_literal(&w[3]);
break;
case 32:
val->constant->values->u32[0] = w[3];
val->constant->values[0][0].u32 = w[3];
break;
case 16:
val->constant->values->u16[0] = w[3];
val->constant->values[0][0].u16 = w[3];
break;
case 8:
val->constant->values->u8[0] = w[3];
val->constant->values[0][0].u8 = w[3];
break;
default:
vtn_fail("Unsupported SpvOpConstant bit size");
@ -1703,17 +1705,17 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
int bit_size = glsl_get_bit_size(val->type->type);
switch (bit_size) {
case 64:
val->constant->values[0].u64[0] =
val->constant->values[0][0].u64 =
get_specialization64(b, val, vtn_u64_literal(&w[3]));
break;
case 32:
val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
val->constant->values[0][0].u32 = get_specialization(b, val, w[3]);
break;
case 16:
val->constant->values[0].u16[0] = get_specialization(b, val, w[3]);
val->constant->values[0][0].u16 = get_specialization(b, val, w[3]);
break;
case 8:
val->constant->values[0].u8[0] = get_specialization(b, val, w[3]);
val->constant->values[0][0].u8 = get_specialization(b, val, w[3]);
break;
default:
vtn_fail("Unsupported SpvOpSpecConstant bit size");
@ -1750,19 +1752,19 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
for (unsigned i = 0; i < elem_count; i++) {
switch (bit_size) {
case 64:
val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
val->constant->values[0][i].u64 = elems[i]->values[0][0].u64;
break;
case 32:
val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
val->constant->values[0][i].u32 = elems[i]->values[0][0].u32;
break;
case 16:
val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
val->constant->values[0][i].u16 = elems[i]->values[0][0].u16;
break;
case 8:
val->constant->values[0].u8[i] = elems[i]->values[0].u8[0];
val->constant->values[0][i].u8 = elems[i]->values[0][0].u8;
break;
case 1:
val->constant->values[0].b[i] = elems[i]->values[0].b[0];
val->constant->values[0][i].b = elems[i]->values[0][0].b;
break;
default:
vtn_fail("Invalid SpvOpConstantComposite bit size");
@ -1773,8 +1775,12 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
case vtn_base_type_matrix:
assert(glsl_type_is_matrix(val->type->type));
for (unsigned i = 0; i < elem_count; i++)
val->constant->values[i] = elems[i]->values[0];
for (unsigned i = 0; i < elem_count; i++) {
unsigned components =
glsl_get_components(glsl_get_column_type(val->type->type));
memcpy(val->constant->values[i], elems[i]->values,
sizeof(nir_const_value) * components);
}
break;
case vtn_base_type_struct:
@ -1819,11 +1825,11 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
uint64_t u64[8];
if (v0->value_type == vtn_value_type_constant) {
for (unsigned i = 0; i < len0; i++)
u64[i] = v0->constant->values[0].u64[i];
u64[i] = v0->constant->values[0][i].u64;
}
if (v1->value_type == vtn_value_type_constant) {
for (unsigned i = 0; i < len1; i++)
u64[len0 + i] = v1->constant->values[0].u64[i];
u64[len0 + i] = v1->constant->values[0][i].u64;
}
for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
@ -1832,20 +1838,20 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
* to detect if it is wrongly used.
*/
if (comp == (uint32_t)-1)
val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
val->constant->values[0][j].u64 = 0xdeadbeefdeadbeef;
else
val->constant->values[0].u64[j] = u64[comp];
val->constant->values[0][j].u64 = u64[comp];
}
} else {
/* This is for both 32-bit and 16-bit values */
uint32_t u32[8];
if (v0->value_type == vtn_value_type_constant) {
for (unsigned i = 0; i < len0; i++)
u32[i] = v0->constant->values[0].u32[i];
u32[i] = v0->constant->values[0][i].u32;
}
if (v1->value_type == vtn_value_type_constant) {
for (unsigned i = 0; i < len1; i++)
u32[len0 + i] = v1->constant->values[0].u32[i];
u32[len0 + i] = v1->constant->values[0][i].u32;
}
for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
@ -1854,9 +1860,9 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
* to detect if it is wrongly used.
*/
if (comp == (uint32_t)-1)
val->constant->values[0].u32[j] = 0xdeadbeef;
val->constant->values[0][j].u32 = 0xdeadbeef;
else
val->constant->values[0].u32[j] = u32[comp];
val->constant->values[0][j].u32 = u32[comp];
}
}
break;
@ -1926,19 +1932,19 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
for (unsigned i = 0; i < num_components; i++)
switch(bit_size) {
case 64:
val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
val->constant->values[0][i].u64 = (*c)->values[col][elem + i].u64;
break;
case 32:
val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
val->constant->values[0][i].u32 = (*c)->values[col][elem + i].u32;
break;
case 16:
val->constant->values[0].u16[i] = (*c)->values[col].u16[elem + i];
val->constant->values[0][i].u16 = (*c)->values[col][elem + i].u16;
break;
case 8:
val->constant->values[0].u8[i] = (*c)->values[col].u8[elem + i];
val->constant->values[0][i].u8 = (*c)->values[col][elem + i].u8;
break;
case 1:
val->constant->values[0].b[i] = (*c)->values[col].b[elem + i];
val->constant->values[0][i].b = (*c)->values[col][elem + i].b;
break;
default:
vtn_fail("Invalid SpvOpCompositeExtract bit size");
@ -1956,19 +1962,19 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
for (unsigned i = 0; i < num_components; i++)
switch (bit_size) {
case 64:
(*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
(*c)->values[col][elem + i].u64 = insert->constant->values[0][i].u64;
break;
case 32:
(*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
(*c)->values[col][elem + i].u32 = insert->constant->values[0][i].u32;
break;
case 16:
(*c)->values[col].u16[elem + i] = insert->constant->values[0].u16[i];
(*c)->values[col][elem + i].u16 = insert->constant->values[0][i].u16;
break;
case 8:
(*c)->values[col].u8[elem + i] = insert->constant->values[0].u8[i];
(*c)->values[col][elem + i].u8 = insert->constant->values[0][i].u8;
break;
case 1:
(*c)->values[col].b[elem + i] = insert->constant->values[0].b[i];
(*c)->values[col][elem + i].b = insert->constant->values[0][i].b;
break;
default:
vtn_fail("Invalid SpvOpCompositeInsert bit size");
@ -2005,7 +2011,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
nir_alu_type_get_type_size(src_alu_type),
nir_alu_type_get_type_size(dst_alu_type));
nir_const_value src[3];
nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < count - 4; i++) {
struct vtn_value *src_val =
@ -2018,7 +2024,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
bit_size = glsl_get_bit_size(src_val->type->type);
unsigned j = swap ? 1 - i : i;
src[j] = src_val->constant->values[0];
memcpy(src[j], src_val->constant->values[0], sizeof(src[j]));
}
/* fix up fixed size sources */
@ -2030,9 +2036,9 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
break;
for (unsigned i = 0; i < num_components; ++i) {
switch (bit_size) {
case 64: src[1].u32[i] = src[1].u64[i]; break;
case 16: src[1].u32[i] = src[1].u16[i]; break;
case 8: src[1].u32[i] = src[1].u8[i]; break;
case 64: src[1][i].u32 = src[1][i].u64; break;
case 16: src[1][i].u32 = src[1][i].u16; break;
case 8: src[1][i].u32 = src[1][i].u8; break;
}
}
break;
@ -2041,8 +2047,10 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
break;
}
val->constant->values[0] =
nir_eval_const_opcode(op, num_components, bit_size, src);
nir_const_value *srcs[3] = {
src[0], src[1], src[2],
};
nir_eval_const_opcode(op, val->constant->values[0], num_components, bit_size, srcs);
break;
} /* default */
}
@ -2334,7 +2342,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
case SpvOpImageGather:
/* This has a component as its next source */
gather_component =
vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0][0].u32;
break;
default:
@ -2444,13 +2452,13 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
unsigned bit_size = glsl_get_bit_size(vec_type->type);
for (uint32_t i = 0; i < 4; i++) {
const nir_const_value *cvec =
&gather_offsets->constant->elements[i]->values[0];
gather_offsets->constant->elements[i]->values[0];
for (uint32_t j = 0; j < 2; j++) {
switch (bit_size) {
case 8: instr->tg4_offsets[i][j] = cvec->i8[j]; break;
case 16: instr->tg4_offsets[i][j] = cvec->i16[j]; break;
case 32: instr->tg4_offsets[i][j] = cvec->i32[j]; break;
case 64: instr->tg4_offsets[i][j] = cvec->i64[j]; break;
case 8: instr->tg4_offsets[i][j] = cvec[j].i8; break;
case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
default:
vtn_fail("Unsupported bit size");
}
@ -4640,11 +4648,11 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
glsl_vector_type(GLSL_TYPE_UINT, 3));
nir_const_value *const_size =
&b->workgroup_size_builtin->constant->values[0];
b->workgroup_size_builtin->constant->values[0];
b->shader->info.cs.local_size[0] = const_size->u32[0];
b->shader->info.cs.local_size[1] = const_size->u32[1];
b->shader->info.cs.local_size[2] = const_size->u32[2];
b->shader->info.cs.local_size[0] = const_size[0].u32;
b->shader->info.cs.local_size[1] = const_size[1].u32;
b->shader->info.cs.local_size[2] = const_size[2].u32;
}
/* Set types on all vtn_values */

View File

@ -692,10 +692,10 @@ vtn_constant_uint(struct vtn_builder *b, uint32_t value_id)
"Expected id %u to be an integer constant", value_id);
switch (glsl_get_bit_size(val->type->type)) {
case 8: return val->constant->values[0].u8[0];
case 16: return val->constant->values[0].u16[0];
case 32: return val->constant->values[0].u32[0];
case 64: return val->constant->values[0].u64[0];
case 8: return val->constant->values[0][0].u8;
case 16: return val->constant->values[0][0].u16;
case 32: return val->constant->values[0][0].u32;
case 64: return val->constant->values[0][0].u64;
default: unreachable("Invalid bit size");
}
}
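
vtn_constant_uint() above widens a constant of whatever bit size it has to the return type; with the scalar layout that is a plain switch over the union members of component [0][0]. A self-contained sketch of the same widening read, again with a placeholder union standing in for nir_const_value:

   #include <stdint.h>
   #include <stdio.h>

   typedef union {
      uint8_t  u8;
      uint16_t u16;
      uint32_t u32;
      uint64_t u64;
   } scalar_const;

   /* Widen a constant of any supported bit size to 64 bits, mirroring the
    * switch in vtn_constant_uint(). */
   static uint64_t read_uint(const scalar_const *v, unsigned bit_size)
   {
      switch (bit_size) {
      case 8:  return v->u8;
      case 16: return v->u16;
      case 32: return v->u32;
      case 64: return v->u64;
      default: return 0;   /* the real code treats this as unreachable */
      }
   }

   int main(void)
   {
      scalar_const c = { .u64 = 42 };
      printf("%llu\n", (unsigned long long)read_uint(&c, 64));
      return 0;
   }
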

View File

@ -2356,16 +2356,16 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
chain->link[idx].mode = vtn_access_mode_literal;
switch (glsl_get_bit_size(link_val->type->type)) {
case 8:
chain->link[idx].id = link_val->constant->values[0].i8[0];
chain->link[idx].id = link_val->constant->values[0][0].i8;
break;
case 16:
chain->link[idx].id = link_val->constant->values[0].i16[0];
chain->link[idx].id = link_val->constant->values[0][0].i16;
break;
case 32:
chain->link[idx].id = link_val->constant->values[0].i32[0];
chain->link[idx].id = link_val->constant->values[0][0].i32;
break;
case 64:
chain->link[idx].id = link_val->constant->values[0].i64[0];
chain->link[idx].id = link_val->constant->values[0][0].i64;
break;
default:
vtn_fail("Invalid bit size");

View File

@ -1451,7 +1451,7 @@ emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
type_t type = (instr->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;
for (int i = 0; i < instr->def.num_components; i++)
dst[i] = create_immed_typed(ctx->block, instr->value.u32[i], type);
dst[i] = create_immed_typed(ctx->block, instr->value[i].u32, type);
}
static void

View File

@ -98,7 +98,7 @@ check_and_propagate_bit_shift32(nir_builder *b, nir_ssa_def *offset,
if (!const_val)
return NULL;
int32_t current_shift = const_val->i32[0] * direction;
int32_t current_shift = const_val[0].i32 * direction;
int32_t new_shift = current_shift + shift;
/* If the merge would reverse the direction, bail out.

View File

@ -442,8 +442,8 @@ ttn_emit_immediate(struct ttn_compile *c)
c->imm_defs[c->next_imm] = &load_const->def;
c->next_imm++;
for (i = 0; i < 4; i++)
load_const->value.u32[i] = tgsi_imm->u[i].Uint;
for (i = 0; i < load_const->def.num_components; i++)
load_const->value[i].u32 = tgsi_imm->u[i].Uint;
nir_builder_instr_insert(b, &load_const->instr);
}

View File

@ -234,7 +234,9 @@ make_src(struct ir2_context *ctx, nir_src src)
if (const_value) {
assert(src.is_ssa);
return load_const(ctx, &const_value->f32[0], src.ssa->num_components);
float c[src.ssa->num_components];
nir_const_value_to_array(c, const_value, src.ssa->num_components, f32);
return load_const(ctx, c, src.ssa->num_components);
}
if (!src.is_ssa) {
@ -620,7 +622,7 @@ emit_intrinsic(struct ir2_context *ctx, nir_intrinsic_instr *intr)
const_offset = nir_src_as_const_value(intr->src[0]);
assert(const_offset); /* TODO can be false in ES2? */
idx = nir_intrinsic_base(intr);
idx += (uint32_t) nir_src_as_const_value(intr->src[0])->f32[0];
idx += (uint32_t) nir_src_as_const_value(intr->src[0])[0].f32;
instr = instr_create_alu_dest(ctx, nir_op_fmov, &intr->dest);
instr->src[0] = ir2_src(idx, 0, IR2_SRC_CONST);
break;
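
The make_src() hunk at the top of this file can no longer pass &const_value->f32[0] directly, because the f32 values of a vector are no longer laid out back to back (each component now lives in its own union), so nir_const_value_to_array() gathers them into a temporary float array first. A self-contained sketch of that gather, with a plain loop standing in for the NIR helper and a placeholder union for nir_const_value:

   #include <stdio.h>

   /* Placeholder for the scalar nir_const_value union. */
   typedef union {
      float    f32;
      unsigned u32;
   } scalar_const;

   /* Gather the f32 member of each component into a contiguous float array,
    * which is what the old &const_value->f32[0] used to provide directly. */
   static void gather_f32(float *out, const scalar_const *v, unsigned count)
   {
      for (unsigned i = 0; i < count; i++)
         out[i] = v[i].f32;
   }

   int main(void)
   {
      scalar_const vec[3] = { { .f32 = 1.0f }, { .f32 = 2.0f }, { .f32 = 0.5f } };
      float c[3];

      gather_f32(c, vec, 3);
      printf("%f %f %f\n", c[0], c[1], c[2]);
      return 0;
   }
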

View File

@ -217,7 +217,7 @@ static bool gpir_emit_load_const(gpir_block *block, nir_instr *ni)
assert(instr->def.bit_size == 32);
assert(instr->def.num_components == 1);
node->value.i = instr->value.i32[0];
node->value.i = instr->value[0].i32;
return true;
}

View File

@ -268,7 +268,7 @@ static ppir_node *ppir_emit_load_const(ppir_block *block, nir_instr *ni)
assert(instr->def.bit_size == 32);
for (int i = 0; i < instr->def.num_components; i++)
node->constant.value[i].i = instr->value.i32[i];
node->constant.value[i].i = instr->value[i].i32;
node->constant.num = instr->def.num_components;
return &node->node;

View File

@ -781,7 +781,7 @@ Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
if (offset) {
indirect = NULL;
return offset->u32[0];
return offset[0].u32;
}
indirect = getSrc(src, idx, true);
@ -2655,16 +2655,16 @@ Converter::convert(nir_load_const_instr *insn, uint8_t idx)
switch (insn->def.bit_size) {
case 64:
val = loadImm(getSSA(8), insn->value.u64[idx]);
val = loadImm(getSSA(8), insn->value[idx].u64);
break;
case 32:
val = loadImm(getSSA(4), insn->value.u32[idx]);
val = loadImm(getSSA(4), insn->value[idx].u32);
break;
case 16:
val = loadImm(getSSA(2), insn->value.u16[idx]);
val = loadImm(getSSA(2), insn->value[idx].u16);
break;
case 8:
val = loadImm(getSSA(1), insn->value.u8[idx]);
val = loadImm(getSSA(1), insn->value[idx].u8);
break;
default:
unreachable("unhandled bit size!\n");

View File

@ -865,7 +865,7 @@ emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
nir_ssa_def def = instr->def;
float *v = ralloc_array(NULL, float, 4);
memcpy(v, &instr->value.f32, 4 * sizeof(float));
nir_const_load_to_arr(v, instr, f32);
_mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
}

View File

@ -1673,7 +1673,7 @@ ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
for (int i = 0; i < instr->def.num_components; i++)
qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);
qregs[i] = qir_uniform_ui(c, instr->value[i].u32);
_mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

View File

@ -1694,17 +1694,17 @@ fs_visitor::nir_emit_load_const(const fs_builder &bld,
switch (instr->def.bit_size) {
case 8:
for (unsigned i = 0; i < instr->def.num_components; i++)
bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value.i8[i]));
bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value[i].i8));
break;
case 16:
for (unsigned i = 0; i < instr->def.num_components; i++)
bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value.i16[i]));
bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value[i].i16));
break;
case 32:
for (unsigned i = 0; i < instr->def.num_components; i++)
bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i32[i]));
bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value[i].i32));
break;
case 64:
@ -1713,11 +1713,11 @@ fs_visitor::nir_emit_load_const(const fs_builder &bld,
/* We don't get 64-bit integer types until gen8 */
for (unsigned i = 0; i < instr->def.num_components; i++) {
bld.MOV(retype(offset(reg, bld, i), BRW_REGISTER_TYPE_DF),
setup_imm_df(bld, instr->value.f64[i]));
setup_imm_df(bld, instr->value[i].f64));
}
} else {
for (unsigned i = 0; i < instr->def.num_components; i++)
bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value.i64[i]));
bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value[i].i64));
}
break;
@ -3383,8 +3383,8 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
if (const_offset) {
assert(nir_src_bit_size(instr->src[0]) == 32);
unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
unsigned off_x = MIN2((int)(const_offset[0].f32 * 16), 7) & 0xf;
unsigned off_y = MIN2((int)(const_offset[1].f32 * 16), 7) & 0xf;
emit_pixel_interpolater_send(bld,
FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
@ -3674,14 +3674,14 @@ brw_nir_reduction_op_identity(const fs_builder &bld,
switch (type_sz(type)) {
case 2:
assert(type != BRW_REGISTER_TYPE_HF);
return retype(brw_imm_uw(value.u16[0]), type);
return retype(brw_imm_uw(value.u16), type);
case 4:
return retype(brw_imm_ud(value.u32[0]), type);
return retype(brw_imm_ud(value.u32), type);
case 8:
if (type == BRW_REGISTER_TYPE_DF)
return setup_imm_df(bld, value.f64[0]);
return setup_imm_df(bld, value.f64);
else
return retype(brw_imm_u64(value.u64[0]), type);
return retype(brw_imm_u64(value.u64), type);
default:
unreachable("Invalid type size");
}

View File

@ -225,7 +225,7 @@ analyze_boolean_resolves_block(nir_block *block)
* have to worry about resolving them.
*/
instr->pass_flags &= ~BRW_NIR_BOOLEAN_MASK;
if (load->value.u32[0] == NIR_TRUE || load->value.u32[0] == NIR_FALSE) {
if (load->value[0].u32 == NIR_TRUE || load->value[0].u32 == NIR_FALSE) {
instr->pass_flags |= BRW_NIR_BOOLEAN_NO_RESOLVE;
} else {
instr->pass_flags |= BRW_NIR_NON_BOOLEAN;

View File

@ -353,18 +353,18 @@ vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
for (unsigned j = i; j < instr->def.num_components; j++) {
if ((instr->def.bit_size == 32 &&
instr->value.u32[i] == instr->value.u32[j]) ||
instr->value[i].u32 == instr->value[j].u32) ||
(instr->def.bit_size == 64 &&
instr->value.f64[i] == instr->value.f64[j])) {
instr->value[i].f64 == instr->value[j].f64)) {
writemask |= 1 << j;
}
}
reg.writemask = writemask;
if (instr->def.bit_size == 64) {
emit(MOV(reg, setup_imm_df(ibld, instr->value.f64[i])));
emit(MOV(reg, setup_imm_df(ibld, instr->value[i].f64)));
} else {
emit(MOV(reg, brw_imm_d(instr->value.i32[i])));
emit(MOV(reg, brw_imm_d(instr->value[i].i32)));
}
remaining &= ~writemask;

View File

@ -79,36 +79,40 @@ chroma_range(nir_builder *b,
}
}
static const nir_const_value *
typedef struct nir_const_value_3_4 {
nir_const_value v[3][4];
} nir_const_value_3_4;
static const nir_const_value_3_4 *
ycbcr_model_to_rgb_matrix(VkSamplerYcbcrModelConversion model)
{
switch (model) {
case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601: {
static const nir_const_value bt601[3] = {
{ .f32 = { 1.402f, 1.0f, 0.0f, 0.0f } },
{ .f32 = { -0.714136286201022f, 1.0f, -0.344136286201022f, 0.0f } },
{ .f32 = { 0.0f, 1.0f, 1.772f, 0.0f } }
};
static const nir_const_value_3_4 bt601 = { {
{ { .f32 = 1.402f }, { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
{ { .f32 = -0.714136286201022f }, { .f32 = 1.0f }, { .f32 = -0.344136286201022f }, { .f32 = 0.0f } },
{ { .f32 = 0.0f }, { .f32 = 1.0f }, { .f32 = 1.772f }, { .f32 = 0.0f } },
} };
return bt601;
return &bt601;
}
case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709: {
static const nir_const_value bt709[3] = {
{ .f32 = { 1.5748031496063f, 1.0f, 0.0, 0.0f } },
{ .f32 = { -0.468125209181067f, 1.0f, -0.187327487470334f, 0.0f } },
{ .f32 = { 0.0f, 1.0f, 1.85563184264242f, 0.0f } }
};
static const nir_const_value_3_4 bt709 = { {
{ { .f32 = 1.5748031496063f }, { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
{ { .f32 = -0.468125209181067f }, { .f32 = 1.0f }, { .f32 = -0.187327487470334f }, { .f32 = 0.0f } },
{ { .f32 = 0.0f }, { .f32 = 1.0f }, { .f32 = 1.85563184264242f }, { .f32 = 0.0f } },
} };
return bt709;
return &bt709;
}
case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020: {
static const nir_const_value bt2020[3] = {
{ .f32 = { 1.4746f, 1.0f, 0.0f, 0.0f } },
{ .f32 = { -0.571353126843658f, 1.0f, -0.164553126843658f, 0.0f } },
{ .f32 = { 0.0f, 1.0f, 1.8814f, 0.0f } }
};
static const nir_const_value_3_4 bt2020 = { {
{ { .f32 = 1.4746f }, { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
{ { .f32 = -0.571353126843658f }, { .f32 = 1.0f }, { .f32 = -0.164553126843658f }, { .f32 = 0.0f } },
{ { .f32 = 0.0f }, { .f32 = 1.0f }, { .f32 = 1.8814f }, { .f32 = 0.0f } },
} };
return bt2020;
return &bt2020;
}
default:
unreachable("missing Ycbcr model");
@ -137,13 +141,13 @@ convert_ycbcr(struct ycbcr_state *state,
if (conversion->ycbcr_model == VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY)
return expanded_channels;
const nir_const_value *conversion_matrix =
const nir_const_value_3_4 *conversion_matrix =
ycbcr_model_to_rgb_matrix(conversion->ycbcr_model);
nir_ssa_def *converted_channels[] = {
nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix[0])),
nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix[1])),
nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix[2]))
nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[0])),
nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[1])),
nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[2]))
};
return nir_vec4(b,

View File

@ -76,15 +76,15 @@ lower_tex_src_plane_block(lower_tex_src_state *state, nir_block *block)
nir_const_value *plane = nir_src_as_const_value(tex->src[plane_index].src);
assume(plane);
if (plane->i32[0] > 0) {
if (plane[0].i32 > 0) {
unsigned y_samp = tex->texture_index;
assume(tex->texture_index == tex->sampler_index);
assume(((state->lower_3plane & (1 << y_samp)) && plane->i32[0] < 3) ||
(plane->i32[0] < 2));
assume(((state->lower_3plane & (1 << y_samp)) && plane[0].i32 < 3) ||
(plane[0].i32 < 2));
tex->texture_index = tex->sampler_index =
state->sampler_map[y_samp][plane->i32[0] - 1];
state->sampler_map[y_samp][plane[0].i32 - 1];
}
nir_tex_instr_remove_src(tex, plane_index);