nir: add generated intrinsic builders

Signed-off-by: Rhys Perry <pendingchaos02@gmail.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6587>
This commit is contained in:
Rhys Perry 2020-09-07 13:55:38 +01:00 committed by Marge Bot
parent b3c65f97ad
commit c9bcad2573
11 changed files with 135 additions and 233 deletions

View File

@@ -536,6 +536,7 @@ nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
}
#include "nir_builder_opcodes.h"
#undef nir_deref_mode_is
static inline nir_ssa_def *
nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
@@ -1361,17 +1362,12 @@ static inline nir_ssa_def *
nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
enum gl_access_qualifier access)
{
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_deref);
load->num_components = glsl_get_vector_elements(deref->type);
load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
glsl_get_bit_size(deref->type), NULL);
nir_intrinsic_set_access(load, access);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
return nir_build_load_deref(build, glsl_get_vector_elements(deref->type),
glsl_get_bit_size(deref->type), &deref->dest.ssa,
access);
}
#undef nir_load_deref
static inline nir_ssa_def *
nir_load_deref(nir_builder *build, nir_deref_instr *deref)
{
@@ -1383,17 +1379,11 @@ nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
nir_ssa_def *value, unsigned writemask,
enum gl_access_qualifier access)
{
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_deref);
store->num_components = glsl_get_vector_elements(deref->type);
store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
store->src[1] = nir_src_for_ssa(value);
nir_intrinsic_set_write_mask(store,
writemask & ((1 << store->num_components) - 1));
nir_intrinsic_set_access(store, access);
nir_builder_instr_insert(build, &store->instr);
writemask &= (1u << value->num_components) - 1u;
nir_build_store_deref(build, &deref->dest.ssa, value, writemask, access);
}
#undef nir_store_deref
static inline void
nir_store_deref(nir_builder *build, nir_deref_instr *deref,
nir_ssa_def *value, unsigned writemask)
@@ -1408,15 +1398,10 @@ nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
enum gl_access_qualifier dest_access,
enum gl_access_qualifier src_access)
{
nir_intrinsic_instr *copy =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_deref);
copy->src[0] = nir_src_for_ssa(&dest->dest.ssa);
copy->src[1] = nir_src_for_ssa(&src->dest.ssa);
nir_intrinsic_set_dst_access(copy, dest_access);
nir_intrinsic_set_src_access(copy, src_access);
nir_builder_instr_insert(build, &copy->instr);
nir_build_copy_deref(build, &dest->dest.ssa, &src->dest.ssa, dest_access, src_access);
}
#undef nir_copy_deref
static inline void
nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
{
@@ -1431,16 +1416,11 @@ nir_memcpy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
enum gl_access_qualifier dest_access,
enum gl_access_qualifier src_access)
{
nir_intrinsic_instr *copy =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_memcpy_deref);
copy->src[0] = nir_src_for_ssa(&dest->dest.ssa);
copy->src[1] = nir_src_for_ssa(&src->dest.ssa);
copy->src[2] = nir_src_for_ssa(size);
nir_intrinsic_set_dst_access(copy, dest_access);
nir_intrinsic_set_src_access(copy, src_access);
nir_builder_instr_insert(build, &copy->instr);
nir_build_memcpy_deref(build, &dest->dest.ssa, &src->dest.ssa,
size, dest_access, src_access);
}
#undef nir_memcpy_deref
static inline void
nir_memcpy_deref(nir_builder *build, nir_deref_instr *dest,
nir_deref_instr *src, nir_ssa_def *size)
@@ -1450,19 +1430,6 @@ nir_memcpy_deref(nir_builder *build, nir_deref_instr *dest,
(enum gl_access_qualifier)0);
}
static inline nir_ssa_def *
nir_build_deref_mode_is(nir_builder *build, nir_deref_instr *deref,
nir_variable_mode mode)
{
nir_intrinsic_instr *intrin =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_deref_mode_is);
intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa);
nir_intrinsic_set_memory_modes(intrin, mode);
nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
nir_builder_instr_insert(build, &intrin->instr);
return &intrin->dest.ssa;
}
static inline nir_ssa_def *
nir_load_var(nir_builder *build, nir_variable *var)
{
@@ -1483,101 +1450,38 @@ nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
nir_build_deref_var(build, src));
}
#undef nir_load_global
static inline nir_ssa_def *
nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
unsigned num_components, unsigned bit_size)
{
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_global);
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
nir_ssa_dest_init(&load->instr, &load->dest,
num_components, bit_size, NULL);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
return nir_build_load_global(build, num_components, bit_size, addr, .align_mul=align);
}
#undef nir_store_global
static inline void
nir_store_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
nir_ssa_def *value, nir_component_mask_t write_mask)
{
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_global);
store->num_components = value->num_components;
store->src[0] = nir_src_for_ssa(value);
store->src[1] = nir_src_for_ssa(addr);
nir_intrinsic_set_write_mask(store,
write_mask & BITFIELD_MASK(value->num_components));
nir_intrinsic_set_align(store, align, 0);
nir_builder_instr_insert(build, &store->instr);
write_mask &= BITFIELD_MASK(value->num_components);
nir_build_store_global(build, value, addr, .write_mask=write_mask, .align_mul=align);
}
#undef nir_load_global_constant
static inline nir_ssa_def *
nir_load_global_constant(nir_builder *build, nir_ssa_def *addr, unsigned align,
unsigned num_components, unsigned bit_size)
{
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_global_constant);
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
nir_ssa_dest_init(&load->instr, &load->dest,
num_components, bit_size, NULL);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
return nir_build_load_global_constant(build, num_components, bit_size, addr, .align_mul=align);
}
#undef nir_load_param
static inline nir_ssa_def *
nir_load_param(nir_builder *build, uint32_t param_idx)
{
assert(param_idx < build->impl->function->num_params);
nir_parameter *param = &build->impl->function->params[param_idx];
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_param);
nir_intrinsic_set_param_idx(load, param_idx);
load->num_components = param->num_components;
nir_ssa_dest_init(&load->instr, &load->dest,
param->num_components, param->bit_size, NULL);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
}
static inline nir_ssa_def *
nir_load_reloc_const_intel(nir_builder *b, uint32_t id)
{
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(b->shader,
nir_intrinsic_load_reloc_const_intel);
nir_intrinsic_set_param_idx(load, id);
nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
nir_builder_instr_insert(b, &load->instr);
return &load->dest.ssa;
}
static inline nir_ssa_def *
nir_convert_alu_types(nir_builder *b, nir_ssa_def *src,
nir_alu_type src_type, nir_alu_type dest_type,
nir_rounding_mode round, bool saturate)
{
assert(nir_alu_type_get_type_size(dest_type) != 0);
assert(nir_alu_type_get_type_size(src_type) == 0 ||
nir_alu_type_get_type_size(src_type) == src->bit_size);
src_type = (nir_alu_type)(src_type | src->bit_size);
nir_intrinsic_instr *conv =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_convert_alu_types);
conv->src[0] = nir_src_for_ssa(src);
conv->num_components = src->num_components;
nir_intrinsic_set_src_type(conv, src_type);
nir_intrinsic_set_dest_type(conv, dest_type);
nir_intrinsic_set_rounding_mode(conv, round);
nir_intrinsic_set_saturate(conv, saturate);
nir_ssa_dest_init(&conv->instr, &conv->dest, src->num_components,
nir_alu_type_get_type_size(dest_type), NULL);
nir_builder_instr_insert(b, &conv->instr);
return &conv->dest.ssa;
return nir_build_load_param(build, param->num_components, param->bit_size, param_idx);
}
static inline nir_ssa_def *
@@ -1682,22 +1586,6 @@ nir_compare_func(nir_builder *b, enum compare_func func,
unreachable("bad compare func");
}
static inline void
nir_scoped_barrier(nir_builder *b,
nir_scope exec_scope,
nir_scope mem_scope,
nir_memory_semantics mem_semantics,
nir_variable_mode mem_modes)
{
nir_intrinsic_instr *intrin =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_barrier);
nir_intrinsic_set_execution_scope(intrin, exec_scope);
nir_intrinsic_set_memory_scope(intrin, mem_scope);
nir_intrinsic_set_memory_semantics(intrin, mem_semantics);
nir_intrinsic_set_memory_modes(intrin, mem_modes);
nir_builder_instr_insert(b, &intrin->instr);
}
static inline void
nir_scoped_memory_barrier(nir_builder *b,
nir_scope scope,

View File

@@ -50,48 +50,102 @@ nir_${name}(nir_builder *build, ${src_decl_list(opcode.num_inputs)})
}
% endfor
% for name, opcode in sorted(INTR_OPCODES.items()):
struct _nir_${name}_indices {
int _; /* exists to avoid empty initializers */
% for index in opcode.indices:
${index.c_data_type} ${index.name};
% endfor
};
% endfor
<%
def sysval_decl_list(opcode):
res = ''
if opcode.indices:
res += ', unsigned ' + opcode.indices[0].name.lower()
if opcode.dest_components == 0:
res += ', unsigned num_components'
if len(opcode.bit_sizes) != 1:
res += ', unsigned bit_size'
return res
def intrinsic_decl_list(opcode):
need_components = opcode.dest_components == 0 and \
0 not in opcode.src_components
def sysval_arg_list(opcode):
args = []
if opcode.indices:
args.append(opcode.indices[0].name.lower())
else:
args.append('0')
res = ''
if (opcode.has_dest or opcode.num_srcs) and need_components:
res += ', unsigned num_components'
if opcode.has_dest and len(opcode.bit_sizes) != 1 and opcode.bit_size_src == -1:
res += ', unsigned bit_size'
for i in range(opcode.num_srcs):
res += ', nir_ssa_def *src' + str(i)
if opcode.indices:
res += ', struct _nir_' + opcode.name + '_indices indices'
return res
if opcode.dest_components == 0:
args.append('num_components')
else:
args.append(str(opcode.dest_components))
def intrinsic_macro_list(opcode):
need_components = opcode.dest_components == 0 and \
0 not in opcode.src_components
if len(opcode.bit_sizes) == 1:
bit_size = opcode.bit_sizes[0]
args.append(str(bit_size))
else:
args.append('bit_size')
res = ''
if (opcode.has_dest or opcode.num_srcs) and need_components:
res += ', num_components'
if opcode.has_dest and len(opcode.bit_sizes) != 1 and opcode.bit_size_src == -1:
res += ', bit_size'
for i in range(opcode.num_srcs):
res += ', src' + str(i)
return res
return ', '.join(args)
def get_intrinsic_bitsize(opcode):
if len(opcode.bit_sizes) == 1:
return str(opcode.bit_sizes[0])
elif opcode.bit_size_src != -1:
return 'src' + str(opcode.bit_size_src) + '->bit_size'
else:
return 'bit_size'
%>
% for name, opcode in filter(lambda v: v[1].sysval, sorted(INTR_OPCODES.items())):
<% assert len(opcode.bit_sizes) > 0 %>
% for name, opcode in sorted(INTR_OPCODES.items()):
% if opcode.has_dest:
static inline nir_ssa_def *
nir_${name}(nir_builder *build${sysval_decl_list(opcode)})
% else:
static inline nir_intrinsic_instr *
% endif
_nir_build_${name}(nir_builder *build${intrinsic_decl_list(opcode)})
{
return nir_load_system_value(build, nir_intrinsic_${name},
${sysval_arg_list(opcode)});
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(
build->shader, nir_intrinsic_${name});
% if 0 in opcode.src_components:
intrin->num_components = src${opcode.src_components.index(0)}->num_components;
% elif opcode.dest_components == 0:
intrin->num_components = num_components;
% endif
% if opcode.has_dest:
% if opcode.dest_components == 0:
nir_ssa_dest_init(&intrin->instr, &intrin->dest, intrin->num_components, ${get_intrinsic_bitsize(opcode)}, NULL);
% else:
nir_ssa_dest_init(&intrin->instr, &intrin->dest, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)}, NULL);
% endif
% endif
% for i in range(opcode.num_srcs):
intrin->src[${i}] = nir_src_for_ssa(src${i});
% endfor
% for index in opcode.indices:
nir_intrinsic_set_${index.name}(intrin, indices.${index.name});
% endfor
nir_builder_instr_insert(build, &intrin->instr);
% if opcode.has_dest:
return &intrin->dest.ssa;
% else:
return intrin;
% endif
}
% endfor
% for name, opcode in sorted(INTR_OPCODES.items()):
% if opcode.indices:
#define nir_build_${name}(build${intrinsic_macro_list(opcode)}, ...) ${'\\\\'}
_nir_build_${name}(build${intrinsic_macro_list(opcode)}, (struct _nir_${name}_indices){0, __VA_ARGS__})
% else:
#define nir_build_${name} _nir_build_${name}
% endif
#define nir_${name} nir_build_${name}
% endfor
#endif /* _NIR_BUILDER_OPCODES_ */"""
from nir_opcodes import opcodes

View File

@@ -323,7 +323,7 @@ barrier("memory_barrier")
# Storage that the barrier applies is represented using NIR variable modes.
# For an OpMemoryBarrier, set EXECUTION_SCOPE to NIR_SCOPE_NONE.
intrinsic("scoped_barrier",
indices=[EXECUTION_SCOPE, MEMORY_SEMANTICS, MEMORY_MODES, MEMORY_SCOPE])
indices=[EXECUTION_SCOPE, MEMORY_SCOPE, MEMORY_SEMANTICS, MEMORY_MODES])
# Shader clock intrinsic with semantics analogous to the clock2x32ARB()
# GLSL intrinsic.

View File

@@ -192,10 +192,13 @@ static nir_ssa_def *
lower_alu_conversion(nir_builder *b, nir_instr *instr, UNUSED void *_data)
{
nir_alu_instr *alu = nir_instr_as_alu(instr);
return nir_convert_alu_types(b, nir_ssa_for_alu_src(b, alu, 0),
nir_op_infos[alu->op].input_types[0],
nir_op_infos[alu->op].output_type,
nir_rounding_mode_undef, false);
nir_ssa_def *src = nir_ssa_for_alu_src(b, alu, 0);
nir_alu_type src_type = nir_op_infos[alu->op].input_types[0] | src->bit_size;
nir_alu_type dst_type = nir_op_infos[alu->op].output_type;
return nir_convert_alu_types(b, alu->dest.dest.ssa.bit_size, src,
.src_type = src_type, .dest_type = dst_type,
.rounding_mode = nir_rounding_mode_undef,
.saturate = false);
}
bool

View File

@@ -73,19 +73,11 @@ nir_lower_ubo_vec4_filter(const nir_instr *instr, const void *data)
}
static nir_intrinsic_instr *
nir_load_ubo_vec4(nir_builder *b, nir_ssa_def *block, nir_ssa_def *offset,
unsigned bit_size, unsigned num_components)
create_load(nir_builder *b, nir_ssa_def *block, nir_ssa_def *offset,
unsigned bit_size, unsigned num_components)
{
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo_vec4);
load->src[0] = nir_src_for_ssa(block);
load->src[1] = nir_src_for_ssa(offset);
nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size, NULL);
load->num_components = num_components;
nir_builder_instr_insert(b, &load->instr);
return load;
nir_ssa_def *def = nir_load_ubo_vec4(b, num_components, bit_size, block, offset);
return nir_instr_as_intrinsic(def->parent_instr);
}
static nir_ssa_def *
@@ -117,10 +109,9 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
if (!aligned_mul)
num_components = chans_per_vec4;
nir_intrinsic_instr *load = nir_load_ubo_vec4(b, intr->src[0].ssa,
vec4_offset,
intr->dest.ssa.bit_size,
num_components);
nir_intrinsic_instr *load = create_load(b, intr->src[0].ssa, vec4_offset,
intr->dest.ssa.bit_size,
num_components);
nir_ssa_def *result = &load->dest.ssa;
@@ -159,10 +150,9 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
*/
assert(num_components == 4);
nir_ssa_def *next_vec4_offset = nir_iadd_imm(b, vec4_offset, 1);
nir_intrinsic_instr *next_load = nir_load_ubo_vec4(b, intr->src[0].ssa,
next_vec4_offset,
intr->dest.ssa.bit_size,
num_components);
nir_intrinsic_instr *next_load = create_load(b, intr->src[0].ssa, next_vec4_offset,
intr->dest.ssa.bit_size,
num_components);
nir_ssa_def *channels[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < intr->num_components; i++) {

View File

@@ -653,9 +653,9 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
nir_rounding_mode_undef);
dest->def = nir_build_alu(&b->nb, op, src[0], NULL, NULL, NULL);
} else {
dest->def = nir_convert_alu_types(&b->nb, src[0], src_type,
dst_type, opts.rounding_mode,
opts.saturate);
dest->def = nir_convert_alu_types(&b->nb, dst_bit_size, src[0],
src_type, dst_type,
opts.rounding_mode, opts.saturate);
}
} else {
vtn_fail_if(opts.rounding_mode != nir_rounding_mode_undef &&

View File

@@ -682,8 +682,8 @@ _handle_v_load_store(struct vtn_builder *b, enum OpenCLstd_Entrypoints opcode,
if (rounding == nir_rounding_mode_undef) {
ssa->def = nir_f2f16(&b->nb, ssa->def);
} else {
ssa->def = nir_convert_alu_types(&b->nb, ssa->def,
nir_type_float,
ssa->def = nir_convert_alu_types(&b->nb, 16, ssa->def,
nir_type_float | ssa->def->bit_size,
nir_type_float16,
rounding, false);
}

View File

@@ -2546,7 +2546,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
nir_address_format_bit_size(addr_format),
nir_address_format_null_value(addr_format));
nir_ssa_def *valid = nir_build_deref_mode_is(&b->nb, src_deref, nir_mode);
nir_ssa_def *valid = nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa, nir_mode);
vtn_push_nir_ssa(b, w[2], nir_bcsel(&b->nb, valid,
&src_deref->dest.ssa,
null_value));
@@ -2570,13 +2570,13 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
nir_deref_instr *src_deref = vtn_nir_deref(b, w[3]);
nir_ssa_def *global_bit =
nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, src_deref,
nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa,
nir_var_mem_global),
nir_imm_int(&b->nb, SpvMemorySemanticsCrossWorkgroupMemoryMask),
nir_imm_int(&b->nb, 0));
nir_ssa_def *shared_bit =
nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, src_deref,
nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa,
nir_var_mem_shared),
nir_imm_int(&b->nb, SpvMemorySemanticsWorkgroupMemoryMask),
nir_imm_int(&b->nb, 0));

View File

@@ -164,7 +164,7 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
nir_ssa_def *addr =
nir_iadd_imm(b, nir_load_btd_global_arg_addr_intel(b),
aligned_offset + i * 64);
data[i] = nir_load_global_const_block_intel(b, addr, 16);
data[i] = nir_load_global_const_block_intel(b, 16, addr);
}
sysval = nir_extract_bits(b, data, 2, suboffset * 8,

View File

@@ -99,16 +99,6 @@ brw_nir_lower_shader_returns(nir_shader *shader)
nir_metadata_dominance);
}
static void
nir_btd_stack_push_intel(nir_builder *b, uint32_t stack_size)
{
nir_intrinsic_instr *push =
nir_intrinsic_instr_create(b->shader,
nir_intrinsic_btd_stack_push_intel);
nir_intrinsic_set_range(push, stack_size);
nir_builder_instr_insert(b, &push->instr);
}
static bool
move_system_values_to_top(nir_shader *shader)
{

View File

@@ -27,20 +27,6 @@
#include "brw_rt.h"
#include "nir_builder.h"
static nir_ssa_def *
nir_load_global_const_block_intel(nir_builder *b, nir_ssa_def *addr,
unsigned num_components)
{
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(b->shader,
nir_intrinsic_load_global_const_block_intel);
load->src[0] = nir_src_for_ssa(addr);
load->num_components = num_components;
nir_ssa_dest_init(&load->instr, &load->dest, num_components, 32, NULL);
nir_builder_instr_insert(b, &load->instr);
return &load->dest.ssa;
}
/* We have our own load/store scratch helpers because they emit a global
* memory read or write based on the scratch_base_ptr system value rather
* than a load/store_scratch intrinsic.
@@ -65,15 +51,6 @@ brw_nir_rt_store_scratch(nir_builder *b, uint32_t offset, unsigned align,
value, write_mask);
}
static inline void
nir_accept_ray_intersection(nir_builder *b)
{
nir_intrinsic_instr *accept =
nir_intrinsic_instr_create(b->shader,
nir_intrinsic_accept_ray_intersection);
nir_builder_instr_insert(b, &accept->instr);
}
static inline void
brw_nir_btd_spawn(nir_builder *b, nir_ssa_def *record_addr)
{
@@ -253,7 +230,7 @@ brw_nir_rt_load_globals(nir_builder *b,
nir_ssa_def *addr = nir_load_btd_global_arg_addr_intel(b);
nir_ssa_def *data;
data = nir_load_global_const_block_intel(b, addr, 16);
data = nir_load_global_const_block_intel(b, 16, addr);
defs->base_mem_addr = nir_pack_64_2x32(b, nir_channels(b, data, 0x3));
defs->call_stack_handler_addr =
@@ -276,7 +253,7 @@ brw_nir_rt_load_globals(nir_builder *b,
defs->sw_stack_size = nir_channel(b, data, 12);
defs->launch_size = nir_channels(b, data, 0x7u << 13);
data = nir_load_global_const_block_intel(b, nir_iadd_imm(b, addr, 64), 8);
data = nir_load_global_const_block_intel(b, 8, nir_iadd_imm(b, addr, 64));
defs->call_sbt_addr =
nir_pack_64_2x32_split(b, nir_channel(b, data, 0),
nir_extract_i16(b, nir_channel(b, data, 1),