nir: Get rid of nir_shader::stage

It's redundant with nir_shader::info::stage.

Acked-by: Timothy Arceri <tarceri@itsqueeze.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Jordan Justen <jordan.l.justen@intel.com>

parent 341529dbee
commit 59fb59ad54
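The change is mechanical: every read of the removed nir_shader::stage field becomes a read of the stage recorded in the embedded shader_info, as the hunks below show. A minimal before/after sketch (the helper function is hypothetical, for illustration only):

    /* Hypothetical helper, not part of the patch. */
    static bool
    shader_is_fragment(const nir_shader *shader)
    {
       /* before this commit: shader->stage == MESA_SHADER_FRAGMENT */
       return shader->info.stage == MESA_SHADER_FRAGMENT;
    }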
@@ -6453,7 +6453,7 @@ static unsigned
 ac_nir_get_max_workgroup_size(enum chip_class chip_class,
                               const struct nir_shader *nir)
 {
-   switch (nir->stage) {
+   switch (nir->info.stage) {
    case MESA_SHADER_TESS_CTRL:
       return chip_class >= CIK ? 128 : 64;
    case MESA_SHADER_GEOMETRY:
@@ -6510,7 +6510,7 @@ void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
    if (nctx)
       nctx->nir = &ctx;

-   ctx.stage = nir->stage;
+   ctx.stage = nir->info.stage;

    ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

@@ -6528,7 +6528,7 @@ void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,

    setup_locals(&ctx, func);

-   if (nir->stage == MESA_SHADER_COMPUTE)
+   if (nir->info.stage == MESA_SHADER_COMPUTE)
       setup_shared(&ctx, nir);

    visit_cf_list(&ctx, &func->impl->body);

@@ -6586,8 +6586,8 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,

    ctx.max_workgroup_size = ac_nir_get_max_workgroup_size(ctx.options->chip_class, shaders[0]);

-   create_function(&ctx, shaders[shader_count - 1]->stage, shader_count >= 2,
-                   shader_count >= 2 ? shaders[shader_count - 2]->stage : MESA_SHADER_VERTEX);
+   create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
+                   shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);

    ctx.abi.inputs = &ctx.inputs[0];
    ctx.abi.emit_outputs = handle_shader_outputs_post;
@@ -6598,28 +6598,28 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
    ac_init_exec_full_mask(&ctx.ac);

    if (ctx.ac.chip_class == GFX9 &&
-       shaders[shader_count - 1]->stage == MESA_SHADER_TESS_CTRL)
+       shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
       ac_nir_fixup_ls_hs_input_vgprs(&ctx);

    for(int i = 0; i < shader_count; ++i) {
-      ctx.stage = shaders[i]->stage;
+      ctx.stage = shaders[i]->info.stage;
       ctx.output_mask = 0;
       ctx.tess_outputs_written = 0;
       ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
       ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;

-      if (shaders[i]->stage == MESA_SHADER_GEOMETRY) {
+      if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
         ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.i32, "gs_next_vertex");

         ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
-      } else if (shaders[i]->stage == MESA_SHADER_TESS_EVAL) {
+      } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
         ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
-      } else if (shaders[i]->stage == MESA_SHADER_VERTEX) {
+      } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
         if (shader_info->info.vs.needs_instance_id) {
            ctx.shader_info->vs.vgpr_comp_cnt =
               MAX2(3, ctx.shader_info->vs.vgpr_comp_cnt);
         }
-      } else if (shaders[i]->stage == MESA_SHADER_FRAGMENT) {
+      } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
         shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
      }

@@ -6645,15 +6645,15 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
         LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
      }

-      if (shaders[i]->stage == MESA_SHADER_FRAGMENT)
+      if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
         handle_fs_inputs(&ctx, shaders[i]);
-      else if(shaders[i]->stage == MESA_SHADER_VERTEX)
+      else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
         handle_vs_inputs(&ctx, shaders[i]);
-      else if(shader_count >= 2 && shaders[i]->stage == MESA_SHADER_GEOMETRY)
+      else if(shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
         prepare_gs_input_vgprs(&ctx);

      nir_foreach_variable(variable, &shaders[i]->outputs)
-        scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->stage);
+        scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

      ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i], &ctx);

@@ -6662,16 +6662,16 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
         LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
      }

-      if (shaders[i]->stage == MESA_SHADER_GEOMETRY) {
+      if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
         unsigned addclip = shaders[i]->info.clip_distance_array_size +
                            shaders[i]->info.cull_distance_array_size > 4;
         shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
         shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
                                              shaders[i]->info.gs.vertices_out;
-      } else if (shaders[i]->stage == MESA_SHADER_TESS_CTRL) {
+      } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
         shader_info->tcs.outputs_written = ctx.tess_outputs_written;
         shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
-      } else if (shaders[i]->stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
+      } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
         shader_info->vs.outputs_written = ctx.tess_outputs_written;
      }
   }

@@ -6815,7 +6815,7 @@ static void ac_compile_llvm_module(LLVMTargetMachineRef tm,
 static void
 ac_fill_shader_info(struct ac_shader_variant_info *shader_info, struct nir_shader *nir, const struct ac_nir_compiler_options *options)
 {
-   switch (nir->stage) {
+   switch (nir->info.stage) {
    case MESA_SHADER_COMPUTE:
       for (int i = 0; i < 3; ++i)
          shader_info->cs.block_size[i] = nir->info.cs.local_size[i];

@@ -6864,7 +6864,7 @@ void ac_compile_nir_shader(LLVMTargetMachineRef tm,
    LLVMModuleRef llvm_module = ac_translate_nir_to_llvm(tm, nir, nir_count, shader_info,
                                                         options);

-   ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info, nir[0]->stage, dump_shader, options->supports_spill);
+   ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info, nir[0]->info.stage, dump_shader, options->supports_spill);
    for (int i = 0; i < nir_count; ++i)
       ac_fill_shader_info(shader_info, nir[i], options);
 }

@@ -116,7 +116,7 @@ gather_info_input_decl(nir_shader *nir,
                        nir_variable *var,
                        struct ac_shader_info *info)
 {
-   switch (nir->stage) {
+   switch (nir->info.stage) {
    case MESA_SHADER_VERTEX:
       info->vs.has_vertex_buffers = true;
       break;

@@ -208,7 +208,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
                            spec_entries, num_spec_entries,
                            stage, entrypoint_name, &supported_ext, &nir_options);
      nir = entry_point->shader;
-     assert(nir->stage == stage);
+     assert(nir->info.stage == stage);
      nir_validate_shader(nir);

      free(spec_entries);
@@ -258,9 +258,9 @@ radv_shader_compile_to_nir(struct radv_device *device,
    * indirect indexing is trivial.
    */
   nir_variable_mode indirect_mask = 0;
-  if (nir->stage == MESA_SHADER_GEOMETRY ||
-      (nir->stage != MESA_SHADER_TESS_CTRL &&
-       nir->stage != MESA_SHADER_TESS_EVAL &&
+  if (nir->info.stage == MESA_SHADER_GEOMETRY ||
+      (nir->info.stage != MESA_SHADER_TESS_CTRL &&
+       nir->info.stage != MESA_SHADER_TESS_EVAL &&
        !llvm_has_working_vgpr_indexing)) {
      indirect_mask |= nir_var_shader_in;
   }
@@ -504,7 +504,7 @@ radv_shader_variant_create(struct radv_device *device,
   options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
   options.supports_spill = device->llvm_supports_spill;

-  return shader_variant_create(device, module, shaders, shader_count, shaders[shader_count - 1]->stage,
+  return shader_variant_create(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage,
                                &options, false, code_out, code_size_out);
 }

@@ -1375,7 +1375,7 @@ ntq_setup_inputs(struct v3d_compile *c)
    qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

    uint32_t vpm_components_queued = 0;
-   if (c->s->stage == MESA_SHADER_VERTEX) {
+   if (c->s->info.stage == MESA_SHADER_VERTEX) {
       bool uses_iid = c->s->info.system_values_read &
          (1ull << SYSTEM_VALUE_INSTANCE_ID);
       bool uses_vid = c->s->info.system_values_read &
@@ -1405,7 +1405,7 @@ ntq_setup_inputs(struct v3d_compile *c)
      resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                        (loc + 1) * 4);

-     if (c->s->stage == MESA_SHADER_FRAGMENT) {
+     if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
        if (var->data.location == VARYING_SLOT_POS) {
           emit_fragcoord_input(c, loc);
        } else if (var->data.location == VARYING_SLOT_PNTC ||
@@ -1433,7 +1433,7 @@ ntq_setup_inputs(struct v3d_compile *c)
      }
   }

-  if (c->s->stage == MESA_SHADER_VERTEX) {
+  if (c->s->info.stage == MESA_SHADER_VERTEX) {
      assert(vpm_components_queued == 0);
      assert(num_components == 0);
   }
@@ -1452,7 +1452,7 @@ ntq_setup_outputs(struct v3d_compile *c)
      for (int i = 0; i < 4; i++)
         add_output(c, loc + i, var->data.location, i);

-     if (c->s->stage == MESA_SHADER_FRAGMENT) {
+     if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
        switch (var->data.location) {
        case FRAG_RESULT_COLOR:
           c->output_color_var[0] = var;
@@ -1948,7 +1948,7 @@ ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
 static void
 nir_to_vir(struct v3d_compile *c)
 {
-   if (c->s->stage == MESA_SHADER_FRAGMENT) {
+   if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
      c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
      c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
      c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));
@@ -2013,7 +2013,7 @@ void
 v3d_nir_to_vir(struct v3d_compile *c)
 {
    if (V3D_DEBUG & (V3D_DEBUG_NIR |
-                    v3d_debug_flag_for_shader_stage(c->s->stage))) {
+                    v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
      fprintf(stderr, "%s prog %d/%d NIR:\n",
              vir_get_stage_name(c),
              c->program_id, c->variant_id);
@@ -2022,7 +2022,7 @@ v3d_nir_to_vir(struct v3d_compile *c)

    nir_to_vir(c);

-   switch (c->s->stage) {
+   switch (c->s->info.stage) {
    case MESA_SHADER_FRAGMENT:
       emit_frag_end(c);
       break;
@@ -2034,7 +2034,7 @@ v3d_nir_to_vir(struct v3d_compile *c)
    }

    if (V3D_DEBUG & (V3D_DEBUG_VIR |
-                    v3d_debug_flag_for_shader_stage(c->s->stage))) {
+                    v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
      fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
              vir_get_stage_name(c),
              c->program_id, c->variant_id);
@@ -2048,7 +2048,7 @@ v3d_nir_to_vir(struct v3d_compile *c)
    /* XXX: vir_schedule_instructions(c); */

    if (V3D_DEBUG & (V3D_DEBUG_VIR |
-                    v3d_debug_flag_for_shader_stage(c->s->stage))) {
+                    v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
      fprintf(stderr, "%s prog %d/%d VIR:\n",
              vir_get_stage_name(c),
              c->program_id, c->variant_id);

@@ -894,5 +894,5 @@ vir_get_stage_name(struct v3d_compile *c)
    if (c->vs_key && c->vs_key->is_coord)
       return "MESA_SHADER_COORD";
    else
-      return gl_shader_stage_name(c->s->stage);
+      return gl_shader_stage_name(c->s->info.stage);
 }

@@ -349,7 +349,7 @@ v3d_vir_to_qpu(struct v3d_compile *c)
    }

    if (V3D_DEBUG & (V3D_DEBUG_QPU |
-                    v3d_debug_flag_for_shader_stage(c->s->stage))) {
+                    v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
      v3d_dump_qpu(c);
    }

@@ -163,7 +163,7 @@ glsl_to_nir(const struct gl_shader_program *shader_prog,
    * two locations. For instance, if we have in the IR code a dvec3 attr0 in
    * location 0 and vec4 attr1 in location 1, in NIR attr0 will use
    * locations/slots 0 and 1, and attr1 will use location/slot 2 */
-  if (shader->stage == MESA_SHADER_VERTEX)
+  if (shader->info.stage == MESA_SHADER_VERTEX)
      nir_remap_attributes(shader);

   shader->info.name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
@@ -341,12 +341,12 @@ nir_visitor::visit(ir_variable *ir)
      break;

   case ir_var_shader_in:
-     if (shader->stage == MESA_SHADER_FRAGMENT &&
+     if (shader->info.stage == MESA_SHADER_FRAGMENT &&
         ir->data.location == VARYING_SLOT_FACE) {
        /* For whatever reason, GLSL IR makes gl_FrontFacing an input */
        var->data.location = SYSTEM_VALUE_FRONT_FACE;
        var->data.mode = nir_var_system_value;
-     } else if (shader->stage == MESA_SHADER_GEOMETRY &&
+     } else if (shader->info.stage == MESA_SHADER_GEOMETRY &&
                ir->data.location == VARYING_SLOT_PRIMITIVE_ID) {
        /* For whatever reason, GLSL IR makes gl_PrimitiveIDIn an input */
        var->data.location = SYSTEM_VALUE_PRIMITIVE_ID;
@@ -354,7 +354,7 @@ nir_visitor::visit(ir_variable *ir)
      } else {
        var->data.mode = nir_var_shader_in;

-       if (shader->stage == MESA_SHADER_TESS_EVAL &&
+       if (shader->info.stage == MESA_SHADER_TESS_EVAL &&
           (ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
            ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
           var->data.compact = ir->type->without_array()->is_scalar();
@@ -372,7 +372,7 @@ nir_visitor::visit(ir_variable *ir)

   case ir_var_shader_out:
      var->data.mode = nir_var_shader_out;
-     if (shader->stage == MESA_SHADER_TESS_CTRL &&
+     if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
        (ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
         ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
        var->data.compact = ir->type->without_array()->is_scalar();
@@ -44,8 +44,12 @@ nir_shader_create(void *mem_ctx,

    shader->options = options;

-   if (si)
+   if (si) {
+      assert(si->stage == stage);
       shader->info = *si;
+   } else {
+      shader->info.stage = stage;
+   }

    exec_list_make_empty(&shader->functions);
    exec_list_make_empty(&shader->registers);
@@ -58,8 +62,6 @@ nir_shader_create(void *mem_ctx,
    shader->num_uniforms = 0;
    shader->num_shared = 0;

-   shader->stage = stage;
-
    return shader;
 }

@@ -143,7 +145,7 @@ nir_shader_add_variable(nir_shader *shader, nir_variable *var)
       break;

    case nir_var_shared:
-      assert(shader->stage == MESA_SHADER_COMPUTE);
+      assert(shader->info.stage == MESA_SHADER_COMPUTE);
       exec_list_push_tail(&shader->shared, &var->node);
       break;

@@ -162,8 +164,10 @@ nir_variable_create(nir_shader *shader, nir_variable_mode mode,
    var->type = type;
    var->data.mode = mode;

-   if ((mode == nir_var_shader_in && shader->stage != MESA_SHADER_VERTEX) ||
-       (mode == nir_var_shader_out && shader->stage != MESA_SHADER_FRAGMENT))
+   if ((mode == nir_var_shader_in &&
+        shader->info.stage != MESA_SHADER_VERTEX) ||
+       (mode == nir_var_shader_out &&
+        shader->info.stage != MESA_SHADER_FRAGMENT))
       var->data.interpolation = INTERP_MODE_SMOOTH;

    if (mode == nir_var_shader_in || mode == nir_var_uniform)

@@ -1904,9 +1904,6 @@ typedef struct nir_shader {
     * access plus one
     */
    unsigned num_inputs, num_uniforms, num_outputs, num_shared;
-
-   /** The shader stage, such as MESA_SHADER_VERTEX. */
-   gl_shader_stage stage;
 } nir_shader;

 static inline nir_function_impl *
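With the duplicate field gone, nir_shader_create() is the only place the stage is recorded: a caller-supplied shader_info must already carry the matching stage (the new assert enforces it), and otherwise the constructor seeds info.stage itself. A short usage sketch under those assumptions (the NULL mem_ctx/options arguments are for brevity only):

    nir_shader *s = nir_shader_create(NULL, MESA_SHADER_COMPUTE, NULL, NULL);
    assert(s->info.stage == MESA_SHADER_COMPUTE); /* seeded by the constructor */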
@@ -737,7 +737,7 @@ nir_shader_clone(void *mem_ctx, const nir_shader *s)
    clone_state state;
    init_clone_state(&state, NULL, true, false);

-   nir_shader *ns = nir_shader_create(mem_ctx, s->stage, s->options, NULL);
+   nir_shader *ns = nir_shader_create(mem_ctx, s->info.stage, s->options, NULL);
    state.ns = ns;

    clone_var_list(&state, &ns->uniforms, &s->uniforms);

@@ -53,7 +53,7 @@ set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len)
       else
          shader->info.inputs_read |= bitfield;

-      if (shader->stage == MESA_SHADER_FRAGMENT) {
+      if (shader->info.stage == MESA_SHADER_FRAGMENT) {
         shader->info.fs.uses_sample_qualifier |= var->data.sample;
      }
   } else {
@@ -79,7 +79,7 @@ mark_whole_variable(nir_shader *shader, nir_variable *var)
 {
    const struct glsl_type *type = var->type;

-   if (nir_is_per_vertex_io(var, shader->stage)) {
+   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }
@@ -129,7 +129,7 @@ try_mask_partial_io(nir_shader *shader, nir_deref_var *deref)
    nir_variable *var = deref->var;
    const struct glsl_type *type = var->type;

-   if (nir_is_per_vertex_io(var, shader->stage)) {
+   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }
@@ -196,7 +196,7 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)
    switch (instr->intrinsic) {
    case nir_intrinsic_discard:
    case nir_intrinsic_discard_if:
-      assert(shader->stage == MESA_SHADER_FRAGMENT);
+      assert(shader->info.stage == MESA_SHADER_FRAGMENT);
       shader->info.fs.uses_discard = true;
       break;

@@ -214,7 +214,7 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)

      /* We need to track which input_reads bits correspond to a
       * dvec3/dvec4 input attribute */
-     if (shader->stage == MESA_SHADER_VERTEX &&
+     if (shader->info.stage == MESA_SHADER_VERTEX &&
         var->data.mode == nir_var_shader_in &&
         glsl_type_is_dual_slot(glsl_without_array(var->type))) {
        for (uint i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
@@ -252,7 +252,7 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)

    case nir_intrinsic_end_primitive:
    case nir_intrinsic_end_primitive_with_counter:
-      assert(shader->stage == MESA_SHADER_GEOMETRY);
+      assert(shader->info.stage == MESA_SHADER_GEOMETRY);
       shader->info.gs.uses_end_primitive = 1;
       break;

@@ -327,7 +327,7 @@ nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
    shader->info.patch_inputs_read = 0;
    shader->info.patch_outputs_written = 0;
    shader->info.system_values_read = 0;
-   if (shader->stage == MESA_SHADER_FRAGMENT) {
+   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
       shader->info.fs.uses_sample_qualifier = false;
    }
    nir_foreach_block(block, entrypoint) {

@@ -75,7 +75,7 @@ tcs_add_output_reads(nir_shader *shader, uint64_t *read)
            nir_variable *var = intrin_instr->variables[0]->var;
            read[var->data.location_frac] |=
               get_variable_io_mask(intrin_instr->variables[0]->var,
-                                   shader->stage);
+                                   shader->info.stage);
         }
      }
   }
@@ -102,7 +102,7 @@ remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,

      uint64_t other_stage = used_by_other_stage[var->data.location_frac];

-     if (!(other_stage & get_variable_io_mask(var, shader->stage))) {
+     if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
        /* This one is invalid, make it a global variable instead */
        var->data.location = 0;
        var->data.mode = nir_var_global;
@@ -120,26 +120,26 @@ remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
 bool
 nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
 {
-   assert(producer->stage != MESA_SHADER_FRAGMENT);
-   assert(consumer->stage != MESA_SHADER_VERTEX);
+   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
+   assert(consumer->info.stage != MESA_SHADER_VERTEX);

    uint64_t read[4] = { 0 }, written[4] = { 0 };

    nir_foreach_variable(var, &producer->outputs) {
       written[var->data.location_frac] |=
-         get_variable_io_mask(var, producer->stage);
+         get_variable_io_mask(var, producer->info.stage);
    }

    nir_foreach_variable(var, &consumer->inputs) {
       read[var->data.location_frac] |=
-         get_variable_io_mask(var, consumer->stage);
+         get_variable_io_mask(var, consumer->info.stage);
    }

    /* Each TCS invocation can read data written by other TCS invocations,
     * so even if the outputs are not used by the TES we must also make
     * sure they are not read by the TCS before demoting them to globals.
     */
-   if (producer->stage == MESA_SHADER_TESS_CTRL)
+   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
       tcs_add_output_reads(producer, read);

    bool progress = false;

@@ -39,7 +39,7 @@ void
 nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
                      bool alpha_to_one)
 {
-   assert(shader->stage == MESA_SHADER_FRAGMENT);
+   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

    nir_foreach_function(function, shader) {
       nir_function_impl *impl = function->impl;

@@ -100,7 +100,7 @@ lower_instr(nir_intrinsic_instr *instr,

    nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
    nir_intrinsic_set_base(new_instr,
-      shader_program->data->UniformStorage[uniform_loc].opaque[shader->stage].index);
+      shader_program->data->UniformStorage[uniform_loc].opaque[shader->info.stage].index);

    nir_load_const_instr *offset_const =
       nir_load_const_instr_create(mem_ctx, 1, 32);

@@ -133,7 +133,7 @@ void
 nir_lower_bitmap(nir_shader *shader,
                  const nir_lower_bitmap_options *options)
 {
-   assert(shader->stage == MESA_SHADER_FRAGMENT);
+   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

    lower_bitmap_impl(nir_shader_get_entrypoint(shader), options);
 }

@@ -33,7 +33,7 @@ typedef struct {
 static bool
 is_color_output(lower_state *state, nir_variable *out)
 {
-   switch (state->shader->stage) {
+   switch (state->shader->info.stage) {
    case MESA_SHADER_VERTEX:
    case MESA_SHADER_GEOMETRY:
       switch (out->data.location) {

@@ -48,7 +48,7 @@ get_unwrapped_array_length(nir_shader *nir, nir_variable *var)
    * array length.
    */
   const struct glsl_type *type = var->type;
-  if (nir_is_per_vertex_io(var, nir->stage))
+  if (nir_is_per_vertex_io(var, nir->info.stage))
      type = glsl_get_array_element(type);

   assert(glsl_type_is_array(type));
@@ -158,7 +158,7 @@ combine_clip_cull(nir_shader *nir,
      cull->data.location = VARYING_SLOT_CLIP_DIST0;
   } else {
      /* Turn the ClipDistance array into a combined one */
-     update_type(clip, nir->stage, clip_array_size + cull_array_size);
+     update_type(clip, nir->info.stage, clip_array_size + cull_array_size);

      /* Rewrite CullDistance to reference the combined array */
      nir_foreach_function(function, nir) {
@@ -194,10 +194,10 @@ nir_lower_clip_cull_distance_arrays(nir_shader *nir)
 {
    bool progress = false;

-   if (nir->stage <= MESA_SHADER_GEOMETRY)
+   if (nir->info.stage <= MESA_SHADER_GEOMETRY)
       progress |= combine_clip_cull(nir, &nir->outputs, true);

-   if (nir->stage > MESA_SHADER_VERTEX)
+   if (nir->info.stage > MESA_SHADER_VERTEX)
       progress |= combine_clip_cull(nir, &nir->inputs, false);

    return progress;

@@ -252,7 +252,7 @@ nir_lower_drawpixels(nir_shader *shader,
       .shader = shader,
    };

-   assert(shader->stage == MESA_SHADER_FRAGMENT);
+   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

    nir_foreach_function(function, shader) {
       if (function->impl)

@@ -167,7 +167,7 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    nir_intrinsic_op op;
    switch (mode) {
    case nir_var_shader_in:
-      if (nir->stage == MESA_SHADER_FRAGMENT &&
+      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          nir->options->use_interpolated_input_intrinsics &&
          var->data.interpolation != INTERP_MODE_FLAT) {
         assert(vertex_index == NULL);
@@ -412,7 +412,7 @@ nir_lower_io_block(nir_block *block,

      b->cursor = nir_before_instr(instr);

-     const bool per_vertex = nir_is_per_vertex_io(var, b->shader->stage);
+     const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index = NULL;

@@ -76,7 +76,7 @@ emit_copies(nir_cursor cursor, nir_shader *shader, struct exec_list *new_vars,
 static void
 emit_output_copies_impl(struct lower_io_state *state, nir_function_impl *impl)
 {
-   if (state->shader->stage == MESA_SHADER_GEOMETRY) {
+   if (state->shader->info.stage == MESA_SHADER_GEOMETRY) {
       /* For geometry shaders, we have to emit the output copies right
        * before each EmitVertex call.
        */
@@ -152,7 +152,7 @@ nir_lower_io_to_temporaries(nir_shader *shader, nir_function_impl *entrypoint,
 {
    struct lower_io_state state;

-   if (shader->stage == MESA_SHADER_TESS_CTRL)
+   if (shader->info.stage == MESA_SHADER_TESS_CTRL)
       return;

    state.shader = shader;

@@ -131,7 +131,7 @@ lower_io_types_block(struct lower_io_types_state *state, nir_block *block)
          (var->data.mode != nir_var_shader_out))
         continue;

-     bool vs_in = (state->shader->stage == MESA_SHADER_VERTEX) &&
+     bool vs_in = (state->shader->info.stage == MESA_SHADER_VERTEX) &&
                   (var->data.mode == nir_var_shader_in);
      if (glsl_count_attribute_slots(var->type, vs_in) == 1)
         continue;

@@ -157,7 +157,8 @@ nir_lower_samplers(nir_shader *shader,

    nir_foreach_function(function, shader) {
       if (function->impl)
-         progress |= lower_impl(function->impl, shader_program, shader->stage);
+         progress |= lower_impl(function->impl, shader_program,
+                                shader->info.stage);
    }

    return progress;

@@ -116,7 +116,7 @@ lower_deref(nir_deref_var *deref,
             nir_builder *b)
 {
    nir_variable *var = deref->var;
-   gl_shader_stage stage = state->shader->stage;
+   gl_shader_stage stage = state->shader->info.stage;
    unsigned location = var->data.location;
    unsigned binding;
    const struct glsl_type *orig_type = deref->deref.type;

@@ -820,7 +820,8 @@ nir_lower_tex_block(nir_block *block, nir_builder *b,
      if ((nir_tex_instr_src_index(tex, nir_tex_src_lod) == -1) &&
         (tex->op == nir_texop_txf || tex->op == nir_texop_txs ||
          tex->op == nir_texop_txl || tex->op == nir_texop_query_levels ||
-         (tex->op == nir_texop_tex && b->shader->stage != MESA_SHADER_FRAGMENT))) {
+         (tex->op == nir_texop_tex &&
+          b->shader->info.stage != MESA_SHADER_FRAGMENT))) {
        b->cursor = nir_before_instr(&tex->instr);
        nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(nir_imm_int(b, 0)));
        progress = true;

@@ -193,7 +193,7 @@ nir_lower_two_sided_color(nir_shader *shader)
       .shader = shader,
    };

-   if (shader->stage != MESA_SHADER_FRAGMENT)
+   if (shader->info.stage != MESA_SHADER_FRAGMENT)
       return;

    if (setup_inputs(&state) != 0)

@@ -105,7 +105,7 @@ nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading)
    bool progress = false;
    nir_builder b;

-   assert(shader->stage == MESA_SHADER_FRAGMENT);
+   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

    nir_foreach_function(function, shader) {
       if (function->impl) {

@@ -348,7 +348,7 @@ nir_lower_wpos_ytransform(nir_shader *shader,
       .shader = shader,
    };

-   assert(shader->stage == MESA_SHADER_FRAGMENT);
+   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

    nir_foreach_function(function, shader) {
       if (function->impl)

@@ -416,7 +416,7 @@ print_var_decl(nir_variable *var, print_state *state)
    const char *loc = NULL;
    char buf[4];

-   switch (state->shader->stage) {
+   switch (state->shader->info.stage) {
    case MESA_SHADER_VERTEX:
       if (var->data.mode == nir_var_shader_in)
          loc = gl_vert_attrib_name(var->data.location);
@@ -1157,7 +1157,7 @@ nir_print_shader_annotated(nir_shader *shader, FILE *fp,

    state.annotations = annotations;

-   fprintf(fp, "shader: %s\n", gl_shader_stage_name(shader->stage));
+   fprintf(fp, "shader: %s\n", gl_shader_stage_name(shader->info.stage));

    if (shader->info.name)
       fprintf(fp, "name: %s\n", shader->info.name);
@@ -1165,7 +1165,7 @@ nir_print_shader_annotated(nir_shader *shader, FILE *fp,
    if (shader->info.label)
       fprintf(fp, "label: %s\n", shader->info.label);

-   switch (shader->stage) {
+   switch (shader->info.stage) {
    case MESA_SHADER_COMPUTE:
       fprintf(fp, "local-size: %u, %u, %u%s\n",
               shader->info.cs.local_size[0],

@@ -973,7 +973,7 @@ validate_var_decl(nir_variable *var, bool is_global, validate_state *state)
      assert(glsl_type_is_array(var->type));

      const struct glsl_type *type = glsl_get_array_element(var->type);
-     if (nir_is_per_vertex_io(var, state->shader->stage)) {
+     if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
        assert(glsl_type_is_array(type));
        assert(glsl_type_is_scalar(glsl_get_array_element(type)));
     } else {
@@ -2863,34 +2863,34 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
       break;

    case SpvExecutionModeEarlyFragmentTests:
-      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
       b->shader->info.fs.early_fragment_tests = true;
       break;

    case SpvExecutionModeInvocations:
-      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+      assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
       b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
       break;

    case SpvExecutionModeDepthReplacing:
-      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
       b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
       break;
    case SpvExecutionModeDepthGreater:
-      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
       b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
       break;
    case SpvExecutionModeDepthLess:
-      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
       b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
       break;
    case SpvExecutionModeDepthUnchanged:
-      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
       b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
       break;

    case SpvExecutionModeLocalSize:
-      assert(b->shader->stage == MESA_SHADER_COMPUTE);
+      assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
       b->shader->info.cs.local_size[0] = mode->literals[0];
       b->shader->info.cs.local_size[1] = mode->literals[1];
       b->shader->info.cs.local_size[2] = mode->literals[2];
@@ -2899,11 +2899,11 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
       break; /* Nothing to do with this */

    case SpvExecutionModeOutputVertices:
-      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
-          b->shader->stage == MESA_SHADER_TESS_EVAL) {
+      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->literals[0];
      } else {
-        assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+        assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->literals[0];
      }
      break;
@@ -2915,12 +2915,12 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
    case SpvExecutionModeInputTrianglesAdjacency:
    case SpvExecutionModeQuads:
    case SpvExecutionModeIsolines:
-      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
-          b->shader->stage == MESA_SHADER_TESS_EVAL) {
+      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(mode->exec_mode);
      } else {
-        assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+        assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(mode->exec_mode);
      }
@@ -2929,39 +2929,39 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
    case SpvExecutionModeOutputPoints:
    case SpvExecutionModeOutputLineStrip:
    case SpvExecutionModeOutputTriangleStrip:
-      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+      assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
       b->shader->info.gs.output_primitive =
          gl_primitive_from_spv_execution_mode(mode->exec_mode);
       break;

    case SpvExecutionModeSpacingEqual:
-      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
-             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
       b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
       break;
    case SpvExecutionModeSpacingFractionalEven:
-      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
-             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
       b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
       break;
    case SpvExecutionModeSpacingFractionalOdd:
-      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
-             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
       b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
       break;
    case SpvExecutionModeVertexOrderCw:
-      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
-             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
       b->shader->info.tess.ccw = false;
       break;
    case SpvExecutionModeVertexOrderCcw:
-      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
-             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
       b->shader->info.tess.ccw = true;
       break;
    case SpvExecutionModePointMode:
-      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
-             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
       b->shader->info.tess.point_mode = true;
       break;

@@ -1048,7 +1048,7 @@ vtn_get_builtin_location(struct vtn_builder *b,
       set_mode_system_value(mode);
       break;
    case SpvBuiltInPrimitiveId:
-      if (b->shader->stage == MESA_SHADER_FRAGMENT) {
+      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
@@ -1064,18 +1064,18 @@ vtn_get_builtin_location(struct vtn_builder *b,
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
-     if (b->shader->stage == MESA_SHADER_FRAGMENT)
+     if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
        *mode = nir_var_shader_in;
-     else if (b->shader->stage == MESA_SHADER_GEOMETRY)
+     else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
        *mode = nir_var_shader_out;
     else
        unreachable("invalid stage for SpvBuiltInLayer");
     break;
   case SpvBuiltInViewportIndex:
     *location = VARYING_SLOT_VIEWPORT;
-    if (b->shader->stage == MESA_SHADER_GEOMETRY)
+    if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
       *mode = nir_var_shader_out;
-    else if (b->shader->stage == MESA_SHADER_FRAGMENT)
+    else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
       *mode = nir_var_shader_in;
    else
       unreachable("invalid stage for SpvBuiltInViewportIndex");
@@ -1355,11 +1355,11 @@ var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
    if (dec->decoration == SpvDecorationLocation) {
       unsigned location = dec->literals[0];
       bool is_vertex_input;
-      if (b->shader->stage == MESA_SHADER_FRAGMENT &&
+      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += FRAG_RESULT_DATA0;
-      } else if (b->shader->stage == MESA_SHADER_VERTEX &&
+      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         is_vertex_input = true;
         location += VERT_ATTRIB_GENERIC0;
@@ -1653,7 +1653,7 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,

    int array_length = -1;
    struct vtn_type *interface_type = var->type;
-   if (is_per_vertex_inout(var, b->shader->stage)) {
+   if (is_per_vertex_inout(var, b->shader->info.stage)) {
       /* In Geometry shaders (and some tessellation), inputs come
        * in per-vertex arrays. However, some builtins come in
        * non-per-vertex, hence the need for the is_array check. In

@@ -1867,7 +1867,7 @@ ttn_add_output_stores(struct ttn_compile *c)
      nir_src src = nir_src_for_reg(c->output_regs[loc].reg);
      src.reg.base_offset = c->output_regs[loc].offset;

-     if (c->build.shader->stage == MESA_SHADER_FRAGMENT &&
+     if (c->build.shader->info.stage == MESA_SHADER_FRAGMENT &&
         var->data.location == FRAG_RESULT_DEPTH) {
        /* TGSI uses TGSI_SEMANTIC_POSITION.z for the depth output, while
         * NIR uses a single float FRAG_RESULT_DEPTH.

@@ -399,7 +399,7 @@ int main(int argc, char **argv)
    v.key = key;
    v.shader = &s;

-   switch (nir->stage) {
+   switch (nir->info.stage) {
    case MESA_SHADER_FRAGMENT:
       s.type = v.type = SHADER_FRAGMENT;
       break;
@@ -410,7 +410,7 @@ int main(int argc, char **argv)
       s.type = v.type = SHADER_COMPUTE;
       break;
    default:
-      errx(1, "unhandled shader stage: %d", nir->stage);
+      errx(1, "unhandled shader stage: %d", nir->info.stage);
    }

    info = "NIR compiler";

@@ -167,11 +167,11 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
    OPT_V(s, nir_lower_regs_to_ssa);

    if (key) {
-      if (s->stage == MESA_SHADER_VERTEX) {
+      if (s->info.stage == MESA_SHADER_VERTEX) {
         OPT_V(s, nir_lower_clip_vs, key->ucp_enables);
         if (key->vclamp_color)
            OPT_V(s, nir_lower_clamp_color_outputs);
-      } else if (s->stage == MESA_SHADER_FRAGMENT) {
+      } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
         OPT_V(s, nir_lower_clip_fs, key->ucp_enables);
         if (key->fclamp_color)
            OPT_V(s, nir_lower_clamp_color_outputs);

@@ -124,10 +124,10 @@ void si_nir_scan_shader(const struct nir_shader *nir,
    nir_function *func;
    unsigned i;

-   assert(nir->stage == MESA_SHADER_VERTEX ||
-          nir->stage == MESA_SHADER_FRAGMENT);
+   assert(nir->info.stage == MESA_SHADER_VERTEX ||
+          nir->info.stage == MESA_SHADER_FRAGMENT);

-   info->processor = pipe_shader_type_from_mesa(nir->stage);
+   info->processor = pipe_shader_type_from_mesa(nir->info.stage);
    info->num_tokens = 2; /* indicate that the shader is non-empty */
    info->num_instructions = 2;

@@ -138,7 +138,7 @@ void si_nir_scan_shader(const struct nir_shader *nir,
    nir_foreach_variable(variable, &nir->inputs) {
       unsigned semantic_name, semantic_index;
       unsigned attrib_count = glsl_count_attribute_slots(variable->type,
-                                                         nir->stage == MESA_SHADER_VERTEX);
+                                                         nir->info.stage == MESA_SHADER_VERTEX);

       assert(attrib_count == 1 && "not implemented");

@@ -146,11 +146,11 @@ void si_nir_scan_shader(const struct nir_shader *nir,
       * tracker has already mapped them to attributes via
       * variable->data.driver_location.
       */
-     if (nir->stage == MESA_SHADER_VERTEX)
+     if (nir->info.stage == MESA_SHADER_VERTEX)
        continue;

      /* Fragment shader position is a system value. */
-     if (nir->stage == MESA_SHADER_FRAGMENT &&
+     if (nir->info.stage == MESA_SHADER_FRAGMENT &&
         variable->data.location == VARYING_SLOT_POS) {
        if (variable->data.pixel_center_integer)
           info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
@@ -231,7 +231,7 @@ void si_nir_scan_shader(const struct nir_shader *nir,
    nir_foreach_variable(variable, &nir->outputs) {
       unsigned semantic_name, semantic_index;

-      if (nir->stage == MESA_SHADER_FRAGMENT) {
+      if (nir->info.stage == MESA_SHADER_FRAGMENT) {
         tgsi_get_gl_frag_result_semantic(variable->data.location,
                                          &semantic_name, &semantic_index);
      } else {
@@ -336,7 +336,7 @@ si_lower_nir(struct si_shader_selector* sel)
    nir_foreach_variable(variable, &sel->nir->outputs) {
       variable->data.driver_location *= 4;

-      if (sel->nir->stage == MESA_SHADER_FRAGMENT) {
+      if (sel->nir->info.stage == MESA_SHADER_FRAGMENT) {
         if (variable->data.location == FRAG_RESULT_DEPTH)
            variable->data.driver_location += 2;
         else if (variable->data.location == FRAG_RESULT_STENCIL)
@@ -478,15 +478,15 @@ bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
    unsigned fs_attr_idx = 0;
    nir_foreach_variable(variable, &nir->inputs) {
      unsigned attrib_count = glsl_count_attribute_slots(variable->type,
-                                                        nir->stage == MESA_SHADER_VERTEX);
+                                                        nir->info.stage == MESA_SHADER_VERTEX);
      unsigned input_idx = variable->data.driver_location;

      for (unsigned i = 0; i < attrib_count; ++i) {
        LLVMValueRef data[4];

-       if (nir->stage == MESA_SHADER_VERTEX)
+       if (nir->info.stage == MESA_SHADER_VERTEX)
          declare_nir_input_vs(ctx, variable, i, data);
-       else if (nir->stage == MESA_SHADER_FRAGMENT)
+       else if (nir->info.stage == MESA_SHADER_FRAGMENT)
          declare_nir_input_fs(ctx, variable, i, &fs_attr_idx, data);

       for (unsigned chan = 0; chan < 4; chan++) {
@@ -2467,7 +2467,7 @@ vc4_shader_state_create(struct pipe_context *pctx,

    if (vc4_debug & VC4_DEBUG_NIR) {
       fprintf(stderr, "%s prog %d NIR:\n",
-              gl_shader_stage_name(s->stage),
+              gl_shader_stage_name(s->info.stage),
               so->program_id);
       nir_print_shader(s, stderr);
       fprintf(stderr, "\n");

@@ -147,7 +147,7 @@ struct brw_blorp_wm_inputs
 #define BLORP_CREATE_NIR_INPUT(shader, name, type) ({ \
    nir_variable *input = nir_variable_create((shader), nir_var_shader_in, \
                                              type, #name); \
-   if ((shader)->stage == MESA_SHADER_FRAGMENT) \
+   if ((shader)->info.stage == MESA_SHADER_FRAGMENT) \
       input->data.interpolation = INTERP_MODE_FLAT; \
    input->data.location = VARYING_SLOT_VAR0 + \
       offsetof(struct brw_blorp_wm_inputs, name) / (4 * sizeof(float)); \

@@ -165,7 +165,7 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,

       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

-      gl_shader_stage stage = b->shader->stage;
+      gl_shader_stage stage = b->shader->info.stage;

       if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
           (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
@@ -526,11 +526,11 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
                  bool is_scalar)
 {
    nir_variable_mode indirect_mask = 0;
-   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput)
+   if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectInput)
       indirect_mask |= nir_var_shader_in;
-   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput)
+   if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectOutput)
       indirect_mask |= nir_var_shader_out;
-   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp)
+   if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectTemp)
       indirect_mask |= nir_var_local;

    bool progress;
@@ -601,9 +601,9 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
    const struct gen_device_info *devinfo = compiler->devinfo;
    UNUSED bool progress; /* Written by OPT */

-   const bool is_scalar = compiler->scalar_stage[nir->stage];
+   const bool is_scalar = compiler->scalar_stage[nir->info.stage];

-   if (nir->stage == MESA_SHADER_GEOMETRY)
+   if (nir->info.stage == MESA_SHADER_GEOMETRY)
       OPT(nir_lower_gs_intrinsics);

    /* See also brw_nir_trig_workarounds.py */
@@ -638,11 +638,11 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
    OPT(nir_lower_clip_cull_distance_arrays);

    nir_variable_mode indirect_mask = 0;
-   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput)
+   if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
-   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput)
+   if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectOutput)
      indirect_mask |= nir_var_shader_out;
-   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp)
+   if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

    nir_lower_indirect_derefs(nir, indirect_mask);
@@ -672,7 +672,7 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
 {
    const struct gen_device_info *devinfo = compiler->devinfo;
    bool debug_enabled =
-      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));
+      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->info.stage));

    UNUSED bool progress; /* Written by OPT */

@@ -706,7 +706,7 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
    }

       fprintf(stderr, "NIR (SSA form) for %s shader:\n",
-              _mesa_shader_stage_to_string(nir->stage));
+              _mesa_shader_stage_to_string(nir->info.stage));
       nir_print_shader(nir, stderr);
    }

@@ -729,7 +729,7 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,

    if (unlikely(debug_enabled)) {
       fprintf(stderr, "NIR (final form) for %s shader:\n",
-              _mesa_shader_stage_to_string(nir->stage));
+              _mesa_shader_stage_to_string(nir->info.stage));
       nir_print_shader(nir, stderr);
    }

@@ -172,7 +172,7 @@ brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
    const struct gen_device_info *devinfo = compiler->devinfo;

    if ((devinfo->gen <= 7 && !devinfo->is_haswell) ||
-       !compiler->scalar_stage[nir->stage]) {
+       !compiler->scalar_stage[nir->info.stage]) {
       memset(out_ranges, 0, 4 * sizeof(struct brw_ubo_range));
       return;
    }

@@ -159,7 +159,7 @@ bool
 brw_nir_lower_cs_intrinsics(nir_shader *nir,
                             struct brw_cs_prog_data *prog_data)
 {
-   assert(nir->stage == MESA_SHADER_COMPUTE);
+   assert(nir->info.stage == MESA_SHADER_COMPUTE);

    bool progress = false;
    struct lower_intrinsics_state state;

@@ -123,7 +123,7 @@ emit_quads_workaround(nir_builder *b, nir_block *block)
 void
 brw_nir_apply_tcs_quads_workaround(nir_shader *nir)
 {
-   assert(nir->stage == MESA_SHADER_TESS_CTRL);
+   assert(nir->info.stage == MESA_SHADER_TESS_CTRL);

    nir_function_impl *impl = nir_shader_get_entrypoint(nir);

@@ -670,7 +670,7 @@ backend_shader::backend_shader(const struct brw_compiler *compiler,
      stage_prog_data(stage_prog_data),
      mem_ctx(mem_ctx),
      cfg(NULL),
-     stage(shader->stage)
+     stage(shader->info.stage)
 {
    debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
    stage_name = _mesa_shader_stage_to_string(stage);

@@ -1497,9 +1497,9 @@ generate_code(struct brw_codegen *p,
               const struct cfg_t *cfg)
 {
    const struct gen_device_info *devinfo = p->devinfo;
-   const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->stage);
+   const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->info.stage);
    bool debug_flag = INTEL_DEBUG &
-      intel_debug_flag_for_shader_stage(nir->stage);
+      intel_debug_flag_for_shader_stage(nir->info.stage);
    struct annotation_info annotation;
    memset(&annotation, 0, sizeof(annotation));
    int spill_count = 0, fill_count = 0;
@@ -1770,7 +1770,7 @@ generate_code(struct brw_codegen *p,
       case SHADER_OPCODE_TG4:
       case SHADER_OPCODE_TG4_OFFSET:
       case SHADER_OPCODE_SAMPLEINFO:
-         generate_tex(p, prog_data, nir->stage,
+         generate_tex(p, prog_data, nir->info.stage,
                       inst, dst, src[0], src[1], src[2]);
          break;

@@ -1910,7 +1910,7 @@ generate_code(struct brw_codegen *p,

       case SHADER_OPCODE_FIND_LIVE_CHANNEL: {
          const struct brw_reg mask =
-            brw_stage_has_packed_dispatch(devinfo, nir->stage,
+            brw_stage_has_packed_dispatch(devinfo, nir->info.stage,
                                           &prog_data->base) ? brw_imm_ud(~0u) :
                                           brw_dmask_reg();
          brw_find_live_channel(p, dst, mask);
@@ -2195,7 +2195,7 @@ generate_code(struct brw_codegen *p,
    if (unlikely(debug_flag)) {
       fprintf(stderr, "Native code for %s %s shader %s:\n",
               nir->info.label ? nir->info.label : "unnamed",
-              _mesa_shader_stage_to_string(nir->stage), nir->info.name);
+              _mesa_shader_stage_to_string(nir->info.stage), nir->info.name);

       fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. %d:%d "
               "spills:fills. Compacted %d to %d bytes (%.0f%%)\n",
@@ -296,6 +296,7 @@ anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
                               struct anv_pipeline_bind_map *map)
 {
    struct anv_pipeline_layout *layout = pipeline->layout;
+   gl_shader_stage stage = shader->info.stage;

    struct apply_pipeline_layout_state state = {
       .shader = shader,
@@ -328,15 +329,15 @@ anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
-        if (set_layout->binding[b].stage[shader->stage].surface_index >= 0) {
+        if (set_layout->binding[b].stage[stage].surface_index >= 0) {
           map->surface_count +=
              anv_descriptor_set_binding_layout_get_hw_size(&set_layout->binding[b]);
        }
-        if (set_layout->binding[b].stage[shader->stage].sampler_index >= 0) {
+        if (set_layout->binding[b].stage[stage].sampler_index >= 0) {
           map->sampler_count +=
              anv_descriptor_set_binding_layout_get_hw_size(&set_layout->binding[b]);
        }
-        if (set_layout->binding[b].stage[shader->stage].image_index >= 0)
+        if (set_layout->binding[b].stage[stage].image_index >= 0)
           map->image_count += set_layout->binding[b].array_size;
      }
   }
@@ -353,7 +354,7 @@ anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
      struct anv_descriptor_set_binding_layout *binding =
        &set_layout->binding[b];

-     if (binding->stage[shader->stage].surface_index >= 0) {
+     if (binding->stage[stage].surface_index >= 0) {
        state.set[set].surface_offsets[b] = surface;
        struct anv_sampler **samplers = binding->immutable_samplers;
        for (unsigned i = 0; i < binding->array_size; i++) {
@@ -368,7 +369,7 @@ anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
        }
      }

-     if (binding->stage[shader->stage].sampler_index >= 0) {
+     if (binding->stage[stage].sampler_index >= 0) {
        state.set[set].sampler_offsets[b] = sampler;
        struct anv_sampler **samplers = binding->immutable_samplers;
        for (unsigned i = 0; i < binding->array_size; i++) {
@@ -383,7 +384,7 @@ anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
        }
      }

-     if (binding->stage[shader->stage].image_index >= 0) {
+     if (binding->stage[stage].image_index >= 0) {
        state.set[set].image_offsets[b] = image;
        image += binding->array_size;
      }
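Besides the field rename, the anv hunks above also hoist the stage into a local gl_shader_stage once per function rather than re-reading shader->info.stage at every binding check; a sketch of the pattern (binding/map names as in the diff above):

    gl_shader_stage stage = shader->info.stage;
    if (binding->stage[stage].surface_index >= 0) {
       /* ... count or assign surfaces for this stage ... */
    }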
@@ -114,7 +114,7 @@ try_lower_input_load(nir_function_impl *impl, nir_intrinsic_instr *load)
 void
 anv_nir_lower_input_attachments(nir_shader *shader)
 {
-   assert(shader->stage == MESA_SHADER_FRAGMENT);
+   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

    nir_foreach_function(function, shader) {
       if (!function->impl)

@@ -44,7 +44,7 @@ struct lower_multiview_state {
 static nir_ssa_def *
 build_instance_id(struct lower_multiview_state *state)
 {
-   assert(state->builder.shader->stage == MESA_SHADER_VERTEX);
+   assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);

    if (state->instance_id == NULL) {
       nir_builder *b = &state->builder;
@@ -74,7 +74,7 @@ build_view_index(struct lower_multiview_state *state)
    assert(state->view_mask != 0);
    if (0 && _mesa_bitcount(state->view_mask) == 1) {
       state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
-   } else if (state->builder.shader->stage == MESA_SHADER_VERTEX) {
+   } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
       /* We only support 16 viewports */
       assert((state->view_mask & 0xffff0000) == 0);

@@ -122,15 +122,15 @@ build_view_index(struct lower_multiview_state *state)
      }
   } else {
      const struct glsl_type *type = glsl_int_type();
-     if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
-         b->shader->stage == MESA_SHADER_GEOMETRY)
+     if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+         b->shader->info.stage == MESA_SHADER_GEOMETRY)
        type = glsl_array_type(type, 1);

      nir_variable *idx_var =
        nir_variable_create(b->shader, nir_var_shader_in,
                            type, "view index");
      idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
-     if (b->shader->stage == MESA_SHADER_FRAGMENT)
+     if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
        idx_var->data.interpolation = INTERP_MODE_FLAT;

      if (glsl_type_is_array(type)) {
@@ -154,7 +154,7 @@ build_view_index(struct lower_multiview_state *state)
 bool
 anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
 {
-   assert(shader->stage != MESA_SHADER_COMPUTE);
+   assert(shader->info.stage != MESA_SHADER_COMPUTE);

    /* If multiview isn't enabled, we have nothing to do. */
    if (view_mask == 0)
@@ -202,7 +202,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
    * available in the VS. If it's not a fragment shader, we need to pass
    * the view index on to the next stage.
    */
-   if (shader->stage != MESA_SHADER_FRAGMENT) {
+   if (shader->info.stage != MESA_SHADER_FRAGMENT) {
       nir_ssa_def *view_index = build_view_index(&state);

       nir_builder *b = &state.builder;

@@ -138,7 +138,7 @@ anv_shader_compile_to_nir(struct anv_pipeline *pipeline,
                      spec_entries, num_spec_entries,
                      stage, entrypoint_name, &supported_ext, nir_options);
    nir_shader *nir = entry_point->shader;
-   assert(nir->stage == stage);
+   assert(nir->info.stage == stage);
    nir_validate_shader(nir);
    ralloc_steal(mem_ctx, nir);

@@ -296,10 +296,12 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
       NIR_PASS_V(producer, nir_lower_indirect_derefs, indirect_mask);
       NIR_PASS_V(consumer, nir_lower_indirect_derefs, indirect_mask);

-      const bool p_is_scalar = compiler->scalar_stage[producer->stage];
+      const bool p_is_scalar =
+         compiler->scalar_stage[producer->info.stage];
       producer = brw_nir_optimize(producer, compiler, p_is_scalar);

-      const bool c_is_scalar = compiler->scalar_stage[producer->stage];
+      const bool c_is_scalar =
+         compiler->scalar_stage[producer->info.stage];
       consumer = brw_nir_optimize(consumer, compiler, c_is_scalar);
    }

@@ -206,8 +206,8 @@ brw_nir_setup_glsl_uniforms(void *mem_ctx, nir_shader *shader,
         brw_nir_setup_glsl_builtin_uniform(var, prog, stage_prog_data,
                                            is_scalar);
      } else {
-        brw_nir_setup_glsl_uniform(shader->stage, var, prog, stage_prog_data,
-                                   is_scalar);
+        brw_nir_setup_glsl_uniform(shader->info.stage, var, prog,
+                                   stage_prog_data, is_scalar);
      }
   }
 }

@@ -328,7 +328,7 @@ st_finalize_nir(struct st_context *st, struct gl_program *prog, nir_shader *nir)
    NIR_PASS_V(nir, nir_lower_var_copies);
    NIR_PASS_V(nir, nir_lower_io_types);

-   if (nir->stage == MESA_SHADER_VERTEX) {
+   if (nir->info.stage == MESA_SHADER_VERTEX) {
       /* Needs special handling so drvloc matches the vbo state: */
       st_nir_assign_vs_in_locations(prog, nir);
       /* Re-lower global vars, to deal with any dead VS inputs. */
@@ -339,7 +339,7 @@ st_finalize_nir(struct st_context *st, struct gl_program *prog, nir_shader *nir)
                               &nir->num_outputs,
                               type_size);
      st_nir_fixup_varying_slots(st, &nir->outputs);
-   } else if (nir->stage == MESA_SHADER_FRAGMENT) {
+   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      sort_varyings(&nir->inputs);
      nir_assign_var_locations(&nir->inputs,
                               &nir->num_inputs,
@@ -348,14 +348,14 @@ st_finalize_nir(struct st_context *st, struct gl_program *prog, nir_shader *nir)
      nir_assign_var_locations(&nir->outputs,
                               &nir->num_outputs,
                               type_size);
-   } else if (nir->stage == MESA_SHADER_COMPUTE) {
+   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
      /* TODO? */
    } else {
      unreachable("invalid shader type for tgsi bypass\n");
    }

    struct gl_shader_program *shader_program;
-   switch (nir->stage) {
+   switch (nir->info.stage) {
    case MESA_SHADER_VERTEX:
       shader_program = ((struct st_vertex_program *)prog)->shader_program;
       break;
@@ -371,7 +371,7 @@ st_finalize_nir(struct st_context *st, struct gl_program *prog, nir_shader *nir)
    }

    NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
-              st->ctx->Const.Program[nir->stage].MaxAtomicBuffers);
+              st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);

    st_nir_assign_uniform_locations(prog, shader_program,
                                    &nir->uniforms, &nir->num_uniforms);