iris: Create an enum for the surface groups

This will make it convenient to handle compacting and printing the
binding table.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Author: Caio Marcelo de Oliveira Filho
Date:   2019-05-23 08:44:29 -07:00
Parent: 1c8ea8b300
Commit: 79f1529ae0
3 changed files with 45 additions and 35 deletions


@@ -281,13 +281,21 @@ struct iris_uncompiled_shader {
struct iris_state_ref const_data_state;
};
enum iris_surface_group {
IRIS_SURFACE_GROUP_RENDER_TARGET,
IRIS_SURFACE_GROUP_CS_WORK_GROUPS,
IRIS_SURFACE_GROUP_TEXTURE,
IRIS_SURFACE_GROUP_IMAGE,
IRIS_SURFACE_GROUP_UBO,
IRIS_SURFACE_GROUP_SSBO,
IRIS_SURFACE_GROUP_COUNT,
};
struct iris_binding_table {
uint32_t size_bytes;
uint32_t texture_start;
uint32_t ubo_start;
uint32_t ssbo_start;
uint32_t image_start;
uint32_t offsets[IRIS_SURFACE_GROUP_COUNT];
};
/**
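
With every group's base index stored in one array, code that walks the
binding table can simply loop over the enum instead of touching each
*_start field by hand. A minimal sketch of the kind of printing helper
this enables (hypothetical, not part of this commit; it only relies on
the 0xd0d0d0d0 sentinel stored for unassigned groups, and plain printf
for output):

   static const char *surface_group_names[IRIS_SURFACE_GROUP_COUNT] = {
      [IRIS_SURFACE_GROUP_RENDER_TARGET]  = "render target",
      [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
      [IRIS_SURFACE_GROUP_TEXTURE]        = "texture",
      [IRIS_SURFACE_GROUP_IMAGE]          = "image",
      [IRIS_SURFACE_GROUP_UBO]            = "UBO",
      [IRIS_SURFACE_GROUP_SSBO]           = "SSBO",
   };

   static void
   print_binding_table(const struct iris_binding_table *bt)
   {
      for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
         /* Groups that were never assigned a base keep the poison value. */
         if (bt->offsets[i] != 0xd0d0d0d0)
            printf("  %-14s starts at BTI %u\n",
                   surface_group_names[i], bt->offsets[i]);
      }
   }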


@@ -502,18 +502,19 @@ iris_setup_uniforms(const struct brw_compiler *compiler,
}
static void
rewrite_src_with_bti(nir_builder *b, nir_instr *instr,
nir_src *src, uint32_t offset)
rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
nir_instr *instr, nir_src *src,
enum iris_surface_group group)
{
assert(offset != 0xd0d0d0d0);
assert(bt->offsets[group] != 0xd0d0d0d0);
b->cursor = nir_before_instr(instr);
nir_ssa_def *bti;
if (nir_src_is_const(*src)) {
bti = nir_imm_intN_t(b, nir_src_as_uint(*src) + offset,
bti = nir_imm_intN_t(b, nir_src_as_uint(*src) + bt->offsets[group],
src->ssa->bit_size);
} else {
bti = nir_iadd_imm(b, src->ssa, offset);
bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
}
nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
}
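
As a concrete illustration of the rewrite (the offset value is made up for
the example): suppose bt->offsets[IRIS_SURFACE_GROUP_UBO] == 3. A load_ubo
whose block index is the constant 2 is rewritten to use the immediate 5,
while a load_ubo with a dynamic index i is rewritten to use
nir_iadd_imm(b, i, 3).
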
@@ -535,30 +536,30 @@ iris_setup_binding_table(struct nir_shader *nir,
const struct shader_info *info = &nir->info;
memset(bt, 0, sizeof(*bt));
for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
bt->offsets[i] = 0xd0d0d0d0;
/* Calculate the initial binding table index for each group. */
uint32_t next_offset;
if (info->stage == MESA_SHADER_FRAGMENT) {
next_offset = num_render_targets;
bt->offsets[IRIS_SURFACE_GROUP_RENDER_TARGET] = 0;
} else if (info->stage == MESA_SHADER_COMPUTE) {
next_offset = 1;
bt->offsets[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 0;
} else {
next_offset = 0;
}
unsigned num_textures = util_last_bit(info->textures_used);
if (num_textures) {
bt->texture_start = next_offset;
bt->offsets[IRIS_SURFACE_GROUP_TEXTURE] = next_offset;
next_offset += num_textures;
} else {
bt->texture_start = 0xd0d0d0d0;
}
if (info->num_images) {
bt->image_start = next_offset;
bt->offsets[IRIS_SURFACE_GROUP_IMAGE] = next_offset;
next_offset += info->num_images;
} else {
bt->image_start = 0xd0d0d0d0;
}
/* Allocate a slot in the UBO section for NIR constants if present.
@@ -571,18 +572,14 @@ iris_setup_binding_table(struct nir_shader *nir,
if (num_cbufs) {
//assert(info->num_ubos <= BRW_MAX_UBO);
bt->ubo_start = next_offset;
bt->offsets[IRIS_SURFACE_GROUP_UBO] = next_offset;
next_offset += num_cbufs;
} else {
bt->ubo_start = 0xd0d0d0d0;
}
if (info->num_ssbos || info->num_abos) {
bt->ssbo_start = next_offset;
bt->offsets[IRIS_SURFACE_GROUP_SSBO] = next_offset;
// XXX: see iris_state "wasting 16 binding table slots for ABOs" comment
next_offset += IRIS_MAX_ABOS + info->num_ssbos;
} else {
bt->ssbo_start = 0xd0d0d0d0;
}
bt->size_bytes = next_offset * 4;
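
To make the resulting layout concrete, consider a hypothetical fragment
shader with 2 render targets, 3 textures, 1 image, 2 constant buffers and
no SSBOs, ABOs or NIR constants. The assignments above would yield:

   offsets[IRIS_SURFACE_GROUP_RENDER_TARGET]  = 0           /* slots 0-1 */
   offsets[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 0xd0d0d0d0  /* unused */
   offsets[IRIS_SURFACE_GROUP_TEXTURE]        = 2           /* slots 2-4 */
   offsets[IRIS_SURFACE_GROUP_IMAGE]          = 5           /* slot 5 */
   offsets[IRIS_SURFACE_GROUP_UBO]            = 6           /* slots 6-7 */
   offsets[IRIS_SURFACE_GROUP_SSBO]           = 0xd0d0d0d0  /* unused */
   size_bytes = 8 * 4   /* 8 binding table entries, 4 bytes each */
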
@@ -599,8 +596,9 @@ iris_setup_binding_table(struct nir_shader *nir,
nir_foreach_block (block, impl) {
nir_foreach_instr (instr, block) {
if (instr->type == nir_instr_type_tex) {
assert(bt->texture_start != 0xd0d0d0d0);
nir_instr_as_tex(instr)->texture_index += bt->texture_start;
assert(bt->offsets[IRIS_SURFACE_GROUP_TEXTURE] != 0xd0d0d0d0);
nir_instr_as_tex(instr)->texture_index +=
bt->offsets[IRIS_SURFACE_GROUP_TEXTURE];
continue;
}
@@ -622,15 +620,18 @@ iris_setup_binding_table(struct nir_shader *nir,
case nir_intrinsic_image_atomic_comp_swap:
case nir_intrinsic_image_load_raw_intel:
case nir_intrinsic_image_store_raw_intel:
rewrite_src_with_bti(&b, instr, &intrin->src[0], bt->image_start);
rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
IRIS_SURFACE_GROUP_IMAGE);
break;
case nir_intrinsic_load_ubo:
rewrite_src_with_bti(&b, instr, &intrin->src[0], bt->ubo_start);
rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
IRIS_SURFACE_GROUP_UBO);
break;
case nir_intrinsic_store_ssbo:
rewrite_src_with_bti(&b, instr, &intrin->src[1], bt->ssbo_start);
rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
IRIS_SURFACE_GROUP_SSBO);
break;
case nir_intrinsic_get_buffer_size:
@@ -648,7 +649,8 @@ iris_setup_binding_table(struct nir_shader *nir,
case nir_intrinsic_ssbo_atomic_fmax:
case nir_intrinsic_ssbo_atomic_fcomp_swap:
case nir_intrinsic_load_ssbo:
rewrite_src_with_bti(&b, instr, &intrin->src[0], bt->ssbo_start);
rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
IRIS_SURFACE_GROUP_SSBO);
break;
default:


@@ -4103,7 +4103,7 @@ use_image(struct iris_batch *batch, struct iris_context *ice,
if (!pin_only) bt_map[s++] = (addr) - binder_addr;
#define bt_assert(section, exists) \
if (!pin_only) assert(shader->bt.section == (exists) ? s : 0xd0d0d0d0)
if (!pin_only) assert(shader->bt.offsets[section] == (exists) ? s : 0xd0d0d0d0)
/**
* Populate the binding table for a given shader stage.
@@ -4170,7 +4170,7 @@ iris_populate_binding_table(struct iris_context *ice,
unsigned num_textures = util_last_bit(info->textures_used);
bt_assert(texture_start, num_textures > 0);
bt_assert(IRIS_SURFACE_GROUP_TEXTURE, num_textures > 0);
for (int i = 0; i < num_textures; i++) {
struct iris_sampler_view *view = shs->textures[i];
@@ -4179,14 +4179,14 @@ iris_populate_binding_table(struct iris_context *ice,
push_bt_entry(addr);
}
bt_assert(image_start, info->num_images > 0);
bt_assert(IRIS_SURFACE_GROUP_IMAGE, info->num_images > 0);
for (int i = 0; i < info->num_images; i++) {
uint32_t addr = use_image(batch, ice, shs, i);
push_bt_entry(addr);
}
bt_assert(ubo_start, shader->num_cbufs > 0);
bt_assert(IRIS_SURFACE_GROUP_UBO, shader->num_cbufs > 0);
for (int i = 0; i < shader->num_cbufs; i++) {
uint32_t addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
@@ -4202,7 +4202,7 @@ iris_populate_binding_table(struct iris_context *ice,
push_bt_entry(addr);
}
bt_assert(ssbo_start, info->num_abos + info->num_ssbos > 0);
bt_assert(IRIS_SURFACE_GROUP_SSBO, info->num_abos + info->num_ssbos > 0);
/* XXX: st is wasting 16 binding table slots for ABOs. Should add a cap
* for changing nir_lower_atomics_to_ssbos setting and buffer_base offset
@@ -4336,7 +4336,7 @@ iris_restore_render_saved_bos(struct iris_context *ice,
continue;
/* Range block is a binding table index, map back to UBO index. */
unsigned block_index = range->block - shader->bt.ubo_start;
unsigned block_index = range->block - shader->bt.offsets[IRIS_SURFACE_GROUP_UBO];
struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
struct iris_resource *res = (void *) cbuf->buffer;
@@ -4424,7 +4424,7 @@ iris_restore_compute_saved_bos(struct iris_context *ice,
if (range->length > 0) {
/* Range block is a binding table index, map back to UBO index. */
unsigned block_index = range->block - shader->bt.ubo_start;
unsigned block_index = range->block - shader->bt.offsets[IRIS_SURFACE_GROUP_UBO];
struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
struct iris_resource *res = (void *) cbuf->buffer;
@@ -4700,7 +4700,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
continue;
/* Range block is a binding table index, map back to UBO index. */
unsigned block_index = range->block - shader->bt.ubo_start;
unsigned block_index = range->block - shader->bt.offsets[IRIS_SURFACE_GROUP_UBO];
struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
struct iris_resource *res = (void *) cbuf->buffer;
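
The "map back to UBO index" sites above are the inverse of the offset
assignment done in iris_setup_binding_table(). A minimal sketch of a
generic helper for that inverse mapping (illustrative only, not introduced
by this commit):

   static inline uint32_t
   group_index_from_bti(const struct iris_binding_table *bt,
                        enum iris_surface_group group, uint32_t bti)
   {
      assert(bt->offsets[group] != 0xd0d0d0d0);
      assert(bti >= bt->offsets[group]);
      return bti - bt->offsets[group];
   }

With such a helper, each of the three call sites reduces to
group_index_from_bti(&shader->bt, IRIS_SURFACE_GROUP_UBO, range->block).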