microsoft/compiler: Handle write masks in SSBO lowering pass

Previously, the lowering only applied to 8/16/64-bit values, or to
8/16-component vectors. Now, it also handles write masks on 32-bit
1/2/3/4-component vectors.
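
For illustration only (this example is not from the commit; the values and the
offsets are made up): a 32-bit vec4 store whose write mask is 0b1011 (x, y, w)
is now lowered into two dense stores instead of a single store that would also
clobber the unwritten z component:

    store_ssbo(vec4(a, b, c, d), buf, offset)     /* wrmask = xy_w */

becomes, roughly:

    store_ssbo(vec2(a, b), buf, offset + 0)       /* wrmask = xy */
    store_ssbo(vec1(d),    buf, offset + 12)      /* wrmask = x  */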

DXIL looks like it supports a partial write mask on the buffer store
intrinsic, but DXC never generates stores with partial write masks, and
multiple drivers completely ignore the mask.

Also, set the write mask properly on the output intrinsic.

Reviewed-by: Sil Vilerino <sivileri@microsoft.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14294>
Authored by Jesse Natalie on 2021-12-22 11:51:49 -08:00; committed by Marge Bot
parent efc47571d4
commit c375b05bfe
1 changed file with 31 additions and 8 deletions

@@ -333,24 +333,43 @@ lower_store_ssbo(nir_builder *b, nir_intrinsic_instr *intr)
    unsigned num_components = val->num_components;
    unsigned num_bits = num_components * bit_size;
 
-   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { 0 };
    unsigned comp_idx = 0;
+   unsigned write_mask = nir_intrinsic_write_mask(intr);
 
    for (unsigned i = 0; i < num_components; i++)
-      comps[i] = nir_channel(b, val, i);
+      if (write_mask & (1 << i))
+         comps[i] = nir_channel(b, val, i);
 
    /* We split stores in 16byte chunks because that's the optimal granularity
    * of bufferStore(). Minimum alignment is 4byte, which saves us from
    * extra complexity to store >= 32 bit components.
    */
-   for (unsigned i = 0; i < num_bits; i += 4 * 32) {
+   unsigned bit_offset = 0;
+   while (true) {
+      /* Skip over holes in the write mask */
+      while (comp_idx < num_components && comps[comp_idx] == NULL) {
+         comp_idx++;
+         bit_offset += bit_size;
+      }
+      if (comp_idx >= num_components)
+         break;
+
       /* For each 16byte chunk (or smaller) we generate a 32bit ssbo vec
-       * store.
+       * store. If a component is skipped by the write mask, do a smaller
+       * sub-store
        */
-      unsigned substore_num_bits = MIN2(num_bits - i, 4 * 32);
-      nir_ssa_def *local_offset = nir_iadd(b, offset, nir_imm_int(b, i / 8));
+      unsigned num_src_comps_stored = 0, substore_num_bits = 0;
+      while(num_src_comps_stored + comp_idx < num_components &&
+            substore_num_bits + bit_offset < num_bits &&
+            substore_num_bits < 4 * 32 &&
+            comps[comp_idx + num_src_comps_stored]) {
+         ++num_src_comps_stored;
+         substore_num_bits += bit_size;
+      }
+
+      nir_ssa_def *local_offset = nir_iadd(b, offset, nir_imm_int(b, bit_offset / 8));
       nir_ssa_def *vec32 = load_comps_to_vec32(b, bit_size, &comps[comp_idx],
-                                               substore_num_bits / bit_size);
+                                               num_src_comps_stored);
       nir_intrinsic_instr *store;
 
       if (substore_num_bits < 32) {
@@ -387,7 +406,11 @@ lower_store_ssbo(nir_builder *b, nir_intrinsic_instr *intr)
       /* The number of components to store depends on the number of bits. */
       store->num_components = DIV_ROUND_UP(substore_num_bits, 32);
       nir_builder_instr_insert(b, &store->instr);
-      comp_idx += substore_num_bits / bit_size;
+      comp_idx += num_src_comps_stored;
+      bit_offset += substore_num_bits;
+
+      if (nir_intrinsic_has_write_mask(store))
+         nir_intrinsic_set_write_mask(store, (1 << store->num_components) - 1);
    }
 
    nir_instr_remove(&intr->instr);
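
The sketch below is a standalone, illustrative C program (none of it is Mesa
code; the variable names and the printf output are made up) that mirrors the
splitting logic added above: skip write-mask holes, gather a contiguous run of
written components capped at a 16-byte chunk, and emit one store per run. For
a 32-bit vec4 with write mask 0xB it reports a 2-component store at byte
offset 0 and a 1-component store at byte offset 12.

#include <stdio.h>

int main(void)
{
   const unsigned num_components = 4, bit_size = 32;
   const unsigned write_mask = 0xB; /* components x, y, w written; z skipped */

   unsigned comp_idx = 0, bit_offset = 0;
   while (comp_idx < num_components) {
      /* Skip over holes in the write mask */
      while (comp_idx < num_components && !(write_mask & (1u << comp_idx))) {
         comp_idx++;
         bit_offset += bit_size;
      }
      if (comp_idx >= num_components)
         break;

      /* Gather a contiguous run of written components, capped at 16 bytes */
      unsigned run_comps = 0, run_bits = 0;
      while (comp_idx + run_comps < num_components &&
             run_bits < 4 * 32 &&
             (write_mask & (1u << (comp_idx + run_comps)))) {
         run_comps++;
         run_bits += bit_size;
      }

      /* In the real pass this is where a store_ssbo is emitted; its write
       * mask covers all of its components, since the holes were split out. */
      printf("store %u component(s) at byte offset %u\n",
             run_comps, bit_offset / 8);

      comp_idx += run_comps;
      bit_offset += run_bits;
   }
   return 0;
}

Because every emitted sub-store is dense, setting its write mask to
(1 << store->num_components) - 1, as the second hunk does, is correct for any
split produced by this loop.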