radeonsi: expand FMASK before MSAA image stores are used

Image stores don't use FMASK, so we have to turn it into identity.

Acked-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
commit 095a58204d
parent 98b88cc1f6
Author: Marek Olšák
Date:   2019-09-12 20:20:53 -04:00

7 changed files with 193 additions and 24 deletions
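
Background, before the per-file diffs: FMASK is a per-pixel lookup table for MSAA color surfaces. Texture loads consult it to find which color fragment slot holds a given sample, while image stores write fragment slot i for sample i directly. A minimal C model of that indirection (illustrative only, not part of this commit; the real hardware encoding also has reserved "unknown"/EQAA values):

    /* Return the fragment slot that holds 'sample' for one pixel. */
    static unsigned fmask_fragment_index(uint64_t fmask_word, unsigned sample,
                                         unsigned bits_per_sample)
    {
            return (fmask_word >> (sample * bits_per_sample)) &
                   ((1u << bits_per_sample) - 1);
    }

With the 8-sample identity value 0x76543210 (4 bits per sample, see fmask_expand_values below), sample i always maps to fragment i, which is exactly the mapping plain image stores assume.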

src/gallium/drivers/radeonsi/si_blit.c

@@ -432,7 +432,8 @@ static void si_blit_decompress_color(struct si_context *sctx,
 				     struct si_texture *tex,
 				     unsigned first_level, unsigned last_level,
 				     unsigned first_layer, unsigned last_layer,
-				     bool need_dcc_decompress)
+				     bool need_dcc_decompress,
+				     bool need_fmask_expand)
 {
 	void* custom_blend;
 	unsigned layer, checked_last_layer, max_layer;
@@ -512,11 +513,17 @@ static void si_blit_decompress_color(struct si_context *sctx,
 	si_make_CB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
 				   vi_dcc_enabled(tex, first_level),
 				   tex->surface.u.gfx9.dcc.pipe_aligned);
+
+	if (need_fmask_expand && tex->surface.fmask_offset && tex->fmask_is_not_identity) {
+		si_compute_expand_fmask(&sctx->b, &tex->buffer.b.b);
+		tex->fmask_is_not_identity = false;
+	}
 }
 
 static void
 si_decompress_color_texture(struct si_context *sctx, struct si_texture *tex,
-			    unsigned first_level, unsigned last_level)
+			    unsigned first_level, unsigned last_level,
+			    bool need_fmask_expand)
 {
 	/* CMASK or DCC can be discarded and we can still end up here. */
 	if (!tex->cmask_buffer && !tex->surface.fmask_size && !tex->surface.dcc_offset)
@@ -524,7 +531,7 @@ si_decompress_color_texture(struct si_context *sctx, struct si_texture *tex,
 	si_blit_decompress_color(sctx, tex, first_level, last_level, 0,
 				 util_max_layer(&tex->buffer.b.b, first_level),
-				 false);
+				 false, need_fmask_expand);
 }
 
 static void
@@ -546,7 +553,7 @@ si_decompress_sampler_color_textures(struct si_context *sctx,
 		tex = (struct si_texture *)view->texture;
 
 		si_decompress_color_texture(sctx, tex, view->u.tex.first_level,
-					    view->u.tex.last_level);
+					    view->u.tex.last_level, false);
 	}
 }
@@ -569,7 +576,8 @@ si_decompress_image_color_textures(struct si_context *sctx,
 		tex = (struct si_texture *)view->resource;
 
 		si_decompress_color_texture(sctx, tex, view->u.tex.level,
-					    view->u.tex.level);
+					    view->u.tex.level,
+					    view->access & PIPE_IMAGE_ACCESS_WRITE);
 	}
 }
@@ -729,7 +737,7 @@ static void si_decompress_resident_textures(struct si_context *sctx)
 		struct si_texture *tex = (struct si_texture *)view->texture;
 
 		si_decompress_color_texture(sctx, tex, view->u.tex.first_level,
-					    view->u.tex.last_level);
+					    view->u.tex.last_level, false);
 	}
 
 	util_dynarray_foreach(&sctx->resident_tex_needs_depth_decompress,
@@ -753,7 +761,8 @@ static void si_decompress_resident_images(struct si_context *sctx)
 		struct si_texture *tex = (struct si_texture *)view->resource;
 
 		si_decompress_color_texture(sctx, tex, view->u.tex.level,
-					    view->u.tex.level);
+					    view->u.tex.level,
+					    view->access & PIPE_IMAGE_ACCESS_WRITE);
 	}
 }
@@ -798,7 +807,7 @@ void si_decompress_textures(struct si_context *sctx, unsigned shader_mask)
 			si_decompress_color_texture(sctx,
 						    (struct si_texture*)cb0->texture,
 						    cb0->u.tex.first_layer,
-						    cb0->u.tex.last_layer);
+						    cb0->u.tex.last_layer, false);
 		}
 
 		si_check_render_feedback(sctx);
@@ -855,7 +864,7 @@ static void si_decompress_subresource(struct pipe_context *ctx,
 		}
 
 		si_blit_decompress_color(sctx, stex, level, level,
-					 first_layer, last_layer, false);
+					 first_layer, last_layer, false, false);
 	}
 }
@@ -1291,7 +1300,7 @@ static void si_flush_resource(struct pipe_context *ctx,
 	if (!tex->is_depth && (tex->cmask_buffer || tex->surface.dcc_offset)) {
 		si_blit_decompress_color(sctx, tex, 0, res->last_level,
 					 0, util_max_layer(res, 0),
-					 tex->dcc_separate_buffer != NULL);
+					 tex->dcc_separate_buffer != NULL, false);
 
 		if (tex->surface.display_dcc_offset)
 			si_retile_dcc(sctx, tex);
@@ -1338,7 +1347,7 @@ void si_decompress_dcc(struct si_context *sctx, struct si_texture *tex)
 	si_blit_decompress_color(sctx, tex, 0, tex->buffer.b.b.last_level,
 				 0, util_max_layer(&tex->buffer.b.b, 0),
-				 true);
+				 true, false);
 }
 
 void si_init_blit_functions(struct si_context *sctx)
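
Note on the callers above: expansion is keyed entirely off the WRITE flag of the bound image view. A sketch of how a gallium frontend ends up triggering it when binding an MSAA image for stores (illustrative only; msaa_tex and pipe are placeholders, interface as of 2019):

    struct pipe_image_view view = {0};
    view.resource = msaa_tex;                    /* pipe_resource with nr_samples >= 2 */
    view.format = msaa_tex->format;
    /* This WRITE bit is what si_decompress_image_color_textures() and
     * si_decompress_resident_images() pass on as need_fmask_expand. */
    view.access = view.shader_access = PIPE_IMAGE_ACCESS_WRITE;
    pipe->set_shader_images(pipe, PIPE_SHADER_FRAGMENT, 0, 1, &view);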

src/gallium/drivers/radeonsi/si_compute_blit.c

@@ -505,6 +505,91 @@ void si_retile_dcc(struct si_context *sctx, struct si_texture *tex)
 	ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 3, saved_img);
 }
 
+/* Expand FMASK to make it identity, so that image stores can ignore it. */
+void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex)
+{
+	struct si_context *sctx = (struct si_context *)ctx;
+	bool is_array = tex->target == PIPE_TEXTURE_2D_ARRAY;
+	unsigned log_fragments = util_logbase2(tex->nr_storage_samples);
+	unsigned log_samples = util_logbase2(tex->nr_samples);
+	assert(tex->nr_samples >= 2);
+
+	/* EQAA FMASK expansion is unimplemented. */
+	if (tex->nr_samples != tex->nr_storage_samples)
+		return;
+
+	si_compute_internal_begin(sctx);
+
+	/* Flush caches and sync engines. */
+	sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
+		       si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
+	si_make_CB_shader_coherent(sctx, tex->nr_samples, true,
+				   true /* DCC is not possible with image stores */);
+
+	/* Save states. */
+	void *saved_cs = sctx->cs_shader_state.program;
+	struct pipe_image_view saved_image = {0};
+	util_copy_image_view(&saved_image, &sctx->images[PIPE_SHADER_COMPUTE].views[0]);
+
+	/* Bind the image. */
+	struct pipe_image_view image = {0};
+	image.resource = tex;
+	/* Don't set WRITE so as not to trigger FMASK expansion, causing
+	 * an infinite loop. */
+	image.shader_access = image.access = PIPE_IMAGE_ACCESS_READ;
+	image.format = util_format_linear(tex->format);
+
+	if (is_array)
+		image.u.tex.last_layer = tex->array_size - 1;
+
+	ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &image);
+
+	/* Bind the shader. */
+	void **shader = &sctx->cs_fmask_expand[log_samples - 1][is_array];
+	if (!*shader)
+		*shader = si_create_fmask_expand_cs(ctx, tex->nr_samples, is_array);
+	ctx->bind_compute_state(ctx, *shader);
+
+	/* Dispatch compute. */
+	struct pipe_grid_info info = {0};
+	info.block[0] = 8;
+	info.last_block[0] = tex->width0 % 8;
+	info.block[1] = 8;
+	info.last_block[1] = tex->height0 % 8;
+	info.block[2] = 1;
+	info.grid[0] = DIV_ROUND_UP(tex->width0, 8);
+	info.grid[1] = DIV_ROUND_UP(tex->height0, 8);
+	info.grid[2] = is_array ? tex->array_size : 1;
+
+	ctx->launch_grid(ctx, &info);
+
+	/* Flush caches and sync engines. */
+	sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
+		       (sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0) |
+		       si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
+
+	/* Restore previous states. */
+	ctx->bind_compute_state(ctx, saved_cs);
+	ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_image);
+	si_compute_internal_end(sctx);
+
+	/* Array of fully expanded FMASK values, arranged by [log2(fragments)][log2(samples)-1]. */
+#define INVALID 0 /* never used */
+	static const uint64_t fmask_expand_values[][4] = {
+		/* samples */
+		/* 2 (8 bpp) 4 (8 bpp)   8 (8-32bpp)  16 (16-64bpp)        fragments */
+		{0x02020202, 0x0E0E0E0E, 0xFEFEFEFE,  0xFFFEFFFE},         /* 1 */
+		{0x02020202, 0xA4A4A4A4, 0xAAA4AAA4,  0xAAAAAAA4},         /* 2 */
+		{INVALID,    0xE4E4E4E4, 0x44443210,  0x4444444444443210}, /* 4 */
+		{INVALID,    INVALID,    0x76543210,  0x8888888876543210}, /* 8 */
+	};
+
+	/* Clear FMASK to identity. */
+	struct si_texture *stex = (struct si_texture*)tex;
+	si_clear_buffer(sctx, tex, stex->surface.fmask_offset, stex->surface.fmask_size,
+			(uint32_t*)&fmask_expand_values[log_fragments][log_samples - 1],
+			4, SI_COHERENCY_SHADER, false);
+}
+
 void si_init_compute_blit_functions(struct si_context *sctx)
 {
 	sctx->b.clear_buffer = si_pipe_clear_buffer;
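
To see why the fmask_expand_values above are "identity", decode two of them (a worked example, not code from the commit):

    /* 4 samples, 4 fragments: 0xE4 per pixel, 2 bits per sample.
     *   0xE4 = 0b 11 10 01 00  ->  sample 3 -> frag 3, 2 -> 2, 1 -> 1, 0 -> 0
     *
     * 8 samples, 8 fragments: 0x76543210 per pixel, 4 bits per sample.
     *   nibble i holds i      ->  sample i -> frag i
     *
     * After the compute pass has copied every sample into the fragment slot
     * this mapping expects, si_clear_buffer() floods the whole FMASK surface
     * with the matching pattern. */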

src/gallium/drivers/radeonsi/si_descriptors.c

@@ -725,21 +725,11 @@ static void si_set_shader_image_desc(struct si_context *ctx,
 		bool uses_dcc = vi_dcc_enabled(tex, level);
 		unsigned access = view->access;
 
-		/* Clear the write flag when writes can't occur.
-		 * Note that DCC_DECOMPRESS for MSAA doesn't work in some cases,
-		 * so we don't wanna trigger it.
-		 */
-		if (tex->is_depth ||
-		    (!fmask_desc && tex->surface.fmask_size != 0)) {
-			assert(!"Z/S and MSAA image stores are not supported");
-			access &= ~PIPE_IMAGE_ACCESS_WRITE;
-		}
+		assert(!tex->is_depth);
+		assert(fmask_desc || tex->surface.fmask_size == 0);
+		assert(fmask_desc || tex->surface.fmask_offset == 0);
 
 		if (uses_dcc && !skip_decompress &&
-		    (view->access & PIPE_IMAGE_ACCESS_WRITE ||
+		    (access & PIPE_IMAGE_ACCESS_WRITE ||
 		     !vi_dcc_formats_compatible(screen, res->b.b.format, view->format))) {
 			/* If DCC can't be disabled, at least decompress it.
 			 * The decompression is relatively cheap if the surface

src/gallium/drivers/radeonsi/si_pipe.c

@@ -233,6 +233,15 @@ static void si_destroy_context(struct pipe_context *context)
 	if (sctx->cs_dcc_retile)
 		sctx->b.delete_compute_state(&sctx->b, sctx->cs_dcc_retile);
 
+	for (unsigned i = 0; i < ARRAY_SIZE(sctx->cs_fmask_expand); i++) {
+		for (unsigned j = 0; j < ARRAY_SIZE(sctx->cs_fmask_expand[i]); j++) {
+			if (sctx->cs_fmask_expand[i][j]) {
+				sctx->b.delete_compute_state(&sctx->b,
+							     sctx->cs_fmask_expand[i][j]);
+			}
+		}
+	}
+
 	if (sctx->blitter)
 		util_blitter_destroy(sctx->blitter);

src/gallium/drivers/radeonsi/si_pipe.h

@@ -314,6 +314,7 @@ struct si_texture {
 	uint16_t		stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
 	enum pipe_format	db_render_format:16;
 	uint8_t			stencil_clear_value;
+	bool			fmask_is_not_identity:1;
 	bool			tc_compatible_htile:1;
 	bool			htile_stencil_disabled:1;
 	bool			depth_cleared:1; /* if it was cleared at least once */
@@ -894,6 +895,7 @@ struct si_context {
 	void			*cs_clear_render_target;
 	void			*cs_clear_render_target_1d_array;
 	void			*cs_dcc_retile;
+	void			*cs_fmask_expand[3][2]; /* [log2(samples)-1][is_array] */
 	struct si_screen	*screen;
 	struct pipe_debug_callback debug;
 	struct ac_llvm_compiler compiler; /* only non-threaded compilation */
@@ -1303,6 +1305,7 @@ void si_compute_clear_render_target(struct pipe_context *ctx,
 				    unsigned width, unsigned height,
 				    bool render_condition_enabled);
 void si_retile_dcc(struct si_context *sctx, struct si_texture *tex);
+void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex);
 void si_init_compute_blit_functions(struct si_context *sctx);
 
 /* si_cp_dma.c */
@@ -1448,6 +1451,8 @@ void *si_create_copy_image_compute_shader_1d_array(struct pipe_context *ctx);
 void *si_clear_render_target_shader(struct pipe_context *ctx);
 void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx);
 void *si_create_dcc_retile_cs(struct pipe_context *ctx);
+void *si_create_fmask_expand_cs(struct pipe_context *ctx, unsigned num_samples,
+				bool is_array);
 void *si_create_query_result_cs(struct si_context *sctx);
 void *gfx10_create_sh_query_result_cs(struct si_context *sctx);

src/gallium/drivers/radeonsi/si_shaderlib_tgsi.c

@@ -665,6 +665,75 @@ void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx)
 	return ctx->create_compute_state(ctx, &state);
 }
 
+/* Load samples from the image, and copy them to the same image. This looks like
+ * a no-op, but it's not. Loads use FMASK, while stores don't, so samples are
+ * reordered to match expanded FMASK.
+ *
+ * After the shader finishes, FMASK should be cleared to identity.
+ */
+void *si_create_fmask_expand_cs(struct pipe_context *ctx, unsigned num_samples,
+				bool is_array)
+{
+	enum tgsi_texture_type target = is_array ? TGSI_TEXTURE_2D_ARRAY_MSAA :
+						   TGSI_TEXTURE_2D_MSAA;
+	struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
+	if (!ureg)
+		return NULL;
+
+	ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 8);
+	ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 8);
+	ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);
+
+	/* Compute the image coordinates. */
+	struct ureg_src image = ureg_DECL_image(ureg, 0, target, 0, true, false);
+	struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
+	struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
+	struct ureg_dst coord = ureg_writemask(ureg_DECL_temporary(ureg),
+					       TGSI_WRITEMASK_XYZ);
+	ureg_UMAD(ureg, ureg_writemask(coord, TGSI_WRITEMASK_XY),
+		  ureg_swizzle(blk, 0, 1, 1, 1), ureg_imm2u(ureg, 8, 8),
+		  ureg_swizzle(tid, 0, 1, 1, 1));
+	if (is_array) {
+		ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_Z),
+			 ureg_scalar(blk, TGSI_SWIZZLE_Z));
+	}
+
+	/* Load samples, resolving FMASK. */
+	struct ureg_dst sample[8];
+	assert(num_samples <= ARRAY_SIZE(sample));
+
+	for (unsigned i = 0; i < num_samples; i++) {
+		sample[i] = ureg_DECL_temporary(ureg);
+
+		ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_W),
+			 ureg_imm1u(ureg, i));
+
+		struct ureg_src srcs[] = {image, ureg_src(coord)};
+		ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &sample[i], 1, srcs, 2,
+				 TGSI_MEMORY_RESTRICT, target, 0);
+	}
+
+	/* Store samples, ignoring FMASK. */
+	for (unsigned i = 0; i < num_samples; i++) {
+		ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_W),
+			 ureg_imm1u(ureg, i));
+
+		struct ureg_dst dst_image = ureg_dst(image);
+		struct ureg_src srcs[] = {ureg_src(coord), ureg_src(sample[i])};
+		ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dst_image, 1, srcs, 2,
+				 TGSI_MEMORY_RESTRICT, target, 0);
+	}
+
+	ureg_END(ureg);
+
+	struct pipe_compute_state state = {};
+	state.ir_type = PIPE_SHADER_IR_TGSI;
+	state.prog = ureg_get_tokens(ureg, NULL);
+
+	void *cs = ctx->create_compute_state(ctx, &state);
+	ureg_destroy(ureg);
+	return cs;
+}
+
 /* Create the compute shader that is used to collect the results of gfx10+
  * shader queries.
  *
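
The ureg code above builds, per thread (one pixel of an 8x8 block), the equivalent of this C-style sketch; load_sample() and store_sample() are hypothetical stand-ins for the TGSI LOAD/STORE memory instructions on the bound MSAA image:

    /* coord.xy = block_id.xy * 8 + thread_id.xy  (the UMAD above) */
    void expand_one_pixel(unsigned x, unsigned y, unsigned layer,
                          unsigned num_samples)
    {
            float value[8][4];  /* this generator supports <= 8 samples */

            /* All loads first: they resolve through FMASK, so sample i may
             * currently live in any fragment slot. */
            for (unsigned i = 0; i < num_samples; i++)
                    load_sample(x, y, layer, i, value[i]);

            /* Then all stores: they bypass FMASK, putting sample i into
             * fragment slot i, i.e. the identity layout. */
            for (unsigned i = 0; i < num_samples; i++)
                    store_sample(x, y, layer, i, value[i]);
    }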

src/gallium/drivers/radeonsi/si_state.c

@@ -2835,8 +2835,10 @@ void si_update_fb_dirtiness_after_rendering(struct si_context *sctx)
 		struct pipe_surface *surf = sctx->framebuffer.state.cbufs[i];
 		struct si_texture *tex = (struct si_texture*)surf->texture;
 
-		if (tex->surface.fmask_offset)
+		if (tex->surface.fmask_offset) {
 			tex->dirty_level_mask |= 1 << surf->u.tex.level;
+			tex->fmask_is_not_identity = true;
+		}
 		if (tex->dcc_gather_statistics)
 			tex->separate_dcc_dirty = true;
 	}