radv/meta: select resolve paths

There are 3 resolve paths, the fastest being the hw resolver,
but it has restrictions on tile modes and can't do subresolves;
the compute resolver is next speed-wise, but can't handle DCC
destinations; the fragment resolver handles that case.

This will result in a slowdown for now, as we currently hack the
hw resolver into paths where it shouldn't work, but we shouldn't
keep doing that.

The next patch removes the hacks.

Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Signed-off-by: Dave Airlie <airlied@redhat.com>
This commit is contained in:
Dave Airlie 2017-04-28 03:56:25 +01:00
parent 69136f4e63
commit 2a04f5481d
1 changed file with 50 additions and 8 deletions

View File

@ -303,6 +303,25 @@ emit_resolve(struct radv_cmd_buffer *cmd_buffer,
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
}
/* Resolve path selection.  The hw resolver is the fastest but is
 * restricted by tile modes and can't do subresolves; the compute
 * resolver is next speed-wise but can't handle DCC destinations;
 * the fragment resolver handles that remaining case. */
enum radv_resolve_method {
RESOLVE_HW,       /* fixed-function hardware resolve */
RESOLVE_COMPUTE,  /* compute-shader resolve path */
RESOLVE_FRAGMENT, /* fragment-shader resolve path */
};
/* Demote the resolve method from the caller's current choice when the
 * source/destination image pair rules it out.  Only ever downgrades
 * (HW -> COMPUTE/FRAGMENT); it never upgrades an already-demoted method.
 */
static void radv_pick_resolve_method_images(struct radv_image *src_image,
                                            struct radv_image *dest_image,
                                            enum radv_resolve_method *method)
{
	/* Matching micro tile modes: leave *method untouched. */
	if (src_image->surface.micro_tile_mode == dest_image->surface.micro_tile_mode)
		return;

	/* Tile modes differ, so a shader-based path is required.  The
	 * compute path cannot write DCC-compressed destinations, so those
	 * must go through the fragment resolver instead. */
	*method = dest_image->surface.level[0].dcc_enabled ?
	          RESOLVE_FRAGMENT : RESOLVE_COMPUTE;
}
void radv_CmdResolveImage(
VkCommandBuffer cmd_buffer_h,
VkImage src_image_h,
@ -318,27 +337,29 @@ void radv_CmdResolveImage(
struct radv_device *device = cmd_buffer->device;
struct radv_meta_saved_state saved_state;
VkDevice device_h = radv_device_to_handle(device);
bool use_compute_resolve = false;
bool use_fragment_resolve = false;
enum radv_resolve_method resolve_method = RESOLVE_HW;
/* we can use the hw resolve only for single full resolves */
if (region_count == 1) {
if (regions[0].srcOffset.x ||
regions[0].srcOffset.y ||
regions[0].srcOffset.z)
use_compute_resolve = true;
resolve_method = RESOLVE_COMPUTE;
if (regions[0].dstOffset.x ||
regions[0].dstOffset.y ||
regions[0].dstOffset.z)
use_compute_resolve = true;
resolve_method = RESOLVE_COMPUTE;
if (regions[0].extent.width != src_image->info.width ||
regions[0].extent.height != src_image->info.height ||
regions[0].extent.depth != src_image->info.depth)
use_compute_resolve = true;
resolve_method = RESOLVE_COMPUTE;
} else
use_compute_resolve = true;
resolve_method = RESOLVE_COMPUTE;
if (use_fragment_resolve) {
radv_pick_resolve_method_images(src_image, dest_image,
&resolve_method);
if (resolve_method == RESOLVE_FRAGMENT) {
radv_meta_resolve_fragment_image(cmd_buffer,
src_image,
src_image_layout,
@ -348,7 +369,7 @@ void radv_CmdResolveImage(
return;
}
if (use_compute_resolve) {
if (resolve_method == RESOLVE_COMPUTE) {
radv_meta_resolve_compute_image(cmd_buffer,
src_image,
src_image_layout,
@ -524,6 +545,7 @@ radv_cmd_buffer_resolve_subpass(struct radv_cmd_buffer *cmd_buffer)
struct radv_framebuffer *fb = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
struct radv_meta_saved_state saved_state;
enum radv_resolve_method resolve_method = RESOLVE_HW;
/* FINISHME(perf): Skip clears for resolve attachments.
*
@ -537,6 +559,26 @@ radv_cmd_buffer_resolve_subpass(struct radv_cmd_buffer *cmd_buffer)
if (!subpass->has_resolve)
return;
for (uint32_t i = 0; i < subpass->color_count; ++i) {
VkAttachmentReference src_att = subpass->color_attachments[i];
VkAttachmentReference dest_att = subpass->resolve_attachments[i];
struct radv_image *dst_img = cmd_buffer->state.framebuffer->attachments[dest_att.attachment].attachment->image;
struct radv_image *src_img = cmd_buffer->state.framebuffer->attachments[src_att.attachment].attachment->image;
radv_pick_resolve_method_images(dst_img, src_img, &resolve_method);
if (resolve_method == RESOLVE_FRAGMENT) {
break;
}
}
if (resolve_method == RESOLVE_COMPUTE) {
radv_cmd_buffer_resolve_subpass_cs(cmd_buffer);
return;
} else if (resolve_method == RESOLVE_FRAGMENT) {
radv_cmd_buffer_resolve_subpass_fs(cmd_buffer);
return;
}
radv_meta_save_graphics_reset_vport_scissor(&saved_state, cmd_buffer);
for (uint32_t i = 0; i < subpass->color_count; ++i) {