2016-08-23 05:37:28 +01:00
|
|
|
/*
|
|
|
|
* Copyright © 2016 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "anv_private.h"
|
|
|
|
|
|
|
|
static bool
|
2019-01-09 23:15:49 +00:00
|
|
|
lookup_blorp_shader(struct blorp_batch *batch,
|
2016-08-23 05:37:28 +01:00
|
|
|
const void *key, uint32_t key_size,
|
|
|
|
uint32_t *kernel_out, void *prog_data_out)
|
|
|
|
{
|
2019-01-09 23:15:49 +00:00
|
|
|
struct blorp_context *blorp = batch->blorp;
|
2016-08-23 05:37:28 +01:00
|
|
|
struct anv_device *device = blorp->driver_ctx;
|
|
|
|
|
|
|
|
struct anv_shader_bin *bin =
|
2022-05-27 09:27:55 +01:00
|
|
|
anv_device_search_for_kernel(device, device->internal_cache,
|
2021-10-04 20:24:57 +01:00
|
|
|
key, key_size, NULL);
|
2016-08-23 05:37:28 +01:00
|
|
|
if (!bin)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* The cache already has a reference and it's not going anywhere so there
|
|
|
|
* is no need to hold a second reference.
|
|
|
|
*/
|
|
|
|
anv_shader_bin_unref(device, bin);
|
|
|
|
|
|
|
|
*kernel_out = bin->kernel.offset;
|
2016-11-01 23:03:12 +00:00
|
|
|
*(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
|
2016-08-23 05:37:28 +01:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-03-14 12:12:22 +00:00
|
|
|
static bool
|
2020-03-12 21:27:13 +00:00
|
|
|
upload_blorp_shader(struct blorp_batch *batch, uint32_t stage,
|
2016-08-23 05:37:28 +01:00
|
|
|
const void *key, uint32_t key_size,
|
|
|
|
const void *kernel, uint32_t kernel_size,
|
2016-11-01 21:16:34 +00:00
|
|
|
const struct brw_stage_prog_data *prog_data,
|
|
|
|
uint32_t prog_data_size,
|
2016-08-23 05:37:28 +01:00
|
|
|
uint32_t *kernel_out, void *prog_data_out)
|
|
|
|
{
|
2019-01-09 23:15:49 +00:00
|
|
|
struct blorp_context *blorp = batch->blorp;
|
2016-08-23 05:37:28 +01:00
|
|
|
struct anv_device *device = blorp->driver_ctx;
|
|
|
|
|
|
|
|
struct anv_pipeline_bind_map bind_map = {
|
|
|
|
.surface_count = 0,
|
|
|
|
.sampler_count = 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
struct anv_shader_bin *bin =
|
2022-05-27 09:27:55 +01:00
|
|
|
anv_device_upload_kernel(device, device->internal_cache, stage,
|
2021-10-04 20:24:57 +01:00
|
|
|
key, key_size, kernel, kernel_size,
|
|
|
|
prog_data, prog_data_size,
|
|
|
|
NULL, 0, NULL, &bind_map);
|
2016-08-23 05:37:28 +01:00
|
|
|
|
2017-03-03 09:58:23 +00:00
|
|
|
if (!bin)
|
|
|
|
return false;
|
|
|
|
|
2016-08-23 05:37:28 +01:00
|
|
|
/* The cache already has a reference and it's not going anywhere so there
|
|
|
|
* is no need to hold a second reference.
|
|
|
|
*/
|
|
|
|
anv_shader_bin_unref(device, bin);
|
|
|
|
|
|
|
|
*kernel_out = bin->kernel.offset;
|
2016-11-01 23:03:12 +00:00
|
|
|
*(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
|
2017-03-14 12:12:22 +00:00
|
|
|
|
|
|
|
return true;
|
2016-08-23 05:37:28 +01:00
|
|
|
}
|
|
|
|
|
2022-05-27 09:27:55 +01:00
|
|
|
/* One-time, device-level blorp setup: create the blorp context, wire it to
 * this device's compiler and shader-cache callbacks, and pick the
 * hardware-generation-specific batch emission entrypoint.
 */
void
anv_device_init_blorp(struct anv_device *device)
{
   const struct blorp_config config = {
      /* Advertise mesh shading to blorp only when the physical device
       * exposes NV_mesh_shader.
       */
      .use_mesh_shading = device->physical->vk.supported_extensions.NV_mesh_shader,
   };

   blorp_init(&device->blorp, device, &device->isl_dev, &config);
   device->blorp.compiler = device->physical->compiler;
   /* Cache hooks defined earlier in this file. */
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   /* verx10 is 10x the graphics version, so 75 == gfx7.5, 125 == gfx12.5. */
   switch (device->info.verx10) {
   case 70:
      device->blorp.exec = gfx7_blorp_exec;
      break;
   case 75:
      device->blorp.exec = gfx75_blorp_exec;
      break;
   case 80:
      device->blorp.exec = gfx8_blorp_exec;
      break;
   case 90:
      device->blorp.exec = gfx9_blorp_exec;
      break;
   case 110:
      device->blorp.exec = gfx11_blorp_exec;
      break;
   case 120:
      device->blorp.exec = gfx12_blorp_exec;
      break;
   case 125:
      device->blorp.exec = gfx125_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}
|
|
|
|
|
|
|
|
/* Tear down the device-wide blorp context created by anv_device_init_blorp().
 * Called once during device destruction.
 */
void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
}
|
2016-08-23 06:33:06 +01:00
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
static void
|
|
|
|
anv_blorp_batch_init(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
struct blorp_batch *batch, enum blorp_batch_flags flags)
|
|
|
|
{
|
2022-02-07 21:22:54 +00:00
|
|
|
if (!(cmd_buffer->queue_family->queueFlags & VK_QUEUE_GRAPHICS_BIT)) {
|
|
|
|
assert(cmd_buffer->queue_family->queueFlags & VK_QUEUE_COMPUTE_BIT);
|
2019-01-01 00:06:47 +00:00
|
|
|
flags |= BLORP_BATCH_USE_COMPUTE;
|
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, batch, cmd_buffer, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* End a blorp batch started with anv_blorp_batch_init().  Currently a plain
 * pass-through to blorp_batch_finish(); kept as a wrapper so the init/finish
 * pair stays symmetric.
 */
static void
anv_blorp_batch_finish(struct blorp_batch *batch)
{
   blorp_batch_finish(batch);
}
|
|
|
|
|
2016-08-24 04:19:57 +01:00
|
|
|
static void
|
|
|
|
get_blorp_surf_for_anv_buffer(struct anv_device *device,
|
|
|
|
struct anv_buffer *buffer, uint64_t offset,
|
|
|
|
uint32_t width, uint32_t height,
|
|
|
|
uint32_t row_pitch, enum isl_format format,
|
2020-09-30 23:05:54 +01:00
|
|
|
bool is_dest,
|
2016-08-24 04:19:57 +01:00
|
|
|
struct blorp_surf *blorp_surf,
|
|
|
|
struct isl_surf *isl_surf)
|
|
|
|
{
|
2017-02-25 01:15:43 +00:00
|
|
|
bool ok UNUSED;
|
2016-11-02 23:18:44 +00:00
|
|
|
|
2016-08-24 04:19:57 +01:00
|
|
|
*blorp_surf = (struct blorp_surf) {
|
|
|
|
.surf = isl_surf,
|
|
|
|
.addr = {
|
2018-05-31 02:05:54 +01:00
|
|
|
.buffer = buffer->address.bo,
|
|
|
|
.offset = buffer->address.offset + offset,
|
2020-10-07 15:44:56 +01:00
|
|
|
.mocs = anv_mocs(device, buffer->address.bo,
|
|
|
|
is_dest ? ISL_SURF_USAGE_RENDER_TARGET_BIT
|
|
|
|
: ISL_SURF_USAGE_TEXTURE_BIT),
|
2016-08-24 04:19:57 +01:00
|
|
|
},
|
|
|
|
};
|
|
|
|
|
2017-02-25 01:15:43 +00:00
|
|
|
ok = isl_surf_init(&device->isl_dev, isl_surf,
|
|
|
|
.dim = ISL_SURF_DIM_2D,
|
|
|
|
.format = format,
|
|
|
|
.width = width,
|
|
|
|
.height = height,
|
|
|
|
.depth = 1,
|
|
|
|
.levels = 1,
|
|
|
|
.array_len = 1,
|
|
|
|
.samples = 1,
|
2018-09-05 20:02:12 +01:00
|
|
|
.row_pitch_B = row_pitch,
|
2020-09-30 23:05:54 +01:00
|
|
|
.usage = is_dest ? ISL_SURF_USAGE_RENDER_TARGET_BIT
|
|
|
|
: ISL_SURF_USAGE_TEXTURE_BIT,
|
2017-02-25 01:15:43 +00:00
|
|
|
.tiling_flags = ISL_TILING_LINEAR_BIT);
|
|
|
|
assert(ok);
|
2016-08-24 04:19:57 +01:00
|
|
|
}
|
|
|
|
|
2018-02-02 22:51:56 +00:00
|
|
|
/* Pick something high enough that it won't be used in core and low enough it
|
|
|
|
* will never map to an extension.
|
|
|
|
*/
|
|
|
|
#define ANV_IMAGE_LAYOUT_EXPLICIT_AUX (VkImageLayout)10000000
|
2017-10-05 19:22:47 +01:00
|
|
|
|
2017-11-11 20:22:45 +00:00
|
|
|
static struct blorp_address
|
|
|
|
anv_to_blorp_address(struct anv_address addr)
|
|
|
|
{
|
|
|
|
return (struct blorp_address) {
|
|
|
|
.buffer = addr.bo,
|
|
|
|
.offset = addr.offset,
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2016-08-23 06:33:06 +01:00
|
|
|
/* Fill a blorp_surf describing one plane of an anv_image, including any
 * auxiliary (CCS/HiZ/MCS) surface and indirect clear color.
 *
 * When layout is the sentinel ANV_IMAGE_LAYOUT_EXPLICIT_AUX the caller's
 * aux_usage is used verbatim; otherwise aux_usage is derived from the
 * (usage, layout) pair and the incoming aux_usage value is ignored.
 */
static void
get_blorp_surf_for_anv_image(const struct anv_device *device,
                             const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             VkImageUsageFlags usage,
                             VkImageLayout layout,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
   const uint32_t plane = anv_image_aspect_to_plane(image, aspect);

   if (layout != ANV_IMAGE_LAYOUT_EXPLICIT_AUX) {
      assert(usage != 0);
      aux_usage = anv_layout_to_aux_usage(&device->info, image,
                                          aspect, usage, layout);
   }

   /* MOCS depends only on whether we write (transfer dst -> render target)
    * or read (-> texture) the surface.  NOTE(review): in the EXPLICIT_AUX
    * path usage may be 0, which falls through to the texture MOCS.
    */
   isl_surf_usage_flags_t mocs_usage =
      (usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) ?
      ISL_SURF_USAGE_RENDER_TARGET_BIT : ISL_SURF_USAGE_TEXTURE_BIT;

   const struct anv_surface *surface = &image->planes[plane].primary_surface;
   const struct anv_address address =
      anv_image_address(image, &surface->memory_range);

   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = address.bo,
         .offset = address.offset,
         .mocs = anv_mocs(device, address.bo, mocs_usage),
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      const struct anv_surface *aux_surface = &image->planes[plane].aux_surface;
      const struct anv_address aux_address =
         anv_image_address(image, &aux_surface->memory_range);

      blorp_surf->aux_usage = aux_usage;
      blorp_surf->aux_surf = &aux_surface->isl;

      /* The aux surface may live implicitly alongside the main surface
       * (e.g. HiZ on some configurations), in which case its address is
       * null and blorp_surf->aux_addr stays zeroed.
       */
      if (!anv_address_is_null(aux_address)) {
         blorp_surf->aux_addr = (struct blorp_address) {
            .buffer = aux_address.bo,
            .offset = aux_address.offset,
            .mocs = anv_mocs(device, aux_address.bo, 0),
         };
      }

      /* If we're doing a partial resolve, then we need the indirect clear
       * color.  If we are doing a fast clear and want to store/update the
       * clear color, we also pass the address to blorp, otherwise it will only
       * stomp the CCS to a particular value and won't care about format or
       * clear value
       */
      if (aspect & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
         const struct anv_address clear_color_addr =
            anv_image_get_clear_color_addr(device, image, aspect);
         blorp_surf->clear_color_addr = anv_to_blorp_address(clear_color_addr);
      } else if (aspect & VK_IMAGE_ASPECT_DEPTH_BIT) {
         const struct anv_address clear_color_addr =
            anv_image_get_clear_color_addr(device, image, aspect);
         blorp_surf->clear_color_addr = anv_to_blorp_address(clear_color_addr);
         /* Depth fast clears always use the fixed HiZ fast-clear value. */
         blorp_surf->clear_color = (union isl_color_value) {
            .f32 = { ANV_HZ_FC_VAL },
         };
      }
   }
}
|
|
|
|
|
2019-06-17 07:53:50 +01:00
|
|
|
static bool
|
|
|
|
get_blorp_surf_for_anv_shadow_image(const struct anv_device *device,
|
|
|
|
const struct anv_image *image,
|
|
|
|
VkImageAspectFlags aspect,
|
|
|
|
struct blorp_surf *blorp_surf)
|
|
|
|
{
|
|
|
|
|
2021-07-31 00:57:35 +01:00
|
|
|
const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
|
2020-12-10 19:57:46 +00:00
|
|
|
if (!anv_surface_is_valid(&image->planes[plane].shadow_surface))
|
2019-06-17 07:53:50 +01:00
|
|
|
return false;
|
|
|
|
|
2021-02-15 18:58:37 +00:00
|
|
|
const struct anv_surface *surface = &image->planes[plane].shadow_surface;
|
|
|
|
const struct anv_address address =
|
2020-12-15 04:13:30 +00:00
|
|
|
anv_image_address(image, &surface->memory_range);
|
2021-02-15 18:58:37 +00:00
|
|
|
|
2019-06-17 07:53:50 +01:00
|
|
|
*blorp_surf = (struct blorp_surf) {
|
2021-02-15 18:58:37 +00:00
|
|
|
.surf = &surface->isl,
|
2019-06-17 07:53:50 +01:00
|
|
|
.addr = {
|
2021-02-15 18:58:37 +00:00
|
|
|
.buffer = address.bo,
|
|
|
|
.offset = address.offset,
|
|
|
|
.mocs = anv_mocs(device, address.bo, ISL_SURF_USAGE_RENDER_TARGET_BIT),
|
2019-06-17 07:53:50 +01:00
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
static void
|
|
|
|
copy_image(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
struct blorp_batch *batch,
|
|
|
|
struct anv_image *src_image,
|
|
|
|
VkImageLayout src_image_layout,
|
|
|
|
struct anv_image *dst_image,
|
|
|
|
VkImageLayout dst_image_layout,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkImageCopy2 *region)
|
2016-08-28 05:05:13 +01:00
|
|
|
{
|
2020-07-15 10:30:49 +01:00
|
|
|
VkOffset3D srcOffset =
|
2022-06-05 04:22:15 +01:00
|
|
|
vk_image_sanitize_offset(&src_image->vk, region->srcOffset);
|
2020-07-15 10:30:49 +01:00
|
|
|
VkOffset3D dstOffset =
|
2022-06-05 04:22:15 +01:00
|
|
|
vk_image_sanitize_offset(&dst_image->vk, region->dstOffset);
|
2020-07-15 10:30:49 +01:00
|
|
|
VkExtent3D extent =
|
2022-06-05 04:22:15 +01:00
|
|
|
vk_image_sanitize_extent(&src_image->vk, region->extent);
|
2020-07-15 10:30:49 +01:00
|
|
|
|
|
|
|
const uint32_t dst_level = region->dstSubresource.mipLevel;
|
|
|
|
unsigned dst_base_layer, layer_count;
|
2021-07-22 02:42:00 +01:00
|
|
|
if (dst_image->vk.image_type == VK_IMAGE_TYPE_3D) {
|
2020-07-15 10:30:49 +01:00
|
|
|
dst_base_layer = region->dstOffset.z;
|
|
|
|
layer_count = region->extent.depth;
|
|
|
|
} else {
|
|
|
|
dst_base_layer = region->dstSubresource.baseArrayLayer;
|
2021-07-22 02:48:30 +01:00
|
|
|
layer_count = vk_image_subresource_layer_count(&dst_image->vk,
|
|
|
|
®ion->dstSubresource);
|
2020-07-15 10:30:49 +01:00
|
|
|
}
|
2016-08-28 05:05:13 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
const uint32_t src_level = region->srcSubresource.mipLevel;
|
|
|
|
unsigned src_base_layer;
|
2021-07-22 02:42:00 +01:00
|
|
|
if (src_image->vk.image_type == VK_IMAGE_TYPE_3D) {
|
2020-07-15 10:30:49 +01:00
|
|
|
src_base_layer = region->srcOffset.z;
|
|
|
|
} else {
|
|
|
|
src_base_layer = region->srcSubresource.baseArrayLayer;
|
|
|
|
assert(layer_count ==
|
2021-07-22 02:48:30 +01:00
|
|
|
vk_image_subresource_layer_count(&src_image->vk,
|
|
|
|
®ion->srcSubresource));
|
2020-07-15 10:30:49 +01:00
|
|
|
}
|
2016-08-28 05:05:13 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
VkImageAspectFlags src_mask = region->srcSubresource.aspectMask,
|
|
|
|
dst_mask = region->dstSubresource.aspectMask;
|
2017-07-19 12:14:19 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
assert(anv_image_aspects_compatible(src_mask, dst_mask));
|
2019-06-17 07:53:50 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
if (util_bitcount(src_mask) > 1) {
|
|
|
|
anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
|
2016-08-28 05:05:13 +01:00
|
|
|
struct blorp_surf src_surf, dst_surf;
|
2020-07-15 10:30:49 +01:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
src_image, 1UL << aspect_bit,
|
2019-11-19 23:51:20 +00:00
|
|
|
VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
|
2020-07-15 10:30:49 +01:00
|
|
|
src_image_layout, ISL_AUX_USAGE_NONE,
|
2018-02-02 22:51:56 +00:00
|
|
|
&src_surf);
|
2020-07-15 10:30:49 +01:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
dst_image, 1UL << aspect_bit,
|
2019-11-19 23:51:20 +00:00
|
|
|
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
|
2020-07-15 10:30:49 +01:00
|
|
|
dst_image_layout, ISL_AUX_USAGE_NONE,
|
2018-02-02 22:51:56 +00:00
|
|
|
&dst_surf);
|
2020-07-15 10:30:49 +01:00
|
|
|
anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
|
|
|
|
1UL << aspect_bit,
|
2017-11-27 16:35:12 +00:00
|
|
|
dst_surf.aux_usage, dst_level,
|
|
|
|
dst_base_layer, layer_count);
|
2016-08-28 05:05:13 +01:00
|
|
|
|
|
|
|
for (unsigned i = 0; i < layer_count; i++) {
|
2020-07-15 10:30:49 +01:00
|
|
|
blorp_copy(batch, &src_surf, src_level, src_base_layer + i,
|
2018-01-19 17:12:17 +00:00
|
|
|
&dst_surf, dst_level, dst_base_layer + i,
|
2016-08-28 05:05:13 +01:00
|
|
|
srcOffset.x, srcOffset.y,
|
|
|
|
dstOffset.x, dstOffset.y,
|
|
|
|
extent.width, extent.height);
|
|
|
|
}
|
2019-06-17 07:53:50 +01:00
|
|
|
|
|
|
|
struct blorp_surf dst_shadow_surf;
|
|
|
|
if (get_blorp_surf_for_anv_shadow_image(cmd_buffer->device,
|
2020-07-15 10:30:49 +01:00
|
|
|
dst_image,
|
|
|
|
1UL << aspect_bit,
|
2019-06-17 07:53:50 +01:00
|
|
|
&dst_shadow_surf)) {
|
|
|
|
for (unsigned i = 0; i < layer_count; i++) {
|
2020-07-15 10:30:49 +01:00
|
|
|
blorp_copy(batch, &src_surf, src_level, src_base_layer + i,
|
2019-06-17 07:53:50 +01:00
|
|
|
&dst_shadow_surf, dst_level, dst_base_layer + i,
|
|
|
|
srcOffset.x, srcOffset.y,
|
|
|
|
dstOffset.x, dstOffset.y,
|
|
|
|
extent.width, extent.height);
|
|
|
|
}
|
|
|
|
}
|
2016-08-28 05:05:13 +01:00
|
|
|
}
|
2020-07-15 10:30:49 +01:00
|
|
|
} else {
|
|
|
|
struct blorp_surf src_surf, dst_surf;
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, src_mask,
|
|
|
|
VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
|
|
|
|
src_image_layout, ISL_AUX_USAGE_NONE,
|
|
|
|
&src_surf);
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, dst_mask,
|
|
|
|
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
|
|
|
|
dst_image_layout, ISL_AUX_USAGE_NONE,
|
|
|
|
&dst_surf);
|
|
|
|
anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image, dst_mask,
|
|
|
|
dst_surf.aux_usage, dst_level,
|
|
|
|
dst_base_layer, layer_count);
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < layer_count; i++) {
|
|
|
|
blorp_copy(batch, &src_surf, src_level, src_base_layer + i,
|
|
|
|
&dst_surf, dst_level, dst_base_layer + i,
|
|
|
|
srcOffset.x, srcOffset.y,
|
|
|
|
dstOffset.x, dstOffset.y,
|
|
|
|
extent.width, extent.height);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct blorp_surf dst_shadow_surf;
|
|
|
|
if (get_blorp_surf_for_anv_shadow_image(cmd_buffer->device,
|
|
|
|
dst_image, dst_mask,
|
|
|
|
&dst_shadow_surf)) {
|
|
|
|
for (unsigned i = 0; i < layer_count; i++) {
|
|
|
|
blorp_copy(batch, &src_surf, src_level, src_base_layer + i,
|
|
|
|
&dst_shadow_surf, dst_level, dst_base_layer + i,
|
|
|
|
srcOffset.x, srcOffset.y,
|
|
|
|
dstOffset.x, dstOffset.y,
|
|
|
|
extent.width, extent.height);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-13 11:06:43 +01:00
|
|
|
void anv_CmdCopyImage2(
|
2020-07-15 10:30:49 +01:00
|
|
|
VkCommandBuffer commandBuffer,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkCopyImageInfo2* pCopyImageInfo)
|
2020-07-15 10:30:49 +01:00
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_image, src_image, pCopyImageInfo->srcImage);
|
|
|
|
ANV_FROM_HANDLE(anv_image, dst_image, pCopyImageInfo->dstImage);
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, 0);
|
2020-07-15 10:30:49 +01:00
|
|
|
|
|
|
|
for (unsigned r = 0; r < pCopyImageInfo->regionCount; r++) {
|
|
|
|
copy_image(cmd_buffer, &batch,
|
|
|
|
src_image, pCopyImageInfo->srcImageLayout,
|
|
|
|
dst_image, pCopyImageInfo->dstImageLayout,
|
|
|
|
&pCopyImageInfo->pRegions[r]);
|
2016-08-28 05:05:13 +01:00
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2016-08-28 05:05:13 +01:00
|
|
|
}
|
|
|
|
|
2020-01-17 23:46:31 +00:00
|
|
|
static enum isl_format
|
|
|
|
isl_format_for_size(unsigned size_B)
|
|
|
|
{
|
|
|
|
/* Prefer 32-bit per component formats for CmdFillBuffer */
|
|
|
|
switch (size_B) {
|
|
|
|
case 1: return ISL_FORMAT_R8_UINT;
|
|
|
|
case 2: return ISL_FORMAT_R16_UINT;
|
|
|
|
case 3: return ISL_FORMAT_R8G8B8_UINT;
|
|
|
|
case 4: return ISL_FORMAT_R32_UINT;
|
|
|
|
case 6: return ISL_FORMAT_R16G16B16_UINT;
|
|
|
|
case 8: return ISL_FORMAT_R32G32_UINT;
|
|
|
|
case 12: return ISL_FORMAT_R32G32B32_UINT;
|
|
|
|
case 16: return ISL_FORMAT_R32G32B32A32_UINT;
|
|
|
|
default:
|
|
|
|
unreachable("Unknown format size");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-24 04:19:57 +01:00
|
|
|
/* Shared implementation of vkCmdCopyBufferToImage2 / vkCmdCopyImageToBuffer2
 * for one region.  The buffer side is wrapped in a linear surface and the
 * copy is performed slice by slice with blorp_copy().
 *
 * buffer_to_image selects the direction: true means buffer -> image.
 */
static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct blorp_batch *batch,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     VkImageLayout image_layout,
                     const VkBufferImageCopy2* region,
                     bool buffer_to_image)
{
   /* src/dst alias either `image` or `buffer` depending on direction, so the
    * rest of the function is direction-agnostic.
    */
   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   const VkImageAspectFlags aspect = region->imageSubresource.aspectMask;

   get_blorp_surf_for_anv_image(cmd_buffer->device, anv_image, aspect,
                                buffer_to_image ?
                                VK_IMAGE_USAGE_TRANSFER_DST_BIT :
                                VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                image_layout, ISL_AUX_USAGE_NONE,
                                &image.surf);
   image.offset =
      vk_image_sanitize_offset(&anv_image->vk, region->imageOffset);
   image.level = region->imageSubresource.mipLevel;

   VkExtent3D extent =
      vk_image_sanitize_extent(&anv_image->vk, region->imageExtent);
   /* Non-3D images step through array layers via offset.z / extent.depth
    * so the per-slice loop below covers both cases.
    */
   if (anv_image->vk.image_type != VK_IMAGE_TYPE_3D) {
      image.offset.z = region->imageSubresource.baseArrayLayer;
      extent.depth =
         vk_image_subresource_layer_count(&anv_image->vk,
                                          &region->imageSubresource);
   }

   const enum isl_format linear_format =
      anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk.format,
                         aspect, VK_IMAGE_TILING_LINEAR);
   const struct isl_format_layout *linear_fmtl =
      isl_format_get_layout(linear_format);

   const struct vk_image_buffer_layout buffer_layout =
      vk_image_buffer_copy_layout(&anv_image->vk, region);

   /* Some formats have additional restrictions which may cause ISL to
    * fail to create a surface for us.  For example, YCbCr formats
    * have to have 2-pixel aligned strides.
    *
    * To avoid these issues, we always bind the buffer as if it's a
    * "normal" format like RGBA32_UINT.  Since we're using blorp_copy,
    * the format doesn't matter as long as it has the right bpb.
    */
   const VkExtent2D buffer_extent = {
      .width = DIV_ROUND_UP(extent.width, linear_fmtl->bw),
      .height = DIV_ROUND_UP(extent.height, linear_fmtl->bh),
   };
   const enum isl_format buffer_format =
      isl_format_for_size(linear_fmtl->bpb / 8);

   struct isl_surf buffer_isl_surf;
   get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                 anv_buffer, region->bufferOffset,
                                 buffer_extent.width, buffer_extent.height,
                                 buffer_layout.row_stride_B, buffer_format,
                                 false, &buffer.surf, &buffer_isl_surf);

   bool dst_has_shadow = false;
   struct blorp_surf dst_shadow_surf;
   if (&image == dst) {
      /* In this case, the source is the buffer and, since blorp takes its
       * copy dimensions in terms of the source format, we have to use the
       * scaled down version for compressed textures because the source
       * format is an RGB format.
       */
      extent.width = buffer_extent.width;
      extent.height = buffer_extent.height;

      anv_cmd_buffer_mark_image_written(cmd_buffer, anv_image,
                                        aspect, dst->surf.aux_usage,
                                        dst->level,
                                        dst->offset.z, extent.depth);

      dst_has_shadow =
         get_blorp_surf_for_anv_shadow_image(cmd_buffer->device,
                                             anv_image, aspect,
                                             &dst_shadow_surf);
   }

   /* Copy one 2D slice per iteration; advance the image z/layer and the
    * buffer offset by one image-stride each time.
    */
   for (unsigned z = 0; z < extent.depth; z++) {
      blorp_copy(batch, &src->surf, src->level, src->offset.z,
                 &dst->surf, dst->level, dst->offset.z,
                 src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                 extent.width, extent.height);

      if (dst_has_shadow) {
         blorp_copy(batch, &src->surf, src->level, src->offset.z,
                    &dst_shadow_surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y,
                    dst->offset.x, dst->offset.y,
                    extent.width, extent.height);
      }

      image.offset.z++;
      buffer.surf.addr.offset += buffer_layout.image_stride_B;
   }
}
|
|
|
|
|
2022-04-13 11:06:43 +01:00
|
|
|
void anv_CmdCopyBufferToImage2(
|
2020-07-15 10:30:49 +01:00
|
|
|
VkCommandBuffer commandBuffer,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo)
|
2020-07-15 10:30:49 +01:00
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, src_buffer, pCopyBufferToImageInfo->srcBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_image, dst_image, pCopyBufferToImageInfo->dstImage);
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, 0);
|
2020-07-15 10:30:49 +01:00
|
|
|
|
|
|
|
for (unsigned r = 0; r < pCopyBufferToImageInfo->regionCount; r++) {
|
|
|
|
copy_buffer_to_image(cmd_buffer, &batch, src_buffer, dst_image,
|
|
|
|
pCopyBufferToImageInfo->dstImageLayout,
|
|
|
|
&pCopyBufferToImageInfo->pRegions[r], true);
|
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2016-08-27 20:57:01 +01:00
|
|
|
}
|
|
|
|
|
2022-04-13 11:06:43 +01:00
|
|
|
void anv_CmdCopyImageToBuffer2(
|
2020-07-15 10:30:49 +01:00
|
|
|
VkCommandBuffer commandBuffer,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo)
|
2020-07-15 10:30:49 +01:00
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_image, src_image, pCopyImageToBufferInfo->srcImage);
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, dst_buffer, pCopyImageToBufferInfo->dstBuffer);
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, 0);
|
2020-07-15 10:30:49 +01:00
|
|
|
|
|
|
|
for (unsigned r = 0; r < pCopyImageToBufferInfo->regionCount; r++) {
|
|
|
|
copy_buffer_to_image(cmd_buffer, &batch, dst_buffer, src_image,
|
|
|
|
pCopyImageToBufferInfo->srcImageLayout,
|
|
|
|
&pCopyImageToBufferInfo->pRegions[r], false);
|
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2019-01-17 17:00:14 +00:00
|
|
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
|
2016-08-24 04:19:57 +01:00
|
|
|
}
|
|
|
|
|
2016-08-23 06:33:06 +01:00
|
|
|
/* Sort each coordinate interval ([*src0,*src1] and [*dst0,*dst1]) into
 * ascending order in place.  Returns true when exactly one of the two
 * intervals had to be reversed, i.e. the blit mirrors along this axis.
 */
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   unsigned num_swaps = 0;

   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      num_swaps++;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      num_swaps++;
   }

   /* Flipping both (or neither) cancels out. */
   return (num_swaps & 1) != 0;
}
|
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
static void
|
|
|
|
blit_image(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
struct blorp_batch *batch,
|
|
|
|
struct anv_image *src_image,
|
|
|
|
VkImageLayout src_image_layout,
|
|
|
|
struct anv_image *dst_image,
|
|
|
|
VkImageLayout dst_image_layout,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkImageBlit2 *region,
|
2020-07-15 10:30:49 +01:00
|
|
|
VkFilter filter)
|
2016-08-23 06:33:06 +01:00
|
|
|
{
|
2020-07-15 10:30:49 +01:00
|
|
|
const VkImageSubresourceLayers *src_res = ®ion->srcSubresource;
|
|
|
|
const VkImageSubresourceLayers *dst_res = ®ion->dstSubresource;
|
2016-08-23 06:33:06 +01:00
|
|
|
|
|
|
|
struct blorp_surf src, dst;
|
|
|
|
|
2018-06-25 23:14:38 +01:00
|
|
|
enum blorp_filter blorp_filter;
|
2016-08-23 06:33:06 +01:00
|
|
|
switch (filter) {
|
|
|
|
case VK_FILTER_NEAREST:
|
2018-06-25 23:14:38 +01:00
|
|
|
blorp_filter = BLORP_FILTER_NEAREST;
|
2016-08-23 06:33:06 +01:00
|
|
|
break;
|
|
|
|
case VK_FILTER_LINEAR:
|
2018-06-25 23:14:38 +01:00
|
|
|
blorp_filter = BLORP_FILTER_BILINEAR;
|
2016-08-23 06:33:06 +01:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
unreachable("Invalid filter");
|
|
|
|
}
|
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
assert(anv_image_aspects_compatible(src_res->aspectMask,
|
|
|
|
dst_res->aspectMask));
|
2016-08-23 06:33:06 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
anv_foreach_image_aspect_bit(aspect_bit, src_image, src_res->aspectMask) {
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
src_image, 1U << aspect_bit,
|
|
|
|
VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
|
|
|
|
src_image_layout, ISL_AUX_USAGE_NONE, &src);
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
dst_image, 1U << aspect_bit,
|
|
|
|
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
|
|
|
|
dst_image_layout, ISL_AUX_USAGE_NONE, &dst);
|
2016-08-23 06:33:06 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
struct anv_format_plane src_format =
|
2021-07-22 02:42:00 +01:00
|
|
|
anv_get_format_aspect(&cmd_buffer->device->info, src_image->vk.format,
|
|
|
|
1U << aspect_bit, src_image->vk.tiling);
|
2020-07-15 10:30:49 +01:00
|
|
|
struct anv_format_plane dst_format =
|
2021-07-22 02:42:00 +01:00
|
|
|
anv_get_format_aspect(&cmd_buffer->device->info, dst_image->vk.format,
|
|
|
|
1U << aspect_bit, dst_image->vk.tiling);
|
2018-08-28 11:16:33 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
unsigned dst_start, dst_end;
|
2021-07-22 02:42:00 +01:00
|
|
|
if (dst_image->vk.image_type == VK_IMAGE_TYPE_3D) {
|
2020-07-15 10:30:49 +01:00
|
|
|
assert(dst_res->baseArrayLayer == 0);
|
|
|
|
dst_start = region->dstOffsets[0].z;
|
|
|
|
dst_end = region->dstOffsets[1].z;
|
|
|
|
} else {
|
|
|
|
dst_start = dst_res->baseArrayLayer;
|
2021-07-22 02:48:30 +01:00
|
|
|
dst_end = dst_start +
|
|
|
|
vk_image_subresource_layer_count(&dst_image->vk, dst_res);
|
2020-07-15 10:30:49 +01:00
|
|
|
}
|
2016-08-23 06:33:06 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
unsigned src_start, src_end;
|
2021-07-22 02:42:00 +01:00
|
|
|
if (src_image->vk.image_type == VK_IMAGE_TYPE_3D) {
|
2020-07-15 10:30:49 +01:00
|
|
|
assert(src_res->baseArrayLayer == 0);
|
|
|
|
src_start = region->srcOffsets[0].z;
|
|
|
|
src_end = region->srcOffsets[1].z;
|
|
|
|
} else {
|
|
|
|
src_start = src_res->baseArrayLayer;
|
2021-07-22 02:48:30 +01:00
|
|
|
src_end = src_start +
|
|
|
|
vk_image_subresource_layer_count(&src_image->vk, src_res);
|
2020-07-15 10:30:49 +01:00
|
|
|
}
|
2016-08-23 06:33:06 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
|
2020-09-29 08:55:35 +01:00
|
|
|
const unsigned num_layers = dst_end - dst_start;
|
|
|
|
float src_z_step = (float)(src_end - src_start) / (float)num_layers;
|
|
|
|
|
|
|
|
/* There is no interpolation to the pixel center during rendering, so
|
|
|
|
* add the 0.5 offset ourselves here. */
|
|
|
|
float depth_center_offset = 0;
|
2021-07-22 02:42:00 +01:00
|
|
|
if (src_image->vk.image_type == VK_IMAGE_TYPE_3D)
|
2020-09-29 08:55:35 +01:00
|
|
|
depth_center_offset = 0.5 / num_layers * (src_end - src_start);
|
2016-08-23 06:33:06 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
if (flip_z) {
|
|
|
|
src_start = src_end;
|
|
|
|
src_z_step *= -1;
|
2020-09-29 08:55:35 +01:00
|
|
|
depth_center_offset *= -1;
|
2020-07-15 10:30:49 +01:00
|
|
|
}
|
2016-08-23 06:33:06 +01:00
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
unsigned src_x0 = region->srcOffsets[0].x;
|
|
|
|
unsigned src_x1 = region->srcOffsets[1].x;
|
|
|
|
unsigned dst_x0 = region->dstOffsets[0].x;
|
|
|
|
unsigned dst_x1 = region->dstOffsets[1].x;
|
|
|
|
bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);
|
|
|
|
|
|
|
|
unsigned src_y0 = region->srcOffsets[0].y;
|
|
|
|
unsigned src_y1 = region->srcOffsets[1].y;
|
|
|
|
unsigned dst_y0 = region->dstOffsets[0].y;
|
|
|
|
unsigned dst_y1 = region->dstOffsets[1].y;
|
|
|
|
bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);
|
|
|
|
|
|
|
|
anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
|
|
|
|
1U << aspect_bit,
|
|
|
|
dst.aux_usage,
|
|
|
|
dst_res->mipLevel,
|
|
|
|
dst_start, num_layers);
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < num_layers; i++) {
|
|
|
|
unsigned dst_z = dst_start + i;
|
2020-09-29 08:55:35 +01:00
|
|
|
float src_z = src_start + i * src_z_step + depth_center_offset;
|
2020-07-15 10:30:49 +01:00
|
|
|
|
|
|
|
blorp_blit(batch, &src, src_res->mipLevel, src_z,
|
|
|
|
src_format.isl_format, src_format.swizzle,
|
|
|
|
&dst, dst_res->mipLevel, dst_z,
|
|
|
|
dst_format.isl_format, dst_format.swizzle,
|
|
|
|
src_x0, src_y0, src_x1, src_y1,
|
|
|
|
dst_x0, dst_y0, dst_x1, dst_y1,
|
|
|
|
blorp_filter, flip_x, flip_y);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-08-28 11:16:33 +01:00
|
|
|
|
2022-04-13 11:06:43 +01:00
|
|
|
void anv_CmdBlitImage2(
|
2020-07-15 10:30:49 +01:00
|
|
|
VkCommandBuffer commandBuffer,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkBlitImageInfo2* pBlitImageInfo)
|
2020-07-15 10:30:49 +01:00
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_image, src_image, pBlitImageInfo->srcImage);
|
|
|
|
ANV_FROM_HANDLE(anv_image, dst_image, pBlitImageInfo->dstImage);
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, 0);
|
2020-07-15 10:30:49 +01:00
|
|
|
|
|
|
|
for (unsigned r = 0; r < pBlitImageInfo->regionCount; r++) {
|
|
|
|
blit_image(cmd_buffer, &batch,
|
|
|
|
src_image, pBlitImageInfo->srcImageLayout,
|
|
|
|
dst_image, pBlitImageInfo->dstImageLayout,
|
|
|
|
&pBlitImageInfo->pRegions[r], pBlitImageInfo->filter);
|
2016-08-23 06:33:06 +01:00
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2016-08-23 06:33:06 +01:00
|
|
|
}
|
2016-08-30 23:43:46 +01:00
|
|
|
|
2016-09-10 03:21:18 +01:00
|
|
|
/**
|
|
|
|
* Returns the greatest common divisor of a and b that is a power of two.
|
|
|
|
*/
|
2017-07-07 05:18:03 +01:00
|
|
|
static uint64_t
|
2016-09-10 03:21:18 +01:00
|
|
|
gcd_pow2_u64(uint64_t a, uint64_t b)
|
|
|
|
{
|
|
|
|
assert(a > 0 || b > 0);
|
|
|
|
|
|
|
|
unsigned a_log2 = ffsll(a) - 1;
|
|
|
|
unsigned b_log2 = ffsll(b) - 1;
|
|
|
|
|
|
|
|
/* If either a or b is 0, then a_log2 or b_log2 till be UINT_MAX in which
|
|
|
|
* case, the MIN2() will take the other one. If both are 0 then we will
|
|
|
|
* hit the assert above.
|
|
|
|
*/
|
|
|
|
return 1 << MIN2(a_log2, b_log2);
|
|
|
|
}
|
|
|
|
|
2016-08-30 23:43:46 +01:00
|
|
|
/* This is maximum possible width/height our HW can handle */
|
|
|
|
#define MAX_SURFACE_DIM (1ull << 14)
|
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
static void
|
|
|
|
copy_buffer(struct anv_device *device,
|
|
|
|
struct blorp_batch *batch,
|
|
|
|
struct anv_buffer *src_buffer,
|
|
|
|
struct anv_buffer *dst_buffer,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkBufferCopy2 *region)
|
2020-07-15 10:30:49 +01:00
|
|
|
{
|
|
|
|
struct blorp_address src = {
|
|
|
|
.buffer = src_buffer->address.bo,
|
|
|
|
.offset = src_buffer->address.offset + region->srcOffset,
|
2020-10-07 15:44:56 +01:00
|
|
|
.mocs = anv_mocs(device, src_buffer->address.bo,
|
|
|
|
ISL_SURF_USAGE_TEXTURE_BIT),
|
2020-07-15 10:30:49 +01:00
|
|
|
};
|
|
|
|
struct blorp_address dst = {
|
|
|
|
.buffer = dst_buffer->address.bo,
|
|
|
|
.offset = dst_buffer->address.offset + region->dstOffset,
|
2020-10-07 15:44:56 +01:00
|
|
|
.mocs = anv_mocs(device, dst_buffer->address.bo,
|
|
|
|
ISL_SURF_USAGE_RENDER_TARGET_BIT),
|
2020-07-15 10:30:49 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
blorp_buffer_copy(batch, src, dst, region->size);
|
|
|
|
}
|
|
|
|
|
2022-04-13 11:06:43 +01:00
|
|
|
void anv_CmdCopyBuffer2(
|
2020-07-15 10:30:49 +01:00
|
|
|
VkCommandBuffer commandBuffer,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkCopyBufferInfo2* pCopyBufferInfo)
|
2020-07-15 10:30:49 +01:00
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, 0);
|
2020-07-15 10:30:49 +01:00
|
|
|
|
|
|
|
for (unsigned r = 0; r < pCopyBufferInfo->regionCount; r++) {
|
|
|
|
copy_buffer(cmd_buffer->device, &batch, src_buffer, dst_buffer,
|
|
|
|
&pCopyBufferInfo->pRegions[r]);
|
2016-08-30 23:43:46 +01:00
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2019-01-17 17:00:14 +00:00
|
|
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
|
2016-08-30 23:43:46 +01:00
|
|
|
}
|
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
|
2016-08-30 23:43:46 +01:00
|
|
|
void anv_CmdUpdateBuffer(
|
|
|
|
VkCommandBuffer commandBuffer,
|
|
|
|
VkBuffer dstBuffer,
|
|
|
|
VkDeviceSize dstOffset,
|
|
|
|
VkDeviceSize dataSize,
|
2016-11-11 01:44:10 +00:00
|
|
|
const void* pData)
|
2016-08-30 23:43:46 +01:00
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, 0);
|
2016-08-30 23:43:46 +01:00
|
|
|
|
|
|
|
/* We can't quite grab a full block because the state stream needs a
|
|
|
|
* little data at the top to build its linked list.
|
|
|
|
*/
|
|
|
|
const uint32_t max_update_size =
|
2017-04-26 09:27:33 +01:00
|
|
|
cmd_buffer->device->dynamic_state_pool.block_size - 64;
|
2016-08-30 23:43:46 +01:00
|
|
|
|
|
|
|
assert(max_update_size < MAX_SURFACE_DIM * 4);
|
|
|
|
|
2017-03-31 23:33:51 +01:00
|
|
|
/* We're about to read data that was written from the CPU. Flush the
|
|
|
|
* texture cache so we don't get anything stale.
|
|
|
|
*/
|
2021-03-11 16:40:56 +00:00
|
|
|
anv_add_pending_pipe_bits(cmd_buffer,
|
|
|
|
ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT,
|
|
|
|
"before UpdateBuffer");
|
2017-03-31 23:33:51 +01:00
|
|
|
|
2016-08-30 23:43:46 +01:00
|
|
|
while (dataSize) {
|
|
|
|
const uint32_t copy_size = MIN2(dataSize, max_update_size);
|
|
|
|
|
|
|
|
struct anv_state tmp_data =
|
|
|
|
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);
|
|
|
|
|
|
|
|
memcpy(tmp_data.map, pData, copy_size);
|
|
|
|
|
2017-08-28 23:57:20 +01:00
|
|
|
struct blorp_address src = {
|
2018-11-21 19:36:49 +00:00
|
|
|
.buffer = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
|
2017-08-28 23:57:20 +01:00
|
|
|
.offset = tmp_data.offset,
|
2020-10-07 15:44:56 +01:00
|
|
|
.mocs = isl_mocs(&cmd_buffer->device->isl_dev,
|
2020-12-14 09:11:59 +00:00
|
|
|
ISL_SURF_USAGE_TEXTURE_BIT, false)
|
2017-08-28 23:57:20 +01:00
|
|
|
};
|
|
|
|
struct blorp_address dst = {
|
2018-05-31 02:05:54 +01:00
|
|
|
.buffer = dst_buffer->address.bo,
|
|
|
|
.offset = dst_buffer->address.offset + dstOffset,
|
2020-10-07 15:44:56 +01:00
|
|
|
.mocs = anv_mocs(cmd_buffer->device, dst_buffer->address.bo,
|
|
|
|
ISL_SURF_USAGE_RENDER_TARGET_BIT),
|
2017-08-28 23:57:20 +01:00
|
|
|
};
|
2016-08-30 23:43:46 +01:00
|
|
|
|
2017-08-28 23:57:20 +01:00
|
|
|
blorp_buffer_copy(&batch, src, dst, copy_size);
|
2016-08-30 23:43:46 +01:00
|
|
|
|
|
|
|
dataSize -= copy_size;
|
|
|
|
dstOffset += copy_size;
|
|
|
|
pData = (void *)pData + copy_size;
|
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2019-01-17 17:00:14 +00:00
|
|
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
|
2016-08-30 23:43:46 +01:00
|
|
|
}
|
2016-08-31 00:56:25 +01:00
|
|
|
|
2016-09-25 16:44:40 +01:00
|
|
|
void anv_CmdFillBuffer(
|
|
|
|
VkCommandBuffer commandBuffer,
|
|
|
|
VkBuffer dstBuffer,
|
|
|
|
VkDeviceSize dstOffset,
|
|
|
|
VkDeviceSize fillSize,
|
|
|
|
uint32_t data)
|
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
|
|
|
|
struct blorp_surf surf;
|
|
|
|
struct isl_surf isl_surf;
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, 0);
|
2016-09-25 16:44:40 +01:00
|
|
|
|
2022-05-19 15:00:30 +01:00
|
|
|
fillSize = vk_buffer_range(&dst_buffer->vk, dstOffset, fillSize);
|
2017-03-04 18:07:56 +00:00
|
|
|
|
|
|
|
/* From the Vulkan spec:
|
|
|
|
*
|
|
|
|
* "size is the number of bytes to fill, and must be either a multiple
|
|
|
|
* of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
|
|
|
|
* the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
|
|
|
|
* buffer is not a multiple of 4, then the nearest smaller multiple is
|
|
|
|
* used."
|
|
|
|
*/
|
|
|
|
fillSize &= ~3ull;
|
2016-09-25 16:44:40 +01:00
|
|
|
|
|
|
|
/* First, we compute the biggest format that can be used with the
|
|
|
|
* given offsets and size.
|
|
|
|
*/
|
|
|
|
int bs = 16;
|
|
|
|
bs = gcd_pow2_u64(bs, dstOffset);
|
|
|
|
bs = gcd_pow2_u64(bs, fillSize);
|
|
|
|
enum isl_format isl_format = isl_format_for_size(bs);
|
|
|
|
|
|
|
|
union isl_color_value color = {
|
|
|
|
.u32 = { data, data, data, data },
|
|
|
|
};
|
|
|
|
|
|
|
|
const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
|
|
|
|
while (fillSize >= max_fill_size) {
|
|
|
|
get_blorp_surf_for_anv_buffer(cmd_buffer->device,
|
|
|
|
dst_buffer, dstOffset,
|
|
|
|
MAX_SURFACE_DIM, MAX_SURFACE_DIM,
|
2020-09-30 23:05:54 +01:00
|
|
|
MAX_SURFACE_DIM * bs, isl_format, true,
|
2016-09-25 16:44:40 +01:00
|
|
|
&surf, &isl_surf);
|
|
|
|
|
|
|
|
blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
|
|
|
|
0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
|
2021-09-16 22:25:42 +01:00
|
|
|
color, 0 /* color_write_disable */);
|
2016-09-25 16:44:40 +01:00
|
|
|
fillSize -= max_fill_size;
|
|
|
|
dstOffset += max_fill_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
|
|
|
|
assert(height < MAX_SURFACE_DIM);
|
|
|
|
if (height != 0) {
|
|
|
|
const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
|
|
|
|
get_blorp_surf_for_anv_buffer(cmd_buffer->device,
|
|
|
|
dst_buffer, dstOffset,
|
|
|
|
MAX_SURFACE_DIM, height,
|
2020-09-30 23:05:54 +01:00
|
|
|
MAX_SURFACE_DIM * bs, isl_format, true,
|
2016-09-25 16:44:40 +01:00
|
|
|
&surf, &isl_surf);
|
|
|
|
|
|
|
|
blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
|
|
|
|
0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
|
2021-09-16 22:25:42 +01:00
|
|
|
color, 0 /* color_write_disable */);
|
2016-09-25 16:44:40 +01:00
|
|
|
fillSize -= rect_fill_size;
|
|
|
|
dstOffset += rect_fill_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fillSize != 0) {
|
|
|
|
const uint32_t width = fillSize / bs;
|
|
|
|
get_blorp_surf_for_anv_buffer(cmd_buffer->device,
|
|
|
|
dst_buffer, dstOffset,
|
|
|
|
width, 1,
|
2020-09-30 23:05:54 +01:00
|
|
|
width * bs, isl_format, true,
|
2016-09-25 16:44:40 +01:00
|
|
|
&surf, &isl_surf);
|
|
|
|
|
|
|
|
blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
|
|
|
|
0, 0, 1, 0, 0, width, 1,
|
2021-09-16 22:25:42 +01:00
|
|
|
color, 0 /* color_write_disable */);
|
2016-09-25 16:44:40 +01:00
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2019-01-17 17:00:14 +00:00
|
|
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
|
2016-09-25 16:44:40 +01:00
|
|
|
}
|
|
|
|
|
2016-08-31 00:56:25 +01:00
|
|
|
void anv_CmdClearColorImage(
|
|
|
|
VkCommandBuffer commandBuffer,
|
|
|
|
VkImage _image,
|
|
|
|
VkImageLayout imageLayout,
|
|
|
|
const VkClearColorValue* pColor,
|
|
|
|
uint32_t rangeCount,
|
|
|
|
const VkImageSubresourceRange* pRanges)
|
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_image, image, _image);
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, 0);
|
2016-08-31 00:56:25 +01:00
|
|
|
|
|
|
|
for (unsigned r = 0; r < rangeCount; r++) {
|
|
|
|
if (pRanges[r].aspectMask == 0)
|
|
|
|
continue;
|
|
|
|
|
2017-11-02 23:05:45 +00:00
|
|
|
assert(pRanges[r].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
|
2017-07-19 12:14:19 +01:00
|
|
|
|
|
|
|
struct blorp_surf surf;
|
2017-11-03 22:18:45 +00:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
image, pRanges[r].aspectMask,
|
2019-11-19 23:51:20 +00:00
|
|
|
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
|
2018-02-02 22:51:56 +00:00
|
|
|
imageLayout, ISL_AUX_USAGE_NONE, &surf);
|
2016-08-31 00:56:25 +01:00
|
|
|
|
2017-03-14 17:20:07 +00:00
|
|
|
struct anv_format_plane src_format =
|
2021-07-22 02:42:00 +01:00
|
|
|
anv_get_format_aspect(&cmd_buffer->device->info, image->vk.format,
|
|
|
|
VK_IMAGE_ASPECT_COLOR_BIT, image->vk.tiling);
|
2016-08-31 00:56:25 +01:00
|
|
|
|
|
|
|
unsigned base_layer = pRanges[r].baseArrayLayer;
|
2021-07-22 02:48:30 +01:00
|
|
|
uint32_t layer_count =
|
|
|
|
vk_image_subresource_layer_count(&image->vk, &pRanges[r]);
|
|
|
|
uint32_t level_count =
|
|
|
|
vk_image_subresource_level_count(&image->vk, &pRanges[r]);
|
2016-08-31 00:56:25 +01:00
|
|
|
|
2021-07-22 02:48:30 +01:00
|
|
|
for (uint32_t i = 0; i < level_count; i++) {
|
2016-08-31 00:56:25 +01:00
|
|
|
const unsigned level = pRanges[r].baseMipLevel + i;
|
2021-07-22 02:42:00 +01:00
|
|
|
const unsigned level_width = anv_minify(image->vk.extent.width, level);
|
|
|
|
const unsigned level_height = anv_minify(image->vk.extent.height, level);
|
2016-08-31 00:56:25 +01:00
|
|
|
|
2021-07-22 02:42:00 +01:00
|
|
|
if (image->vk.image_type == VK_IMAGE_TYPE_3D) {
|
2016-08-31 00:56:25 +01:00
|
|
|
base_layer = 0;
|
2021-07-22 02:42:00 +01:00
|
|
|
layer_count = anv_minify(image->vk.extent.depth, level);
|
2016-08-31 00:56:25 +01:00
|
|
|
}
|
|
|
|
|
2017-11-27 16:35:12 +00:00
|
|
|
anv_cmd_buffer_mark_image_written(cmd_buffer, image,
|
|
|
|
pRanges[r].aspectMask,
|
|
|
|
surf.aux_usage, level,
|
|
|
|
base_layer, layer_count);
|
|
|
|
|
2016-08-31 00:56:25 +01:00
|
|
|
blorp_clear(&batch, &surf,
|
2017-02-09 20:00:51 +00:00
|
|
|
src_format.isl_format, src_format.swizzle,
|
2016-08-31 00:56:25 +01:00
|
|
|
level, base_layer, layer_count,
|
|
|
|
0, 0, level_width, level_height,
|
2021-09-16 22:25:42 +01:00
|
|
|
vk_to_isl_color(*pColor), 0 /* color_write_disable */);
|
2016-08-31 00:56:25 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2016-08-31 00:56:25 +01:00
|
|
|
}
|
2016-08-31 01:49:56 +01:00
|
|
|
|
2016-10-07 07:35:22 +01:00
|
|
|
void anv_CmdClearDepthStencilImage(
|
|
|
|
VkCommandBuffer commandBuffer,
|
|
|
|
VkImage image_h,
|
|
|
|
VkImageLayout imageLayout,
|
|
|
|
const VkClearDepthStencilValue* pDepthStencil,
|
|
|
|
uint32_t rangeCount,
|
|
|
|
const VkImageSubresourceRange* pRanges)
|
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_image, image, image_h);
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, 0);
|
2019-01-01 00:06:47 +00:00
|
|
|
assert((batch.flags & BLORP_BATCH_USE_COMPUTE) == 0);
|
2016-10-07 07:35:22 +01:00
|
|
|
|
2019-06-17 07:53:50 +01:00
|
|
|
struct blorp_surf depth, stencil, stencil_shadow;
|
2021-07-22 02:42:00 +01:00
|
|
|
if (image->vk.aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
|
2017-11-03 22:18:45 +00:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
image, VK_IMAGE_ASPECT_DEPTH_BIT,
|
2019-11-19 23:51:20 +00:00
|
|
|
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
|
2018-02-02 22:51:56 +00:00
|
|
|
imageLayout, ISL_AUX_USAGE_NONE, &depth);
|
2016-10-07 07:35:22 +01:00
|
|
|
} else {
|
|
|
|
memset(&depth, 0, sizeof(depth));
|
|
|
|
}
|
|
|
|
|
2019-06-17 07:53:50 +01:00
|
|
|
bool has_stencil_shadow = false;
|
2021-07-22 02:42:00 +01:00
|
|
|
if (image->vk.aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
|
2017-11-03 22:18:45 +00:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
image, VK_IMAGE_ASPECT_STENCIL_BIT,
|
2019-11-19 23:51:20 +00:00
|
|
|
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
|
2018-02-02 22:51:56 +00:00
|
|
|
imageLayout, ISL_AUX_USAGE_NONE, &stencil);
|
2019-06-17 07:53:50 +01:00
|
|
|
|
|
|
|
has_stencil_shadow =
|
|
|
|
get_blorp_surf_for_anv_shadow_image(cmd_buffer->device, image,
|
|
|
|
VK_IMAGE_ASPECT_STENCIL_BIT,
|
|
|
|
&stencil_shadow);
|
2016-10-07 07:35:22 +01:00
|
|
|
} else {
|
|
|
|
memset(&stencil, 0, sizeof(stencil));
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned r = 0; r < rangeCount; r++) {
|
|
|
|
if (pRanges[r].aspectMask == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
|
|
|
|
bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;
|
|
|
|
|
|
|
|
unsigned base_layer = pRanges[r].baseArrayLayer;
|
2021-07-22 02:48:30 +01:00
|
|
|
uint32_t layer_count =
|
|
|
|
vk_image_subresource_layer_count(&image->vk, &pRanges[r]);
|
|
|
|
uint32_t level_count =
|
|
|
|
vk_image_subresource_level_count(&image->vk, &pRanges[r]);
|
2016-10-07 07:35:22 +01:00
|
|
|
|
2021-07-22 02:48:30 +01:00
|
|
|
for (uint32_t i = 0; i < level_count; i++) {
|
2016-10-07 07:35:22 +01:00
|
|
|
const unsigned level = pRanges[r].baseMipLevel + i;
|
2021-07-22 02:42:00 +01:00
|
|
|
const unsigned level_width = anv_minify(image->vk.extent.width, level);
|
|
|
|
const unsigned level_height = anv_minify(image->vk.extent.height, level);
|
2016-10-07 07:35:22 +01:00
|
|
|
|
2021-07-22 02:42:00 +01:00
|
|
|
if (image->vk.image_type == VK_IMAGE_TYPE_3D)
|
|
|
|
layer_count = anv_minify(image->vk.extent.depth, level);
|
2016-10-07 07:35:22 +01:00
|
|
|
|
|
|
|
blorp_clear_depth_stencil(&batch, &depth, &stencil,
|
|
|
|
level, base_layer, layer_count,
|
|
|
|
0, 0, level_width, level_height,
|
|
|
|
clear_depth, pDepthStencil->depth,
|
|
|
|
clear_stencil ? 0xff : 0,
|
|
|
|
pDepthStencil->stencil);
|
2019-06-17 07:53:50 +01:00
|
|
|
|
|
|
|
if (clear_stencil && has_stencil_shadow) {
|
|
|
|
union isl_color_value stencil_color = {
|
|
|
|
.u32 = { pDepthStencil->stencil, },
|
|
|
|
};
|
|
|
|
blorp_clear(&batch, &stencil_shadow,
|
|
|
|
ISL_FORMAT_R8_UINT, ISL_SWIZZLE_IDENTITY,
|
|
|
|
level, base_layer, layer_count,
|
|
|
|
0, 0, level_width, level_height,
|
2021-09-16 22:25:42 +01:00
|
|
|
stencil_color, 0 /* color_write_disable */);
|
2019-06-17 07:53:50 +01:00
|
|
|
}
|
2016-10-07 07:35:22 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2016-10-07 07:35:22 +01:00
|
|
|
}
|
|
|
|
|
2017-03-09 10:49:01 +00:00
|
|
|
VkResult
|
2016-10-22 01:01:17 +01:00
|
|
|
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
uint32_t num_entries,
|
2017-03-09 10:49:01 +00:00
|
|
|
uint32_t *state_offset,
|
|
|
|
struct anv_state *bt_state)
|
2016-10-22 01:01:17 +01:00
|
|
|
{
|
2017-03-09 10:49:01 +00:00
|
|
|
*bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
|
|
|
|
state_offset);
|
|
|
|
if (bt_state->map == NULL) {
|
2016-10-22 01:01:17 +01:00
|
|
|
/* We ran out of space. Grab a new binding table block. */
|
2017-03-09 10:49:01 +00:00
|
|
|
VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
return result;
|
2016-10-22 01:01:17 +01:00
|
|
|
|
|
|
|
/* Re-emit state base addresses so we get the new surface state base
|
|
|
|
* address before we start emitting binding tables etc.
|
|
|
|
*/
|
|
|
|
anv_cmd_buffer_emit_state_base_address(cmd_buffer);
|
|
|
|
|
2017-03-09 10:49:01 +00:00
|
|
|
*bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
|
|
|
|
state_offset);
|
|
|
|
assert(bt_state->map != NULL);
|
2016-10-22 01:01:17 +01:00
|
|
|
}
|
|
|
|
|
2017-03-09 10:49:01 +00:00
|
|
|
return VK_SUCCESS;
|
2016-10-22 01:01:17 +01:00
|
|
|
}
|
|
|
|
|
2017-03-09 10:49:01 +00:00
|
|
|
static VkResult
|
2016-10-22 01:13:51 +01:00
|
|
|
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
|
2017-03-09 10:49:01 +00:00
|
|
|
struct anv_state surface_state,
|
|
|
|
uint32_t *bt_offset)
|
2016-10-22 01:13:51 +01:00
|
|
|
{
|
|
|
|
uint32_t state_offset;
|
2017-03-09 10:49:01 +00:00
|
|
|
struct anv_state bt_state;
|
|
|
|
|
|
|
|
VkResult result =
|
|
|
|
anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
|
|
|
|
&bt_state);
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
return result;
|
2016-10-22 01:13:51 +01:00
|
|
|
|
|
|
|
uint32_t *bt_map = bt_state.map;
|
|
|
|
bt_map[0] = surface_state.offset + state_offset;
|
|
|
|
|
2017-03-09 10:49:01 +00:00
|
|
|
*bt_offset = bt_state.offset;
|
|
|
|
return VK_SUCCESS;
|
2016-10-22 01:13:51 +01:00
|
|
|
}
|
|
|
|
|
2016-10-07 22:43:21 +01:00
|
|
|
static void
|
|
|
|
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
struct blorp_batch *batch,
|
|
|
|
const VkClearAttachment *attachment,
|
|
|
|
uint32_t rectCount, const VkClearRect *pRects)
|
|
|
|
{
|
2022-02-09 18:03:15 +00:00
|
|
|
struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
|
|
|
|
const uint32_t att_idx = attachment->colorAttachment;
|
|
|
|
assert(att_idx < gfx->color_att_count);
|
|
|
|
const struct anv_attachment *att = &gfx->color_att[att_idx];
|
2016-10-22 07:19:44 +01:00
|
|
|
|
2022-02-09 18:03:15 +00:00
|
|
|
if (att->vk_format == VK_FORMAT_UNDEFINED)
|
2016-10-22 07:19:44 +01:00
|
|
|
return;
|
|
|
|
|
2017-03-09 10:49:01 +00:00
|
|
|
uint32_t binding_table;
|
|
|
|
VkResult result =
|
2022-02-09 18:03:15 +00:00
|
|
|
binding_table_for_surface_state(cmd_buffer, att->surface_state.state,
|
2017-03-09 10:49:01 +00:00
|
|
|
&binding_table);
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
return;
|
2016-10-07 22:43:21 +01:00
|
|
|
|
2016-11-18 21:35:16 +00:00
|
|
|
union isl_color_value clear_color =
|
|
|
|
vk_to_isl_color(attachment->clearValue.color);
|
2016-10-07 22:43:21 +01:00
|
|
|
|
2017-05-18 07:23:38 +01:00
|
|
|
/* If multiview is enabled we ignore baseArrayLayer and layerCount */
|
2022-02-09 18:03:15 +00:00
|
|
|
if (gfx->view_mask) {
|
|
|
|
u_foreach_bit(view_idx, gfx->view_mask) {
|
2017-05-18 07:23:38 +01:00
|
|
|
for (uint32_t r = 0; r < rectCount; ++r) {
|
|
|
|
const VkOffset2D offset = pRects[r].rect.offset;
|
|
|
|
const VkExtent2D extent = pRects[r].rect.extent;
|
|
|
|
blorp_clear_attachments(batch, binding_table,
|
2022-02-09 18:03:15 +00:00
|
|
|
ISL_FORMAT_UNSUPPORTED,
|
|
|
|
gfx->samples,
|
2017-05-18 07:23:38 +01:00
|
|
|
view_idx, 1,
|
|
|
|
offset.x, offset.y,
|
|
|
|
offset.x + extent.width,
|
|
|
|
offset.y + extent.height,
|
|
|
|
true, clear_color, false, 0.0f, 0, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-10-07 22:43:21 +01:00
|
|
|
for (uint32_t r = 0; r < rectCount; ++r) {
|
|
|
|
const VkOffset2D offset = pRects[r].rect.offset;
|
|
|
|
const VkExtent2D extent = pRects[r].rect.extent;
|
2017-12-19 07:59:36 +00:00
|
|
|
assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
|
2016-10-22 01:13:51 +01:00
|
|
|
blorp_clear_attachments(batch, binding_table,
|
2022-02-09 18:03:15 +00:00
|
|
|
ISL_FORMAT_UNSUPPORTED,
|
|
|
|
gfx->samples,
|
2016-10-22 01:13:51 +01:00
|
|
|
pRects[r].baseArrayLayer,
|
|
|
|
pRects[r].layerCount,
|
|
|
|
offset.x, offset.y,
|
|
|
|
offset.x + extent.width, offset.y + extent.height,
|
|
|
|
true, clear_color, false, 0.0f, 0, 0);
|
2016-10-07 22:43:21 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
struct blorp_batch *batch,
|
|
|
|
const VkClearAttachment *attachment,
|
|
|
|
uint32_t rectCount, const VkClearRect *pRects)
|
|
|
|
{
|
2016-10-22 01:13:51 +01:00
|
|
|
static const union isl_color_value color_value = { .u32 = { 0, } };
|
2022-02-09 18:03:15 +00:00
|
|
|
struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
|
|
|
|
const struct anv_attachment *d_att = &gfx->depth_att;
|
|
|
|
const struct anv_attachment *s_att = &gfx->stencil_att;
|
|
|
|
if (d_att->vk_format == VK_FORMAT_UNDEFINED &&
|
|
|
|
s_att->vk_format == VK_FORMAT_UNDEFINED)
|
2016-10-22 07:19:44 +01:00
|
|
|
return;
|
|
|
|
|
2016-10-07 22:43:21 +01:00
|
|
|
bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
|
|
|
|
bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;
|
|
|
|
|
2016-10-22 01:13:51 +01:00
|
|
|
enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
|
2022-02-09 18:03:15 +00:00
|
|
|
if (d_att->vk_format != VK_FORMAT_UNDEFINED) {
|
2016-10-22 01:13:51 +01:00
|
|
|
depth_format = anv_get_isl_format(&cmd_buffer->device->info,
|
2022-02-09 18:03:15 +00:00
|
|
|
d_att->vk_format,
|
2016-10-22 01:13:51 +01:00
|
|
|
VK_IMAGE_ASPECT_DEPTH_BIT,
|
|
|
|
VK_IMAGE_TILING_OPTIMAL);
|
2016-10-07 22:43:21 +01:00
|
|
|
}
|
|
|
|
|
2017-03-09 10:49:01 +00:00
|
|
|
uint32_t binding_table;
|
|
|
|
VkResult result =
|
2016-10-22 01:13:51 +01:00
|
|
|
binding_table_for_surface_state(cmd_buffer,
|
2022-02-09 18:03:15 +00:00
|
|
|
gfx->null_surface_state,
|
2017-03-09 10:49:01 +00:00
|
|
|
&binding_table);
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
return;
|
2016-10-07 22:43:21 +01:00
|
|
|
|
2017-05-18 07:23:38 +01:00
|
|
|
/* If multiview is enabled we ignore baseArrayLayer and layerCount */
|
2022-02-09 18:03:15 +00:00
|
|
|
if (gfx->view_mask) {
|
|
|
|
u_foreach_bit(view_idx, gfx->view_mask) {
|
2017-05-18 07:23:38 +01:00
|
|
|
for (uint32_t r = 0; r < rectCount; ++r) {
|
|
|
|
const VkOffset2D offset = pRects[r].rect.offset;
|
|
|
|
const VkExtent2D extent = pRects[r].rect.extent;
|
|
|
|
VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
|
|
|
|
blorp_clear_attachments(batch, binding_table,
|
2022-02-09 18:03:15 +00:00
|
|
|
depth_format,
|
|
|
|
gfx->samples,
|
2017-05-18 07:23:38 +01:00
|
|
|
view_idx, 1,
|
|
|
|
offset.x, offset.y,
|
|
|
|
offset.x + extent.width,
|
|
|
|
offset.y + extent.height,
|
|
|
|
false, color_value,
|
|
|
|
clear_depth, value.depth,
|
|
|
|
clear_stencil ? 0xff : 0, value.stencil);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-10-07 22:43:21 +01:00
|
|
|
for (uint32_t r = 0; r < rectCount; ++r) {
|
|
|
|
const VkOffset2D offset = pRects[r].rect.offset;
|
|
|
|
const VkExtent2D extent = pRects[r].rect.extent;
|
|
|
|
VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
|
2017-12-19 07:59:36 +00:00
|
|
|
assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
|
2016-10-22 01:13:51 +01:00
|
|
|
blorp_clear_attachments(batch, binding_table,
|
2022-02-09 18:03:15 +00:00
|
|
|
depth_format,
|
|
|
|
gfx->samples,
|
2016-10-22 01:13:51 +01:00
|
|
|
pRects[r].baseArrayLayer,
|
|
|
|
pRects[r].layerCount,
|
|
|
|
offset.x, offset.y,
|
|
|
|
offset.x + extent.width, offset.y + extent.height,
|
|
|
|
false, color_value,
|
|
|
|
clear_depth, value.depth,
|
|
|
|
clear_stencil ? 0xff : 0, value.stencil);
|
2016-10-07 22:43:21 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void anv_CmdClearAttachments(
|
|
|
|
VkCommandBuffer commandBuffer,
|
|
|
|
uint32_t attachmentCount,
|
|
|
|
const VkClearAttachment* pAttachments,
|
|
|
|
uint32_t rectCount,
|
|
|
|
const VkClearRect* pRects)
|
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
|
|
|
|
/* Because this gets called within a render pass, we tell blorp not to
|
|
|
|
* trash our depth and stencil buffers.
|
|
|
|
*/
|
|
|
|
struct blorp_batch batch;
|
2018-10-05 15:54:07 +01:00
|
|
|
enum blorp_batch_flags flags = BLORP_BATCH_NO_EMIT_DEPTH_STENCIL;
|
|
|
|
if (cmd_buffer->state.conditional_render_enabled) {
|
|
|
|
anv_cmd_emit_conditional_render_predicate(cmd_buffer);
|
|
|
|
flags |= BLORP_BATCH_PREDICATE_ENABLE;
|
|
|
|
}
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch, flags);
|
2016-10-07 22:43:21 +01:00
|
|
|
|
|
|
|
for (uint32_t a = 0; a < attachmentCount; ++a) {
|
2017-11-02 23:05:45 +00:00
|
|
|
if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
|
2017-07-19 12:14:19 +01:00
|
|
|
assert(pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
|
2016-10-07 22:43:21 +01:00
|
|
|
clear_color_attachment(cmd_buffer, &batch,
|
|
|
|
&pAttachments[a],
|
|
|
|
rectCount, pRects);
|
|
|
|
} else {
|
|
|
|
clear_depth_stencil_attachment(cmd_buffer, &batch,
|
|
|
|
&pAttachments[a],
|
|
|
|
rectCount, pRects);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2016-10-07 22:43:21 +01:00
|
|
|
}
|
|
|
|
|
2016-11-16 07:11:55 +00:00
|
|
|
/* Phases of a subpass at which an attachment operation can occur.
 * NOTE(review): no uses are visible in this chunk — confirm it is still
 * referenced before relying on these values.
 */
enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};
|
|
|
|
|
2018-06-26 18:01:00 +01:00
|
|
|
/* Resolve layer_count layers of a multisampled 2D color/depth/stencil image
 * into a single-sampled 2D destination using blorp_blit().
 *
 * Both images must be 2D (asserted below); the source must be multisampled
 * and the destination single-sampled.  Aux usages are passed in explicitly
 * by the caller (ANV_IMAGE_LAYOUT_EXPLICIT_AUX), so no layout-to-aux
 * translation happens here.
 *
 * If filter is BLORP_FILTER_NONE, a filter is chosen from the source
 * surface: SAMPLE_0 for depth, stencil, and integer formats (averaging is
 * meaningless there), AVERAGE otherwise.
 */
void
anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
                       const struct anv_image *src_image,
                       enum isl_aux_usage src_aux_usage,
                       uint32_t src_level, uint32_t src_base_layer,
                       const struct anv_image *dst_image,
                       enum isl_aux_usage dst_aux_usage,
                       uint32_t dst_level, uint32_t dst_base_layer,
                       VkImageAspectFlagBits aspect,
                       uint32_t src_x, uint32_t src_y,
                       uint32_t dst_x, uint32_t dst_y,
                       uint32_t width, uint32_t height,
                       uint32_t layer_count,
                       enum blorp_filter filter)
{
   struct blorp_batch batch;
   anv_blorp_batch_init(cmd_buffer, &batch, 0);
   /* MSAA resolves are a render operation; the compute path is not valid. */
   assert((batch.flags & BLORP_BATCH_USE_COMPUTE) == 0);

   assert(src_image->vk.image_type == VK_IMAGE_TYPE_2D);
   assert(src_image->vk.samples > 1);
   assert(dst_image->vk.image_type == VK_IMAGE_TYPE_2D);
   assert(dst_image->vk.samples == 1);

   struct blorp_surf src_surf, dst_surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, aspect,
                                VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                src_aux_usage, &src_surf);
   /* With MCS compression the source may contain fast-cleared blocks, so
    * blorp needs the clear color address to decode them during the blit.
    */
   if (src_aux_usage == ISL_AUX_USAGE_MCS) {
      src_surf.clear_color_addr = anv_to_blorp_address(
         anv_image_get_clear_color_addr(cmd_buffer->device, src_image,
                                        VK_IMAGE_ASPECT_COLOR_BIT));
   }
   get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, aspect,
                                VK_IMAGE_USAGE_TRANSFER_DST_BIT,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                dst_aux_usage, &dst_surf);
   /* Record the write so later layout transitions know the destination's
    * aux state for these levels/layers.
    */
   anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                     aspect, dst_aux_usage,
                                     dst_level, dst_base_layer, layer_count);

   if (filter == BLORP_FILTER_NONE) {
      /* If no explicit filter is provided, then it's implied by the type of
       * the source image.
       */
      if ((src_surf.surf->usage & ISL_SURF_USAGE_DEPTH_BIT) ||
          (src_surf.surf->usage & ISL_SURF_USAGE_STENCIL_BIT) ||
          isl_format_has_int_channel(src_surf.surf->format)) {
         filter = BLORP_FILTER_SAMPLE_0;
      } else {
         filter = BLORP_FILTER_AVERAGE;
      }
   }

   /* One blit per layer; ISL_FORMAT_UNSUPPORTED lets blorp pick the format
    * from each surface itself.
    */
   for (uint32_t l = 0; l < layer_count; l++) {
      blorp_blit(&batch,
                 &src_surf, src_level, src_base_layer + l,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 &dst_surf, dst_level, dst_base_layer + l,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 src_x, src_y, src_x + width, src_y + height,
                 dst_x, dst_y, dst_x + width, dst_y + height,
                 filter, false, false);
   }

   anv_blorp_batch_finish(&batch);
}
|
|
|
|
|
2020-07-15 10:30:49 +01:00
|
|
|
static void
|
|
|
|
resolve_image(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
struct anv_image *src_image,
|
|
|
|
VkImageLayout src_image_layout,
|
|
|
|
struct anv_image *dst_image,
|
|
|
|
VkImageLayout dst_image_layout,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkImageResolve2 *region)
|
2020-07-15 10:30:49 +01:00
|
|
|
{
|
|
|
|
assert(region->srcSubresource.aspectMask == region->dstSubresource.aspectMask);
|
2021-07-22 02:48:30 +01:00
|
|
|
assert(vk_image_subresource_layer_count(&src_image->vk, ®ion->srcSubresource) ==
|
|
|
|
vk_image_subresource_layer_count(&dst_image->vk, ®ion->dstSubresource));
|
2020-07-15 10:30:49 +01:00
|
|
|
|
|
|
|
const uint32_t layer_count =
|
2021-07-22 02:48:30 +01:00
|
|
|
vk_image_subresource_layer_count(&dst_image->vk, ®ion->dstSubresource);
|
2020-07-15 10:30:49 +01:00
|
|
|
|
|
|
|
anv_foreach_image_aspect_bit(aspect_bit, src_image,
|
|
|
|
region->srcSubresource.aspectMask) {
|
|
|
|
enum isl_aux_usage src_aux_usage =
|
|
|
|
anv_layout_to_aux_usage(&cmd_buffer->device->info, src_image,
|
|
|
|
(1 << aspect_bit),
|
|
|
|
VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
|
|
|
|
src_image_layout);
|
|
|
|
enum isl_aux_usage dst_aux_usage =
|
|
|
|
anv_layout_to_aux_usage(&cmd_buffer->device->info, dst_image,
|
|
|
|
(1 << aspect_bit),
|
|
|
|
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
|
|
|
|
dst_image_layout);
|
|
|
|
|
|
|
|
anv_image_msaa_resolve(cmd_buffer,
|
|
|
|
src_image, src_aux_usage,
|
|
|
|
region->srcSubresource.mipLevel,
|
|
|
|
region->srcSubresource.baseArrayLayer,
|
|
|
|
dst_image, dst_aux_usage,
|
|
|
|
region->dstSubresource.mipLevel,
|
|
|
|
region->dstSubresource.baseArrayLayer,
|
|
|
|
(1 << aspect_bit),
|
|
|
|
region->srcOffset.x,
|
|
|
|
region->srcOffset.y,
|
|
|
|
region->dstOffset.x,
|
|
|
|
region->dstOffset.y,
|
|
|
|
region->extent.width,
|
|
|
|
region->extent.height,
|
|
|
|
layer_count, BLORP_FILTER_NONE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-13 11:06:43 +01:00
|
|
|
void anv_CmdResolveImage2(
|
2020-07-15 10:30:49 +01:00
|
|
|
VkCommandBuffer commandBuffer,
|
2022-04-13 11:06:43 +01:00
|
|
|
const VkResolveImageInfo2* pResolveImageInfo)
|
2020-07-15 10:30:49 +01:00
|
|
|
{
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
ANV_FROM_HANDLE(anv_image, src_image, pResolveImageInfo->srcImage);
|
|
|
|
ANV_FROM_HANDLE(anv_image, dst_image, pResolveImageInfo->dstImage);
|
|
|
|
|
|
|
|
for (uint32_t r = 0; r < pResolveImageInfo->regionCount; r++) {
|
|
|
|
resolve_image(cmd_buffer,
|
|
|
|
src_image, pResolveImageInfo->srcImageLayout,
|
|
|
|
dst_image, pResolveImageInfo->dstImageLayout,
|
|
|
|
&pResolveImageInfo->pRegions[r]);
|
2016-08-31 01:49:56 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
anv/image: Support creating uncompressed views of compressed images
In order to get support everywhere, this gets a bit complicated. On Sky
Lake and later, everything is fine because HALIGN/VALIGN are specified
in surface elements and are required to be at least 4 so any offsetting
we may need to do falls neatly within the heavy restrictions placed on
the X/Y Offset parameter of RENDER_SURFACE_STATE. On Broadwell and
earlier, HALIGN/VALIGN are specified in pixels and are hard-coded to
align to exactly the block size of the compressed texture. This means
that, when reinterpreted as a non-compressed texture, the tile offsets
may be anything and we can't rely on X/Y Offset.
In order to work around this issue, we fall back to linear where we can
trivially offset to whatever element we so choose. However, since
linear texturing performance is terrible, we create a tiled shadow copy
of the image to use for texturing. Whenever the user does a layout
transition from anything to SHADER_READ_ONLY_OPTIMAL, we use blorp to
copy the contents of the texture from the linear copy to the tiled
shadow copy. This assumes that the client will use the image far more
for texturing than as a storage image or render target.
Even though we don't need the shadow copy on Sky Lake, we implement it
this way first to make testing easier. Due to the hardware restriction
that ASTC must not be linear, ASTC does not work yet.
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
2017-07-11 21:17:06 +01:00
|
|
|
/* Copy the given mip levels/layers from an image's main (linear) surface to
 * its tiled shadow copy using blorp_copy().
 *
 * The shadow surface exists so texturing can use a tiled layout where the
 * main surface had to be linear (see the uncompressed-view-of-compressed
 * workaround this file implements).  The main surface is read with
 * ISL_AUX_USAGE_NONE, i.e. no compression is resolved here.
 */
void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         VkImageAspectFlagBits aspect,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count)
{
   struct blorp_batch batch;
   anv_blorp_batch_init(cmd_buffer, &batch, 0);

   /* We don't know who touched the main surface last so flush a bunch of
    * caches to ensure we get good data.
    */
   anv_add_pending_pipe_bits(cmd_buffer,
                             ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
                             ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
                             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
                             ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT,
                             "before copy_to_shadow");

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, aspect,
                                VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                VK_IMAGE_LAYOUT_GENERAL,
                                ISL_AUX_USAGE_NONE, &surf);
   assert(surf.aux_usage == ISL_AUX_USAGE_NONE);

   struct blorp_surf shadow_surf;
   get_blorp_surf_for_anv_shadow_image(cmd_buffer->device,
                                       image, aspect, &shadow_surf);

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = vk_image_mip_level_extent(&image->vk, level);

      /* For 3D images the per-level depth takes the place of the caller's
       * layer_count; note this deliberately overwrites the parameter on
       * every level iteration since depth shrinks with each mip.
       */
      if (image->vk.image_type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      for (uint32_t a = 0; a < layer_count; a++) {
         const uint32_t layer = base_layer + a;

         blorp_copy(&batch, &surf, level, layer,
                    &shadow_surf, level, layer,
                    0, 0, 0, 0, extent.width, extent.height);
      }
   }

   /* We just wrote to the buffer with the render cache. Flush it. */
   anv_add_pending_pipe_bits(cmd_buffer,
                             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT,
                             "after copy_to_shadow");

   anv_blorp_batch_finish(&batch);
}
|
|
|
|
|
2017-11-21 21:30:49 +00:00
|
|
|
/* Clear a rectangular area of one mip level / layer range of a color image
 * to the given clear color via blorp_clear().
 *
 * The caller supplies the aux usage explicitly
 * (ANV_IMAGE_LAYOUT_EXPLICIT_AUX); the write is recorded with
 * anv_cmd_buffer_mark_image_written so later transitions see the correct
 * aux state.
 */
void
anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_image *image,
                      VkImageAspectFlagBits aspect,
                      enum isl_aux_usage aux_usage,
                      enum isl_format format, struct isl_swizzle swizzle,
                      uint32_t level, uint32_t base_layer, uint32_t layer_count,
                      VkRect2D area, union isl_color_value clear_color)
{
   assert(image->vk.aspects == VK_IMAGE_ASPECT_COLOR_BIT);

   /* We don't support planar images with multisampling yet */
   assert(image->n_planes == 1);

   struct blorp_batch batch;
   anv_blorp_batch_init(cmd_buffer, &batch, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                VK_IMAGE_USAGE_TRANSFER_DST_BIT,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                aux_usage, &surf);
   anv_cmd_buffer_mark_image_written(cmd_buffer, image, aspect, aux_usage,
                                     level, base_layer, layer_count);

   /* area is inclusive origin + extent; blorp takes x0/y0/x1/y1. */
   blorp_clear(&batch, &surf, format, anv_swizzle_for_render(swizzle),
               level, base_layer, layer_count,
               area.offset.x, area.offset.y,
               area.offset.x + area.extent.width,
               area.offset.y + area.extent.height,
               clear_color, 0 /* color_write_disable */);

   anv_blorp_batch_finish(&batch);
}
|
|
|
|
|
2017-11-21 22:46:25 +00:00
|
|
|
/* Clear a rectangular area of the depth and/or stencil aspects of an image
 * (slow/non-HiZ path) via blorp_clear_depth_stencil().
 *
 * depth_aux_usage applies to the depth aspect only; the stencil aspect uses
 * the aux usage recorded on its plane.  The stencil write mask is 0xff when
 * the stencil aspect is being cleared, 0 otherwise.  If the image has a
 * stencil shadow surface, it is cleared to the same value so it stays in
 * sync with the main stencil surface.
 */
void
anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
                              const struct anv_image *image,
                              VkImageAspectFlags aspects,
                              enum isl_aux_usage depth_aux_usage,
                              uint32_t level,
                              uint32_t base_layer, uint32_t layer_count,
                              VkRect2D area,
                              float depth_value, uint8_t stencil_value)
{
   assert(image->vk.aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
                               VK_IMAGE_ASPECT_STENCIL_BIT));

   struct blorp_batch batch;
   anv_blorp_batch_init(cmd_buffer, &batch, 0);
   /* Depth/stencil clears must go through the render pipe. */
   assert((batch.flags & BLORP_BATCH_USE_COMPUTE) == 0);

   /* Zero-initialized surf means "aspect not present" to blorp. */
   struct blorp_surf depth = {};
   if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   0, ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   depth_aux_usage, &depth);
   }

   struct blorp_surf stencil = {};
   if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      const uint32_t plane =
         anv_image_aspect_to_plane(image, VK_IMAGE_ASPECT_STENCIL_BIT);
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   0, ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   image->planes[plane].aux_usage, &stencil);
   }

   /* Blorp may choose to clear stencil using RGBA32_UINT for better
    * performance.  If it does this, we need to flush it out of the depth
    * cache before rendering to it.
    */
   anv_add_pending_pipe_bits(cmd_buffer,
                             ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
                             ANV_PIPE_END_OF_PIPE_SYNC_BIT,
                             "before clear DS");

   blorp_clear_depth_stencil(&batch, &depth, &stencil,
                             level, base_layer, layer_count,
                             area.offset.x, area.offset.y,
                             area.offset.x + area.extent.width,
                             area.offset.y + area.extent.height,
                             aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                             depth_value,
                             (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) ? 0xff : 0,
                             stencil_value);

   /* Blorp may choose to clear stencil using RGBA32_UINT for better
    * performance.  If it does this, we need to flush it out of the render
    * cache before someone starts trying to do stencil on it.
    */
   anv_add_pending_pipe_bits(cmd_buffer,
                             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
                             ANV_PIPE_END_OF_PIPE_SYNC_BIT,
                             "after clear DS");

   /* Keep the tiled stencil shadow copy (if any) coherent with the clear. */
   struct blorp_surf stencil_shadow;
   if ((aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
       get_blorp_surf_for_anv_shadow_image(cmd_buffer->device, image,
                                           VK_IMAGE_ASPECT_STENCIL_BIT,
                                           &stencil_shadow)) {
      union isl_color_value stencil_color = {
         .u32 = { stencil_value },
      };
      blorp_clear(&batch, &stencil_shadow,
                  ISL_FORMAT_R8_UINT, ISL_SWIZZLE_IDENTITY,
                  level, base_layer, layer_count,
                  area.offset.x, area.offset.y,
                  area.offset.x + area.extent.width,
                  area.offset.y + area.extent.height,
                  stencil_color, 0 /* color_write_disable */);
   }

   anv_blorp_batch_finish(&batch);
}
|
|
|
|
|
2017-11-21 18:20:57 +00:00
|
|
|
/* Perform a HiZ auxiliary-surface operation (e.g. resolve or ambiguate,
 * per hiz_op) on one mip level / layer range of a depth image via
 * blorp_hiz_op().
 *
 * Only the depth aspect is supported (asserted), which in anv is always
 * plane 0; the surface is set up with the plane's current aux usage.
 */
void
anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op hiz_op)
{
   assert(aspect == VK_IMAGE_ASPECT_DEPTH_BIT);
   /* The whole layer range must actually have aux data at this level. */
   assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, level));
   const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
   assert(plane == 0);

   struct blorp_batch batch;
   anv_blorp_batch_init(cmd_buffer, &batch, 0);
   /* HiZ ops are render-pipe only. */
   assert((batch.flags & BLORP_BATCH_USE_COMPUTE) == 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                0, ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                image->planes[plane].aux_usage, &surf);

   blorp_hiz_op(&batch, &surf, level, base_layer, layer_count, hiz_op);

   anv_blorp_batch_finish(&batch);
}
|
2017-03-11 00:31:16 +00:00
|
|
|
|
2017-11-21 22:46:25 +00:00
|
|
|
/* Fast-clear the depth and/or stencil aspects of an image using a BLORP
 * HiZ (WM_HZ_OP) clear.
 *
 * The depth aspect is cleared to ANV_HZ_FC_VAL and the stencil aspect to
 * stencil_value, over the given rectangle of one miplevel and a range of
 * array layers.  Both surfaces are set up with their current aux usage
 * (ANV_IMAGE_LAYOUT_EXPLICIT_AUX), and the PIPE_CONTROL workarounds the
 * hardware requires before and after a WM_HZ_OP depth-buffer clear are
 * queued on the command buffer.  The order of the pipe-control calls
 * relative to the BLORP batch is load-bearing — do not reorder.
 */
void
anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
                    const struct anv_image *image,
                    VkImageAspectFlags aspects,
                    uint32_t level,
                    uint32_t base_layer, uint32_t layer_count,
                    VkRect2D area, uint8_t stencil_value)
{
   assert(image->vk.aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
                               VK_IMAGE_ASPECT_STENCIL_BIT));

   struct blorp_batch batch;
   anv_blorp_batch_init(cmd_buffer, &batch, 0);
   /* HiZ clears only exist on the 3D pipeline. */
   assert((batch.flags & BLORP_BATCH_USE_COMPUTE) == 0);

   struct blorp_surf depth = {};
   if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      const uint32_t plane =
         anv_image_aspect_to_plane(image, VK_IMAGE_ASPECT_DEPTH_BIT);
      assert(base_layer + layer_count <=
             anv_image_aux_layers(image, VK_IMAGE_ASPECT_DEPTH_BIT, level));
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   0, ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   image->planes[plane].aux_usage, &depth);
   }

   /* NOTE(review): unlike the depth path above, the stencil path has no
    * anv_image_aux_layers() range assert — presumably stencil aux layering
    * matches depth here; confirm if layer ranges can diverge per aspect.
    */
   struct blorp_surf stencil = {};
   if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      const uint32_t plane =
         anv_image_aspect_to_plane(image, VK_IMAGE_ASPECT_STENCIL_BIT);
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   0, ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   image->planes[plane].aux_usage, &stencil);
   }

   /* From the Sky Lake PRM Volume 7, "Depth Buffer Clear":
    *
    *    "The following is required when performing a depth buffer clear with
    *    using the WM_STATE or 3DSTATE_WM:
    *
    *       * If other rendering operations have preceded this clear, a
    *         PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
    *         enabled must be issued before the rectangle primitive used for
    *         the depth buffer clear operation.
    *       * [...]"
    *
    * Even though the PRM only says that this is required if using 3DSTATE_WM
    * and a 3DPRIMITIVE, the GPU appears to also need this to avoid occasional
    * hangs when doing a clear with WM_HZ_OP.
    */
   anv_add_pending_pipe_bits(cmd_buffer,
                             ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
                             ANV_PIPE_DEPTH_STALL_BIT,
                             "before clear hiz");

   if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
       depth.aux_usage == ISL_AUX_USAGE_HIZ_CCS_WT) {
      /* From Bspec 47010 (Depth Buffer Clear):
       *
       *    Since the fast clear cycles to CCS are not cached in TileCache,
       *    any previous depth buffer writes to overlapping pixels must be
       *    flushed out of TileCache before a succeeding Depth Buffer Clear.
       *    This restriction only applies to Depth Buffer with write-thru
       *    enabled, since fast clears to CCS only occur for write-thru mode.
       *
       * There may have been a write to this depth buffer. Flush it from the
       * tile cache just in case.
       */
      anv_add_pending_pipe_bits(cmd_buffer,
                                ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
                                ANV_PIPE_TILE_CACHE_FLUSH_BIT,
                                "before clear hiz_ccs_wt");
   }

   /* Depth is cleared to the hardware fast-clear value; stencil to the
    * caller-provided byte.  A zero blorp_surf is passed for any aspect the
    * caller did not request, with the matching clear flag set to false.
    */
   blorp_hiz_clear_depth_stencil(&batch, &depth, &stencil,
                                 level, base_layer, layer_count,
                                 area.offset.x, area.offset.y,
                                 area.offset.x + area.extent.width,
                                 area.offset.y + area.extent.height,
                                 aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                                 ANV_HZ_FC_VAL,
                                 aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
                                 stencil_value);

   anv_blorp_batch_finish(&batch);

   /* From the SKL PRM, Depth Buffer Clear:
    *
    *    "Depth Buffer Clear Workaround
    *
    *    Depth buffer clear pass using any of the methods (WM_STATE,
    *    3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a PIPE_CONTROL
    *    command with DEPTH_STALL bit and Depth FLUSH bits “set” before
    *    starting to render. DepthStall and DepthFlush are not needed between
    *    consecutive depth clear passes nor is it required if the depth-clear
    *    pass was done with “full_surf_clear” bit set in the
    *    3DSTATE_WM_HZ_OP."
    *
    * Even though the PRM provides a bunch of conditions under which this is
    * supposedly unnecessary, we choose to perform the flush unconditionally
    * just to be safe.
    */
   anv_add_pending_pipe_bits(cmd_buffer,
                             ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
                             ANV_PIPE_DEPTH_STALL_BIT,
                             "after clear hiz");
}
|
|
|
|
|
2017-03-11 00:31:16 +00:00
|
|
|
void
|
2017-11-21 17:56:41 +00:00
|
|
|
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
const struct anv_image *image,
|
2020-03-17 18:12:35 +00:00
|
|
|
enum isl_format format, struct isl_swizzle swizzle,
|
2017-11-21 17:56:41 +00:00
|
|
|
VkImageAspectFlagBits aspect,
|
|
|
|
uint32_t base_layer, uint32_t layer_count,
|
2018-03-06 17:21:40 +00:00
|
|
|
enum isl_aux_op mcs_op, union isl_color_value *clear_value,
|
|
|
|
bool predicate)
|
2017-03-11 00:31:16 +00:00
|
|
|
{
|
2021-07-22 02:42:00 +01:00
|
|
|
assert(image->vk.aspects == VK_IMAGE_ASPECT_COLOR_BIT);
|
|
|
|
assert(image->vk.samples > 1);
|
2017-11-21 17:56:41 +00:00
|
|
|
assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, 0));
|
2017-03-11 00:31:16 +00:00
|
|
|
|
2017-11-21 17:56:41 +00:00
|
|
|
/* Multisampling with multi-planar formats is not supported */
|
|
|
|
assert(image->n_planes == 1);
|
|
|
|
|
2021-10-08 18:48:08 +01:00
|
|
|
const struct intel_device_info *devinfo = &cmd_buffer->device->info;
|
2017-11-21 17:56:41 +00:00
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch,
|
|
|
|
BLORP_BATCH_PREDICATE_ENABLE * predicate +
|
|
|
|
BLORP_BATCH_NO_UPDATE_CLEAR_COLOR * !clear_value);
|
2019-01-01 00:06:47 +00:00
|
|
|
assert((batch.flags & BLORP_BATCH_USE_COMPUTE) == 0);
|
2017-11-21 17:56:41 +00:00
|
|
|
|
|
|
|
struct blorp_surf surf;
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
|
2019-11-19 23:51:20 +00:00
|
|
|
0, ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2018-02-02 22:51:56 +00:00
|
|
|
ISL_AUX_USAGE_MCS, &surf);
|
2017-11-21 17:56:41 +00:00
|
|
|
|
2018-03-06 17:21:40 +00:00
|
|
|
/* Blorp will store the clear color for us if we provide the clear color
|
|
|
|
* address and we are doing a fast clear. So we save the clear value into
|
2019-10-23 23:51:56 +01:00
|
|
|
* the blorp surface.
|
2018-03-06 17:21:40 +00:00
|
|
|
*/
|
2019-10-23 23:51:56 +01:00
|
|
|
if (clear_value)
|
|
|
|
surf.clear_color = *clear_value;
|
2017-11-11 22:32:21 +00:00
|
|
|
|
2017-11-21 17:56:41 +00:00
|
|
|
/* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
|
|
|
|
*
|
|
|
|
* "After Render target fast clear, pipe-control with color cache
|
|
|
|
* write-flush must be issued before sending any DRAW commands on
|
|
|
|
* that render target."
|
|
|
|
*
|
|
|
|
* This comment is a bit cryptic and doesn't really tell you what's going
|
|
|
|
* or what's really needed. It appears that fast clear ops are not
|
|
|
|
* properly synchronized with other drawing. This means that we cannot
|
|
|
|
* have a fast clear operation in the pipe at the same time as other
|
|
|
|
* regular drawing operations. We need to use a PIPE_CONTROL to ensure
|
|
|
|
* that the contents of the previous draw hit the render target before we
|
|
|
|
* resolve and then use a second PIPE_CONTROL after the resolve to ensure
|
|
|
|
* that it is completed before any additional drawing occurs.
|
|
|
|
*/
|
2021-03-11 16:40:56 +00:00
|
|
|
anv_add_pending_pipe_bits(cmd_buffer,
|
|
|
|
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
|
2021-03-17 04:02:35 +00:00
|
|
|
ANV_PIPE_TILE_CACHE_FLUSH_BIT |
|
2021-11-08 16:33:19 +00:00
|
|
|
(devinfo->verx10 == 120 ?
|
|
|
|
ANV_PIPE_DEPTH_STALL_BIT : 0) |
|
2021-11-08 16:41:20 +00:00
|
|
|
(devinfo->verx10 == 125 ?
|
2022-07-28 21:34:32 +01:00
|
|
|
ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
|
|
|
|
ANV_PIPE_DATA_CACHE_FLUSH_BIT : 0) |
|
2021-10-08 19:16:35 +01:00
|
|
|
ANV_PIPE_PSS_STALL_SYNC_BIT |
|
2021-03-11 16:40:56 +00:00
|
|
|
ANV_PIPE_END_OF_PIPE_SYNC_BIT,
|
|
|
|
"before fast clear mcs");
|
2017-07-19 12:14:19 +01:00
|
|
|
|
2017-11-21 17:56:41 +00:00
|
|
|
switch (mcs_op) {
|
|
|
|
case ISL_AUX_OP_FAST_CLEAR:
|
2020-03-17 18:12:35 +00:00
|
|
|
blorp_fast_clear(&batch, &surf, format, swizzle,
|
2017-11-21 17:56:41 +00:00
|
|
|
0, base_layer, layer_count,
|
2021-07-22 02:42:00 +01:00
|
|
|
0, 0, image->vk.extent.width, image->vk.extent.height);
|
2017-11-21 17:56:41 +00:00
|
|
|
break;
|
|
|
|
case ISL_AUX_OP_PARTIAL_RESOLVE:
|
2018-12-24 10:11:59 +00:00
|
|
|
blorp_mcs_partial_resolve(&batch, &surf, format,
|
2017-11-11 22:32:21 +00:00
|
|
|
base_layer, layer_count);
|
|
|
|
break;
|
|
|
|
case ISL_AUX_OP_FULL_RESOLVE:
|
2017-11-21 17:56:41 +00:00
|
|
|
case ISL_AUX_OP_AMBIGUATE:
|
|
|
|
default:
|
|
|
|
unreachable("Unsupported MCS operation");
|
|
|
|
}
|
|
|
|
|
2021-03-11 16:40:56 +00:00
|
|
|
anv_add_pending_pipe_bits(cmd_buffer,
|
|
|
|
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
|
2021-10-08 18:48:08 +01:00
|
|
|
(devinfo->verx10 == 120 ?
|
2021-11-08 16:33:19 +00:00
|
|
|
ANV_PIPE_TILE_CACHE_FLUSH_BIT |
|
|
|
|
ANV_PIPE_DEPTH_STALL_BIT : 0) |
|
2021-10-08 19:16:35 +01:00
|
|
|
ANV_PIPE_PSS_STALL_SYNC_BIT |
|
2021-03-11 16:40:56 +00:00
|
|
|
ANV_PIPE_END_OF_PIPE_SYNC_BIT,
|
|
|
|
"after fast clear mcs");
|
2017-11-21 17:56:41 +00:00
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2017-11-21 17:56:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
const struct anv_image *image,
|
2020-03-17 18:12:35 +00:00
|
|
|
enum isl_format format, struct isl_swizzle swizzle,
|
2017-11-21 17:56:41 +00:00
|
|
|
VkImageAspectFlagBits aspect, uint32_t level,
|
|
|
|
uint32_t base_layer, uint32_t layer_count,
|
2018-03-06 17:21:40 +00:00
|
|
|
enum isl_aux_op ccs_op, union isl_color_value *clear_value,
|
|
|
|
bool predicate)
|
2017-11-21 17:56:41 +00:00
|
|
|
{
|
2021-07-22 02:42:00 +01:00
|
|
|
assert(image->vk.aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
|
|
|
|
assert(image->vk.samples == 1);
|
2017-07-19 12:14:19 +01:00
|
|
|
assert(level < anv_image_aux_levels(image, aspect));
|
2017-11-21 17:56:41 +00:00
|
|
|
/* Multi-LOD YcBcR is not allowed */
|
|
|
|
assert(image->n_planes == 1 || level == 0);
|
|
|
|
assert(base_layer + layer_count <=
|
2017-11-11 20:22:45 +00:00
|
|
|
anv_image_aux_layers(image, aspect, level));
|
2017-11-21 17:56:41 +00:00
|
|
|
|
2021-07-31 00:57:35 +01:00
|
|
|
const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
|
2021-10-08 18:48:08 +01:00
|
|
|
const struct intel_device_info *devinfo = &cmd_buffer->device->info;
|
2017-03-11 00:31:16 +00:00
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_init(cmd_buffer, &batch,
|
|
|
|
BLORP_BATCH_PREDICATE_ENABLE * predicate +
|
|
|
|
BLORP_BATCH_NO_UPDATE_CLEAR_COLOR * !clear_value);
|
2019-01-01 00:06:47 +00:00
|
|
|
assert((batch.flags & BLORP_BATCH_USE_COMPUTE) == 0);
|
2017-03-11 00:31:16 +00:00
|
|
|
|
|
|
|
struct blorp_surf surf;
|
2017-11-03 22:18:45 +00:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
|
2019-11-19 23:51:20 +00:00
|
|
|
0, ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2020-01-21 23:13:30 +00:00
|
|
|
image->planes[plane].aux_usage,
|
2017-03-11 00:31:16 +00:00
|
|
|
&surf);
|
|
|
|
|
2021-07-21 23:54:39 +01:00
|
|
|
uint32_t level_width = anv_minify(surf.surf->logical_level0_px.w, level);
|
|
|
|
uint32_t level_height = anv_minify(surf.surf->logical_level0_px.h, level);
|
|
|
|
|
2018-03-06 17:21:40 +00:00
|
|
|
/* Blorp will store the clear color for us if we provide the clear color
|
|
|
|
* address and we are doing a fast clear. So we save the clear value into
|
2019-10-23 23:51:56 +01:00
|
|
|
* the blorp surface.
|
2018-03-06 17:21:40 +00:00
|
|
|
*/
|
2019-10-23 23:51:56 +01:00
|
|
|
if (clear_value)
|
|
|
|
surf.clear_color = *clear_value;
|
2017-11-21 17:56:41 +00:00
|
|
|
|
|
|
|
/* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
|
|
|
|
*
|
|
|
|
* "After Render target fast clear, pipe-control with color cache
|
|
|
|
* write-flush must be issued before sending any DRAW commands on
|
|
|
|
* that render target."
|
|
|
|
*
|
|
|
|
* This comment is a bit cryptic and doesn't really tell you what's going
|
|
|
|
* or what's really needed. It appears that fast clear ops are not
|
|
|
|
* properly synchronized with other drawing. This means that we cannot
|
|
|
|
* have a fast clear operation in the pipe at the same time as other
|
|
|
|
* regular drawing operations. We need to use a PIPE_CONTROL to ensure
|
|
|
|
* that the contents of the previous draw hit the render target before we
|
|
|
|
* resolve and then use a second PIPE_CONTROL after the resolve to ensure
|
|
|
|
* that it is completed before any additional drawing occurs.
|
|
|
|
*/
|
2021-03-11 16:40:56 +00:00
|
|
|
anv_add_pending_pipe_bits(cmd_buffer,
|
|
|
|
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
|
2021-03-17 04:02:35 +00:00
|
|
|
ANV_PIPE_TILE_CACHE_FLUSH_BIT |
|
2021-11-08 16:33:19 +00:00
|
|
|
(devinfo->verx10 == 120 ?
|
|
|
|
ANV_PIPE_DEPTH_STALL_BIT : 0) |
|
2021-11-08 16:41:20 +00:00
|
|
|
(devinfo->verx10 == 125 ?
|
2022-07-28 21:32:10 +01:00
|
|
|
ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
|
|
|
|
ANV_PIPE_DATA_CACHE_FLUSH_BIT : 0) |
|
2021-10-08 19:16:35 +01:00
|
|
|
ANV_PIPE_PSS_STALL_SYNC_BIT |
|
2021-03-11 16:40:56 +00:00
|
|
|
ANV_PIPE_END_OF_PIPE_SYNC_BIT,
|
|
|
|
"before fast clear ccs");
|
2017-11-21 17:56:41 +00:00
|
|
|
|
|
|
|
switch (ccs_op) {
|
|
|
|
case ISL_AUX_OP_FAST_CLEAR:
|
2020-03-17 18:12:35 +00:00
|
|
|
blorp_fast_clear(&batch, &surf, format, swizzle,
|
2017-11-21 17:56:41 +00:00
|
|
|
level, base_layer, layer_count,
|
|
|
|
0, 0, level_width, level_height);
|
|
|
|
break;
|
|
|
|
case ISL_AUX_OP_FULL_RESOLVE:
|
|
|
|
case ISL_AUX_OP_PARTIAL_RESOLVE:
|
|
|
|
blorp_ccs_resolve(&batch, &surf, level, base_layer, layer_count,
|
2018-12-24 10:11:59 +00:00
|
|
|
format, ccs_op);
|
2017-11-21 17:56:41 +00:00
|
|
|
break;
|
|
|
|
case ISL_AUX_OP_AMBIGUATE:
|
2017-11-28 02:09:48 +00:00
|
|
|
for (uint32_t a = 0; a < layer_count; a++) {
|
|
|
|
const uint32_t layer = base_layer + a;
|
|
|
|
blorp_ccs_ambiguate(&batch, &surf, level, layer);
|
|
|
|
}
|
|
|
|
break;
|
2017-11-21 17:56:41 +00:00
|
|
|
default:
|
|
|
|
unreachable("Unsupported CCS operation");
|
|
|
|
}
|
|
|
|
|
2021-03-11 16:40:56 +00:00
|
|
|
anv_add_pending_pipe_bits(cmd_buffer,
|
|
|
|
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
|
2021-10-08 18:48:08 +01:00
|
|
|
(devinfo->verx10 == 120 ?
|
2021-11-08 16:33:19 +00:00
|
|
|
ANV_PIPE_TILE_CACHE_FLUSH_BIT |
|
|
|
|
ANV_PIPE_DEPTH_STALL_BIT : 0) |
|
2021-10-08 19:16:35 +01:00
|
|
|
ANV_PIPE_PSS_STALL_SYNC_BIT |
|
2021-03-11 16:40:56 +00:00
|
|
|
ANV_PIPE_END_OF_PIPE_SYNC_BIT,
|
|
|
|
"after fast clear ccs");
|
2017-03-11 00:31:16 +00:00
|
|
|
|
2021-09-15 20:35:30 +01:00
|
|
|
anv_blorp_batch_finish(&batch);
|
2017-03-11 00:31:16 +00:00
|
|
|
}
|