anv: Enable VK_VALVE_mutable_descriptor_type

This change introduces the anv_descriptor_size_for_mutable_type and
anv_descriptor_data_for_mutable_type helpers, which compute the size and
the data flags, respectively, for mutable descriptor types.

In order to make handling these types easier, we now store a precomputed
descriptor stride for all types and use it in the appropriate places.
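
As a minimal sketch, this is the addressing pattern the precomputed stride
enables; the helper name is hypothetical, but the expression mirrors the
ones used in the descriptor write paths in the diff below:

/* Illustrative only: locate descriptor `element` of a binding in the
 * descriptor buffer using the precomputed per-binding stride.
 */
static void *
desc_map_for_element(struct anv_descriptor_set *set,
                     const struct anv_descriptor_set_binding_layout *bind_layout,
                     uint32_t element)
{
   return set->desc_mem.map + bind_layout->descriptor_offset +
          element * bind_layout->descriptor_stride;
}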

We also need to adjust the compiler to take this descriptor stride into
account. To that end, we now pack the stride into the upper 16 bits,
alongside the set index and the dynamic offset index, and use it later to
compute the correct offset.
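
For reference, a plain-C mirror of the new packed layout (the driver emits
this packing in NIR via nir_extract_u8; the helper names here are
illustrative, not part of the change):

#include <stdint.h>

/* Bit layout of the packed word in the resource index vector:
 *   bits [31:16]  descriptor stride (the unpack side currently reads
 *                 only byte 2, i.e. bits [23:16])
 *   bits [15:8]   descriptor set index
 *   bits [7:0]    dynamic offset index
 */
static inline uint32_t
pack_res_index(uint32_t stride, uint32_t set_idx, uint32_t dyn_offset_base)
{
   return (stride << 16) | (set_idx << 8) | dyn_offset_base;
}

static inline uint32_t
unpack_desc_stride(uint32_t packed)
{
   return (packed >> 16) & 0xff; /* extract_u8(packed, 2) in the NIR pass */
}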

Closes: #4250
Signed-off-by: Rohan Garg <rohan.garg@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14633>

@@ -122,6 +122,34 @@ anv_descriptor_data_for_type(const struct anv_physical_device *device,
return data;
}
static enum anv_descriptor_data
anv_descriptor_data_for_mutable_type(const struct anv_physical_device *device,
const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info,
int binding)
{
enum anv_descriptor_data desc_data = 0;
if (!mutable_info || mutable_info->mutableDescriptorTypeListCount == 0) {
for(VkDescriptorType i = 0; i <= VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; i++) {
if (i == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC ||
i == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
i == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
continue;
desc_data |= anv_descriptor_data_for_type(device, i);
}
desc_data |= anv_descriptor_data_for_type(device, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
return desc_data;
}
for (uint32_t i = 0; i < mutable_info->pMutableDescriptorTypeLists[binding].descriptorTypeCount; i++)
desc_data |= anv_descriptor_data_for_type(device, mutable_info->pMutableDescriptorTypeLists[binding].pDescriptorTypes[i]);
return desc_data;
}
static unsigned
anv_descriptor_data_size(enum anv_descriptor_data data)
{
@@ -156,7 +184,7 @@ anv_needs_descriptor_buffer(VkDescriptorType desc_type,
}
/** Returns the size in bytes of each descriptor with the given layout */
unsigned
static unsigned
anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout)
{
if (layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
@@ -177,23 +205,38 @@ anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout)
return size;
}
/** Returns the size in bytes of each descriptor of the given type
*
* This version of the function does not have access to the entire layout so
* it may only work on certain descriptor types where the descriptor size is
* entirely determined by the descriptor type. Whenever possible, code should
* use anv_descriptor_size() instead.
*/
unsigned
anv_descriptor_type_size(const struct anv_physical_device *pdevice,
VkDescriptorType type)
/** Returns size in bytes of the biggest descriptor in the given layout */
static unsigned
anv_descriptor_size_for_mutable_type(const struct anv_physical_device *device,
const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info,
int binding)
{
assert(type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT &&
type != VK_DESCRIPTOR_TYPE_SAMPLER &&
type != VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE &&
type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
unsigned size = 0;
return anv_descriptor_data_size(anv_descriptor_data_for_type(pdevice, type));
if (!mutable_info || mutable_info->mutableDescriptorTypeListCount == 0) {
for(VkDescriptorType i = 0; i <= VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; i++) {
if (i == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC ||
i == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
i == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
continue;
enum anv_descriptor_data desc_data = anv_descriptor_data_for_type(device, i);
size = MAX2(size, anv_descriptor_data_size(desc_data));
}
enum anv_descriptor_data desc_data = anv_descriptor_data_for_type(device, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
size = MAX2(size, anv_descriptor_data_size(desc_data));
return size;
}
for (uint32_t i = 0; i < mutable_info->pMutableDescriptorTypeLists[binding].descriptorTypeCount; i++) {
enum anv_descriptor_data desc_data = anv_descriptor_data_for_type(device, mutable_info->pMutableDescriptorTypeLists[binding].pDescriptorTypes[i]);
size = MAX2(size, anv_descriptor_data_size(desc_data));
}
return size;
}
static bool
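
For context, a sketch of the application-side structures these helpers
consume, following the VK_VALVE_mutable_descriptor_type extension (this
snippet is not part of the diff):

/* Declare a binding as mutable, restricted to sampled-image or
 * storage-image descriptors. One type list per binding; the helpers
 * above also handle a missing or empty list by assuming any supported
 * descriptor type.
 */
const VkDescriptorType types[] = {
   VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
   VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
};
const VkMutableDescriptorTypeListVALVE type_list = {
   .descriptorTypeCount = 2,
   .pDescriptorTypes = types,
};
const VkMutableDescriptorTypeCreateInfoVALVE mutable_info = {
   .sType = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE,
   .mutableDescriptorTypeListCount = 1,
   .pMutableDescriptorTypeLists = &type_list,
};
/* Chained through the pNext of VkDescriptorSetLayoutCreateInfo; the code
 * above then ORs together the data flags and takes the maximum size over
 * the listed types.
 */
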
@@ -260,6 +303,9 @@ void anv_GetDescriptorSetLayoutSupport(
const VkDescriptorSetLayoutBindingFlagsCreateInfo *binding_flags_info =
vk_find_struct_const(pCreateInfo->pNext,
DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
vk_find_struct_const(pCreateInfo->pNext,
MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);
for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) {
const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b];
@@ -270,8 +316,9 @@ void anv_GetDescriptorSetLayoutSupport(
flags = binding_flags_info->pBindingFlags[b];
}
enum anv_descriptor_data desc_data =
anv_descriptor_data_for_type(pdevice, binding->descriptorType);
enum anv_descriptor_data desc_data = binding->descriptorType == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE ?
anv_descriptor_data_for_mutable_type(pdevice, mutable_info, b) :
anv_descriptor_data_for_type(pdevice, binding->descriptorType);
if (anv_needs_descriptor_buffer(binding->descriptorType, desc_data))
needs_descriptor_buffer = true;
@@ -428,6 +475,10 @@ VkResult anv_CreateDescriptorSetLayout(
vk_find_struct_const(pCreateInfo->pNext,
DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
vk_find_struct_const(pCreateInfo->pNext,
MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);
for (uint32_t b = 0; b < num_bindings; b++) {
/* We stashed the pCreateInfo->pBindings[] index (plus one) in the
* immutable_samplers pointer. Check for NULL (empty binding) and then
@@ -470,9 +521,11 @@ VkResult anv_CreateDescriptorSetLayout(
}
}
set_layout->binding[b].data =
anv_descriptor_data_for_type(device->physical,
binding->descriptorType);
set_layout->binding[b].data = binding->descriptorType == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE ?
anv_descriptor_data_for_mutable_type(device->physical, mutable_info, b) :
anv_descriptor_data_for_type(device->physical,
binding->descriptorType);
set_layout->binding[b].array_size = binding->descriptorCount;
set_layout->binding[b].descriptor_index = set_layout->descriptor_count;
set_layout->descriptor_count += binding->descriptorCount;
@@ -485,6 +538,7 @@ VkResult anv_CreateDescriptorSetLayout(
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_MUTABLE_VALVE:
set_layout->binding[b].max_plane_count = 1;
if (binding->pImmutableSamplers) {
set_layout->binding[b].immutable_samplers = samplers;
@@ -522,6 +576,10 @@ VkResult anv_CreateDescriptorSetLayout(
break;
}
set_layout->binding[b].descriptor_stride = binding->descriptorType == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE ?
anv_descriptor_size_for_mutable_type(device->physical, mutable_info, b) :
anv_descriptor_size(&set_layout->binding[b]);
if (binding->descriptorType ==
VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
/* Inline uniform blocks are specified to use the descriptor array
@@ -533,8 +591,7 @@ VkResult anv_CreateDescriptorSetLayout(
descriptor_buffer_size += binding->descriptorCount;
} else {
set_layout->binding[b].descriptor_offset = descriptor_buffer_size;
descriptor_buffer_size += anv_descriptor_size(&set_layout->binding[b]) *
binding->descriptorCount;
descriptor_buffer_size += set_layout->binding[b].descriptor_stride * binding->descriptorCount;
}
set_layout->shader_stages |= binding->stageFlags;
@@ -627,7 +684,7 @@ anv_descriptor_set_layout_descriptor_buffer_size(const struct anv_descriptor_set
set_size = set_layout->descriptor_buffer_size - shrink;
} else {
set_size = set_layout->descriptor_buffer_size -
shrink * anv_descriptor_size(dynamic_binding);
shrink * dynamic_binding->descriptor_stride;
}
return ALIGN(set_size, ANV_UBO_ALIGNMENT);
@@ -800,20 +857,29 @@ VkResult anv_CreateDescriptorPool(
const VkDescriptorPoolInlineUniformBlockCreateInfoEXT *inline_info =
vk_find_struct_const(pCreateInfo->pNext,
DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT);
const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
vk_find_struct_const(pCreateInfo->pNext,
MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);
uint32_t descriptor_count = 0;
uint32_t buffer_view_count = 0;
uint32_t descriptor_bo_size = 0;
for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
enum anv_descriptor_data desc_data =
anv_descriptor_data_for_type(device->physical,
pCreateInfo->pPoolSizes[i].type);
enum anv_descriptor_data desc_data = pCreateInfo->pPoolSizes[i].type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE ?
anv_descriptor_data_for_mutable_type(device->physical,
mutable_info, i) :
anv_descriptor_data_for_type(device->physical,
pCreateInfo->pPoolSizes[i].type);
if (desc_data & ANV_DESCRIPTOR_BUFFER_VIEW)
buffer_view_count += pCreateInfo->pPoolSizes[i].descriptorCount;
unsigned desc_data_size = anv_descriptor_data_size(desc_data) *
pCreateInfo->pPoolSizes[i].descriptorCount;
unsigned desc_data_size = pCreateInfo->pPoolSizes[i].type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE ?
anv_descriptor_size_for_mutable_type(device->physical, mutable_info, i) :
anv_descriptor_data_size(desc_data);
desc_data_size *= pCreateInfo->pPoolSizes[i].descriptorCount;
/* Combined image sampler descriptors can take up to 3 slots if they
* hold a YCbCr image.
@@ -1289,7 +1355,8 @@ anv_descriptor_set_write_image_view(struct anv_device *device,
* set initialization to set the bindless samplers.
*/
assert(type == bind_layout->type ||
type == VK_DESCRIPTOR_TYPE_SAMPLER);
type == VK_DESCRIPTOR_TYPE_SAMPLER ||
bind_layout->type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE);
switch (type) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
@@ -1323,10 +1390,15 @@ anv_descriptor_set_write_image_view(struct anv_device *device,
};
void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
element * anv_descriptor_size(bind_layout);
memset(desc_map, 0, anv_descriptor_size(bind_layout));
element * bind_layout->descriptor_stride;
memset(desc_map, 0, bind_layout->descriptor_stride);
enum anv_descriptor_data data =
bind_layout->type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE ?
anv_descriptor_data_for_type(device->physical, type) :
bind_layout->data;
if (bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
if (data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
struct anv_sampled_image_descriptor desc_data[3];
memset(desc_data, 0, sizeof(desc_data));
@@ -1356,8 +1428,8 @@ anv_descriptor_set_write_image_view(struct anv_device *device,
if (image_view == NULL)
return;
if (bind_layout->data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
assert(!(bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM));
if (data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
assert(!(data & ANV_DESCRIPTOR_IMAGE_PARAM));
assert(image_view->n_planes == 1);
struct anv_storage_image_descriptor desc_data = {
.vanilla = anv_surface_state_to_handle(
@@ -1368,7 +1440,7 @@ anv_descriptor_set_write_image_view(struct anv_device *device,
memcpy(desc_map, &desc_data, sizeof(desc_data));
}
if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
if (data & ANV_DESCRIPTOR_IMAGE_PARAM) {
/* Storage images can only ever have one plane */
assert(image_view->n_planes == 1);
const struct brw_image_param *image_param =
@@ -1377,8 +1449,8 @@ anv_descriptor_set_write_image_view(struct anv_device *device,
anv_descriptor_set_write_image_param(desc_map, image_param);
}
if (bind_layout->data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE) {
assert(!(bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE));
if (data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE) {
assert(!(data & ANV_DESCRIPTOR_SAMPLED_IMAGE));
assert(image_view);
struct anv_texture_swizzle_descriptor desc_data[3];
memset(desc_data, 0, sizeof(desc_data));
@@ -1411,14 +1483,20 @@ anv_descriptor_set_write_buffer_view(struct anv_device *device,
struct anv_descriptor *desc =
&set->descriptors[bind_layout->descriptor_index + element];
assert(type == bind_layout->type);
assert(type == bind_layout->type ||
bind_layout->type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE);
enum anv_descriptor_data data =
bind_layout->type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE ?
anv_descriptor_data_for_type(device->physical, type) :
bind_layout->data;
void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
element * anv_descriptor_size(bind_layout);
element * bind_layout->descriptor_stride;
if (buffer_view == NULL) {
*desc = (struct anv_descriptor) { .type = type, };
memset(desc_map, 0, anv_descriptor_size(bind_layout));
memset(desc_map, 0, bind_layout->descriptor_stride);
return;
}
@@ -1427,15 +1505,15 @@ anv_descriptor_set_write_buffer_view(struct anv_device *device,
.buffer_view = buffer_view,
};
if (bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
if (data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
struct anv_sampled_image_descriptor desc_data = {
.image = anv_surface_state_to_handle(buffer_view->surface_state),
};
memcpy(desc_map, &desc_data, sizeof(desc_data));
}
if (bind_layout->data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
assert(!(bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM));
if (data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
assert(!(data & ANV_DESCRIPTOR_IMAGE_PARAM));
struct anv_storage_image_descriptor desc_data = {
.vanilla = anv_surface_state_to_handle(
buffer_view->storage_surface_state),
@@ -1445,7 +1523,7 @@ anv_descriptor_set_write_buffer_view(struct anv_device *device,
memcpy(desc_map, &desc_data, sizeof(desc_data));
}
if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
if (data & ANV_DESCRIPTOR_IMAGE_PARAM) {
anv_descriptor_set_write_image_param(desc_map,
&buffer_view->lowered_storage_image_param);
}
@@ -1467,19 +1545,24 @@ anv_descriptor_set_write_buffer(struct anv_device *device,
struct anv_descriptor *desc =
&set->descriptors[bind_layout->descriptor_index + element];
assert(type == bind_layout->type);
assert(type == bind_layout->type ||
bind_layout->type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE);
void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
element * anv_descriptor_size(bind_layout);
element * bind_layout->descriptor_stride;
if (buffer == NULL) {
*desc = (struct anv_descriptor) { .type = type, };
memset(desc_map, 0, anv_descriptor_size(bind_layout));
memset(desc_map, 0, bind_layout->descriptor_stride);
return;
}
struct anv_address bind_addr = anv_address_add(buffer->address, offset);
uint64_t bind_range = anv_buffer_get_range(buffer, offset, range);
enum anv_descriptor_data data =
bind_layout->type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE ?
anv_descriptor_data_for_type(device->physical, type) :
bind_layout->data;
/* We report a bounds checking alignment of 32B for the sake of block
* messages which read an entire register worth at a time.
@@ -1497,7 +1580,7 @@ anv_descriptor_set_write_buffer(struct anv_device *device,
.range = range,
};
} else {
assert(bind_layout->data & ANV_DESCRIPTOR_BUFFER_VIEW);
assert(data & ANV_DESCRIPTOR_BUFFER_VIEW);
struct anv_buffer_view *bview =
&set->buffer_views[bind_layout->buffer_view_index + element];
@@ -1528,7 +1611,7 @@ anv_descriptor_set_write_buffer(struct anv_device *device,
};
}
if (bind_layout->data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
if (data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
struct anv_address_range_descriptor desc_data = {
.address = anv_address_physical(bind_addr),
.range = bind_range,
@@ -1567,7 +1650,7 @@ anv_descriptor_set_write_acceleration_structure(struct anv_device *device,
struct anv_descriptor *desc =
&set->descriptors[bind_layout->descriptor_index + element];
assert(bind_layout->type == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
assert(bind_layout->data & ANV_DESCRIPTOR_ADDRESS_RANGE);
*desc = (struct anv_descriptor) {
.type = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR,
};
@@ -1577,13 +1660,86 @@ anv_descriptor_set_write_acceleration_structure(struct anv_device *device,
desc_data.address = anv_address_physical(accel->address);
desc_data.range = accel->size;
}
assert(anv_descriptor_size(bind_layout) == sizeof(desc_data));
assert(sizeof(desc_data) <= bind_layout->descriptor_stride);
void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
element * sizeof(desc_data);
element * bind_layout->descriptor_stride;
memcpy(desc_map, &desc_data, sizeof(desc_data));
}
static void
anv_copy_descriptor_set(const struct anv_device *device, const VkCopyDescriptorSet *copy)
{
ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
ANV_FROM_HANDLE(anv_descriptor_set, dst, copy->dstSet);
const struct anv_descriptor_set_binding_layout *src_layout =
&src->layout->binding[copy->srcBinding];
const struct anv_descriptor_set_binding_layout *dst_layout =
&dst->layout->binding[copy->dstBinding];
/* Copy CPU side data */
for (uint32_t j = 0; j < copy->descriptorCount; j++) {
struct anv_descriptor *src_desc =
&src->descriptors[src_layout->descriptor_index];
src_desc += copy->srcArrayElement;
struct anv_descriptor *dst_desc =
&dst->descriptors[dst_layout->descriptor_index];
dst_desc += copy->dstArrayElement;
struct anv_buffer_view *dst_bview =
&dst->buffer_views[dst_layout->buffer_view_index +
copy->dstArrayElement];
struct anv_buffer_view *src_bview =
&src->buffer_views[src_layout->buffer_view_index +
copy->srcArrayElement];
/* When the source descriptor type is mutable, we cannot guess
* the actual data type in the descriptor from the layout. To
* know what is in there, look at the type of the descriptor that
* was written (anv_descriptor::type).
*/
enum anv_descriptor_data desc_data = src_layout->type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE ?
anv_descriptor_data_for_type(device->physical, src_desc[j].type) :
src_layout->data;
/* If ANV_DESCRIPTOR_BUFFER_VIEW is present in the source descriptor,
* it means we're using an anv_buffer_view allocated by the source
* descriptor set. In that case we want to carefully copy it because
* its lifecycle is tied to the source descriptor set, not the
* destination descriptor set.
*/
if (desc_data & ANV_DESCRIPTOR_BUFFER_VIEW) {
dst_bview[j].format = src_bview[j].format;
dst_bview[j].range = src_bview[j].range;
dst_bview[j].address = src_bview[j].address;
memcpy(dst_bview[j].surface_state.map,
src_bview[j].surface_state.map,
src_bview[j].surface_state.alloc_size);
dst_desc[j].type = src_desc[j].type;
dst_desc[j].buffer_view = &dst_bview[j];
} else {
dst_desc[j] = src_desc[j];
}
}
unsigned min_stride = MIN2(src_layout->descriptor_stride, dst_layout->descriptor_stride);
if (min_stride > 0) {
for (uint32_t j = 0; j < copy->descriptorCount; j++) {
void *src_element = src->desc_mem.map + src_layout->descriptor_offset +
(copy->srcArrayElement + j) * src_layout->descriptor_stride;
void *dst_element = dst->desc_mem.map + dst_layout->descriptor_offset +
(copy->dstArrayElement + j) * dst_layout->descriptor_stride;
/* Copy GPU visible data */
memcpy(dst_element, src_element, min_stride);
}
}
}
void anv_UpdateDescriptorSets(
VkDevice _device,
uint32_t descriptorWriteCount,
@@ -1702,46 +1858,7 @@ void anv_UpdateDescriptorSets(
copy->srcArrayElement,
copy->descriptorCount);
} else {
struct anv_buffer_view *dst_bview =
&dst->buffer_views[dst_layout->buffer_view_index +
copy->dstArrayElement];
struct anv_buffer_view *src_bview =
&src->buffer_views[src_layout->buffer_view_index +
copy->srcArrayElement];
/* If ANV_DESCRIPTOR_BUFFER_VIEW is present in the source descriptor,
* it means we're using an anv_buffer_view allocated by the source
* descriptor set. In that case we want to carefully copy it because
* its lifecycle is tied to the source descriptor set, not the
* destination descriptor set.
*/
if (src_layout->data & ANV_DESCRIPTOR_BUFFER_VIEW) {
assert(dst_layout->data & ANV_DESCRIPTOR_BUFFER_VIEW);
for (uint32_t j = 0; j < copy->descriptorCount; j++) {
dst_bview[j].format = src_bview[j].format;
dst_bview[j].range = src_bview[j].range;
dst_bview[j].address = src_bview[j].address;
memcpy(dst_bview[j].surface_state.map,
src_bview[j].surface_state.map,
src_bview[j].surface_state.alloc_size);
dst_desc[j].type = src_desc[j].type;
dst_desc[j].buffer_view = &dst_bview[j];
}
} else {
for (uint32_t j = 0; j < copy->descriptorCount; j++)
dst_desc[j] = src_desc[j];
}
unsigned desc_size = anv_descriptor_size(src_layout);
if (desc_size > 0) {
assert(desc_size == anv_descriptor_size(dst_layout));
memcpy(dst->desc_mem.map + dst_layout->descriptor_offset +
copy->dstArrayElement * desc_size,
src->desc_mem.map + src_layout->descriptor_offset +
copy->srcArrayElement * desc_size,
copy->descriptorCount * desc_size);
}
anv_copy_descriptor_set(device, copy);
}
}
}

@@ -310,6 +310,7 @@ get_device_extensions(const struct anv_physical_device *device,
.INTEL_shader_integer_functions2 = device->info.ver >= 8,
.EXT_multi_draw = true,
.NV_compute_shader_derivatives = true,
.VALVE_mutable_descriptor_type = true,
};
}
@@ -1568,6 +1569,13 @@ void anv_GetPhysicalDeviceFeatures2(
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE: {
VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE *features =
(VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE *)ext;
features->mutableDescriptorType = true;
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR: {
VkPhysicalDevicePerformanceQueryFeaturesKHR *feature =
(VkPhysicalDevicePerformanceQueryFeaturesKHR *)ext;

@@ -94,7 +94,7 @@ add_binding(struct apply_pipeline_layout_state *state,
* this binding. This lets us be lazy and call this function constantly
* without worrying about unnecessarily enabling the buffer.
*/
if (anv_descriptor_size(bind_layout))
if (bind_layout->descriptor_stride)
state->set[set].desc_buffer_used = true;
}
@@ -216,12 +216,7 @@ descriptor_address_format(nir_intrinsic_instr *intrin,
{
assert(intrin->intrinsic == nir_intrinsic_vulkan_resource_index);
uint32_t set = nir_intrinsic_desc_set(intrin);
uint32_t binding = nir_intrinsic_binding(intrin);
const struct anv_descriptor_set_binding_layout *bind_layout =
&state->layout->set[set].layout->binding[binding];
return addr_format_for_desc_type(bind_layout->type, state);
return addr_format_for_desc_type(nir_intrinsic_desc_type(intrin), state);
}
static nir_intrinsic_instr *
@@ -341,7 +336,7 @@ build_res_index(nir_builder *b, uint32_t set, uint32_t binding,
bind_layout->dynamic_offset_index;
}
const uint32_t packed = (set_idx << 16) | dynamic_offset_index;
const uint32_t packed = (bind_layout->descriptor_stride << 16) | (set_idx << 8) | dynamic_offset_index;
return nir_vec4(b, nir_imm_int(b, packed),
nir_imm_int(b, bind_layout->descriptor_offset),
@@ -374,6 +369,7 @@ struct res_index_defs {
nir_ssa_def *dyn_offset_base;
nir_ssa_def *desc_offset_base;
nir_ssa_def *array_index;
nir_ssa_def *desc_stride;
};
static struct res_index_defs
@@ -382,8 +378,9 @@ unpack_res_index(nir_builder *b, nir_ssa_def *index)
struct res_index_defs defs;
nir_ssa_def *packed = nir_channel(b, index, 0);
defs.set_idx = nir_extract_u16(b, packed, nir_imm_int(b, 1));
defs.dyn_offset_base = nir_extract_u16(b, packed, nir_imm_int(b, 0));
defs.desc_stride = nir_extract_u8(b, packed, nir_imm_int(b, 2));
defs.set_idx = nir_extract_u8(b, packed, nir_imm_int(b, 1));
defs.dyn_offset_base = nir_extract_u8(b, packed, nir_imm_int(b, 0));
defs.desc_offset_base = nir_channel(b, index, 1);
defs.array_index = nir_umin(b, nir_channel(b, index, 2),
@@ -449,15 +446,9 @@ build_desc_addr(nir_builder *b,
* the array index is ignored as they are only allowed to be a single
* descriptor (not an array) and there is no concept of a "stride".
*
* We use the bind_layout, if available, because it provides a more
* accurate descriptor size.
*/
const unsigned stride = bind_layout ?
anv_descriptor_size(bind_layout) :
anv_descriptor_type_size(state->pdevice, desc_type);
desc_offset =
nir_iadd(b, desc_offset, nir_imul_imm(b, res.array_index, stride));
nir_iadd(b, desc_offset, nir_imul(b, res.array_index, res.desc_stride));
}
switch (state->desc_addr_format) {

@@ -1801,15 +1801,13 @@ struct anv_descriptor_set_binding_layout {
/* Offset into the descriptor buffer where this descriptor lives */
uint32_t descriptor_offset;
/* Precomputed stride */
unsigned descriptor_stride;
/* Immutable samplers (or NULL if no immutable samplers) */
struct anv_sampler **immutable_samplers;
};
unsigned anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout);
unsigned anv_descriptor_type_size(const struct anv_physical_device *pdevice,
VkDescriptorType type);
bool anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
const struct anv_descriptor_set_binding_layout *binding,
bool sampler);