diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 3f5e79942f5..85b6dfc6451 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -424,8 +424,11 @@ radv_destroy_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
       cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
 
    for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
-      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
-      vk_object_base_finish(&cmd_buffer->descriptors[i].push_set.set.base);
+      struct radv_descriptor_set_header *set = &cmd_buffer->descriptors[i].push_set.set;
+      free(set->mapped_ptr);
+      if (set->layout)
+         radv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
+      vk_object_base_finish(&set->base);
    }
 
    vk_object_base_finish(&cmd_buffer->meta_push_descriptors.base);
@@ -4787,7 +4790,13 @@ radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer, struct radv_de
    struct radv_descriptor_state *descriptors_state =
       radv_get_descriptors_state(cmd_buffer, bind_point);
    set->header.size = layout->size;
-   set->header.layout = layout;
+
+   if (set->header.layout != layout) {
+      if (set->header.layout)
+         radv_descriptor_set_layout_unref(cmd_buffer->device, set->header.layout);
+      radv_descriptor_set_layout_ref(layout);
+      set->header.layout = layout;
+   }
 
    if (descriptors_state->push_set.capacity < set->header.size) {
       size_t new_size = MAX2(set->header.size, 1024);
diff --git a/src/amd/vulkan/radv_descriptor_set.c b/src/amd/vulkan/radv_descriptor_set.c
index 56baa8ae73a..c891b55a313 100644
--- a/src/amd/vulkan/radv_descriptor_set.c
+++ b/src/amd/vulkan/radv_descriptor_set.c
@@ -88,6 +88,15 @@ radv_mutable_descriptor_type_size_alignment(const VkMutableDescriptorTypeListVAL
    return true;
 }
 
+void
+radv_descriptor_set_layout_destroy(struct radv_device *device,
+                                   struct radv_descriptor_set_layout *set_layout)
+{
+   assert(set_layout->ref_cnt == 0);
+   vk_object_base_finish(&set_layout->base);
+   vk_free2(&device->vk.alloc, NULL, set_layout);
+}
+
 VKAPI_ATTR VkResult VKAPI_CALL
 radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                const VkAllocationCallbacks *pAllocator,
@@ -134,8 +143,12 @@ radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCrea
       size += ycbcr_sampler_count * sizeof(struct radv_sampler_ycbcr_conversion);
    }
 
+   /* We need to allocate descriptor set layouts off the device allocator with DEVICE scope because
+    * they are reference counted and may not be destroyed when vkDestroyDescriptorSetLayout is
+    * called.
+    */
    set_layout =
-      vk_zalloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      vk_zalloc(&device->vk.alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
    if (!set_layout)
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -165,11 +178,11 @@ radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCrea
    VkResult result =
       vk_create_sorted_bindings(pCreateInfo->pBindings, pCreateInfo->bindingCount, &bindings);
    if (result != VK_SUCCESS) {
-      vk_object_base_finish(&set_layout->base);
-      vk_free2(&device->vk.alloc, pAllocator, set_layout);
+      radv_descriptor_set_layout_destroy(device, set_layout);
       return vk_error(device, result);
    }
 
+   set_layout->ref_cnt = 1;
    set_layout->binding_count = num_bindings;
    set_layout->shader_stages = 0;
    set_layout->dynamic_shader_stages = 0;
@@ -346,8 +359,7 @@ radv_DestroyDescriptorSetLayout(VkDevice _device, VkDescriptorSetLayout _set_lay
    if (!set_layout)
       return;
 
-   vk_object_base_finish(&set_layout->base);
-   vk_free2(&device->vk.alloc, pAllocator, set_layout);
+   radv_descriptor_set_layout_unref(device, set_layout);
 }
 
 VKAPI_ATTR void VKAPI_CALL
@@ -494,6 +506,7 @@ radv_CreatePipelineLayout(VkDevice _device, const VkPipelineLayoutCreateInfo *pC
    for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
       RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[set]);
       layout->set[set].layout = set_layout;
+      radv_descriptor_set_layout_ref(set_layout);
 
       layout->set[set].dynamic_offset_start = dynamic_offset_count;
       layout->set[set].dynamic_offset_count = 0;
@@ -507,11 +520,12 @@ radv_CreatePipelineLayout(VkDevice _device, const VkPipelineLayoutCreateInfo *pC
       dynamic_offset_count += layout->set[set].dynamic_offset_count;
       dynamic_shader_stages |= layout->set[set].dynamic_offset_stages;
 
-      /* Hash the entire set layout except for the vk_object_base. The
-       * rest of the set layout is carefully constructed to not have
-       * pointers so a full hash instead of a per-field hash should be ok. */
-      _mesa_sha1_update(&ctx, (const char *)set_layout + sizeof(struct vk_object_base),
-                        set_layout->layout_size - sizeof(struct vk_object_base));
+      /* Hash the entire set layout except for the vk_object_base and the reference counter. The
+       * rest of the set layout is carefully constructed to not have pointers so a full hash instead
+       * of a per-field hash should be ok. */
+      uint32_t hash_offset = sizeof(struct vk_object_base) + sizeof(uint32_t);
+      _mesa_sha1_update(&ctx, (const char *)set_layout + hash_offset,
+                        set_layout->layout_size - hash_offset);
    }
 
    layout->dynamic_offset_count = dynamic_offset_count;
@@ -541,14 +555,17 @@ radv_DestroyPipelineLayout(VkDevice _device, VkPipelineLayout _pipelineLayout,
    if (!pipeline_layout)
      return;
 
+   for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
+      radv_descriptor_set_layout_unref(device, pipeline_layout->set[i].layout);
+
    vk_object_base_finish(&pipeline_layout->base);
    vk_free2(&device->vk.alloc, pAllocator, pipeline_layout);
 }
 
 static VkResult
 radv_descriptor_set_create(struct radv_device *device, struct radv_descriptor_pool *pool,
-                           const struct radv_descriptor_set_layout *layout,
-                           const uint32_t *variable_count, struct radv_descriptor_set **out_set)
+                           struct radv_descriptor_set_layout *layout, const uint32_t *variable_count,
+                           struct radv_descriptor_set **out_set)
 {
    struct radv_descriptor_set *set;
    uint32_t buffer_count = layout->buffer_count;
@@ -667,6 +684,8 @@ radv_descriptor_set_create(struct radv_device *device, struct radv_descriptor_po
          }
       }
    }
+
+   radv_descriptor_set_layout_ref(layout);
    *out_set = set;
    return VK_SUCCESS;
 }
@@ -677,6 +696,8 @@ radv_descriptor_set_destroy(struct radv_device *device, struct radv_descriptor_p
 {
    assert(!pool->host_memory_base);
 
+   radv_descriptor_set_layout_unref(device, set->header.layout);
+
    if (free_bo && !pool->host_memory_base) {
      for (int i = 0; i < pool->entry_count; ++i) {
        if (pool->entries[i].set == set) {
diff --git a/src/amd/vulkan/radv_descriptor_set.h b/src/amd/vulkan/radv_descriptor_set.h
index 1038bc52b16..42586b8180a 100644
--- a/src/amd/vulkan/radv_descriptor_set.h
+++ b/src/amd/vulkan/radv_descriptor_set.h
@@ -53,6 +53,13 @@ struct radv_descriptor_set_binding_layout {
 struct radv_descriptor_set_layout {
    struct vk_object_base base;
 
+   /* Descriptor set layouts can be destroyed at almost any time */
+   uint32_t ref_cnt;
+
+   /* Everything below is hashed and shouldn't contain any pointers. Be careful when modifying this
+    * structure.
+    */
+
    /* The create flags for this descriptor set layout */
    VkDescriptorSetLayoutCreateFlags flags;
 
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 7d2038dd8ee..ad8e2df6cbd 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -894,7 +894,7 @@ struct radv_descriptor_range {
 
 struct radv_descriptor_set_header {
    struct vk_object_base base;
-   const struct radv_descriptor_set_layout *layout;
+   struct radv_descriptor_set_layout *layout;
    uint32_t size;
    uint32_t buffer_count;
 
@@ -971,6 +971,25 @@ struct radv_descriptor_update_template {
    struct radv_descriptor_update_template_entry entry[0];
 };
 
+void radv_descriptor_set_layout_destroy(struct radv_device *device,
+                                        struct radv_descriptor_set_layout *set_layout);
+
+static inline void
+radv_descriptor_set_layout_ref(struct radv_descriptor_set_layout *set_layout)
+{
+   assert(set_layout && set_layout->ref_cnt >= 1);
+   p_atomic_inc(&set_layout->ref_cnt);
+}
+
+static inline void
+radv_descriptor_set_layout_unref(struct radv_device *device,
+                                 struct radv_descriptor_set_layout *set_layout)
+{
+   assert(set_layout && set_layout->ref_cnt >= 1);
+   if (p_atomic_dec_zero(&set_layout->ref_cnt))
+      radv_descriptor_set_layout_destroy(device, set_layout);
+}
+
 struct radv_buffer {
    struct vk_object_base base;
    VkDeviceSize size;
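
Outside the patch proper, here is a minimal, self-contained sketch of the ref/unref pattern this change introduces, written with C11 atomics in place of Mesa's p_atomic_inc/p_atomic_dec_zero helpers; the layout_* names below are illustrative only and are not RADV code. As with radv_descriptor_set_layout_ref/_unref, the object starts with one reference owned by the API handle, each consumer (pipeline layout, descriptor set, push descriptor set) takes its own reference, and whichever caller drops the count to zero destroys the object.

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-in for radv_descriptor_set_layout: created with a
 * reference count of 1 (owned by the API handle) and freed only when the
 * last holder drops its reference. */
struct layout {
   atomic_uint ref_cnt;
   /* ... hashed payload (no pointers) would follow here ... */
};

static struct layout *
layout_create(void)
{
   struct layout *l = calloc(1, sizeof(*l));
   if (!l)
      return NULL;
   atomic_init(&l->ref_cnt, 1); /* the reference held by the API handle */
   return l;
}

static void
layout_ref(struct layout *l)
{
   assert(atomic_load(&l->ref_cnt) >= 1);
   atomic_fetch_add_explicit(&l->ref_cnt, 1, memory_order_relaxed);
}

static void
layout_unref(struct layout *l)
{
   assert(atomic_load(&l->ref_cnt) >= 1);
   /* atomic_fetch_sub returns the previous value, so the caller that takes
    * the count from 1 to 0 is the one that frees the object. */
   if (atomic_fetch_sub_explicit(&l->ref_cnt, 1, memory_order_acq_rel) == 1)
      free(l);
}

int
main(void)
{
   struct layout *l = layout_create();
   if (!l)
      return 1;
   layout_ref(l);   /* a consumer (e.g. a pipeline layout) takes a reference */
   layout_unref(l); /* the API handle is destroyed early */
   layout_unref(l); /* the last holder goes away: this call frees the object */
   return 0;
}

This also illustrates why ref_cnt sits directly after vk_object_base in struct radv_descriptor_set_layout: the pipeline-layout hash only has to skip one fixed-size prefix, which is what the hash_offset computation above does.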