anv: Move device memory maps back to anv_device_memory
This effectively partially reverts 13fe43714c
("anv: Add helpers in anv_allocator for mapping BOs") where we both added helpers and reworked memory mapping to stash the maps on the BO. The problem comes with external memory. Due to GEM rules, if a memory object is exported and then imported or imported twice, we have to deduplicate the anv_bo struct but, according to Vulkan rules, they are separate VkDeviceMemory objects. This means we either need to always map whole objects and reference-count the map or we need to handle maps separately for separate VkDeviceMemory objects. For now, take the latter path. Fixes: 13fe43714c
("anv: Add helpers in anv_allocator for mapping BOs") Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/5612 Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13795>
This commit is contained in:
parent
32c0c5fcd9
commit
e6b4678fdb
|
@ -1606,7 +1606,7 @@ anv_bo_finish(struct anv_device *device, struct anv_bo *bo)
|
|||
anv_vma_free(device, bo->offset, bo->size + bo->_ccs_size);
|
||||
|
||||
if (bo->map && !bo->from_host_ptr)
|
||||
anv_device_unmap_bo(device, bo);
|
||||
anv_device_unmap_bo(device, bo, bo->map, bo->size);
|
||||
|
||||
assert(bo->gem_handle != 0);
|
||||
anv_gem_close(device, bo->gem_handle);
|
||||
|
@ -1717,7 +1717,8 @@ anv_device_alloc_bo(struct anv_device *device,
|
|||
};
|
||||
|
||||
if (alloc_flags & ANV_BO_ALLOC_MAPPED) {
|
||||
VkResult result = anv_device_map_bo(device, &new_bo, 0, size, 0, NULL);
|
||||
VkResult result = anv_device_map_bo(device, &new_bo, 0, size,
|
||||
0 /* gem_flags */, &new_bo.map);
|
||||
if (unlikely(result != VK_SUCCESS)) {
|
||||
anv_gem_close(device, new_bo.gem_handle);
|
||||
return result;
|
||||
|
@ -1785,7 +1786,6 @@ anv_device_map_bo(struct anv_device *device,
|
|||
{
|
||||
assert(!bo->is_wrapper && !bo->from_host_ptr);
|
||||
assert(size > 0);
|
||||
assert(bo->map == NULL && bo->map_size == 0);
|
||||
|
||||
void *map = anv_gem_mmap(device, bo->gem_handle, offset, size, gem_flags);
|
||||
if (unlikely(map == MAP_FAILED))
|
||||
|
@ -1793,9 +1793,6 @@ anv_device_map_bo(struct anv_device *device,
|
|||
|
||||
assert(map != NULL);
|
||||
|
||||
bo->map = map;
|
||||
bo->map_size = size;
|
||||
|
||||
if (map_out)
|
||||
*map_out = map;
|
||||
|
||||
|
@ -1804,15 +1801,12 @@ anv_device_map_bo(struct anv_device *device,
|
|||
|
||||
void
|
||||
anv_device_unmap_bo(struct anv_device *device,
|
||||
struct anv_bo *bo)
|
||||
struct anv_bo *bo,
|
||||
void *map, size_t map_size)
|
||||
{
|
||||
assert(!bo->is_wrapper && !bo->from_host_ptr);
|
||||
assert(bo->map != NULL && bo->map_size > 0);
|
||||
|
||||
anv_gem_munmap(device, bo->map, bo->map_size);
|
||||
|
||||
bo->map = NULL;
|
||||
bo->map_size = 0;
|
||||
anv_gem_munmap(device, map, map_size);
|
||||
}
|
||||
|
||||
VkResult
|
||||
|
|
|
@ -3677,6 +3677,9 @@ VkResult anv_AllocateMemory(
|
|||
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
mem->type = mem_type;
|
||||
mem->map = NULL;
|
||||
mem->map_size = 0;
|
||||
mem->map_delta = 0;
|
||||
mem->ahw = NULL;
|
||||
mem->host_ptr = NULL;
|
||||
|
||||
|
@ -3980,6 +3983,9 @@ void anv_FreeMemory(
|
|||
list_del(&mem->link);
|
||||
pthread_mutex_unlock(&device->mutex);
|
||||
|
||||
if (mem->map)
|
||||
anv_UnmapMemory(_device, _mem);
|
||||
|
||||
p_atomic_add(&device->physical->memory.heaps[mem->type->heapIndex].used,
|
||||
-mem->bo->size);
|
||||
|
||||
|
@ -4037,7 +4043,7 @@ VkResult anv_MapMemory(
|
|||
*
|
||||
* "memory must not be currently host mapped"
|
||||
*/
|
||||
if (mem->bo->map != NULL) {
|
||||
if (mem->map != NULL) {
|
||||
return vk_errorf(device, VK_ERROR_MEMORY_MAP_FAILED,
|
||||
"Memory object already mapped.");
|
||||
}
|
||||
|
@ -4066,8 +4072,10 @@ VkResult anv_MapMemory(
|
|||
if (result != VK_SUCCESS)
|
||||
return result;
|
||||
|
||||
mem->map = map;
|
||||
mem->map_size = map_size;
|
||||
mem->map_delta = (offset - map_offset);
|
||||
*ppData = map + mem->map_delta;
|
||||
*ppData = mem->map + mem->map_delta;
|
||||
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
@ -4082,8 +4090,10 @@ void anv_UnmapMemory(
|
|||
if (mem == NULL || mem->host_ptr)
|
||||
return;
|
||||
|
||||
anv_device_unmap_bo(device, mem->bo);
|
||||
anv_device_unmap_bo(device, mem->bo, mem->map, mem->map_size);
|
||||
|
||||
mem->map = NULL;
|
||||
mem->map_size = 0;
|
||||
mem->map_delta = 0;
|
||||
}
|
||||
|
||||
|
@ -4095,14 +4105,14 @@ clflush_mapped_ranges(struct anv_device *device,
|
|||
for (uint32_t i = 0; i < count; i++) {
|
||||
ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
|
||||
uint64_t map_offset = ranges[i].offset + mem->map_delta;
|
||||
if (map_offset >= mem->bo->map_size)
|
||||
if (map_offset >= mem->map_size)
|
||||
continue;
|
||||
|
||||
if (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
|
||||
continue;
|
||||
|
||||
intel_clflush_range(mem->bo->map + map_offset,
|
||||
MIN2(ranges[i].size, mem->bo->map_size - map_offset));
|
||||
intel_clflush_range(mem->map + map_offset,
|
||||
MIN2(ranges[i].size, mem->map_size - map_offset));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -452,17 +452,13 @@ struct anv_bo {
|
|||
/** Size of the buffer not including implicit aux */
|
||||
uint64_t size;
|
||||
|
||||
/* Map for mapped BOs.
|
||||
/* Map for internally mapped BOs.
|
||||
*
|
||||
* If ANV_BO_ALLOC_MAPPED is set in flags, this is the map for the whole
|
||||
* BO. If ANV_BO_WRAPPER is set in flags, map points to the wrapped BO.
|
||||
* Otherwise, this is the map for the currently mapped range mapped via
|
||||
* vkMapMemory().
|
||||
*/
|
||||
void *map;
|
||||
|
||||
size_t map_size;
|
||||
|
||||
/** Size of the implicit CCS range at the end of the buffer
|
||||
*
|
||||
* On Gfx12, CCS data is always a direct 1/256 scale-down. A single 64K
|
||||
|
@ -1413,7 +1409,8 @@ VkResult anv_device_map_bo(struct anv_device *device,
|
|||
uint32_t gem_flags,
|
||||
void **map_out);
|
||||
void anv_device_unmap_bo(struct anv_device *device,
|
||||
struct anv_bo *bo);
|
||||
struct anv_bo *bo,
|
||||
void *map, size_t map_size);
|
||||
VkResult anv_device_import_bo_from_host_ptr(struct anv_device *device,
|
||||
void *host_ptr, uint32_t size,
|
||||
enum anv_bo_alloc_flags alloc_flags,
|
||||
|
@ -1808,7 +1805,10 @@ struct anv_device_memory {
|
|||
struct anv_bo * bo;
|
||||
const struct anv_memory_type * type;
|
||||
|
||||
/* The map, from the user PoV is bo->map + map_delta */
|
||||
void * map;
|
||||
size_t map_size;
|
||||
|
||||
/* The map, from the user PoV is map + map_delta */
|
||||
uint64_t map_delta;
|
||||
|
||||
/* If set, we are holding reference to AHardwareBuffer
|
||||
|
|
Loading…
Reference in New Issue