radv: use vk_error() everywhere an error is returned
For consistency, and because it might help for debugging purposes.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
This commit is contained in:
parent
4e16c6a41e
commit
cd64a4f705
|
@@ -2526,7 +2526,7 @@ VkResult radv_EndCommandBuffer(
|
|||
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
|
||||
|
||||
if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
|
||||
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
|
||||
return cmd_buffer->record_result;
|
||||
}
|
||||
|
|
|
@@ -184,7 +184,7 @@ radv_physical_device_init(struct radv_physical_device *device,
|
|||
|
||||
fd = open(path, O_RDWR | O_CLOEXEC);
|
||||
if (fd < 0)
|
||||
return VK_ERROR_INCOMPATIBLE_DRIVER;
|
||||
return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
|
||||
|
||||
version = drmGetVersion(fd);
|
||||
if (!version) {
|
||||
|
@@ -196,7 +196,7 @@ radv_physical_device_init(struct radv_physical_device *device,
|
|||
if (strcmp(version->name, "amdgpu")) {
|
||||
drmFreeVersion(version);
|
||||
close(fd);
|
||||
return VK_ERROR_INCOMPATIBLE_DRIVER;
|
||||
return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
|
||||
}
|
||||
drmFreeVersion(version);
|
||||
|
||||
|
@@ -436,7 +436,7 @@ radv_enumerate_devices(struct radv_instance *instance)
|
|||
|
||||
max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
|
||||
if (max_devices < 1)
|
||||
return VK_ERROR_INCOMPATIBLE_DRIVER;
|
||||
return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
|
||||
|
||||
for (unsigned i = 0; i < (unsigned)max_devices; i++) {
|
||||
if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
|
||||
|
@@ -895,7 +895,7 @@ radv_queue_init(struct radv_device *device, struct radv_queue *queue,
|
|||
|
||||
queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
|
||||
if (!queue->hw_ctx)
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
@@ -1797,7 +1797,7 @@ fail:
|
|||
queue->device->ws->buffer_destroy(tess_factor_ring_bo);
|
||||
if (tess_offchip_ring_bo && tess_offchip_ring_bo != queue->tess_offchip_ring_bo)
|
||||
queue->device->ws->buffer_destroy(tess_offchip_ring_bo);
|
||||
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
}
|
||||
|
||||
static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
|
||||
|
@@ -1821,14 +1821,14 @@ static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
|
|||
if (counts->syncobj_count) {
|
||||
counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
|
||||
if (!counts->syncobj)
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
}
|
||||
|
||||
if (counts->sem_count) {
|
||||
counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
|
||||
if (!counts->sem) {
|
||||
free(counts->syncobj);
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -2227,7 +2227,7 @@ VkResult radv_MapMemory(
|
|||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
return VK_ERROR_MEMORY_MAP_FAILED;
|
||||
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
|
||||
}
|
||||
|
||||
void radv_UnmapMemory(
|
||||
|
@@ -2542,7 +2542,7 @@ VkResult radv_CreateFence(
|
|||
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
||||
|
||||
if (!fence)
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
memset(fence, 0, sizeof(*fence));
|
||||
fence->submitted = false;
|
||||
|
@@ -2550,7 +2550,7 @@ VkResult radv_CreateFence(
|
|||
fence->fence = device->ws->create_fence();
|
||||
if (!fence->fence) {
|
||||
vk_free2(&device->alloc, pAllocator, fence);
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
}
|
||||
|
||||
*pFence = radv_fence_to_handle(fence);
|
||||
|
@@ -2666,7 +2666,7 @@ VkResult radv_CreateSemaphore(
|
|||
sizeof(*sem), 8,
|
||||
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
||||
if (!sem)
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
sem->temp_syncobj = 0;
|
||||
/* create a syncobject if we are going to export this semaphore */
|
||||
|
@@ -2676,14 +2676,14 @@ VkResult radv_CreateSemaphore(
|
|||
int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
|
||||
if (ret) {
|
||||
vk_free2(&device->alloc, pAllocator, sem);
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
}
|
||||
sem->sem = NULL;
|
||||
} else {
|
||||
sem->sem = device->ws->create_sem(device->ws);
|
||||
if (!sem->sem) {
|
||||
vk_free2(&device->alloc, pAllocator, sem);
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
}
|
||||
sem->syncobj = 0;
|
||||
}
|
||||
|
@@ -2721,14 +2721,14 @@ VkResult radv_CreateEvent(
|
|||
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
||||
|
||||
if (!event)
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
event->bo = device->ws->buffer_create(device->ws, 8, 8,
|
||||
RADEON_DOMAIN_GTT,
|
||||
RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
|
||||
if (!event->bo) {
|
||||
vk_free2(&device->alloc, pAllocator, event);
|
||||
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
}
|
||||
|
||||
event->map = (uint64_t*)device->ws->buffer_map(event->bo);
|
||||
|
@@ -3506,7 +3506,7 @@ VkResult radv_GetMemoryFdKHR(VkDevice _device,
|
|||
|
||||
bool ret = radv_get_memory_fd(device, memory, pFD);
|
||||
if (ret == false)
|
||||
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
|
@@ -3521,7 +3521,7 @@ VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
|
|||
*
|
||||
* Since we only handle opaque handles for now, there are no FD properties.
|
||||
*/
|
||||
return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
|
||||
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
|
||||
}
|
||||
|
||||
VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
|
||||
|
@@ -3534,7 +3534,7 @@ VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
|
|||
|
||||
int ret = device->ws->import_syncobj(device->ws, pImportSemaphoreFdInfo->fd, &syncobj_handle);
|
||||
if (ret != 0)
|
||||
return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
|
||||
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
|
||||
|
||||
if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
|
||||
sem->temp_syncobj = syncobj_handle;
|
||||
|
|
|
@@ -1144,7 +1144,7 @@ unsupported:
|
|||
.maxResourceSize = 0,
|
||||
};
|
||||
|
||||
return VK_ERROR_FORMAT_NOT_SUPPORTED;
|
||||
return vk_error(VK_ERROR_FORMAT_NOT_SUPPORTED);
|
||||
}
|
||||
|
||||
VkResult radv_GetPhysicalDeviceImageFormatProperties(
|
||||
|
|
|
@@ -134,7 +134,7 @@ radv_pipeline_scratch_init(struct radv_device *device,
|
|||
if (scratch_bytes_per_wave && max_waves < min_waves) {
|
||||
/* Not really true at this moment, but will be true on first
|
||||
* execution. Avoid having hanging shaders. */
|
||||
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
}
|
||||
pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
|
||||
pipeline->max_waves = max_waves;
|
||||
|
|
|
@@ -206,7 +206,7 @@ radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
|
|||
|
||||
table = malloc(byte_size);
|
||||
if (table == NULL)
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
cache->hash_table = table;
|
||||
cache->table_size = table_size;
|
||||
|
|
|
@@ -754,7 +754,7 @@ VkResult radv_CreateQueryPool(
|
|||
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
||||
|
||||
if (!pool)
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
|
||||
switch(pCreateInfo->queryType) {
|
||||
|
@@ -784,7 +784,7 @@ VkResult radv_CreateQueryPool(
|
|||
|
||||
if (!pool->bo) {
|
||||
vk_free2(&device->alloc, pAllocator, pool);
|
||||
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
}
|
||||
|
||||
pool->ptr = device->ws->buffer_map(pool->bo);
|
||||
|
@@ -792,7 +792,7 @@ VkResult radv_CreateQueryPool(
|
|||
if (!pool->ptr) {
|
||||
device->ws->buffer_destroy(pool->bo);
|
||||
vk_free2(&device->alloc, pAllocator, pool);
|
||||
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
}
|
||||
memset(pool->ptr, 0, size);
|
||||
|
||||
|
|
|
@@ -664,7 +664,7 @@ radv_GetShaderInfoAMD(VkDevice _device,
|
|||
/* Spec doesn't indicate what to do if the stage is invalid, so just
|
||||
* return no info for this. */
|
||||
if (!variant)
|
||||
return VK_ERROR_FEATURE_NOT_PRESENT;
|
||||
return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
|
||||
|
||||
switch (infoType) {
|
||||
case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
|
||||
|
|
|
@@ -305,7 +305,7 @@ radv_wsi_create_prime_command_buffers(struct radv_device *device,
|
|||
swapchain->cmd_buffers = vk_alloc(alloc, (sizeof(VkCommandBuffer) * num_cmd_buffers), 8,
|
||||
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
|
||||
if (!swapchain->cmd_buffers)
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
memset(swapchain->cmd_buffers, 0, sizeof(VkCommandBuffer) * num_cmd_buffers);
|
||||
memset(swapchain->cmd_pools, 0, sizeof(VkCommandPool) * num_pools);
|
||||
|
|
Loading…
Reference in New Issue