2018-08-08 23:23:57 +01:00
|
|
|
/*
|
|
|
|
* Copyright © 2016 Red Hat.
|
|
|
|
* Copyright © 2016 Bas Nieuwenhuizen
|
|
|
|
*
|
|
|
|
* based in part on anv driver which is:
|
|
|
|
* Copyright © 2015 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
2019-01-09 22:16:01 +00:00
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
|
|
* DEALINGS IN THE SOFTWARE.
|
2018-08-08 23:23:57 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include "tu_private.h"
|
2019-01-09 22:16:01 +00:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
#include <fcntl.h>
|
2019-01-10 22:07:50 +00:00
|
|
|
#include <libsync.h>
|
2018-08-08 23:23:57 +01:00
|
|
|
#include <stdbool.h>
|
|
|
|
#include <string.h>
|
2018-11-07 04:26:45 +00:00
|
|
|
#include <sys/mman.h>
|
2018-08-17 13:35:59 +01:00
|
|
|
#include <sys/sysinfo.h>
|
2018-08-08 23:23:57 +01:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <xf86drm.h>
|
2019-01-09 22:16:01 +00:00
|
|
|
|
2019-09-18 13:11:47 +01:00
|
|
|
#include "compiler/glsl_types.h"
|
2019-01-09 22:16:01 +00:00
|
|
|
#include "util/debug.h"
|
|
|
|
#include "util/disk_cache.h"
|
2020-05-11 17:46:04 +01:00
|
|
|
#include "util/u_atomic.h"
|
2019-01-09 22:16:01 +00:00
|
|
|
#include "vk_format.h"
|
|
|
|
#include "vk_util.h"
|
2018-08-08 23:23:57 +01:00
|
|
|
|
2019-05-13 23:21:06 +01:00
|
|
|
#include "drm-uapi/msm_drm.h"
|
2019-01-11 18:03:51 +00:00
|
|
|
|
2020-04-09 11:56:08 +01:00
|
|
|
/* for fd_get_driver/device_uuid() */
|
|
|
|
#include "freedreno/common/freedreno_uuid.h"
|
|
|
|
|
2019-11-17 05:23:15 +00:00
|
|
|
static void
|
|
|
|
tu_semaphore_remove_temp(struct tu_device *device,
|
|
|
|
struct tu_semaphore *sem);
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
static int
|
|
|
|
tu_device_get_cache_uuid(uint16_t family, void *uuid)
|
|
|
|
{
|
|
|
|
uint32_t mesa_timestamp;
|
|
|
|
uint16_t f = family;
|
|
|
|
memset(uuid, 0, VK_UUID_SIZE);
|
|
|
|
if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
|
|
|
|
&mesa_timestamp))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
memcpy(uuid, &mesa_timestamp, 4);
|
2019-01-09 22:16:01 +00:00
|
|
|
memcpy((char *) uuid + 4, &f, 2);
|
|
|
|
snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
|
2018-08-08 23:23:57 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-01-31 23:03:03 +00:00
|
|
|
static VkResult
|
|
|
|
tu_bo_init(struct tu_device *dev,
|
|
|
|
struct tu_bo *bo,
|
|
|
|
uint32_t gem_handle,
|
|
|
|
uint64_t size)
|
|
|
|
{
|
|
|
|
uint64_t iova = tu_gem_info_iova(dev, gem_handle);
|
|
|
|
if (!iova)
|
|
|
|
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
|
|
|
|
|
|
|
*bo = (struct tu_bo) {
|
|
|
|
.gem_handle = gem_handle,
|
|
|
|
.size = size,
|
|
|
|
.iova = iova,
|
|
|
|
};
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-11-07 04:26:45 +00:00
|
|
|
VkResult
|
|
|
|
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
|
|
|
|
{
|
|
|
|
/* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
|
|
|
|
* always sets `flags = MSM_BO_WC`, and we copy that behavior here.
|
|
|
|
*/
|
|
|
|
uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
|
|
|
|
if (!gem_handle)
|
2019-01-31 23:03:03 +00:00
|
|
|
return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
2018-11-07 04:26:45 +00:00
|
|
|
|
2019-01-31 23:03:03 +00:00
|
|
|
VkResult result = tu_bo_init(dev, bo, gem_handle, size);
|
|
|
|
if (result != VK_SUCCESS) {
|
|
|
|
tu_gem_close(dev, gem_handle);
|
|
|
|
return vk_error(dev->instance, result);
|
|
|
|
}
|
2018-11-07 04:26:45 +00:00
|
|
|
|
2019-01-31 23:03:03 +00:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_bo_init_dmabuf(struct tu_device *dev,
|
|
|
|
struct tu_bo *bo,
|
|
|
|
uint64_t size,
|
|
|
|
int fd)
|
|
|
|
{
|
|
|
|
uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
|
|
|
|
if (!gem_handle)
|
|
|
|
return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
|
|
|
|
|
|
|
|
VkResult result = tu_bo_init(dev, bo, gem_handle, size);
|
|
|
|
if (result != VK_SUCCESS) {
|
|
|
|
tu_gem_close(dev, gem_handle);
|
|
|
|
return vk_error(dev->instance, result);
|
|
|
|
}
|
2018-11-07 04:26:45 +00:00
|
|
|
|
|
|
|
return VK_SUCCESS;
|
2019-01-31 23:03:03 +00:00
|
|
|
}
|
2018-11-07 04:26:45 +00:00
|
|
|
|
2019-01-31 23:03:03 +00:00
|
|
|
int
|
|
|
|
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
|
|
|
|
{
|
|
|
|
return tu_gem_export_dmabuf(dev, bo->gem_handle);
|
2018-11-07 04:26:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
|
|
|
|
{
|
|
|
|
if (bo->map)
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
2019-01-16 19:02:38 +00:00
|
|
|
uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
|
|
|
|
if (!offset)
|
2019-01-31 23:03:03 +00:00
|
|
|
return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
2019-01-16 19:02:38 +00:00
|
|
|
|
2018-11-07 04:26:45 +00:00
|
|
|
/* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
|
|
|
|
void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
|
2019-01-16 19:02:38 +00:00
|
|
|
dev->physical_device->local_fd, offset);
|
2018-11-07 04:26:45 +00:00
|
|
|
if (map == MAP_FAILED)
|
|
|
|
return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
|
|
|
|
|
2018-12-20 23:54:15 +00:00
|
|
|
bo->map = map;
|
2018-11-07 04:26:45 +00:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
|
|
|
|
{
|
|
|
|
assert(bo->gem_handle);
|
|
|
|
|
|
|
|
if (bo->map)
|
|
|
|
munmap(bo->map, bo->size);
|
|
|
|
|
|
|
|
tu_gem_close(dev, bo->gem_handle);
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
static VkResult
|
|
|
|
tu_physical_device_init(struct tu_physical_device *device,
|
2018-11-05 06:42:55 +00:00
|
|
|
struct tu_instance *instance,
|
|
|
|
drmDevicePtr drm_device)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
const char *path = drm_device->nodes[DRM_NODE_RENDER];
|
2018-11-07 07:17:30 +00:00
|
|
|
VkResult result = VK_SUCCESS;
|
2018-08-08 23:23:57 +01:00
|
|
|
drmVersionPtr version;
|
|
|
|
int fd;
|
|
|
|
int master_fd = -1;
|
|
|
|
|
|
|
|
fd = open(path, O_RDWR | O_CLOEXEC);
|
|
|
|
if (fd < 0) {
|
2018-11-12 22:21:45 +00:00
|
|
|
return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
|
|
|
|
"failed to open device %s", path);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
2018-11-12 22:13:13 +00:00
|
|
|
/* Version 1.3 added MSM_INFO_IOVA. */
|
|
|
|
const int min_version_major = 1;
|
|
|
|
const int min_version_minor = 3;
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
version = drmGetVersion(fd);
|
|
|
|
if (!version) {
|
|
|
|
close(fd);
|
2018-11-12 22:21:45 +00:00
|
|
|
return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
|
|
|
|
"failed to query kernel driver version for device %s",
|
2018-08-08 23:23:57 +01:00
|
|
|
path);
|
|
|
|
}
|
|
|
|
|
2018-08-09 09:36:06 +01:00
|
|
|
if (strcmp(version->name, "msm")) {
|
2018-08-08 23:23:57 +01:00
|
|
|
drmFreeVersion(version);
|
|
|
|
close(fd);
|
2018-11-12 22:21:45 +00:00
|
|
|
return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
|
|
|
|
"device %s does not use the msm kernel driver", path);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
2018-11-12 22:13:13 +00:00
|
|
|
|
2018-12-21 12:46:06 +00:00
|
|
|
if (version->version_major != min_version_major ||
|
|
|
|
version->version_minor < min_version_minor) {
|
2018-11-12 22:13:13 +00:00
|
|
|
result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
|
|
|
|
"kernel driver for device %s has version %d.%d, "
|
|
|
|
"but Vulkan requires version >= %d.%d",
|
2019-01-09 22:16:01 +00:00
|
|
|
path, version->version_major, version->version_minor,
|
2018-11-12 22:13:13 +00:00
|
|
|
min_version_major, min_version_minor);
|
|
|
|
drmFreeVersion(version);
|
|
|
|
close(fd);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2019-11-17 05:23:15 +00:00
|
|
|
device->msm_major_version = version->version_major;
|
|
|
|
device->msm_minor_version = version->version_minor;
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
drmFreeVersion(version);
|
|
|
|
|
|
|
|
if (instance->debug_flags & TU_DEBUG_STARTUP)
|
|
|
|
tu_logi("Found compatible device '%s'.", path);
|
|
|
|
|
|
|
|
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
|
|
|
|
device->instance = instance;
|
|
|
|
assert(strlen(path) < ARRAY_SIZE(device->path));
|
|
|
|
strncpy(device->path, path, ARRAY_SIZE(device->path));
|
|
|
|
|
|
|
|
if (instance->enabled_extensions.KHR_display) {
|
2019-01-09 22:16:01 +00:00
|
|
|
master_fd =
|
|
|
|
open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (master_fd >= 0) {
|
|
|
|
/* TODO: free master_fd is accel is not working? */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
device->master_fd = master_fd;
|
|
|
|
device->local_fd = fd;
|
|
|
|
|
2019-01-10 23:27:28 +00:00
|
|
|
if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
|
2018-12-21 13:49:30 +00:00
|
|
|
if (instance->debug_flags & TU_DEBUG_STARTUP)
|
|
|
|
tu_logi("Could not query the GPU ID");
|
2019-01-09 22:16:01 +00:00
|
|
|
result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
|
|
|
|
"could not get GPU ID");
|
2018-08-09 10:09:01 +01:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2019-01-10 23:27:28 +00:00
|
|
|
if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
|
2018-12-21 13:49:30 +00:00
|
|
|
if (instance->debug_flags & TU_DEBUG_STARTUP)
|
|
|
|
tu_logi("Could not query the GMEM size");
|
2019-01-09 22:16:01 +00:00
|
|
|
result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
|
|
|
|
"could not get GMEM size");
|
2018-08-09 10:09:01 +01:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2020-02-27 16:18:45 +00:00
|
|
|
if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
|
|
|
|
if (instance->debug_flags & TU_DEBUG_STARTUP)
|
|
|
|
tu_logi("Could not query the GMEM size");
|
|
|
|
result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
|
|
|
|
"could not get GMEM size");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2018-08-09 10:09:01 +01:00
|
|
|
memset(device->name, 0, sizeof(device->name));
|
|
|
|
sprintf(device->name, "FD%d", device->gpu_id);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
switch (device->gpu_id) {
|
2020-02-07 01:53:49 +00:00
|
|
|
case 618:
|
2020-03-13 15:47:15 +00:00
|
|
|
device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
|
|
|
|
device->ccu_offset_bypass = 0x10000;
|
2020-01-22 02:12:57 +00:00
|
|
|
device->tile_align_w = 64;
|
2020-02-07 01:53:49 +00:00
|
|
|
device->magic.PC_UNKNOWN_9805 = 0x0;
|
|
|
|
device->magic.SP_UNKNOWN_A0F8 = 0x0;
|
|
|
|
break;
|
2018-12-20 17:08:49 +00:00
|
|
|
case 630:
|
2019-11-07 12:28:37 +00:00
|
|
|
case 640:
|
2020-03-13 15:47:15 +00:00
|
|
|
device->ccu_offset_gmem = 0xf8000;
|
|
|
|
device->ccu_offset_bypass = 0x20000;
|
2020-01-22 02:12:57 +00:00
|
|
|
device->tile_align_w = 64;
|
2020-02-07 01:47:59 +00:00
|
|
|
device->magic.PC_UNKNOWN_9805 = 0x1;
|
|
|
|
device->magic.SP_UNKNOWN_A0F8 = 0x1;
|
2018-08-09 10:09:01 +01:00
|
|
|
break;
|
2020-01-22 02:12:57 +00:00
|
|
|
case 650:
|
|
|
|
device->ccu_offset_gmem = 0x114000;
|
|
|
|
device->ccu_offset_bypass = 0x30000;
|
|
|
|
device->tile_align_w = 96;
|
|
|
|
device->magic.PC_UNKNOWN_9805 = 0x2;
|
|
|
|
device->magic.SP_UNKNOWN_A0F8 = 0x2;
|
|
|
|
break;
|
2018-08-09 10:09:01 +01:00
|
|
|
default:
|
2018-11-12 22:21:45 +00:00
|
|
|
result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
|
|
|
|
"device %s is unsupported", device->name);
|
2018-08-09 10:09:01 +01:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
|
2019-01-09 22:16:01 +00:00
|
|
|
result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
|
|
|
|
"cannot generate UUID");
|
2018-08-08 23:23:57 +01:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The gpu id is already embedded in the uuid so we just pass "tu"
|
|
|
|
* when creating the cache.
|
|
|
|
*/
|
|
|
|
char buf[VK_UUID_SIZE * 2 + 1];
|
|
|
|
disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
|
|
|
|
device->disk_cache = disk_cache_create(device->name, buf, 0);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
|
|
|
|
"testing use only.\n");
|
2018-08-08 23:23:57 +01:00
|
|
|
|
2020-04-09 11:56:08 +01:00
|
|
|
fd_get_driver_uuid(device->driver_uuid);
|
2020-05-13 11:57:43 +01:00
|
|
|
fd_get_device_uuid(device->device_uuid, device->gpu_id);
|
2018-08-08 23:23:57 +01:00
|
|
|
|
2020-05-12 15:17:31 +01:00
|
|
|
tu_physical_device_get_supported_extensions(device, &device->supported_extensions);
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
if (result != VK_SUCCESS) {
|
|
|
|
vk_error(instance, result);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2019-02-08 21:45:53 +00:00
|
|
|
result = tu_wsi_init(device);
|
|
|
|
if (result != VK_SUCCESS) {
|
|
|
|
vk_error(instance, result);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_SUCCESS;
|
|
|
|
|
|
|
|
fail:
|
|
|
|
close(fd);
|
|
|
|
if (master_fd != -1)
|
|
|
|
close(master_fd);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tu_physical_device_finish(struct tu_physical_device *device)
|
|
|
|
{
|
2019-02-08 21:45:53 +00:00
|
|
|
tu_wsi_finish(device);
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
disk_cache_destroy(device->disk_cache);
|
|
|
|
close(device->local_fd);
|
|
|
|
if (device->master_fd != -1)
|
|
|
|
close(device->master_fd);
|
|
|
|
}
|
|
|
|
|
2020-04-07 16:45:03 +01:00
|
|
|
static VKAPI_ATTR void *
|
2018-08-08 23:23:57 +01:00
|
|
|
default_alloc_func(void *pUserData,
|
|
|
|
size_t size,
|
|
|
|
size_t align,
|
|
|
|
VkSystemAllocationScope allocationScope)
|
|
|
|
{
|
|
|
|
return malloc(size);
|
|
|
|
}
|
|
|
|
|
2020-04-07 16:45:03 +01:00
|
|
|
static VKAPI_ATTR void *
|
2018-08-08 23:23:57 +01:00
|
|
|
default_realloc_func(void *pUserData,
|
|
|
|
void *pOriginal,
|
|
|
|
size_t size,
|
|
|
|
size_t align,
|
|
|
|
VkSystemAllocationScope allocationScope)
|
|
|
|
{
|
|
|
|
return realloc(pOriginal, size);
|
|
|
|
}
|
|
|
|
|
2020-04-07 16:45:03 +01:00
|
|
|
/* VkAllocationCallbacks.pfnFree fallback: plain free.  pUserData is
 * ignored; free(NULL) is a harmless no-op.
 */
static VKAPI_ATTR void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}
|
|
|
|
|
|
|
|
/* Allocator used when the application passes no VkAllocationCallbacks;
 * wraps plain malloc/realloc/free via the default_* functions above.
 */
static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
/* Flags recognized in the TU_DEBUG environment variable; parsed with
 * parse_debug_string() in tu_CreateInstance.
 */
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { "nobin", TU_DEBUG_NOBIN },
   { "sysmem", TU_DEBUG_SYSMEM },
   { "forcebin", TU_DEBUG_FORCEBIN },
   { "noubwc", TU_DEBUG_NOUBWC },
   /* Sentinel: tu_get_debug_option_name relies on this terminator. */
   { NULL, 0 }
};
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
const char *
|
|
|
|
tu_get_debug_option_name(int id)
|
|
|
|
{
|
|
|
|
assert(id < ARRAY_SIZE(tu_debug_options) - 1);
|
|
|
|
return tu_debug_options[id].string;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
tu_get_instance_extension_index(const char *name)
|
|
|
|
{
|
|
|
|
for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
|
|
|
|
if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkInstance *pInstance)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
struct tu_instance *instance;
|
|
|
|
VkResult result;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
|
|
|
|
|
|
|
|
uint32_t client_version;
|
|
|
|
if (pCreateInfo->pApplicationInfo &&
|
|
|
|
pCreateInfo->pApplicationInfo->apiVersion != 0) {
|
|
|
|
client_version = pCreateInfo->pApplicationInfo->apiVersion;
|
|
|
|
} else {
|
|
|
|
tu_EnumerateInstanceVersion(&client_version);
|
|
|
|
}
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
|
2018-08-08 23:23:57 +01:00
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
|
|
|
|
if (!instance)
|
|
|
|
return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
|
|
|
|
|
|
|
|
if (pAllocator)
|
|
|
|
instance->alloc = *pAllocator;
|
|
|
|
else
|
|
|
|
instance->alloc = default_alloc;
|
|
|
|
|
|
|
|
instance->api_version = client_version;
|
|
|
|
instance->physical_device_count = -1;
|
|
|
|
|
|
|
|
instance->debug_flags =
|
2019-01-09 22:16:01 +00:00
|
|
|
parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
if (instance->debug_flags & TU_DEBUG_STARTUP)
|
|
|
|
tu_logi("Created an instance");
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
|
|
|
|
const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
|
|
|
|
int index = tu_get_instance_extension_index(ext_name);
|
|
|
|
|
2020-05-12 15:17:31 +01:00
|
|
|
if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
|
2018-08-08 23:23:57 +01:00
|
|
|
vk_free2(&default_alloc, pAllocator, instance);
|
|
|
|
return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
|
|
|
|
}
|
|
|
|
|
|
|
|
instance->enabled_extensions.extensions[index] = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
|
|
|
|
if (result != VK_SUCCESS) {
|
|
|
|
vk_free2(&default_alloc, pAllocator, instance);
|
|
|
|
return vk_error(instance, result);
|
|
|
|
}
|
|
|
|
|
2019-09-18 13:11:47 +01:00
|
|
|
glsl_type_singleton_init_or_ref();
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
|
|
|
|
|
|
|
|
*pInstance = tu_instance_to_handle(instance);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyInstance(VkInstance _instance,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_instance, instance, _instance);
|
|
|
|
|
|
|
|
if (!instance)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (int i = 0; i < instance->physical_device_count; ++i) {
|
|
|
|
tu_physical_device_finish(instance->physical_devices + i);
|
|
|
|
}
|
|
|
|
|
|
|
|
VG(VALGRIND_DESTROY_MEMPOOL(instance));
|
|
|
|
|
2019-09-18 13:11:47 +01:00
|
|
|
glsl_type_singleton_decref();
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
|
|
|
|
|
|
|
|
vk_free(&instance->alloc, instance);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VkResult
|
|
|
|
tu_enumerate_devices(struct tu_instance *instance)
|
|
|
|
{
|
|
|
|
/* TODO: Check for more devices ? */
|
|
|
|
drmDevicePtr devices[8];
|
|
|
|
VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
|
|
|
|
int max_devices;
|
|
|
|
|
|
|
|
instance->physical_device_count = 0;
|
|
|
|
|
|
|
|
max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
|
|
|
|
|
2020-06-17 19:25:17 +01:00
|
|
|
if (instance->debug_flags & TU_DEBUG_STARTUP) {
|
|
|
|
if (max_devices < 0)
|
|
|
|
tu_logi("drmGetDevices2 returned error: %s\n", strerror(max_devices));
|
|
|
|
else
|
|
|
|
tu_logi("Found %d drm nodes", max_devices);
|
|
|
|
}
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
if (max_devices < 1)
|
|
|
|
return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
for (unsigned i = 0; i < (unsigned) max_devices; i++) {
|
2018-08-08 23:23:57 +01:00
|
|
|
if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
|
2018-08-09 09:36:06 +01:00
|
|
|
devices[i]->bustype == DRM_BUS_PLATFORM) {
|
2018-08-08 23:23:57 +01:00
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
result = tu_physical_device_init(
|
|
|
|
instance->physical_devices + instance->physical_device_count,
|
|
|
|
instance, devices[i]);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (result == VK_SUCCESS)
|
|
|
|
++instance->physical_device_count;
|
|
|
|
else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
drmFreeDevices(devices, max_devices);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* vkEnumeratePhysicalDevices: lazily enumerate DRM devices on first call,
 * then report the physical-device handles through the standard Vulkan
 * two-call out-array idiom (VK_OUTARRAY_MAKE handles the count-query vs.
 * fill cases and the VK_INCOMPLETE status).
 */
VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   /* physical_device_count < 0 means enumeration hasn't happened yet.
    * VK_ERROR_INCOMPATIBLE_DRIVER just means "no devices found" and is
    * reported as an empty list rather than an error.
    */
   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   /* VK_SUCCESS, or VK_INCOMPLETE if the caller's array was too small. */
   return vk_outarray_status(&out);
}
|
|
|
|
|
|
|
|
/* vkEnumeratePhysicalDeviceGroups: each physical device forms its own
 * single-member group (no multi-GPU linking).  Uses the same lazy
 * enumeration and out-array idiom as tu_EnumeratePhysicalDevices.
 */
VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   /* Lazily enumerate; "no devices" is reported as an empty list. */
   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         /* One device per group; no subset allocation support. */
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}
|
|
|
|
|
|
|
|
/* vkGetPhysicalDeviceFeatures: report the core Vulkan 1.0 feature set
 * supported by this driver.  Every member is assigned explicitly below;
 * features not yet implemented are reported as false.
 */
void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   /* NOTE(review): this memset is redundant — the full struct assignment
    * below sets every member — but it is kept unchanged here.
    */
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = true,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .drawIndirectFirstInstance = true,
      .depthClamp = true,
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .depthBounds = true,
      .wideLines = false,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = false,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
|
2019-02-02 01:08:51 +00:00
|
|
|
VkPhysicalDeviceFeatures2 *pFeatures)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
vk_foreach_struct(ext, pFeatures->pNext)
|
|
|
|
{
|
|
|
|
switch (ext->sType) {
|
2020-06-26 04:32:20 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
|
|
|
|
VkPhysicalDeviceVulkan11Features *features = (void *) ext;
|
|
|
|
features->storageBuffer16BitAccess = false;
|
|
|
|
features->uniformAndStorageBuffer16BitAccess = false;
|
|
|
|
features->storagePushConstant16 = false;
|
|
|
|
features->storageInputOutput16 = false;
|
|
|
|
features->multiview = false;
|
|
|
|
features->multiviewGeometryShader = false;
|
|
|
|
features->multiviewTessellationShader = false;
|
|
|
|
features->variablePointersStorageBuffer = false;
|
|
|
|
features->variablePointers = false;
|
|
|
|
features->protectedMemory = false;
|
|
|
|
features->samplerYcbcrConversion = true;
|
|
|
|
features->shaderDrawParameters = true;
|
2020-06-24 21:00:30 +01:00
|
|
|
break;
|
2020-06-26 04:32:20 +01:00
|
|
|
}
|
2019-04-23 13:48:39 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
|
|
|
|
VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
|
2019-01-09 22:16:01 +00:00
|
|
|
features->variablePointersStorageBuffer = false;
|
|
|
|
features->variablePointers = false;
|
|
|
|
break;
|
|
|
|
}
|
2019-02-02 01:08:51 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
|
|
|
|
VkPhysicalDeviceMultiviewFeatures *features =
|
|
|
|
(VkPhysicalDeviceMultiviewFeatures *) ext;
|
2019-01-09 22:16:01 +00:00
|
|
|
features->multiview = false;
|
|
|
|
features->multiviewGeometryShader = false;
|
|
|
|
features->multiviewTessellationShader = false;
|
|
|
|
break;
|
|
|
|
}
|
2019-04-23 13:48:39 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
|
|
|
|
VkPhysicalDeviceShaderDrawParametersFeatures *features =
|
|
|
|
(VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
|
2020-06-24 21:00:30 +01:00
|
|
|
features->shaderDrawParameters = true;
|
2019-01-09 22:16:01 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
|
|
|
|
VkPhysicalDeviceProtectedMemoryFeatures *features =
|
|
|
|
(VkPhysicalDeviceProtectedMemoryFeatures *) ext;
|
|
|
|
features->protectedMemory = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
|
|
|
|
VkPhysicalDevice16BitStorageFeatures *features =
|
|
|
|
(VkPhysicalDevice16BitStorageFeatures *) ext;
|
|
|
|
features->storageBuffer16BitAccess = false;
|
|
|
|
features->uniformAndStorageBuffer16BitAccess = false;
|
|
|
|
features->storagePushConstant16 = false;
|
|
|
|
features->storageInputOutput16 = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
|
|
|
|
VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
|
|
|
|
(VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
|
2020-04-10 14:19:36 +01:00
|
|
|
features->samplerYcbcrConversion = true;
|
2019-01-09 22:16:01 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
|
|
|
|
VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
|
|
|
|
(VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
|
|
|
|
features->shaderInputAttachmentArrayDynamicIndexing = false;
|
|
|
|
features->shaderUniformTexelBufferArrayDynamicIndexing = false;
|
|
|
|
features->shaderStorageTexelBufferArrayDynamicIndexing = false;
|
|
|
|
features->shaderUniformBufferArrayNonUniformIndexing = false;
|
|
|
|
features->shaderSampledImageArrayNonUniformIndexing = false;
|
|
|
|
features->shaderStorageBufferArrayNonUniformIndexing = false;
|
|
|
|
features->shaderStorageImageArrayNonUniformIndexing = false;
|
|
|
|
features->shaderInputAttachmentArrayNonUniformIndexing = false;
|
|
|
|
features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
|
|
|
|
features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
|
|
|
|
features->descriptorBindingUniformBufferUpdateAfterBind = false;
|
|
|
|
features->descriptorBindingSampledImageUpdateAfterBind = false;
|
|
|
|
features->descriptorBindingStorageImageUpdateAfterBind = false;
|
|
|
|
features->descriptorBindingStorageBufferUpdateAfterBind = false;
|
|
|
|
features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
|
|
|
|
features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
|
|
|
|
features->descriptorBindingUpdateUnusedWhilePending = false;
|
|
|
|
features->descriptorBindingPartiallyBound = false;
|
|
|
|
features->descriptorBindingVariableDescriptorCount = false;
|
|
|
|
features->runtimeDescriptorArray = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
|
|
|
|
VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
|
|
|
|
(VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
|
|
|
|
features->conditionalRendering = false;
|
|
|
|
features->inheritedConditionalRendering = false;
|
|
|
|
break;
|
|
|
|
}
|
2020-02-20 05:41:55 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
|
|
|
|
VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
|
|
|
|
(VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
|
|
|
|
features->transformFeedback = true;
|
|
|
|
features->geometryStreams = false;
|
|
|
|
break;
|
|
|
|
}
|
2020-06-20 20:02:10 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
|
|
|
|
VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
|
|
|
|
(VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
|
|
|
|
features->indexTypeUint8 = true;
|
|
|
|
break;
|
|
|
|
}
|
2020-06-25 00:56:01 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
|
|
|
|
VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
|
|
|
|
(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
|
|
|
|
features->vertexAttributeInstanceRateDivisor = true;
|
|
|
|
features->vertexAttributeInstanceRateZeroDivisor = true;
|
|
|
|
break;
|
|
|
|
}
|
2019-01-09 22:16:01 +00:00
|
|
|
default:
|
|
|
|
break;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkPhysicalDeviceProperties *pProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
|
2020-04-08 03:20:10 +01:00
|
|
|
VkSampleCountFlags sample_counts =
|
|
|
|
VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
/* I have no idea what the maximum size is, but the hardware supports very
|
|
|
|
* large numbers of descriptors (at least 2^16). This limit is based on
|
|
|
|
* CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
|
|
|
|
* we don't have to think about what to do if that overflows, but really
|
|
|
|
* nothing is likely to get close to this.
|
|
|
|
*/
|
|
|
|
const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
VkPhysicalDeviceLimits limits = {
|
|
|
|
.maxImageDimension1D = (1 << 14),
|
|
|
|
.maxImageDimension2D = (1 << 14),
|
|
|
|
.maxImageDimension3D = (1 << 11),
|
|
|
|
.maxImageDimensionCube = (1 << 14),
|
|
|
|
.maxImageArrayLayers = (1 << 11),
|
|
|
|
.maxTexelBufferElements = 128 * 1024 * 1024,
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
.maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
|
2019-12-02 22:32:53 +00:00
|
|
|
.maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
|
|
|
|
.maxMemoryAllocationCount = UINT32_MAX,
|
|
|
|
.maxSamplerAllocationCount = 64 * 1024,
|
|
|
|
.bufferImageGranularity = 64, /* A cache line */
|
|
|
|
.sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
|
|
|
|
.maxBoundDescriptorSets = MAX_SETS,
|
|
|
|
.maxPerStageDescriptorSamplers = max_descriptor_set_size,
|
|
|
|
.maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
|
|
|
|
.maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
|
|
|
|
.maxPerStageDescriptorSampledImages = max_descriptor_set_size,
|
|
|
|
.maxPerStageDescriptorStorageImages = max_descriptor_set_size,
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
.maxPerStageDescriptorInputAttachments = MAX_RTS,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxPerStageResources = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetSamplers = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetUniformBuffers = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
|
|
|
|
.maxDescriptorSetStorageBuffers = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
|
|
|
|
.maxDescriptorSetSampledImages = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetStorageImages = max_descriptor_set_size,
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
.maxDescriptorSetInputAttachments = MAX_RTS,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxVertexInputAttributes = 32,
|
|
|
|
.maxVertexInputBindings = 32,
|
2020-03-18 02:28:38 +00:00
|
|
|
.maxVertexInputAttributeOffset = 4095,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxVertexInputBindingStride = 2048,
|
|
|
|
.maxVertexOutputComponents = 128,
|
|
|
|
.maxTessellationGenerationLevel = 64,
|
|
|
|
.maxTessellationPatchSize = 32,
|
|
|
|
.maxTessellationControlPerVertexInputComponents = 128,
|
|
|
|
.maxTessellationControlPerVertexOutputComponents = 128,
|
|
|
|
.maxTessellationControlPerPatchOutputComponents = 120,
|
|
|
|
.maxTessellationControlTotalOutputComponents = 4096,
|
|
|
|
.maxTessellationEvaluationInputComponents = 128,
|
|
|
|
.maxTessellationEvaluationOutputComponents = 128,
|
2020-04-03 15:59:47 +01:00
|
|
|
.maxGeometryShaderInvocations = 32,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxGeometryInputComponents = 64,
|
|
|
|
.maxGeometryOutputComponents = 128,
|
|
|
|
.maxGeometryOutputVertices = 256,
|
|
|
|
.maxGeometryTotalOutputComponents = 1024,
|
2020-04-20 12:41:42 +01:00
|
|
|
.maxFragmentInputComponents = 124,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxFragmentOutputAttachments = 8,
|
|
|
|
.maxFragmentDualSrcAttachments = 1,
|
|
|
|
.maxFragmentCombinedOutputResources = 8,
|
|
|
|
.maxComputeSharedMemorySize = 32768,
|
|
|
|
.maxComputeWorkGroupCount = { 65535, 65535, 65535 },
|
|
|
|
.maxComputeWorkGroupInvocations = 2048,
|
|
|
|
.maxComputeWorkGroupSize = { 2048, 2048, 2048 },
|
2020-03-12 21:27:29 +00:00
|
|
|
.subPixelPrecisionBits = 8,
|
2020-06-07 03:07:09 +01:00
|
|
|
.subTexelPrecisionBits = 8,
|
|
|
|
.mipmapPrecisionBits = 8,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxDrawIndexedIndexValue = UINT32_MAX,
|
|
|
|
.maxDrawIndirectCount = UINT32_MAX,
|
2020-06-07 03:07:09 +01:00
|
|
|
.maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxSamplerAnisotropy = 16,
|
|
|
|
.maxViewports = MAX_VIEWPORTS,
|
|
|
|
.maxViewportDimensions = { (1 << 14), (1 << 14) },
|
|
|
|
.viewportBoundsRange = { INT16_MIN, INT16_MAX },
|
|
|
|
.viewportSubPixelBits = 8,
|
|
|
|
.minMemoryMapAlignment = 4096, /* A page */
|
2020-01-22 20:25:10 +00:00
|
|
|
.minTexelBufferOffsetAlignment = 64,
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
.minUniformBufferOffsetAlignment = 64,
|
|
|
|
.minStorageBufferOffsetAlignment = 64,
|
2020-06-07 03:07:09 +01:00
|
|
|
.minTexelOffset = -16,
|
|
|
|
.maxTexelOffset = 15,
|
2018-08-08 23:23:57 +01:00
|
|
|
.minTexelGatherOffset = -32,
|
|
|
|
.maxTexelGatherOffset = 31,
|
2020-06-07 03:07:09 +01:00
|
|
|
.minInterpolationOffset = -0.5,
|
|
|
|
.maxInterpolationOffset = 0.4375,
|
|
|
|
.subPixelInterpolationOffsetBits = 4,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxFramebufferWidth = (1 << 14),
|
|
|
|
.maxFramebufferHeight = (1 << 14),
|
|
|
|
.maxFramebufferLayers = (1 << 10),
|
|
|
|
.framebufferColorSampleCounts = sample_counts,
|
|
|
|
.framebufferDepthSampleCounts = sample_counts,
|
|
|
|
.framebufferStencilSampleCounts = sample_counts,
|
|
|
|
.framebufferNoAttachmentsSampleCounts = sample_counts,
|
|
|
|
.maxColorAttachments = MAX_RTS,
|
|
|
|
.sampledImageColorSampleCounts = sample_counts,
|
|
|
|
.sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
|
|
|
|
.sampledImageDepthSampleCounts = sample_counts,
|
|
|
|
.sampledImageStencilSampleCounts = sample_counts,
|
|
|
|
.storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
|
|
|
|
.maxSampleMaskWords = 1,
|
2020-03-03 01:52:15 +00:00
|
|
|
.timestampComputeAndGraphics = true,
|
|
|
|
.timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxClipDistances = 8,
|
|
|
|
.maxCullDistances = 8,
|
|
|
|
.maxCombinedClipAndCullDistances = 8,
|
|
|
|
.discreteQueuePriorities = 1,
|
2020-06-29 00:58:08 +01:00
|
|
|
.pointSizeRange = { 1, 4092 },
|
2018-08-08 23:23:57 +01:00
|
|
|
.lineWidthRange = { 0.0, 7.9921875 },
|
2020-06-29 00:58:08 +01:00
|
|
|
.pointSizeGranularity = 0.0625,
|
2018-08-08 23:23:57 +01:00
|
|
|
.lineWidthGranularity = (1.0 / 128.0),
|
|
|
|
.strictLines = false, /* FINISHME */
|
|
|
|
.standardSampleLocations = true,
|
|
|
|
.optimalBufferCopyOffsetAlignment = 128,
|
|
|
|
.optimalBufferCopyRowPitchAlignment = 128,
|
|
|
|
.nonCoherentAtomSize = 64,
|
|
|
|
};
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
*pProperties = (VkPhysicalDeviceProperties) {
|
2018-08-08 23:23:57 +01:00
|
|
|
.apiVersion = tu_physical_device_api_version(pdevice),
|
|
|
|
.driverVersion = vk_get_driver_version(),
|
|
|
|
.vendorID = 0, /* TODO */
|
|
|
|
.deviceID = 0,
|
|
|
|
.deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
|
|
|
|
.limits = limits,
|
|
|
|
.sparseProperties = { 0 },
|
|
|
|
};
|
|
|
|
|
|
|
|
strcpy(pProperties->deviceName, pdevice->name);
|
|
|
|
memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Vulkan 1.1+ entry point: fills the core VkPhysicalDeviceProperties and then
 * walks the pNext chain, filling every extension property struct we
 * recognize.  Unknown sTypes are silently left untouched (default: break),
 * as the spec requires.
 */
void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2 *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   /* Core (non-extension) properties first. */
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *properties =
            (VkPhysicalDeviceIDProperties *) ext;
         /* UUIDs were computed at physical-device init time. */
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         /* LUIDs are a Windows/D3D concept; we never report one. */
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
         VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
            (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;

         properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
         properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
         properties->maxTransformFeedbackBufferSize = UINT32_MAX;
         properties->maxTransformFeedbackStreamDataSize = 512;
         properties->maxTransformFeedbackBufferDataSize = 512;
         properties->maxTransformFeedbackBufferDataStride = 512;
         properties->transformFeedbackQueries = true;
         /* Rasterization always follows stream 0 and lines/triangles can't
          * be captured from other streams. */
         properties->transformFeedbackStreamsLinesTriangles = false;
         properties->transformFeedbackRasterizationStreamSelect = false;
         properties->transformFeedbackDraw = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
         VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
            (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
         /* Report zero sample counts unless the extension is actually
          * supported on this device. */
         properties->sampleLocationSampleCounts = 0;
         if (pdevice->supported_extensions.EXT_sample_locations) {
            properties->sampleLocationSampleCounts =
               VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
         }
         properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
         /* 4 sub-pixel bits => positions in [0, 15/16]. */
         properties->sampleLocationCoordinateRange[0] = 0.0f;
         properties->sampleLocationCoordinateRange[1] = 0.9375f;
         properties->sampleLocationSubPixelBits = 4;
         properties->variableSampleLocations = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
         VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
            (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
         properties->filterMinmaxImageComponentMapping = true;
         properties->filterMinmaxSingleComponentFormats = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
         VkPhysicalDeviceSubgroupProperties *properties =
            (VkPhysicalDeviceSubgroupProperties *)ext;
         properties->subgroupSize = 64;
         /* Subgroup ops are only exposed for compute for now. */
         properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
         properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
                                           VK_SUBGROUP_FEATURE_VOTE_BIT;
         properties->quadOperationsInAllStages = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
            (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
         props->maxVertexAttribDivisor = UINT32_MAX;
         break;
      }
      default:
         /* Unrecognized extension struct: leave it alone. */
         break;
      }
   }
}
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
/* The single queue family exposed by the driver: one queue supporting
 * graphics, compute, and transfer work.
 */
static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   /* Number of meaningful bits in vkCmdWriteTimestamp results. */
   .timestampValidBits = 48,
   .minImageTransferGranularity = { 1, 1, 1 },
};
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
/* Standard Vulkan two-call pattern: when pQueueFamilyProperties is NULL only
 * the count is written; otherwise up to *pQueueFamilyPropertyCount entries
 * are filled (we have exactly one family).
 */
void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}
|
|
|
|
|
|
|
|
/* VkQueueFamilyProperties2 variant of the query above; the pNext chain of
 * each element is not examined (no queue-family extension structs are
 * supported).
 */
void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}
|
|
|
|
|
2018-08-10 12:30:08 +01:00
|
|
|
/* Compute the size advertised for our single memory heap, derived from the
 * total system RAM reported by sysinfo(2).
 *
 * Returns the number of bytes the driver is willing to hand out.
 *
 * Note: the original declarator was `tu_get_system_heap_size()`, which in
 * pre-C23 C declares an unprototyped function; `(void)` is the correct
 * spelling for "takes no arguments".
 */
static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   /* totalram is in units of mem_unit bytes; widen before multiplying to
    * avoid 32-bit overflow on systems with lots of RAM. */
   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
void
|
|
|
|
tu_GetPhysicalDeviceMemoryProperties(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkPhysicalDevice physicalDevice,
|
|
|
|
VkPhysicalDeviceMemoryProperties *pMemoryProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2018-08-10 12:30:08 +01:00
|
|
|
pMemoryProperties->memoryHeapCount = 1;
|
2018-08-17 13:35:59 +01:00
|
|
|
pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
|
2018-08-10 12:30:08 +01:00
|
|
|
pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
|
|
|
|
|
|
|
|
pMemoryProperties->memoryTypeCount = 1;
|
2019-01-09 22:16:01 +00:00
|
|
|
pMemoryProperties->memoryTypes[0].propertyFlags =
|
|
|
|
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
|
|
|
|
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
|
|
|
|
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
|
2018-08-10 12:30:08 +01:00
|
|
|
pMemoryProperties->memoryTypes[0].heapIndex = 0;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetPhysicalDeviceMemoryProperties2(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkPhysicalDevice physicalDevice,
|
2019-02-02 01:08:51 +00:00
|
|
|
VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
return tu_GetPhysicalDeviceMemoryProperties(
|
2019-01-09 22:16:01 +00:00
|
|
|
physicalDevice, &pMemoryProperties->memoryProperties);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
2019-01-10 20:12:38 +00:00
|
|
|
static VkResult
|
2018-08-08 23:23:57 +01:00
|
|
|
tu_queue_init(struct tu_device *device,
|
2018-11-05 06:42:55 +00:00
|
|
|
struct tu_queue *queue,
|
|
|
|
uint32_t queue_family_index,
|
|
|
|
int idx,
|
|
|
|
VkDeviceQueueCreateFlags flags)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
|
|
|
|
queue->device = device;
|
|
|
|
queue->queue_family_index = queue_family_index;
|
|
|
|
queue->queue_idx = idx;
|
|
|
|
queue->flags = flags;
|
|
|
|
|
2019-01-10 23:34:44 +00:00
|
|
|
int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
|
2019-01-10 20:12:38 +00:00
|
|
|
if (ret)
|
|
|
|
return VK_ERROR_INITIALIZATION_FAILED;
|
|
|
|
|
2019-02-14 22:36:52 +00:00
|
|
|
tu_fence_init(&queue->submit_fence, false);
|
2019-01-10 22:07:50 +00:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down a queue created by tu_queue_init: release the submit fence,
 * then close the kernel submit queue.
 */
static void
tu_queue_finish(struct tu_queue *queue)
{
   tu_fence_finish(&queue->submit_fence);
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
tu_get_device_extension_index(const char *name)
|
|
|
|
{
|
|
|
|
for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
|
|
|
|
if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-03-12 11:39:16 +00:00
|
|
|
/* One hardware border-color table entry.  Each entry stores the border color
 * pre-converted into every encoding the sampler may need, so the hardware
 * can pick the right one per texture format.  STATIC_ASSERT elsewhere pins
 * sizeof(struct bcolor_entry) == 128.
 */
struct PACKED bcolor_entry {
   uint32_t fp32[4];
   uint16_t ui16[4];
   int16_t si16[4];
   uint16_t fp16[4];
   uint16_t rgb565;
   uint16_t rgb5a1;
   uint16_t rgba4;
   uint8_t __pad0[2];
   uint8_t ui8[4];
   int8_t si8[4];
   uint32_t rgb10a2;
   uint32_t z24; /* also s8? */
   uint16_t srgb[4];  /* appears to duplicate fp16[], but clamped, used for srgb */
   uint8_t __pad1[56];  /* pad entry out to 128 bytes */
} border_color[] = {
   /* Transparent black is all-zeroes in every encoding. */
   [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
   /* Opaque black: alpha channel set to 1.0 (raw bit patterns below are the
    * encodings of 1.0 / max alpha in each format). */
   [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
      .fp32[3] = 0x3f800000,  /* 1.0f as raw IEEE-754 bits */
      .ui16[3] = 0xffff,
      .si16[3] = 0x7fff,
      .fp16[3] = 0x3c00,      /* 1.0 in half-float */
      .rgb5a1 = 0x8000,
      .rgba4 = 0xf000,
      .ui8[3] = 0xff,
      .si8[3] = 0x7f,
      .rgb10a2 = 0xc0000000,
      .srgb[3] = 0x3c00,
   },
   /* Integer opaque black: alpha = integer 1 (not a float bit pattern). */
   [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
      .fp32[3] = 1,
      .fp16[3] = 1,
   },
   /* Opaque white: every channel at 1.0 / maximum. */
   [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 0x3f800000,
      .ui16[0 ... 3] = 0xffff,
      .si16[0 ... 3] = 0x7fff,
      .fp16[0 ... 3] = 0x3c00,
      .rgb565 = 0xffff,
      .rgb5a1 = 0xffff,
      .rgba4 = 0xffff,
      .ui8[0 ... 3] = 0xff,
      .si8[0 ... 3] = 0x7f,
      .rgb10a2 = 0xffffffff,
      .z24 = 0xffffff,
      .srgb[0 ... 3] = 0x3c00,
   },
   /* Integer opaque white: every channel = integer 1. */
   [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 1,
      .fp16[0 ... 3] = 1,
   },
};
|
|
|
|
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
VkResult
|
|
|
|
tu_CreateDevice(VkPhysicalDevice physicalDevice,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkDeviceCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkDevice *pDevice)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
|
|
|
|
VkResult result;
|
|
|
|
struct tu_device *device;
|
|
|
|
|
|
|
|
/* Check enabled features */
|
|
|
|
if (pCreateInfo->pEnabledFeatures) {
|
|
|
|
VkPhysicalDeviceFeatures supported_features;
|
|
|
|
tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
|
2019-01-09 22:16:01 +00:00
|
|
|
VkBool32 *supported_feature = (VkBool32 *) &supported_features;
|
|
|
|
VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
|
2018-08-08 23:23:57 +01:00
|
|
|
unsigned num_features =
|
2019-01-09 22:16:01 +00:00
|
|
|
sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
|
2018-08-08 23:23:57 +01:00
|
|
|
for (uint32_t i = 0; i < num_features; i++) {
|
|
|
|
if (enabled_feature[i] && !supported_feature[i])
|
|
|
|
return vk_error(physical_device->instance,
|
|
|
|
VK_ERROR_FEATURE_NOT_PRESENT);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
|
|
|
|
sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (!device)
|
|
|
|
return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
|
|
|
|
device->instance = physical_device->instance;
|
|
|
|
device->physical_device = physical_device;
|
2020-06-17 23:58:33 +01:00
|
|
|
device->_lost = false;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
if (pAllocator)
|
|
|
|
device->alloc = *pAllocator;
|
|
|
|
else
|
|
|
|
device->alloc = physical_device->instance->alloc;
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
|
|
|
|
const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
|
|
|
|
int index = tu_get_device_extension_index(ext_name);
|
|
|
|
if (index < 0 ||
|
|
|
|
!physical_device->supported_extensions.extensions[index]) {
|
|
|
|
vk_free(&device->alloc, device);
|
|
|
|
return vk_error(physical_device->instance,
|
|
|
|
VK_ERROR_EXTENSION_NOT_PRESENT);
|
|
|
|
}
|
|
|
|
|
|
|
|
device->enabled_extensions.extensions[index] = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
|
|
|
|
const VkDeviceQueueCreateInfo *queue_create =
|
2019-01-09 22:16:01 +00:00
|
|
|
&pCreateInfo->pQueueCreateInfos[i];
|
2018-08-08 23:23:57 +01:00
|
|
|
uint32_t qfi = queue_create->queueFamilyIndex;
|
2019-01-09 22:16:01 +00:00
|
|
|
device->queues[qfi] = vk_alloc(
|
|
|
|
&device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
|
|
|
|
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (!device->queues[qfi]) {
|
|
|
|
result = VK_ERROR_OUT_OF_HOST_MEMORY;
|
2020-01-28 16:30:44 +00:00
|
|
|
goto fail_queues;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
memset(device->queues[qfi], 0,
|
2018-08-08 23:23:57 +01:00
|
|
|
queue_create->queueCount * sizeof(struct tu_queue));
|
|
|
|
|
|
|
|
device->queue_count[qfi] = queue_create->queueCount;
|
|
|
|
|
|
|
|
for (unsigned q = 0; q < queue_create->queueCount; q++) {
|
2019-01-09 22:16:01 +00:00
|
|
|
result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
|
|
|
|
queue_create->flags);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (result != VK_SUCCESS)
|
2020-01-28 16:30:44 +00:00
|
|
|
goto fail_queues;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-20 17:53:47 +00:00
|
|
|
device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
|
|
|
|
if (!device->compiler)
|
2020-01-28 16:30:44 +00:00
|
|
|
goto fail_queues;
|
|
|
|
|
2020-04-25 17:51:09 +01:00
|
|
|
#define VSC_DRAW_STRM_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
|
|
|
|
#define VSC_PRIM_STRM_SIZE(pitch) ((pitch) * 32)
|
2020-01-28 16:30:44 +00:00
|
|
|
|
2020-04-25 17:51:09 +01:00
|
|
|
device->vsc_draw_strm_pitch = 0x440 * 4;
|
|
|
|
device->vsc_prim_strm_pitch = 0x1040 * 4;
|
2020-01-28 16:30:44 +00:00
|
|
|
|
2020-04-25 17:51:09 +01:00
|
|
|
result = tu_bo_init_new(device, &device->vsc_draw_strm, VSC_DRAW_STRM_SIZE(device->vsc_draw_strm_pitch));
|
2020-01-28 16:30:44 +00:00
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
goto fail_vsc_data;
|
|
|
|
|
2020-04-25 17:51:09 +01:00
|
|
|
result = tu_bo_init_new(device, &device->vsc_prim_strm, VSC_PRIM_STRM_SIZE(device->vsc_prim_strm_pitch));
|
2020-01-28 16:30:44 +00:00
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
goto fail_vsc_data2;
|
2019-02-20 17:53:47 +00:00
|
|
|
|
2020-03-12 11:39:16 +00:00
|
|
|
STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
|
|
|
|
result = tu_bo_init_new(device, &device->border_color, sizeof(border_color));
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
goto fail_border_color;
|
|
|
|
|
|
|
|
result = tu_bo_map(device, &device->border_color);
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
goto fail_border_color_map;
|
|
|
|
|
|
|
|
memcpy(device->border_color.map, border_color, sizeof(border_color));
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
VkPipelineCacheCreateInfo ci;
|
|
|
|
ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
|
|
|
|
ci.pNext = NULL;
|
|
|
|
ci.flags = 0;
|
|
|
|
ci.pInitialData = NULL;
|
|
|
|
ci.initialDataSize = 0;
|
|
|
|
VkPipelineCache pc;
|
|
|
|
result =
|
2019-01-09 22:16:01 +00:00
|
|
|
tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (result != VK_SUCCESS)
|
2020-01-28 16:30:44 +00:00
|
|
|
goto fail_pipeline_cache;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
device->mem_cache = tu_pipeline_cache_from_handle(pc);
|
|
|
|
|
2020-05-11 17:46:04 +01:00
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
|
|
|
|
mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
*pDevice = tu_device_to_handle(device);
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
2020-01-28 16:30:44 +00:00
|
|
|
fail_pipeline_cache:
|
2020-03-12 11:39:16 +00:00
|
|
|
fail_border_color_map:
|
|
|
|
tu_bo_finish(device, &device->border_color);
|
|
|
|
|
|
|
|
fail_border_color:
|
2020-04-25 17:51:09 +01:00
|
|
|
tu_bo_finish(device, &device->vsc_prim_strm);
|
2020-01-28 16:30:44 +00:00
|
|
|
|
|
|
|
fail_vsc_data2:
|
2020-04-25 17:51:09 +01:00
|
|
|
tu_bo_finish(device, &device->vsc_draw_strm);
|
2020-01-28 16:30:44 +00:00
|
|
|
|
|
|
|
fail_vsc_data:
|
|
|
|
ralloc_free(device->compiler);
|
|
|
|
|
|
|
|
fail_queues:
|
2018-08-08 23:23:57 +01:00
|
|
|
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
|
|
|
|
for (unsigned q = 0; q < device->queue_count[i]; q++)
|
|
|
|
tu_queue_finish(&device->queues[i][q]);
|
|
|
|
if (device->queue_count[i])
|
|
|
|
vk_free(&device->alloc, device->queues[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
vk_free(&device->alloc, device);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
|
|
|
|
if (!device)
|
|
|
|
return;
|
|
|
|
|
2020-04-25 17:51:09 +01:00
|
|
|
tu_bo_finish(device, &device->vsc_draw_strm);
|
|
|
|
tu_bo_finish(device, &device->vsc_prim_strm);
|
2020-01-28 16:30:44 +00:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
|
|
|
|
for (unsigned q = 0; q < device->queue_count[i]; q++)
|
|
|
|
tu_queue_finish(&device->queues[i][q]);
|
|
|
|
if (device->queue_count[i])
|
|
|
|
vk_free(&device->alloc, device->queues[i]);
|
|
|
|
}
|
|
|
|
|
2020-05-11 17:46:04 +01:00
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
|
|
|
|
if (device->scratch_bos[i].initialized)
|
|
|
|
tu_bo_finish(device, &device->scratch_bos[i].bo);
|
|
|
|
}
|
|
|
|
|
2020-06-04 20:55:41 +01:00
|
|
|
ir3_compiler_destroy(device->compiler);
|
2019-02-20 17:53:47 +00:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
|
|
|
|
tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
|
|
|
|
|
|
|
|
vk_free(&device->alloc, device);
|
|
|
|
}
|
|
|
|
|
2020-06-17 23:58:33 +01:00
|
|
|
/* Mark the device as lost (callers normally invoke this via a macro that
 * supplies __FILE__/__LINE__), log the printf-style reason to stderr, and
 * return VK_ERROR_DEVICE_LOST so callers can propagate it directly.
 */
VkResult
_tu_device_set_lost(struct tu_device *device,
                    const char *file, int line,
                    const char *msg, ...)
{
   /* Set the flag indicating that waits should return in finite time even
    * after device loss.
    */
   p_atomic_inc(&device->_lost);

   /* TODO: Report the log message through VkDebugReportCallbackEXT instead */
   fprintf(stderr, "%s:%d: ", file, line);
   va_list ap;
   va_start(ap, msg);
   vfprintf(stderr, msg, ap);
   va_end(ap);

   /* Debug aid: crash immediately at the point of loss when requested. */
   if (env_var_as_boolean("TU_ABORT_ON_DEVICE_LOSS", false))
      abort();

   return VK_ERROR_DEVICE_LOST;
}
|
|
|
|
|
2020-05-11 17:46:04 +01:00
|
|
|
VkResult
|
|
|
|
tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
|
|
|
|
{
|
|
|
|
unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
|
|
|
|
unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
|
|
|
|
assert(index < ARRAY_SIZE(dev->scratch_bos));
|
|
|
|
|
|
|
|
for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
|
|
|
|
if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
|
|
|
|
/* Fast path: just return the already-allocated BO. */
|
|
|
|
*bo = &dev->scratch_bos[i].bo;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Slow path: actually allocate the BO. We take a lock because the process
|
|
|
|
* of allocating it is slow, and we don't want to block the CPU while it
|
|
|
|
* finishes.
|
|
|
|
*/
|
|
|
|
mtx_lock(&dev->scratch_bos[index].construct_mtx);
|
|
|
|
|
|
|
|
/* Another thread may have allocated it already while we were waiting on
|
|
|
|
* the lock. We need to check this in order to avoid double-allocating.
|
|
|
|
*/
|
|
|
|
if (dev->scratch_bos[index].initialized) {
|
|
|
|
mtx_unlock(&dev->scratch_bos[index].construct_mtx);
|
|
|
|
*bo = &dev->scratch_bos[index].bo;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned bo_size = 1ull << size_log2;
|
|
|
|
VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
|
|
|
|
if (result != VK_SUCCESS) {
|
|
|
|
mtx_unlock(&dev->scratch_bos[index].construct_mtx);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
p_atomic_set(&dev->scratch_bos[index].initialized, true);
|
|
|
|
|
|
|
|
mtx_unlock(&dev->scratch_bos[index].construct_mtx);
|
|
|
|
|
|
|
|
*bo = &dev->scratch_bos[index].bo;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
VkResult
|
|
|
|
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkLayerProperties *pProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2018-11-07 06:52:57 +00:00
|
|
|
*pPropertyCount = 0;
|
|
|
|
return VK_SUCCESS;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t *pPropertyCount,
|
|
|
|
VkLayerProperties *pProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2018-11-07 06:52:57 +00:00
|
|
|
*pPropertyCount = 0;
|
|
|
|
return VK_SUCCESS;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetDeviceQueue2(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkDeviceQueueInfo2 *pQueueInfo,
|
|
|
|
VkQueue *pQueue)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
struct tu_queue *queue;
|
|
|
|
|
|
|
|
queue =
|
2019-01-09 22:16:01 +00:00
|
|
|
&device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
|
2018-08-08 23:23:57 +01:00
|
|
|
if (pQueueInfo->flags != queue->flags) {
|
|
|
|
/* From the Vulkan 1.1.70 spec:
|
|
|
|
*
|
|
|
|
* "The queue returned by vkGetDeviceQueue2 must have the same
|
|
|
|
* flags value from this structure as that used at device
|
|
|
|
* creation time in a VkDeviceQueueCreateInfo instance. If no
|
|
|
|
* matching flags were specified at device creation time then
|
|
|
|
* pQueue will return VK_NULL_HANDLE."
|
|
|
|
*/
|
|
|
|
*pQueue = VK_NULL_HANDLE;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
*pQueue = tu_queue_to_handle(queue);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetDeviceQueue(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t queueFamilyIndex,
|
|
|
|
uint32_t queueIndex,
|
|
|
|
VkQueue *pQueue)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
const VkDeviceQueueInfo2 info =
|
2019-01-09 22:16:01 +00:00
|
|
|
(VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
|
|
|
|
.queueFamilyIndex = queueFamilyIndex,
|
|
|
|
.queueIndex = queueIndex };
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
tu_GetDeviceQueue2(_device, &info, pQueue);
|
|
|
|
}
|
|
|
|
|
2019-11-17 05:23:15 +00:00
|
|
|
static VkResult
|
|
|
|
tu_get_semaphore_syncobjs(const VkSemaphore *sems,
|
|
|
|
uint32_t sem_count,
|
|
|
|
bool wait,
|
|
|
|
struct drm_msm_gem_submit_syncobj **out,
|
|
|
|
uint32_t *out_count)
|
|
|
|
{
|
|
|
|
uint32_t syncobj_count = 0;
|
|
|
|
struct drm_msm_gem_submit_syncobj *syncobjs;
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < sem_count; ++i) {
|
|
|
|
TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
|
|
|
|
|
|
|
|
struct tu_semaphore_part *part =
|
|
|
|
sem->temporary.kind != TU_SEMAPHORE_NONE ?
|
|
|
|
&sem->temporary : &sem->permanent;
|
|
|
|
|
|
|
|
if (part->kind == TU_SEMAPHORE_SYNCOBJ)
|
|
|
|
++syncobj_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
*out = NULL;
|
|
|
|
*out_count = syncobj_count;
|
|
|
|
if (!syncobj_count)
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
|
|
|
*out = syncobjs = calloc(syncobj_count, sizeof (*syncobjs));
|
|
|
|
if (!syncobjs)
|
|
|
|
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
|
|
|
|
|
|
|
for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
|
|
|
|
TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
|
|
|
|
|
|
|
|
struct tu_semaphore_part *part =
|
|
|
|
sem->temporary.kind != TU_SEMAPHORE_NONE ?
|
|
|
|
&sem->temporary : &sem->permanent;
|
|
|
|
|
|
|
|
if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
|
|
|
|
syncobjs[j].handle = part->syncobj;
|
|
|
|
syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
|
|
|
|
++j;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
tu_semaphores_remove_temp(struct tu_device *device,
|
|
|
|
const VkSemaphore *sems,
|
|
|
|
uint32_t sem_count)
|
|
|
|
{
|
|
|
|
for (uint32_t i = 0; i < sem_count; ++i) {
|
|
|
|
TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
|
|
|
|
tu_semaphore_remove_temp(device, sem);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
VkResult
|
|
|
|
tu_QueueSubmit(VkQueue _queue,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t submitCount,
|
|
|
|
const VkSubmitInfo *pSubmits,
|
|
|
|
VkFence _fence)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2019-01-10 20:25:20 +00:00
|
|
|
TU_FROM_HANDLE(tu_queue, queue, _queue);
|
2019-11-17 05:23:15 +00:00
|
|
|
VkResult result;
|
2019-01-10 20:25:20 +00:00
|
|
|
|
|
|
|
for (uint32_t i = 0; i < submitCount; ++i) {
|
|
|
|
const VkSubmitInfo *submit = pSubmits + i;
|
2019-01-10 22:07:50 +00:00
|
|
|
const bool last_submit = (i == submitCount - 1);
|
2019-11-17 05:23:15 +00:00
|
|
|
struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL, *out_syncobjs = NULL;
|
|
|
|
uint32_t nr_in_syncobjs, nr_out_syncobjs;
|
2019-01-10 20:25:20 +00:00
|
|
|
struct tu_bo_list bo_list;
|
|
|
|
tu_bo_list_init(&bo_list);
|
|
|
|
|
2019-11-17 05:23:15 +00:00
|
|
|
result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
|
|
|
|
pSubmits[i].waitSemaphoreCount,
|
|
|
|
false, &in_syncobjs, &nr_in_syncobjs);
|
|
|
|
if (result != VK_SUCCESS) {
|
2020-06-17 23:58:33 +01:00
|
|
|
return tu_device_set_lost(queue->device,
|
|
|
|
"failed to allocate space for semaphore submission\n");
|
2019-11-17 05:23:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
|
|
|
|
pSubmits[i].signalSemaphoreCount,
|
|
|
|
false, &out_syncobjs, &nr_out_syncobjs);
|
|
|
|
if (result != VK_SUCCESS) {
|
2020-06-17 23:58:33 +01:00
|
|
|
free(in_syncobjs);
|
|
|
|
return tu_device_set_lost(queue->device,
|
|
|
|
"failed to allocate space for semaphore submission\n");
|
2019-11-17 05:23:15 +00:00
|
|
|
}
|
|
|
|
|
2019-01-10 20:25:20 +00:00
|
|
|
uint32_t entry_count = 0;
|
2019-01-16 18:03:02 +00:00
|
|
|
for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
|
2019-01-10 20:25:20 +00:00
|
|
|
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
|
2019-01-10 21:07:04 +00:00
|
|
|
entry_count += cmdbuf->cs.entry_count;
|
2019-01-10 20:25:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
struct drm_msm_gem_submit_cmd cmds[entry_count];
|
|
|
|
uint32_t entry_idx = 0;
|
2019-01-16 18:03:02 +00:00
|
|
|
for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
|
2019-01-10 20:25:20 +00:00
|
|
|
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
|
2019-01-16 18:03:02 +00:00
|
|
|
struct tu_cs *cs = &cmdbuf->cs;
|
|
|
|
for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
|
2019-01-10 20:25:20 +00:00
|
|
|
cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
|
2019-02-25 22:32:36 +00:00
|
|
|
cmds[entry_idx].submit_idx =
|
|
|
|
tu_bo_list_add(&bo_list, cs->entries[i].bo,
|
|
|
|
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
|
2019-01-16 18:03:02 +00:00
|
|
|
cmds[entry_idx].submit_offset = cs->entries[i].offset;
|
|
|
|
cmds[entry_idx].size = cs->entries[i].size;
|
2019-01-10 20:25:20 +00:00
|
|
|
cmds[entry_idx].pad = 0;
|
|
|
|
cmds[entry_idx].nr_relocs = 0;
|
|
|
|
cmds[entry_idx].relocs = 0;
|
|
|
|
}
|
2019-01-17 19:15:39 +00:00
|
|
|
|
|
|
|
tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
|
2019-01-10 20:25:20 +00:00
|
|
|
}
|
|
|
|
|
2019-01-10 22:07:50 +00:00
|
|
|
uint32_t flags = MSM_PIPE_3D0;
|
2019-11-17 05:23:15 +00:00
|
|
|
if (nr_in_syncobjs) {
|
|
|
|
flags |= MSM_SUBMIT_SYNCOBJ_IN;
|
|
|
|
}
|
|
|
|
if (nr_out_syncobjs) {
|
|
|
|
flags |= MSM_SUBMIT_SYNCOBJ_OUT;
|
|
|
|
}
|
|
|
|
|
2019-01-10 22:07:50 +00:00
|
|
|
if (last_submit) {
|
|
|
|
flags |= MSM_SUBMIT_FENCE_FD_OUT;
|
|
|
|
}
|
|
|
|
|
2019-01-10 20:25:20 +00:00
|
|
|
struct drm_msm_gem_submit req = {
|
2019-01-10 22:07:50 +00:00
|
|
|
.flags = flags,
|
2019-01-10 20:25:20 +00:00
|
|
|
.queueid = queue->msm_queue_id,
|
2019-01-17 18:23:19 +00:00
|
|
|
.bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
|
2019-01-10 20:25:20 +00:00
|
|
|
.nr_bos = bo_list.count,
|
|
|
|
.cmds = (uint64_t)(uintptr_t)cmds,
|
|
|
|
.nr_cmds = entry_count,
|
2019-11-17 05:23:15 +00:00
|
|
|
.in_syncobjs = (uint64_t)(uintptr_t)in_syncobjs,
|
|
|
|
.out_syncobjs = (uint64_t)(uintptr_t)out_syncobjs,
|
|
|
|
.nr_in_syncobjs = nr_in_syncobjs,
|
|
|
|
.nr_out_syncobjs = nr_out_syncobjs,
|
|
|
|
.syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
|
2019-01-10 20:25:20 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
|
|
|
|
DRM_MSM_GEM_SUBMIT,
|
|
|
|
&req, sizeof(req));
|
|
|
|
if (ret) {
|
2020-06-17 23:58:33 +01:00
|
|
|
free(in_syncobjs);
|
|
|
|
free(out_syncobjs);
|
|
|
|
return tu_device_set_lost(queue->device, "submit failed: %s\n",
|
|
|
|
strerror(errno));
|
2019-01-10 20:25:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
tu_bo_list_destroy(&bo_list);
|
2019-11-17 05:23:15 +00:00
|
|
|
free(in_syncobjs);
|
|
|
|
free(out_syncobjs);
|
2019-01-10 22:07:50 +00:00
|
|
|
|
2019-11-17 05:23:15 +00:00
|
|
|
tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
|
|
|
|
pSubmits[i].waitSemaphoreCount);
|
2019-01-10 22:07:50 +00:00
|
|
|
if (last_submit) {
|
|
|
|
/* no need to merge fences as queue execution is serialized */
|
2019-02-14 22:36:52 +00:00
|
|
|
tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
|
2019-11-17 05:23:15 +00:00
|
|
|
} else if (last_submit) {
|
|
|
|
close(req.fence_fd);
|
2019-01-10 22:07:50 +00:00
|
|
|
}
|
2019-01-10 20:25:20 +00:00
|
|
|
}
|
2019-02-14 22:36:52 +00:00
|
|
|
|
|
|
|
if (_fence != VK_NULL_HANDLE) {
|
|
|
|
TU_FROM_HANDLE(tu_fence, fence, _fence);
|
|
|
|
tu_fence_copy(fence, &queue->submit_fence);
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_QueueWaitIdle(VkQueue _queue)
|
|
|
|
{
|
2019-01-10 22:07:50 +00:00
|
|
|
TU_FROM_HANDLE(tu_queue, queue, _queue);
|
|
|
|
|
2020-06-17 23:58:33 +01:00
|
|
|
if (tu_device_is_lost(queue->device))
|
|
|
|
return VK_ERROR_DEVICE_LOST;
|
|
|
|
|
2019-02-14 22:36:52 +00:00
|
|
|
tu_fence_wait_idle(&queue->submit_fence);
|
2019-01-10 22:07:50 +00:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_DeviceWaitIdle(VkDevice _device)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
|
2020-06-17 23:58:33 +01:00
|
|
|
if (tu_device_is_lost(device))
|
|
|
|
return VK_ERROR_DEVICE_LOST;
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
|
|
|
|
for (unsigned q = 0; q < device->queue_count[i]; q++) {
|
|
|
|
tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t *pPropertyCount,
|
|
|
|
VkExtensionProperties *pProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
|
|
|
|
|
2018-11-07 07:01:03 +00:00
|
|
|
/* We spport no lyaers */
|
|
|
|
if (pLayerName)
|
|
|
|
return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
|
2020-05-12 15:17:31 +01:00
|
|
|
if (tu_instance_extensions_supported.extensions[i]) {
|
2018-08-08 23:23:57 +01:00
|
|
|
vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return vk_outarray_status(&out);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
|
2018-11-05 06:42:55 +00:00
|
|
|
const char *pLayerName,
|
|
|
|
uint32_t *pPropertyCount,
|
|
|
|
VkExtensionProperties *pProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2018-11-07 07:01:03 +00:00
|
|
|
/* We spport no lyaers */
|
2018-08-08 23:23:57 +01:00
|
|
|
TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
|
|
|
|
VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
|
|
|
|
|
2018-11-07 07:01:03 +00:00
|
|
|
/* We spport no lyaers */
|
|
|
|
if (pLayerName)
|
|
|
|
return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
|
|
|
|
if (device->supported_extensions.extensions[i]) {
|
|
|
|
vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return vk_outarray_status(&out);
|
|
|
|
}
|
|
|
|
|
|
|
|
PFN_vkVoidFunction
|
|
|
|
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_instance, instance, _instance);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
return tu_lookup_entrypoint_checked(
|
|
|
|
pName, instance ? instance->api_version : 0,
|
|
|
|
instance ? &instance->enabled_extensions : NULL, NULL);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* The loader wants us to expose a second GetInstanceProcAddr function
|
|
|
|
* to work around certain LD_PRELOAD issues seen in apps.
|
|
|
|
*/
|
|
|
|
PUBLIC
|
|
|
|
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
|
|
|
|
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
|
|
|
|
|
|
|
|
PUBLIC
|
|
|
|
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
|
|
|
|
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
|
|
|
|
{
|
|
|
|
return tu_GetInstanceProcAddr(instance, pName);
|
|
|
|
}
|
|
|
|
|
|
|
|
PFN_vkVoidFunction
|
|
|
|
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
|
|
|
|
&device->instance->enabled_extensions,
|
|
|
|
&device->enabled_extensions);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static VkResult
|
|
|
|
tu_alloc_memory(struct tu_device *device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkMemoryAllocateInfo *pAllocateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkDeviceMemory *pMem)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
struct tu_device_memory *mem;
|
2018-11-07 04:26:45 +00:00
|
|
|
VkResult result;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
|
|
|
|
|
|
|
|
if (pAllocateInfo->allocationSize == 0) {
|
|
|
|
/* Apparently, this is allowed */
|
|
|
|
*pMem = VK_NULL_HANDLE;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
|
2018-08-08 23:23:57 +01:00
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
if (mem == NULL)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
2019-02-01 18:36:19 +00:00
|
|
|
const VkImportMemoryFdInfoKHR *fd_info =
|
|
|
|
vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
|
|
|
|
if (fd_info && !fd_info->handleType)
|
|
|
|
fd_info = NULL;
|
|
|
|
|
|
|
|
if (fd_info) {
|
|
|
|
assert(fd_info->handleType ==
|
|
|
|
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
|
|
|
|
fd_info->handleType ==
|
|
|
|
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* TODO Importing the same fd twice gives us the same handle without
|
|
|
|
* reference counting. We need to maintain a per-instance handle-to-bo
|
|
|
|
* table and add reference count to tu_bo.
|
|
|
|
*/
|
|
|
|
result = tu_bo_init_dmabuf(device, &mem->bo,
|
|
|
|
pAllocateInfo->allocationSize, fd_info->fd);
|
|
|
|
if (result == VK_SUCCESS) {
|
|
|
|
/* take ownership and close the fd */
|
|
|
|
close(fd_info->fd);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
result =
|
|
|
|
tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
|
|
|
|
}
|
|
|
|
|
2018-12-20 21:57:07 +00:00
|
|
|
if (result != VK_SUCCESS) {
|
2018-08-10 12:19:22 +01:00
|
|
|
vk_free2(&device->alloc, pAllocator, mem);
|
2018-11-07 04:26:45 +00:00
|
|
|
return result;
|
2018-08-10 12:19:22 +01:00
|
|
|
}
|
2018-11-07 04:26:45 +00:00
|
|
|
|
2018-08-10 12:19:22 +01:00
|
|
|
mem->size = pAllocateInfo->allocationSize;
|
|
|
|
mem->type_index = pAllocateInfo->memoryTypeIndex;
|
|
|
|
|
|
|
|
mem->map = NULL;
|
|
|
|
mem->user_ptr = NULL;
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
*pMem = tu_device_memory_to_handle(mem);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_AllocateMemory(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkMemoryAllocateInfo *pAllocateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkDeviceMemory *pMem)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_FreeMemory(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDeviceMemory _mem,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_device_memory, mem, _mem);
|
|
|
|
|
|
|
|
if (mem == NULL)
|
|
|
|
return;
|
|
|
|
|
2018-11-07 04:26:45 +00:00
|
|
|
tu_bo_finish(device, &mem->bo);
|
2018-08-08 23:23:57 +01:00
|
|
|
vk_free2(&device->alloc, pAllocator, mem);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_MapMemory(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDeviceMemory _memory,
|
|
|
|
VkDeviceSize offset,
|
|
|
|
VkDeviceSize size,
|
|
|
|
VkMemoryMapFlags flags,
|
|
|
|
void **ppData)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_device_memory, mem, _memory);
|
2018-11-07 04:26:45 +00:00
|
|
|
VkResult result;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
if (mem == NULL) {
|
|
|
|
*ppData = NULL;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-08-10 12:19:22 +01:00
|
|
|
if (mem->user_ptr) {
|
2018-08-08 23:23:57 +01:00
|
|
|
*ppData = mem->user_ptr;
|
2019-01-09 22:16:01 +00:00
|
|
|
} else if (!mem->map) {
|
2018-11-07 04:26:45 +00:00
|
|
|
result = tu_bo_map(device, &mem->bo);
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
return result;
|
2018-12-20 23:54:15 +00:00
|
|
|
*ppData = mem->map = mem->bo.map;
|
2018-08-10 12:19:22 +01:00
|
|
|
} else
|
|
|
|
*ppData = mem->map;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
if (*ppData) {
|
|
|
|
*ppData += offset;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
|
|
|
|
{
|
2018-08-10 12:19:22 +01:00
|
|
|
/* I do not see any unmapping done by the freedreno Gallium driver. */
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_FlushMappedMemoryRanges(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t memoryRangeCount,
|
|
|
|
const VkMappedMemoryRange *pMemoryRanges)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_InvalidateMappedMemoryRanges(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t memoryRangeCount,
|
|
|
|
const VkMappedMemoryRange *pMemoryRanges)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetBufferMemoryRequirements(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkBuffer _buffer,
|
|
|
|
VkMemoryRequirements *pMemoryRequirements)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
|
|
|
|
|
2018-08-17 13:43:01 +01:00
|
|
|
pMemoryRequirements->memoryTypeBits = 1;
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
pMemoryRequirements->alignment = 64;
|
2018-08-08 23:23:57 +01:00
|
|
|
pMemoryRequirements->size =
|
2019-01-09 22:16:01 +00:00
|
|
|
align64(buffer->size, pMemoryRequirements->alignment);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetBufferMemoryRequirements2(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDevice device,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkBufferMemoryRequirementsInfo2 *pInfo,
|
|
|
|
VkMemoryRequirements2 *pMemoryRequirements)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2019-01-09 22:16:01 +00:00
|
|
|
tu_GetBufferMemoryRequirements(device, pInfo->buffer,
|
|
|
|
&pMemoryRequirements->memoryRequirements);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetImageMemoryRequirements(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkImage _image,
|
|
|
|
VkMemoryRequirements *pMemoryRequirements)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_image, image, _image);
|
|
|
|
|
2018-08-17 13:43:01 +01:00
|
|
|
pMemoryRequirements->memoryTypeBits = 1;
|
2019-11-26 20:29:19 +00:00
|
|
|
pMemoryRequirements->size = image->layout.size;
|
2020-03-27 11:02:59 +00:00
|
|
|
pMemoryRequirements->alignment = image->layout.base_align;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetImageMemoryRequirements2(VkDevice device,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkImageMemoryRequirementsInfo2 *pInfo,
|
|
|
|
VkMemoryRequirements2 *pMemoryRequirements)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2019-01-09 22:16:01 +00:00
|
|
|
tu_GetImageMemoryRequirements(device, pInfo->image,
|
|
|
|
&pMemoryRequirements->memoryRequirements);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetImageSparseMemoryRequirements(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDevice device,
|
|
|
|
VkImage image,
|
|
|
|
uint32_t *pSparseMemoryRequirementCount,
|
|
|
|
VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2018-11-12 22:42:36 +00:00
|
|
|
tu_stub();
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetImageSparseMemoryRequirements2(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDevice device,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkImageSparseMemoryRequirementsInfo2 *pInfo,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t *pSparseMemoryRequirementCount,
|
2019-02-02 01:08:51 +00:00
|
|
|
VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2018-11-12 22:42:36 +00:00
|
|
|
tu_stub();
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetDeviceMemoryCommitment(VkDevice device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDeviceMemory memory,
|
|
|
|
VkDeviceSize *pCommittedMemoryInBytes)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
*pCommittedMemoryInBytes = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_BindBufferMemory2(VkDevice device,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t bindInfoCount,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkBindBufferMemoryInfo *pBindInfos)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2019-01-15 21:54:15 +00:00
|
|
|
for (uint32_t i = 0; i < bindInfoCount; ++i) {
|
|
|
|
TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
|
|
|
|
TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
|
|
|
|
|
|
|
|
if (mem) {
|
|
|
|
buffer->bo = &mem->bo;
|
|
|
|
buffer->bo_offset = pBindInfos[i].memoryOffset;
|
|
|
|
} else {
|
|
|
|
buffer->bo = NULL;
|
|
|
|
}
|
|
|
|
}
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_BindBufferMemory(VkDevice device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkBuffer buffer,
|
|
|
|
VkDeviceMemory memory,
|
|
|
|
VkDeviceSize memoryOffset)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkBindBufferMemoryInfo info = {
|
|
|
|
.sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
|
2018-08-08 23:23:57 +01:00
|
|
|
.buffer = buffer,
|
|
|
|
.memory = memory,
|
|
|
|
.memoryOffset = memoryOffset
|
|
|
|
};
|
|
|
|
|
|
|
|
return tu_BindBufferMemory2(device, 1, &info);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_BindImageMemory2(VkDevice device,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t bindInfoCount,
|
2019-01-10 19:51:39 +00:00
|
|
|
const VkBindImageMemoryInfo *pBindInfos)
|
|
|
|
{
|
|
|
|
for (uint32_t i = 0; i < bindInfoCount; ++i) {
|
|
|
|
TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
|
|
|
|
TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
|
|
|
|
|
|
|
|
if (mem) {
|
|
|
|
image->bo = &mem->bo;
|
|
|
|
image->bo_offset = pBindInfos[i].memoryOffset;
|
|
|
|
} else {
|
|
|
|
image->bo = NULL;
|
|
|
|
image->bo_offset = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_BindImageMemory(VkDevice device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkImage image,
|
|
|
|
VkDeviceMemory memory,
|
|
|
|
VkDeviceSize memoryOffset)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2019-01-10 19:51:39 +00:00
|
|
|
const VkBindImageMemoryInfo info = {
|
2019-02-02 01:08:51 +00:00
|
|
|
.sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
|
2018-08-08 23:23:57 +01:00
|
|
|
.image = image,
|
|
|
|
.memory = memory,
|
|
|
|
.memoryOffset = memoryOffset
|
|
|
|
};
|
|
|
|
|
|
|
|
return tu_BindImageMemory2(device, 1, &info);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_QueueBindSparse(VkQueue _queue,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t bindInfoCount,
|
|
|
|
const VkBindSparseInfo *pBindInfo,
|
|
|
|
VkFence _fence)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Queue semaphore functions
|
|
|
|
|
2019-11-17 05:23:15 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
tu_semaphore_part_destroy(struct tu_device *device,
|
|
|
|
struct tu_semaphore_part *part)
|
|
|
|
{
|
|
|
|
switch(part->kind) {
|
|
|
|
case TU_SEMAPHORE_NONE:
|
|
|
|
break;
|
|
|
|
case TU_SEMAPHORE_SYNCOBJ:
|
|
|
|
drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
part->kind = TU_SEMAPHORE_NONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tu_semaphore_remove_temp(struct tu_device *device,
|
|
|
|
struct tu_semaphore *sem)
|
|
|
|
{
|
|
|
|
if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
|
|
|
|
tu_semaphore_part_destroy(device, &sem->temporary);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
VkResult
|
|
|
|
tu_CreateSemaphore(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkSemaphoreCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkSemaphore *pSemaphore)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
struct tu_semaphore *sem =
|
|
|
|
vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (!sem)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
2019-11-17 05:23:15 +00:00
|
|
|
const VkExportSemaphoreCreateInfo *export =
|
|
|
|
vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
|
|
|
|
VkExternalSemaphoreHandleTypeFlags handleTypes =
|
|
|
|
export ? export->handleTypes : 0;
|
|
|
|
|
|
|
|
sem->permanent.kind = TU_SEMAPHORE_NONE;
|
|
|
|
sem->temporary.kind = TU_SEMAPHORE_NONE;
|
|
|
|
|
|
|
|
if (handleTypes) {
|
|
|
|
if (drmSyncobjCreate(device->physical_device->local_fd, 0, &sem->permanent.syncobj) < 0) {
|
|
|
|
vk_free2(&device->alloc, pAllocator, sem);
|
|
|
|
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
|
|
|
}
|
|
|
|
sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
|
|
|
|
}
|
2018-08-08 23:23:57 +01:00
|
|
|
*pSemaphore = tu_semaphore_to_handle(sem);
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroySemaphore(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkSemaphore _semaphore,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
|
|
|
|
if (!_semaphore)
|
|
|
|
return;
|
|
|
|
|
2019-11-17 05:23:15 +00:00
|
|
|
tu_semaphore_part_destroy(device, &sem->permanent);
|
|
|
|
tu_semaphore_part_destroy(device, &sem->temporary);
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
vk_free2(&device->alloc, pAllocator, sem);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateEvent(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkEventCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkEvent *pEvent)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
2019-01-09 22:16:01 +00:00
|
|
|
struct tu_event *event =
|
|
|
|
vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
if (!event)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
2019-10-14 16:24:27 +01:00
|
|
|
VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
goto fail_alloc;
|
|
|
|
|
|
|
|
result = tu_bo_map(device, &event->bo);
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
goto fail_map;
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
*pEvent = tu_event_to_handle(event);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
2019-10-14 16:24:27 +01:00
|
|
|
|
|
|
|
fail_map:
|
|
|
|
tu_bo_finish(device, &event->bo);
|
|
|
|
fail_alloc:
|
|
|
|
vk_free2(&device->alloc, pAllocator, event);
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyEvent(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkEvent _event,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_event, event, _event);
|
|
|
|
|
|
|
|
if (!event)
|
|
|
|
return;
|
2020-01-30 16:02:29 +00:00
|
|
|
|
|
|
|
tu_bo_finish(device, &event->bo);
|
2018-08-08 23:23:57 +01:00
|
|
|
vk_free2(&device->alloc, pAllocator, event);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_GetEventStatus(VkDevice _device, VkEvent _event)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_event, event, _event);
|
|
|
|
|
2019-10-14 16:24:27 +01:00
|
|
|
if (*(uint64_t*) event->bo.map == 1)
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_EVENT_SET;
|
|
|
|
return VK_EVENT_RESET;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_SetEvent(VkDevice _device, VkEvent _event)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_event, event, _event);
|
2019-10-14 16:24:27 +01:00
|
|
|
*(uint64_t*) event->bo.map = 1;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_ResetEvent(VkDevice _device, VkEvent _event)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_event, event, _event);
|
2019-10-14 16:24:27 +01:00
|
|
|
*(uint64_t*) event->bo.map = 0;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateBuffer(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkBufferCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkBuffer *pBuffer)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
struct tu_buffer *buffer;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
|
2018-08-08 23:23:57 +01:00
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
if (buffer == NULL)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
buffer->size = pCreateInfo->size;
|
|
|
|
buffer->usage = pCreateInfo->usage;
|
|
|
|
buffer->flags = pCreateInfo->flags;
|
|
|
|
|
|
|
|
*pBuffer = tu_buffer_to_handle(buffer);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyBuffer(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkBuffer _buffer,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
|
|
|
|
|
|
|
|
if (!buffer)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vk_free2(&device->alloc, pAllocator, buffer);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateFramebuffer(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkFramebufferCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkFramebuffer *pFramebuffer)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
struct tu_framebuffer *framebuffer;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
|
|
|
|
pCreateInfo->attachmentCount;
|
|
|
|
framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (framebuffer == NULL)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
framebuffer->attachment_count = pCreateInfo->attachmentCount;
|
|
|
|
framebuffer->width = pCreateInfo->width;
|
|
|
|
framebuffer->height = pCreateInfo->height;
|
|
|
|
framebuffer->layers = pCreateInfo->layers;
|
|
|
|
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
|
|
|
|
VkImageView _iview = pCreateInfo->pAttachments[i];
|
|
|
|
struct tu_image_view *iview = tu_image_view_from_handle(_iview);
|
|
|
|
framebuffer->attachments[i].attachment = iview;
|
|
|
|
}
|
|
|
|
|
|
|
|
*pFramebuffer = tu_framebuffer_to_handle(framebuffer);
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyFramebuffer(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkFramebuffer _fb,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
|
|
|
|
|
|
|
|
if (!fb)
|
|
|
|
return;
|
|
|
|
vk_free2(&device->alloc, pAllocator, fb);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Translate a VkSamplerCreateInfo into the four-dword A6XX TEX_SAMP
 * hardware descriptor stored in sampler->descriptor, and resolve an
 * optional Ycbcr conversion attached via the pNext chain.
 */
static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
   /* Optional extension structs on the pNext chain. */
   const struct VkSamplerReductionModeCreateInfo *reduction =
      vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
   const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
      vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);

   /* Anisotropy field: log2-style encoding clamped to 8x (>> 1 then
    * util_last_bit); zero when anisotropy is disabled.
    */
   unsigned aniso = pCreateInfo->anisotropyEnable ?
      util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
   bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
   /* LOD fields are 12-bit fixed point with 8 fractional bits, hence the
    * 4095/256 upper clamp.
    */
   float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
   float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);

   sampler->descriptor[0] =
      COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
      A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
      A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
      A6XX_TEX_SAMP_0_ANISO(aniso) |
      A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
      A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
      A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
      A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
   sampler->descriptor[1] =
      /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
      COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
      A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
      A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
      COND(pCreateInfo->compareEnable,
           A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
   /* This is an offset into the border_color BO, which we fill with all the
    * possible Vulkan border colors in the correct order, so we can just use
    * the Vulkan enum with no translation necessary.
    */
   sampler->descriptor[2] =
      A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
                                    sizeof(struct bcolor_entry));
   sampler->descriptor[3] = 0;

   if (reduction) {
      /* VK_EXT_sampler_filter_minmax: min/max/weighted-average filtering. */
      sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
         tu6_reduction_mode(reduction->reductionMode));
   }

   sampler->ycbcr_sampler = ycbcr_conversion ?
      tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;

   if (sampler->ycbcr_sampler &&
       sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
      sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
   }

   /* TODO:
    * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
    */
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateSampler(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkSamplerCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkSampler *pSampler)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
struct tu_sampler *sampler;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
|
2018-08-08 23:23:57 +01:00
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
if (!sampler)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
tu_init_sampler(device, sampler, pCreateInfo);
|
|
|
|
*pSampler = tu_sampler_to_handle(sampler);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroySampler(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkSampler _sampler,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
|
|
|
|
|
|
|
|
if (!sampler)
|
|
|
|
return;
|
|
|
|
vk_free2(&device->alloc, pAllocator, sampler);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    *   - Loader interface v0 is incompatible with later versions. We don't
    *     support it.
    *
    *   - In loader interface v1:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *         entrypoint.
    *       - The ICD must statically expose no other Vulkan symbol unless it
    *         is linked with -Bsymbolic.
    *       - Each dispatchable Vulkan handle created by the ICD must be
    *         a pointer to a struct whose first member is VK_LOADER_DATA. The
    *         ICD must initialize VK_LOADER_DATA.loadMagic to
    *         ICD_LOADER_MAGIC.
    *       - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *         vkDestroySurfaceKHR(). The ICD must be capable of working with
    *         such loader-managed surfaces.
    *
    *    - Loader interface v2 differs from v1 in:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *         statically expose this entrypoint.
    *
    *    - Loader interface v3 differs from v2 in:
    *        - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *          vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
    *          because the loader no longer does so.
    */
   /* We support up to v3; the negotiated version is the minimum of what the
    * loader asked for and what we implement.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}
|
|
|
|
|
2019-02-01 18:36:19 +00:00
|
|
|
VkResult
|
|
|
|
tu_GetMemoryFdKHR(VkDevice _device,
|
|
|
|
const VkMemoryGetFdInfoKHR *pGetFdInfo,
|
|
|
|
int *pFd)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
|
|
|
|
|
|
|
|
assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
|
|
|
|
|
|
|
|
/* At the moment, we support only the below handle types. */
|
|
|
|
assert(pGetFdInfo->handleType ==
|
|
|
|
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
|
|
|
|
pGetFdInfo->handleType ==
|
|
|
|
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
|
|
|
|
|
|
|
|
int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
|
|
|
|
if (prime_fd < 0)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
|
|
|
|
|
|
|
*pFd = prime_fd;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Report which memory types an imported fd can be bound to. This driver
 * exposes a single memory type, so any importable dma-buf fits type 0.
 */
VkResult
tu_GetMemoryFdPropertiesKHR(VkDevice _device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   /* Only dma-buf import is supported. */
   assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
   /* Bit 0 set = memory type index 0 is compatible. */
   pMemoryFdProperties->memoryTypeBits = 1;
   return VK_SUCCESS;
}
|
|
|
|
|
2020-06-27 00:29:15 +01:00
|
|
|
/* Fence fd import is not implemented yet. tu_stub() flags the missing
 * entry point (presumably logs/warns — confirm in tu_private.h); the
 * import itself is silently ignored and VK_SUCCESS returned.
 */
VkResult
tu_ImportFenceFdKHR(VkDevice _device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
/* Fence fd export is not implemented yet. tu_stub() flags the missing
 * entry point; note *pFd is left unwritten, so callers receive no usable
 * descriptor despite the VK_SUCCESS return.
 */
VkResult
tu_GetFenceFdKHR(VkDevice _device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}
|
|
|
|
|
2019-11-17 05:23:15 +00:00
|
|
|
VkResult
|
|
|
|
tu_ImportSemaphoreFdKHR(VkDevice _device,
|
|
|
|
const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
|
|
|
|
int ret;
|
|
|
|
struct tu_semaphore_part *dst = NULL;
|
|
|
|
|
|
|
|
if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
|
|
|
|
dst = &sem->temporary;
|
|
|
|
} else {
|
|
|
|
dst = &sem->permanent;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
|
|
|
|
|
|
|
|
switch(pImportSemaphoreFdInfo->handleType) {
|
|
|
|
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
|
|
|
|
uint32_t old_syncobj = syncobj;
|
|
|
|
ret = drmSyncobjFDToHandle(device->physical_device->local_fd, pImportSemaphoreFdInfo->fd, &syncobj);
|
|
|
|
if (ret == 0) {
|
|
|
|
close(pImportSemaphoreFdInfo->fd);
|
|
|
|
if (old_syncobj)
|
|
|
|
drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
|
|
|
|
if (!syncobj) {
|
|
|
|
ret = drmSyncobjCreate(device->physical_device->local_fd, 0, &syncobj);
|
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (pImportSemaphoreFdInfo->fd == -1) {
|
|
|
|
ret = drmSyncobjSignal(device->physical_device->local_fd, &syncobj, 1);
|
|
|
|
} else {
|
|
|
|
ret = drmSyncobjImportSyncFile(device->physical_device->local_fd, syncobj, pImportSemaphoreFdInfo->fd);
|
|
|
|
}
|
|
|
|
if (!ret)
|
|
|
|
close(pImportSemaphoreFdInfo->fd);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
unreachable("Unhandled semaphore handle type");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
|
|
|
|
}
|
|
|
|
dst->syncobj = syncobj;
|
|
|
|
dst->kind = TU_SEMAPHORE_SYNCOBJ;
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Export the semaphore's current payload as a file descriptor. A temporary
 * (imported) payload takes precedence over the permanent one. For sync-file
 * exports the payload is consumed: the temporary part is destroyed, or the
 * permanent syncobj is reset, matching the "export resets payload"
 * semantics of VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT.
 */
VkResult
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
   int ret;
   uint32_t syncobj_handle;

   /* Pick the active payload: temporary if present, else permanent. Both
    * must be syncobj-backed to be exportable.
    */
   if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
      assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
      syncobj_handle = sem->temporary.syncobj;
   } else {
      assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
      syncobj_handle = sem->permanent.syncobj;
   }

   switch(pGetFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      /* Opaque fd: exports the whole syncobj; payload is untouched. */
      ret = drmSyncobjHandleToFD(device->physical_device->local_fd, syncobj_handle, pFd);
      break;
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync fd: exports only the current fence, then consumes it. */
      ret = drmSyncobjExportSyncFile(device->physical_device->local_fd, syncobj_handle, pFd);
      if (!ret) {
         if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
            /* Exporting consumes the temporary payload entirely. */
            tu_semaphore_part_destroy(device, &sem->temporary);
         } else {
            /* Reset the permanent syncobj back to unsignaled. */
            drmSyncobjReset(device->physical_device->local_fd, &syncobj_handle, 1);
         }
      }
      break;
   default:
      unreachable("Unhandled semaphore handle type");
   }

   if (ret)
      return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
|
|
|
|
static bool tu_has_syncobj(struct tu_physical_device *pdev)
|
|
|
|
{
|
|
|
|
uint64_t value;
|
|
|
|
if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
|
|
|
|
return false;
|
|
|
|
return value && pdev->msm_major_version == 1 && pdev->msm_minor_version >= 6;
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
void
|
|
|
|
tu_GetPhysicalDeviceExternalSemaphoreProperties(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkPhysicalDevice physicalDevice,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
|
|
|
|
VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2019-11-17 05:23:15 +00:00
|
|
|
TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);
|
|
|
|
|
|
|
|
if (tu_has_syncobj(pdev) &&
|
|
|
|
(pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
|
|
|
|
pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
|
|
|
|
pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
|
|
|
|
pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
|
|
|
|
pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
|
|
|
|
VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
|
|
|
|
} else {
|
|
|
|
pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
|
|
|
|
pExternalSemaphoreProperties->compatibleHandleTypes = 0;
|
|
|
|
pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
|
|
|
|
}
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetPhysicalDeviceExternalFenceProperties(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkPhysicalDevice physicalDevice,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
|
|
|
|
VkExternalFenceProperties *pExternalFenceProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
pExternalFenceProperties->exportFromImportedHandleTypes = 0;
|
|
|
|
pExternalFenceProperties->compatibleHandleTypes = 0;
|
|
|
|
pExternalFenceProperties->externalFenceFeatures = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Thin wrapper: registration of debug-report callbacks is handled entirely
 * by the shared vk_debug_report code; we just pass along our instance's
 * callback list and allocator.
 */
VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}
|
|
|
|
|
|
|
|
/* Thin wrapper over the shared vk_debug_report teardown for a callback
 * previously registered via tu_CreateDebugReportCallbackEXT().
 */
void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}
|
|
|
|
|
|
|
|
/* Thin wrapper: forwards an application-injected debug-report message to
 * every callback registered on this instance.
 */
void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetDeviceGroupPeerMemoryFeatures(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDevice device,
|
|
|
|
uint32_t heapIndex,
|
|
|
|
uint32_t localDeviceIndex,
|
|
|
|
uint32_t remoteDeviceIndex,
|
|
|
|
VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
assert(localDeviceIndex == remoteDeviceIndex);
|
|
|
|
|
|
|
|
*pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
|
|
|
|
VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
|
|
|
|
VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
|
|
|
|
VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
|
|
|
|
}
|
2020-04-21 17:14:23 +01:00
|
|
|
|
|
|
|
void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
|
|
|
|
VkPhysicalDevice physicalDevice,
|
|
|
|
VkSampleCountFlagBits samples,
|
|
|
|
VkMultisamplePropertiesEXT* pMultisampleProperties)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
|
|
|
|
|
|
|
|
if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
|
|
|
|
pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
|
|
|
|
else
|
|
|
|
pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
|
|
|
|
}
|