2018-08-08 23:23:57 +01:00
|
|
|
/*
|
|
|
|
* Copyright © 2016 Red Hat.
|
|
|
|
* Copyright © 2016 Bas Nieuwenhuizen
|
|
|
|
*
|
|
|
|
* based in part on anv driver which is:
|
|
|
|
* Copyright © 2015 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
2019-01-09 22:16:01 +00:00
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
|
|
* DEALINGS IN THE SOFTWARE.
|
2018-08-08 23:23:57 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include "tu_private.h"
|
2019-01-09 22:16:01 +00:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
#include <fcntl.h>
|
2019-01-10 22:07:50 +00:00
|
|
|
#include <libsync.h>
|
2018-08-08 23:23:57 +01:00
|
|
|
#include <stdbool.h>
|
|
|
|
#include <string.h>
|
2018-08-17 13:35:59 +01:00
|
|
|
#include <sys/sysinfo.h>
|
2018-08-08 23:23:57 +01:00
|
|
|
#include <unistd.h>
|
2019-01-09 22:16:01 +00:00
|
|
|
|
2019-09-18 13:11:47 +01:00
|
|
|
#include "compiler/glsl_types.h"
|
2019-01-09 22:16:01 +00:00
|
|
|
#include "util/debug.h"
|
|
|
|
#include "util/disk_cache.h"
|
2020-05-11 17:46:04 +01:00
|
|
|
#include "util/u_atomic.h"
|
2019-01-09 22:16:01 +00:00
|
|
|
#include "vk_format.h"
|
|
|
|
#include "vk_util.h"
|
2018-08-08 23:23:57 +01:00
|
|
|
|
2020-04-09 11:56:08 +01:00
|
|
|
/* for fd_get_driver/device_uuid() */
|
|
|
|
#include "freedreno/common/freedreno_uuid.h"
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
static int
|
|
|
|
tu_device_get_cache_uuid(uint16_t family, void *uuid)
|
|
|
|
{
|
|
|
|
uint32_t mesa_timestamp;
|
|
|
|
uint16_t f = family;
|
|
|
|
memset(uuid, 0, VK_UUID_SIZE);
|
|
|
|
if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
|
|
|
|
&mesa_timestamp))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
memcpy(uuid, &mesa_timestamp, 4);
|
2019-01-09 22:16:01 +00:00
|
|
|
memcpy((char *) uuid + 4, &f, 2);
|
|
|
|
snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
|
2018-08-08 23:23:57 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-07 18:28:49 +01:00
|
|
|
/* Fill in per-GPU configuration for an already-probed physical device
 * (device->gpu_id and the drm fds are set by the caller), create its
 * on-disk shader cache, and initialize WSI.  On failure the caller's fds
 * are closed here.  Returns VK_SUCCESS or an initialization error.
 */
VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance)
{
   VkResult result = VK_SUCCESS;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   device->limited_z24s8 = (device->gpu_id == 630);

   /* Per-GPU magic values: CCU offsets, tile alignment, and workaround
    * registers differ per a6xx variant.
    */
   switch (device->gpu_id) {
   case 615:
   case 618:
      device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
      device->ccu_offset_bypass = 0x10000;
      device->tile_align_w = 32;
      device->magic.PC_UNKNOWN_9805 = 0x0;
      device->magic.SP_UNKNOWN_A0F8 = 0x0;
      device->supports_multiview_mask = false; /* TODO */
      break;
   case 630:
   case 640:
      device->ccu_offset_gmem = 0xf8000;
      device->ccu_offset_bypass = 0x20000;
      device->tile_align_w = 32;
      device->magic.PC_UNKNOWN_9805 = 0x1;
      device->magic.SP_UNKNOWN_A0F8 = 0x1;
      device->supports_multiview_mask = device->gpu_id != 630;
      break;
   case 650:
      device->ccu_offset_gmem = 0x114000;
      device->ccu_offset_bypass = 0x30000;
      device->tile_align_w = 96;
      device->magic.PC_UNKNOWN_9805 = 0x2;
      device->magic.SP_UNKNOWN_A0F8 = 0x2;
      device->supports_multiview_mask = true;
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   fd_get_driver_uuid(device->driver_uuid);
   fd_get_device_uuid(device->device_uuid, device->gpu_id);

   tu_physical_device_get_supported_extensions(device, &device->supported_extensions);

   /* NOTE(review): result is always VK_SUCCESS here — every earlier failure
    * jumps to fail — so this check is dead code kept for safety.
    */
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   result = tu_wsi_init(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   /* The fds were opened by the caller; on failure we own closing them.
    * master_fd is -1 when no display master node was opened.
    */
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
   return result;
}
|
|
|
|
|
|
|
|
/* Tear down everything tu_physical_device_init() set up, in reverse-ish
 * order: WSI first (it may still reference the device), then the shader
 * disk cache, the drm fds, and finally the vk object base.
 */
static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   tu_wsi_finish(device);

   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   /* master_fd is -1 when no display master node was opened. */
   if (device->master_fd != -1)
      close(device->master_fd);

   vk_object_base_finish(&device->base);
}
|
|
|
|
|
2020-04-07 16:45:03 +01:00
|
|
|
/* Fallback VkAllocationCallbacks allocation hook used when the app
 * supplies no allocator.
 * NOTE(review): 'align' is ignored — presumably malloc's natural
 * alignment is assumed sufficient for all driver allocations; confirm no
 * caller requests over-aligned memory through this path.
 */
static VKAPI_ATTR void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}
|
|
|
|
|
2020-04-07 16:45:03 +01:00
|
|
|
/* Fallback VkAllocationCallbacks reallocation hook.
 * NOTE(review): like default_alloc_func, 'align' is ignored — verify no
 * over-aligned reallocation is ever requested through this path.
 */
static VKAPI_ATTR void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}
|
|
|
|
|
2020-04-07 16:45:03 +01:00
|
|
|
/* Fallback VkAllocationCallbacks free hook. */
static VKAPI_ATTR void
default_free_func(void *pUserData, void *pMemory)
{
   /* User data is unused; free(NULL) is a no-op so no guard is needed. */
   (void) pUserData;
   free(pMemory);
}
|
|
|
|
|
|
|
|
/* Allocator used whenever the application passes pAllocator == NULL;
 * thin wrappers around malloc/realloc/free with no user data.
 */
static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
/* Names accepted in the TU_DEBUG environment variable, mapped to their
 * TU_DEBUG_* flag bits.  The table is NULL-terminated for
 * parse_debug_string().
 */
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { "nobin", TU_DEBUG_NOBIN },
   { "sysmem", TU_DEBUG_SYSMEM },
   { "forcebin", TU_DEBUG_FORCEBIN },
   { "noubwc", TU_DEBUG_NOUBWC },
   { "nomultipos", TU_DEBUG_NOMULTIPOS },
   { "nolrz", TU_DEBUG_NOLRZ },
   { NULL, 0 }
};
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
const char *
|
|
|
|
tu_get_debug_option_name(int id)
|
|
|
|
{
|
|
|
|
assert(id < ARRAY_SIZE(tu_debug_options) - 1);
|
|
|
|
return tu_debug_options[id].string;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
tu_get_instance_extension_index(const char *name)
|
|
|
|
{
|
|
|
|
for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
|
|
|
|
if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkInstance *pInstance)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
struct tu_instance *instance;
|
|
|
|
VkResult result;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
|
|
|
|
|
|
|
|
uint32_t client_version;
|
|
|
|
if (pCreateInfo->pApplicationInfo &&
|
|
|
|
pCreateInfo->pApplicationInfo->apiVersion != 0) {
|
|
|
|
client_version = pCreateInfo->pApplicationInfo->apiVersion;
|
|
|
|
} else {
|
|
|
|
tu_EnumerateInstanceVersion(&client_version);
|
|
|
|
}
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
|
2018-08-08 23:23:57 +01:00
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
|
2020-07-13 04:08:15 +01:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
if (!instance)
|
|
|
|
return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
if (pAllocator)
|
|
|
|
instance->alloc = *pAllocator;
|
|
|
|
else
|
|
|
|
instance->alloc = default_alloc;
|
|
|
|
|
|
|
|
instance->api_version = client_version;
|
|
|
|
instance->physical_device_count = -1;
|
|
|
|
|
|
|
|
instance->debug_flags =
|
2019-01-09 22:16:01 +00:00
|
|
|
parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
if (instance->debug_flags & TU_DEBUG_STARTUP)
|
2020-09-21 21:02:14 +01:00
|
|
|
mesa_logi("Created an instance");
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
|
|
|
|
const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
|
|
|
|
int index = tu_get_instance_extension_index(ext_name);
|
|
|
|
|
2020-05-12 15:17:31 +01:00
|
|
|
if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_object_base_finish(&instance->base);
|
2018-08-08 23:23:57 +01:00
|
|
|
vk_free2(&default_alloc, pAllocator, instance);
|
|
|
|
return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
|
|
|
|
}
|
|
|
|
|
|
|
|
instance->enabled_extensions.extensions[index] = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
|
|
|
|
if (result != VK_SUCCESS) {
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_object_base_finish(&instance->base);
|
2018-08-08 23:23:57 +01:00
|
|
|
vk_free2(&default_alloc, pAllocator, instance);
|
|
|
|
return vk_error(instance, result);
|
|
|
|
}
|
|
|
|
|
2019-09-18 13:11:47 +01:00
|
|
|
glsl_type_singleton_init_or_ref();
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
|
|
|
|
|
|
|
|
*pInstance = tu_instance_to_handle(instance);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* vkDestroyInstance: finish every enumerated physical device, release
 * global singletons, and free the instance with the allocator captured at
 * creation time (pAllocator itself is unused here; instance->alloc already
 * holds the creation-time callbacks).
 */
void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   /* Destroying a NULL handle is a no-op per the Vulkan convention. */
   if (!instance)
      return;

   /* physical_device_count is -1 if enumeration never ran; the loop then
    * simply does not execute.
    */
   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   /* Drop the glsl_type reference taken in tu_CreateInstance. */
   glsl_type_singleton_decref();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_object_base_finish(&instance->base);
   vk_free(&instance->alloc, instance);
}
|
|
|
|
|
|
|
|
/* vkEnumeratePhysicalDevices: lazily probe devices on first call, then
 * write handles through the standard vk_outarray two-call pattern (count
 * query when pPhysicalDevices is NULL, fill otherwise).
 */
VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   /* -1 means enumeration has not run yet.  INCOMPATIBLE_DRIVER just
    * means "no supported device" and yields an empty list, not an error.
    */
   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   /* VK_SUCCESS, or VK_INCOMPLETE if the caller's array was too small. */
   return vk_outarray_status(&out);
}
|
|
|
|
|
|
|
|
/* vkEnumeratePhysicalDeviceGroups: each physical device forms its own
 * single-member group (no multi-GPU support).  Same lazy-enumeration and
 * vk_outarray two-call pattern as tu_EnumeratePhysicalDevices.
 */
VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   /* -1 means enumeration has not run yet; INCOMPATIBLE_DRIVER means "no
    * supported device" and yields zero groups rather than an error.
    */
   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         /* One device per group; no subset allocation across GPUs. */
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}
|
|
|
|
|
|
|
|
/* vkGetPhysicalDeviceFeatures2: report core Vulkan 1.0 features, then
 * walk the pNext chain and fill in every extension/core-version feature
 * struct this driver recognizes.  Unrecognized sTypes are left untouched
 * (default case), as the spec requires.
 */
void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2 *pFeatures)
{
   /* Core 1.0 feature support matrix for a6xx. */
   pFeatures->features = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = true,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .drawIndirectFirstInstance = true,
      .depthClamp = true,
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .depthBounds = true,
      .wideLines = false,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = true,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = true,
      .vertexPipelineStoresAndAtomics = true,
      .fragmentStoresAndAtomics = true,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = true,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = true,
      .shaderStorageBufferArrayDynamicIndexing = true,
      .shaderStorageImageArrayDynamicIndexing = true,
      .shaderStorageImageReadWithoutFormat = true,
      .shaderStorageImageWriteWithoutFormat = true,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };

   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      /* NOTE: the values in the Vulkan11/Vulkan12 aggregate structs must
       * stay in sync with the corresponding per-extension cases below.
       */
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
         VkPhysicalDeviceVulkan11Features *features = (void *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         features->multiview = true;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         features->variablePointersStorageBuffer = true;
         features->variablePointers = true;
         features->protectedMemory = false;
         features->samplerYcbcrConversion = true;
         features->shaderDrawParameters = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: {
         VkPhysicalDeviceVulkan12Features *features = (void *) ext;
         features->samplerMirrorClampToEdge = true;
         features->drawIndirectCount = true;
         features->storageBuffer8BitAccess = false;
         features->uniformAndStorageBuffer8BitAccess = false;
         features->storagePushConstant8 = false;
         features->shaderBufferInt64Atomics = false;
         features->shaderSharedInt64Atomics = false;
         features->shaderFloat16 = false;
         features->shaderInt8 = false;

         /* Descriptor indexing is not implemented. */
         features->descriptorIndexing = false;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;

         features->samplerFilterMinmax = true;
         features->scalarBlockLayout = false;
         features->imagelessFramebuffer = false;
         features->uniformBufferStandardLayout = false;
         features->shaderSubgroupExtendedTypes = false;
         features->separateDepthStencilLayouts = false;
         features->hostQueryReset = true;
         features->timelineSemaphore = false;
         features->bufferDeviceAddress = false;
         features->bufferDeviceAddressCaptureReplay = false;
         features->bufferDeviceAddressMultiDevice = false;
         features->vulkanMemoryModel = false;
         features->vulkanMemoryModelDeviceScope = false;
         features->vulkanMemoryModelAvailabilityVisibilityChains = false;
         features->shaderOutputViewportIndex = true;
         features->shaderOutputLayer = true;
         features->subgroupBroadcastDynamicId = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
         features->variablePointersStorageBuffer = true;
         features->variablePointers = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *) ext;
         features->multiview = true;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features =
            (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
         features->shaderDrawParameters = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         /* All false: descriptor indexing is not implemented. */
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = true;
         features->inheritedConditionalRendering = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
         VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
            (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
         features->transformFeedback = true;
         features->geometryStreams = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
         VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
            (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
         features->indexTypeUint8 = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
            (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
         features->vertexAttributeInstanceRateDivisor = true;
         features->vertexAttributeInstanceRateZeroDivisor = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
         VkPhysicalDevicePrivateDataFeaturesEXT *features =
            (VkPhysicalDevicePrivateDataFeaturesEXT *)ext;
         features->privateData = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
         VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
            (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
         features->depthClipEnable = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: {
         VkPhysicalDevice4444FormatsFeaturesEXT *features = (void *)ext;
         features->formatA4R4G4B4 = true;
         features->formatA4B4G4R4 = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT: {
         VkPhysicalDeviceCustomBorderColorFeaturesEXT *features = (void *) ext;
         features->customBorderColors = true;
         features->customBorderColorWithoutFormat = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT: {
         VkPhysicalDeviceHostQueryResetFeaturesEXT *features =
            (VkPhysicalDeviceHostQueryResetFeaturesEXT *)ext;
         features->hostQueryReset = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: {
         VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *features = (void *)ext;
         features->extendedDynamicState = true;
         break;
      }
      default:
         /* Per spec, leave unknown feature structs unmodified. */
         break;
      }
   }
}
|
|
|
|
|
|
|
|
void
|
2020-09-29 17:04:17 +01:00
|
|
|
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
|
|
|
|
VkPhysicalDeviceProperties2 *pProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
|
2020-04-08 03:20:10 +01:00
|
|
|
VkSampleCountFlags sample_counts =
|
|
|
|
VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
/* I have no idea what the maximum size is, but the hardware supports very
|
|
|
|
* large numbers of descriptors (at least 2^16). This limit is based on
|
|
|
|
* CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
|
|
|
|
* we don't have to think about what to do if that overflows, but really
|
|
|
|
* nothing is likely to get close to this.
|
|
|
|
*/
|
|
|
|
const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
VkPhysicalDeviceLimits limits = {
|
|
|
|
.maxImageDimension1D = (1 << 14),
|
|
|
|
.maxImageDimension2D = (1 << 14),
|
|
|
|
.maxImageDimension3D = (1 << 11),
|
|
|
|
.maxImageDimensionCube = (1 << 14),
|
|
|
|
.maxImageArrayLayers = (1 << 11),
|
|
|
|
.maxTexelBufferElements = 128 * 1024 * 1024,
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
.maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
|
2019-12-02 22:32:53 +00:00
|
|
|
.maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
|
|
|
|
.maxMemoryAllocationCount = UINT32_MAX,
|
|
|
|
.maxSamplerAllocationCount = 64 * 1024,
|
|
|
|
.bufferImageGranularity = 64, /* A cache line */
|
|
|
|
.sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
|
|
|
|
.maxBoundDescriptorSets = MAX_SETS,
|
|
|
|
.maxPerStageDescriptorSamplers = max_descriptor_set_size,
|
|
|
|
.maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
|
|
|
|
.maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
|
|
|
|
.maxPerStageDescriptorSampledImages = max_descriptor_set_size,
|
|
|
|
.maxPerStageDescriptorStorageImages = max_descriptor_set_size,
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
.maxPerStageDescriptorInputAttachments = MAX_RTS,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxPerStageResources = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetSamplers = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetUniformBuffers = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
|
|
|
|
.maxDescriptorSetStorageBuffers = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
|
|
|
|
.maxDescriptorSetSampledImages = max_descriptor_set_size,
|
|
|
|
.maxDescriptorSetStorageImages = max_descriptor_set_size,
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
.maxDescriptorSetInputAttachments = MAX_RTS,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxVertexInputAttributes = 32,
|
|
|
|
.maxVertexInputBindings = 32,
|
2020-03-18 02:28:38 +00:00
|
|
|
.maxVertexInputAttributeOffset = 4095,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxVertexInputBindingStride = 2048,
|
|
|
|
.maxVertexOutputComponents = 128,
|
|
|
|
.maxTessellationGenerationLevel = 64,
|
|
|
|
.maxTessellationPatchSize = 32,
|
|
|
|
.maxTessellationControlPerVertexInputComponents = 128,
|
|
|
|
.maxTessellationControlPerVertexOutputComponents = 128,
|
|
|
|
.maxTessellationControlPerPatchOutputComponents = 120,
|
|
|
|
.maxTessellationControlTotalOutputComponents = 4096,
|
|
|
|
.maxTessellationEvaluationInputComponents = 128,
|
|
|
|
.maxTessellationEvaluationOutputComponents = 128,
|
2020-04-03 15:59:47 +01:00
|
|
|
.maxGeometryShaderInvocations = 32,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxGeometryInputComponents = 64,
|
|
|
|
.maxGeometryOutputComponents = 128,
|
|
|
|
.maxGeometryOutputVertices = 256,
|
|
|
|
.maxGeometryTotalOutputComponents = 1024,
|
2020-04-20 12:41:42 +01:00
|
|
|
.maxFragmentInputComponents = 124,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxFragmentOutputAttachments = 8,
|
|
|
|
.maxFragmentDualSrcAttachments = 1,
|
|
|
|
.maxFragmentCombinedOutputResources = 8,
|
|
|
|
.maxComputeSharedMemorySize = 32768,
|
|
|
|
.maxComputeWorkGroupCount = { 65535, 65535, 65535 },
|
|
|
|
.maxComputeWorkGroupInvocations = 2048,
|
|
|
|
.maxComputeWorkGroupSize = { 2048, 2048, 2048 },
|
2020-03-12 21:27:29 +00:00
|
|
|
.subPixelPrecisionBits = 8,
|
2020-06-07 03:07:09 +01:00
|
|
|
.subTexelPrecisionBits = 8,
|
|
|
|
.mipmapPrecisionBits = 8,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxDrawIndexedIndexValue = UINT32_MAX,
|
|
|
|
.maxDrawIndirectCount = UINT32_MAX,
|
2020-06-07 03:07:09 +01:00
|
|
|
.maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxSamplerAnisotropy = 16,
|
|
|
|
.maxViewports = MAX_VIEWPORTS,
|
|
|
|
.maxViewportDimensions = { (1 << 14), (1 << 14) },
|
|
|
|
.viewportBoundsRange = { INT16_MIN, INT16_MAX },
|
|
|
|
.viewportSubPixelBits = 8,
|
|
|
|
.minMemoryMapAlignment = 4096, /* A page */
|
2020-01-22 20:25:10 +00:00
|
|
|
.minTexelBufferOffsetAlignment = 64,
|
tu: Switch to the bindless descriptor model
Under the bindless model, there are 5 "base" registers programmed with a
64-bit address, and sam/ldib/ldc and so on each specify a base register
and an offset, in units of 16 dwords. The base registers correspond to
descriptor sets in Vulkan. We allocate a buffer at descriptor set
creation time, hopefully outside the main rendering loop, and then
switching descriptor sets is just a matter of programming the base
registers differently. Note, however, that some kinds of descriptors
need to be patched at command recording time, in particular dynamic
UBO's and SSBO's, which need to be patched at CmdBindDescriptorSets
time, and input attachments which need to be patched at draw time based
on the the pipeline that's bound. We reserve the fifth base register
(which seems to be unused by the blob driver) for these, creating a
descriptor set on-the-fly and combining all the dynamic descriptors from
all the different descriptor sets. This way, we never have to copy the
rest of the descriptor set at draw time like the blob seems to do. I
mostly chose to do this because the infrastructure was already there in
the form of dynamic_descriptors, and other drivers (at least radv) don't
cheat either when implementing this.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
2020-03-16 10:49:19 +00:00
|
|
|
.minUniformBufferOffsetAlignment = 64,
|
|
|
|
.minStorageBufferOffsetAlignment = 64,
|
2020-06-07 03:07:09 +01:00
|
|
|
.minTexelOffset = -16,
|
|
|
|
.maxTexelOffset = 15,
|
2018-08-08 23:23:57 +01:00
|
|
|
.minTexelGatherOffset = -32,
|
|
|
|
.maxTexelGatherOffset = 31,
|
2020-06-07 03:07:09 +01:00
|
|
|
.minInterpolationOffset = -0.5,
|
|
|
|
.maxInterpolationOffset = 0.4375,
|
|
|
|
.subPixelInterpolationOffsetBits = 4,
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxFramebufferWidth = (1 << 14),
|
|
|
|
.maxFramebufferHeight = (1 << 14),
|
|
|
|
.maxFramebufferLayers = (1 << 10),
|
|
|
|
.framebufferColorSampleCounts = sample_counts,
|
|
|
|
.framebufferDepthSampleCounts = sample_counts,
|
|
|
|
.framebufferStencilSampleCounts = sample_counts,
|
|
|
|
.framebufferNoAttachmentsSampleCounts = sample_counts,
|
|
|
|
.maxColorAttachments = MAX_RTS,
|
|
|
|
.sampledImageColorSampleCounts = sample_counts,
|
|
|
|
.sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
|
|
|
|
.sampledImageDepthSampleCounts = sample_counts,
|
|
|
|
.sampledImageStencilSampleCounts = sample_counts,
|
|
|
|
.storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
|
|
|
|
.maxSampleMaskWords = 1,
|
2020-03-03 01:52:15 +00:00
|
|
|
.timestampComputeAndGraphics = true,
|
|
|
|
.timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
|
2018-08-08 23:23:57 +01:00
|
|
|
.maxClipDistances = 8,
|
|
|
|
.maxCullDistances = 8,
|
|
|
|
.maxCombinedClipAndCullDistances = 8,
|
|
|
|
.discreteQueuePriorities = 1,
|
2020-06-29 00:58:08 +01:00
|
|
|
.pointSizeRange = { 1, 4092 },
|
2018-08-08 23:23:57 +01:00
|
|
|
.lineWidthRange = { 0.0, 7.9921875 },
|
2020-06-29 00:58:08 +01:00
|
|
|
.pointSizeGranularity = 0.0625,
|
2018-08-08 23:23:57 +01:00
|
|
|
.lineWidthGranularity = (1.0 / 128.0),
|
|
|
|
.strictLines = false, /* FINISHME */
|
|
|
|
.standardSampleLocations = true,
|
|
|
|
.optimalBufferCopyOffsetAlignment = 128,
|
|
|
|
.optimalBufferCopyRowPitchAlignment = 128,
|
|
|
|
.nonCoherentAtomSize = 64,
|
|
|
|
};
|
|
|
|
|
2020-09-29 17:04:17 +01:00
|
|
|
pProperties->properties = (VkPhysicalDeviceProperties) {
|
2018-08-08 23:23:57 +01:00
|
|
|
.apiVersion = tu_physical_device_api_version(pdevice),
|
|
|
|
.driverVersion = vk_get_driver_version(),
|
|
|
|
.vendorID = 0, /* TODO */
|
|
|
|
.deviceID = 0,
|
|
|
|
.deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
|
|
|
|
.limits = limits,
|
|
|
|
.sparseProperties = { 0 },
|
|
|
|
};
|
|
|
|
|
2020-09-29 17:04:17 +01:00
|
|
|
strcpy(pProperties->properties.deviceName, pdevice->name);
|
|
|
|
memcpy(pProperties->properties.pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
vk_foreach_struct(ext, pProperties->pNext)
|
|
|
|
{
|
|
|
|
switch (ext->sType) {
|
2019-01-09 22:16:01 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
|
|
|
|
VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
|
|
|
|
(VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
|
|
|
|
properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
|
|
|
|
break;
|
|
|
|
}
|
2019-02-02 01:08:51 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
|
|
|
|
VkPhysicalDeviceIDProperties *properties =
|
|
|
|
(VkPhysicalDeviceIDProperties *) ext;
|
2019-01-09 22:16:01 +00:00
|
|
|
memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
|
|
|
|
memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
|
|
|
|
properties->deviceLUIDValid = false;
|
|
|
|
break;
|
|
|
|
}
|
2019-02-02 01:08:51 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
|
|
|
|
VkPhysicalDeviceMultiviewProperties *properties =
|
|
|
|
(VkPhysicalDeviceMultiviewProperties *) ext;
|
2019-01-09 22:16:01 +00:00
|
|
|
properties->maxMultiviewViewCount = MAX_VIEWS;
|
|
|
|
properties->maxMultiviewInstanceIndex = INT_MAX;
|
|
|
|
break;
|
|
|
|
}
|
2019-02-02 01:08:51 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
|
|
|
|
VkPhysicalDevicePointClippingProperties *properties =
|
|
|
|
(VkPhysicalDevicePointClippingProperties *) ext;
|
2019-01-09 22:16:01 +00:00
|
|
|
properties->pointClippingBehavior =
|
2019-02-02 01:08:51 +00:00
|
|
|
VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
|
2019-01-09 22:16:01 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
|
|
|
|
VkPhysicalDeviceMaintenance3Properties *properties =
|
|
|
|
(VkPhysicalDeviceMaintenance3Properties *) ext;
|
|
|
|
/* Make sure everything is addressable by a signed 32-bit int, and
|
|
|
|
* our largest descriptors are 96 bytes. */
|
|
|
|
properties->maxPerSetDescriptors = (1ull << 31) / 96;
|
|
|
|
/* Our buffer size fields allow only this much */
|
|
|
|
properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
|
|
|
|
break;
|
|
|
|
}
|
2020-02-20 05:41:55 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
|
|
|
|
VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
|
|
|
|
(VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
|
|
|
|
|
|
|
|
properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
|
|
|
|
properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
|
|
|
|
properties->maxTransformFeedbackBufferSize = UINT32_MAX;
|
|
|
|
properties->maxTransformFeedbackStreamDataSize = 512;
|
|
|
|
properties->maxTransformFeedbackBufferDataSize = 512;
|
|
|
|
properties->maxTransformFeedbackBufferDataStride = 512;
|
2020-04-17 08:08:17 +01:00
|
|
|
properties->transformFeedbackQueries = true;
|
2020-02-20 05:41:55 +00:00
|
|
|
properties->transformFeedbackStreamsLinesTriangles = false;
|
|
|
|
properties->transformFeedbackRasterizationStreamSelect = false;
|
|
|
|
properties->transformFeedbackDraw = true;
|
|
|
|
break;
|
|
|
|
}
|
2020-04-21 17:14:23 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
|
|
|
|
VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
|
|
|
|
(VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
|
|
|
|
properties->sampleLocationSampleCounts = 0;
|
|
|
|
if (pdevice->supported_extensions.EXT_sample_locations) {
|
|
|
|
properties->sampleLocationSampleCounts =
|
|
|
|
VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
|
|
|
|
}
|
|
|
|
properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
|
|
|
|
properties->sampleLocationCoordinateRange[0] = 0.0f;
|
|
|
|
properties->sampleLocationCoordinateRange[1] = 0.9375f;
|
|
|
|
properties->sampleLocationSubPixelBits = 4;
|
|
|
|
properties->variableSampleLocations = true;
|
|
|
|
break;
|
|
|
|
}
|
2020-04-20 22:54:36 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
|
|
|
|
VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
|
|
|
|
(VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
|
|
|
|
properties->filterMinmaxImageComponentMapping = true;
|
|
|
|
properties->filterMinmaxSingleComponentFormats = true;
|
|
|
|
break;
|
|
|
|
}
|
2020-06-19 17:47:08 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
|
|
|
|
VkPhysicalDeviceSubgroupProperties *properties =
|
|
|
|
(VkPhysicalDeviceSubgroupProperties *)ext;
|
|
|
|
properties->subgroupSize = 64;
|
|
|
|
properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
|
|
|
|
properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
|
|
|
|
VK_SUBGROUP_FEATURE_VOTE_BIT;
|
|
|
|
properties->quadOperationsInAllStages = false;
|
|
|
|
break;
|
|
|
|
}
|
2020-06-25 00:56:01 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
|
|
|
|
VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
|
|
|
|
(VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
|
|
|
|
props->maxVertexAttribDivisor = UINT32_MAX;
|
|
|
|
break;
|
|
|
|
}
|
2020-07-27 18:20:04 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT: {
|
|
|
|
VkPhysicalDeviceCustomBorderColorPropertiesEXT *props = (void *)ext;
|
|
|
|
props->maxCustomBorderColorSamplers = TU_BORDER_COLOR_COUNT;
|
|
|
|
break;
|
|
|
|
}
|
2019-01-09 22:16:01 +00:00
|
|
|
default:
|
|
|
|
break;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
static const VkQueueFamilyProperties tu_queue_family_properties = {
|
|
|
|
.queueFlags =
|
|
|
|
VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
|
2018-11-07 06:51:05 +00:00
|
|
|
.queueCount = 1,
|
2020-03-03 01:52:15 +00:00
|
|
|
.timestampValidBits = 48,
|
2019-05-30 22:47:37 +01:00
|
|
|
.minImageTransferGranularity = { 1, 1, 1 },
|
2018-11-07 06:51:05 +00:00
|
|
|
};
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetPhysicalDeviceQueueFamilyProperties2(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkPhysicalDevice physicalDevice,
|
2018-11-07 06:51:05 +00:00
|
|
|
uint32_t *pQueueFamilyPropertyCount,
|
2019-02-02 01:08:51 +00:00
|
|
|
VkQueueFamilyProperties2 *pQueueFamilyProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2018-11-07 06:51:05 +00:00
|
|
|
VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
vk_outarray_append(&out, p)
|
|
|
|
{
|
2018-11-07 06:51:05 +00:00
|
|
|
p->queueFamilyProperties = tu_queue_family_properties;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-10 12:30:08 +01:00
|
|
|
/* Compute the size, in bytes, of the single memory heap turnip exposes.
 *
 * The GPU shares system RAM, so the heap is derived from total RAM as
 * reported by sysinfo(2), scaled down so the GPU cannot claim everything.
 *
 * Returns the budgeted heap size in bytes.
 */
static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   /* totalram is in units of mem_unit bytes; widen before multiplying to
    * avoid 32-bit overflow on systems with >4GiB of RAM.
    */
   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
void
|
2020-09-29 17:04:17 +01:00
|
|
|
tu_GetPhysicalDeviceMemoryProperties2(VkPhysicalDevice pdev,
|
|
|
|
VkPhysicalDeviceMemoryProperties2 *props2)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2020-09-29 17:04:17 +01:00
|
|
|
VkPhysicalDeviceMemoryProperties *props = &props2->memoryProperties;
|
2018-08-10 12:30:08 +01:00
|
|
|
|
2020-09-29 17:04:17 +01:00
|
|
|
props->memoryHeapCount = 1;
|
|
|
|
props->memoryHeaps[0].size = tu_get_system_heap_size();
|
|
|
|
props->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
|
|
|
|
|
|
|
|
props->memoryTypeCount = 1;
|
|
|
|
props->memoryTypes[0].propertyFlags =
|
2019-01-09 22:16:01 +00:00
|
|
|
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
|
|
|
|
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
|
|
|
|
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
|
2020-09-29 17:04:17 +01:00
|
|
|
props->memoryTypes[0].heapIndex = 0;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
2019-01-10 20:12:38 +00:00
|
|
|
static VkResult
|
2018-08-08 23:23:57 +01:00
|
|
|
tu_queue_init(struct tu_device *device,
|
2018-11-05 06:42:55 +00:00
|
|
|
struct tu_queue *queue,
|
|
|
|
uint32_t queue_family_index,
|
|
|
|
int idx,
|
|
|
|
VkDeviceQueueCreateFlags flags)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_object_base_init(&device->vk, &queue->base, VK_OBJECT_TYPE_QUEUE);
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
queue->device = device;
|
|
|
|
queue->queue_family_index = queue_family_index;
|
|
|
|
queue->queue_idx = idx;
|
|
|
|
queue->flags = flags;
|
|
|
|
|
2019-01-10 23:34:44 +00:00
|
|
|
int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
|
2019-01-10 20:12:38 +00:00
|
|
|
if (ret)
|
|
|
|
return VK_ERROR_INITIALIZATION_FAILED;
|
|
|
|
|
2020-09-11 03:51:53 +01:00
|
|
|
queue->fence = -1;
|
2019-01-10 22:07:50 +00:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tu_queue_finish(struct tu_queue *queue)
|
|
|
|
{
|
2020-09-11 03:51:53 +01:00
|
|
|
if (queue->fence >= 0)
|
|
|
|
close(queue->fence);
|
2019-01-10 23:34:44 +00:00
|
|
|
tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
tu_get_device_extension_index(const char *name)
|
|
|
|
{
|
|
|
|
for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
|
|
|
|
if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateDevice(VkPhysicalDevice physicalDevice,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkDeviceCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkDevice *pDevice)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
|
|
|
|
VkResult result;
|
|
|
|
struct tu_device *device;
|
2020-07-27 18:20:04 +01:00
|
|
|
bool custom_border_colors = false;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
/* Check enabled features */
|
|
|
|
if (pCreateInfo->pEnabledFeatures) {
|
|
|
|
VkPhysicalDeviceFeatures supported_features;
|
|
|
|
tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
|
2019-01-09 22:16:01 +00:00
|
|
|
VkBool32 *supported_feature = (VkBool32 *) &supported_features;
|
|
|
|
VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
|
2018-08-08 23:23:57 +01:00
|
|
|
unsigned num_features =
|
2019-01-09 22:16:01 +00:00
|
|
|
sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
|
2018-08-08 23:23:57 +01:00
|
|
|
for (uint32_t i = 0; i < num_features; i++) {
|
|
|
|
if (enabled_feature[i] && !supported_feature[i])
|
|
|
|
return vk_error(physical_device->instance,
|
|
|
|
VK_ERROR_FEATURE_NOT_PRESENT);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-27 18:20:04 +01:00
|
|
|
vk_foreach_struct_const(ext, pCreateInfo->pNext) {
|
|
|
|
switch (ext->sType) {
|
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT: {
|
|
|
|
const VkPhysicalDeviceCustomBorderColorFeaturesEXT *border_color_features = (const void *)ext;
|
|
|
|
custom_border_colors = border_color_features->customBorderColors;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
|
|
|
|
sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (!device)
|
|
|
|
return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_device_init(&device->vk, pCreateInfo,
|
|
|
|
&physical_device->instance->alloc, pAllocator);
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
device->instance = physical_device->instance;
|
|
|
|
device->physical_device = physical_device;
|
2020-09-11 04:32:31 +01:00
|
|
|
device->fd = physical_device->local_fd;
|
2020-06-17 23:58:33 +01:00
|
|
|
device->_lost = false;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
2020-09-11 15:26:40 +01:00
|
|
|
mtx_init(&device->bo_mutex, mtx_plain);
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
|
|
|
|
const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
|
|
|
|
int index = tu_get_device_extension_index(ext_name);
|
|
|
|
if (index < 0 ||
|
|
|
|
!physical_device->supported_extensions.extensions[index]) {
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_free(&device->vk.alloc, device);
|
2018-08-08 23:23:57 +01:00
|
|
|
return vk_error(physical_device->instance,
|
|
|
|
VK_ERROR_EXTENSION_NOT_PRESENT);
|
|
|
|
}
|
|
|
|
|
|
|
|
device->enabled_extensions.extensions[index] = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
|
|
|
|
const VkDeviceQueueCreateInfo *queue_create =
|
2019-01-09 22:16:01 +00:00
|
|
|
&pCreateInfo->pQueueCreateInfos[i];
|
2018-08-08 23:23:57 +01:00
|
|
|
uint32_t qfi = queue_create->queueFamilyIndex;
|
2019-01-09 22:16:01 +00:00
|
|
|
device->queues[qfi] = vk_alloc(
|
2020-07-13 04:08:15 +01:00
|
|
|
&device->vk.alloc, queue_create->queueCount * sizeof(struct tu_queue),
|
2019-01-09 22:16:01 +00:00
|
|
|
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (!device->queues[qfi]) {
|
|
|
|
result = VK_ERROR_OUT_OF_HOST_MEMORY;
|
2020-01-28 16:30:44 +00:00
|
|
|
goto fail_queues;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
memset(device->queues[qfi], 0,
|
2018-08-08 23:23:57 +01:00
|
|
|
queue_create->queueCount * sizeof(struct tu_queue));
|
|
|
|
|
|
|
|
device->queue_count[qfi] = queue_create->queueCount;
|
|
|
|
|
|
|
|
for (unsigned q = 0; q < queue_create->queueCount; q++) {
|
2019-01-09 22:16:01 +00:00
|
|
|
result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
|
|
|
|
queue_create->flags);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (result != VK_SUCCESS)
|
2020-01-28 16:30:44 +00:00
|
|
|
goto fail_queues;
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-20 17:53:47 +00:00
|
|
|
device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
|
|
|
|
if (!device->compiler)
|
2020-01-28 16:30:44 +00:00
|
|
|
goto fail_queues;
|
|
|
|
|
2020-06-18 23:08:58 +01:00
|
|
|
/* initial sizes, these will increase if there is overflow */
|
|
|
|
device->vsc_draw_strm_pitch = 0x1000 + VSC_PAD;
|
|
|
|
device->vsc_prim_strm_pitch = 0x4000 + VSC_PAD;
|
2019-02-20 17:53:47 +00:00
|
|
|
|
2020-07-27 18:20:04 +01:00
|
|
|
uint32_t global_size = sizeof(struct tu6_global);
|
|
|
|
if (custom_border_colors)
|
|
|
|
global_size += TU_BORDER_COLOR_COUNT * sizeof(struct bcolor_entry);
|
|
|
|
|
2020-09-11 15:26:40 +01:00
|
|
|
result = tu_bo_init_new(device, &device->global_bo, global_size, false);
|
2020-03-12 11:39:16 +00:00
|
|
|
if (result != VK_SUCCESS)
|
2020-06-18 23:08:58 +01:00
|
|
|
goto fail_global_bo;
|
2020-03-12 11:39:16 +00:00
|
|
|
|
2020-06-18 23:08:58 +01:00
|
|
|
result = tu_bo_map(device, &device->global_bo);
|
2020-03-12 11:39:16 +00:00
|
|
|
if (result != VK_SUCCESS)
|
2020-06-18 23:08:58 +01:00
|
|
|
goto fail_global_bo_map;
|
2020-03-12 11:39:16 +00:00
|
|
|
|
2020-07-20 11:14:41 +01:00
|
|
|
struct tu6_global *global = device->global_bo.map;
|
2020-07-27 18:20:04 +01:00
|
|
|
tu_init_clear_blit_shaders(device->global_bo.map);
|
2020-07-20 11:14:41 +01:00
|
|
|
global->predicate = 0;
|
2020-07-27 18:20:04 +01:00
|
|
|
tu6_pack_border_color(&global->bcolor_builtin[VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK],
|
|
|
|
&(VkClearColorValue) {}, false);
|
|
|
|
tu6_pack_border_color(&global->bcolor_builtin[VK_BORDER_COLOR_INT_TRANSPARENT_BLACK],
|
|
|
|
&(VkClearColorValue) {}, true);
|
|
|
|
tu6_pack_border_color(&global->bcolor_builtin[VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK],
|
|
|
|
&(VkClearColorValue) { .float32[3] = 1.0f }, false);
|
|
|
|
tu6_pack_border_color(&global->bcolor_builtin[VK_BORDER_COLOR_INT_OPAQUE_BLACK],
|
|
|
|
&(VkClearColorValue) { .int32[3] = 1 }, true);
|
|
|
|
tu6_pack_border_color(&global->bcolor_builtin[VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE],
|
|
|
|
&(VkClearColorValue) { .float32[0 ... 3] = 1.0f }, false);
|
|
|
|
tu6_pack_border_color(&global->bcolor_builtin[VK_BORDER_COLOR_INT_OPAQUE_WHITE],
|
|
|
|
&(VkClearColorValue) { .int32[0 ... 3] = 1 }, true);
|
|
|
|
|
|
|
|
/* initialize to ones so ffs can be used to find unused slots */
|
|
|
|
BITSET_ONES(device->custom_border_color);
|
2020-03-12 11:39:16 +00:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
VkPipelineCacheCreateInfo ci;
|
|
|
|
ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
|
|
|
|
ci.pNext = NULL;
|
|
|
|
ci.flags = 0;
|
|
|
|
ci.pInitialData = NULL;
|
|
|
|
ci.initialDataSize = 0;
|
|
|
|
VkPipelineCache pc;
|
|
|
|
result =
|
2019-01-09 22:16:01 +00:00
|
|
|
tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (result != VK_SUCCESS)
|
2020-01-28 16:30:44 +00:00
|
|
|
goto fail_pipeline_cache;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
device->mem_cache = tu_pipeline_cache_from_handle(pc);
|
|
|
|
|
2020-05-11 17:46:04 +01:00
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
|
|
|
|
mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);
|
|
|
|
|
2020-07-27 18:20:04 +01:00
|
|
|
mtx_init(&device->mutex, mtx_plain);
|
2020-06-18 23:08:58 +01:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
*pDevice = tu_device_to_handle(device);
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
2020-01-28 16:30:44 +00:00
|
|
|
fail_pipeline_cache:
|
2020-06-18 23:08:58 +01:00
|
|
|
fail_global_bo_map:
|
|
|
|
tu_bo_finish(device, &device->global_bo);
|
2020-01-28 16:30:44 +00:00
|
|
|
|
2020-06-18 23:08:58 +01:00
|
|
|
fail_global_bo:
|
2020-01-28 16:30:44 +00:00
|
|
|
ralloc_free(device->compiler);
|
|
|
|
|
|
|
|
fail_queues:
|
2018-08-08 23:23:57 +01:00
|
|
|
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
|
|
|
|
for (unsigned q = 0; q < device->queue_count[i]; q++)
|
|
|
|
tu_queue_finish(&device->queues[i][q]);
|
|
|
|
if (device->queue_count[i])
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_object_free(&device->vk, NULL, device->queues[i]);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_free(&device->vk.alloc, device);
|
2018-08-08 23:23:57 +01:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
|
|
|
|
if (!device)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
|
|
|
|
for (unsigned q = 0; q < device->queue_count[i]; q++)
|
|
|
|
tu_queue_finish(&device->queues[i][q]);
|
|
|
|
if (device->queue_count[i])
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_object_free(&device->vk, NULL, device->queues[i]);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
2020-05-11 17:46:04 +01:00
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
|
|
|
|
if (device->scratch_bos[i].initialized)
|
|
|
|
tu_bo_finish(device, &device->scratch_bos[i].bo);
|
|
|
|
}
|
|
|
|
|
2020-06-04 20:55:41 +01:00
|
|
|
ir3_compiler_destroy(device->compiler);
|
2019-02-20 17:53:47 +00:00
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
|
|
|
|
tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
|
|
|
|
|
2020-09-11 15:26:40 +01:00
|
|
|
vk_free(&device->vk.alloc, device->bo_list);
|
|
|
|
vk_free(&device->vk.alloc, device->bo_idx);
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_free(&device->vk.alloc, device);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
2020-06-17 23:58:33 +01:00
|
|
|
VkResult
|
|
|
|
_tu_device_set_lost(struct tu_device *device,
|
|
|
|
const char *file, int line,
|
|
|
|
const char *msg, ...)
|
|
|
|
{
|
|
|
|
/* Set the flag indicating that waits should return in finite time even
|
|
|
|
* after device loss.
|
|
|
|
*/
|
|
|
|
p_atomic_inc(&device->_lost);
|
|
|
|
|
|
|
|
/* TODO: Report the log message through VkDebugReportCallbackEXT instead */
|
|
|
|
fprintf(stderr, "%s:%d: ", file, line);
|
|
|
|
va_list ap;
|
|
|
|
va_start(ap, msg);
|
|
|
|
vfprintf(stderr, msg, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
|
|
|
if (env_var_as_boolean("TU_ABORT_ON_DEVICE_LOSS", false))
|
|
|
|
abort();
|
|
|
|
|
|
|
|
return VK_ERROR_DEVICE_LOST;
|
|
|
|
}
|
|
|
|
|
2020-05-11 17:46:04 +01:00
|
|
|
/* Return (via *bo) a scratch BO of at least `size` bytes, allocating it on
 * first use. BOs live in power-of-two size buckets on the device and are
 * freed only in tu_DestroyDevice, so callers must not free them.
 */
VkResult
tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
{
   /* Bucket index: ceil(log2(size)), clamped below by the minimum BO size. */
   unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
   unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
   assert(index < ARRAY_SIZE(dev->scratch_bos));

   /* Any already-initialized bucket of this size or larger will do. */
   for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
      if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
         /* Fast path: just return the already-allocated BO. */
         *bo = &dev->scratch_bos[i].bo;
         return VK_SUCCESS;
      }
   }

   /* Slow path: actually allocate the BO. We take a per-bucket lock because
    * allocation is slow and concurrent allocators of the same bucket must be
    * serialized, while readers on the fast path stay lock-free.
    */
   mtx_lock(&dev->scratch_bos[index].construct_mtx);

   /* Another thread may have allocated it already while we were waiting on
    * the lock. We need to check this in order to avoid double-allocating.
    */
   if (dev->scratch_bos[index].initialized) {
      mtx_unlock(&dev->scratch_bos[index].construct_mtx);
      *bo = &dev->scratch_bos[index].bo;
      return VK_SUCCESS;
   }

   unsigned bo_size = 1ull << size_log2;
   VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size, false);
   if (result != VK_SUCCESS) {
      mtx_unlock(&dev->scratch_bos[index].construct_mtx);
      return result;
   }

   /* Publish only after the BO is fully constructed so lock-free fast-path
    * readers never observe a half-initialized BO.
    */
   p_atomic_set(&dev->scratch_bos[index].initialized, true);

   mtx_unlock(&dev->scratch_bos[index].construct_mtx);

   *bo = &dev->scratch_bos[index].bo;
   return VK_SUCCESS;
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
/* vkEnumerateInstanceLayerProperties: this driver implements no layers,
 * so always report zero properties.
 */
VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
/* vkEnumerateDeviceLayerProperties: this driver implements no layers,
 * so always report zero properties.
 */
VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetDeviceQueue2(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkDeviceQueueInfo2 *pQueueInfo,
|
|
|
|
VkQueue *pQueue)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
struct tu_queue *queue;
|
|
|
|
|
|
|
|
queue =
|
2019-01-09 22:16:01 +00:00
|
|
|
&device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
|
2018-08-08 23:23:57 +01:00
|
|
|
if (pQueueInfo->flags != queue->flags) {
|
|
|
|
/* From the Vulkan 1.1.70 spec:
|
|
|
|
*
|
|
|
|
* "The queue returned by vkGetDeviceQueue2 must have the same
|
|
|
|
* flags value from this structure as that used at device
|
|
|
|
* creation time in a VkDeviceQueueCreateInfo instance. If no
|
|
|
|
* matching flags were specified at device creation time then
|
|
|
|
* pQueue will return VK_NULL_HANDLE."
|
|
|
|
*/
|
|
|
|
*pQueue = VK_NULL_HANDLE;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
*pQueue = tu_queue_to_handle(queue);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* vkQueueWaitIdle: block until the last submission on this queue has
 * completed, by polling the kernel fence fd saved at submit time.
 */
VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   if (tu_device_is_lost(queue->device))
      return VK_ERROR_DEVICE_LOST;

   /* No fence fd means nothing is outstanding since the last wait. */
   if (queue->fence < 0)
      return VK_SUCCESS;

   struct pollfd fds = { .fd = queue->fence, .events = POLLIN };
   int ret;
   do {
      /* Infinite timeout; retry if interrupted by a signal. */
      ret = poll(&fds, 1, -1);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   /* TODO: otherwise set device lost ? */
   assert(ret == 1 && !(fds.revents & (POLLERR | POLLNVAL)));

   /* The fence is consumed: close it so the next submit installs a fresh one. */
   close(queue->fence);
   queue->fence = -1;
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_DeviceWaitIdle(VkDevice _device)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
|
2020-06-17 23:58:33 +01:00
|
|
|
if (tu_device_is_lost(device))
|
|
|
|
return VK_ERROR_DEVICE_LOST;
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
|
|
|
|
for (unsigned q = 0; q < device->queue_count[i]; q++) {
|
|
|
|
tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* vkEnumerateInstanceExtensionProperties: report the instance extensions
 * compiled into this driver that are supported on the running system.
 */
VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   /* VK_INCOMPLETE if the caller's array was too small, else VK_SUCCESS. */
   return vk_outarray_status(&out);
}
|
|
|
|
|
|
|
|
/* vkEnumerateDeviceExtensionProperties: report the device extensions this
 * physical device supports.
 */
VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   /* VK_INCOMPLETE if the caller's array was too small, else VK_SUCCESS. */
   return vk_outarray_status(&out);
}
|
|
|
|
|
|
|
|
PFN_vkVoidFunction
|
|
|
|
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_instance, instance, _instance);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
return tu_lookup_entrypoint_checked(
|
|
|
|
pName, instance ? instance->api_version : 0,
|
|
|
|
instance ? &instance->enabled_extensions : NULL, NULL);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   /* Simple alias for the driver's normal entrypoint resolver. */
   return tu_GetInstanceProcAddr(instance, pName);
}
|
|
|
|
|
|
|
|
PFN_vkVoidFunction
|
|
|
|
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
|
2019-01-09 22:16:01 +00:00
|
|
|
return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
|
|
|
|
&device->instance->enabled_extensions,
|
|
|
|
&device->enabled_extensions);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
2020-09-29 16:07:39 +01:00
|
|
|
/* vkAllocateMemory: back a VkDeviceMemory with a GEM BO, either imported
 * from an opaque/dma-buf fd (VkImportMemoryFdInfoKHR in the pNext chain)
 * or freshly allocated.
 */
VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_object_alloc(&device->vk, pAllocator, sizeof(*mem),
                         VK_OBJECT_TYPE_DEVICE_MEMORY);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* A zero handleType means the import struct is present but inactive. */
   const VkImportMemoryFdInfoKHR *fd_info =
      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
   if (fd_info && !fd_info->handleType)
      fd_info = NULL;

   if (fd_info) {
      assert(fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
             fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      /*
       * TODO Importing the same fd twice gives us the same handle without
       * reference counting. We need to maintain a per-instance handle-to-bo
       * table and add reference count to tu_bo.
       */
      result = tu_bo_init_dmabuf(device, &mem->bo,
                                 pAllocateInfo->allocationSize, fd_info->fd);
      if (result == VK_SUCCESS) {
         /* take ownership and close the fd */
         close(fd_info->fd);
      }
   } else {
      result =
         tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize, false);
   }

   if (result != VK_SUCCESS) {
      vk_object_free(&device->vk, pAllocator, mem);
      return result;
   }

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_FreeMemory(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDeviceMemory _mem,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_device_memory, mem, _mem);
|
|
|
|
|
|
|
|
if (mem == NULL)
|
|
|
|
return;
|
|
|
|
|
2018-11-07 04:26:45 +00:00
|
|
|
tu_bo_finish(device, &mem->bo);
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_object_free(&device->vk, pAllocator, mem);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vkMapMemory: map the BO lazily on first use; the mapping is kept for the
 * lifetime of the allocation and torn down in vkFreeMemory (tu_UnmapMemory
 * is a no-op).
 */
VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   /* Mapping VK_NULL_HANDLE yields NULL, per driver convention. */
   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (!mem->bo.map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
   }

   *ppData = mem->bo.map + offset;
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
/* vkUnmapMemory: intentionally a no-op — the mapping created lazily in
 * tu_MapMemory is kept until the allocation is freed.
 */
void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* TODO: unmap here instead of waiting for FreeMemory */
}
|
|
|
|
|
|
|
|
/* vkFlushMappedMemoryRanges: no-op.
 * NOTE(review): presumably host-visible memory is allocated coherent so no
 * explicit CPU cache flush is required — confirm against the BO allocation
 * flags used in tu_bo_init_new.
 */
VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
/* vkInvalidateMappedMemoryRanges: no-op.
 * NOTE(review): same coherence assumption as tu_FlushMappedMemoryRanges —
 * confirm against the BO allocation flags.
 */
VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetBufferMemoryRequirements2(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDevice device,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkBufferMemoryRequirementsInfo2 *pInfo,
|
|
|
|
VkMemoryRequirements2 *pMemoryRequirements)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2020-09-29 17:04:17 +01:00
|
|
|
TU_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer);
|
2018-08-08 23:23:57 +01:00
|
|
|
|
2020-09-29 17:04:17 +01:00
|
|
|
pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
|
|
|
|
.memoryTypeBits = 1,
|
|
|
|
.alignment = 64,
|
|
|
|
.size = align64(buffer->size, 64),
|
|
|
|
};
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetImageMemoryRequirements2(VkDevice device,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkImageMemoryRequirementsInfo2 *pInfo,
|
|
|
|
VkMemoryRequirements2 *pMemoryRequirements)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2020-09-29 17:04:17 +01:00
|
|
|
TU_FROM_HANDLE(tu_image, image, pInfo->image);
|
2018-08-08 23:23:57 +01:00
|
|
|
|
2020-09-29 17:04:17 +01:00
|
|
|
pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
|
|
|
|
.memoryTypeBits = 1,
|
|
|
|
.alignment = image->layout[0].base_align,
|
|
|
|
.size = image->total_size
|
|
|
|
};
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vkGetImageSparseMemoryRequirements2: sparse resources are not
 * implemented; tu_stub() logs the unimplemented call.
 */
void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2 *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
{
   tu_stub();
}
|
|
|
|
|
|
|
|
/* vkGetDeviceMemoryCommitment: no lazily-allocated memory is supported,
 * so the committed size is always zero.
 */
void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_BindBufferMemory2(VkDevice device,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t bindInfoCount,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkBindBufferMemoryInfo *pBindInfos)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
2019-01-15 21:54:15 +00:00
|
|
|
for (uint32_t i = 0; i < bindInfoCount; ++i) {
|
|
|
|
TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
|
|
|
|
TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
|
|
|
|
|
|
|
|
if (mem) {
|
|
|
|
buffer->bo = &mem->bo;
|
|
|
|
buffer->bo_offset = pBindInfos[i].memoryOffset;
|
|
|
|
} else {
|
|
|
|
buffer->bo = NULL;
|
|
|
|
}
|
|
|
|
}
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_BindImageMemory2(VkDevice device,
|
2018-11-05 06:42:55 +00:00
|
|
|
uint32_t bindInfoCount,
|
2019-01-10 19:51:39 +00:00
|
|
|
const VkBindImageMemoryInfo *pBindInfos)
|
|
|
|
{
|
|
|
|
for (uint32_t i = 0; i < bindInfoCount; ++i) {
|
|
|
|
TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
|
|
|
|
TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
|
|
|
|
|
|
|
|
if (mem) {
|
|
|
|
image->bo = &mem->bo;
|
|
|
|
image->bo_offset = pBindInfos[i].memoryOffset;
|
|
|
|
} else {
|
|
|
|
image->bo = NULL;
|
|
|
|
image->bo_offset = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* vkQueueBindSparse: sparse binding is not supported; accept and ignore.
 * NOTE(review): _fence is never signaled here — a caller waiting on it
 * would hang; confirm sparse binding features are never advertised.
 */
VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
/* vkCreateEvent: an event is a 64-bit word in its own 4 KiB BO
 * (1 = set, 0 = reset), mapped so both CPU and GPU can access it.
 */
VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_event *event =
      vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
                      VK_OBJECT_TYPE_EVENT);
   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   VkResult result = tu_bo_init_new(device, &event->bo, 0x1000, false);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = tu_bo_map(device, &event->bo);
   if (result != VK_SUCCESS)
      goto fail_map;

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;

/* Unwind in reverse acquisition order. */
fail_map:
   tu_bo_finish(device, &event->bo);
fail_alloc:
   vk_object_free(&device->vk, pAllocator, event);
   return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyEvent(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkEvent _event,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_event, event, _event);
|
|
|
|
|
|
|
|
if (!event)
|
|
|
|
return;
|
2020-01-30 16:02:29 +00:00
|
|
|
|
|
|
|
tu_bo_finish(device, &event->bo);
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_object_free(&device->vk, pAllocator, event);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_GetEventStatus(VkDevice _device, VkEvent _event)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_event, event, _event);
|
|
|
|
|
2019-10-14 16:24:27 +01:00
|
|
|
if (*(uint64_t*) event->bo.map == 1)
|
2018-08-08 23:23:57 +01:00
|
|
|
return VK_EVENT_SET;
|
|
|
|
return VK_EVENT_RESET;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_SetEvent(VkDevice _device, VkEvent _event)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_event, event, _event);
|
2019-10-14 16:24:27 +01:00
|
|
|
*(uint64_t*) event->bo.map = 1;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_ResetEvent(VkDevice _device, VkEvent _event)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_event, event, _event);
|
2019-10-14 16:24:27 +01:00
|
|
|
*(uint64_t*) event->bo.map = 0;
|
2018-08-08 23:23:57 +01:00
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateBuffer(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkBufferCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkBuffer *pBuffer)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
struct tu_buffer *buffer;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
|
|
|
|
|
2020-07-13 04:08:15 +01:00
|
|
|
buffer = vk_object_alloc(&device->vk, pAllocator, sizeof(*buffer),
|
|
|
|
VK_OBJECT_TYPE_BUFFER);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (buffer == NULL)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
buffer->size = pCreateInfo->size;
|
|
|
|
buffer->usage = pCreateInfo->usage;
|
|
|
|
buffer->flags = pCreateInfo->flags;
|
|
|
|
|
|
|
|
*pBuffer = tu_buffer_to_handle(buffer);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyBuffer(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkBuffer _buffer,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
|
|
|
|
|
|
|
|
if (!buffer)
|
|
|
|
return;
|
|
|
|
|
2020-07-13 04:08:15 +01:00
|
|
|
vk_object_free(&device->vk, pAllocator, buffer);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vkCreateFramebuffer: allocate the framebuffer with a trailing array of
 * attachment views and precompute its tiling configuration for the given
 * render pass.
 */
VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   /* One allocation: struct plus variable-length attachment array. */
   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                                           pCreateInfo->attachmentCount;
   framebuffer = vk_object_alloc(&device->vk, pAllocator, size,
                                 VK_OBJECT_TYPE_FRAMEBUFFER);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;
   }

   /* Choose the binning/tiling layout up front for this fb/pass combo. */
   tu_framebuffer_tiling_config(framebuffer, device, pass);

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyFramebuffer(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkFramebuffer _fb,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
|
|
|
|
|
|
|
|
if (!fb)
|
|
|
|
return;
|
2020-07-13 04:08:15 +01:00
|
|
|
|
|
|
|
vk_object_free(&device->vk, pAllocator, fb);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Fill in the four 32-bit A6XX TEX_SAMP descriptor words for a sampler
 * from the VkSamplerCreateInfo and its pNext extensions (reduction mode,
 * YCbCr conversion, custom border color).
 */
static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
   const struct VkSamplerReductionModeCreateInfo *reduction =
      vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
   const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
      vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
   const VkSamplerCustomBorderColorCreateInfoEXT *custom_border_color =
      vk_find_struct_const(pCreateInfo->pNext, SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT);
   /* for non-custom border colors, the VK enum is translated directly to an offset in
    * the border color buffer. custom border colors are located immediately after the
    * builtin colors, and thus an offset of TU_BORDER_COLOR_BUILTIN is added.
    */
   uint32_t border_color = (unsigned) pCreateInfo->borderColor;
   if (pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT ||
       pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT) {
      /* Claim a free slot in the device-wide custom border color bitset;
       * the lock guards the bitset against concurrent sampler create/destroy.
       * NOTE(review): confirm BITSET_FFS's 0- vs 1-based return convention
       * matches the bcolor[] indexing used below.
       */
      mtx_lock(&device->mutex);
      border_color = BITSET_FFS(device->custom_border_color);
      BITSET_CLEAR(device->custom_border_color, border_color);
      mtx_unlock(&device->mutex);
      tu6_pack_border_color(device->global_bo.map + gb_offset(bcolor[border_color]),
                            &custom_border_color->customBorderColor,
                            pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT);
      border_color += TU_BORDER_COLOR_BUILTIN;
   }

   /* Hardware anisotropy field is log2 of the sample count, max 8. */
   unsigned aniso = pCreateInfo->anisotropyEnable ?
      util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
   bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
   /* LOD fields are unsigned 4.8 fixed point, hence the 4095/256 clamp. */
   float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
   float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);

   sampler->descriptor[0] =
      COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
      A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
      A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
      A6XX_TEX_SAMP_0_ANISO(aniso) |
      A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
      A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
      A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
      A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
   sampler->descriptor[1] =
      /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
      COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
      A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
      A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
      COND(pCreateInfo->compareEnable,
           A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
   sampler->descriptor[2] = A6XX_TEX_SAMP_2_BCOLOR(border_color);
   sampler->descriptor[3] = 0;

   if (reduction) {
      sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
         tu6_reduction_mode(reduction->reductionMode));
   }

   sampler->ycbcr_sampler = ycbcr_conversion ?
      tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;

   if (sampler->ycbcr_sampler &&
       sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
      sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
   }

   /* TODO:
    * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
    */
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateSampler(VkDevice _device,
|
2018-11-05 06:42:55 +00:00
|
|
|
const VkSamplerCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkSampler *pSampler)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
struct tu_sampler *sampler;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
|
|
|
|
|
2020-07-13 04:08:15 +01:00
|
|
|
sampler = vk_object_alloc(&device->vk, pAllocator, sizeof(*sampler),
|
|
|
|
VK_OBJECT_TYPE_SAMPLER);
|
2018-08-08 23:23:57 +01:00
|
|
|
if (!sampler)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
tu_init_sampler(device, sampler, pCreateInfo);
|
|
|
|
*pSampler = tu_sampler_to_handle(sampler);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* vkDestroySampler: return the sampler's custom border-color slot (if it
 * used one) to the device bitset, then free the wrapper object.
 */
void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
   uint32_t border_color;

   if (!sampler)
      return;

   /* The slot index is recovered from the packed descriptor word; values at
    * or above TU_BORDER_COLOR_BUILTIN are custom (see tu_init_sampler).
    */
   border_color = (sampler->descriptor[2] & A6XX_TEX_SAMP_2_BCOLOR__MASK) >> A6XX_TEX_SAMP_2_BCOLOR__SHIFT;
   if (border_color >= TU_BORDER_COLOR_BUILTIN) {
      border_color -= TU_BORDER_COLOR_BUILTIN;
      /* if the sampler had a custom border color, free it. TODO: no lock */
      mtx_lock(&device->mutex);
      /* The slot must still be marked in-use (clear) before we release it. */
      assert(!BITSET_TEST(device->custom_border_color, border_color));
      BITSET_SET(device->custom_border_color, border_color);
      mtx_unlock(&device->mutex);
   }

   vk_object_free(&device->vk, pAllocator, sampler);
}
|
|
|
|
|
|
|
|
/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *       entrypoint.
    *     - The ICD must statically expose no other Vulkan symbol unless it
    *       is linked with -Bsymbolic.
    *     - Each dispatchable Vulkan handle created by the ICD must be
    *       a pointer to a struct whose first member is VK_LOADER_DATA. The
    *       ICD must initialize VK_LOADER_DATA.loadMagic to
    *       ICD_LOADER_MAGIC.
    *     - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *       vkDestroySurfaceKHR(). The ICD must be capable of working with
    *       such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *       statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *     - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *       vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
    *       because the loader no longer does so.
    */
   /* Negotiate down to the highest version both sides support (max v3). */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}
|
|
|
|
|
2019-02-01 18:36:19 +00:00
|
|
|
/* vkGetMemoryFdKHR: export the allocation's BO as a dma-buf/opaque fd.
 * Ownership of the returned fd transfers to the caller.
 */
VkResult
tu_GetMemoryFdKHR(VkDevice _device,
                  const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFd)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);

   /* At the moment, we support only the below handle types. */
   assert(pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
          pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

   int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
   if (prime_fd < 0)
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   *pFd = prime_fd;
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_GetMemoryFdPropertiesKHR(VkDevice _device,
|
|
|
|
VkExternalMemoryHandleTypeFlagBits handleType,
|
|
|
|
int fd,
|
|
|
|
VkMemoryFdPropertiesKHR *pMemoryFdProperties)
|
|
|
|
{
|
|
|
|
assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
|
|
|
|
pMemoryFdProperties->memoryTypeBits = 1;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-08-08 23:23:57 +01:00
|
|
|
void
|
|
|
|
tu_GetPhysicalDeviceExternalFenceProperties(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkPhysicalDevice physicalDevice,
|
2019-02-02 01:08:51 +00:00
|
|
|
const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
|
|
|
|
VkExternalFenceProperties *pExternalFenceProperties)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
pExternalFenceProperties->exportFromImportedHandleTypes = 0;
|
|
|
|
pExternalFenceProperties->compatibleHandleTypes = 0;
|
|
|
|
pExternalFenceProperties->externalFenceFeatures = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreateDebugReportCallbackEXT(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkInstance _instance,
|
|
|
|
const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkDebugReportCallbackEXT *pCallback)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_instance, instance, _instance);
|
|
|
|
return vk_create_debug_report_callback(&instance->debug_report_callbacks,
|
2019-01-09 22:16:01 +00:00
|
|
|
pCreateInfo, pAllocator,
|
|
|
|
&instance->alloc, pCallback);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDebugReportCallbackEXT _callback,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_instance, instance, _instance);
|
|
|
|
vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
|
2019-01-09 22:16:01 +00:00
|
|
|
_callback, pAllocator, &instance->alloc);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DebugReportMessageEXT(VkInstance _instance,
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDebugReportFlagsEXT flags,
|
|
|
|
VkDebugReportObjectTypeEXT objectType,
|
|
|
|
uint64_t object,
|
|
|
|
size_t location,
|
|
|
|
int32_t messageCode,
|
|
|
|
const char *pLayerPrefix,
|
|
|
|
const char *pMessage)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_instance, instance, _instance);
|
2019-01-09 22:16:01 +00:00
|
|
|
vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
|
|
|
|
object, location, messageCode, pLayerPrefix, pMessage);
|
2018-08-08 23:23:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetDeviceGroupPeerMemoryFeatures(
|
2018-11-05 06:42:55 +00:00
|
|
|
VkDevice device,
|
|
|
|
uint32_t heapIndex,
|
|
|
|
uint32_t localDeviceIndex,
|
|
|
|
uint32_t remoteDeviceIndex,
|
|
|
|
VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
|
2018-08-08 23:23:57 +01:00
|
|
|
{
|
|
|
|
assert(localDeviceIndex == remoteDeviceIndex);
|
|
|
|
|
|
|
|
*pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
|
|
|
|
VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
|
|
|
|
VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
|
|
|
|
VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
|
|
|
|
}
|
2020-04-21 17:14:23 +01:00
|
|
|
|
|
|
|
void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
|
|
|
|
VkPhysicalDevice physicalDevice,
|
|
|
|
VkSampleCountFlagBits samples,
|
|
|
|
VkMultisamplePropertiesEXT* pMultisampleProperties)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
|
|
|
|
|
|
|
|
if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
|
|
|
|
pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
|
|
|
|
else
|
|
|
|
pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
|
|
|
|
}
|
2020-07-13 04:12:56 +01:00
|
|
|
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_CreatePrivateDataSlotEXT(VkDevice _device,
|
|
|
|
const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
|
|
|
|
const VkAllocationCallbacks* pAllocator,
|
|
|
|
VkPrivateDataSlotEXT* pPrivateDataSlot)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
return vk_private_data_slot_create(&device->vk,
|
|
|
|
pCreateInfo,
|
|
|
|
pAllocator,
|
|
|
|
pPrivateDataSlot);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_DestroyPrivateDataSlotEXT(VkDevice _device,
|
|
|
|
VkPrivateDataSlotEXT privateDataSlot,
|
|
|
|
const VkAllocationCallbacks* pAllocator)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
tu_SetPrivateDataEXT(VkDevice _device,
|
|
|
|
VkObjectType objectType,
|
|
|
|
uint64_t objectHandle,
|
|
|
|
VkPrivateDataSlotEXT privateDataSlot,
|
|
|
|
uint64_t data)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
return vk_object_base_set_private_data(&device->vk,
|
|
|
|
objectType,
|
|
|
|
objectHandle,
|
|
|
|
privateDataSlot,
|
|
|
|
data);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tu_GetPrivateDataEXT(VkDevice _device,
|
|
|
|
VkObjectType objectType,
|
|
|
|
uint64_t objectHandle,
|
|
|
|
VkPrivateDataSlotEXT privateDataSlot,
|
|
|
|
uint64_t* pData)
|
|
|
|
{
|
|
|
|
TU_FROM_HANDLE(tu_device, device, _device);
|
|
|
|
vk_object_base_get_private_data(&device->vk,
|
|
|
|
objectType,
|
|
|
|
objectHandle,
|
|
|
|
privateDataSlot,
|
|
|
|
pData);
|
|
|
|
}
|