2019-11-25 15:29:12 +00:00
|
|
|
/*
|
|
|
|
* Copyright © 2019 Raspberry Pi
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <assert.h>
|
2019-11-28 08:48:29 +00:00
|
|
|
#include <fcntl.h>
|
2019-11-25 15:29:12 +00:00
|
|
|
#include <stdbool.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <sys/mman.h>
|
|
|
|
#include <sys/sysinfo.h>
|
|
|
|
#include <unistd.h>
|
2019-11-27 12:58:02 +00:00
|
|
|
#include <xf86drm.h>
|
2019-11-25 15:29:12 +00:00
|
|
|
|
|
|
|
#include "v3dv_private.h"
|
|
|
|
|
2019-12-04 09:39:01 +00:00
|
|
|
#include "common/v3d_debug.h"
|
2019-12-13 09:31:05 +00:00
|
|
|
|
|
|
|
#include "broadcom/cle/v3dx_pack.h"
|
|
|
|
|
2019-12-02 12:59:04 +00:00
|
|
|
#include "compiler/v3d_compiler.h"
|
2019-11-27 10:49:49 +00:00
|
|
|
#include "compiler/glsl_types.h"
|
2019-12-02 12:59:04 +00:00
|
|
|
|
2019-12-04 09:39:01 +00:00
|
|
|
#include "drm-uapi/v3d_drm.h"
|
2020-01-23 10:24:05 +00:00
|
|
|
#include "format/u_format.h"
|
2020-05-18 09:41:11 +01:00
|
|
|
#include "u_atomic.h"
|
2019-12-04 09:39:01 +00:00
|
|
|
#include "vk_util.h"
|
2019-11-27 10:49:49 +00:00
|
|
|
|
2020-01-20 09:45:06 +00:00
|
|
|
#ifdef VK_USE_PLATFORM_XCB_KHR
|
|
|
|
#include <xcb/xcb.h>
|
|
|
|
#include <xcb/dri3.h>
|
|
|
|
#endif
|
|
|
|
|
2020-07-07 11:23:12 +01:00
|
|
|
#ifdef USE_V3D_SIMULATOR
|
|
|
|
#include "drm-uapi/i915_drm.h"
|
|
|
|
#endif
|
|
|
|
|
2019-11-27 10:49:49 +00:00
|
|
|
static void *
|
|
|
|
default_alloc_func(void *pUserData, size_t size, size_t align,
|
|
|
|
VkSystemAllocationScope allocationScope)
|
|
|
|
{
|
|
|
|
return malloc(size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *
|
|
|
|
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
|
|
|
|
size_t align, VkSystemAllocationScope allocationScope)
|
|
|
|
{
|
|
|
|
return realloc(pOriginal, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Free callback for the fallback allocator. free(NULL) is a no-op, so no
 * guard is needed.
 */
static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}
|
|
|
|
|
|
|
|
/* Fallback allocator used whenever the application does not provide its own
 * VkAllocationCallbacks. The internal-allocation notification callbacks are
 * left NULL (zero-initialized), which Vulkan permits.
 */
static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
|
|
|
|
|
2019-11-25 15:29:12 +00:00
|
|
|
VkResult
|
|
|
|
v3dv_EnumerateInstanceExtensionProperties(const char *pLayerName,
|
|
|
|
uint32_t *pPropertyCount,
|
|
|
|
VkExtensionProperties *pProperties)
|
|
|
|
{
|
2019-11-29 10:09:51 +00:00
|
|
|
/* We don't support any layers */
|
|
|
|
if (pLayerName)
|
|
|
|
return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
|
|
|
|
|
2019-11-25 15:29:12 +00:00
|
|
|
VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
|
|
|
|
|
|
|
|
for (int i = 0; i < V3DV_INSTANCE_EXTENSION_COUNT; i++) {
|
|
|
|
if (v3dv_instance_extensions_supported.extensions[i]) {
|
|
|
|
vk_outarray_append(&out, prop) {
|
|
|
|
*prop = v3dv_instance_extensions[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return vk_outarray_status(&out);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* vkCreateInstance implementation.
 *
 * Validates the requested instance extensions, allocates and initializes the
 * instance object, records application info, builds the instance /
 * physical-device / device entrypoint dispatch tables and sets up
 * debug-report support. Physical-device enumeration is deferred until first
 * use, signalled by physicalDeviceCount = -1.
 */
VkResult
v3dv_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                    const VkAllocationCallbacks *pAllocator,
                    VkInstance *pInstance)
{
   struct v3dv_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   /* Every requested extension must be both known and supported by this
    * build, otherwise the spec mandates VK_ERROR_EXTENSION_NOT_PRESENT.
    */
   struct v3dv_instance_extension_table enabled_extensions = {};
   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < V3DV_INSTANCE_EXTENSION_COUNT; idx++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    v3dv_instance_extensions[idx].extensionName) == 0)
            break;
      }

      if (idx >= V3DV_INSTANCE_EXTENSION_COUNT)
         return vk_error(NULL, VK_ERROR_EXTENSION_NOT_PRESENT);

      if (!v3dv_instance_extensions_supported.extensions[idx])
         return vk_error(NULL, VK_ERROR_EXTENSION_NOT_PRESENT);

      enabled_extensions.extensions[idx] = true;
   }

   instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Required so the Vulkan loader recognizes this as an ICD object. */
   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   /* Parse V3D_DEBUG environment options. */
   v3d_process_debug_variable();

   /* Record application info; api_version == 0 means "not provided" and is
    * patched up to 1.0 below, as required by the spec.
    */
   instance->app_info = (struct v3dv_app_info) { .api_version = 0 };
   if (pCreateInfo->pApplicationInfo) {
      const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;

      instance->app_info.app_name =
         vk_strdup(&instance->alloc, app->pApplicationName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.app_version = app->applicationVersion;

      instance->app_info.engine_name =
         vk_strdup(&instance->alloc, app->pEngineName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.engine_version = app->engineVersion;

      instance->app_info.api_version = app->apiVersion;
   }

   if (instance->app_info.api_version == 0)
      instance->app_info.api_version = VK_API_VERSION_1_0;

   instance->enabled_extensions = enabled_extensions;

   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!v3dv_instance_entrypoint_is_enabled(i,
                                               instance->app_info.api_version,
                                               &instance->enabled_extensions)) {
         instance->dispatch.entrypoints[i] = NULL;
      } else {
         instance->dispatch.entrypoints[i] =
            v3dv_instance_dispatch_table.entrypoints[i];
      }
   }

   struct v3dv_physical_device *pdevice = &instance->physicalDevice;
   for (unsigned i = 0; i < ARRAY_SIZE(pdevice->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!v3dv_physical_device_entrypoint_is_enabled(i,
                                                      instance->app_info.api_version,
                                                      &instance->enabled_extensions)) {
         pdevice->dispatch.entrypoints[i] = NULL;
      } else {
         pdevice->dispatch.entrypoints[i] =
            v3dv_physical_device_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!v3dv_device_entrypoint_is_enabled(i,
                                             instance->app_info.api_version,
                                             &instance->enabled_extensions,
                                             NULL)) {
         instance->device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->device_dispatch.entrypoints[i] =
            v3dv_device_dispatch_table.entrypoints[i];
      }
   }

   /* -1 marks the physical device list as not yet enumerated; see
    * instance_ensure_physical_device().
    */
   instance->physicalDeviceCount = -1;

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(NULL, result);
   }

   /* Refcounted global GLSL type system, released in DestroyInstance. */
   glsl_type_singleton_init_or_ref();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = v3dv_instance_to_handle(instance);

   return VK_SUCCESS;
}
|
|
|
|
|
2019-11-27 11:43:36 +00:00
|
|
|
/* Releases everything owned by a physical device.
 *
 * NOTE(review): teardown order appears intentional — WSI state is torn down
 * before the fds it may use are closed; confirm before reordering.
 */
static void
physical_device_finish(struct v3dv_physical_device *device)
{
   v3dv_wsi_finish(device);

   v3d_compiler_free(device->compiler);

   /* render_fd is always valid for an initialized device, but display_fd is
    * only opened on real hardware (it stays -1 on the simulator), hence the
    * guard.
    */
   close(device->render_fd);
   if (device->display_fd >= 0)
      close(device->display_fd);

   /* Allocated with asprintf() in physical_device_init(). */
   free(device->name);

#if using_v3d_simulator
   v3d_simulator_destroy(device->sim_file);
#endif
}
|
|
|
|
|
2019-11-25 15:29:12 +00:00
|
|
|
/* vkDestroyInstance: tears down everything created in v3dv_CreateInstance,
 * in roughly reverse order of initialization.
 */
void
v3dv_DestroyInstance(VkInstance _instance,
                     const VkAllocationCallbacks *pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_instance, instance, _instance);

   /* Destroying VK_NULL_HANDLE is a no-op per the spec. */
   if (!instance)
      return;

   /* A positive count means enumeration ran and found a device; -1 or 0
    * means there is nothing to finish.
    */
   if (instance->physicalDeviceCount > 0) {
      /* We support at most one physical device. */
      assert(instance->physicalDeviceCount == 1);
      physical_device_finish(&instance->physicalDevice);
   }

   /* Strings were duplicated with vk_strdup() using instance->alloc; the
    * casts drop const for the allocator's free callback.
    */
   vk_free(&instance->alloc, (char *)instance->app_info.app_name);
   vk_free(&instance->alloc, (char *)instance->app_info.engine_name);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   /* Balances glsl_type_singleton_init_or_ref() from CreateInstance. */
   glsl_type_singleton_decref();

   vk_free(&instance->alloc, instance);
}
|
|
|
|
|
2019-12-04 09:25:21 +00:00
|
|
|
/* Computes the size of the single device-local memory heap we expose.
 *
 * Returns at most half of system RAM on systems with <= 4 GiB and 3/4 of it
 * on larger systems, so the GPU cannot starve the rest of the system.
 *
 * Fix: the original ignored sysinfo()'s return value, so on failure it read
 * an uninitialized struct (undefined behavior). Also declares the empty
 * parameter list as (void) to make this a proper prototype.
 */
static uint64_t
compute_heap_size(void)
{
   /* Query the total ram from the system */
   struct sysinfo info;
   if (sysinfo(&info) != 0) {
      /* sysinfo() should not fail with a valid pointer, but if it ever does,
       * don't read uninitialized data; fall back to a conservative 1 GiB.
       */
      return 1ull * 1024 * 1024 * 1024;
   }

   /* totalram is expressed in units of mem_unit bytes. */
   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
|
|
|
|
|
2020-01-20 09:45:06 +00:00
|
|
|
/* When running on the simulator we do everything on a single render node so
|
|
|
|
* we don't need to get an authenticated display fd from the display server.
|
|
|
|
*/
|
|
|
|
#if !using_v3d_simulator
|
|
|
|
#ifdef VK_USE_PLATFORM_XCB_KHR
|
|
|
|
static int
|
|
|
|
create_display_fd_xcb()
|
|
|
|
{
|
|
|
|
xcb_connection_t *conn = xcb_connect(NULL, NULL);
|
|
|
|
const xcb_setup_t *setup = xcb_get_setup(conn);
|
|
|
|
xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);
|
|
|
|
xcb_screen_t *screen = iter.data;
|
|
|
|
|
|
|
|
xcb_dri3_open_cookie_t cookie;
|
|
|
|
xcb_dri3_open_reply_t *reply;
|
|
|
|
cookie = xcb_dri3_open(conn, screen->root, None);
|
|
|
|
reply = xcb_dri3_open_reply(conn, cookie, NULL);
|
|
|
|
if (!reply)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (reply->nfd != 1) {
|
|
|
|
free(reply);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int fd = xcb_dri3_open_reply_fds(conn, reply)[0];
|
|
|
|
free(reply);
|
|
|
|
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
|
|
|
|
|
|
|
|
return fd;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2020-05-26 07:38:31 +01:00
|
|
|
static bool
|
|
|
|
v3d_has_feature(struct v3dv_physical_device *device, enum drm_v3d_param feature)
|
|
|
|
{
|
|
|
|
struct drm_v3d_get_param p = {
|
|
|
|
.param = feature,
|
|
|
|
};
|
|
|
|
if (v3dv_ioctl(device->render_fd, DRM_IOCTL_V3D_GET_PARAM, &p) != 0)
|
|
|
|
return false;
|
|
|
|
return p.value;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
device_has_expected_features(struct v3dv_physical_device *device)
|
|
|
|
{
|
|
|
|
return v3d_has_feature(device, DRM_V3D_PARAM_SUPPORTS_TFU) &&
|
|
|
|
v3d_has_feature(device, DRM_V3D_PARAM_SUPPORTS_CSD) &&
|
|
|
|
v3d_has_feature(device, DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH);
|
|
|
|
}
|
|
|
|
|
2019-11-27 12:58:02 +00:00
|
|
|
/* Initializes a physical device from a DRM device.
 *
 * Opens the v3d render node (and, on real hardware, an authenticated vc4
 * display fd via DRI3), queries and validates the hardware, initializes the
 * compiler, memory heaps/types and WSI. On failure both fds are closed and
 * an error result is returned.
 *
 * NOTE(review): on the failure paths after v3d_compiler_init()/asprintf()
 * succeed, the compiler and name are not freed here — presumably acceptable
 * because those calls precede only the wsi failure path; verify.
 */
static VkResult
physical_device_init(struct v3dv_physical_device *device,
                     struct v3dv_instance *instance,
                     drmDevicePtr drm_device)
{
   VkResult result = VK_SUCCESS;
   int32_t display_fd = -1;

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;

   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   int32_t render_fd = open(path, O_RDWR | O_CLOEXEC);
   if (render_fd < 0)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   /* If we are running on real hardware we need to open the vc4 display
    * device so we can allocate winsys BOs for the v3d core to render into.
    */
#if !using_v3d_simulator
#ifdef VK_USE_PLATFORM_XCB_KHR
   display_fd = create_display_fd_xcb();
#endif

   if (display_fd == -1) {
      result = VK_ERROR_INCOMPATIBLE_DRIVER;
      goto fail;
   }
#endif

   device->render_fd = render_fd;       /* The v3d render node */
   device->display_fd = display_fd;     /* The vc4 primary node */

   /* No pipeline cache versioning yet: the UUID is all zeroes. */
   uint8_t zeroes[VK_UUID_SIZE] = { 0 };
   memcpy(device->pipeline_cache_uuid, zeroes, VK_UUID_SIZE);

#if using_v3d_simulator
   device->sim_file = v3d_simulator_init(device->render_fd);
#endif

   if (!v3d_get_device_info(device->render_fd, &device->devinfo, &v3dv_ioctl)) {
      result = VK_ERROR_INCOMPATIBLE_DRIVER;
      goto fail;
   }

   /* Require hardware version 4.2 or newer. */
   if (device->devinfo.ver < 42) {
      result = VK_ERROR_INCOMPATIBLE_DRIVER;
      goto fail;
   }

   /* The kernel must support TFU, CSD and cache-flush. */
   if (!device_has_expected_features(device)) {
      result = VK_ERROR_INCOMPATIBLE_DRIVER;
      goto fail;
   }

   device->compiler = v3d_compiler_init(&device->devinfo);
   device->next_program_id = 0;

   asprintf(&device->name, "V3D %d.%d",
            device->devinfo.ver / 10, device->devinfo.ver % 10);

   /* Setup available memory heaps and types */
   VkPhysicalDeviceMemoryProperties *mem = &device->memory;
   mem->memoryHeapCount = 1;
   mem->memoryHeaps[0].size = compute_heap_size();
   mem->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   mem->memoryTypeCount = 2;

   /* This is the only combination required by the spec */
   mem->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   mem->memoryTypes[0].heapIndex = 0;

   mem->memoryTypes[1].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
   mem->memoryTypes[1].heapIndex = 0;

   /* Job merging is on by default; the env var opts out (debug aid). */
   device->options.merge_jobs = getenv("V3DV_NO_MERGE_JOBS") == NULL;

   result = v3dv_wsi_init(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   v3dv_physical_device_get_supported_extensions(device,
                                                 &device->supported_extensions);

   fprintf(stderr, "WARNING: v3dv is neither a complete nor a conformant "
           "Vulkan implementation. Testing use only.\n");

   return VK_SUCCESS;

fail:
   if (render_fd >= 0)
      close(render_fd);
   if (display_fd >= 0)
      close(display_fd);

   return result;
}
|
|
|
|
|
|
|
|
/* Scans the system's DRM devices for something this driver can run on and,
 * if found, initializes instance->physicalDevice from it.
 *
 * Simulator builds look for an Intel PCI render node; hardware builds look
 * for both a v3d render node and a vc4 primary node (the latter is required
 * for WSI; see the comment below) and only then initialize from the v3d one.
 *
 * Returns VK_SUCCESS with physicalDeviceCount == 1 on success, or
 * VK_ERROR_INCOMPATIBLE_DRIVER (count stays 0) when nothing suitable exists.
 */
static VkResult
enumerate_devices(struct v3dv_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physicalDeviceCount = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
   if (max_devices < 1)
      return VK_ERROR_INCOMPATIBLE_DRIVER;

#if !using_v3d_simulator
   /* Indices into devices[] of the v3d render node and vc4 primary node. */
   int32_t v3d_idx = -1;
   int32_t vc4_idx = -1;
#endif
   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
#if using_v3d_simulator
      /* In the simulator, we look for an Intel render node */
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PCI &&
          devices[i]->deviceinfo.pci->vendor_id == 0x8086) {
         result = physical_device_init(&instance->physicalDevice, instance,
                                       devices[i]);
         if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
#else
      /* On actual hardware, we should have a render node (v3d)
       * and a primary node (vc4). We will need to use the primary
       * to allocate WSI buffers and share them with the render node
       * via prime, but that is a privileged operation so we need the
       * primary node to be authenticated, and for that we need the
       * display server to provide the device fd (with DRI3), so we
       * here we only check that the device is present but we don't
       * try to open it.
       */
      if (devices[i]->bustype != DRM_BUS_PLATFORM)
         continue;

      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER) {
         /* Match the v3d render node via its devicetree compatible list. */
         char **compat = devices[i]->deviceinfo.platform->compatible;
         while (*compat) {
            if (strncmp(*compat, "brcm,2711-v3d", 13) == 0) {
               v3d_idx = i;
               break;
            }
            compat++;
         }
      } else if (devices[i]->available_nodes & 1 << DRM_NODE_PRIMARY) {
         /* Match the vc4 display node (two compatible spellings exist). */
         char **compat = devices[i]->deviceinfo.platform->compatible;
         while (*compat) {
            if (strncmp(*compat, "brcm,bcm2711-vc5", 16) == 0 ||
                strncmp(*compat, "brcm,bcm2835-vc4", 16) == 0 ) {
               vc4_idx = i;
               break;
            }
            compat++;
         }
      }
#endif
   }

#if !using_v3d_simulator
   /* Both nodes must be present; initialize from the render node. */
   if (v3d_idx == -1 || vc4_idx == -1)
      result = VK_ERROR_INCOMPATIBLE_DRIVER;
   else
      result = physical_device_init(&instance->physicalDevice, instance,
                                    devices[v3d_idx]);
#endif

   drmFreeDevices(devices, max_devices);

   if (result == VK_SUCCESS)
      instance->physicalDeviceCount = 1;

   return result;
}
|
|
|
|
|
|
|
|
static VkResult
|
|
|
|
instance_ensure_physical_device(struct v3dv_instance *instance)
|
|
|
|
{
|
|
|
|
if (instance->physicalDeviceCount < 0) {
|
|
|
|
VkResult result = enumerate_devices(instance);
|
|
|
|
if (result != VK_SUCCESS &&
|
|
|
|
result != VK_ERROR_INCOMPATIBLE_DRIVER)
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-11-25 15:29:12 +00:00
|
|
|
VkResult
|
|
|
|
v3dv_EnumeratePhysicalDevices(VkInstance _instance,
|
|
|
|
uint32_t *pPhysicalDeviceCount,
|
|
|
|
VkPhysicalDevice *pPhysicalDevices)
|
|
|
|
{
|
2019-11-27 12:58:02 +00:00
|
|
|
V3DV_FROM_HANDLE(v3dv_instance, instance, _instance);
|
|
|
|
VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
|
|
|
|
|
|
|
|
VkResult result = instance_ensure_physical_device(instance);
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
return result;
|
|
|
|
|
|
|
|
if (instance->physicalDeviceCount == 0)
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
|
|
|
assert(instance->physicalDeviceCount == 1);
|
|
|
|
vk_outarray_append(&out, i) {
|
|
|
|
*i = v3dv_physical_device_to_handle(&instance->physicalDevice);
|
|
|
|
}
|
2019-11-25 15:29:12 +00:00
|
|
|
|
2019-11-27 12:58:02 +00:00
|
|
|
return vk_outarray_status(&out);
|
2019-11-25 15:29:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
|
|
|
|
VkPhysicalDeviceFeatures *pFeatures)
|
|
|
|
{
|
2019-11-29 09:58:53 +00:00
|
|
|
memset(pFeatures, 0, sizeof(*pFeatures));
|
|
|
|
|
|
|
|
*pFeatures = (VkPhysicalDeviceFeatures) {
|
2020-02-21 08:33:03 +00:00
|
|
|
.robustBufferAccess = true, /* This feature is mandatory */
|
2019-11-29 09:58:53 +00:00
|
|
|
.fullDrawIndexUint32 = false,
|
2020-06-28 01:08:16 +01:00
|
|
|
.imageCubeArray = true,
|
2019-11-29 09:58:53 +00:00
|
|
|
.independentBlend = false,
|
|
|
|
.geometryShader = false,
|
|
|
|
.tessellationShader = false,
|
|
|
|
.sampleRateShading = false,
|
|
|
|
.dualSrcBlend = false,
|
|
|
|
.logicOp = false,
|
|
|
|
.multiDrawIndirect = false,
|
|
|
|
.drawIndirectFirstInstance = false,
|
|
|
|
.depthClamp = false,
|
|
|
|
.depthBiasClamp = false,
|
2020-07-22 09:12:51 +01:00
|
|
|
.fillModeNonSolid = true,
|
2020-05-13 10:21:12 +01:00
|
|
|
.depthBounds = false, /* Only available since V3D 4.3.16.2 */
|
2020-05-13 11:21:55 +01:00
|
|
|
.wideLines = true,
|
2019-11-29 09:58:53 +00:00
|
|
|
.largePoints = false,
|
|
|
|
.alphaToOne = false,
|
|
|
|
.multiViewport = false,
|
2020-03-29 15:29:55 +01:00
|
|
|
.samplerAnisotropy = true,
|
2020-02-27 07:55:40 +00:00
|
|
|
.textureCompressionETC2 = true,
|
2019-11-29 09:58:53 +00:00
|
|
|
.textureCompressionASTC_LDR = false,
|
|
|
|
.textureCompressionBC = false,
|
v3dv: implement occlusion queries
The design for queries in Vulkan requires that some commands execute
in the GPU as part of a command buffer. Unfortunately, V3D doesn't
really have supprt for this, which means that we need to execute them
in the CPU but we still need to make it look as if they happened
inside the comamnd buffer from the point of view of the user, which
adds certain hassle.
The above means that in some cases we need to do CPU waits for certain
parts of the command buffer to execute so we can then run the CPU
code. For exmaple, we need to wait before executing a query resets
just in case the GPU is using them, and we have to do a CPU wait wait
for previous GPU jobs to complete before copying query results if the
user has asked us to do that. In the future, we may want to have
submission thread instead so we don't block the main thread in these
scenarios.
Because we now need to execute some tasks in the CPU as part of a
command buffer, this introduces the concept of job types, there is one
type for all GPU jobs, and then we have one type for each kind of job
that needs to execute in the CPU. CPU jobs are executed by the queue
in order just like GPU jobs, only that they are exclusively CPU tasks.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6766>
2020-04-16 09:30:38 +01:00
|
|
|
.occlusionQueryPrecise = true,
|
2019-11-29 09:58:53 +00:00
|
|
|
.pipelineStatisticsQuery = false,
|
v3dv: initial descriptor set support
Focused on getting the basic UBO and SSBO cases implemented. So no
dynamic offset, push contanst, samplers, and so on.
This include a initial implementation for CreatedescriptorPool,
CreateDescriptorSetLayout, AllocateDescriptorSets,
UpdateDescriptorSets, CreatePipelineLayout, and CmdBindDescriptorSets.
Also introduces lowering vulkan intrinsics. For now just
vulkan_resource_index.
We also introduce a descriptor_map, in this case for the ubos and
ssbos, used to assign a index for each set/binding combination, that
would be used when filling back the details of the ubo or ssbo on
other places (like QUNIFORM_UBO_ADDR or QUNIFORM_SSBO_OFFSET).
Note that at this point we don't need a bo for the descriptor pool, so
descriptor sets are not getting a piece of it. That would likely
change as we start to support more descriptor set types.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6766>
2020-01-20 14:29:38 +00:00
|
|
|
.vertexPipelineStoresAndAtomics = true,
|
|
|
|
.fragmentStoresAndAtomics = true,
|
2019-11-29 09:58:53 +00:00
|
|
|
.shaderTessellationAndGeometryPointSize = false,
|
|
|
|
.shaderImageGatherExtended = false,
|
|
|
|
.shaderStorageImageExtendedFormats = false,
|
|
|
|
.shaderStorageImageMultisample = false,
|
|
|
|
.shaderStorageImageReadWithoutFormat = false,
|
|
|
|
.shaderStorageImageWriteWithoutFormat = false,
|
|
|
|
.shaderUniformBufferArrayDynamicIndexing = false,
|
|
|
|
.shaderSampledImageArrayDynamicIndexing = false,
|
|
|
|
.shaderStorageBufferArrayDynamicIndexing = false,
|
|
|
|
.shaderStorageImageArrayDynamicIndexing = false,
|
2020-07-21 09:29:21 +01:00
|
|
|
.shaderClipDistance = true,
|
2019-11-29 09:58:53 +00:00
|
|
|
.shaderCullDistance = false,
|
|
|
|
.shaderFloat64 = false,
|
|
|
|
.shaderInt64 = false,
|
|
|
|
.shaderInt16 = false,
|
|
|
|
.shaderResourceResidency = false,
|
|
|
|
.shaderResourceMinLod = false,
|
|
|
|
.sparseBinding = false,
|
|
|
|
.sparseResidencyBuffer = false,
|
|
|
|
.sparseResidencyImage2D = false,
|
|
|
|
.sparseResidencyImage3D = false,
|
|
|
|
.sparseResidency2Samples = false,
|
|
|
|
.sparseResidency4Samples = false,
|
|
|
|
.sparseResidency8Samples = false,
|
|
|
|
.sparseResidency16Samples = false,
|
|
|
|
.sparseResidencyAliased = false,
|
|
|
|
.variableMultisampleRate = false,
|
2020-05-26 11:05:43 +01:00
|
|
|
.inheritedQueries = true,
|
2019-11-29 09:58:53 +00:00
|
|
|
};
|
2019-11-25 15:29:12 +00:00
|
|
|
}
|
|
|
|
|
2020-01-15 07:48:07 +00:00
|
|
|
void
|
|
|
|
v3dv_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
|
|
|
|
VkPhysicalDeviceFeatures2 *pFeatures)
|
|
|
|
{
|
|
|
|
v3dv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
|
|
|
|
|
|
|
|
vk_foreach_struct(ext, pFeatures->pNext) {
|
|
|
|
switch (ext->sType) {
|
|
|
|
default:
|
|
|
|
v3dv_debug_ignored_stype(ext->sType);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-04 00:32:11 +01:00
|
|
|
/* Reports the PCI-SIG vendor id for this device: 0x14E4 is Broadcom. */
uint32_t
v3dv_physical_device_vendor_id(struct v3dv_physical_device *dev)
{
   const uint32_t broadcom_vendor_id = 0x14E4;
   return broadcom_vendor_id;
}
|
|
|
|
|
|
|
|
|
2020-07-07 11:23:12 +01:00
|
|
|
#if using_v3d_simulator
|
|
|
|
static bool
|
|
|
|
get_i915_param(int fd, uint32_t param, int *value)
|
|
|
|
{
|
|
|
|
int tmp;
|
|
|
|
|
|
|
|
struct drm_i915_getparam gp = {
|
|
|
|
.param = param,
|
|
|
|
.value = &tmp,
|
|
|
|
};
|
|
|
|
|
|
|
|
int ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
|
|
|
|
if (ret != 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
*value = tmp;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2020-07-04 00:32:11 +01:00
|
|
|
/* FIXME:
|
|
|
|
* Getting deviceID and UUID will probably require to use the kernel pci
|
|
|
|
* interface. See this:
|
|
|
|
* https://www.kernel.org/doc/html/latest/PCI/pci.html#how-to-find-pci-devices-manually
|
|
|
|
* And check the getparam ioctl in the i915 kernel with CHIPSET_ID for
|
|
|
|
* example.
|
|
|
|
*/
|
|
|
|
/* Reports the device id exposed through VkPhysicalDeviceProperties.
 *
 * On simulator builds this is the underlying Intel chipset id (queried via
 * i915 GETPARAM); on hardware there is no PCI device id yet, so 0 is
 * returned (see the FIXME above about using the kernel interfaces).
 *
 * Fix: the failure message was garbled ("Error getting for device_id").
 */
uint32_t
v3dv_physical_device_device_id(struct v3dv_physical_device *dev)
{
#if using_v3d_simulator
   int devid = 0;

   if (!get_i915_param(dev->render_fd, I915_PARAM_CHIPSET_ID, &devid))
      fprintf(stderr, "Error getting device_id\n");

   return devid;
#else
   /* FIXME */
   return 0;
#endif
}
|
|
|
|
|
2019-11-25 15:29:12 +00:00
|
|
|
/**
 * vkGetPhysicalDeviceProperties: fills in the API/driver versions, device
 * identification and the complete VkPhysicalDeviceLimits table for the
 * V3D GPU.
 */
void
v3dv_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                                 VkPhysicalDeviceProperties *pProperties)
{
   V3DV_FROM_HANDLE(v3dv_physical_device, pdevice, physicalDevice);

   const uint32_t page_size = 4096;
   const uint32_t mem_size = compute_heap_size();

   /* Per-stage limits */
   const uint32_t max_samplers = 16;
   const uint32_t max_uniform_buffers = 12;
   const uint32_t max_storage_buffers = 12;
   const uint32_t max_dynamic_storage_buffers = 6;
   const uint32_t max_sampled_images = 16;
   const uint32_t max_storage_images = 4;

   /* 16 varying vec4s */
   const uint32_t max_varying_components = 16 * 4;
   const uint32_t max_render_targets = 4;

   /* Sub-pixel coordinate precision, also reused for interpolation offsets
    * and line width granularity below.
    */
   const uint32_t v3d_coord_shift = 6;

   const uint32_t max_fb_size = 4096;

   const VkSampleCountFlags supported_sample_counts =
      VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT;

   /* FIXME: this will probably require an in-depth review */
   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = 4096,
      .maxImageDimension2D = 4096,
      .maxImageDimension3D = 4096,
      .maxImageDimensionCube = 4096,
      .maxImageArrayLayers = 2048,
      .maxTexelBufferElements = (1ul << 28),
      .maxUniformBufferRange = (1ul << 27),
      .maxStorageBufferRange = (1ul << 27),
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = mem_size / page_size,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 256, /* A cache line */
      .sparseAddressSpaceSize = 0, /* Sparse resources are not supported */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_samplers,
      .maxPerStageDescriptorUniformBuffers = max_uniform_buffers,
      .maxPerStageDescriptorStorageBuffers = max_storage_buffers,
      .maxPerStageDescriptorSampledImages = max_sampled_images,
      .maxPerStageDescriptorStorageImages = max_storage_images,
      .maxPerStageDescriptorInputAttachments = 4,
      .maxPerStageResources = 128,

      /* We multiply some limits by 6 to account for all shader stages */
      .maxDescriptorSetSamplers = 6 * max_samplers,
      .maxDescriptorSetUniformBuffers = 6 * max_uniform_buffers,
      .maxDescriptorSetUniformBuffersDynamic = 8,
      .maxDescriptorSetStorageBuffers = 6 * max_storage_buffers,
      .maxDescriptorSetStorageBuffersDynamic = 6 * max_dynamic_storage_buffers,
      .maxDescriptorSetSampledImages = 6 * max_sampled_images,
      .maxDescriptorSetStorageImages = 6 * max_storage_images,
      .maxDescriptorSetInputAttachments = 4,

      /* Vertex limits */
      .maxVertexInputAttributes = MAX_VERTEX_ATTRIBS,
      .maxVertexInputBindings = MAX_VBS,
      .maxVertexInputAttributeOffset = 0xffffffff,
      .maxVertexInputBindingStride = 0xffffffff,
      .maxVertexOutputComponents = max_varying_components,

      /* Tessellation limits: tessellation shaders are not supported */
      .maxTessellationGenerationLevel = 0,
      .maxTessellationPatchSize = 0,
      .maxTessellationControlPerVertexInputComponents = 0,
      .maxTessellationControlPerVertexOutputComponents = 0,
      .maxTessellationControlPerPatchOutputComponents = 0,
      .maxTessellationControlTotalOutputComponents = 0,
      .maxTessellationEvaluationInputComponents = 0,
      .maxTessellationEvaluationOutputComponents = 0,

      /* Geometry limits: geometry shaders are not supported */
      .maxGeometryShaderInvocations = 0,
      .maxGeometryInputComponents = 0,
      .maxGeometryOutputComponents = 0,
      .maxGeometryOutputVertices = 0,
      .maxGeometryTotalOutputComponents = 0,

      /* Fragment limits */
      .maxFragmentInputComponents = max_varying_components,
      .maxFragmentOutputAttachments = 4,
      .maxFragmentDualSrcAttachments = 0,
      .maxFragmentCombinedOutputResources = max_render_targets +
                                            max_storage_buffers +
                                            max_storage_images,

      /* Compute limits */
      .maxComputeSharedMemorySize = 16384,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 256,
      .maxComputeWorkGroupSize = { 256, 256, 256 },

      .subPixelPrecisionBits = v3d_coord_shift,
      .subTexelPrecisionBits = 8,
      .mipmapPrecisionBits = 8,
      .maxDrawIndexedIndexValue = 0x00ffffff,
      .maxDrawIndirectCount = 0x7fffffff,
      .maxSamplerLodBias = 14.0f,
      .maxSamplerAnisotropy = 16.0f,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { max_fb_size, max_fb_size },
      .viewportBoundsRange = { -2.0 * max_fb_size,
                                2.0 * max_fb_size - 1 },
      .viewportSubPixelBits = 0,
      .minMemoryMapAlignment = page_size,
      .minTexelBufferOffsetAlignment = 16,
      .minUniformBufferOffsetAlignment = 32,
      .minStorageBufferOffsetAlignment = 32,
      .minTexelOffset = -8,
      .maxTexelOffset = 7,
      .minTexelGatherOffset = -8,
      .maxTexelGatherOffset = 7,
      .minInterpolationOffset = -0.5,
      .maxInterpolationOffset = 0.5,
      .subPixelInterpolationOffsetBits = v3d_coord_shift,
      .maxFramebufferWidth = max_fb_size,
      .maxFramebufferHeight = max_fb_size,
      .maxFramebufferLayers = 256,
      .framebufferColorSampleCounts = supported_sample_counts,
      .framebufferDepthSampleCounts = supported_sample_counts,
      .framebufferStencilSampleCounts = supported_sample_counts,
      .framebufferNoAttachmentsSampleCounts = supported_sample_counts,
      .maxColorAttachments = max_render_targets,
      .sampledImageColorSampleCounts = supported_sample_counts,
      .sampledImageIntegerSampleCounts = supported_sample_counts,
      .sampledImageDepthSampleCounts = supported_sample_counts,
      .sampledImageStencilSampleCounts = supported_sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = false,
      .timestampPeriod = 0.0f, /* FIXME: timestamps not implemented yet */
      .maxClipDistances = 8,
      .maxCullDistances = 0,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 2,
      .pointSizeRange = { 1.0f, 1.0f },
      .lineWidthRange = { 1.0f, 32.0f },
      .pointSizeGranularity = 0.0f,
      .lineWidthGranularity = 2.0f / (1 << v3d_coord_shift),
      .strictLines = true,
      .standardSampleLocations = false,
      .optimalBufferCopyOffsetAlignment = 32,
      .optimalBufferCopyRowPitchAlignment = 32,
      .nonCoherentAtomSize = 256,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = v3dv_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = v3dv_physical_device_vendor_id(pdevice),
      .deviceID = v3dv_physical_device_device_id(pdevice),
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
            "%s", pdevice->name);
   memcpy(pProperties->pipelineCacheUUID,
          pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
}
|
|
|
|
|
2020-01-15 07:48:07 +00:00
|
|
|
void
|
|
|
|
v3dv_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
|
|
|
|
VkPhysicalDeviceProperties2 *pProperties)
|
|
|
|
{
|
|
|
|
v3dv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
|
|
|
|
|
|
|
|
vk_foreach_struct(ext, pProperties->pNext) {
|
|
|
|
switch (ext->sType) {
|
2020-01-15 09:00:11 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
|
|
|
|
VkPhysicalDeviceIDProperties *id_props =
|
|
|
|
(VkPhysicalDeviceIDProperties *)ext;
|
|
|
|
/* FIXME */
|
|
|
|
memset(id_props->deviceUUID, 0, VK_UUID_SIZE);
|
|
|
|
memset(id_props->driverUUID, 0, VK_UUID_SIZE);
|
|
|
|
/* The LUID is for Windows. */
|
|
|
|
id_props->deviceLUIDValid = false;
|
|
|
|
break;
|
|
|
|
}
|
2020-01-15 07:48:07 +00:00
|
|
|
default:
|
|
|
|
v3dv_debug_ignored_stype(ext->sType);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-29 10:33:37 +00:00
|
|
|
/* We support exactly one queue family. */
static const VkQueueFamilyProperties
v3dv_queue_family_properties = {
   /* The single family services graphics, compute and transfer work. */
   .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                 VK_QUEUE_COMPUTE_BIT |
                 VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 0, /* FIXME */
   .minImageTransferGranularity = { 1, 1, 1 },
};
|
|
|
|
|
2019-11-25 15:29:12 +00:00
|
|
|
void
|
|
|
|
v3dv_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
|
|
|
|
uint32_t *pCount,
|
|
|
|
VkQueueFamilyProperties *pQueueFamilyProperties)
|
|
|
|
{
|
2019-11-29 10:33:37 +00:00
|
|
|
VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
|
|
|
|
|
|
|
|
vk_outarray_append(&out, p) {
|
|
|
|
*p = v3dv_queue_family_properties;
|
|
|
|
}
|
2019-11-25 15:29:12 +00:00
|
|
|
}
|
|
|
|
|
2020-01-15 07:48:07 +00:00
|
|
|
void
|
|
|
|
v3dv_GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
|
|
|
|
uint32_t *pQueueFamilyPropertyCount,
|
|
|
|
VkQueueFamilyProperties2 *pQueueFamilyProperties)
|
|
|
|
{
|
|
|
|
VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
|
|
|
|
|
|
|
|
vk_outarray_append(&out, p) {
|
|
|
|
p->queueFamilyProperties = v3dv_queue_family_properties;
|
|
|
|
|
|
|
|
vk_foreach_struct(s, p->pNext) {
|
|
|
|
v3dv_debug_ignored_stype(s->sType);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-25 15:29:12 +00:00
|
|
|
void
|
|
|
|
v3dv_GetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice,
|
|
|
|
VkPhysicalDeviceMemoryProperties *pMemoryProperties)
|
|
|
|
{
|
2019-12-04 09:25:21 +00:00
|
|
|
V3DV_FROM_HANDLE(v3dv_physical_device, device, physicalDevice);
|
|
|
|
*pMemoryProperties = device->memory;
|
2019-11-25 15:29:12 +00:00
|
|
|
}
|
|
|
|
|
2020-01-15 07:48:07 +00:00
|
|
|
void
|
|
|
|
v3dv_GetPhysicalDeviceMemoryProperties2(VkPhysicalDevice physicalDevice,
|
|
|
|
VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
|
|
|
|
{
|
|
|
|
v3dv_GetPhysicalDeviceMemoryProperties(physicalDevice,
|
|
|
|
&pMemoryProperties->memoryProperties);
|
|
|
|
|
|
|
|
vk_foreach_struct(ext, pMemoryProperties->pNext) {
|
|
|
|
switch (ext->sType) {
|
|
|
|
default:
|
|
|
|
v3dv_debug_ignored_stype(ext->sType);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-11-25 15:29:12 +00:00
|
|
|
|
|
|
|
/**
 * vkGetInstanceProcAddr implementation. Resolution order: global entry
 * points (queryable with a NULL instance), then the instance,
 * physical-device and device dispatch tables.
 */
PFN_vkVoidFunction
v3dv_GetInstanceProcAddr(VkInstance _instance,
                         const char *pName)
{
   V3DV_FROM_HANDLE(v3dv_instance, instance, _instance);

   /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly
    * when we have to return valid function pointers, NULL, or it's left
    * undefined. See the table for exact details.
    */
   if (pName == NULL)
      return NULL;

   /* Global entry points: these may be resolved without an instance. */
#define LOOKUP_V3DV_ENTRYPOINT(entrypoint) \
   if (strcmp(pName, "vk" #entrypoint) == 0) \
      return (PFN_vkVoidFunction)v3dv_##entrypoint

   LOOKUP_V3DV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
   LOOKUP_V3DV_ENTRYPOINT(CreateInstance);

#undef LOOKUP_V3DV_ENTRYPOINT

   if (instance == NULL)
      return NULL;

   /* Instance-level entry points. */
   int idx = v3dv_get_instance_entrypoint_index(pName);
   if (idx >= 0)
      return instance->dispatch.entrypoints[idx];

   /* Physical-device-level entry points. */
   idx = v3dv_get_physical_device_entrypoint_index(pName);
   if (idx >= 0)
      return instance->physicalDevice.dispatch.entrypoints[idx];

   /* Device-level entry points resolved through the instance. */
   idx = v3dv_get_device_entrypoint_index(pName);
   if (idx >= 0)
      return instance->device_dispatch.entrypoints[idx];

   return NULL;
}
|
|
|
|
|
|
|
|
/* With version 1+ of the loader interface the ICD should expose
|
|
|
|
* vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
|
|
|
|
*/
|
|
|
|
PUBLIC
|
|
|
|
VKAPI_ATTR PFN_vkVoidFunction
|
|
|
|
VKAPI_CALL vk_icdGetInstanceProcAddr(VkInstance instance,
|
|
|
|
const char *pName);
|
|
|
|
|
|
|
|
PUBLIC
|
|
|
|
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
|
|
|
|
vk_icdGetInstanceProcAddr(VkInstance instance,
|
|
|
|
const char* pName)
|
|
|
|
{
|
|
|
|
return v3dv_GetInstanceProcAddr(instance, pName);
|
|
|
|
}
|
|
|
|
|
|
|
|
PFN_vkVoidFunction
|
|
|
|
v3dv_GetDeviceProcAddr(VkDevice _device,
|
|
|
|
const char *pName)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
|
|
|
|
if (!device || !pName)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
int idx = v3dv_get_device_entrypoint_index(pName);
|
|
|
|
if (idx < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return device->dispatch.entrypoints[idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
/* With version 4+ of the loader interface the ICD should expose
|
|
|
|
* vk_icdGetPhysicalDeviceProcAddr()
|
|
|
|
*/
|
|
|
|
PUBLIC
|
|
|
|
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
|
|
|
|
vk_icdGetPhysicalDeviceProcAddr(VkInstance _instance,
|
|
|
|
const char* pName);
|
|
|
|
|
|
|
|
PFN_vkVoidFunction
|
|
|
|
vk_icdGetPhysicalDeviceProcAddr(VkInstance _instance,
|
|
|
|
const char* pName)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_instance, instance, _instance);
|
|
|
|
|
|
|
|
if (!pName || !instance)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
int idx = v3dv_get_physical_device_entrypoint_index(pName);
|
|
|
|
if (idx < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return instance->physicalDevice.dispatch.entrypoints[idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
|
|
|
|
const char *pLayerName,
|
|
|
|
uint32_t *pPropertyCount,
|
|
|
|
VkExtensionProperties *pProperties)
|
|
|
|
{
|
2019-11-29 10:06:25 +00:00
|
|
|
/* We don't support any layers */
|
|
|
|
if (pLayerName)
|
|
|
|
return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
|
2019-11-25 15:29:12 +00:00
|
|
|
|
2019-11-29 10:06:25 +00:00
|
|
|
V3DV_FROM_HANDLE(v3dv_physical_device, device, physicalDevice);
|
|
|
|
VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
|
|
|
|
|
|
|
|
for (int i = 0; i < V3DV_DEVICE_EXTENSION_COUNT; i++) {
|
|
|
|
if (device->supported_extensions.extensions[i]) {
|
|
|
|
vk_outarray_append(&out, prop) {
|
|
|
|
*prop = v3dv_device_extensions[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return vk_outarray_status(&out);
|
2019-11-25 15:29:12 +00:00
|
|
|
}
|
|
|
|
|
2019-12-03 08:10:10 +00:00
|
|
|
VkResult
|
|
|
|
v3dv_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
|
|
|
|
VkLayerProperties *pProperties)
|
|
|
|
{
|
|
|
|
if (pProperties == NULL) {
|
|
|
|
*pPropertyCount = 0;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
|
|
|
|
uint32_t *pPropertyCount,
|
|
|
|
VkLayerProperties *pProperties)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_physical_device, physical_device, physicalDevice);
|
|
|
|
|
|
|
|
if (pProperties == NULL) {
|
|
|
|
*pPropertyCount = 0;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return vk_error(physical_device->instance, VK_ERROR_LAYER_NOT_PRESENT);
|
|
|
|
}
|
|
|
|
|
2019-11-29 11:44:40 +00:00
|
|
|
/* Initializes the device's single queue: loader magic, back-pointer to
 * the owning device, an empty deferred-submit wait list and the mutex
 * that guards it. Always succeeds.
 */
static VkResult
queue_init(struct v3dv_device *device, struct v3dv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->flags = 0;
   list_inithead(&queue->submit_wait_list);
   pthread_mutex_init(&queue->mutex, NULL);
   return VK_SUCCESS;
}
|
|
|
|
|
|
|
|
/* Tears down a queue initialized by queue_init(). The deferred-submit
 * list must already be empty, i.e. all pending work has completed.
 */
static void
queue_finish(struct v3dv_queue *queue)
{
   assert(list_is_empty(&queue->submit_wait_list));
   pthread_mutex_destroy(&queue->mutex);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
init_device_dispatch(struct v3dv_device *device)
|
|
|
|
{
|
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
|
|
|
|
/* Vulkan requires that entrypoints for extensions which have not been
|
|
|
|
* enabled must not be advertised.
|
|
|
|
*/
|
|
|
|
if (!v3dv_device_entrypoint_is_enabled(i, device->instance->app_info.api_version,
|
|
|
|
&device->instance->enabled_extensions,
|
|
|
|
&device->enabled_extensions)) {
|
|
|
|
device->dispatch.entrypoints[i] = NULL;
|
|
|
|
} else {
|
|
|
|
device->dispatch.entrypoints[i] =
|
|
|
|
v3dv_device_dispatch_table.entrypoints[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-31 12:06:55 +01:00
|
|
|
/* Hash callback for tables keyed by a 64-bit value: hashes the eight
 * bytes the key points at.
 */
static uint32_t
u64_hash(const void *key)
{
   return _mesa_hash_data(key, sizeof(uint64_t));
}
|
|
|
|
|
|
|
|
/* Equality callback paired with u64_hash: two 64-bit keys are equal
 * iff all their bytes match.
 */
static bool
u64_compare(const void *key1, const void *key2)
{
   return !memcmp(key1, key2, sizeof(uint64_t));
}
|
|
|
|
|
2020-04-21 13:09:23 +01:00
|
|
|
static void
|
2020-07-16 11:34:30 +01:00
|
|
|
init_meta_clear_resources(struct v3dv_device *device)
|
2020-04-21 13:09:23 +01:00
|
|
|
{
|
|
|
|
device->meta.color_clear.cache =
|
|
|
|
_mesa_hash_table_create(NULL, u64_hash, u64_compare);
|
2020-07-16 11:34:30 +01:00
|
|
|
device->meta.depth_clear.cache =
|
|
|
|
_mesa_hash_table_create(NULL, u64_hash, u64_compare);
|
2020-04-21 13:09:23 +01:00
|
|
|
}
|
|
|
|
|
2020-04-30 14:27:02 +01:00
|
|
|
/* Hash callback for the meta blit pipeline caches: hashes the
 * fixed-size blit cache key blob.
 */
static uint32_t
meta_blit_key_hash(const void *key)
{
   return _mesa_hash_data(key, V3DV_META_BLIT_CACHE_KEY_SIZE);
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
meta_blit_key_compare(const void *key1, const void *key2)
|
|
|
|
{
|
|
|
|
return memcmp(key1, key2, V3DV_META_BLIT_CACHE_KEY_SIZE) == 0;
|
|
|
|
}
|
|
|
|
|
2020-04-21 13:09:23 +01:00
|
|
|
static void
|
|
|
|
init_meta_blit_resources(struct v3dv_device *device)
|
|
|
|
{
|
2020-04-28 12:36:37 +01:00
|
|
|
for (uint32_t i = 0; i < 3; i++) {
|
|
|
|
device->meta.blit.cache[i] =
|
2020-04-30 14:27:02 +01:00
|
|
|
_mesa_hash_table_create(NULL,
|
|
|
|
meta_blit_key_hash,
|
|
|
|
meta_blit_key_compare);
|
2020-04-28 12:36:37 +01:00
|
|
|
}
|
2020-04-21 13:09:23 +01:00
|
|
|
}
|
|
|
|
|
2020-03-31 12:06:55 +01:00
|
|
|
/* Sets up the per-device meta operation state: the mutex that serializes
 * access to it, and the clear/blit pipeline caches.
 */
static void
init_device_meta(struct v3dv_device *device)
{
   mtx_init(&device->meta.mtx, mtx_plain);
   init_meta_clear_resources(device);
   init_meta_blit_resources(device);
}
|
|
|
|
|
|
|
|
/* Releases everything created by init_device_meta(): every cached clear
 * and blit pipeline, the hash tables holding them, and the shared
 * pipeline/descriptor-set layouts.
 */
static void
destroy_device_meta(struct v3dv_device *device)
{
   VkDevice _device = v3dv_device_to_handle(device);

   mtx_destroy(&device->meta.mtx);

   /* Color clear pipelines: entries may own their render pass
    * (free_render_pass) or borrow one owned elsewhere.
    */
   hash_table_foreach(device->meta.color_clear.cache, entry) {
      struct v3dv_meta_color_clear_pipeline *item = entry->data;
      v3dv_DestroyPipeline(_device, item->pipeline, &device->alloc);
      if (item->free_render_pass)
         v3dv_DestroyRenderPass(_device, item->pass, &device->alloc);
      vk_free(&device->alloc, item);
   }
   _mesa_hash_table_destroy(device->meta.color_clear.cache, NULL);

   if (device->meta.color_clear.playout) {
      v3dv_DestroyPipelineLayout(_device, device->meta.color_clear.playout,
                                 &device->alloc);
   }

   /* Depth clear pipelines never own a render pass. */
   hash_table_foreach(device->meta.depth_clear.cache, entry) {
      struct v3dv_meta_depth_clear_pipeline *item = entry->data;
      v3dv_DestroyPipeline(_device, item->pipeline, &device->alloc);
      vk_free(&device->alloc, item);
   }
   _mesa_hash_table_destroy(device->meta.depth_clear.cache, NULL);

   if (device->meta.depth_clear.playout) {
      v3dv_DestroyPipelineLayout(_device, device->meta.depth_clear.playout,
                                 &device->alloc);
   }

   /* Blit pipelines: each entry owns both its pipeline and render pass. */
   for (uint32_t i = 0; i < 3; i++) {
      hash_table_foreach(device->meta.blit.cache[i], entry) {
         struct v3dv_meta_blit_pipeline *item = entry->data;
         v3dv_DestroyPipeline(_device, item->pipeline, &device->alloc);
         v3dv_DestroyRenderPass(_device, item->pass, &device->alloc);
         vk_free(&device->alloc, item);
      }
      _mesa_hash_table_destroy(device->meta.blit.cache[i], NULL);
   }

   if (device->meta.blit.playout) {
      v3dv_DestroyPipelineLayout(_device, device->meta.blit.playout,
                                 &device->alloc);
   }

   if (device->meta.blit.dslayout) {
      v3dv_DestroyDescriptorSetLayout(_device, device->meta.blit.dslayout,
                                      &device->alloc);
   }
}
|
|
|
|
|
2019-11-25 15:29:12 +00:00
|
|
|
VkResult
|
|
|
|
v3dv_CreateDevice(VkPhysicalDevice physicalDevice,
|
|
|
|
const VkDeviceCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkDevice *pDevice)
|
|
|
|
{
|
2019-11-29 11:44:40 +00:00
|
|
|
V3DV_FROM_HANDLE(v3dv_physical_device, physical_device, physicalDevice);
|
|
|
|
struct v3dv_instance *instance = physical_device->instance;
|
|
|
|
VkResult result;
|
|
|
|
struct v3dv_device *device;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
|
|
|
|
|
|
|
|
/* Check enabled extensions */
|
|
|
|
struct v3dv_device_extension_table enabled_extensions = { };
|
|
|
|
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
|
|
|
|
int idx;
|
|
|
|
for (idx = 0; idx < V3DV_DEVICE_EXTENSION_COUNT; idx++) {
|
|
|
|
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
|
|
|
|
v3dv_device_extensions[idx].extensionName) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (idx >= V3DV_DEVICE_EXTENSION_COUNT)
|
|
|
|
return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
|
|
|
|
|
|
|
|
if (!physical_device->supported_extensions.extensions[idx])
|
|
|
|
return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
|
|
|
|
|
|
|
|
enabled_extensions.extensions[idx] = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check enabled features */
|
|
|
|
if (pCreateInfo->pEnabledFeatures) {
|
|
|
|
VkPhysicalDeviceFeatures supported_features;
|
|
|
|
v3dv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
|
|
|
|
VkBool32 *supported_feature = (VkBool32 *)&supported_features;
|
|
|
|
VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
|
|
|
|
unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
|
|
|
|
for (uint32_t i = 0; i < num_features; i++) {
|
|
|
|
if (enabled_feature[i] && !supported_feature[i])
|
|
|
|
return vk_error(instance, VK_ERROR_FEATURE_NOT_PRESENT);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check requested queues (we only expose one queue ) */
|
|
|
|
assert(pCreateInfo->queueCreateInfoCount == 1);
|
|
|
|
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
|
|
|
|
assert(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex == 0);
|
|
|
|
assert(pCreateInfo->pQueueCreateInfos[i].queueCount == 1);
|
|
|
|
if (pCreateInfo->pQueueCreateInfos[i].flags != 0)
|
|
|
|
return vk_error(instance, VK_ERROR_INITIALIZATION_FAILED);
|
|
|
|
}
|
|
|
|
|
2020-03-31 11:59:44 +01:00
|
|
|
device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
|
2019-11-29 11:44:40 +00:00
|
|
|
sizeof(*device), 8,
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
|
|
|
|
if (!device)
|
|
|
|
return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
|
|
|
|
device->instance = instance;
|
|
|
|
|
|
|
|
if (pAllocator)
|
|
|
|
device->alloc = *pAllocator;
|
|
|
|
else
|
|
|
|
device->alloc = physical_device->instance->alloc;
|
|
|
|
|
2020-01-23 10:59:28 +00:00
|
|
|
device->render_fd = physical_device->render_fd;
|
|
|
|
if (device->render_fd == -1) {
|
2020-01-20 09:45:06 +00:00
|
|
|
result = VK_ERROR_INITIALIZATION_FAILED;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (physical_device->display_fd != -1) {
|
|
|
|
device->display_fd = physical_device->display_fd;
|
|
|
|
if (device->display_fd == -1) {
|
|
|
|
result = VK_ERROR_INITIALIZATION_FAILED;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
device->display_fd = -1;
|
2019-11-29 11:44:40 +00:00
|
|
|
}
|
|
|
|
|
2020-05-18 09:41:11 +01:00
|
|
|
pthread_mutex_init(&device->mutex, NULL);
|
|
|
|
|
2019-11-29 11:44:40 +00:00
|
|
|
result = queue_init(device, &device->queue);
|
|
|
|
if (result != VK_SUCCESS)
|
2020-01-20 09:45:06 +00:00
|
|
|
goto fail;
|
2019-11-29 11:44:40 +00:00
|
|
|
|
|
|
|
device->devinfo = physical_device->devinfo;
|
|
|
|
device->enabled_extensions = enabled_extensions;
|
|
|
|
|
2020-01-23 10:59:28 +00:00
|
|
|
int ret = drmSyncobjCreate(device->render_fd,
|
2020-01-13 07:53:26 +00:00
|
|
|
DRM_SYNCOBJ_CREATE_SIGNALED,
|
|
|
|
&device->last_job_sync);
|
|
|
|
if (ret) {
|
|
|
|
result = VK_ERROR_INITIALIZATION_FAILED;
|
2020-01-20 09:45:06 +00:00
|
|
|
goto fail;
|
2020-01-13 07:53:26 +00:00
|
|
|
}
|
|
|
|
|
2019-11-29 11:44:40 +00:00
|
|
|
init_device_dispatch(device);
|
2020-03-31 12:06:55 +01:00
|
|
|
init_device_meta(device);
|
v3dv/bo: adding a BO cache
Heavily based on the already existing for the v3d OpenGL driver, but
without references, and with some extra OOM checks (Vulkan CTS has
several OOM tests).
With this commit v3dv_bo_alloc and v3dv_bo_free became frontends to
the bo_cache. The former tries to get a BO from the cache if possible,
and the latter stores the BO on the cache if possible. The former also
adds a new parameter to point if the BO to allocate is private.
As v3d we are only caching private BOs, those created by the driver
for internal use (like CLs, tile_alloc, etc). They are the ones with
the highest change of being reused (for example, CL BOs are always
4KB, so they can always be reused). User-created BOs can have any
size, including some very large ones for buffers and images, which
makes them far less likely to be reused and would add a lot of memory
pressure if we decided to cache them.
In any case, in practice, we found that we could get a performance
improvement by caching also user-created BOs, but that would need more
care and an analysis to decide which ones makes sense. Would also
require to change how the cached BOs are stored by size. Right now
there are an array of list_head, that doesn't work well with big
BOs. If done, that would be handled on a separate commit.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6766>
2020-06-05 11:21:54 +01:00
|
|
|
v3dv_bo_cache_init(device);
|
2019-11-29 11:44:40 +00:00
|
|
|
|
|
|
|
*pDevice = v3dv_device_to_handle(device);
|
2019-11-25 15:29:12 +00:00
|
|
|
|
|
|
|
return VK_SUCCESS;
|
2019-11-29 11:44:40 +00:00
|
|
|
|
2020-01-20 09:45:06 +00:00
|
|
|
fail:
|
2019-11-29 11:44:40 +00:00
|
|
|
vk_free(&device->alloc, device);
|
|
|
|
|
|
|
|
return result;
|
2019-11-25 15:29:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * vkDestroyDevice: drains all pending work, then tears the device down in
 * reverse creation order (queue, mutex, job syncobj, meta state, BO cache)
 * before freeing the device itself.
 */
void
v3dv_DestroyDevice(VkDevice _device,
                   const VkAllocationCallbacks *pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);

   /* Make sure no GPU work is still in flight before destroying anything. */
   v3dv_DeviceWaitIdle(_device);
   queue_finish(&device->queue);
   pthread_mutex_destroy(&device->mutex);
   drmSyncobjDestroy(device->render_fd, device->last_job_sync);
   destroy_device_meta(device);
   v3dv_bo_cache_destroy(device);

   vk_free2(&default_alloc, pAllocator, device);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_GetDeviceQueue(VkDevice _device,
|
2019-12-02 08:49:50 +00:00
|
|
|
uint32_t queueFamilyIndex,
|
2019-11-25 15:29:12 +00:00
|
|
|
uint32_t queueIndex,
|
|
|
|
VkQueue *pQueue)
|
|
|
|
{
|
2019-12-02 08:49:50 +00:00
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
|
|
|
|
assert(queueIndex == 0);
|
|
|
|
assert(queueFamilyIndex == 0);
|
|
|
|
|
|
|
|
*pQueue = v3dv_queue_to_handle(&device->queue);
|
2019-11-25 15:29:12 +00:00
|
|
|
}
|
2019-11-27 10:24:22 +00:00
|
|
|
|
2019-12-03 08:15:43 +00:00
|
|
|
VkResult
|
|
|
|
v3dv_DeviceWaitIdle(VkDevice _device)
|
|
|
|
{
|
2020-01-13 07:53:26 +00:00
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
2020-05-18 09:41:11 +01:00
|
|
|
return v3dv_QueueWaitIdle(v3dv_queue_to_handle(&device->queue));
|
2020-02-21 15:18:17 +00:00
|
|
|
}
|
|
|
|
|
2019-11-27 10:24:22 +00:00
|
|
|
VkResult
|
|
|
|
v3dv_CreateDebugReportCallbackEXT(VkInstance _instance,
|
|
|
|
const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
|
|
|
|
const VkAllocationCallbacks* pAllocator,
|
|
|
|
VkDebugReportCallbackEXT* pCallback)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_instance, instance, _instance);
|
|
|
|
return vk_create_debug_report_callback(&instance->debug_report_callbacks,
|
|
|
|
pCreateInfo, pAllocator, &instance->alloc,
|
|
|
|
pCallback);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_DestroyDebugReportCallbackEXT(VkInstance _instance,
|
|
|
|
VkDebugReportCallbackEXT _callback,
|
|
|
|
const VkAllocationCallbacks* pAllocator)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_instance, instance, _instance);
|
|
|
|
vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
|
|
|
|
_callback, pAllocator, &instance->alloc);
|
|
|
|
}
|
2019-11-27 21:08:51 +00:00
|
|
|
|
2019-12-04 09:39:01 +00:00
|
|
|
static VkResult
|
|
|
|
device_alloc(struct v3dv_device *device,
|
|
|
|
struct v3dv_device_memory *mem,
|
|
|
|
VkDeviceSize size)
|
|
|
|
{
|
|
|
|
/* Our kernel interface is 32-bit */
|
2020-06-29 10:56:48 +01:00
|
|
|
if (size > UINT32_MAX)
|
|
|
|
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
|
|
|
|
v3dv/bo: adding a BO cache
Heavily based on the already existing for the v3d OpenGL driver, but
without references, and with some extra OOM checks (Vulkan CTS has
several OOM tests).
With this commit v3dv_bo_alloc and v3dv_bo_free became frontends to
the bo_cache. The former tries to get a BO from the cache if possible,
and the latter stores the BO on the cache if possible. The former also
adds a new parameter to point if the BO to allocate is private.
As v3d we are only caching private BOs, those created by the driver
for internal use (like CLs, tile_alloc, etc). They are the ones with
the highest change of being reused (for example, CL BOs are always
4KB, so they can always be reused). User-created BOs can have any
size, including some very large ones for buffers and images, which
makes them far less likely to be reused and would add a lot of memory
pressure if we decided to cache them.
In any case, in practice, we found that we could get a performance
improvement by caching also user-created BOs, but that would need more
care and an analysis to decide which ones makes sense. Would also
require to change how the cached BOs are stored by size. Right now
there are an array of list_head, that doesn't work well with big
BOs. If done, that would be handled on a separate commit.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6766>
2020-06-05 11:21:54 +01:00
|
|
|
mem->bo = v3dv_bo_alloc(device, size, "device_alloc", false);
|
2019-12-12 10:02:04 +00:00
|
|
|
if (!mem->bo)
|
2019-12-04 09:39:01 +00:00
|
|
|
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
2020-06-29 10:56:48 +01:00
|
|
|
|
2019-12-04 09:39:01 +00:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-12-04 09:58:05 +00:00
|
|
|
static void
|
|
|
|
device_free(struct v3dv_device *device, struct v3dv_device_memory *mem)
|
|
|
|
{
|
2020-06-23 12:36:15 +01:00
|
|
|
if (mem->has_bo_ownership)
|
|
|
|
v3dv_bo_free(device, mem->bo);
|
|
|
|
else if (mem->bo)
|
|
|
|
vk_free(&device->alloc, mem->bo);
|
2019-12-04 09:58:05 +00:00
|
|
|
}
|
|
|
|
|
2020-03-11 11:56:34 +00:00
|
|
|
static void
|
|
|
|
device_unmap(struct v3dv_device *device, struct v3dv_device_memory *mem)
|
|
|
|
{
|
|
|
|
assert(mem && mem->bo->map && mem->bo->map_size > 0);
|
|
|
|
v3dv_bo_unmap(device, mem->bo);
|
|
|
|
}
|
|
|
|
|
2019-12-04 11:21:35 +00:00
|
|
|
static VkResult
|
2020-06-17 11:15:42 +01:00
|
|
|
device_map(struct v3dv_device *device, struct v3dv_device_memory *mem)
|
2019-12-04 11:21:35 +00:00
|
|
|
{
|
2020-03-11 11:56:34 +00:00
|
|
|
assert(mem && mem->bo);
|
|
|
|
|
2019-12-04 11:21:35 +00:00
|
|
|
/* From the spec:
|
|
|
|
*
|
|
|
|
* "After a successful call to vkMapMemory the memory object memory is
|
|
|
|
* considered to be currently host mapped. It is an application error to
|
|
|
|
* call vkMapMemory on a memory object that is already host mapped."
|
2020-03-11 11:56:34 +00:00
|
|
|
*
|
|
|
|
* We are not concerned with this ourselves (validation layers should
|
|
|
|
* catch these errors and warn users), however, the driver may internally
|
2020-06-17 11:15:42 +01:00
|
|
|
* map things (for example for debug CLIF dumps or some CPU-side operations)
|
|
|
|
* so by the time the user calls here the buffer might already been mapped
|
|
|
|
* internally by the driver.
|
2019-12-04 11:21:35 +00:00
|
|
|
*/
|
2020-06-17 11:15:42 +01:00
|
|
|
if (mem->bo->map) {
|
|
|
|
assert(mem->bo->map_size == mem->bo->size);
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
2019-12-04 11:21:35 +00:00
|
|
|
|
2020-06-17 11:15:42 +01:00
|
|
|
bool ok = v3dv_bo_map(device, mem->bo, mem->bo->size);
|
2019-12-10 11:00:49 +00:00
|
|
|
if (!ok)
|
2019-12-04 11:21:35 +00:00
|
|
|
return VK_ERROR_MEMORY_MAP_FAILED;
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2020-01-15 10:32:09 +00:00
|
|
|
/* Imports an external dma-buf/opaque fd as a v3dv_bo wrapper (no BO-layer
 * ownership: the wrapper is allocated here with vk_alloc2 and must be freed
 * with vk_free, see device_free).
 *
 * On success *bo holds the imported BO; on failure *bo is reset to NULL and
 * an appropriate error is returned. The fd is NOT closed here; the caller
 * retains ownership of it.
 */
static VkResult
device_import_bo(struct v3dv_device *device,
                 const VkAllocationCallbacks *pAllocator,
                 int fd, uint64_t size,
                 struct v3dv_bo **bo)
{
   VkResult result;

   *bo = vk_alloc2(&device->alloc, pAllocator, sizeof(struct v3dv_bo), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (*bo == NULL) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   /* Sanity-check the requested size against the actual size of the
    * underlying buffer; lseek to the end gives us that size and we restore
    * the offset right after.
    */
   off_t real_size = lseek(fd, 0, SEEK_END);
   lseek(fd, 0, SEEK_SET);
   if (real_size < 0 || (uint64_t) real_size < size) {
      result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
      goto fail;
   }

   int ret;
   uint32_t handle;
   ret = drmPrimeFDToHandle(device->render_fd, fd, &handle);
   if (ret) {
      result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
      goto fail;
   }

   /* Query the GPU virtual address assigned to the imported BO. */
   struct drm_v3d_get_bo_offset get_offset = {
      .handle = handle,
   };
   ret = v3dv_ioctl(device->render_fd, DRM_IOCTL_V3D_GET_BO_OFFSET, &get_offset);
   if (ret) {
      result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
      goto fail;
   }
   assert(get_offset.offset != 0);

   (*bo)->handle = handle;
   (*bo)->size = size;
   (*bo)->offset = get_offset.offset;
   (*bo)->map = NULL;
   (*bo)->map_size = 0;
   /* Imported BOs are never private, so they are never put in the BO cache. */
   (*bo)->private = false;

   return VK_SUCCESS;

fail:
   if (*bo) {
      vk_free2(&device->alloc, pAllocator, *bo);
      *bo = NULL;
   }
   return result;
}
|
|
|
|
|
2020-01-23 10:24:05 +00:00
|
|
|
/* Allocates memory suitable for presentation (WSI).
 *
 * On real hardware this creates a dumb buffer on the display device (vc4)
 * and imports it into the v3d render device; on the simulator a regular
 * device_alloc suffices since everything runs on one DRM render node.
 *
 * All failure paths collapse to VK_ERROR_OUT_OF_DEVICE_MEMORY.
 */
static VkResult
device_alloc_for_wsi(struct v3dv_device *device,
                     const VkAllocationCallbacks *pAllocator,
                     struct v3dv_device_memory *mem,
                     VkDeviceSize size)
{
   /* In the simulator we can get away with a regular allocation since both
    * allocation and rendering happen in the same DRM render node. On actual
    * hardware we need to allocate our winsys BOs on the vc4 display device
    * and import them into v3d.
    */
#if using_v3d_simulator
      return device_alloc(device, mem, size);
#else
   /* NOTE(review): the assert checks device->display_fd but the fd actually
    * used below comes from the physical device — confirm these are meant to
    * be the same fd.
    */
   assert(device->display_fd != -1);
   int display_fd = device->instance->physicalDevice.display_fd;
   /* Express the allocation as a dumb buffer of 4096-byte rows so the size
    * requirement is met regardless of the actual image layout.
    */
   struct drm_mode_create_dumb create_dumb = {
      .width = 1024, /* one page */
      .height = align(size, 4096) / 4096,
      .bpp = util_format_get_blocksizebits(PIPE_FORMAT_RGBA8888_UNORM),
   };

   int err;
   err = v3dv_ioctl(display_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
   if (err < 0)
      goto fail_create;

   /* Export from the display device... */
   int fd;
   err =
      drmPrimeHandleToFD(display_fd, create_dumb.handle, O_CLOEXEC, &fd);
   if (err < 0)
      goto fail_export;

   /* ...and import into the render device. The prime fd is no longer needed
    * once imported (or if the import failed).
    */
   VkResult result = device_import_bo(device, pAllocator, fd, size, &mem->bo);
   close(fd);
   if (result != VK_SUCCESS)
      goto fail_import;

   return VK_SUCCESS;

fail_import:
fail_export: {
      /* Undo the dumb-buffer creation on the display device. */
      struct drm_mode_destroy_dumb destroy_dumb = {
         .handle = create_dumb.handle,
      };
      v3dv_ioctl(display_fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
   }

fail_create:
   return VK_ERROR_OUT_OF_DEVICE_MEMORY;
#endif
}
|
|
|
|
|
2019-11-27 21:08:51 +00:00
|
|
|
/* vkAllocateMemory entry point. Dispatches to one of three allocation paths:
 *  - WSI allocations (wsi_memory_allocate_info in the pNext chain),
 *  - imports of external fds (VkImportMemoryFdInfoKHR), which do NOT take
 *    BO ownership and close the fd on success per the import contract,
 *  - plain device allocations.
 * The v3dv_device_memory wrapper is freed again on any failure.
 */
VkResult
v3dv_AllocateMemory(VkDevice _device,
                    const VkMemoryAllocateInfo *pAllocateInfo,
                    const VkAllocationCallbacks *pAllocator,
                    VkDeviceMemory *pMem)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   struct v3dv_device_memory *mem;
   struct v3dv_physical_device *pdevice = &device->instance->physicalDevice;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   /* The Vulkan 1.0.33 spec says "allocationSize must be greater than 0". */
   assert(pAllocateInfo->allocationSize > 0);

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.memoryTypeCount);
   mem->type = &pdevice->memory.memoryTypes[pAllocateInfo->memoryTypeIndex];
   /* Default: this memory object owns its BO (cleared below for imports). */
   mem->has_bo_ownership = true;

   /* Scan the pNext chain for the structs that select an allocation path. */
   const struct wsi_memory_allocate_info *wsi_info = NULL;
   const VkImportMemoryFdInfoKHR *fd_info = NULL;
   vk_foreach_struct_const(ext, pAllocateInfo->pNext) {
      switch ((unsigned)ext->sType) {
      case VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA:
         wsi_info = (void *)ext;
         break;
      case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
         fd_info = (void *)ext;
         break;
      default:
         v3dv_debug_ignored_stype(ext->sType);
         break;
      }
   }

   VkResult result = VK_SUCCESS;
   if (wsi_info) {
      result = device_alloc_for_wsi(device, pAllocator, mem,
                                    pAllocateInfo->allocationSize);
   } else if (fd_info && fd_info->handleType) {
      assert(fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
             fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
      result = device_import_bo(device, pAllocator,
                                fd_info->fd, pAllocateInfo->allocationSize,
                                &mem->bo);
      mem->has_bo_ownership = false;
      /* On a successful import the implementation takes over the fd. */
      if (result == VK_SUCCESS)
         close(fd_info->fd);
   } else {
      result = device_alloc(device, mem, pAllocateInfo->allocationSize);
   }

   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return vk_error(device->instance, result);
   }

   *pMem = v3dv_device_memory_to_handle(mem);
   return result;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_FreeMemory(VkDevice _device,
|
|
|
|
VkDeviceMemory _mem,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device_memory, mem, _mem);
|
|
|
|
|
|
|
|
if (mem == NULL)
|
|
|
|
return;
|
|
|
|
|
2019-12-12 10:02:04 +00:00
|
|
|
if (mem->bo->map)
|
2019-11-27 21:08:51 +00:00
|
|
|
v3dv_UnmapMemory(_device, _mem);
|
|
|
|
|
2019-12-04 09:58:05 +00:00
|
|
|
device_free(device, mem);
|
2019-11-27 21:08:51 +00:00
|
|
|
|
|
|
|
vk_free2(&device->alloc, pAllocator, mem);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_MapMemory(VkDevice _device,
|
|
|
|
VkDeviceMemory _memory,
|
|
|
|
VkDeviceSize offset,
|
|
|
|
VkDeviceSize size,
|
|
|
|
VkMemoryMapFlags flags,
|
|
|
|
void **ppData)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device_memory, mem, _memory);
|
|
|
|
|
|
|
|
if (mem == NULL) {
|
|
|
|
*ppData = NULL;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-12-12 10:02:04 +00:00
|
|
|
assert(offset < mem->bo->size);
|
2019-12-04 11:21:35 +00:00
|
|
|
|
2020-06-17 11:15:42 +01:00
|
|
|
/* Since the driver can map BOs internally as well and the mapped range
|
|
|
|
* required by the user or the driver might not be the same, we always map
|
|
|
|
* the entire BO and then add the requested offset to the start address
|
|
|
|
* of the mapped region.
|
2019-12-04 11:21:35 +00:00
|
|
|
*/
|
2020-06-17 11:15:42 +01:00
|
|
|
VkResult result = device_map(device, mem);
|
2019-12-04 11:21:35 +00:00
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
return vk_error(device->instance, result);
|
2019-11-27 21:08:51 +00:00
|
|
|
|
2019-12-12 10:02:04 +00:00
|
|
|
*ppData = ((uint8_t *) mem->bo->map) + offset;
|
2019-12-04 11:21:35 +00:00
|
|
|
return VK_SUCCESS;
|
2019-11-27 21:08:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_UnmapMemory(VkDevice _device,
|
|
|
|
VkDeviceMemory _memory)
|
|
|
|
{
|
2019-12-04 11:29:00 +00:00
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device_memory, mem, _memory);
|
|
|
|
|
|
|
|
if (mem == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
device_unmap(device, mem);
|
2019-11-27 21:08:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_FlushMappedMemoryRanges(VkDevice _device,
|
|
|
|
uint32_t memoryRangeCount,
|
|
|
|
const VkMappedMemoryRange *pMemoryRanges)
|
|
|
|
{
|
|
|
|
/* FIXME: stub (although note that both radv and tu just returns success
|
|
|
|
* here. Pending further research)
|
|
|
|
*/
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_InvalidateMappedMemoryRanges(VkDevice _device,
|
|
|
|
uint32_t memoryRangeCount,
|
|
|
|
const VkMappedMemoryRange *pMemoryRanges)
|
|
|
|
{
|
|
|
|
/* FIXME: stub (although note that both radv and tu just returns success
|
|
|
|
* here. Pending further research)
|
|
|
|
*/
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
2019-12-04 08:24:03 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_GetImageMemoryRequirements(VkDevice _device,
|
|
|
|
VkImage _image,
|
|
|
|
VkMemoryRequirements *pMemoryRequirements)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_image, image, _image);
|
|
|
|
|
|
|
|
assert(image->size > 0);
|
|
|
|
|
|
|
|
pMemoryRequirements->size = image->size;
|
|
|
|
pMemoryRequirements->alignment = image->alignment;
|
|
|
|
pMemoryRequirements->memoryTypeBits = 0x3; /* Both memory types */
|
|
|
|
}
|
2019-12-05 09:36:24 +00:00
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_BindImageMemory(VkDevice _device,
|
|
|
|
VkImage _image,
|
|
|
|
VkDeviceMemory _memory,
|
|
|
|
VkDeviceSize memoryOffset)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device_memory, mem, _memory);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_image, image, _image);
|
|
|
|
|
|
|
|
/* Valid usage:
|
|
|
|
*
|
|
|
|
* "memoryOffset must be an integer multiple of the alignment member of
|
|
|
|
* the VkMemoryRequirements structure returned from a call to
|
|
|
|
* vkGetImageMemoryRequirements with image"
|
|
|
|
*/
|
|
|
|
assert(memoryOffset % image->alignment == 0);
|
2019-12-12 10:02:04 +00:00
|
|
|
assert(memoryOffset < mem->bo->size);
|
2019-12-05 09:36:24 +00:00
|
|
|
|
|
|
|
image->mem = mem;
|
|
|
|
image->mem_offset = memoryOffset;
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
2019-12-09 09:07:36 +00:00
|
|
|
|
2019-12-09 09:35:03 +00:00
|
|
|
void
|
|
|
|
v3dv_GetBufferMemoryRequirements(VkDevice _device,
|
|
|
|
VkBuffer _buffer,
|
|
|
|
VkMemoryRequirements* pMemoryRequirements)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
|
|
|
|
|
|
|
|
pMemoryRequirements->memoryTypeBits = 0x3; /* Both memory types */
|
|
|
|
pMemoryRequirements->alignment = buffer->alignment;
|
|
|
|
pMemoryRequirements->size =
|
|
|
|
align64(buffer->size, pMemoryRequirements->alignment);
|
|
|
|
}
|
|
|
|
|
2019-12-09 09:40:32 +00:00
|
|
|
VkResult
|
|
|
|
v3dv_BindBufferMemory(VkDevice _device,
|
|
|
|
VkBuffer _buffer,
|
|
|
|
VkDeviceMemory _memory,
|
|
|
|
VkDeviceSize memoryOffset)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device_memory, mem, _memory);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
|
|
|
|
|
|
|
|
/* Valid usage:
|
|
|
|
*
|
|
|
|
* "memoryOffset must be an integer multiple of the alignment member of
|
|
|
|
* the VkMemoryRequirements structure returned from a call to
|
|
|
|
* vkGetBufferMemoryRequirements with buffer"
|
|
|
|
*/
|
|
|
|
assert(memoryOffset % buffer->alignment == 0);
|
2019-12-12 10:02:04 +00:00
|
|
|
assert(memoryOffset < mem->bo->size);
|
2019-12-09 09:40:32 +00:00
|
|
|
|
|
|
|
buffer->mem = mem;
|
|
|
|
buffer->mem_offset = memoryOffset;
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-12-09 09:07:36 +00:00
|
|
|
VkResult
|
|
|
|
v3dv_CreateBuffer(VkDevice _device,
|
|
|
|
const VkBufferCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkBuffer *pBuffer)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
struct v3dv_buffer *buffer;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
|
|
|
|
assert(pCreateInfo->usage != 0);
|
|
|
|
|
|
|
|
/* We don't support any flags for now */
|
|
|
|
assert(pCreateInfo->flags == 0);
|
|
|
|
|
|
|
|
buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
if (buffer == NULL)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
buffer->size = pCreateInfo->size;
|
|
|
|
buffer->usage = pCreateInfo->usage;
|
|
|
|
buffer->alignment = 256; /* nonCoherentAtomSize */
|
|
|
|
|
2020-06-23 10:32:04 +01:00
|
|
|
/* Limit allocations to 32-bit */
|
|
|
|
const VkDeviceSize aligned_size = align64(buffer->size, buffer->alignment);
|
|
|
|
if (aligned_size > UINT32_MAX || aligned_size < buffer->size)
|
|
|
|
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
|
2019-12-09 09:07:36 +00:00
|
|
|
|
|
|
|
*pBuffer = v3dv_buffer_to_handle(buffer);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_DestroyBuffer(VkDevice _device,
|
|
|
|
VkBuffer _buffer,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
|
|
|
|
|
|
|
|
if (!buffer)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vk_free2(&device->alloc, pAllocator, buffer);
|
|
|
|
}
|
2019-12-09 12:16:16 +00:00
|
|
|
|
2020-03-03 10:57:59 +00:00
|
|
|
/**
|
|
|
|
* This computes the maximum bpp used by any of the render targets used by
|
|
|
|
* a particular subpass. If we don't have a subpass (when we are not inside a
|
|
|
|
* render pass), then we assume that all framebuffer attachments are used.
|
|
|
|
*/
|
2020-03-02 16:21:26 +00:00
|
|
|
uint8_t
|
|
|
|
v3dv_framebuffer_compute_internal_bpp(const struct v3dv_framebuffer *framebuffer,
|
|
|
|
const struct v3dv_subpass *subpass)
|
2020-01-07 07:42:38 +00:00
|
|
|
{
|
|
|
|
STATIC_ASSERT(RENDER_TARGET_MAXIMUM_32BPP == 0);
|
|
|
|
uint8_t max_bpp = RENDER_TARGET_MAXIMUM_32BPP;
|
2020-03-02 16:21:26 +00:00
|
|
|
|
|
|
|
if (subpass) {
|
|
|
|
for (uint32_t i = 0; i < subpass->color_count; i++) {
|
|
|
|
uint32_t att_idx = subpass->color_attachments[i].attachment;
|
|
|
|
if (att_idx == VK_ATTACHMENT_UNUSED)
|
|
|
|
continue;
|
|
|
|
|
2020-04-06 09:19:53 +01:00
|
|
|
const struct v3dv_image_view *att = framebuffer->attachments[att_idx];
|
2020-03-02 16:21:26 +00:00
|
|
|
assert(att);
|
|
|
|
|
|
|
|
if (att->aspects & VK_IMAGE_ASPECT_COLOR_BIT)
|
|
|
|
max_bpp = MAX2(max_bpp, att->internal_bpp);
|
|
|
|
}
|
|
|
|
|
|
|
|
return max_bpp;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(framebuffer->attachment_count <= 4);
|
2020-01-07 07:42:38 +00:00
|
|
|
for (uint32_t i = 0; i < framebuffer->attachment_count; i++) {
|
|
|
|
const struct v3dv_image_view *att = framebuffer->attachments[i];
|
2020-01-28 12:03:41 +00:00
|
|
|
assert(att);
|
|
|
|
|
|
|
|
if (att->aspects & VK_IMAGE_ASPECT_COLOR_BIT)
|
2020-01-07 07:42:38 +00:00
|
|
|
max_bpp = MAX2(max_bpp, att->internal_bpp);
|
|
|
|
}
|
2020-03-02 16:21:26 +00:00
|
|
|
|
|
|
|
return max_bpp;
|
2020-01-07 07:42:38 +00:00
|
|
|
}
|
|
|
|
|
2019-12-09 12:16:16 +00:00
|
|
|
VkResult
|
|
|
|
v3dv_CreateFramebuffer(VkDevice _device,
|
|
|
|
const VkFramebufferCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkFramebuffer *pFramebuffer)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
struct v3dv_framebuffer *framebuffer;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
|
|
|
|
|
|
|
|
size_t size = sizeof(*framebuffer) +
|
|
|
|
sizeof(struct v3dv_image_view *) * pCreateInfo->attachmentCount;
|
|
|
|
framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
if (framebuffer == NULL)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
framebuffer->width = pCreateInfo->width;
|
|
|
|
framebuffer->height = pCreateInfo->height;
|
|
|
|
framebuffer->layers = pCreateInfo->layers;
|
|
|
|
framebuffer->attachment_count = pCreateInfo->attachmentCount;
|
2020-01-28 12:03:41 +00:00
|
|
|
framebuffer->color_attachment_count = 0;
|
2019-12-09 12:16:16 +00:00
|
|
|
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
|
|
|
|
framebuffer->attachments[i] =
|
|
|
|
v3dv_image_view_from_handle(pCreateInfo->pAttachments[i]);
|
2020-01-28 12:03:41 +00:00
|
|
|
if (framebuffer->attachments[i]->aspects & VK_IMAGE_ASPECT_COLOR_BIT)
|
|
|
|
framebuffer->color_attachment_count++;
|
2019-12-09 12:16:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
*pFramebuffer = v3dv_framebuffer_to_handle(framebuffer);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_DestroyFramebuffer(VkDevice _device,
|
|
|
|
VkFramebuffer _fb,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_framebuffer, fb, _fb);
|
|
|
|
|
|
|
|
if (!fb)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vk_free2(&device->alloc, pAllocator, fb);
|
|
|
|
}
|
2020-01-15 10:32:09 +00:00
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_GetMemoryFdPropertiesKHR(VkDevice _device,
|
|
|
|
VkExternalMemoryHandleTypeFlagBits handleType,
|
|
|
|
int fd,
|
|
|
|
VkMemoryFdPropertiesKHR *pMemoryFdProperties)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
struct v3dv_physical_device *pdevice = &device->instance->physicalDevice;
|
|
|
|
|
|
|
|
switch (handleType) {
|
|
|
|
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
|
|
|
|
pMemoryFdProperties->memoryTypeBits =
|
|
|
|
(1 << pdevice->memory.memoryTypeCount) - 1;
|
|
|
|
return VK_SUCCESS;
|
|
|
|
default:
|
|
|
|
return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_GetMemoryFdKHR(VkDevice _device,
|
|
|
|
const VkMemoryGetFdInfoKHR *pGetFdInfo,
|
|
|
|
int *pFd)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device_memory, mem, pGetFdInfo->memory);
|
|
|
|
|
|
|
|
assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
|
|
|
|
assert(pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
|
|
|
|
pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
|
|
|
|
|
|
|
|
int fd, ret;
|
2020-01-23 10:59:28 +00:00
|
|
|
ret =
|
|
|
|
drmPrimeHandleToFD(device->render_fd, mem->bo->handle, DRM_CLOEXEC, &fd);
|
2020-01-15 10:32:09 +00:00
|
|
|
if (ret)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
*pFd = fd;
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
2020-02-26 08:36:27 +00:00
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_CreateEvent(VkDevice _device,
|
|
|
|
const VkEventCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkEvent *pEvent)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
struct v3dv_event *event =
|
|
|
|
vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
if (!event)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
/* Events are created in the unsignaled state */
|
2020-05-18 09:41:11 +01:00
|
|
|
event->state = false;
|
2020-02-26 08:36:27 +00:00
|
|
|
*pEvent = v3dv_event_to_handle(event);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_DestroyEvent(VkDevice _device,
|
|
|
|
VkEvent _event,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_event, event, _event);
|
|
|
|
|
|
|
|
if (!event)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vk_free2(&device->alloc, pAllocator, event);
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_GetEventStatus(VkDevice _device, VkEvent _event)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_event, event, _event);
|
2020-05-18 09:41:11 +01:00
|
|
|
return p_atomic_read(&event->state) ? VK_EVENT_SET : VK_EVENT_RESET;
|
2020-02-26 08:36:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_SetEvent(VkDevice _device, VkEvent _event)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_event, event, _event);
|
2020-05-18 09:41:11 +01:00
|
|
|
p_atomic_set(&event->state, 1);
|
2020-02-26 08:36:27 +00:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_ResetEvent(VkDevice _device, VkEvent _event)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_event, event, _event);
|
2020-05-18 09:41:11 +01:00
|
|
|
p_atomic_set(&event->state, 0);
|
2020-02-26 08:36:27 +00:00
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
2020-03-29 15:29:55 +01:00
|
|
|
|
|
|
|
/* Maps VkSamplerAddressMode values to the hardware wrap modes packed into
 * SAMPLER_STATE (indexed directly by the Vulkan enum value).
 */
static const enum V3DX(Wrap_Mode) vk_to_v3d_wrap_mode[] = {
   [VK_SAMPLER_ADDRESS_MODE_REPEAT] = V3D_WRAP_MODE_REPEAT,
   [VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT] = V3D_WRAP_MODE_MIRROR,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE] = V3D_WRAP_MODE_CLAMP,
   [VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE] = V3D_WRAP_MODE_MIRROR_ONCE,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER] = V3D_WRAP_MODE_BORDER,
};
|
|
|
|
|
|
|
|
/* Maps VkCompareOp values to the hardware depth-compare functions packed
 * into SAMPLER_STATE (indexed directly by the Vulkan enum value).
 */
static const enum V3DX(Compare_Function)
vk_to_v3d_compare_func[] = {
   [VK_COMPARE_OP_NEVER] = V3D_COMPARE_FUNC_NEVER,
   [VK_COMPARE_OP_LESS] = V3D_COMPARE_FUNC_LESS,
   [VK_COMPARE_OP_EQUAL] = V3D_COMPARE_FUNC_EQUAL,
   [VK_COMPARE_OP_LESS_OR_EQUAL] = V3D_COMPARE_FUNC_LEQUAL,
   [VK_COMPARE_OP_GREATER] = V3D_COMPARE_FUNC_GREATER,
   [VK_COMPARE_OP_NOT_EQUAL] = V3D_COMPARE_FUNC_NOTEQUAL,
   [VK_COMPARE_OP_GREATER_OR_EQUAL] = V3D_COMPARE_FUNC_GEQUAL,
   [VK_COMPARE_OP_ALWAYS] = V3D_COMPARE_FUNC_ALWAYS,
};
|
|
|
|
|
|
|
|
/* Packs a VkSamplerCreateInfo into the hardware SAMPLER_STATE stored in the
 * sampler, and records whether the transparent-black-border swizzle
 * workaround will be needed at shader-compile time.
 */
static void
pack_sampler_state(struct v3dv_sampler *sampler,
                   const VkSamplerCreateInfo *pCreateInfo)
{
   enum V3DX(Border_Color_Mode) border_color_mode;

   /* For now we only support the preset Vulkan border color modes. If we
    * want to implement VK_EXT_custom_border_color in the future we would have
    * to use V3D_BORDER_COLOR_FOLLOWS, and fill up border_color_word_[0/1/2/3]
    * SAMPLER_STATE.
    */
   switch (pCreateInfo->borderColor) {
   case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
   case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
      border_color_mode = V3D_BORDER_COLOR_0000;
      break;
   case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
   case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
      border_color_mode = V3D_BORDER_COLOR_0001;
      break;
   case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
   case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
      border_color_mode = V3D_BORDER_COLOR_1111;
      break;
   default:
      unreachable("Unknown border color");
      break;
   }

   /* For some texture formats, when clamping to transparent black border the
    * CTS expects alpha to be set to 1 instead of 0, but the border color mode
    * will take priority over the texture state swizzle, so the only way to
    * fix that is to apply a swizzle in the shader. Here we keep track of
    * whether we are activating that mode and we will decide if we need to
    * activate the texture swizzle lowering in the shader key at compile time
    * depending on the actual texture format.
    */
   if ((pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER ||
        pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER ||
        pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) &&
       border_color_mode == V3D_BORDER_COLOR_0000) {
      sampler->clamp_to_transparent_black_border = true;
   }

   v3dv_pack(sampler->sampler_state, SAMPLER_STATE, s) {
      if (pCreateInfo->anisotropyEnable) {
         s.anisotropy_enable = true;
         /* The hardware field encodes the max anisotropy level in steps:
          * 3 = 16x, 2 = 8x, 1 = 4x (0 = 2x).
          * NOTE(review): assumed encoding based on the thresholds — confirm
          * against the SAMPLER_STATE packet definition.
          */
         if (pCreateInfo->maxAnisotropy > 8)
            s.maximum_anisotropy = 3;
         else if (pCreateInfo->maxAnisotropy > 4)
            s.maximum_anisotropy = 2;
         else if (pCreateInfo->maxAnisotropy > 2)
            s.maximum_anisotropy = 1;
      }

      s.border_color_mode = border_color_mode;

      s.wrap_i_border = false; /* Also hardcoded on v3d */
      s.wrap_s = vk_to_v3d_wrap_mode[pCreateInfo->addressModeU];
      s.wrap_t = vk_to_v3d_wrap_mode[pCreateInfo->addressModeV];
      s.wrap_r = vk_to_v3d_wrap_mode[pCreateInfo->addressModeW];
      s.fixed_bias = pCreateInfo->mipLodBias;
      /* LOD range clamped to the hardware-representable [0, 15]. */
      s.max_level_of_detail = MIN2(MAX2(0, pCreateInfo->maxLod), 15);
      s.min_level_of_detail = MIN2(MAX2(0, pCreateInfo->minLod), 15);
      s.srgb_disable = 0; /* Not even set by v3d */
      s.depth_compare_function =
         vk_to_v3d_compare_func[pCreateInfo->compareEnable ?
                                pCreateInfo->compareOp : VK_COMPARE_OP_NEVER];
      s.mip_filter_nearest = pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_NEAREST;
      s.min_filter_nearest = pCreateInfo->minFilter == VK_FILTER_NEAREST;
      s.mag_filter_nearest = pCreateInfo->magFilter == VK_FILTER_NEAREST;
   }
}
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
v3dv_CreateSampler(VkDevice _device,
|
|
|
|
const VkSamplerCreateInfo *pCreateInfo,
|
|
|
|
const VkAllocationCallbacks *pAllocator,
|
|
|
|
VkSampler *pSampler)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
struct v3dv_sampler *sampler;
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
|
|
|
|
|
|
|
|
sampler = vk_zalloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
if (!sampler)
|
|
|
|
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
v3dv/descriptor_set: combine texture and sampler indices
OpenGL doesn't have the concept of individual texture and sampler, so
texture and sampler indexes have the same value. v3d compiler uses
this assumption, so for example, the texture info at the v3d key
include values that you need to use the texture format and the sampler
to fill (like the return_size).
One option would be to adapt the v3d compiler to handle both, but then
we would need to adapt to the lowerings it uses, like nir_lower_tex,
that also take the same assumption.
We deal with this on the Vulkan driver, by reassigning the texture and
sampler index to a combined one. We add a hash table to map the
combined texture idx and sampler idx to this combined idx, and a
simple array to the opposite map. On the driver we work with the
separate indices to fill up the data, while the v3d compiler works
with the combined one.
As mentioned, this is needed to properly fill up the texture return
size, so as we are here, we fix that. This gets tests like the
following working:
dEQP-VK.glsl.texture_gather.basic.2d.depth32f.base_level.level_2
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6766>
2020-04-06 23:33:14 +01:00
|
|
|
sampler->compare_enable = pCreateInfo->compareEnable;
|
2020-07-01 13:21:09 +01:00
|
|
|
sampler->unnormalized_coordinates = pCreateInfo->unnormalizedCoordinates;
|
2020-03-29 15:29:55 +01:00
|
|
|
pack_sampler_state(sampler, pCreateInfo);
|
|
|
|
|
|
|
|
*pSampler = v3dv_sampler_to_handle(sampler);
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
v3dv_DestroySampler(VkDevice _device,
|
|
|
|
VkSampler _sampler,
|
|
|
|
const VkAllocationCallbacks *pAllocator)
|
|
|
|
{
|
|
|
|
V3DV_FROM_HANDLE(v3dv_device, device, _device);
|
|
|
|
V3DV_FROM_HANDLE(v3dv_sampler, sampler, _sampler);
|
|
|
|
|
|
|
|
if (!sampler)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vk_free2(&device->alloc, pAllocator, sampler);
|
|
|
|
}
|
|
|
|
|
2020-06-04 08:03:42 +01:00
|
|
|
/**
 * Implements vkGetDeviceMemoryCommitment.
 *
 * Always reports zero committed bytes: this driver does not expose
 * lazily-allocated memory types, so there is never any commitment
 * beyond what the application explicitly allocated.
 */
void
v3dv_GetDeviceMemoryCommitment(VkDevice device,
                               VkDeviceMemory memory,
                               VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}
|
|
|
|
|
|
|
|
/**
 * Implements vkGetImageSparseMemoryRequirements.
 *
 * Sparse resources are not supported by this driver, so the requirement
 * count is always reported as zero and pSparseMemoryRequirements is never
 * written.
 */
void
v3dv_GetImageSparseMemoryRequirements(
    VkDevice device,
    VkImage image,
    uint32_t *pSparseMemoryRequirementCount,
    VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   *pSparseMemoryRequirementCount = 0;
}
|
2020-03-29 15:29:55 +01:00
|
|
|
|
2020-06-04 08:03:42 +01:00
|
|
|
/**
 * Implements vkGetImageSparseMemoryRequirements2.
 *
 * Same as v3dv_GetImageSparseMemoryRequirements: sparse resources are not
 * supported, so zero requirements are reported.
 */
void
v3dv_GetImageSparseMemoryRequirements2(
    VkDevice device,
    const VkImageSparseMemoryRequirementsInfo2 *pInfo,
    uint32_t *pSparseMemoryRequirementCount,
    VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
{
   *pSparseMemoryRequirementCount = 0;
}
|