panvk: Convert to the common sync/submit framework

Acked-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15296>
This commit is contained in:
Jason Ekstrand 2022-03-08 22:13:20 -06:00 committed by Marge Bot
parent d68b9f0e6b
commit 0f048c5782
7 changed files with 169 additions and 534 deletions

View File

@ -47,7 +47,6 @@ libpanvk_files = files(
'panvk_private.h',
'panvk_query.c',
'panvk_shader.c',
'panvk_sync.c',
'panvk_util.c',
'panvk_wsi.c',
)

View File

@ -47,6 +47,7 @@
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_drm_syncobj.h"
#include "vk_util.h"
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
@ -340,6 +341,17 @@ panvk_physical_device_init(struct panvk_physical_device *device,
panvk_get_driver_uuid(&device->device_uuid);
panvk_get_device_uuid(&device->device_uuid);
device->drm_syncobj_type = vk_drm_syncobj_get_type(device->pdev.fd);
/* We don't support timelines in the uAPI yet and we don't want it getting
* suddenly turned on by vk_drm_syncobj_get_type() without us adding panvk
* code for it first.
*/
device->drm_syncobj_type.features &= ~VK_SYNC_FEATURE_TIMELINE;
device->sync_types[0] = &device->drm_syncobj_type;
device->sync_types[1] = NULL;
device->vk.supported_sync_types = device->sync_types;
result = panvk_wsi_init(device);
if (result != VK_SUCCESS) {
vk_error(instance, result);
@ -943,6 +955,13 @@ panvk_queue_init(struct panvk_device *device,
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
switch (pdev->arch) {
case 5: queue->vk.driver_submit = panvk_v5_queue_submit; break;
case 6: queue->vk.driver_submit = panvk_v6_queue_submit; break;
case 7: queue->vk.driver_submit = panvk_v7_queue_submit; break;
default: unreachable("Invalid arch");
}
queue->sync = create.handle;
return VK_SUCCESS;
}
@ -1004,6 +1023,9 @@ panvk_CreateDevice(VkPhysicalDevice physicalDevice,
device->instance = physical_device->instance;
device->physical_device = physical_device;
const struct panfrost_device *pdev = &physical_device->pdev;
vk_device_set_drm_fd(&device->vk, pdev->fd);
for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
const VkDeviceQueueCreateInfo *queue_create =
&pCreateInfo->pQueueCreateInfos[i];

View File

@ -58,6 +58,7 @@
#include "vk_object.h"
#include "vk_physical_device.h"
#include "vk_queue.h"
#include "vk_sync.h"
#include "wsi_common.h"
#include "drm-uapi/panfrost_drm.h"
@ -185,6 +186,9 @@ struct panvk_physical_device {
uint8_t device_uuid[VK_UUID_SIZE];
uint8_t cache_uuid[VK_UUID_SIZE];
struct vk_sync_type drm_syncobj_type;
const struct vk_sync_type *sync_types[2];
struct wsi_device wsi_device;
struct panvk_meta meta;
@ -289,10 +293,6 @@ struct panvk_batch {
bool issued;
};
struct panvk_syncobj {
uint32_t permanent, temporary;
};
enum panvk_event_op_type {
PANVK_EVENT_OP_SET,
PANVK_EVENT_OP_RESET,
@ -304,25 +304,6 @@ struct panvk_event_op {
struct panvk_event *event;
};
struct panvk_fence {
struct vk_object_base base;
struct panvk_syncobj syncobj;
};
struct panvk_semaphore {
struct vk_object_base base;
struct panvk_syncobj syncobj;
};
int
panvk_signal_syncobjs(struct panvk_device *device,
struct panvk_syncobj *syncobj1,
struct panvk_syncobj *syncobj2);
int
panvk_syncobj_to_fd(struct panvk_device *device,
struct panvk_syncobj *sync);
struct panvk_device_memory {
struct vk_object_base base;
struct panfrost_bo *bo;
@ -1054,7 +1035,6 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_descriptor_set, base, VkDescriptorSet, VK_O
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_descriptor_set_layout, base,
VkDescriptorSetLayout, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_device_memory, base, VkDeviceMemory, VK_OBJECT_TYPE_DEVICE_MEMORY)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_fence, base, VkFence, VK_OBJECT_TYPE_FENCE)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_event, base, VkEvent, VK_OBJECT_TYPE_EVENT)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_framebuffer, base, VkFramebuffer, VK_OBJECT_TYPE_FRAMEBUFFER)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_image, vk.base, VkImage, VK_OBJECT_TYPE_IMAGE)
@ -1064,7 +1044,6 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_pipeline, base, VkPipeline, VK_OBJECT_TYPE_
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_pipeline_layout, base, VkPipelineLayout, VK_OBJECT_TYPE_PIPELINE_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_render_pass, base, VkRenderPass, VK_OBJECT_TYPE_RENDER_PASS)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_sampler, base, VkSampler, VK_OBJECT_TYPE_SAMPLER)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_semaphore, base, VkSemaphore, VK_OBJECT_TYPE_SEMAPHORE)
#define panvk_arch_name(name, version) panvk_## version ## _ ## name
@ -1088,12 +1067,14 @@ do { \
#endif
#include "panvk_vX_cmd_buffer.h"
#include "panvk_vX_cs.h"
#include "panvk_vX_device.h"
#include "panvk_vX_meta.h"
#else
#define PAN_ARCH 5
#define panvk_per_arch(name) panvk_arch_name(name, v5)
#include "panvk_vX_cmd_buffer.h"
#include "panvk_vX_cs.h"
#include "panvk_vX_device.h"
#include "panvk_vX_meta.h"
#undef PAN_ARCH
#undef panvk_per_arch
@ -1101,6 +1082,7 @@ do { \
#define panvk_per_arch(name) panvk_arch_name(name, v6)
#include "panvk_vX_cmd_buffer.h"
#include "panvk_vX_cs.h"
#include "panvk_vX_device.h"
#include "panvk_vX_meta.h"
#undef PAN_ARCH
#undef panvk_per_arch
@ -1108,6 +1090,7 @@ do { \
#define panvk_per_arch(name) panvk_arch_name(name, v7)
#include "panvk_vX_cmd_buffer.h"
#include "panvk_vX_cs.h"
#include "panvk_vX_device.h"
#include "panvk_vX_meta.h"
#undef PAN_ARCH
#undef panvk_per_arch

View File

@ -1,420 +0,0 @@
/*
* Copyright (C) 2021 Collabora Ltd.
*
* Derived from tu_drm.c which is:
* Copyright © 2018 Google, Inc.
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <xf86drm.h>
#include "panvk_private.h"
/* Create a new DRM syncobj and install it as the permanent payload of
 * @sync. When @signaled is true the syncobj starts out signaled.
 */
static VkResult
sync_create(struct panvk_device *device,
            struct panvk_syncobj *sync,
            bool signaled)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   struct drm_syncobj_create create = { 0 };

   if (signaled)
      create.flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

   if (drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_CREATE, &create))
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   sync->permanent = create.handle;
   return VK_SUCCESS;
}
/* Replace the temporary payload of @sync with @syncobj, destroying any
 * previously installed temporary syncobj. Pass 0 to simply drop the
 * temporary payload.
 */
static void
sync_set_temporary(struct panvk_device *device, struct panvk_syncobj *sync,
                   uint32_t syncobj)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   uint32_t old = sync->temporary;

   sync->temporary = syncobj;

   if (old) {
      struct drm_syncobj_destroy destroy = { .handle = old };
      drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
   }
}
/* Destroy both the temporary and permanent payloads of @sync.
 * A NULL @sync is accepted and treated as a no-op.
 */
static void
sync_destroy(struct panvk_device *device, struct panvk_syncobj *sync)
{
   if (!sync)
      return;

   const struct panfrost_device *pdev = &device->physical_device->pdev;

   /* Dropping the temporary payload frees any temporary syncobj. */
   sync_set_temporary(device, sync, 0);

   struct drm_syncobj_destroy destroy = { .handle = sync->permanent };
   drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
}
/* Import an external FD into @sync.
 *
 * If @sync_fd is false, @fd is an opaque syncobj FD: it is converted to a
 * syncobj handle that replaces either the temporary or the permanent
 * payload depending on @temporary. If @sync_fd is true, @fd is a sync file
 * (copy transference): a fresh syncobj is created, the sync-file state is
 * copied into it, and it becomes the temporary payload (sync-file imports
 * are only valid as temporary imports, hence the assert).
 *
 * On success this function takes ownership of @fd and closes it; with
 * @sync_fd, fd == -1 means "import an already-signaled fence".
 */
static VkResult
sync_import(struct panvk_device *device, struct panvk_syncobj *sync,
            bool temporary, bool sync_fd, int fd)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   int ret;

   if (!sync_fd) {
      uint32_t *dst = temporary ? &sync->temporary : &sync->permanent;

      /* Convert the opaque FD into a syncobj handle. */
      struct drm_syncobj_handle handle = { .fd = fd };
      ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &handle);
      if (ret)
         return VK_ERROR_INVALID_EXTERNAL_HANDLE;

      /* Destroy whatever payload we are about to replace. */
      if (*dst) {
         struct drm_syncobj_destroy destroy = { .handle = *dst };
         drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
      }
      *dst = handle.handle;
      close(fd);
   } else {
      /* Sync-file payloads have copy transference and may only be
       * imported temporarily.
       */
      assert(temporary);
      struct drm_syncobj_create create = {};

      /* fd == -1 is the spec's way of importing an already-signaled
       * sync file.
       */
      if (fd == -1)
         create.flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
      if (ret)
         return VK_ERROR_INVALID_EXTERNAL_HANDLE;

      if (fd != -1) {
         /* Copy the sync-file fence state into the new syncobj. */
         struct drm_syncobj_handle handle = {
            .fd = fd,
            .handle = create.handle,
            .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
         };
         ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &handle);
         if (ret) {
            /* Don't leak the syncobj we just created. */
            struct drm_syncobj_destroy destroy = { .handle = create.handle };
            drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
            return VK_ERROR_INVALID_EXTERNAL_HANDLE;
         }
         close(fd);
      }
      sync_set_temporary(device, sync, create.handle);
   }
   return VK_SUCCESS;
}
/* Export the currently-active payload of @sync (the temporary one when
 * set, otherwise the permanent one) to *@p_fd: a sync-file FD when
 * @sync_fd is true, an opaque syncobj FD otherwise. Exporting drops the
 * temporary payload, reverting the object to its permanent one.
 */
static VkResult
sync_export(struct panvk_device *device, struct panvk_syncobj *sync,
            bool sync_fd, int *p_fd)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   struct drm_syncobj_handle handle = {
      .handle = sync->temporary ? sync->temporary : sync->permanent,
      .flags = sync_fd ? DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE : 0,
      .fd = -1,
   };

   if (drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle))
      return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   /* restore permanent payload on export */
   sync_set_temporary(device, sync, 0);

   *p_fd = handle.fd;
   return VK_SUCCESS;
}
/* vkCreateSemaphore: allocate a panvk_semaphore backed by a fresh,
 * unsignaled DRM syncobj.
 */
VkResult
panvk_CreateSemaphore(VkDevice _device,
                      const VkSemaphoreCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkSemaphore *pSemaphore)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   struct panvk_semaphore *sem =
      vk_object_zalloc(&device->vk, pAllocator, sizeof(*sem),
                       VK_OBJECT_TYPE_SEMAPHORE);
   if (!sem)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   VkResult ret = sync_create(device, &sem->syncobj, false);
   if (ret != VK_SUCCESS) {
      /* Fix: the original error path called vk_free2() on an undeclared
       * variable 'sync'. Objects allocated with vk_object_zalloc() must
       * be released with vk_object_free() so the object base is finished.
       */
      vk_object_free(&device->vk, pAllocator, sem);
      return ret;
   }

   *pSemaphore = panvk_semaphore_to_handle(sem);
   return VK_SUCCESS;
}
/* vkDestroySemaphore: destroy the backing syncobj(s) and free the object.
 * Fix: the Vulkan spec requires destroying VK_NULL_HANDLE to be a no-op
 * (panvk_DestroyFence already checks this; this function did not).
 */
void
panvk_DestroySemaphore(VkDevice _device, VkSemaphore _sem, const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_semaphore, sem, _sem);

   if (!sem)
      return;

   sync_destroy(device, &sem->syncobj);
   vk_object_free(&device->vk, pAllocator, sem);
}
/* vkImportSemaphoreFdKHR: import an opaque-FD or sync-FD payload into
 * the semaphore's syncobj.
 */
VkResult
panvk_ImportSemaphoreFdKHR(VkDevice _device, const VkImportSemaphoreFdInfoKHR *info)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_semaphore, sem, info->semaphore);

   const bool temporary =
      (info->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) != 0;
   const bool is_sync_fd =
      info->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

   return sync_import(device, &sem->syncobj, temporary, is_sync_fd, info->fd);
}
/* vkGetSemaphoreFdKHR: export the semaphore's active payload as an FD. */
VkResult
panvk_GetSemaphoreFdKHR(VkDevice _device, const VkSemaphoreGetFdInfoKHR *info, int *pFd)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_semaphore, sem, info->semaphore);

   const bool is_sync_fd =
      info->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

   return sync_export(device, &sem->syncobj, is_sync_fd, pFd);
}
/* vkCreateFence: allocate a panvk_fence backed by a DRM syncobj,
 * optionally created in the signaled state.
 */
VkResult
panvk_CreateFence(VkDevice _device,
                  const VkFenceCreateInfo *info,
                  const VkAllocationCallbacks *pAllocator,
                  VkFence *pFence)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   struct panvk_fence *fence =
      vk_object_zalloc(&device->vk, pAllocator, sizeof(*fence),
                       VK_OBJECT_TYPE_FENCE);
   if (!fence)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   VkResult ret = sync_create(device, &fence->syncobj,
                              info->flags & VK_FENCE_CREATE_SIGNALED_BIT);
   if (ret != VK_SUCCESS) {
      /* Fix: objects allocated with vk_object_zalloc() must be released
       * with vk_object_free() (which finishes the object base), not
       * vk_free2().
       */
      vk_object_free(&device->vk, pAllocator, fence);
      return ret;
   }

   *pFence = panvk_fence_to_handle(fence);
   return VK_SUCCESS;
}
/* vkDestroyFence: destroy the backing syncobj(s) and free the object.
 * VK_NULL_HANDLE is a no-op as the spec requires.
 */
void
panvk_DestroyFence(VkDevice _device, VkFence _fence,
                   const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_fence, fence, _fence);

   if (fence == NULL)
      return;

   sync_destroy(device, &fence->syncobj);
   vk_object_free(&device->vk, pAllocator, fence);
}
/* vkImportFenceFdKHR: import an opaque-FD or sync-FD payload into the
 * fence's syncobj.
 */
VkResult
panvk_ImportFenceFdKHR(VkDevice _device, const VkImportFenceFdInfoKHR *info)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_fence, fence, info->fence);

   const bool temporary =
      (info->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) != 0;
   const bool is_sync_fd =
      info->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

   return sync_import(device, &fence->syncobj, temporary, is_sync_fd,
                      info->fd);
}
/* vkGetFenceFdKHR: export the fence's active payload as an FD. */
VkResult
panvk_GetFenceFdKHR(VkDevice _device, const VkFenceGetFdInfoKHR *info, int *pFd)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_fence, fence, info->fence);

   const bool is_sync_fd =
      info->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

   return sync_export(device, &fence->syncobj, is_sync_fd, pFd);
}
/* Wait on an array of syncobj handles. WAIT_FOR_SUBMIT is always set so
 * that waiting on a syncobj with no fence attached yet blocks until one
 * is submitted. Returns VK_TIMEOUT when the deadline expires.
 */
static VkResult
drm_syncobj_wait(struct panvk_device *device,
                 const uint32_t *handles, uint32_t count_handles,
                 int64_t timeout_nsec, bool wait_all)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   uint32_t flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

   if (wait_all)
      flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

   struct drm_syncobj_wait wait = {
      .handles = (uint64_t)(uintptr_t)handles,
      .count_handles = count_handles,
      .timeout_nsec = timeout_nsec,
      .flags = flags,
   };

   if (drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait)) {
      if (errno == ETIME)
         return VK_TIMEOUT;

      assert(0);
      return VK_ERROR_DEVICE_LOST; /* TODO */
   }

   return VK_SUCCESS;
}
/* Current CLOCK_MONOTONIC time in nanoseconds. */
static uint64_t
gettime_ns(void)
{
   struct timespec ts;

   clock_gettime(CLOCK_MONOTONIC, &ts);
   return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}
/* Convert a relative timeout into an absolute CLOCK_MONOTONIC deadline
 * for DRM_IOCTL_SYNCOBJ_WAIT (the kernel converts it right back to a
 * relative timeout internally). The deadline is clamped so that it never
 * exceeds INT64_MAX; a zero timeout stays zero (poll).
 */
static uint64_t
absolute_timeout(uint64_t timeout)
{
   if (!timeout)
      return 0;

   uint64_t now = gettime_ns();
   uint64_t max_timeout = (uint64_t)INT64_MAX - now;

   if (timeout > max_timeout)
      timeout = max_timeout;

   return now + timeout;
}
/* vkWaitForFences: resolve each fence to its active syncobj handle and
 * wait on all of them (or any, per @waitAll) with an absolute deadline.
 */
VkResult
panvk_WaitForFences(VkDevice _device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    VkBool32 waitAll,
                    uint64_t timeout)
{
   VK_FROM_HANDLE(panvk_device, device, _device);

   if (panvk_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint32_t handles[fenceCount];
   for (uint32_t i = 0; i < fenceCount; i++) {
      VK_FROM_HANDLE(panvk_fence, fence, pFences[i]);

      /* The temporary payload, when present, shadows the permanent one. */
      handles[i] = fence->syncobj.temporary ? fence->syncobj.temporary
                                            : fence->syncobj.permanent;
   }

   return drm_syncobj_wait(device, handles, fenceCount,
                           absolute_timeout(timeout), waitAll);
}
/* vkResetFences: drop any temporary payloads and reset the permanent
 * syncobjs to unsignaled. Always returns VK_SUCCESS; an ioctl failure
 * marks the device lost instead of failing the call.
 */
VkResult
panvk_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   uint32_t handles[fenceCount];

   for (uint32_t i = 0; i < fenceCount; i++) {
      VK_FROM_HANDLE(panvk_fence, fence, pFences[i]);

      /* Resetting reverts a fence to its permanent payload. */
      sync_set_temporary(device, &fence->syncobj, 0);
      handles[i] = fence->syncobj.permanent;
   }

   struct drm_syncobj_array objs = {
      .handles = (uint64_t)(uintptr_t)handles,
      .count_handles = fenceCount,
   };

   if (drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_RESET, &objs)) {
      panvk_device_set_lost(device, "DRM_IOCTL_SYNCOBJ_RESET failure: %s",
                            strerror(errno));
   }

   return VK_SUCCESS;
}
/* vkGetFenceStatus: poll the fence's active payload with a zero timeout.
 * A wait timeout maps to VK_NOT_READY.
 */
VkResult
panvk_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_fence, fence, _fence);

   uint32_t handle = fence->syncobj.temporary ? fence->syncobj.temporary
                                              : fence->syncobj.permanent;

   VkResult result = drm_syncobj_wait(device, &handle, 1, 0, false);
   return result == VK_TIMEOUT ? VK_NOT_READY : result;
}
/* Signal the active payload of up to two panvk_syncobjs; either pointer
 * may be NULL. Returns the drmIoctl() result, or 0 when there was nothing
 * to signal.
 */
int
panvk_signal_syncobjs(struct panvk_device *device,
                      struct panvk_syncobj *syncobj1,
                      struct panvk_syncobj *syncobj2)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   struct panvk_syncobj *in[] = { syncobj1, syncobj2 };
   uint32_t handles[2];
   uint32_t count = 0;

   for (unsigned i = 0; i < 2; i++) {
      if (in[i]) {
         handles[count++] = in[i]->temporary ? in[i]->temporary
                                             : in[i]->permanent;
      }
   }

   if (count == 0)
      return 0;

   struct drm_syncobj_array objs = {
      .handles = (uintptr_t)handles,
      .count_handles = count,
   };

   return drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &objs);
}
/* Export the permanent payload of @sync as an opaque syncobj FD.
 * Returns the FD on success, -1 on failure.
 */
int
panvk_syncobj_to_fd(struct panvk_device *device, struct panvk_syncobj *sync)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   struct drm_syncobj_handle handle = { .handle = sync->permanent };

   if (drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle))
      return -1;

   return handle.fd;
}

View File

@ -33,6 +33,8 @@
#include "panvk_private.h"
#include "panvk_cs.h"
#include "vk_drm_syncobj.h"
static void
panvk_queue_submit_batch(struct panvk_queue *queue,
struct panvk_batch *batch,
@ -198,106 +200,102 @@ panvk_signal_event_syncobjs(struct panvk_queue *queue, struct panvk_batch *batch
}
VkResult
panvk_per_arch(QueueSubmit)(VkQueue _queue,
uint32_t submitCount,
const VkSubmitInfo *pSubmits,
VkFence _fence)
panvk_per_arch(queue_submit)(struct vk_queue *vk_queue,
struct vk_queue_submit *submit)
{
VK_FROM_HANDLE(panvk_queue, queue, _queue);
VK_FROM_HANDLE(panvk_fence, fence, _fence);
struct panvk_queue *queue =
container_of(vk_queue, struct panvk_queue, vk);
const struct panfrost_device *pdev = &queue->device->physical_device->pdev;
for (uint32_t i = 0; i < submitCount; ++i) {
const VkSubmitInfo *submit = pSubmits + i;
unsigned nr_semaphores = submit->waitSemaphoreCount + 1;
uint32_t semaphores[nr_semaphores];
semaphores[0] = queue->sync;
for (unsigned i = 0; i < submit->waitSemaphoreCount; i++) {
VK_FROM_HANDLE(panvk_semaphore, sem, submit->pWaitSemaphores[i]);
unsigned nr_semaphores = submit->wait_count + 1;
uint32_t semaphores[nr_semaphores];
semaphores[i + 1] = sem->syncobj.temporary ? : sem->syncobj.permanent;
}
semaphores[0] = queue->sync;
for (unsigned i = 0; i < submit->wait_count; i++) {
assert(vk_sync_type_is_drm_syncobj(submit->waits[i].sync->type));
struct vk_drm_syncobj *syncobj =
vk_sync_as_drm_syncobj(submit->waits[i].sync);
for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
VK_FROM_HANDLE(panvk_cmd_buffer, cmdbuf, (submit->pCommandBuffers[j]));
semaphores[i + 1] = syncobj->syncobj;
}
list_for_each_entry(struct panvk_batch, batch, &cmdbuf->batches, node) {
/* FIXME: should be done at the batch level */
unsigned nr_bos =
panvk_pool_num_bos(&cmdbuf->desc_pool) +
panvk_pool_num_bos(&cmdbuf->varying_pool) +
panvk_pool_num_bos(&cmdbuf->tls_pool) +
(batch->fb.info ? batch->fb.info->attachment_count : 0) +
(batch->blit.src ? 1 : 0) +
(batch->blit.dst ? 1 : 0) +
(batch->scoreboard.first_tiler ? 1 : 0) + 1;
unsigned bo_idx = 0;
uint32_t bos[nr_bos];
for (uint32_t j = 0; j < submit->command_buffer_count; ++j) {
struct panvk_cmd_buffer *cmdbuf =
container_of(submit->command_buffers[j], struct panvk_cmd_buffer, vk);
panvk_pool_get_bo_handles(&cmdbuf->desc_pool, &bos[bo_idx]);
bo_idx += panvk_pool_num_bos(&cmdbuf->desc_pool);
list_for_each_entry(struct panvk_batch, batch, &cmdbuf->batches, node) {
/* FIXME: should be done at the batch level */
unsigned nr_bos =
panvk_pool_num_bos(&cmdbuf->desc_pool) +
panvk_pool_num_bos(&cmdbuf->varying_pool) +
panvk_pool_num_bos(&cmdbuf->tls_pool) +
(batch->fb.info ? batch->fb.info->attachment_count : 0) +
(batch->blit.src ? 1 : 0) +
(batch->blit.dst ? 1 : 0) +
(batch->scoreboard.first_tiler ? 1 : 0) + 1;
unsigned bo_idx = 0;
uint32_t bos[nr_bos];
panvk_pool_get_bo_handles(&cmdbuf->varying_pool, &bos[bo_idx]);
bo_idx += panvk_pool_num_bos(&cmdbuf->varying_pool);
panvk_pool_get_bo_handles(&cmdbuf->desc_pool, &bos[bo_idx]);
bo_idx += panvk_pool_num_bos(&cmdbuf->desc_pool);
panvk_pool_get_bo_handles(&cmdbuf->tls_pool, &bos[bo_idx]);
bo_idx += panvk_pool_num_bos(&cmdbuf->tls_pool);
panvk_pool_get_bo_handles(&cmdbuf->varying_pool, &bos[bo_idx]);
bo_idx += panvk_pool_num_bos(&cmdbuf->varying_pool);
if (batch->fb.info) {
for (unsigned i = 0; i < batch->fb.info->attachment_count; i++) {
bos[bo_idx++] = batch->fb.info->attachments[i].iview->pview.image->data.bo->gem_handle;
}
panvk_pool_get_bo_handles(&cmdbuf->tls_pool, &bos[bo_idx]);
bo_idx += panvk_pool_num_bos(&cmdbuf->tls_pool);
if (batch->fb.info) {
for (unsigned i = 0; i < batch->fb.info->attachment_count; i++) {
bos[bo_idx++] = batch->fb.info->attachments[i].iview->pview.image->data.bo->gem_handle;
}
if (batch->blit.src)
bos[bo_idx++] = batch->blit.src->gem_handle;
if (batch->blit.dst)
bos[bo_idx++] = batch->blit.dst->gem_handle;
if (batch->scoreboard.first_tiler)
bos[bo_idx++] = pdev->tiler_heap->gem_handle;
bos[bo_idx++] = pdev->sample_positions->gem_handle;
assert(bo_idx == nr_bos);
/* Merge identical BO entries. */
for (unsigned x = 0; x < nr_bos; x++) {
for (unsigned y = x + 1; y < nr_bos; ) {
if (bos[x] == bos[y])
bos[y] = bos[--nr_bos];
else
y++;
}
}
unsigned nr_in_fences = 0;
unsigned max_wait_event_syncobjs =
util_dynarray_num_elements(&batch->event_ops,
struct panvk_event_op);
uint32_t in_fences[nr_semaphores + max_wait_event_syncobjs];
memcpy(in_fences, semaphores, nr_semaphores * sizeof(*in_fences));
nr_in_fences += nr_semaphores;
panvk_add_wait_event_syncobjs(batch, in_fences, &nr_in_fences);
panvk_queue_submit_batch(queue, batch, bos, nr_bos, in_fences, nr_in_fences);
panvk_signal_event_syncobjs(queue, batch);
}
}
/* Transfer the out fence to signal semaphores */
for (unsigned i = 0; i < submit->signalSemaphoreCount; i++) {
VK_FROM_HANDLE(panvk_semaphore, sem, submit->pSignalSemaphores[i]);
panvk_queue_transfer_sync(queue, sem->syncobj.temporary ? : sem->syncobj.permanent);
if (batch->blit.src)
bos[bo_idx++] = batch->blit.src->gem_handle;
if (batch->blit.dst)
bos[bo_idx++] = batch->blit.dst->gem_handle;
if (batch->scoreboard.first_tiler)
bos[bo_idx++] = pdev->tiler_heap->gem_handle;
bos[bo_idx++] = pdev->sample_positions->gem_handle;
assert(bo_idx == nr_bos);
/* Merge identical BO entries. */
for (unsigned x = 0; x < nr_bos; x++) {
for (unsigned y = x + 1; y < nr_bos; ) {
if (bos[x] == bos[y])
bos[y] = bos[--nr_bos];
else
y++;
}
}
unsigned nr_in_fences = 0;
unsigned max_wait_event_syncobjs =
util_dynarray_num_elements(&batch->event_ops,
struct panvk_event_op);
uint32_t in_fences[nr_semaphores + max_wait_event_syncobjs];
memcpy(in_fences, semaphores, nr_semaphores * sizeof(*in_fences));
nr_in_fences += nr_semaphores;
panvk_add_wait_event_syncobjs(batch, in_fences, &nr_in_fences);
panvk_queue_submit_batch(queue, batch, bos, nr_bos, in_fences, nr_in_fences);
panvk_signal_event_syncobjs(queue, batch);
}
}
if (fence) {
/* Transfer the last out fence to the fence object */
panvk_queue_transfer_sync(queue, fence->syncobj.temporary ? : fence->syncobj.permanent);
/* Transfer the out fence to signal semaphores */
for (unsigned i = 0; i < submit->signal_count; i++) {
assert(vk_sync_type_is_drm_syncobj(submit->signals[i].sync->type));
struct vk_drm_syncobj *syncobj =
vk_sync_as_drm_syncobj(submit->signals[i].sync);
panvk_queue_transfer_sync(queue, syncobj->syncobj);
}
return VK_SUCCESS;

View File

@ -0,0 +1,34 @@
/*
* Copyright (C) 2022 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef PANVK_PRIVATE_H
#error "Must be included from panvk_private.h"
#endif
#ifndef PAN_ARCH
#error "no arch"
#endif
VkResult
panvk_per_arch(queue_submit)(struct vk_queue *queue,
struct vk_queue_submit *submit);

View File

@ -27,6 +27,9 @@
#include "panvk_private.h"
#include "vk_fence.h"
#include "vk_semaphore.h"
#include "vk_sync_dummy.h"
#include "vk_util.h"
#include "wsi_common.h"
@ -72,8 +75,8 @@ panvk_AcquireNextImage2KHR(VkDevice _device,
uint32_t *pImageIndex)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_fence, fence, pAcquireInfo->fence);
VK_FROM_HANDLE(panvk_semaphore, sem, pAcquireInfo->semaphore);
VK_FROM_HANDLE(vk_fence, fence, pAcquireInfo->fence);
VK_FROM_HANDLE(vk_semaphore, sem, pAcquireInfo->semaphore);
struct panvk_physical_device *pdevice = device->physical_device;
VkResult result =
@ -82,8 +85,24 @@ panvk_AcquireNextImage2KHR(VkDevice _device,
/* signal fence/semaphore - image is available immediately */
if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
panvk_signal_syncobjs(device, fence ? &fence->syncobj : NULL,
sem ? &sem->syncobj : NULL);
VkResult sync_res;
if (fence) {
vk_fence_reset_temporary(&device->vk, fence);
sync_res = vk_sync_create(&device->vk, &vk_sync_dummy_type,
0 /* flags */, 0 /* initial_value */,
&fence->temporary);
if (sync_res != VK_SUCCESS)
return sync_res;
}
if (sem) {
vk_semaphore_reset_temporary(&device->vk, sem);
sync_res = vk_sync_create(&device->vk, &vk_sync_dummy_type,
0 /* flags */, 0 /* initial_value */,
&sem->temporary);
if (sync_res != VK_SUCCESS)
return sync_res;
}
}
return result;