turnip: rework fences to use syncobjs

Fences are now just a syncobj, which makes our life easier.

The next step will be to fill out ImportFenceFdKHR()/GetFenceFdKHR().

Signed-off-by: Jonathan Marek <jonathan@marek.ca>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6683>
This commit is contained in:
Jonathan Marek 2020-09-10 22:51:53 -04:00 committed by Marge Bot
parent c23206757a
commit cec0bc73e5
7 changed files with 324 additions and 496 deletions

View File

@ -47,7 +47,6 @@ libtu_files = files(
'tu_device.c',
'tu_descriptor_set.c',
'tu_descriptor_set.h',
'tu_fence.c',
'tu_formats.c',
'tu_image.c',
'tu_nir_lower_multiview.c',

View File

@ -975,7 +975,7 @@ tu_queue_init(struct tu_device *device,
if (ret)
return VK_ERROR_INITIALIZATION_FAILED;
tu_fence_init(&queue->submit_fence, false);
queue->fence = -1;
return VK_SUCCESS;
}
@ -983,7 +983,8 @@ tu_queue_init(struct tu_device *device,
static void
tu_queue_finish(struct tu_queue *queue)
{
tu_fence_finish(&queue->submit_fence);
if (queue->fence >= 0)
close(queue->fence);
tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}
@ -1331,8 +1332,20 @@ tu_QueueWaitIdle(VkQueue _queue)
if (tu_device_is_lost(queue->device))
return VK_ERROR_DEVICE_LOST;
tu_fence_wait_idle(&queue->submit_fence);
if (queue->fence < 0)
return VK_SUCCESS;
struct pollfd fds = { .fd = queue->fence, .events = POLLIN };
int ret;
do {
ret = poll(&fds, 1, -1);
} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
/* TODO: otherwise set device lost ? */
assert(ret == 1 && !(fds.revents & (POLLERR | POLLNVAL)));
close(queue->fence);
queue->fence = -1;
return VK_SUCCESS;
}
@ -2092,25 +2105,6 @@ tu_GetMemoryFdPropertiesKHR(VkDevice _device,
return VK_SUCCESS;
}
VkResult
tu_ImportFenceFdKHR(VkDevice _device,
const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
tu_stub();
return VK_SUCCESS;
}
VkResult
tu_GetFenceFdKHR(VkDevice _device,
const VkFenceGetFdInfoKHR *pGetFdInfo,
int *pFd)
{
tu_stub();
return VK_SUCCESS;
}
void
tu_GetPhysicalDeviceExternalFenceProperties(
VkPhysicalDevice physicalDevice,

View File

@ -644,13 +644,17 @@ tu_QueueSubmit(VkQueue _queue,
VkFence _fence)
{
TU_FROM_HANDLE(tu_queue, queue, _queue);
TU_FROM_HANDLE(tu_fence, fence, _fence);
for (uint32_t i = 0; i < submitCount; ++i) {
const VkSubmitInfo *submit = pSubmits + i;
const bool last_submit = (i == submitCount - 1);
uint32_t out_syncobjs_size = submit->signalSemaphoreCount;
if (last_submit && fence)
out_syncobjs_size += 1;
/* note: assuming there won't be any very large semaphore counts */
struct drm_msm_gem_submit_syncobj in_syncobjs[submit->waitSemaphoreCount];
struct drm_msm_gem_submit_syncobj out_syncobjs[submit->signalSemaphoreCount];
struct drm_msm_gem_submit_syncobj out_syncobjs[out_syncobjs_size];
uint32_t nr_in_syncobjs = 0, nr_out_syncobjs = 0;
for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
@ -681,6 +685,13 @@ tu_QueueSubmit(VkQueue _queue,
};
}
if (last_submit && fence) {
out_syncobjs[nr_out_syncobjs++] = (struct drm_msm_gem_submit_syncobj) {
.handle = fence->syncobj,
.flags = 0,
};
}
uint32_t entry_count = 0;
for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
@ -713,7 +724,6 @@ tu_QueueSubmit(VkQueue _queue,
if (nr_out_syncobjs) {
flags |= MSM_SUBMIT_SYNCOBJ_OUT;
}
if (last_submit) {
flags |= MSM_SUBMIT_FENCE_FD_OUT;
}
@ -744,17 +754,184 @@ tu_QueueSubmit(VkQueue _queue,
tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
pSubmits[i].waitSemaphoreCount);
if (last_submit) {
/* no need to merge fences as queue execution is serialized */
tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
} else if (last_submit) {
close(req.fence_fd);
if (queue->fence >= 0)
close(queue->fence);
queue->fence = req.fence_fd;
}
}
if (_fence != VK_NULL_HANDLE) {
TU_FROM_HANDLE(tu_fence, fence, _fence);
tu_fence_copy(fence, &queue->submit_fence);
if (!submitCount && fence) {
/* signal fence immediately since we don't have a submit to do it */
ioctl(queue->device->fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &(struct drm_syncobj_array) {
.handles = (uintptr_t) &fence->syncobj,
.count_handles = 1,
});
}
return VK_SUCCESS;
}
VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   int ret;

   /* A tu_fence is just a wrapper around one DRM syncobj handle. */
   struct tu_fence *fence =
      vk_object_alloc(&device->vk, pAllocator, sizeof(*fence),
                      VK_OBJECT_TYPE_FENCE);
   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Create the syncobj pre-signaled when the app asked for a signaled
    * fence, so a subsequent wait/status query succeeds immediately.
    */
   struct drm_syncobj_create create = {
      .flags = COND(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT,
                    DRM_SYNCOBJ_CREATE_SIGNALED)
   };

   ret = ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
   if (ret) {
      /* Free with vk_object_free to match vk_object_alloc above (vk_free2
       * would skip vk_object_base_finish), and report through vk_error for
       * consistency with the allocation-failure path.
       */
      vk_object_free(&device->vk, pAllocator, fence);
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   fence->syncobj = create.handle;
   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}
void
tu_DestroyFence(VkDevice _device, VkFence _fence, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   /* Destroying VK_NULL_HANDLE is a no-op per the Vulkan spec. */
   if (!fence)
      return;

   /* Drop the kernel syncobj backing this fence, then the wrapper. */
   struct drm_syncobj_destroy destroy = { .handle = fence->syncobj };
   ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);

   vk_object_free(&device->vk, pAllocator, fence);
}
/* Stub: fence fd import is not implemented yet (the commit message notes
 * filling this in is the next step). tu_stub() logs the missing entry
 * point; returning VK_SUCCESS keeps callers running in the meantime.
 */
VkResult
tu_ImportFenceFdKHR(VkDevice _device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}
/* Stub: fence fd export is not implemented yet. tu_stub() logs the missing
 * entry point; note *pFd is left unwritten, so callers must not rely on it
 * until this is implemented.
 */
VkResult
tu_GetFenceFdKHR(VkDevice _device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}
/* Wait on (all or any of) a set of DRM syncobjs via DRM_IOCTL_SYNCOBJ_WAIT.
 *
 * timeout_nsec is an absolute deadline (callers pass absolute_timeout(),
 * which is based on CLOCK_MONOTONIC); 0 makes this a non-blocking poll.
 * Returns VK_SUCCESS, VK_TIMEOUT on expiry, or VK_ERROR_DEVICE_LOST for
 * any other ioctl failure (placeholder — see TODO below).
 */
static VkResult
drm_syncobj_wait(struct tu_device *device,
                 const uint32_t *handles, uint32_t count_handles,
                 int64_t timeout_nsec, bool wait_all)
{
   int ret = ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &(struct drm_syncobj_wait) {
      .handles = (uint64_t) (uintptr_t) handles,
      .count_handles = count_handles,
      .timeout_nsec = timeout_nsec,
      /* WAIT_FOR_SUBMIT: a syncobj with no fence attached yet counts as
       * unsignaled instead of making the ioctl fail outright. */
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
               COND(wait_all, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
   });
   if (ret) {
      /* The kernel reports an expired timeout as ETIME. */
      if (errno == ETIME)
         return VK_TIMEOUT;

      assert(0);
      return VK_ERROR_DEVICE_LOST; /* TODO */
   }
   return VK_SUCCESS;
}
/* Current CLOCK_MONOTONIC time, in nanoseconds. */
static uint64_t
gettime_ns(void)
{
   struct timespec ts;
   clock_gettime(CLOCK_MONOTONIC, &ts);
   return (uint64_t) ts.tv_sec * 1000000000ull + (uint64_t) ts.tv_nsec;
}
/* Convert a relative timeout into the absolute CLOCK_MONOTONIC deadline
 * DRM_IOCTL_SYNCOBJ_WAIT expects (the kernel then converts it right back
 * to a relative timeout - very smart UAPI). The sum is clamped so it
 * cannot overflow the kernel's signed 64-bit nanosecond field; a zero
 * timeout is passed through unchanged, meaning "poll".
 */
static uint64_t
absolute_timeout(uint64_t timeout)
{
   if (!timeout)
      return 0;

   const uint64_t now = gettime_ns();
   const uint64_t headroom = (uint64_t) INT64_MAX - now;
   return now + (timeout < headroom ? timeout : headroom);
}
VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (tu_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   /* Collect every fence's syncobj handle so the whole set can be waited
    * on with a single DRM_IOCTL_SYNCOBJ_WAIT.
    * note: VLA sized by the application-supplied fence count.
    */
   uint32_t syncobjs[fenceCount];
   for (uint32_t i = 0; i < fenceCount; i++) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      syncobjs[i] = fence->syncobj;
   }

   return drm_syncobj_wait(device, syncobjs, fenceCount,
                           absolute_timeout(timeout), waitAll);
}
VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   uint32_t syncobjs[fenceCount];
   for (uint32_t i = 0; i < fenceCount; i++) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      syncobjs[i] = fence->syncobj;
   }

   /* One ioctl un-signals every syncobj in the array. */
   struct drm_syncobj_array reset = {
      .handles = (uint64_t) (uintptr_t) syncobjs,
      .count_handles = fenceCount,
   };
   int ret = ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &reset);
   assert(!ret);

   return VK_SUCCESS;
}
VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   /* A zero deadline makes drm_syncobj_wait a non-blocking poll; the
    * kernel reports "still unsignaled" as a timeout, which this entry
    * point must spell VK_NOT_READY.
    */
   VkResult result = drm_syncobj_wait(device, &fence->syncobj, 1, 0, false);
   return result == VK_TIMEOUT ? VK_NOT_READY : result;
}

View File

@ -1,406 +0,0 @@
/*
* Copyright © 2019 Google LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "tu_private.h"
#include <fcntl.h>
#include <libsync.h>
#include <unistd.h>
#include "util/os_file.h"
#include "util/os_time.h"
/**
* Internally, a fence can be in one of these states.
*/
enum tu_fence_state
{
TU_FENCE_STATE_RESET,
TU_FENCE_STATE_PENDING,
TU_FENCE_STATE_SIGNALED,
};
static enum tu_fence_state
tu_fence_get_state(const struct tu_fence *fence)
{
if (fence->signaled)
assert(fence->fd < 0);
if (fence->signaled)
return TU_FENCE_STATE_SIGNALED;
else if (fence->fd >= 0)
return TU_FENCE_STATE_PENDING;
else
return TU_FENCE_STATE_RESET;
}
static void
tu_fence_set_state(struct tu_fence *fence, enum tu_fence_state state, int fd)
{
if (fence->fd >= 0)
close(fence->fd);
switch (state) {
case TU_FENCE_STATE_RESET:
assert(fd < 0);
fence->signaled = false;
fence->fd = -1;
break;
case TU_FENCE_STATE_PENDING:
assert(fd >= 0);
fence->signaled = false;
fence->fd = fd;
break;
case TU_FENCE_STATE_SIGNALED:
assert(fd < 0);
fence->signaled = true;
fence->fd = -1;
break;
default:
unreachable("unknown fence state");
break;
}
}
void
tu_fence_init(struct tu_fence *fence, bool signaled)
{
fence->signaled = signaled;
fence->fd = -1;
fence->fence_wsi = NULL;
}
void
tu_fence_finish(struct tu_fence *fence)
{
if (fence->fd >= 0)
close(fence->fd);
if (fence->fence_wsi)
fence->fence_wsi->destroy(fence->fence_wsi);
}
/**
* Update the associated fd of a fence. Ownership of \a fd is transferred to
* \a fence.
*
* This function does not block. \a fence can also be in any state when this
* function is called. To be able to do that, the caller must make sure that,
* when both the currently associated fd and the new fd are valid, they are on
* the same timeline with the new fd being later on the timeline.
*/
void
tu_fence_update_fd(struct tu_fence *fence, int fd)
{
const enum tu_fence_state state =
fd >= 0 ? TU_FENCE_STATE_PENDING : TU_FENCE_STATE_SIGNALED;
tu_fence_set_state(fence, state, fd);
}
/**
* Make a fence a copy of another fence. \a fence must be in the reset state.
*/
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src)
{
assert(tu_fence_get_state(fence) == TU_FENCE_STATE_RESET);
/* dup src->fd */
int fd = -1;
if (src->fd >= 0) {
fd = os_dupfd_cloexec(src->fd);
if (fd < 0) {
tu_loge("failed to dup fd %d for fence", src->fd);
sync_wait(src->fd, -1);
}
}
tu_fence_update_fd(fence, fd);
}
/**
* Wait until a fence is idle (i.e., not pending).
*/
void
tu_fence_wait_idle(struct tu_fence *fence)
{
if (fence->fd >= 0) {
if (sync_wait(fence->fd, -1))
tu_loge("sync_wait on fence fd %d failed", fence->fd);
tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
}
}
VkResult
tu_CreateFence(VkDevice _device,
const VkFenceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkFence *pFence)
{
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_fence *fence =
vk_object_alloc(&device->vk, pAllocator, sizeof(*fence),
VK_OBJECT_TYPE_FENCE);
if (!fence)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
tu_fence_init(fence, pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
*pFence = tu_fence_to_handle(fence);
return VK_SUCCESS;
}
void
tu_DestroyFence(VkDevice _device,
VkFence _fence,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_fence, fence, _fence);
if (!fence)
return;
tu_fence_finish(fence);
vk_object_free(&device->vk, pAllocator, fence);
}
/**
* Initialize a pollfd array from fences.
*/
static nfds_t
tu_fence_init_poll_fds(uint32_t fence_count,
const VkFence *fences,
bool wait_all,
struct pollfd *fds)
{
nfds_t nfds = 0;
for (uint32_t i = 0; i < fence_count; i++) {
TU_FROM_HANDLE(tu_fence, fence, fences[i]);
/* skip wsi fences */
if (fence->fence_wsi)
continue;
if (fence->signaled) {
if (wait_all) {
/* skip signaled fences */
continue;
} else {
/* no need to poll any fd */
nfds = 0;
break;
}
}
/* negative fds are never ready, which is the desired behavior */
fds[nfds].fd = fence->fd;
fds[nfds].events = POLLIN;
fds[nfds].revents = 0;
nfds++;
}
return nfds;
}
/**
* Translate timeout from nanoseconds to milliseconds for poll().
*/
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
const uint64_t ns_per_ms = 1000 * 1000;
uint64_t timeout_ms = timeout_ns / ns_per_ms;
/* round up if needed */
if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
timeout_ms++;
return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}
/**
* Poll a pollfd array.
*/
static VkResult
tu_fence_poll_fds(struct pollfd *fds, nfds_t nfds, uint64_t *timeout_ns)
{
while (true) {
/* poll */
uint64_t duration = os_time_get_nano();
int ret = poll(fds, nfds, tu_fence_get_poll_timeout(*timeout_ns));
duration = os_time_get_nano() - duration;
/* update timeout_ns */
if (*timeout_ns > duration)
*timeout_ns -= duration;
else
*timeout_ns = 0;
if (ret > 0) {
return VK_SUCCESS;
} else if (ret == 0) {
if (!*timeout_ns)
return VK_TIMEOUT;
} else if (errno != EINTR && errno != EAGAIN) {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
}
/**
* Update a pollfd array and the fence states. This should be called after a
* successful call to tu_fence_poll_fds.
*/
static nfds_t
tu_fence_update_fences_and_poll_fds(uint32_t fence_count,
const VkFence *fences,
bool wait_all,
struct pollfd *fds)
{
uint32_t nfds = 0;
uint32_t fds_idx = 0;
for (uint32_t i = 0; i < fence_count; i++) {
TU_FROM_HANDLE(tu_fence, fence, fences[i]);
/* skip wsi fences */
if (fence->fence_wsi)
continue;
/* no signaled fence in fds */
if (fence->signaled)
continue;
/* fds[fds_idx] corresponds to fences[i] */
assert(fence->fd == fds[fds_idx].fd);
assert(nfds <= fds_idx && fds_idx <= i);
/* fd is ready (errors are treated as ready) */
if (fds[fds_idx].revents) {
tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
} else if (wait_all) {
/* add to fds again for another poll */
fds[nfds].fd = fence->fd;
fds[nfds].events = POLLIN;
fds[nfds].revents = 0;
nfds++;
}
fds_idx++;
}
return nfds;
}
VkResult
tu_WaitForFences(VkDevice _device,
uint32_t fenceCount,
const VkFence *pFences,
VkBool32 waitAll,
uint64_t timeout)
{
TU_FROM_HANDLE(tu_device, device, _device);
if (tu_device_is_lost(device))
return VK_ERROR_DEVICE_LOST;
/* add a simpler path for when fenceCount == 1? */
struct pollfd stack_fds[8];
struct pollfd *fds = stack_fds;
if (fenceCount > ARRAY_SIZE(stack_fds)) {
fds = vk_alloc(&device->vk.alloc, sizeof(*fds) * fenceCount, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!fds)
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
/* set up pollfd array and start polling */
nfds_t nfds = tu_fence_init_poll_fds(fenceCount, pFences, waitAll, fds);
VkResult result = VK_SUCCESS;
while (nfds) {
result = tu_fence_poll_fds(fds, nfds, &timeout);
if (result != VK_SUCCESS)
break;
nfds = tu_fence_update_fences_and_poll_fds(fenceCount, pFences, waitAll,
fds);
}
if (fds != stack_fds)
vk_free(&device->vk.alloc, fds);
if (result != VK_SUCCESS)
return result;
for (uint32_t i = 0; i < fenceCount; ++i) {
TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
if (fence->fence_wsi) {
VkResult result = fence->fence_wsi->wait(fence->fence_wsi, timeout);
if (result != VK_SUCCESS)
return result;
}
}
return result;
}
VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
for (unsigned i = 0; i < fenceCount; ++i) {
TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
assert(tu_fence_get_state(fence) != TU_FENCE_STATE_PENDING);
tu_fence_set_state(fence, TU_FENCE_STATE_RESET, -1);
}
return VK_SUCCESS;
}
VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
TU_FROM_HANDLE(tu_fence, fence, _fence);
if (fence->fd >= 0) {
int err = sync_wait(fence->fd, 0);
if (!err)
tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
else if (err && errno != ETIME)
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
if (fence->fence_wsi) {
VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0);
if (result != VK_SUCCESS) {
if (result == VK_TIMEOUT)
return VK_NOT_READY;
return result;
}
}
return fence->signaled ? VK_SUCCESS : VK_NOT_READY;
}

View File

@ -297,17 +297,14 @@ tu_QueueSubmit(VkQueue _queue,
goto fail;
}
tu_fence_update_fd(&queue->submit_fence, fd);
if (queue->fence >= 0)
close(queue->fence);
queue->fence = fd;
}
}
fail:
vk_free(&queue->device->vk.alloc, cmds);
if (_fence != VK_NULL_HANDLE) {
TU_FROM_HANDLE(tu_fence, fence, _fence);
tu_fence_copy(fence, &queue->submit_fence);
}
return result;
}
@ -345,3 +342,63 @@ tu_DestroySemaphore(VkDevice _device,
{
tu_finishme("DestroySemaphore");
}
VkResult
tu_ImportFenceFdKHR(VkDevice _device,
const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
tu_stub();
return VK_SUCCESS;
}
VkResult
tu_GetFenceFdKHR(VkDevice _device,
const VkFenceGetFdInfoKHR *pGetFdInfo,
int *pFd)
{
tu_stub();
return VK_SUCCESS;
}
VkResult
tu_CreateFence(VkDevice _device,
const VkFenceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkFence *pFence)
{
tu_finishme("CreateFence");
return VK_SUCCESS;
}
void
tu_DestroyFence(VkDevice _device, VkFence _fence, const VkAllocationCallbacks *pAllocator)
{
tu_finishme("DestroyFence");
}
VkResult
tu_WaitForFences(VkDevice _device,
uint32_t fenceCount,
const VkFence *pFences,
VkBool32 waitAll,
uint64_t timeout)
{
tu_finishme("WaitForFences");
return VK_SUCCESS;
}
VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
tu_finishme("ResetFences");
return VK_SUCCESS;
}
VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
tu_finishme("GetFenceStatus");
return VK_SUCCESS;
}

View File

@ -291,22 +291,9 @@ struct tu_pipeline_key
struct tu_fence
{
struct vk_object_base base;
struct wsi_fence *fence_wsi;
bool signaled;
int fd;
uint32_t syncobj;
};
void
tu_fence_init(struct tu_fence *fence, bool signaled);
void
tu_fence_finish(struct tu_fence *fence);
void
tu_fence_update_fd(struct tu_fence *fence, int fd);
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src);
void
tu_fence_wait_idle(struct tu_fence *fence);
struct tu_queue
{
struct vk_object_base base;
@ -317,7 +304,7 @@ struct tu_queue
VkDeviceQueueCreateFlags flags;
uint32_t msm_queue_id;
struct tu_fence submit_fence;
int fence;
};
struct tu_bo

View File

@ -24,13 +24,12 @@
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include "tu_private.h"
#include "tu_cs.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_util.h"
#include <xf86drm.h>
#include <xf86drmMode.h>
#include "vk_format.h"
#include "util/debug.h"
#include "wsi_common_display.h"
@ -260,6 +259,19 @@ tu_DisplayPowerControlEXT(VkDevice _device,
display_power_info);
}
/* Export @syncobj as a file descriptor on DRM device @fd.
 *
 * Returns the new fd, or -1 on failure. Failure must be a negative value:
 * both callers test `sync_fd >= 0` for success, and fd 0 is a perfectly
 * valid descriptor, so the previous `return 0` error path silently turned
 * an ioctl failure into an apparent success (and made the trailing
 * `ret ? -1 : handle.fd` dead code).
 */
static int
import_syncobj(int fd, uint32_t syncobj)
{
   struct drm_syncobj_handle handle = { .handle = syncobj };

   int ret = ioctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle);
   if (ret)
      return -1;

   return handle.fd;
}
VkResult
tu_RegisterDeviceEventEXT(VkDevice _device,
const VkDeviceEventInfoEXT *device_event_info,
@ -267,26 +279,31 @@ tu_RegisterDeviceEventEXT(VkDevice _device,
VkFence *_fence)
{
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_fence *fence;
VkResult ret;
VkResult ret;
fence = vk_alloc2(&device->instance->alloc, allocator, sizeof (*fence),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!fence)
return VK_ERROR_OUT_OF_HOST_MEMORY;
ret = tu_CreateFence(_device, &(VkFenceCreateInfo) {}, allocator, _fence);
if (ret != VK_SUCCESS)
return ret;
tu_fence_init(fence, false);
TU_FROM_HANDLE(tu_fence, fence, *_fence);
int sync_fd = import_syncobj(device->fd, fence->syncobj);
if (sync_fd >= 0) {
ret = wsi_register_device_event(_device,
&device->physical_device->wsi_device,
device_event_info,
allocator,
NULL,
sync_fd);
} else {
ret = VK_ERROR_OUT_OF_HOST_MEMORY;
}
close(sync_fd);
if (ret != VK_SUCCESS)
tu_DestroyFence(_device, *_fence, allocator);
ret = wsi_register_device_event(_device,
&device->physical_device->wsi_device,
device_event_info,
allocator,
&fence->fence_wsi,
-1);
if (ret == VK_SUCCESS)
*_fence = tu_fence_to_handle(fence);
else
vk_free2(&device->instance->alloc, allocator, fence);
return ret;
}
@ -298,29 +315,32 @@ tu_RegisterDisplayEventEXT(VkDevice _device,
VkFence *_fence)
{
TU_FROM_HANDLE(tu_device, device, _device);
VkResult ret;
struct tu_fence *fence;
VkResult ret;
ret = tu_CreateFence(_device, &(VkFenceCreateInfo) {}, allocator, _fence);
if (ret != VK_SUCCESS)
return ret;
fence = vk_alloc2(&device->instance->alloc, allocator, sizeof (*fence),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!fence)
return VK_ERROR_OUT_OF_HOST_MEMORY;
TU_FROM_HANDLE(tu_fence, fence, *_fence);
tu_fence_init(fence, false);
int sync_fd = import_syncobj(device->fd, fence->syncobj);
if (sync_fd >= 0) {
ret = wsi_register_display_event(_device,
&device->physical_device->wsi_device,
display,
display_event_info,
allocator,
NULL,
sync_fd);
} else {
ret = VK_ERROR_OUT_OF_HOST_MEMORY;
}
ret = wsi_register_display_event(_device,
&device->physical_device->wsi_device,
display,
display_event_info,
allocator,
&fence->fence_wsi,
-1);
close(sync_fd);
if (ret != VK_SUCCESS)
tu_DestroyFence(_device, *_fence, allocator);
if (ret == VK_SUCCESS)
*_fence = tu_fence_to_handle(fence);
else
vk_free2(&device->instance->alloc, allocator, fence);
return ret;
}