panvk: Add vkEvents support

Use syncobjs to implement vkEvents (as suggested by Boris).

Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11709>
Author: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Date:   2021-06-04 10:25:24 +02:00
parent 5defffaa4d
commit 1e23004600
3 changed files with 277 additions and 20 deletions
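The implementation below builds VkEvent entirely on DRM sync objects: each event owns one syncobj, vkCmdSetEvent/vkCmdResetEvent/vkCmdWaitEvents are recorded as per-batch operations that get resolved at submit time, and the host-side entry points drive the syncobj directly. For background, here is a minimal standalone sketch of the syncobj ioctls the new code leans on; the helper names are illustrative rather than part of the driver, error handling is elided, and an open DRM device fd is assumed.

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>

/* Backing store for a VkEvent: one DRM syncobj (see panvk_CreateEvent). */
static uint32_t
create_event_syncobj(int drm_fd)
{
   struct drm_syncobj_create create = { .flags = 0 };

   drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
   return create.handle;
}

/* Host-side set/reset map to the SIGNAL/RESET ioctls on that syncobj. */
static void
set_event_syncobj(int drm_fd, uint32_t syncobj)
{
   struct drm_syncobj_array objs = {
      .handles = (uint64_t)(uintptr_t)&syncobj,
      .count_handles = 1,
   };

   drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &objs);
}

static void
reset_event_syncobj(int drm_fd, uint32_t syncobj)
{
   struct drm_syncobj_array objs = {
      .handles = (uint64_t)(uintptr_t)&syncobj,
      .count_handles = 1,
   };

   drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_RESET, &objs);
}

/* Polling is a zero-timeout wait; a -1/ETIME result means "not signaled
 * yet", and WAIT_FOR_SUBMIT keeps the wait valid even before any fence
 * has been attached to the syncobj. */
static bool
event_syncobj_signaled(int drm_fd, uint32_t syncobj)
{
   struct drm_syncobj_wait wait = {
      .handles = (uintptr_t)&syncobj,
      .count_handles = 1,
      .timeout_nsec = 0,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait) == 0;
}

GPU-side signaling never goes through DRM_IOCTL_SYNCOBJ_SIGNAL; instead, at submit time the queue's own sync is transferred into the event's syncobj (panvk_queue_transfer_sync in the second file), so the event signals when the batch that set it completes.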


@@ -49,6 +49,9 @@ panvk_reset_cmdbuf(struct panvk_cmd_buffer *cmdbuf)
       util_dynarray_fini(&batch->jobs);
       if (!pan_is_bifrost(pdev))
          panfrost_bo_unreference(batch->tiler.ctx.midgard.polygon_list);
+
+      util_dynarray_fini(&batch->event_ops);
+
       vk_free(&cmdbuf->pool->alloc, batch);
    }
@@ -119,6 +122,9 @@ panvk_destroy_cmdbuf(struct panvk_cmd_buffer *cmdbuf)
       util_dynarray_fini(&batch->jobs);
       if (!pan_is_bifrost(pdev))
          panfrost_bo_unreference(batch->tiler.ctx.midgard.polygon_list);
+
+      util_dynarray_fini(&batch->event_ops);
+
       vk_free(&cmdbuf->pool->alloc, batch);
    }
@@ -690,6 +696,7 @@ panvk_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                                       sizeof(*cmdbuf->state.batch), 8,
                                       VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
    util_dynarray_init(&cmdbuf->state.batch->jobs, NULL);
+   util_dynarray_init(&cmdbuf->state.batch->event_ops, NULL);
    cmdbuf->state.clear = vk_zalloc(&cmdbuf->pool->alloc,
                                    sizeof(*cmdbuf->state.clear) *
                                    pRenderPassBegin->clearValueCount, 8,
@@ -769,17 +776,31 @@ panvk_cmd_get_midgard_polygon_list(struct panvk_cmd_buffer *cmdbuf,
 void
 panvk_cmd_close_batch(struct panvk_cmd_buffer *cmdbuf)
 {
-   assert(cmdbuf->state.batch);
-   if (!cmdbuf->state.batch->fragment_job &&
-       !cmdbuf->state.batch->scoreboard.first_job) {
-      vk_free(&cmdbuf->pool->alloc, cmdbuf->state.batch);
+   struct panvk_batch *batch = cmdbuf->state.batch;
+
+   assert(batch);
+   if (!batch->fragment_job && !batch->scoreboard.first_job) {
+      if (util_dynarray_num_elements(&batch->event_ops, struct panvk_event_op) == 0) {
+         /* Content-less batch, let's drop it */
+         vk_free(&cmdbuf->pool->alloc, batch);
+      } else {
+         /* Batch has no jobs but is needed for synchronization, let's add a
+          * NULL job so the SUBMIT ioctl doesn't choke on it.
+          */
+         struct panfrost_ptr ptr = pan_pool_alloc_desc(&cmdbuf->desc_pool.base,
+                                                       JOB_HEADER);
+
+         util_dynarray_append(&batch->jobs, void *, ptr.cpu);
+         panfrost_add_job(&cmdbuf->desc_pool.base, &batch->scoreboard,
+                          MALI_JOB_TYPE_NULL, false, false, 0, 0,
+                          &ptr, false);
+         list_addtail(&batch->node, &cmdbuf->batches);
+      }
       cmdbuf->state.batch = NULL;
       return;
    }
 
    struct panfrost_device *pdev = &cmdbuf->device->physical_device->pdev;
-   struct panvk_batch *batch = cmdbuf->state.batch;
 
    list_addtail(&cmdbuf->state.batch->node, &cmdbuf->batches);
@@ -1480,12 +1501,79 @@ panvk_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
    }
 }
 
+static void
+panvk_add_set_event_operation(struct panvk_cmd_buffer *cmdbuf,
+                              struct panvk_event *event,
+                              enum panvk_event_op_type type)
+{
+   struct panvk_event_op op = {
+      .type = type,
+      .event = event,
+   };
+
+   if (cmdbuf->state.batch == NULL) {
+      /* No open batch, let's create a new one so this operation happens in
+       * the right order.
+       */
+      panvk_cmd_open_batch(cmdbuf);
+      util_dynarray_append(&cmdbuf->state.batch->event_ops,
+                           struct panvk_event_op,
+                           op);
+      panvk_cmd_close_batch(cmdbuf);
+   } else {
+      /* Let's close the current batch so the operation executes before any
+       * future commands.
+       */
+      util_dynarray_append(&cmdbuf->state.batch->event_ops,
+                           struct panvk_event_op,
+                           op);
+      panvk_cmd_close_batch(cmdbuf);
+      panvk_cmd_open_batch(cmdbuf);
+   }
+}
+
+static void
+panvk_add_wait_event_operation(struct panvk_cmd_buffer *cmdbuf,
+                               struct panvk_event *event)
+{
+   struct panvk_event_op op = {
+      .type = PANVK_EVENT_OP_WAIT,
+      .event = event,
+   };
+
+   if (cmdbuf->state.batch == NULL) {
+      /* No open batch, let's create a new one and have it wait for this
+       * event.
+       */
+      panvk_cmd_open_batch(cmdbuf);
+      util_dynarray_append(&cmdbuf->state.batch->event_ops,
+                           struct panvk_event_op,
+                           op);
+   } else {
+      /* Let's close the current batch so any future commands wait on the
+       * event signal operation.
+       */
+      if (cmdbuf->state.batch->fragment_job ||
+          cmdbuf->state.batch->scoreboard.first_job) {
+         panvk_cmd_close_batch(cmdbuf);
+         panvk_cmd_open_batch(cmdbuf);
+      }
+      util_dynarray_append(&cmdbuf->state.batch->event_ops,
+                           struct panvk_event_op,
+                           op);
+   }
+}
+
 void
 panvk_CmdSetEvent(VkCommandBuffer commandBuffer,
                   VkEvent _event,
                   VkPipelineStageFlags stageMask)
 {
-   panvk_stub();
+   VK_FROM_HANDLE(panvk_cmd_buffer, cmdbuf, commandBuffer);
+   VK_FROM_HANDLE(panvk_event, event, _event);
+
+   /* vkCmdSetEvent cannot be called inside a render pass */
+   assert(cmdbuf->state.pass == NULL);
+
+   panvk_add_set_event_operation(cmdbuf, event, PANVK_EVENT_OP_SET);
 }
 
 void
@@ -1493,7 +1581,13 @@ panvk_CmdResetEvent(VkCommandBuffer commandBuffer,
                     VkEvent _event,
                     VkPipelineStageFlags stageMask)
 {
-   panvk_stub();
+   VK_FROM_HANDLE(panvk_cmd_buffer, cmdbuf, commandBuffer);
+   VK_FROM_HANDLE(panvk_event, event, _event);
+
+   /* vkCmdResetEvent cannot be called inside a render pass */
+   assert(cmdbuf->state.pass == NULL);
+
+   panvk_add_set_event_operation(cmdbuf, event, PANVK_EVENT_OP_RESET);
 }
 
 void
@@ -1509,7 +1603,14 @@ panvk_CmdWaitEvents(VkCommandBuffer commandBuffer,
                     uint32_t imageMemoryBarrierCount,
                     const VkImageMemoryBarrier *pImageMemoryBarriers)
 {
-   panvk_stub();
+   VK_FROM_HANDLE(panvk_cmd_buffer, cmdbuf, commandBuffer);
+
+   assert(eventCount > 0);
+
+   for (uint32_t i = 0; i < eventCount; i++) {
+      VK_FROM_HANDLE(panvk_event, event, pEvents[i]);
+      panvk_add_wait_event_operation(cmdbuf, event);
+   }
 }
 
 void


@@ -1186,6 +1186,58 @@ panvk_queue_transfer_sync(struct panvk_queue *queue, uint32_t syncobj)
    close(handle.fd);
 }
 
+static void
+panvk_add_wait_event_syncobjs(struct panvk_batch *batch, uint32_t *in_fences,
+                              unsigned *nr_in_fences)
+{
+   util_dynarray_foreach(&batch->event_ops, struct panvk_event_op, op) {
+      switch (op->type) {
+      case PANVK_EVENT_OP_SET:
+         /* Nothing to do yet */
+         break;
+      case PANVK_EVENT_OP_RESET:
+         /* Nothing to do yet */
+         break;
+      case PANVK_EVENT_OP_WAIT:
+         in_fences[(*nr_in_fences)++] = op->event->syncobj;
+         break;
+      default:
+         unreachable("bad panvk_event_op type");
+      }
+   }
+}
+
+static void
+panvk_signal_event_syncobjs(struct panvk_queue *queue, struct panvk_batch *batch)
+{
+   const struct panfrost_device *pdev = &queue->device->physical_device->pdev;
+
+   util_dynarray_foreach(&batch->event_ops, struct panvk_event_op, op) {
+      switch (op->type) {
+      case PANVK_EVENT_OP_SET: {
+         panvk_queue_transfer_sync(queue, op->event->syncobj);
+         break;
+      }
+      case PANVK_EVENT_OP_RESET: {
+         struct panvk_event *event = op->event;
+         struct drm_syncobj_array objs = {
+            .handles = (uint64_t) (uintptr_t) &event->syncobj,
+            .count_handles = 1
+         };
+
+         int ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_RESET, &objs);
+         assert(!ret);
+         break;
+      }
+      case PANVK_EVENT_OP_WAIT:
+         /* Nothing left to do */
+         break;
+      default:
+         unreachable("bad panvk_event_op type");
+      }
+   }
+}
+
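panvk_queue_transfer_sync(), called for PANVK_EVENT_OP_SET above, sits mostly outside this hunk; only its tail (close(handle.fd)) is visible in the context lines. Judging by that fd, it round-trips the queue's current fence through a sync file. A hedged reconstruction of that pattern follows; the helper name and exact shape are assumptions, not the driver's actual code.

#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

/* Hypothetical sketch: attach the fence currently backing src_syncobj to
 * dst_syncobj by exporting it as a sync file and importing it again. */
static void
transfer_syncobj_fence(int drm_fd, uint32_t dst_syncobj, uint32_t src_syncobj)
{
   struct drm_syncobj_handle handle = {
      .handle = src_syncobj,
      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
      .fd = -1,
   };

   /* Export the fence attached to the source syncobj as a sync-file fd. */
   drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle);

   struct drm_syncobj_handle import = {
      .handle = dst_syncobj,
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
      .fd = handle.fd,
   };

   /* Import that fence into the destination syncobj, replacing whatever
    * fence it held before. */
   drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &import);

   close(handle.fd);
}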
 VkResult
 panvk_QueueSubmit(VkQueue _queue,
                   uint32_t submitCount,
@@ -1198,14 +1250,14 @@ panvk_QueueSubmit(VkQueue _queue,
    for (uint32_t i = 0; i < submitCount; ++i) {
       const VkSubmitInfo *submit = pSubmits + i;
-      unsigned nr_in_fences = submit->waitSemaphoreCount + 1;
-      uint32_t in_fences[nr_in_fences];
+      unsigned nr_semaphores = submit->waitSemaphoreCount + 1;
+      uint32_t semaphores[nr_semaphores];
 
-      in_fences[0] = queue->sync;
+      semaphores[0] = queue->sync;
       for (unsigned i = 0; i < submit->waitSemaphoreCount; i++) {
          VK_FROM_HANDLE(panvk_semaphore, sem, submit->pWaitSemaphores[i]);
-         in_fences[i + 1] = sem->syncobj.temporary ? : sem->syncobj.permanent;
+         semaphores[i + 1] = sem->syncobj.temporary ? : sem->syncobj.permanent;
       }
 
       for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
@@ -1250,7 +1302,20 @@
             bos[bo_idx++] = pdev->sample_positions->gem_handle;
          assert(bo_idx == nr_bos);
 
+         unsigned nr_in_fences = 0;
+         unsigned max_wait_event_syncobjs =
+            util_dynarray_num_elements(&batch->event_ops,
+                                       struct panvk_event_op);
+         uint32_t in_fences[nr_semaphores + max_wait_event_syncobjs];
+         memcpy(in_fences, semaphores, nr_semaphores * sizeof(*in_fences));
+         nr_in_fences += nr_semaphores;
+
+         panvk_add_wait_event_syncobjs(batch, in_fences, &nr_in_fences);
+
          panvk_queue_submit_batch(queue, batch, bos, nr_bos, in_fences, nr_in_fences);
+
+         panvk_signal_event_syncobjs(queue, batch);
       }
    }
@@ -1634,7 +1699,25 @@ panvk_CreateEvent(VkDevice _device,
                   const VkAllocationCallbacks *pAllocator,
                   VkEvent *pEvent)
 {
-   panvk_stub();
+   VK_FROM_HANDLE(panvk_device, device, _device);
+   const struct panfrost_device *pdev = &device->physical_device->pdev;
+   struct panvk_event *event =
+      vk_object_zalloc(&device->vk, pAllocator, sizeof(*event),
+                       VK_OBJECT_TYPE_EVENT);
+   if (!event)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   struct drm_syncobj_create create = {
+      .flags = 0,
+   };
+
+   int ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
+   if (ret) {
+      /* Don't leak the event object if syncobj creation fails */
+      vk_object_free(&device->vk, pAllocator, event);
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   }
+
+   event->syncobj = create.handle;
+   *pEvent = panvk_event_to_handle(event);
+
+   return VK_SUCCESS;
 }
@@ -1643,28 +1726,88 @@ panvk_DestroyEvent(VkDevice _device,
                    VkEvent _event,
                    const VkAllocationCallbacks *pAllocator)
 {
-   panvk_stub();
+   VK_FROM_HANDLE(panvk_device, device, _device);
+   VK_FROM_HANDLE(panvk_event, event, _event);
+   const struct panfrost_device *pdev = &device->physical_device->pdev;
+
+   if (!event)
+      return;
+
+   struct drm_syncobj_destroy destroy = { .handle = event->syncobj };
+   drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
+
+   vk_object_free(&device->vk, pAllocator, event);
 }
 
 VkResult
 panvk_GetEventStatus(VkDevice _device, VkEvent _event)
 {
-   panvk_stub();
-   return VK_EVENT_RESET;
+   VK_FROM_HANDLE(panvk_device, device, _device);
+   VK_FROM_HANDLE(panvk_event, event, _event);
+   const struct panfrost_device *pdev = &device->physical_device->pdev;
+   bool signaled;
+
+   struct drm_syncobj_wait wait = {
+      .handles = (uintptr_t) &event->syncobj,
+      .count_handles = 1,
+      .timeout_nsec = 0,
+      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+   };
+
+   int ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
+   if (ret) {
+      if (errno == ETIME) {
+         signaled = false;
+      } else {
+         assert(0);
+         return VK_ERROR_DEVICE_LOST; /* TODO */
+      }
+   } else {
+      signaled = true;
+   }
+
+   return signaled ? VK_EVENT_SET : VK_EVENT_RESET;
 }
 
 VkResult
 panvk_SetEvent(VkDevice _device, VkEvent _event)
 {
-   panvk_stub();
-   return VK_SUCCESS;
+   VK_FROM_HANDLE(panvk_device, device, _device);
+   VK_FROM_HANDLE(panvk_event, event, _event);
+   const struct panfrost_device *pdev = &device->physical_device->pdev;
+
+   struct drm_syncobj_array objs = {
+      .handles = (uint64_t) (uintptr_t) &event->syncobj,
+      .count_handles = 1
+   };
+
+   /* This is going to just replace the fence for this syncobj with one that
+    * is already in signaled state. This won't be a problem because the spec
+    * mandates that the event will have been set before the vkCmdWaitEvents
+    * command executes.
+    * https://www.khronos.org/registry/vulkan/specs/1.2/html/chap6.html#commandbuffers-submission-progress
+    */
+   if (drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &objs))
+      return VK_ERROR_DEVICE_LOST;
+
+   return VK_SUCCESS;
 }
 
 VkResult
 panvk_ResetEvent(VkDevice _device, VkEvent _event)
 {
-   panvk_stub();
-   return VK_SUCCESS;
+   VK_FROM_HANDLE(panvk_device, device, _device);
+   VK_FROM_HANDLE(panvk_event, event, _event);
+   const struct panfrost_device *pdev = &device->physical_device->pdev;
+
+   struct drm_syncobj_array objs = {
+      .handles = (uint64_t) (uintptr_t) &event->syncobj,
+      .count_handles = 1
+   };
+
+   if (drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_RESET, &objs))
+      return VK_ERROR_DEVICE_LOST;
+
+   return VK_SUCCESS;
 }
 
 VkResult


@@ -252,6 +252,7 @@ panvk_device_is_lost(struct panvk_device *device)
 struct panvk_batch {
    struct list_head node;
    struct util_dynarray jobs;
+   struct util_dynarray event_ops;
    struct pan_scoreboard scoreboard;
    struct {
       const struct panvk_framebuffer *info;
@@ -280,6 +281,17 @@ struct panvk_syncobj {
    uint32_t permanent, temporary;
 };
 
+enum panvk_event_op_type {
+   PANVK_EVENT_OP_SET,
+   PANVK_EVENT_OP_RESET,
+   PANVK_EVENT_OP_WAIT,
+};
+
+struct panvk_event_op {
+   enum panvk_event_op_type type;
+   struct panvk_event *event;
+};
+
 struct panvk_fence {
    struct vk_object_base base;
    struct panvk_syncobj syncobj;
@@ -668,6 +680,7 @@ panvk_pack_color(struct panvk_clear_value *out,
 struct panvk_event {
    struct vk_object_base base;
+   uint32_t syncobj;
 };
 
 struct panvk_shader_module {
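
Taken together, the entry points this commit fills in can be exercised with stock Vulkan 1.x calls. A short host-side usage sketch follows; the helper name record_event_dependency is illustrative, and error handling is elided.

#include <vulkan/vulkan.h>

/* Record a cross-command-buffer dependency through an event: cmdbuf_a
 * sets it after its work, cmdbuf_b waits on it before its own work. */
static void
record_event_dependency(VkDevice dev, VkCommandBuffer cmdbuf_a,
                        VkCommandBuffer cmdbuf_b, VkEvent *out_event)
{
   const VkEventCreateInfo info = {
      .sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
   };

   vkCreateEvent(dev, &info, NULL, out_event);      /* panvk_CreateEvent */

   /* Recorded outside a render pass; lands in the batch's event_ops as a
    * PANVK_EVENT_OP_SET. */
   vkCmdSetEvent(cmdbuf_a, *out_event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);

   /* Recorded as a PANVK_EVENT_OP_WAIT; at submit time the event's syncobj
    * is folded into the batch's in-fence array. */
   vkCmdWaitEvents(cmdbuf_b, 1, out_event,
                   VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                   VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                   0, NULL, 0, NULL, 0, NULL);
}

Once the first command buffer's batch has been submitted, vkGetEventStatus() polls the event's syncobj with a zero-timeout wait and returns VK_EVENT_SET as soon as the transferred fence signals.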