zink: require timeline semaphores

this allows the removal of tons of awful code

Acked-by: Adam Jackson <ajax@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15904>
Mike Blumenkrantz 2022-03-25 12:18:42 -04:00 committed by Marge Bot
parent 07c86e99b1
commit bc2e29accd
10 changed files with 29 additions and 239 deletions
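For context on the change: with VK_KHR_timeline_semaphore mandatory, batch completion can be tracked through a single semaphore whose 64-bit counter is the batch id, instead of a VkFence per batch state plus a fallback wait path. Below is a minimal sketch of creating such a semaphore, using core Vulkan 1.2 names (the KHR extension exposes the same structures with a KHR suffix); the helper name and the dev handle are assumptions for illustration, not code from this commit.

#include <stdbool.h>
#include <vulkan/vulkan.h>

/* sketch: one timeline semaphore stands in for per-batch VkFence objects */
static bool
create_timeline_semaphore(VkDevice dev, VkSemaphore *timeline)
{
   VkSemaphoreTypeCreateInfo tci = {0};
   tci.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO;
   tci.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE;
   tci.initialValue = 0;

   VkSemaphoreCreateInfo sci = {0};
   sci.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
   sci.pNext = &tci;

   return vkCreateSemaphore(dev, &sci, NULL, timeline) == VK_SUCCESS;
}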

@@ -37,6 +37,7 @@ Here's a list of those requirements:
* Device extensions:
* `VK_KHR_maintenance1`_
* ``VK_KHR_timeline_semaphore``
* `VK_EXT_custom_border_color`_
* `VK_EXT_provoking_vertex`_
* `VK_EXT_line_rasterization`_, with the following ``VkPhysicalDeviceLineRasterizationFeaturesEXT``:
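
VK_KHR_timeline_semaphore in the list above is the new hard requirement. As a hedged illustration of what enforcing it involves on the Vulkan side (this is not the check zink performs; the function name and pdev handle are hypothetical), the feature bit can be queried rather than relying on the extension string alone:

#include <stdbool.h>
#include <vulkan/vulkan.h>

/* sketch: confirm the timelineSemaphore feature bit on a physical device */
static bool
has_timeline_semaphores(VkPhysicalDevice pdev)
{
   VkPhysicalDeviceTimelineSemaphoreFeatures ts = {0};
   ts.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES;

   VkPhysicalDeviceFeatures2 feats = {0};
   feats.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
   feats.pNext = &ts;

   vkGetPhysicalDeviceFeatures2(pdev, &feats);
   return ts.timelineSemaphore == VK_TRUE;
}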

@@ -166,9 +166,6 @@ zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs
cnd_destroy(&bs->usage.flush);
mtx_destroy(&bs->usage.mtx);
if (bs->fence.fence)
VKSCR(DestroyFence)(screen->dev, bs->fence.fence, NULL);
if (bs->cmdbuf)
VKSCR(FreeCommandBuffers)(screen->dev, bs->cmdpool, 1, &bs->cmdbuf);
if (bs->barrier_cmdbuf)
@@ -196,7 +193,6 @@ create_batch_state(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_batch_state *bs = rzalloc(NULL, struct zink_batch_state);
bs->have_timelines = ctx->have_timelines;
VkCommandPoolCreateInfo cpci = {0};
cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
cpci.queueFamilyIndex = screen->gfx_queue;
@@ -244,14 +240,6 @@ create_batch_state(struct zink_context *ctx)
if (!screen->batch_descriptor_init(screen, bs))
goto fail;
if (!screen->info.have_KHR_timeline_semaphore) {
VkFenceCreateInfo fci = {0};
fci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
if (VKSCR(CreateFence)(screen->dev, &fci, NULL, &bs->fence.fence) != VK_SUCCESS)
goto fail;
}
util_queue_fence_init(&bs->flush_completed);
bs->queue = screen->threaded ? screen->thread_queue : screen->queue;
@@ -290,9 +278,6 @@ get_batch_state(struct zink_context *ctx, struct zink_batch *batch)
}
simple_mtx_unlock(&ctx->batch_mtx);
if (bs) {
if (bs->fence.submitted && !bs->fence.completed && bs->fence.fence)
/* this fence is already done, so we need vulkan to release the cmdbuf */
zink_vkfence_wait(screen, &bs->fence, PIPE_TIMEOUT_INFINITE);
zink_reset_batch_state(ctx, bs);
} else {
if (!batch->state) {
@@ -352,7 +337,7 @@ post_submit(void *data, void *gdata, int thread_index)
bs->ctx->reset.reset(bs->ctx->reset.data, PIPE_GUILTY_CONTEXT_RESET);
screen->device_lost = true;
} else if (bs->ctx->batch_states_count > screen->max_fences) {
zink_screen_batch_id_wait(screen, bs->fence.batch_id - (screen->max_fences / 2), PIPE_TIMEOUT_INFINITE);
zink_screen_timeline_wait(screen, bs->fence.batch_id - (screen->max_fences / 2), PIPE_TIMEOUT_INFINITE);
}
}
@@ -369,17 +354,12 @@ submit_queue(void *data, void *gdata, int thread_index)
bs->usage.usage = bs->fence.batch_id;
bs->usage.unflushed = false;
if (ctx->have_timelines && screen->last_finished > bs->fence.batch_id && bs->fence.batch_id == 1) {
if (screen->last_finished > bs->fence.batch_id && bs->fence.batch_id == 1) {
if (!zink_screen_init_semaphore(screen)) {
debug_printf("timeline init failed, things are about to go dramatically wrong.");
ctx->have_timelines = false;
}
}
if (bs->fence.fence && VKSCR(ResetFences)(screen->dev, 1, &bs->fence.fence) != VK_SUCCESS) {
mesa_loge("ZINK: vkResetFences failed");
}
uint64_t batch_id = bs->fence.batch_id;
/* first submit is just for acquire waits since they have a separate array */
si[0].sType = si[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
@@ -408,14 +388,12 @@ submit_queue(void *data, void *gdata, int thread_index)
si[1].pSignalSemaphores = signals;
VkTimelineSemaphoreSubmitInfo tsi = {0};
uint64_t signal_values[2] = {0};
if (bs->have_timelines) {
tsi.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
si[1].pNext = &tsi;
tsi.pSignalSemaphoreValues = signal_values;
signal_values[si[1].signalSemaphoreCount] = batch_id;
signals[si[1].signalSemaphoreCount++] = screen->sem;
tsi.signalSemaphoreValueCount = si[1].signalSemaphoreCount;
}
tsi.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
si[1].pNext = &tsi;
tsi.pSignalSemaphoreValues = signal_values;
signal_values[si[1].signalSemaphoreCount] = batch_id;
signals[si[1].signalSemaphoreCount++] = screen->sem;
tsi.signalSemaphoreValueCount = si[1].signalSemaphoreCount;
if (bs->present)
signals[si[1].signalSemaphoreCount++] = bs->present;
@@ -441,7 +419,7 @@ submit_queue(void *data, void *gdata, int thread_index)
}
simple_mtx_lock(&screen->queue_lock);
if (VKSCR(QueueSubmit)(bs->queue, 2, si, bs->fence.fence) != VK_SUCCESS) {
if (VKSCR(QueueSubmit)(bs->queue, 2, si, VK_NULL_HANDLE) != VK_SUCCESS) {
mesa_loge("ZINK: vkQueueSubmit failed");
bs->is_device_lost = true;
}
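The hunk above makes submit_queue chain VkTimelineSemaphoreSubmitInfo unconditionally, signaling screen->sem with the batch id, and passes VK_NULL_HANDLE instead of a per-batch fence to vkQueueSubmit. The CPU-side counterpart, roughly what zink_screen_timeline_wait amounts to, is a plain vkWaitSemaphores on the same counter; this is a sketch with assumed handle names, not a copy of the driver's helper.

#include <stdbool.h>
#include <stdint.h>
#include <vulkan/vulkan.h>

/* sketch: block until the timeline counter reaches batch_id, or time out */
static bool
wait_for_batch(VkDevice dev, VkSemaphore timeline, uint64_t batch_id, uint64_t timeout_ns)
{
   VkSemaphoreWaitInfo wi = {0};
   wi.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO;
   wi.semaphoreCount = 1;
   wi.pSemaphores = &timeline;
   wi.pValues = &batch_id;

   /* VK_TIMEOUT means "not finished yet"; other errors imply device loss */
   return vkWaitSemaphores(dev, &wi, timeout_ns) == VK_SUCCESS;
}
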
@@ -475,9 +453,6 @@ zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
if (!zink_check_batch_completion(ctx, fence->batch_id, true))
break;
if (bs->fence.submitted && !bs->fence.completed && bs->fence.fence)
/* this fence is already done, so we need vulkan to release the cmdbuf */
zink_vkfence_wait(screen, &bs->fence, PIPE_TIMEOUT_INFINITE);
pop_batch_state(ctx);
zink_reset_batch_state(ctx, bs);
util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
@@ -649,7 +624,7 @@ zink_screen_usage_check_completion(struct zink_screen *screen, const struct zink
if (zink_batch_usage_is_unflushed(u))
return false;
return zink_screen_batch_id_wait(screen, u->usage, 0);
return zink_screen_timeline_wait(screen, u->usage, 0);
}
bool

@@ -133,7 +133,6 @@ struct zink_batch_state {
unsigned submit_count;
bool is_device_lost;
bool have_timelines;
bool has_barriers;
};

@@ -2614,10 +2614,7 @@ stall(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
sync_flush(ctx, zink_batch_state(ctx->last_fence));
if (ctx->have_timelines)
zink_screen_timeline_wait(screen, ctx->last_fence->batch_id, PIPE_TIMEOUT_INFINITE);
else
zink_vkfence_wait(screen, ctx->last_fence, PIPE_TIMEOUT_INFINITE);
zink_screen_timeline_wait(screen, ctx->last_fence->batch_id, PIPE_TIMEOUT_INFINITE);
zink_batch_reset_all(ctx);
}
@@ -3330,10 +3327,7 @@ zink_flush(struct pipe_context *pctx,
* in some cases in order to correctly draw the first frame, though it's
* unknown at this time why this is the case
*/
if (screen->info.have_KHR_timeline_semaphore)
zink_screen_timeline_wait(screen, fence->batch_id, PIPE_TIMEOUT_INFINITE);
else
zink_vkfence_wait(screen, fence, PIPE_TIMEOUT_INFINITE);
zink_screen_timeline_wait(screen, fence->batch_id, PIPE_TIMEOUT_INFINITE);
ctx->first_frame_done = true;
}
}
@@ -3362,40 +3356,8 @@ zink_wait_on_batch(struct zink_context *ctx, uint32_t batch_id)
batch_id = bs->fence.batch_id;
}
assert(batch_id);
if (ctx->have_timelines) {
if (!zink_screen_timeline_wait(zink_screen(ctx->base.screen), batch_id, UINT64_MAX))
check_device_lost(ctx);
return;
}
simple_mtx_lock(&ctx->batch_mtx);
struct zink_fence *fence;
assert(ctx->last_fence);
if (batch_id == zink_batch_state(ctx->last_fence)->fence.batch_id)
fence = ctx->last_fence;
else {
for (bs = ctx->batch_states; bs; bs = bs->next) {
if (bs->fence.batch_id < batch_id)
continue;
if (!bs->fence.batch_id || bs->fence.batch_id > batch_id)
break;
}
if (!bs || bs->fence.batch_id != batch_id) {
simple_mtx_unlock(&ctx->batch_mtx);
/* if we can't find it, it either must have finished already or is on a different context */
if (!zink_screen_check_last_finished(zink_screen(ctx->base.screen), batch_id)) {
/* if it hasn't finished, it's on another context, so force a flush so there's something to wait on */
ctx->batch.has_work = true;
zink_fence_wait(&ctx->base);
}
return;
}
fence = &bs->fence;
}
simple_mtx_unlock(&ctx->batch_mtx);
assert(fence);
sync_flush(ctx, zink_batch_state(fence));
zink_vkfence_wait(zink_screen(ctx->base.screen), fence, PIPE_TIMEOUT_INFINITE);
if (!zink_screen_timeline_wait(zink_screen(ctx->base.screen), batch_id, UINT64_MAX))
check_device_lost(ctx);
}
bool
@@ -3409,42 +3371,10 @@ zink_check_batch_completion(struct zink_context *ctx, uint32_t batch_id, bool ha
if (zink_screen_check_last_finished(zink_screen(ctx->base.screen), batch_id))
return true;
if (ctx->have_timelines) {
bool success = zink_screen_timeline_wait(zink_screen(ctx->base.screen), batch_id, 0);
if (!success)
check_device_lost(ctx);
return success;
}
struct zink_fence *fence;
if (!have_lock)
simple_mtx_lock(&ctx->batch_mtx);
if (ctx->last_fence && batch_id == zink_batch_state(ctx->last_fence)->fence.batch_id)
fence = ctx->last_fence;
else {
struct zink_batch_state *bs;
for (bs = ctx->batch_states; bs; bs = bs->next) {
if (bs->fence.batch_id < batch_id)
continue;
if (!bs->fence.batch_id || bs->fence.batch_id > batch_id)
break;
}
if (!bs || bs->fence.batch_id != batch_id) {
if (!have_lock)
simple_mtx_unlock(&ctx->batch_mtx);
/* return compare against last_finished, since this has info from all contexts */
return zink_screen_check_last_finished(zink_screen(ctx->base.screen), batch_id);
}
fence = &bs->fence;
}
if (!have_lock)
simple_mtx_unlock(&ctx->batch_mtx);
assert(fence);
if (zink_screen(ctx->base.screen)->threaded &&
!util_queue_fence_is_signalled(&zink_batch_state(fence)->flush_completed))
return false;
return zink_vkfence_wait(zink_screen(ctx->base.screen), fence, 0);
bool success = zink_screen_timeline_wait(zink_screen(ctx->base.screen), batch_id, 0);
if (!success)
check_device_lost(ctx);
return success;
}
static void
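zink_check_batch_completion above reduces to a zero-timeout timeline wait. An equivalent non-blocking poll can also read the counter directly; this sketch is an alternative formulation under the same assumed names, not what the driver does.

#include <stdbool.h>
#include <stdint.h>
#include <vulkan/vulkan.h>

/* sketch: "has batch_id completed?" without blocking */
static bool
batch_completed(VkDevice dev, VkSemaphore timeline, uint64_t batch_id)
{
   uint64_t current = 0;
   if (vkGetSemaphoreCounterValue(dev, timeline, &current) != VK_SUCCESS)
      return false; /* a real driver would treat this as device loss */
   return current >= batch_id;
}
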
@@ -4305,7 +4235,6 @@ zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
struct zink_context *ctx = rzalloc(NULL, struct zink_context);
if (!ctx)
goto fail;
ctx->have_timelines = screen->info.have_KHR_timeline_semaphore;
ctx->pipeline_changed[0] = ctx->pipeline_changed[1] = true;
ctx->gfx_pipeline_state.dirty = true;
@@ -4484,7 +4413,6 @@ zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
util_dynarray_init(&ctx->di.bindless[i].resident, NULL);
}
ctx->have_timelines = screen->info.have_KHR_timeline_semaphore;
simple_mtx_init(&ctx->batch_mtx, mtx_plain);
zink_start_batch(ctx, &ctx->batch);
if (!ctx->batch.state)

@@ -365,7 +365,6 @@ struct zink_context {
bool dirty_so_targets;
bool xfb_barrier;
bool first_frame_done;
bool have_timelines;
bool gfx_dirty;

@@ -138,34 +138,6 @@ fence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeou
return success;
}
bool
zink_vkfence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeout_ns)
{
if (screen->device_lost)
return true;
if (p_atomic_read(&fence->completed))
return true;
assert(fence->batch_id);
assert(fence->submitted);
bool success = false;
VkResult ret;
if (timeout_ns)
ret = VKSCR(WaitForFences)(screen->dev, 1, &fence->fence, VK_TRUE, timeout_ns);
else
ret = VKSCR(GetFenceStatus)(screen->dev, fence->fence);
success = zink_screen_handle_vkresult(screen, ret);
if (success) {
p_atomic_set(&fence->completed, true);
zink_batch_state(fence)->usage.usage = 0;
zink_screen_update_last_finished(screen, fence->batch_id);
}
return success;
}
static bool
zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct zink_tc_fence *mfence,
uint64_t timeout_ns)
@@ -207,9 +179,7 @@ zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct
if (fence->submitted && zink_screen_check_last_finished(screen, fence->batch_id))
return true;
if (screen->info.have_KHR_timeline_semaphore)
return fence_wait(screen, fence, timeout_ns);
return zink_vkfence_wait(screen, fence, timeout_ns);
return fence_wait(screen, fence, timeout_ns);
}
static bool

@@ -50,7 +50,6 @@ struct zink_tc_fence {
};
struct zink_fence {
VkFence fence;
uint32_t batch_id;
bool submitted;
bool completed;
@@ -89,9 +88,6 @@ zink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfen
void
zink_screen_fence_init(struct pipe_screen *pscreen);
bool
zink_vkfence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeout_ns);
void
zink_fence_clear_resources(struct zink_screen *screen, struct zink_fence *fence);
#endif

@@ -974,10 +974,7 @@ zink_get_query_result(struct pipe_context *pctx,
pctx->flush(pctx, NULL, 0);
if (!wait)
return false;
} else if (!threaded_query(q)->flushed &&
/* timeline drivers can wait during buffer map */
!zink_screen(pctx->screen)->info.have_KHR_timeline_semaphore)
zink_batch_usage_check_completion(ctx, query->batch_id);
}
return get_query_result(pctx, q, wait, result);
}

@@ -1731,7 +1731,6 @@ zink_screen_init_semaphore(struct zink_screen *screen)
} else {
mesa_loge("ZINK: vkCreateSemaphore failed");
}
screen->info.have_KHR_timeline_semaphore = false;
return false;
}
@@ -1761,81 +1760,6 @@ zink_screen_timeline_wait(struct zink_screen *screen, uint32_t batch_id, uint64_
return success;
}
struct noop_submit_info {
struct zink_screen *screen;
VkFence fence;
};
static void
noop_submit(void *data, void *gdata, int thread_index)
{
struct noop_submit_info *n = data;
VkSubmitInfo si = {0};
si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
simple_mtx_lock(&n->screen->queue_lock);
if (n->VKSCR(QueueSubmit)(n->screen->threaded ? n->screen->thread_queue : n->screen->queue,
1, &si, n->fence) != VK_SUCCESS) {
mesa_loge("ZINK: vkQueueSubmit failed");
n->screen->device_lost = true;
}
simple_mtx_unlock(&n->screen->queue_lock);
}
bool
zink_screen_batch_id_wait(struct zink_screen *screen, uint32_t batch_id, uint64_t timeout)
{
if (zink_screen_check_last_finished(screen, batch_id))
return true;
if (screen->info.have_KHR_timeline_semaphore)
return zink_screen_timeline_wait(screen, batch_id, timeout);
if (!timeout)
return false;
uint32_t new_id = 0;
while (!new_id)
new_id = p_atomic_inc_return(&screen->curr_batch);
VkResult ret;
struct noop_submit_info n;
uint64_t abs_timeout = os_time_get_absolute_timeout(timeout);
uint64_t remaining = PIPE_TIMEOUT_INFINITE;
VkFenceCreateInfo fci = {0};
struct util_queue_fence fence;
util_queue_fence_init(&fence);
fci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
if (VKSCR(CreateFence)(screen->dev, &fci, NULL, &n.fence) != VK_SUCCESS) {
mesa_loge("ZINK: vkCreateFence failed");
return false;
}
n.screen = screen;
if (screen->threaded) {
/* must use thread dispatch for sanity */
util_queue_add_job(&screen->flush_queue, &n, &fence, noop_submit, NULL, 0);
util_queue_fence_wait(&fence);
} else {
noop_submit(&n, NULL, 0);
}
if (timeout != PIPE_TIMEOUT_INFINITE) {
int64_t time_ns = os_time_get_nano();
remaining = abs_timeout > time_ns ? abs_timeout - time_ns : 0;
}
if (remaining)
ret = VKSCR(WaitForFences)(screen->dev, 1, &n.fence, VK_TRUE, remaining);
else
ret = VKSCR(GetFenceStatus)(screen->dev, n.fence);
VKSCR(DestroyFence)(screen->dev, n.fence, NULL);
bool success = zink_screen_handle_vkresult(screen, ret);
if (success)
zink_screen_update_last_finished(screen, new_id);
return success;
}
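The removed zink_screen_batch_id_wait above shows why the old path was painful: to give other threads something to wait on, it had to allocate a throwaway VkFence and push a no-op submit through the queue. With a shared timeline, the host can simply wait with vkWaitSemaphores (sketched earlier), and a later submission can also wait on a counter value directly on the GPU. A sketch of that submit-side wait follows, with assumed handles (timeline, queue, batch_id) and unrelated fields omitted; it illustrates the Vulkan mechanism, not code from this change.

/* sketch: make a later submission wait for timeline value batch_id */
uint64_t wait_value = batch_id;
VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;

VkTimelineSemaphoreSubmitInfo tsi = {0};
tsi.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
tsi.waitSemaphoreValueCount = 1;
tsi.pWaitSemaphoreValues = &wait_value;

VkSubmitInfo si = {0};
si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
si.pNext = &tsi;
si.waitSemaphoreCount = 1;
si.pWaitSemaphores = &timeline;
si.pWaitDstStageMask = &wait_stage;
/* command buffers that must run after the wait would be attached here */

VkResult ret = vkQueueSubmit(queue, 1, &si, VK_NULL_HANDLE);
/* on failure, treat as device loss, as the driver does after vkQueueSubmit */
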
static uint32_t
zink_get_loader_version(void)
{
@@ -2187,6 +2111,10 @@ zink_internal_create_screen(const struct pipe_screen_config *config)
util_queue_init(&screen->flush_queue, "zfq", 8, 1, UTIL_QUEUE_INIT_RESIZE_IF_FULL, screen);
zink_internal_setup_moltenvk(screen);
if (!screen->info.have_KHR_timeline_semaphore) {
mesa_loge("zink: KHR_timeline_semaphore is required");
goto fail;
}
screen->dev = zink_create_logical_device(screen);
if (!screen->dev)
@@ -2283,10 +2211,10 @@ zink_internal_create_screen(const struct pipe_screen_config *config)
break;
}
if (debug_get_bool_option("ZINK_NO_TIMELINES", false))
screen->info.have_KHR_timeline_semaphore = false;
if (screen->info.have_KHR_timeline_semaphore)
zink_screen_init_semaphore(screen);
if (!zink_screen_init_semaphore(screen)) {
mesa_loge("zink: failed to create timeline semaphore");
goto fail;
}
memset(&screen->heap_map, UINT8_MAX, sizeof(screen->heap_map));
for (enum zink_heap i = 0; i < ZINK_HEAP_MAX; i++) {

@@ -263,9 +263,6 @@ struct mem_cache_entry {
VkFormat
zink_get_format(struct zink_screen *screen, enum pipe_format format);
bool
zink_screen_batch_id_wait(struct zink_screen *screen, uint32_t batch_id, uint64_t timeout);
bool
zink_screen_timeline_wait(struct zink_screen *screen, uint32_t batch_id, uint64_t timeout);