freedreno: threaded_context async flush support

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9323>
Authored by Rob Clark on 2021-03-05 11:36:32 -08:00; committed by Marge Bot
parent 9dbe2405a3
commit c4e5beef07
3 changed files with 137 additions and 14 deletions


@@ -54,6 +54,42 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
DBG("%p: flush: flags=%x", batch, flags);
if (fencep && !batch) {
batch = fd_context_batch(ctx);
} else if (!batch) {
fd_bc_dump(ctx->screen, "%p: NULL batch, remaining:\n", ctx);
return;
}
/* With TC_FLUSH_ASYNC, the fence will have been pre-created from
* the front-end thread. But not yet associated with a batch,
* because we cannot safely access ctx->batch outside of the driver
* thread. So instead, replace the existing batch->fence with the
* one created earlier
*/
if ((flags & TC_FLUSH_ASYNC) && fencep) {
/* We don't currently expect async+flush in the fence-fd
* case.. for that to work properly we'd need TC to tell
* us in the create_fence callback that it needs an fd.
*/
assert(!(flags & PIPE_FLUSH_FENCE_FD));
fd_fence_set_batch(*fencep, batch);
fd_fence_ref(&batch->fence, *fencep);
/* We (a) cannot substitute the provided fence with last_fence,
* and (b) need fd_fence_populate() to be eventually called on
* the fence that was pre-created in frontend-thread:
*/
fd_fence_ref(&ctx->last_fence, NULL);
/* async flush is not compatible with deferred flush, since
* nothing triggers the batch flush which fence_flush() would
* be waiting for
*/
flags &= ~PIPE_FLUSH_DEFERRED;
}
/* In some sequence of events, we can end up with a last_fence that is
* not an "fd" fence, which results in eglDupNativeFenceFDANDROID()
* errors.
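
For context, a minimal sketch of how this path is exercised from the frontend: the application thread requests a fence with an async flush and only blocks when it actually waits on the fence. The helper name below is illustrative, and it assumes pctx is the threaded_context wrapper set up by fd_context_init_tc().

#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "pipe/p_defines.h"

/* Illustrative sketch: request a fence via an async flush, wait on it later. */
static bool
example_async_flush_and_wait(struct pipe_context *pctx, uint64_t timeout_ns)
{
   struct pipe_screen *screen = pctx->screen;
   struct pipe_fence_handle *fence = NULL;

   /* Returns quickly: tc pre-creates the fence on this thread (via the
    * driver's create_fence callback) and defers the real flush to the
    * driver thread, passing TC_FLUSH_ASYNC to fd_context_flush().
    */
   pctx->flush(pctx, &fence, PIPE_FLUSH_ASYNC);

   /* Blocks (up to timeout_ns) until the driver thread has flushed the
    * batch and fd_fence_populate() has been called on the fence.
    */
   bool done = screen->fence_finish(screen, pctx, fence, timeout_ns);

   screen->fence_reference(screen, &fence, NULL);
   return done;
}
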
@@ -71,13 +107,6 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
goto out;
}
if (fencep && !batch) {
batch = fd_context_batch(ctx);
} else if (!batch) {
fd_bc_dump(ctx->screen, "%p: NULL batch, remaining:\n", ctx);
return;
}
/* Take a ref to the batch's fence (batch can be unref'd when flushed): */
fd_fence_ref(&fence, batch->fence);
@@ -631,7 +660,7 @@ fd_context_init_tc(struct pipe_context *pctx, unsigned flags)
struct pipe_context *tc = threaded_context_create(pctx,
&ctx->screen->transfer_pool,
fd_replace_buffer_storage,
NULL, // TODO fd_create_fence for async flush
fd_fence_create_unflushed,
&ctx->tc);
uint64_t total_ram;


@@ -36,11 +36,36 @@
struct pipe_fence_handle {
struct pipe_reference reference;
/* fence holds a weak reference to the batch until the batch is flushed,
* at which point fd_fence_populate() is called and timestamp and possibly
fence_fd become valid and the weak reference is dropped.
*
* Note that with u_threaded_context async flushes, if a fence is requested
* by the frontend, the fence is initially created without a weak reference
* to the batch, which is filled in later when fd_context_flush() is called
* from the driver thread. In this case tc_token will be non-null, in
* which case threaded_context_flush() should be called in fd_fence_finish()
*/
struct fd_batch *batch;
struct tc_unflushed_batch_token *tc_token;
bool needs_signal;
/* For threaded_context async flushes, we must wait on the fence, signalled
in fd_fence_populate(), to know that the rendering has actually been
flushed from the driver thread.
*
The ready fence is created signalled for non-async-flush fences, and only
transitions once from unsignalled->signalled for async-flush fences
*/
struct util_queue_fence ready;
/* Note that a fence can outlive the ctx, so ctx can only be assumed to be a
valid pointer for unflushed fences. However, we hold a reference to
fence->pipe, so it is safe to use after flushing.
*/
struct fd_context *ctx;
struct fd_pipe *pipe;
struct fd_screen *screen;
int fence_fd;
@@ -48,13 +73,44 @@ struct pipe_fence_handle {
uint32_t syncobj;
};
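
To restate the comments above in one place, this is roughly the state an async-flush ("unflushed") fence is in when the tc create_fence callback returns, before the driver thread has seen the flush. The helper is hypothetical and would only compile inside freedreno_fence.c, where the struct is defined.

#include <assert.h>
#include "util/u_queue.h"

/* Hypothetical helper, illustration only. */
static void
assert_unflushed_fence_state(struct pipe_fence_handle *fence)
{
   assert(!fence->batch);       /* batch is attached later, from the driver thread */
   assert(fence->tc_token);     /* lets fence_finish() call threaded_context_flush() */
   assert(fence->needs_signal); /* fd_fence_populate() will signal fence->ready */
   assert(!util_queue_fence_is_signalled(&fence->ready));
}
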
static void fence_flush(struct pipe_fence_handle *fence)
/* TODO this will change w/ threaded-ctx where we need to use threaded_context_flush().. */
static bool
fence_flush(struct pipe_fence_handle *fence, uint64_t timeout)
/* NOTE: in the !fence_is_signalled() case we may be called from non-driver
* thread, but we don't call fd_batch_flush() in that case
*/
in_dt
{
if (!util_queue_fence_is_signalled(&fence->ready)) {
if (fence->tc_token) {
threaded_context_flush(&fence->ctx->base, fence->tc_token,
timeout == 0);
}
if (!timeout)
return false;
if (timeout == PIPE_TIMEOUT_INFINITE) {
util_queue_fence_wait(&fence->ready);
} else {
int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
if (!util_queue_fence_wait_timeout(&fence->ready, abs_timeout)) {
return false;
}
}
/* We've already waited for batch to be flushed and fd_fence_populate()
* called:
*/
assert(!fence->batch);
return true;
}
if (fence->batch)
fd_batch_flush(fence->batch);
debug_assert(!fence->batch);
return true;
}
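
The needs_signal/ready pair is the stock util_queue_fence handshake, used here across the frontend and driver threads. A standalone sketch of that pattern follows; the thread function is a stand-in for the driver thread, and the names are illustrative.

#include <pthread.h>
#include "util/u_queue.h"
#include "util/os_time.h"

static struct util_queue_fence ready;

/* Stand-in for the driver thread: after the real flush work, signal the
 * waiter, which is what fd_fence_populate() does when needs_signal is set.
 */
static void *
fake_driver_thread(void *arg)
{
   (void)arg;
   /* ... flush the batch, fill in timestamp/fence_fd ... */
   util_queue_fence_signal(&ready);
   return NULL;
}

int
main(void)
{
   pthread_t thread;

   util_queue_fence_init(&ready);   /* starts out signalled (the non-async case) */
   util_queue_fence_reset(&ready);  /* unflushed fence: waiters must block */

   pthread_create(&thread, NULL, fake_driver_thread, NULL);

   /* Finite waits take an absolute deadline, as in fence_flush() above: */
   int64_t abs_timeout = os_time_get_absolute_timeout(1000000); /* 1ms, in ns */
   if (!util_queue_fence_wait_timeout(&ready, abs_timeout)) {
      /* Timed out; for this example just fall back to an unbounded wait. */
      util_queue_fence_wait(&ready);
   }

   pthread_join(thread, NULL);
   util_queue_fence_destroy(&ready);
   return 0;
}
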
void fd_fence_populate(struct pipe_fence_handle *fence,
@@ -65,10 +121,16 @@ void fd_fence_populate(struct pipe_fence_handle *fence,
fence->timestamp = timestamp;
fence->fence_fd = fence_fd;
fence->batch = NULL;
if (fence->needs_signal) {
util_queue_fence_signal(&fence->ready);
fence->needs_signal = false;
}
}
static void fd_fence_destroy(struct pipe_fence_handle *fence)
{
tc_unflushed_batch_token_reference(&fence->tc_token, NULL);
if (fence->fence_fd != -1)
close(fence->fence_fd);
if (fence->syncobj)
@@ -87,11 +149,12 @@ void fd_fence_ref(struct pipe_fence_handle **ptr,
}
bool fd_fence_finish(struct pipe_screen *pscreen,
struct pipe_context *ctx,
struct pipe_context *pctx,
struct pipe_fence_handle *fence,
uint64_t timeout)
{
fence_flush(fence);
if (!fence_flush(fence, timeout))
return false;
if (fence->fence_fd != -1) {
int ret = sync_wait(fence->fence_fd, timeout / 1000000);
@@ -114,7 +177,9 @@ static struct pipe_fence_handle * fence_create(struct fd_context *ctx,
return NULL;
pipe_reference_init(&fence->reference, 1);
util_queue_fence_init(&fence->ready);
fence->ctx = ctx;
fence->batch = batch;
fence->pipe = fd_pipe_ref(ctx->pipe);
fence->screen = ctx->screen;
@@ -157,7 +222,10 @@ void fd_fence_server_sync(struct pipe_context *pctx,
{
struct fd_context *ctx = fd_context(pctx);
fence_flush(fence);
/* NOTE: we don't expect the combination of fence-fd + async-flush-fence,
* so timeout==0 is ok here:
*/
fence_flush(fence, 0);
/* if not an external fence, then nothing more to do without preemption: */
if (fence->fence_fd == -1)
@@ -181,7 +249,7 @@ void fd_fence_server_signal(struct pipe_context *pctx,
int fd_fence_get_fd(struct pipe_screen *pscreen,
struct pipe_fence_handle *fence)
{
fence_flush(fence);
fence_flush(fence, PIPE_TIMEOUT_INFINITE);
return os_dupfd_cloexec(fence->fence_fd);
}
@@ -194,3 +262,22 @@ struct pipe_fence_handle * fd_fence_create(struct fd_batch *batch)
{
return fence_create(batch->ctx, batch, 0, -1, 0);
}
void
fd_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch)
{
assert(!fence->batch);
fence->batch = batch;
}
struct pipe_fence_handle *
fd_fence_create_unflushed(struct pipe_context *pctx,
struct tc_unflushed_batch_token *tc_token)
{
struct pipe_fence_handle *fence =
fence_create(fd_context(pctx), NULL, 0, -1, 0);
fence->needs_signal = true;
util_queue_fence_reset(&fence->ready);
tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);
return fence;
}


@@ -51,4 +51,11 @@ bool fd_fence_is_fd(struct pipe_fence_handle *fence);
struct fd_batch;
struct pipe_fence_handle * fd_fence_create(struct fd_batch *batch);
void fd_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch);
struct tc_unflushed_batch_token;
struct pipe_fence_handle *fd_fence_create_unflushed(struct pipe_context *pctx,
struct tc_unflushed_batch_token *tc_token);
#endif /* FREEDRENO_FENCE_H_ */