gallium/u_threaded: avoid syncing in threaded_context_flush

We could always do the flush asynchronously, but if we're going to wait
for a fence anyway and the driver thread is currently idle, the additional
communication overhead isn't worth it.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Nicolai Hähnle 2017-11-10 11:15:44 +01:00
parent bc65dcab3b
commit 3f17d3c017
3 changed files with 17 additions and 5 deletions
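
For context, here is a minimal caller-side sketch of the new interface. Everything apart from threaded_context_flush() and its prefer_async parameter (the helper name, the include path, and the surrounding logic) is a hypothetical illustration, not part of this commit:

#include "util/u_threaded_context.h"

/* Hypothetical fence-wait helper: flush any batch still queued in the
 * threaded context before waiting on the fence.  With timeout == 0 the
 * caller will not block on the fence, so an asynchronous flush is always
 * preferred; otherwise threaded_context_flush() may sync directly when
 * the driver thread is already idle and skip the queue round trip.
 */
static void
example_flush_before_wait(struct pipe_context *ctx,
                          struct tc_unflushed_batch_token *token,
                          uint64_t timeout)
{
   if (token)
      threaded_context_flush(ctx, token, /* prefer_async = */ timeout == 0);

   /* ... then wait on the underlying winsys fence up to 'timeout' ... */
}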

@@ -231,13 +231,23 @@ _tc_sync(struct threaded_context *tc, MAYBE_UNUSED const char *info, MAYBE_UNUSE
  */
 void
 threaded_context_flush(struct pipe_context *_pipe,
-                       struct tc_unflushed_batch_token *token)
+                       struct tc_unflushed_batch_token *token,
+                       bool prefer_async)
 {
    struct threaded_context *tc = threaded_context(_pipe);
 
    /* This is called from the state-tracker / application thread. */
-   if (token->tc && token->tc == tc)
-      tc_sync(token->tc);
+   if (token->tc && token->tc == tc) {
+      struct tc_batch *last = &tc->batch_slots[tc->last];
+
+      /* Prefer to do the flush in the driver thread if it is already
+       * running. That should be better for cache locality.
+       */
+      if (prefer_async || !util_queue_fence_is_signalled(&last->fence))
+         tc_batch_flush(tc);
+      else
+         tc_sync(token->tc);
+   }
 }
 
 static void

@@ -381,7 +381,8 @@ threaded_context_create(struct pipe_context *pipe,
 
 void
 threaded_context_flush(struct pipe_context *_pipe,
-                       struct tc_unflushed_batch_token *token);
+                       struct tc_unflushed_batch_token *token,
+                       bool prefer_async);
 
 static inline struct threaded_context *
 threaded_context(struct pipe_context *pipe)

@@ -203,7 +203,8 @@ static boolean si_fence_finish(struct pipe_screen *screen,
 		 * be in flight in the driver thread, so the fence
 		 * may not be ready yet when this call returns.
 		 */
-		threaded_context_flush(ctx, rfence->tc_token);
+		threaded_context_flush(ctx, rfence->tc_token,
+				       timeout == 0);
 	}
 
 	if (timeout == PIPE_TIMEOUT_INFINITE) {