radeonsi: rename rfence -> sfence

Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Author: Marek Olšák
Date: 2019-01-18 19:35:04 -05:00
parent 260ff57647
commit d85917deaf
1 changed file with 49 additions and 49 deletions


@@ -286,15 +286,15 @@ static boolean si_fence_finish(struct pipe_screen *screen,
 uint64_t timeout)
 {
 struct radeon_winsys *rws = ((struct si_screen*)screen)->ws;
-struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
 struct si_context *sctx;
 int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
 ctx = threaded_context_unwrap_sync(ctx);
 sctx = (struct si_context*)(ctx ? ctx : NULL);
-if (!util_queue_fence_is_signalled(&rfence->ready)) {
-if (rfence->tc_token) {
+if (!util_queue_fence_is_signalled(&sfence->ready)) {
+if (sfence->tc_token) {
 /* Ensure that si_flush_from_st will be called for
  * this fence, but only if we're in the API thread
  * where the context is current.
@@ -303,7 +303,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
  * be in flight in the driver thread, so the fence
  * may not be ready yet when this call returns.
  */
-threaded_context_flush(ctx, rfence->tc_token,
+threaded_context_flush(ctx, sfence->tc_token,
 timeout == 0);
 }
@@ -311,9 +311,9 @@ static boolean si_fence_finish(struct pipe_screen *screen,
 return false;
 if (timeout == PIPE_TIMEOUT_INFINITE) {
-util_queue_fence_wait(&rfence->ready);
+util_queue_fence_wait(&sfence->ready);
 } else {
-if (!util_queue_fence_wait_timeout(&rfence->ready, abs_timeout))
+if (!util_queue_fence_wait_timeout(&sfence->ready, abs_timeout))
 return false;
 }
@@ -323,8 +323,8 @@ static boolean si_fence_finish(struct pipe_screen *screen,
 }
 }
-if (rfence->sdma) {
-if (!rws->fence_wait(rws, rfence->sdma, timeout))
+if (sfence->sdma) {
+if (!rws->fence_wait(rws, sfence->sdma, timeout))
 return false;
 /* Recompute the timeout after waiting. */
@@ -334,19 +334,19 @@ static boolean si_fence_finish(struct pipe_screen *screen,
 }
 }
-if (!rfence->gfx)
+if (!sfence->gfx)
 return true;
-if (rfence->fine.buf &&
-si_fine_fence_signaled(rws, &rfence->fine)) {
-rws->fence_reference(&rfence->gfx, NULL);
-si_resource_reference(&rfence->fine.buf, NULL);
+if (sfence->fine.buf &&
+si_fine_fence_signaled(rws, &sfence->fine)) {
+rws->fence_reference(&sfence->gfx, NULL);
+si_resource_reference(&sfence->fine.buf, NULL);
 return true;
 }
 /* Flush the gfx IB if it hasn't been flushed yet. */
-if (sctx && rfence->gfx_unflushed.ctx == sctx &&
-rfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
+if (sctx && sfence->gfx_unflushed.ctx == sctx &&
+sfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
 /* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
  * spec says:
  *
@@ -373,7 +373,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
 (timeout ? 0 : PIPE_FLUSH_ASYNC) |
 RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
 NULL);
-rfence->gfx_unflushed.ctx = NULL;
+sfence->gfx_unflushed.ctx = NULL;
 if (!timeout)
 return false;
@@ -385,13 +385,13 @@ static boolean si_fence_finish(struct pipe_screen *screen,
 }
 }
-if (rws->fence_wait(rws, rfence->gfx, timeout))
+if (rws->fence_wait(rws, sfence->gfx, timeout))
 return true;
 /* Re-check in case the GPU is slow or hangs, but the commands before
  * the fine-grained fence have completed. */
-if (rfence->fine.buf &&
-si_fine_fence_signaled(rws, &rfence->fine))
+if (sfence->fine.buf &&
+si_fine_fence_signaled(rws, &sfence->fine))
 return true;
 return false;
@@ -403,12 +403,12 @@ static void si_create_fence_fd(struct pipe_context *ctx,
 {
 struct si_screen *sscreen = (struct si_screen*)ctx->screen;
 struct radeon_winsys *ws = sscreen->ws;
-struct si_multi_fence *rfence;
+struct si_multi_fence *sfence;
 *pfence = NULL;
-rfence = si_create_multi_fence();
-if (!rfence)
+sfence = si_create_multi_fence();
+if (!sfence)
 return;
 switch (type) {
@@ -416,14 +416,14 @@ static void si_create_fence_fd(struct pipe_context *ctx,
 if (!sscreen->info.has_fence_to_handle)
 goto finish;
-rfence->gfx = ws->fence_import_sync_file(ws, fd);
+sfence->gfx = ws->fence_import_sync_file(ws, fd);
 break;
 case PIPE_FD_TYPE_SYNCOBJ:
 if (!sscreen->info.has_syncobj)
 goto finish;
-rfence->gfx = ws->fence_import_syncobj(ws, fd);
+sfence->gfx = ws->fence_import_syncobj(ws, fd);
 break;
 default:
@@ -431,12 +431,12 @@ static void si_create_fence_fd(struct pipe_context *ctx,
 }
 finish:
-if (!rfence->gfx) {
-FREE(rfence);
+if (!sfence->gfx) {
+FREE(sfence);
 return;
 }
-*pfence = (struct pipe_fence_handle*)rfence;
+*pfence = (struct pipe_fence_handle*)sfence;
 }
 static int si_fence_get_fd(struct pipe_screen *screen,
@@ -444,26 +444,26 @@ static int si_fence_get_fd(struct pipe_screen *screen,
 {
 struct si_screen *sscreen = (struct si_screen*)screen;
 struct radeon_winsys *ws = sscreen->ws;
-struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
 int gfx_fd = -1, sdma_fd = -1;
 if (!sscreen->info.has_fence_to_handle)
 return -1;
-util_queue_fence_wait(&rfence->ready);
+util_queue_fence_wait(&sfence->ready);
 /* Deferred fences aren't supported. */
-assert(!rfence->gfx_unflushed.ctx);
-if (rfence->gfx_unflushed.ctx)
+assert(!sfence->gfx_unflushed.ctx);
+if (sfence->gfx_unflushed.ctx)
 return -1;
-if (rfence->sdma) {
-sdma_fd = ws->fence_export_sync_file(ws, rfence->sdma);
+if (sfence->sdma) {
+sdma_fd = ws->fence_export_sync_file(ws, sfence->sdma);
 if (sdma_fd == -1)
 return -1;
 }
-if (rfence->gfx) {
-gfx_fd = ws->fence_export_sync_file(ws, rfence->gfx);
+if (sfence->gfx) {
+gfx_fd = ws->fence_export_sync_file(ws, sfence->gfx);
 if (gfx_fd == -1) {
 if (sdma_fd != -1)
 close(sdma_fd);
@@ -584,15 +584,15 @@ static void si_fence_server_signal(struct pipe_context *ctx,
 struct pipe_fence_handle *fence)
 {
 struct si_context *sctx = (struct si_context *)ctx;
-struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
 /* We should have at least one syncobj to signal */
-assert(rfence->sdma || rfence->gfx);
-if (rfence->sdma)
-si_add_syncobj_signal(sctx, rfence->sdma);
-if (rfence->gfx)
-si_add_syncobj_signal(sctx, rfence->gfx);
+assert(sfence->sdma || sfence->gfx);
+if (sfence->sdma)
+si_add_syncobj_signal(sctx, sfence->sdma);
+if (sfence->gfx)
+si_add_syncobj_signal(sctx, sfence->gfx);
 /**
  * The spec does not require a flush here. We insert a flush
@@ -611,13 +611,13 @@ static void si_fence_server_sync(struct pipe_context *ctx,
 struct pipe_fence_handle *fence)
 {
 struct si_context *sctx = (struct si_context *)ctx;
-struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
-util_queue_fence_wait(&rfence->ready);
+struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
+util_queue_fence_wait(&sfence->ready);
 /* Unflushed fences from the same context are no-ops. */
-if (rfence->gfx_unflushed.ctx &&
-rfence->gfx_unflushed.ctx == sctx)
+if (sfence->gfx_unflushed.ctx &&
+sfence->gfx_unflushed.ctx == sctx)
 return;
 /* All unflushed commands will not start execution before
@@ -627,10 +627,10 @@ static void si_fence_server_sync(struct pipe_context *ctx,
  */
 si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
-if (rfence->sdma)
-si_add_fence_dependency(sctx, rfence->sdma);
-if (rfence->gfx)
-si_add_fence_dependency(sctx, rfence->gfx);
+if (sfence->sdma)
+si_add_fence_dependency(sctx, sfence->sdma);
+if (sfence->gfx)
+si_add_fence_dependency(sctx, sfence->gfx);
 }
 void si_init_fence_functions(struct si_context *ctx)