gallium/radeon: unify and simplify checking for an empty gfx IB

We can take advantage of the fact that multi_fence does the obvious thing
with NULL fences.

This fixes unflushed fences that can get stuck due to empty IBs.
Marek Olšák 2016-08-25 01:26:54 +02:00
parent e6673e7ac2
commit fe91ae06d3
3 changed files with 23 additions and 27 deletions
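
Before the diffs, a minimal standalone sketch of the property the commit message leans on: a two-engine fence wrapper in which a NULL member counts as already signalled. The demo_* names are hypothetical stand-ins for r600_multi_fence and the fence_finish behaviour described above, not Mesa's actual types or API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-engine fence; stands in for a pipe_fence_handle. */
struct demo_fence {
	bool signalled;
};

/* Hypothetical wrapper holding one fence per engine (gfx + sdma).
 * Either pointer may be NULL, meaning "nothing to wait for". */
struct demo_multi_fence {
	struct demo_fence *gfx;
	struct demo_fence *sdma;
};

/* Waiting on a NULL member trivially succeeds, so a multi-fence whose
 * members are both NULL behaves like an already-signalled fence. */
static bool demo_fence_finish(const struct demo_multi_fence *mf)
{
	if (mf->gfx && !mf->gfx->signalled)
		return false;
	if (mf->sdma && !mf->sdma->signalled)
		return false;
	return true;
}

int main(void)
{
	struct demo_multi_fence empty = { NULL, NULL };   /* empty IB case */
	struct demo_fence busy = { false };
	struct demo_multi_fence pending = { &busy, NULL };

	printf("empty: %d\n", demo_fence_finish(&empty));     /* prints 1 */
	printf("pending: %d\n", demo_fence_finish(&pending)); /* prints 0 */
	return 0;
}

Treating NULL as already signalled is what lets the flush paths below skip creating real fences for empty IBs without special-casing the waiter.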

@@ -255,14 +255,8 @@ void r600_context_gfx_flush(void *context, unsigned flags,
 	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
 	struct radeon_winsys *ws = ctx->b.ws;

-	if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size) &&
-	    (!fence || ctx->b.last_gfx_fence)) {
-		if (fence)
-			ws->fence_reference(fence, ctx->b.last_gfx_fence);
-		if (!(flags & RADEON_FLUSH_ASYNC))
-			ws->cs_sync_flush(cs);
+	if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
 		return;
-	}

 	r600_preflush_suspend_features(&ctx->b);
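
The early return above relies on knowing whether the command stream still contains only its start-of-IB state. A rough sketch of that test follows, under the assumption that an IB counts as empty when its dword count has not grown past the size recorded when the IB was begun; the helper and field names are illustrative and not the actual radeon_emitted implementation.

#include <stdbool.h>

/* Illustrative command-stream state: how many dwords have been written. */
struct demo_cs {
	unsigned num_dw;
};

/* Assumed meaning of the radeon_emitted(cs, initial_gfx_cs_size) check:
 * the IB carries real work only if it grew past the preamble emitted when
 * the IB was begun. */
static bool demo_cs_has_work(const struct demo_cs *cs, unsigned initial_size)
{
	return cs && cs->num_dw > initial_size;
}

With that reading, the per-driver gfx flush callbacks can simply return when nothing beyond the preamble was written, and the shared code in r600_flush_from_st (next hunk) decides which fence to hand back.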

@@ -265,6 +265,7 @@ static void r600_flush_from_st(struct pipe_context *ctx,
 {
 	struct pipe_screen *screen = ctx->screen;
 	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+	struct radeon_winsys *ws = rctx->ws;
 	unsigned rflags = 0;
 	struct pipe_fence_handle *gfx_fence = NULL;
 	struct pipe_fence_handle *sdma_fence = NULL;
@@ -279,26 +280,34 @@ static void r600_flush_from_st(struct pipe_context *ctx,
 		rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);
 	}

-	/* Instead of flushing, create a deferred fence. Constraints:
-	 * - The state tracker must allow a deferred flush.
-	 * - The state tracker must request a fence.
-	 * Thread safety in fence_finish must be ensured by the state tracker.
-	 */
-	if (flags & PIPE_FLUSH_DEFERRED && fence) {
-		gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx.cs);
-		deferred_fence = true;
+	if (!radeon_emitted(rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
+		if (fence)
+			ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
+		if (!(rflags & RADEON_FLUSH_ASYNC))
+			ws->cs_sync_flush(rctx->gfx.cs);
 	} else {
-		rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
+		/* Instead of flushing, create a deferred fence. Constraints:
+		 * - The state tracker must allow a deferred flush.
+		 * - The state tracker must request a fence.
+		 * Thread safety in fence_finish must be ensured by the state tracker.
+		 */
+		if (flags & PIPE_FLUSH_DEFERRED && fence) {
+			gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx.cs);
+			deferred_fence = true;
+		} else {
+			rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
+		}
 	}

 	/* Both engines can signal out of order, so we need to keep both fences. */
-	if (gfx_fence || sdma_fence) {
+	if (fence) {
 		struct r600_multi_fence *multi_fence =
 			CALLOC_STRUCT(r600_multi_fence);
 		if (!multi_fence)
 			return;

 		multi_fence->reference.count = 1;
+		/* If both fences are NULL, fence_finish will always return true. */
 		multi_fence->gfx = gfx_fence;
 		multi_fence->sdma = sdma_fence;
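
Read together, the new empty-IB branch and the relaxed if (fence) check give the shared flush path one shape for both the empty and the non-empty case. Below is a condensed, hypothetical model of that control flow; the demo_* names are placeholders, and the deferred-fence and SDMA paths are left out.

#include <stdbool.h>
#include <stddef.h>

struct demo_fence;

struct demo_ctx {
	struct demo_fence *last_gfx_fence; /* fence of the previous real flush */
	bool gfx_work_pending;             /* stands in for radeon_emitted(...) */
};

/* Stub: the real driver would submit the IB here and return its fence. */
static struct demo_fence *demo_hw_flush(struct demo_ctx *ctx)
{
	(void)ctx;
	return NULL;
}

static void demo_flush(struct demo_ctx *ctx, struct demo_fence **out_fence)
{
	struct demo_fence *gfx_fence = NULL;

	if (!ctx->gfx_work_pending) {
		/* Empty IB: neither flush nor create a deferred fence; reuse
		 * the fence of the last real flush instead. */
		if (out_fence)
			gfx_fence = ctx->last_gfx_fence; /* may itself be NULL */
	} else {
		gfx_fence = demo_hw_flush(ctx);
		ctx->last_gfx_fence = gfx_fence;
		ctx->gfx_work_pending = false;
	}

	if (out_fence)
		*out_fence = gfx_fence; /* NULL still reads as already signalled */
}

In this reading a fence is never tied to work that will not be submitted, which is how the stuck, never-flushed fences mentioned in the commit message are avoided.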

@@ -100,17 +100,10 @@ void si_context_gfx_flush(void *context, unsigned flags,
 	if (ctx->gfx_flush_in_progress)
 		return;

-	ctx->gfx_flush_in_progress = true;
-
-	if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size) &&
-	    (!fence || ctx->b.last_gfx_fence)) {
-		if (fence)
-			ws->fence_reference(fence, ctx->b.last_gfx_fence);
-		if (!(flags & RADEON_FLUSH_ASYNC))
-			ws->cs_sync_flush(cs);
-		ctx->gfx_flush_in_progress = false;
+	if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
 		return;
-	}
+
+	ctx->gfx_flush_in_progress = true;

 	r600_preflush_suspend_features(&ctx->b);