gallium/radeon: move last_gfx_fence from radeonsi to common code
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
This commit is contained in:
Parent: c15a9dec29
Commit: a6bfafa083
@ -510,6 +510,7 @@ void r600_common_context_cleanup(struct r600_common_context *rctx)
|
|||
if (rctx->allocator_zeroed_memory) {
|
||||
u_suballocator_destroy(rctx->allocator_zeroed_memory);
|
||||
}
|
||||
rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL);
|
||||
rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
|
||||
}
|
||||
|
||||
|
|
|
@ -509,6 +509,7 @@ struct r600_common_context {
|
|||
enum chip_class chip_class;
|
||||
struct r600_ring gfx;
|
||||
struct r600_ring dma;
|
||||
struct pipe_fence_handle *last_gfx_fence;
|
||||
struct pipe_fence_handle *last_sdma_fence;
|
||||
unsigned initial_gfx_cs_size;
|
||||
unsigned gpu_reset_counter;
|
||||
|
|
|
@ -102,9 +102,9 @@ void si_context_gfx_flush(void *context, unsigned flags,
|
|||
ctx->gfx_flush_in_progress = true;
|
||||
|
||||
if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size) &&
|
||||
(!fence || ctx->last_gfx_fence)) {
|
||||
(!fence || ctx->b.last_gfx_fence)) {
|
||||
if (fence)
|
||||
ws->fence_reference(fence, ctx->last_gfx_fence);
|
||||
ws->fence_reference(fence, ctx->b.last_gfx_fence);
|
||||
if (!(flags & RADEON_FLUSH_ASYNC))
|
||||
ws->cs_sync_flush(cs);
|
||||
ctx->gfx_flush_in_progress = false;
|
||||
|
@ -135,17 +135,17 @@ void si_context_gfx_flush(void *context, unsigned flags,
|
|||
}
|
||||
|
||||
/* Flush the CS. */
|
||||
ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
|
||||
ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
|
||||
|
||||
if (fence)
|
||||
ws->fence_reference(fence, ctx->last_gfx_fence);
|
||||
ws->fence_reference(fence, ctx->b.last_gfx_fence);
|
||||
|
||||
/* Check VM faults if needed. */
|
||||
if (ctx->screen->b.debug_flags & DBG_CHECK_VM) {
|
||||
/* Use conservative timeout 800ms, after which we won't wait any
|
||||
* longer and assume the GPU is hung.
|
||||
*/
|
||||
ctx->b.ws->fence_wait(ctx->b.ws, ctx->last_gfx_fence, 800*1000*1000);
|
||||
ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);
|
||||
|
||||
si_check_vm_faults(&ctx->b, &ctx->last_gfx, RING_GFX);
|
||||
}
|
||||
|
|
|
@ -64,7 +64,6 @@ static void si_destroy_context(struct pipe_context *context)
|
|||
free(sctx->border_color_table);
|
||||
r600_resource_reference(&sctx->scratch_buffer, NULL);
|
||||
r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
|
||||
sctx->b.ws->fence_reference(&sctx->last_gfx_fence, NULL);
|
||||
|
||||
si_pm4_free_state(sctx, sctx->init_config, ~0);
|
||||
if (sctx->init_config_gs_rings)
|
||||
|
|
|
@ -215,7 +215,6 @@ struct si_context {
|
|||
bool ce_need_synchronization;
|
||||
struct u_suballocator *ce_suballocator;
|
||||
|
||||
struct pipe_fence_handle *last_gfx_fence;
|
||||
struct si_shader_ctx_state fixed_func_tcs_shader;
|
||||
LLVMTargetMachineRef tm; /* only non-threaded compilation */
|
||||
bool gfx_flush_in_progress;
|
||||
|
|
Loading…
Reference in New Issue