radeonsi: use r600_common_context less pt1

Acked-by: Timothy Arceri <tarceri@itsqueeze.com>
Marek Olšák 2018-04-01 15:37:11 -04:00
parent 0606190059
commit 71d9028b7a
9 changed files with 88 additions and 87 deletions
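
The change is mechanical throughout: functions that previously took a struct r600_common_context * now take a struct si_context *, and anything still stored in the common context is reached through its embedded b member. A minimal sketch of the struct nesting implied by the diff (field lists abbreviated and the helper below is hypothetical; the real definitions in si_pipe.h and r600_pipe_common.h carry far more state):

/* Sketch only -- abbreviated, not the real Mesa definitions. */
struct r600_common_context {
	struct pipe_context b;           /* base class */
	struct radeon_winsys *ws;
	struct radeon_winsys_cs *gfx_cs;
	struct radeon_winsys_cs *dma_cs;
	unsigned num_dma_calls;
	/* ... many more fields ... */
};

struct si_context {
	struct r600_common_context b;    /* base class */
	/* ... many more fields ... */
};

/* Callers that receive a pipe_context can now cast directly to si_context
 * and reach the remaining common-context state through ->b
 * (hypothetical example mirroring the calls changed below): */
static void example_flush_dma(struct pipe_context *pctx)
{
	struct si_context *sctx = (struct si_context *)pctx;

	if (sctx->b.dma_cs)
		si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
}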

View File

@@ -46,6 +46,7 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
 					    struct r600_resource *resource,
 					    unsigned usage)
 {
+	struct si_context *sctx = (struct si_context*)ctx;
 	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
 	bool busy = false;
@@ -75,10 +76,10 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
 		   ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
 						    resource->buf, rusage)) {
 		if (usage & PIPE_TRANSFER_DONTBLOCK) {
-			si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+			si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
 			return NULL;
 		} else {
-			si_flush_dma_cs(ctx, 0, NULL);
+			si_flush_dma_cs(sctx, 0, NULL);
 			busy = true;
 		}
 	}

View File

@@ -79,7 +79,7 @@ static bool r600_resource_commit(struct pipe_context *pctx,
 				 unsigned level, struct pipe_box *box,
 				 bool commit)
 {
-	struct r600_common_context *ctx = (struct r600_common_context *)pctx;
+	struct si_context *ctx = (struct si_context *)pctx;
 	struct r600_resource *res = r600_resource(resource);
 
 	/*
@@ -89,23 +89,23 @@ static bool r600_resource_commit(struct pipe_context *pctx,
 	 * (b) wait for threaded submit to finish, including those that were
 	 *     triggered by some other, earlier operation.
 	 */
-	if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
-	    ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
+	if (radeon_emitted(ctx->b.gfx_cs, ctx->b.initial_gfx_cs_size) &&
+	    ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs,
 					     res->buf, RADEON_USAGE_READWRITE)) {
 		si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
 	}
-	if (radeon_emitted(ctx->dma_cs, 0) &&
-	    ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
+	if (radeon_emitted(ctx->b.dma_cs, 0) &&
+	    ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs,
 					     res->buf, RADEON_USAGE_READWRITE)) {
 		si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
 	}
 
-	ctx->ws->cs_sync_flush(ctx->dma_cs);
-	ctx->ws->cs_sync_flush(ctx->gfx_cs);
+	ctx->b.ws->cs_sync_flush(ctx->b.dma_cs);
+	ctx->b.ws->cs_sync_flush(ctx->b.gfx_cs);
 
 	assert(resource->target == PIPE_BUFFER);
-	return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
+	return ctx->b.ws->buffer_commit(res->buf, box->x, box->width, commit);
 }
 
 bool si_common_context_init(struct r600_common_context *rctx,
@@ -175,7 +175,7 @@ bool si_common_context_init(struct r600_common_context *rctx,
 	if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
 		rctx->dma_cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
-						   si_flush_dma_cs,
+						   (void*)si_flush_dma_cs,
 						   rctx);
 	}

View File

@@ -47,7 +47,7 @@ static void cik_sdma_copy_buffer(struct si_context *ctx,
 	src_offset += rsrc->gpu_address;
 
 	ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
-	si_need_dma_space(&ctx->b, ncopy * 7, rdst, rsrc);
+	si_need_dma_space(ctx, ncopy * 7, rdst, rsrc);
 
 	for (i = 0; i < ncopy; i++) {
 		csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
@@ -92,7 +92,7 @@ static void cik_sdma_clear_buffer(struct pipe_context *ctx,
 	/* the same maximum size as for copying */
 	ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
-	si_need_dma_space(&sctx->b, ncopy * 5, rdst, NULL);
+	si_need_dma_space(sctx, ncopy * 5, rdst, NULL);
 
 	for (i = 0; i < ncopy; i++) {
 		csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
@@ -232,7 +232,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
 		      srcy + copy_height != (1 << 14)))) {
 		struct radeon_winsys_cs *cs = sctx->b.dma_cs;
 
-		si_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);
+		si_need_dma_space(sctx, 13, &rdst->resource, &rsrc->resource);
 
 		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
 						CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
@@ -395,7 +395,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
 		struct radeon_winsys_cs *cs = sctx->b.dma_cs;
 		uint32_t direction = linear == rdst ? 1u << 31 : 0;
 
-		si_need_dma_space(&sctx->b, 14, &rdst->resource, &rsrc->resource);
+		si_need_dma_space(sctx, 14, &rdst->resource, &rsrc->resource);
 
 		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
 						CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
@@ -489,7 +489,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
 		      dstx + copy_width != (1 << 14)))) {
 		struct radeon_winsys_cs *cs = sctx->b.dma_cs;
 
-		si_need_dma_space(&sctx->b, 15, &rdst->resource, &rsrc->resource);
+		si_need_dma_space(sctx, 15, &rdst->resource, &rsrc->resource);
 
 		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
 						CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));

View File

@@ -1108,10 +1108,9 @@ static void si_dump_dma(struct si_context *sctx,
 	fprintf(f, "SDMA Dump Done.\n");
 }
 
-void si_check_vm_faults(struct r600_common_context *ctx,
+void si_check_vm_faults(struct si_context *sctx,
 			struct radeon_saved_cs *saved, enum ring_type ring)
 {
-	struct si_context *sctx = (struct si_context *)ctx;
 	struct pipe_screen *screen = sctx->b.b.screen;
 	FILE *f;
 	uint64_t addr;

View File

@@ -59,7 +59,7 @@ static void si_dma_copy_buffer(struct si_context *ctx,
 	}
 
 	ncopy = DIV_ROUND_UP(size, max_size);
-	si_need_dma_space(&ctx->b, ncopy * 5, rdst, rsrc);
+	si_need_dma_space(ctx, ncopy * 5, rdst, rsrc);
 
 	for (i = 0; i < ncopy; i++) {
 		count = MIN2(size, max_size);
@@ -101,7 +101,7 @@ static void si_dma_clear_buffer(struct pipe_context *ctx,
 	/* the same maximum size as for copying */
 	ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
-	si_need_dma_space(&sctx->b, ncopy * 4, rdst, NULL);
+	si_need_dma_space(sctx, ncopy * 4, rdst, NULL);
 
 	for (i = 0; i < ncopy; i++) {
 		csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
@@ -190,7 +190,7 @@ static void si_dma_copy_tile(struct si_context *ctx,
 	mt = G_009910_MICRO_TILE_MODE(tile_mode);
 	size = copy_height * pitch;
 	ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
-	si_need_dma_space(&ctx->b, ncopy * 9, &rdst->resource, &rsrc->resource);
+	si_need_dma_space(ctx, ncopy * 9, &rdst->resource, &rsrc->resource);
 
 	for (i = 0; i < ncopy; i++) {
 		cheight = copy_height;

View File

@@ -24,22 +24,22 @@
 #include "si_pipe.h"
 #include "radeon/r600_cs.h"
 
-static void si_dma_emit_wait_idle(struct r600_common_context *rctx)
+static void si_dma_emit_wait_idle(struct si_context *sctx)
 {
-	struct radeon_winsys_cs *cs = rctx->dma_cs;
+	struct radeon_winsys_cs *cs = sctx->b.dma_cs;
 
 	/* NOP waits for idle on Evergreen and later. */
-	if (rctx->chip_class >= CIK)
+	if (sctx->b.chip_class >= CIK)
 		radeon_emit(cs, 0x00000000); /* NOP */
 	else
 		radeon_emit(cs, 0xf0000000); /* NOP */
 }
 
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
+void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
 		       struct r600_resource *dst, struct r600_resource *src)
 {
-	uint64_t vram = ctx->dma_cs->used_vram;
-	uint64_t gtt = ctx->dma_cs->used_gart;
+	uint64_t vram = ctx->b.dma_cs->used_vram;
+	uint64_t gtt = ctx->b.dma_cs->used_gart;
 
 	if (dst) {
 		vram += dst->vram_usage;
@@ -51,13 +51,13 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
 	}
 
 	/* Flush the GFX IB if DMA depends on it. */
-	if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+	if (radeon_emitted(ctx->b.gfx_cs, ctx->b.initial_gfx_cs_size) &&
 	    ((dst &&
-	      ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
+	      ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs, dst->buf,
 					       RADEON_USAGE_READWRITE)) ||
 	     (src &&
-	      ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
+	      ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs, src->buf,
 					       RADEON_USAGE_WRITE))))
 		si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
 
 	/* Flush if there's not enough space, or if the memory usage per IB
@@ -73,66 +73,66 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
 	 * engine busy while uploads are being submitted.
 	 */
 	num_dw++; /* for emit_wait_idle below */
-	if (!ctx->ws->cs_check_space(ctx->dma_cs, num_dw) ||
-	    ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
-	    !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt)) {
+	if (!ctx->b.ws->cs_check_space(ctx->b.dma_cs, num_dw) ||
+	    ctx->b.dma_cs->used_vram + ctx->b.dma_cs->used_gart > 64 * 1024 * 1024 ||
+	    !radeon_cs_memory_below_limit(ctx->screen, ctx->b.dma_cs, vram, gtt)) {
 		si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
-		assert((num_dw + ctx->dma_cs->current.cdw) <= ctx->dma_cs->current.max_dw);
+		assert((num_dw + ctx->b.dma_cs->current.cdw) <= ctx->b.dma_cs->current.max_dw);
 	}
 
 	/* Wait for idle if either buffer has been used in the IB before to
 	 * prevent read-after-write hazards.
 	 */
 	if ((dst &&
-	     ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
+	     ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs, dst->buf,
 					      RADEON_USAGE_READWRITE)) ||
 	    (src &&
-	     ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
+	     ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs, src->buf,
 					      RADEON_USAGE_WRITE)))
 		si_dma_emit_wait_idle(ctx);
 
 	if (dst) {
-		radeon_add_to_buffer_list(ctx, ctx->dma_cs, dst,
+		radeon_add_to_buffer_list(&ctx->b, ctx->b.dma_cs, dst,
 					  RADEON_USAGE_WRITE,
 					  RADEON_PRIO_SDMA_BUFFER);
 	}
 	if (src) {
-		radeon_add_to_buffer_list(ctx, ctx->dma_cs, src,
+		radeon_add_to_buffer_list(&ctx->b, ctx->b.dma_cs, src,
 					  RADEON_USAGE_READ,
 					  RADEON_PRIO_SDMA_BUFFER);
 	}
 
 	/* this function is called before all DMA calls, so increment this. */
-	ctx->num_dma_calls++;
+	ctx->b.num_dma_calls++;
 }
 
-void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
+void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
+		     struct pipe_fence_handle **fence)
 {
-	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-	struct radeon_winsys_cs *cs = rctx->dma_cs;
+	struct radeon_winsys_cs *cs = ctx->b.dma_cs;
 	struct radeon_saved_cs saved;
-	bool check_vm = (rctx->screen->debug_flags & DBG(CHECK_VM));
+	bool check_vm = (ctx->screen->debug_flags & DBG(CHECK_VM)) != 0;
 
 	if (!radeon_emitted(cs, 0)) {
 		if (fence)
-			rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
+			ctx->b.ws->fence_reference(fence, ctx->b.last_sdma_fence);
 		return;
 	}
 
 	if (check_vm)
-		si_save_cs(rctx->ws, cs, &saved, true);
+		si_save_cs(ctx->b.ws, cs, &saved, true);
 
-	rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
+	ctx->b.ws->cs_flush(cs, flags, &ctx->b.last_sdma_fence);
 	if (fence)
-		rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
+		ctx->b.ws->fence_reference(fence, ctx->b.last_sdma_fence);
 
 	if (check_vm) {
 		/* Use conservative timeout 800ms, after which we won't wait any
 		 * longer and assume the GPU is hung.
 		 */
-		rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
+		ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_sdma_fence, 800*1000*1000);
 
-		si_check_vm_faults(rctx, &saved, RING_DMA);
+		si_check_vm_faults(ctx, &saved, RING_DMA);
 		si_clear_saved_cs(&saved);
 	}
 }
@@ -140,10 +140,10 @@ void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence
 void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
 			    uint64_t offset, uint64_t size, unsigned value)
 {
-	struct r600_common_context *rctx = (struct r600_common_context*)sscreen->aux_context;
+	struct si_context *ctx = (struct si_context*)sscreen->aux_context;
 
 	mtx_lock(&sscreen->aux_context_lock);
-	rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
+	ctx->b.dma_clear_buffer(&ctx->b.b, dst, offset, size, value);
 	sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
 	mtx_unlock(&sscreen->aux_context_lock);
 }

View File

@@ -46,7 +46,7 @@ struct si_multi_fence {
 	/* If the context wasn't flushed at fence creation, this is non-NULL. */
 	struct {
-		struct r600_common_context *ctx;
+		struct si_context *ctx;
 		unsigned ib_index;
 	} gfx_unflushed;
@@ -174,14 +174,14 @@ void si_gfx_wait_fence(struct r600_common_context *ctx,
 	radeon_emit(cs, 4); /* poll interval */
 }
 
-static void si_add_fence_dependency(struct r600_common_context *rctx,
+static void si_add_fence_dependency(struct si_context *sctx,
 				    struct pipe_fence_handle *fence)
 {
-	struct radeon_winsys *ws = rctx->ws;
+	struct radeon_winsys *ws = sctx->b.ws;
 
-	if (rctx->dma_cs)
-		ws->cs_add_fence_dependency(rctx->dma_cs, fence);
-	ws->cs_add_fence_dependency(rctx->gfx_cs, fence);
+	if (sctx->b.dma_cs)
+		ws->cs_add_fence_dependency(sctx->b.dma_cs, fence);
+	ws->cs_add_fence_dependency(sctx->b.gfx_cs, fence);
 }
 
 static void si_add_syncobj_signal(struct r600_common_context *rctx,
@@ -351,7 +351,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
 		struct si_context *sctx;
 
 		sctx = (struct si_context *)threaded_context_unwrap_unsync(ctx);
-		if (rfence->gfx_unflushed.ctx == &sctx->b &&
+		if (rfence->gfx_unflushed.ctx == sctx &&
 		    rfence->gfx_unflushed.ib_index == sctx->b.num_gfx_cs_flushes) {
 			/* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
 			 * spec says:
@@ -496,8 +496,8 @@ static void si_flush_from_st(struct pipe_context *ctx,
 			     unsigned flags)
 {
 	struct pipe_screen *screen = ctx->screen;
-	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-	struct radeon_winsys *ws = rctx->ws;
+	struct si_context *sctx = (struct si_context *)ctx;
+	struct radeon_winsys *ws = sctx->b.ws;
 	struct pipe_fence_handle *gfx_fence = NULL;
 	struct pipe_fence_handle *sdma_fence = NULL;
 	bool deferred_fence = false;
@@ -511,18 +511,18 @@ static void si_flush_from_st(struct pipe_context *ctx,
 		assert(flags & PIPE_FLUSH_DEFERRED);
 		assert(fence);
 
-		si_fine_fence_set((struct si_context *)rctx, &fine, flags);
+		si_fine_fence_set(sctx, &fine, flags);
 	}
 
 	/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
-	if (rctx->dma_cs)
-		si_flush_dma_cs(rctx, rflags, fence ? &sdma_fence : NULL);
+	if (sctx->b.dma_cs)
+		si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);
 
-	if (!radeon_emitted(rctx->gfx_cs, rctx->initial_gfx_cs_size)) {
+	if (!radeon_emitted(sctx->b.gfx_cs, sctx->b.initial_gfx_cs_size)) {
 		if (fence)
-			ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
+			ws->fence_reference(&gfx_fence, sctx->b.last_gfx_fence);
 		if (!(flags & PIPE_FLUSH_DEFERRED))
-			ws->cs_sync_flush(rctx->gfx_cs);
+			ws->cs_sync_flush(sctx->b.gfx_cs);
 	} else {
 		/* Instead of flushing, create a deferred fence. Constraints:
 		 * - The state tracker must allow a deferred flush.
@@ -533,10 +533,10 @@ static void si_flush_from_st(struct pipe_context *ctx,
 		if (flags & PIPE_FLUSH_DEFERRED &&
 		    !(flags & PIPE_FLUSH_FENCE_FD) &&
 		    fence) {
-			gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx_cs);
+			gfx_fence = sctx->b.ws->cs_get_next_fence(sctx->b.gfx_cs);
 			deferred_fence = true;
 		} else {
-			si_flush_gfx_cs(rctx, rflags, fence ? &gfx_fence : NULL);
+			si_flush_gfx_cs(sctx, rflags, fence ? &gfx_fence : NULL);
 		}
 	}
@@ -564,8 +564,8 @@ static void si_flush_from_st(struct pipe_context *ctx,
 		multi_fence->sdma = sdma_fence;
 
 		if (deferred_fence) {
-			multi_fence->gfx_unflushed.ctx = rctx;
-			multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
+			multi_fence->gfx_unflushed.ctx = sctx;
+			multi_fence->gfx_unflushed.ib_index = sctx->b.num_gfx_cs_flushes;
 		}
 
 		multi_fence->fine = fine;
@@ -579,9 +579,9 @@ static void si_flush_from_st(struct pipe_context *ctx,
 	assert(!fine.buf);
 finish:
 	if (!(flags & PIPE_FLUSH_DEFERRED)) {
-		if (rctx->dma_cs)
-			ws->cs_sync_flush(rctx->dma_cs);
-		ws->cs_sync_flush(rctx->gfx_cs);
+		if (sctx->b.dma_cs)
+			ws->cs_sync_flush(sctx->b.dma_cs);
+		ws->cs_sync_flush(sctx->b.gfx_cs);
 	}
 }
@@ -615,14 +615,14 @@ static void si_fence_server_signal(struct pipe_context *ctx,
 static void si_fence_server_sync(struct pipe_context *ctx,
 				 struct pipe_fence_handle *fence)
 {
-	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+	struct si_context *sctx = (struct si_context *)ctx;
 	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
 
 	util_queue_fence_wait(&rfence->ready);
 
 	/* Unflushed fences from the same context are no-ops. */
 	if (rfence->gfx_unflushed.ctx &&
-	    rfence->gfx_unflushed.ctx == rctx)
+	    rfence->gfx_unflushed.ctx == sctx)
 		return;
 
 	/* All unflushed commands will not start execution before
@@ -633,9 +633,9 @@ static void si_fence_server_sync(struct pipe_context *ctx,
 	si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
 
 	if (rfence->sdma)
-		si_add_fence_dependency(rctx, rfence->sdma);
+		si_add_fence_dependency(sctx, rfence->sdma);
 	if (rfence->gfx)
-		si_add_fence_dependency(rctx, rfence->gfx);
+		si_add_fence_dependency(sctx, rfence->gfx);
 }
 
 void si_init_fence_functions(struct si_context *ctx)

View File

@@ -139,7 +139,7 @@ void si_flush_gfx_cs(void *context, unsigned flags,
 		 */
 		ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);
 
-		si_check_vm_faults(&ctx->b, &ctx->current_saved_cs->gfx, RING_GFX);
+		si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
 	}
 
 	if (ctx->current_saved_cs)

View File

@@ -714,7 +714,7 @@ void si_log_hw_flush(struct si_context *sctx);
 void si_log_draw_state(struct si_context *sctx, struct u_log_context *log);
 void si_log_compute_state(struct si_context *sctx, struct u_log_context *log);
 void si_init_debug_functions(struct si_context *sctx);
-void si_check_vm_faults(struct r600_common_context *ctx,
+void si_check_vm_faults(struct si_context *sctx,
 			struct radeon_saved_cs *saved, enum ring_type ring);
 bool si_replace_shader(unsigned num, struct ac_shader_binary *binary);
@@ -722,9 +722,10 @@ bool si_replace_shader(unsigned num, struct ac_shader_binary *binary);
 void si_init_dma_functions(struct si_context *sctx);
 
 /* si_dma_cs.c */
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
+void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
 		       struct r600_resource *dst, struct r600_resource *src);
-void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
+void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
+		     struct pipe_fence_handle **fence);
 void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
 			    uint64_t offset, uint64_t size, unsigned value);