radeonsi: rename dma_cs -> sdma_cs
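
The DMA command stream of a context is an SDMA (System DMA) IB, so call the
field sdma_cs to make the target ring explicit and keep it clearly distinct
from gfx_cs. This is a mechanical, behavior-preserving rename of the
struct si_context field; helper names such as si_flush_dma_cs and
si_need_dma_space are unchanged.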

Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Marek Olšák 2020-01-02 16:51:58 -05:00
parent cd6a4f7631
commit 3c265c2586
10 changed files with 46 additions and 46 deletions

View File

@@ -33,7 +33,7 @@ static void cik_sdma_copy_buffer(struct si_context *ctx,
uint64_t src_offset,
uint64_t size)
{
-struct radeon_cmdbuf *cs = ctx->dma_cs;
+struct radeon_cmdbuf *cs = ctx->sdma_cs;
unsigned i, ncopy, csize;
unsigned align = ~0u;
struct si_resource *sdst = si_resource(dst);
@@ -162,7 +162,7 @@ static bool si_sdma_v4_copy_texture(struct si_context *sctx,
/* Linear -> linear sub-window copy. */
if (ssrc->surface.is_linear &&
sdst->surface.is_linear) {
-struct radeon_cmdbuf *cs = sctx->dma_cs;
+struct radeon_cmdbuf *cs = sctx->sdma_cs;
/* Check if everything fits into the bitfields */
if (!(src_pitch <= (1 << 19) &&
@@ -228,7 +228,7 @@ static bool si_sdma_v4_copy_texture(struct si_context *sctx,
unsigned linear_slice_pitch = linear == ssrc ? src_slice_pitch : dst_slice_pitch;
uint64_t tiled_address = tiled == ssrc ? src_address : dst_address;
uint64_t linear_address = linear == ssrc ? src_address : dst_address;
-struct radeon_cmdbuf *cs = sctx->dma_cs;
+struct radeon_cmdbuf *cs = sctx->sdma_cs;
linear_address += linear->surface.u.gfx9.offset[linear_level];
@@ -381,7 +381,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
sctx->family != CHIP_KAVERI) ||
(srcx + copy_width != (1 << 14) &&
srcy + copy_height != (1 << 14)))) {
-struct radeon_cmdbuf *cs = sctx->dma_cs;
+struct radeon_cmdbuf *cs = sctx->sdma_cs;
si_need_dma_space(sctx, 13, &sdst->buffer, &ssrc->buffer);
@@ -542,7 +542,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
copy_width_aligned <= (1 << 14) &&
copy_height <= (1 << 14) &&
copy_depth <= (1 << 11)) {
-struct radeon_cmdbuf *cs = sctx->dma_cs;
+struct radeon_cmdbuf *cs = sctx->sdma_cs;
uint32_t direction = linear == sdst ? 1u << 31 : 0;
si_need_dma_space(sctx, 14, &sdst->buffer, &ssrc->buffer);
@@ -636,7 +636,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
(srcx + copy_width_aligned != (1 << 14) &&
srcy + copy_height_aligned != (1 << 14) &&
dstx + copy_width != (1 << 14)))) {
-struct radeon_cmdbuf *cs = sctx->dma_cs;
+struct radeon_cmdbuf *cs = sctx->sdma_cs;
si_need_dma_space(sctx, 15, &sdst->buffer, &ssrc->buffer);
@@ -680,7 +680,7 @@ static void cik_sdma_copy(struct pipe_context *ctx,
{
struct si_context *sctx = (struct si_context *)ctx;
-if (!sctx->dma_cs ||
+if (!sctx->sdma_cs ||
src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
dst->flags & PIPE_RESOURCE_FLAG_SPARSE)
goto fallback;
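
For context, the copy helpers in this file emit one SDMA packet per
bounded-size chunk, so a large copy becomes a short loop over packets. Below
is a minimal sketch of that loop shape; sdma_copy_chunks and the chunk limit
are illustrative assumptions, not names taken from the tree:

#include <stdint.h>

struct radeon_cmdbuf; /* opaque here; defined by the winsys headers */

static void sdma_copy_chunks(struct radeon_cmdbuf *cs,
                             uint64_t dst_va, uint64_t src_va, uint64_t size)
{
   /* Assumed per-packet byte limit; the real limit is per-ASIC. */
   const uint64_t max_csize = 1u << 20;

   while (size) {
      uint64_t csize = size < max_csize ? size : max_csize;
      /* One COPY packet covering csize bytes would be emitted to cs here. */
      (void)cs;
      dst_va += csize;
      src_va += csize;
      size -= csize;
   }
}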

View File

@@ -36,8 +36,8 @@ bool si_rings_is_buffer_referenced(struct si_context *sctx,
if (sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, buf, usage)) {
return true;
}
-if (radeon_emitted(sctx->dma_cs, 0) &&
-sctx->ws->cs_is_buffer_referenced(sctx->dma_cs, buf, usage)) {
+if (radeon_emitted(sctx->sdma_cs, 0) &&
+sctx->ws->cs_is_buffer_referenced(sctx->sdma_cs, buf, usage)) {
return true;
}
return false;
@@ -72,8 +72,8 @@ void *si_buffer_map_sync_with_rings(struct si_context *sctx,
busy = true;
}
}
-if (radeon_emitted(sctx->dma_cs, 0) &&
-sctx->ws->cs_is_buffer_referenced(sctx->dma_cs,
+if (radeon_emitted(sctx->sdma_cs, 0) &&
+sctx->ws->cs_is_buffer_referenced(sctx->sdma_cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
@@ -91,8 +91,8 @@ void *si_buffer_map_sync_with_rings(struct si_context *sctx,
/* We will be wait for the GPU. Wait for any offloaded
* CS flush to complete to avoid busy-waiting in the winsys. */
sctx->ws->cs_sync_flush(sctx->gfx_cs);
-if (sctx->dma_cs)
-sctx->ws->cs_sync_flush(sctx->dma_cs);
+if (sctx->sdma_cs)
+sctx->ws->cs_sync_flush(sctx->sdma_cs);
}
}
@@ -791,13 +791,13 @@ static bool si_resource_commit(struct pipe_context *pctx,
res->buf, RADEON_USAGE_READWRITE)) {
si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
-if (radeon_emitted(ctx->dma_cs, 0) &&
-ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
+if (radeon_emitted(ctx->sdma_cs, 0) &&
+ctx->ws->cs_is_buffer_referenced(ctx->sdma_cs,
res->buf, RADEON_USAGE_READWRITE)) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
-ctx->ws->cs_sync_flush(ctx->dma_cs);
+ctx->ws->cs_sync_flush(ctx->sdma_cs);
ctx->ws->cs_sync_flush(ctx->gfx_cs);
assert(resource->target == PIPE_BUFFER);

View File

@@ -35,7 +35,7 @@ static void si_dma_copy_buffer(struct si_context *ctx,
uint64_t src_offset,
uint64_t size)
{
-struct radeon_cmdbuf *cs = ctx->dma_cs;
+struct radeon_cmdbuf *cs = ctx->sdma_cs;
unsigned i, ncopy, count, max_size, sub_cmd, shift;
struct si_resource *sdst = si_resource(dst);
struct si_resource *ssrc = si_resource(src);
@@ -87,7 +87,7 @@ static void si_dma_copy(struct pipe_context *ctx,
{
struct si_context *sctx = (struct si_context *)ctx;
-if (sctx->dma_cs == NULL ||
+if (sctx->sdma_cs == NULL ||
src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
goto fallback;

View File

@@ -27,7 +27,7 @@
static void si_dma_emit_wait_idle(struct si_context *sctx)
{
-struct radeon_cmdbuf *cs = sctx->dma_cs;
+struct radeon_cmdbuf *cs = sctx->sdma_cs;
/* NOP waits for idle. */
if (sctx->chip_class >= GFX7)
@@ -39,7 +39,7 @@ static void si_dma_emit_wait_idle(struct si_context *sctx)
void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
uint64_t offset)
{
-struct radeon_cmdbuf *cs = sctx->dma_cs;
+struct radeon_cmdbuf *cs = sctx->sdma_cs;
uint64_t va = dst->gpu_address + offset;
if (sctx->chip_class == GFX6) {
@@ -67,7 +67,7 @@ void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned clear_value)
{
-struct radeon_cmdbuf *cs = sctx->dma_cs;
+struct radeon_cmdbuf *cs = sctx->sdma_cs;
unsigned i, ncopy, csize;
struct si_resource *sdst = si_resource(dst);
@@ -129,8 +129,8 @@ void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
struct si_resource *dst, struct si_resource *src)
{
struct radeon_winsys *ws = ctx->ws;
-uint64_t vram = ctx->dma_cs->used_vram;
-uint64_t gtt = ctx->dma_cs->used_gart;
+uint64_t vram = ctx->sdma_cs->used_vram;
+uint64_t gtt = ctx->sdma_cs->used_gart;
if (dst) {
vram += dst->vram_usage;
@@ -166,31 +166,31 @@ void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
*/
num_dw++; /* for emit_wait_idle below */
if (!ctx->sdma_uploads_in_progress &&
-(!ws->cs_check_space(ctx->dma_cs, num_dw, false) ||
-ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
-!radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt))) {
+(!ws->cs_check_space(ctx->sdma_cs, num_dw, false) ||
+ctx->sdma_cs->used_vram + ctx->sdma_cs->used_gart > 64 * 1024 * 1024 ||
+!radeon_cs_memory_below_limit(ctx->screen, ctx->sdma_cs, vram, gtt))) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
-assert((num_dw + ctx->dma_cs->current.cdw) <= ctx->dma_cs->current.max_dw);
+assert((num_dw + ctx->sdma_cs->current.cdw) <= ctx->sdma_cs->current.max_dw);
}
/* Wait for idle if either buffer has been used in the IB before to
* prevent read-after-write hazards.
*/
if ((dst &&
-ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
+ws->cs_is_buffer_referenced(ctx->sdma_cs, dst->buf,
RADEON_USAGE_READWRITE)) ||
(src &&
-ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
+ws->cs_is_buffer_referenced(ctx->sdma_cs, src->buf,
RADEON_USAGE_WRITE)))
si_dma_emit_wait_idle(ctx);
unsigned sync = ctx->sdma_uploads_in_progress ? 0 : RADEON_USAGE_SYNCHRONIZED;
if (dst) {
-ws->cs_add_buffer(ctx->dma_cs, dst->buf, RADEON_USAGE_WRITE | sync,
+ws->cs_add_buffer(ctx->sdma_cs, dst->buf, RADEON_USAGE_WRITE | sync,
dst->domains, 0);
}
if (src) {
-ws->cs_add_buffer(ctx->dma_cs, src->buf, RADEON_USAGE_READ | sync,
+ws->cs_add_buffer(ctx->sdma_cs, src->buf, RADEON_USAGE_READ | sync,
src->domains, 0);
}
@@ -201,7 +201,7 @@ void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
struct pipe_fence_handle **fence)
{
-struct radeon_cmdbuf *cs = ctx->dma_cs;
+struct radeon_cmdbuf *cs = ctx->sdma_cs;
struct radeon_saved_cs saved;
bool check_vm = (ctx->screen->debug_flags & DBG(CHECK_VM)) != 0;
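
Read together, the si_need_dma_space hunks keep the existing contract; the
hypothetical wrapper below condenses what a caller relies on (every name
except sdma_emit_example comes from this diff, and the code assumes
radeonsi's internal headers):

static void sdma_emit_example(struct si_context *ctx, unsigned num_dw,
                              struct si_resource *dst, struct si_resource *src)
{
   /* Reserves num_dw dwords on the SDMA IB: flushes it first if
    * cs_check_space fails or the IB's VRAM+GTT usage exceeds the 64 MB
    * watermark, emits a wait-idle NOP when dst or src is already referenced
    * by the IB (read-after-write hazard), and adds both buffers to the IB's
    * buffer list. */
   si_need_dma_space(ctx, num_dw, dst, src);

   struct radeon_cmdbuf *cs = ctx->sdma_cs; /* the renamed field */
   (void)cs; /* emission of at most num_dw dwords would follow */
}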

View File

@@ -180,8 +180,8 @@ static void si_add_fence_dependency(struct si_context *sctx,
{
struct radeon_winsys *ws = sctx->ws;
-if (sctx->dma_cs)
-ws->cs_add_fence_dependency(sctx->dma_cs, fence, 0);
+if (sctx->sdma_cs)
+ws->cs_add_fence_dependency(sctx->sdma_cs, fence, 0);
ws->cs_add_fence_dependency(sctx->gfx_cs, fence, 0);
}
@@ -513,7 +513,7 @@ static void si_flush_from_st(struct pipe_context *ctx,
}
/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
-if (sctx->dma_cs)
+if (sctx->sdma_cs)
si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);
if (!radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size)) {
@@ -577,8 +577,8 @@ static void si_flush_from_st(struct pipe_context *ctx,
assert(!fine.buf);
finish:
if (!(flags & (PIPE_FLUSH_DEFERRED | PIPE_FLUSH_ASYNC))) {
-if (sctx->dma_cs)
-ws->cs_sync_flush(sctx->dma_cs);
+if (sctx->sdma_cs)
+ws->cs_sync_flush(sctx->sdma_cs);
ws->cs_sync_flush(sctx->gfx_cs);
}
}
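
The two si_flush_from_st hunks encode the ordering stated in the comment: the
SDMA IB is a preamble to the gfx IB, so it is flushed first, and both rings
are synced only when the flush is neither deferred nor async. Condensed
directly from the hunks above:

   if (sctx->sdma_cs)
      si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);
   /* ... gfx flush and fence handling ... */
   if (!(flags & (PIPE_FLUSH_DEFERRED | PIPE_FLUSH_ASYNC))) {
      if (sctx->sdma_cs)
         ws->cs_sync_flush(sctx->sdma_cs);
      ws->cs_sync_flush(sctx->gfx_cs);
   }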

View File

@@ -110,7 +110,7 @@ void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
* If the driver flushes the GFX IB internally, and it should never ask
* for a fence handle.
*/
-assert(!radeon_emitted(ctx->dma_cs, 0) || fence == NULL);
+assert(!radeon_emitted(ctx->sdma_cs, 0) || fence == NULL);
/* Update the sdma_uploads list by flushing the uploader. */
u_upload_unmap(ctx->b.const_uploader);
@@ -132,7 +132,7 @@ void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
si_unref_sdma_uploads(ctx);
/* Flush SDMA (preamble IB). */
-if (radeon_emitted(ctx->dma_cs, 0))
+if (radeon_emitted(ctx->sdma_cs, 0))
si_flush_dma_cs(ctx, flags, NULL);
if (radeon_emitted(ctx->prim_discard_compute_cs, 0)) {

View File

@@ -270,8 +270,8 @@ static void si_destroy_context(struct pipe_context *context)
if (sctx->gfx_cs)
sctx->ws->cs_destroy(sctx->gfx_cs);
-if (sctx->dma_cs)
-sctx->ws->cs_destroy(sctx->dma_cs);
+if (sctx->sdma_cs)
+sctx->ws->cs_destroy(sctx->sdma_cs);
if (sctx->ctx)
sctx->ws->ctx_destroy(sctx->ctx);
@@ -494,12 +494,12 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
* https://gitlab.freedesktop.org/mesa/mesa/issues/1907
*/
(sctx->chip_class != GFX10 || sscreen->debug_flags & DBG(FORCE_SDMA))) {
-sctx->dma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA,
+sctx->sdma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA,
(void*)si_flush_dma_cs,
sctx, stop_exec_on_failure);
}
-bool use_sdma_upload = sscreen->info.has_dedicated_vram && sctx->dma_cs;
+bool use_sdma_upload = sscreen->info.has_dedicated_vram && sctx->sdma_cs;
sctx->b.const_uploader = u_upload_create(&sctx->b, 256 * 1024,
0, PIPE_USAGE_DEFAULT,
SI_RESOURCE_FLAG_32BIT |

View File

@@ -865,7 +865,7 @@ struct si_context {
struct radeon_winsys *ws;
struct radeon_winsys_ctx *ctx;
struct radeon_cmdbuf *gfx_cs; /* compute IB if graphics is disabled */
-struct radeon_cmdbuf *dma_cs;
+struct radeon_cmdbuf *sdma_cs;
struct pipe_fence_handle *last_gfx_fence;
struct pipe_fence_handle *last_sdma_fence;
struct si_resource *eop_bug_scratch;

View File

@@ -112,7 +112,7 @@ void si_test_dma_perf(struct si_screen *sscreen)
unsigned cs_dwords_per_thread =
test_cs ? cs_dwords_per_thread_list[cs_method % NUM_SHADERS] : 0;
-if (test_sdma && !sctx->dma_cs)
+if (test_sdma && !sctx->sdma_cs)
continue;
if (sctx->chip_class == GFX6) {

View File

@@ -53,7 +53,7 @@ bool si_prepare_for_dma_blit(struct si_context *sctx,
unsigned src_level,
const struct pipe_box *src_box)
{
-if (!sctx->dma_cs)
+if (!sctx->sdma_cs)
return false;
if (dst->surface.bpe != src->surface.bpe)