freedreno: Add submit lock

Add a lock to synchronize batch flush (which can be triggered from a
different ctx) with cmdstream emit.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7342>
commit 02298ed1fc, parent 290f827928
Authored by Rob Clark, 2020-10-28 10:47:07 -07:00; committed by Marge Bot
9 changed files with 88 additions and 17 deletions
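For illustration, the emit-side pattern this lock enables looks roughly as follows. This is a minimal sketch, not code from the patch: emit_something() is a hypothetical caller, while fd_context_batch_locked(), fd_batch_unlock_submit() and fd_batch_reference() are the helpers added/used in the changes below.

#include "freedreno_batch.h"
#include "freedreno_context.h"

/* Hypothetical caller, sketching the emit-side locking pattern: */
static void
emit_something(struct fd_context *ctx)
{
   /* Returns a referenced batch with the submit-lock held, so a flush
    * triggered from another ctx cannot tear down the cmdstream while
    * we are emitting:
    */
   struct fd_batch *batch = fd_context_batch_locked(ctx);

   /* ... write cmdstream into batch->draw ... */

   /* Drop the submit-lock before the reference, since dropping the last
    * reference can destroy the batch (and its lock):
    */
   fd_batch_unlock_submit(batch);
   fd_batch_reference(&batch, NULL);
}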

src/gallium/drivers/freedreno/a4xx/fd4_query.c

@ -115,10 +115,11 @@ time_elapsed_enable(struct fd_context *ctx, struct fd_ringbuffer *ring)
* just hard coded. If we start exposing more countables than we
* have counters, we will need to be more clever.
*/
struct fd_batch *batch = fd_context_batch(ctx);
struct fd_batch *batch = fd_context_batch_locked(ctx);
fd_wfi(batch, ring);
OUT_PKT0(ring, REG_A4XX_CP_PERFCTR_CP_SEL_0, 1);
OUT_RING(ring, CP_ALWAYS_COUNT);
fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}

src/gallium/drivers/freedreno/a6xx/fd6_blitter.c

@ -808,6 +808,9 @@ handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
fd_screen_unlock(ctx->screen);
bool ret = fd_batch_lock_submit(batch);
assert(ret);
/* Clearing last_fence must come after the batch dependency tracking
* (resource_read()/resource_write()), as that can trigger a flush,
* re-populating last_fence
@ -841,6 +844,8 @@ handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
fd6_event_write(batch, batch->draw, CACHE_FLUSH_TS, true);
fd6_cache_inv(batch, batch->draw);
fd_batch_unlock_submit(batch);
fd_resource(info->dst.resource)->valid = true;
batch->needs_flush = true;

src/gallium/drivers/freedreno/freedreno_batch.c

@ -127,6 +127,8 @@ fd_batch_create(struct fd_context *ctx, bool nondraw)
batch->ctx = ctx;
batch->nondraw = nondraw;
simple_mtx_init(&batch->submit_lock, mtx_plain);
batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
@ -294,6 +296,9 @@ __fd_batch_destroy(struct fd_batch *batch)
util_copy_framebuffer_state(&batch->framebuffer, NULL);
batch_fini(batch);
simple_mtx_destroy(&batch->submit_lock);
free(batch);
fd_screen_lock(ctx->screen);
}
@ -319,7 +324,7 @@ batch_flush(struct fd_batch *batch)
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
if (batch->flushed)
if (!fd_batch_lock_submit(batch))
return;
batch->needs_flush = false;
@ -332,6 +337,8 @@ batch_flush(struct fd_batch *batch)
batch_flush_reset_dependencies(batch, true);
batch->flushed = true;
if (batch == batch->ctx->batch)
fd_batch_reference(&batch->ctx->batch, NULL);
fd_fence_ref(&batch->ctx->last_fence, batch->fence);
@ -341,8 +348,14 @@ batch_flush(struct fd_batch *batch)
debug_assert(batch->reference.count > 0);
fd_screen_lock(batch->ctx->screen);
/* NOTE: remove=false removes the batch from the hashtable, so future
* lookups won't cache-hit a flushed batch, but leaves the weak reference
* to the batch to avoid having multiple batches with same batch->idx, as
* that causes all sorts of hilarity.
*/
fd_bc_invalidate_batch(batch, false);
fd_screen_unlock(batch->ctx->screen);
fd_batch_unlock_submit(batch);
}
/* NOTE: could drop the last ref to batch
@ -357,13 +370,7 @@ fd_batch_flush(struct fd_batch *batch)
* up used_resources
*/
fd_batch_reference(&tmp, batch);
batch_flush(tmp);
if (batch == batch->ctx->batch) {
fd_batch_reference(&batch->ctx->batch, NULL);
}
fd_batch_reference(&tmp, NULL);
}

src/gallium/drivers/freedreno/freedreno_batch.h

@ -30,6 +30,7 @@
#include "util/u_inlines.h"
#include "util/u_queue.h"
#include "util/list.h"
#include "util/simple_mtx.h"
#include "freedreno_context.h"
#include "freedreno_util.h"
@ -57,6 +58,11 @@ struct fd_batch {
struct fd_context *ctx;
/* submit_lock serializes cmdstream emission and flush.  Acquire before
* screen->lock.
*/
simple_mtx_t submit_lock;
/* do we need to mem2gmem before rendering. We don't, if for example,
* there was a glClear() that invalidated the entire previous buffer
* contents. Keep track of which buffer(s) are cleared, or needs
@ -297,6 +303,26 @@ fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
fd_screen_unlock(ctx->screen);
}
static inline void
fd_batch_unlock_submit(struct fd_batch *batch)
{
simple_mtx_unlock(&batch->submit_lock);
}
/**
* Returns true if the submit-lock was acquired, false if it could not be
* acquired, ie. the batch was already flushed.
*/
static inline bool MUST_CHECK
fd_batch_lock_submit(struct fd_batch *batch)
{
simple_mtx_lock(&batch->submit_lock);
bool ret = !batch->flushed;
if (!ret)
fd_batch_unlock_submit(batch);
return ret;
}
static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
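To illustrate the lock ordering described above (submit-lock acquired outside screen-lock), a flush in this scheme roughly follows the shape below. This is a simplified sketch of what batch_flush() in freedreno_batch.c does, assuming the driver's usual headers; flush_sketch() is a hypothetical name, not a real function:

static void
flush_sketch(struct fd_batch *batch)
{
   /* If someone else already flushed the batch, the submit-lock cannot
    * be taken and there is nothing left to do:
    */
   if (!fd_batch_lock_submit(batch))
      return;

   /* ... emit remaining cmdstream and kick off the submit ... */

   /* screen->lock nests inside the submit-lock: */
   fd_screen_lock(batch->ctx->screen);
   fd_bc_invalidate_batch(batch, false);
   fd_screen_unlock(batch->ctx->screen);

   fd_batch_unlock_submit(batch);
}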

src/gallium/drivers/freedreno/freedreno_context.c

@ -202,13 +202,18 @@ fd_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
if (!ctx->batch)
return;
struct fd_batch *batch = fd_context_batch_locked(ctx);
ctx->batch->needs_flush = true;
if (ctx->screen->gpu_id >= 500) {
fd_emit_string5(ctx->batch->draw, string, len);
fd_emit_string5(batch->draw, string, len);
} else {
fd_emit_string(ctx->batch->draw, string, len);
fd_emit_string(batch->draw, string, len);
}
fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}
/**
@ -256,12 +261,32 @@ fd_context_batch(struct fd_context *ctx)
fd_batch_reference(&ctx->batch, batch);
fd_context_all_dirty(ctx);
}
fd_context_switch_to(ctx, batch);
return batch;
}
/**
* Return a locked reference to the current batch.  A batch with the
* submit-lock held cannot be flushed out from under the caller.  The
* submit-lock should be acquired before screen-lock, and held while
* emitting cmdstream.
*/
struct fd_batch *
fd_context_batch_locked(struct fd_context *ctx)
{
struct fd_batch *batch = NULL;
while (!batch) {
batch = fd_context_batch(ctx);
if (!fd_batch_lock_submit(batch)) {
fd_batch_reference(&batch, NULL);
}
}
return batch;
}
void
fd_context_destroy(struct pipe_context *pctx)
{

src/gallium/drivers/freedreno/freedreno_context.h

@ -504,6 +504,7 @@ fd_supported_prim(struct fd_context *ctx, unsigned prim)
void fd_context_switch_from(struct fd_context *ctx);
void fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch);
struct fd_batch * fd_context_batch(struct fd_context *ctx);
struct fd_batch * fd_context_batch_locked(struct fd_context *ctx);
void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);

src/gallium/drivers/freedreno/freedreno_draw.c

@ -268,7 +268,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
batch_draw_tracking(batch, info);
if (unlikely(ctx->batch != batch)) {
while (unlikely(!fd_batch_lock_submit(batch))) {
/* The current batch was flushed in batch_draw_tracking()
* so start anew. We know this won't happen a second time
* since we are dealing with a fresh batch:
@ -330,6 +330,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
fd_context_all_dirty(ctx);
fd_batch_check_size(batch);
fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
if (info == &new_info)
@ -406,7 +407,7 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
batch_clear_tracking(batch, buffers);
if (unlikely(ctx->batch != batch)) {
while (unlikely(!fd_batch_lock_submit(batch))) {
/* The current batch was flushed in batch_clear_tracking()
* so start anew. We know this won't happen a second time
* since we are dealing with a fresh batch:
@ -445,11 +446,13 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
}
}
fd_batch_check_size(batch);
fd_batch_unlock_submit(batch);
if (fallback) {
fd_blitter_clear(pctx, buffers, color, depth, stencil);
}
fd_batch_check_size(batch);
fd_batch_reference(&batch, NULL);
}

src/gallium/drivers/freedreno/freedreno_query_acc.c

@ -113,8 +113,9 @@ fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q)
* need to just emit the capture at this moment.
*/
if (skip_begin_query(q->type)) {
struct fd_batch *batch = fd_context_batch(ctx);
struct fd_batch *batch = fd_context_batch_locked(ctx);
fd_acc_query_resume(aq, batch);
fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}
}

src/gallium/drivers/freedreno/freedreno_query_hw.c

@ -135,7 +135,7 @@ fd_hw_destroy_query(struct fd_context *ctx, struct fd_query *q)
static void
fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q)
{
struct fd_batch *batch = fd_context_batch(ctx);
struct fd_batch *batch = fd_context_batch_locked(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
DBG("%p", q);
@ -150,13 +150,14 @@ fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q)
assert(list_is_empty(&hq->list));
list_addtail(&hq->list, &ctx->hw_active_queries);
fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}
static void
fd_hw_end_query(struct fd_context *ctx, struct fd_query *q)
{
struct fd_batch *batch = fd_context_batch(ctx);
struct fd_batch *batch = fd_context_batch_locked(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
DBG("%p", q);
@ -167,6 +168,7 @@ fd_hw_end_query(struct fd_context *ctx, struct fd_query *q)
/* remove from active list: */
list_delinit(&hq->list);
fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}