diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c b/src/gallium/drivers/freedreno/freedreno_batch.c
index a1578506c2a..84dfa898c51 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch.c
@@ -433,7 +433,7 @@ static void
 flush_write_batch(struct fd_resource *rsc)
 {
    struct fd_batch *b = NULL;
-   fd_batch_reference(&b, rsc->write_batch);
+   fd_batch_reference_locked(&b, rsc->write_batch);
 
    mtx_unlock(&b->ctx->screen->lock);
    fd_batch_flush(b, true, false);
diff --git a/src/gallium/drivers/freedreno/freedreno_batch.h b/src/gallium/drivers/freedreno/freedreno_batch.h
index 7b723db64af..e771ad6c0bd 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.h
+++ b/src/gallium/drivers/freedreno/freedreno_batch.h
@@ -243,6 +243,10 @@ void __fd_batch_destroy(struct fd_batch *batch);
  * WARNING the _locked() version can briefly drop the lock.  Without
  * recursive mutexes, I'm not sure there is much else we can do (since
  * __fd_batch_destroy() needs to unref resources)
+ *
+ * WARNING you must acquire the screen->lock and use the _locked()
+ * version in cases where the batch being ref'd can disappear under
+ * you.
  */
 
 /* fwd-decl prototypes to untangle header dependency :-/ */
diff --git a/src/gallium/drivers/freedreno/freedreno_resource.c b/src/gallium/drivers/freedreno/freedreno_resource.c
index f2ad2c517c6..163fa70312a 100644
--- a/src/gallium/drivers/freedreno/freedreno_resource.c
+++ b/src/gallium/drivers/freedreno/freedreno_resource.c
@@ -373,7 +373,9 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
 {
    struct fd_batch *write_batch = NULL;
 
-   fd_batch_reference(&write_batch, rsc->write_batch);
+   mtx_lock(&ctx->screen->lock);
+   fd_batch_reference_locked(&write_batch, rsc->write_batch);
+   mtx_unlock(&ctx->screen->lock);
 
    if (usage & PIPE_TRANSFER_WRITE) {
       struct fd_batch *batch, *batches[32] = {};
@@ -387,7 +389,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
       mtx_lock(&ctx->screen->lock);
       batch_mask = rsc->batch_mask;
       foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
-         fd_batch_reference(&batches[batch->idx], batch);
+         fd_batch_reference_locked(&batches[batch->idx], batch);
       mtx_unlock(&ctx->screen->lock);
 
       foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
@@ -501,7 +503,10 @@ fd_resource_transfer_map(struct pipe_context *pctx,
          fd_blit_to_staging(ctx, trans);
 
          struct fd_batch *batch = NULL;
-         fd_batch_reference(&batch, staging_rsc->write_batch);
+
+         fd_context_lock(ctx);
+         fd_batch_reference_locked(&batch, staging_rsc->write_batch);
+         fd_context_unlock(ctx);
 
          /* we can't fd_bo_cpu_prep() until the blit to staging
           * is submitted to kernel.. in that case write_batch
@@ -550,7 +555,9 @@ fd_resource_transfer_map(struct pipe_context *pctx,
       struct fd_batch *write_batch = NULL;
 
       /* hold a reference, so it doesn't disappear under us: */
-      fd_batch_reference(&write_batch, rsc->write_batch);
+      fd_context_lock(ctx);
+      fd_batch_reference_locked(&write_batch, rsc->write_batch);
+      fd_context_unlock(ctx);
 
       if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
           write_batch->back_blit) {
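
The rule the new freedreno_batch.h comment spells out is the invariant all four call sites above were violating: a batch reachable through rsc->write_batch (or the screen's batch cache) can be destroyed by another thread the moment its last reference is dropped, so loading the pointer and bumping its refcount must happen atomically under screen->lock. Below is a minimal sketch of the before/after pattern, using only the names visible in this patch; take_write_batch_ref() is a hypothetical helper for illustration, not a function the driver defines, and the sketch assumes freedreno's internal headers for fd_context/fd_batch/fd_resource and the c11 mtx_* API.

   /* Hypothetical helper showing the locking rule this patch enforces.
    * rsc->write_batch can be cleared or destroyed by another thread, so
    * the pointer load and the refcount increment must be atomic with
    * respect to batch destruction.  Sketch only; not driver code.
    */
   static struct fd_batch *
   take_write_batch_ref(struct fd_context *ctx, struct fd_resource *rsc)
   {
      struct fd_batch *batch = NULL;

      /* Racy (what the patch removes): the batch may be freed between
       * reading rsc->write_batch and incrementing its refcount:
       *
       *    fd_batch_reference(&batch, rsc->write_batch);
       */

      /* Safe (what the patch adds): hold screen->lock across the ref.
       * The _locked() variant expects the caller to already own the lock.
       */
      mtx_lock(&ctx->screen->lock);
      fd_batch_reference_locked(&batch, rsc->write_batch);
      mtx_unlock(&ctx->screen->lock);

      /* batch now stays alive until the caller drops its reference
       * with fd_batch_reference(&batch, NULL).
       */
      return batch;
   }

The fd_context_lock()/fd_context_unlock() pair used in the transfer_map hunks appears, from its usage here, to be shorthand for the same mtx_lock/mtx_unlock on ctx->screen->lock. The plain fd_batch_reference() remains correct only where the caller already holds a reference or otherwise knows the batch cannot disappear.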