freedreno: fix race condition

rsc->write_batch can be cleared behind our back, so we need to acquire
the lock *before* deref'ing.

Signed-off-by: Rob Clark <robdclark@gmail.com>
Author: Rob Clark <robdclark@gmail.com>
Date:   2019-02-19 09:29:49 -05:00
commit 5d4fa194b8
parent 3090c6b9e9
3 changed files with 16 additions and 5 deletions
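
For context, a minimal standalone sketch of this bug class, using pthreads; all names here (struct obj, shared, get_ref_*) are illustrative, not freedreno's. It shows why reading a shared pointer before taking the lock that protects it races against another thread clearing that pointer and dropping the last reference:

#include <pthread.h>
#include <stddef.h>

struct obj {
	int refcount;                    /* protected by 'lock' */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *shared;           /* protected by 'lock'; may be cleared anytime */

/* BUGGY: 'shared' is deref'd before the lock is taken, so another thread
 * can clear the pointer and free the object between the read and the ref:
 */
static struct obj *get_ref_racy(void)
{
	struct obj *o = shared;          /* unsynchronized read */
	if (!o)
		return NULL;
	pthread_mutex_lock(&lock);       /* too late: 'o' may already be freed */
	o->refcount++;
	pthread_mutex_unlock(&lock);
	return o;
}

/* FIXED: take the lock first, then read and ref in one critical section,
 * the same ordering this patch enforces via fd_batch_reference_locked():
 */
static struct obj *get_ref_locked(void)
{
	struct obj *o;

	pthread_mutex_lock(&lock);
	o = shared;                      /* read under the lock */
	if (o)
		o->refcount++;
	pthread_mutex_unlock(&lock);
	return o;
}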


@@ -433,7 +433,7 @@ static void
 flush_write_batch(struct fd_resource *rsc)
 {
 	struct fd_batch *b = NULL;
-	fd_batch_reference(&b, rsc->write_batch);
+	fd_batch_reference_locked(&b, rsc->write_batch);
 
 	mtx_unlock(&b->ctx->screen->lock);
 	fd_batch_flush(b, true, false);


@@ -243,6 +243,10 @@ void __fd_batch_destroy(struct fd_batch *batch);
  * WARNING the _locked() version can briefly drop the lock. Without
  * recursive mutexes, I'm not sure there is much else we can do (since
  * __fd_batch_destroy() needs to unref resources)
+ *
+ * WARNING you must acquire the screen->lock and use the _locked()
+ * version in case that the batch being ref'd can disappear under
+ * you.
  */
 /* fwd-decl prototypes to untangle header dependency :-/ */
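
As a hedged illustration of the rule this comment adds (a sketch of the calling convention only, built from the helpers visible in this commit's hunks, not a complete function):

struct fd_batch *b = NULL;

mtx_lock(&ctx->screen->lock);
fd_batch_reference_locked(&b, rsc->write_batch);   /* ref taken under the lock */
mtx_unlock(&ctx->screen->lock);

/* 'b' now holds its own reference (or is NULL), so it remains valid
 * after the lock is dropped:
 */
if (b) {
	fd_batch_flush(b, true, false);
	fd_batch_reference(&b, NULL);   /* drop our reference */
}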


@@ -373,7 +373,9 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
 {
 	struct fd_batch *write_batch = NULL;
 
-	fd_batch_reference(&write_batch, rsc->write_batch);
+	mtx_lock(&ctx->screen->lock);
+	fd_batch_reference_locked(&write_batch, rsc->write_batch);
+	mtx_unlock(&ctx->screen->lock);
 
 	if (usage & PIPE_TRANSFER_WRITE) {
 		struct fd_batch *batch, *batches[32] = {};
@@ -387,7 +389,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
 		mtx_lock(&ctx->screen->lock);
 		batch_mask = rsc->batch_mask;
 		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
-			fd_batch_reference(&batches[batch->idx], batch);
+			fd_batch_reference_locked(&batches[batch->idx], batch);
 		mtx_unlock(&ctx->screen->lock);
 
 		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
@@ -501,7 +503,10 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 			fd_blit_to_staging(ctx, trans);
 
 			struct fd_batch *batch = NULL;
-			fd_batch_reference(&batch, staging_rsc->write_batch);
+			fd_context_lock(ctx);
+			fd_batch_reference_locked(&batch, staging_rsc->write_batch);
+			fd_context_unlock(ctx);
 
 			/* we can't fd_bo_cpu_prep() until the blit to staging
 			 * is submitted to kernel.. in that case write_batch
@@ -550,7 +555,9 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 		struct fd_batch *write_batch = NULL;
 
 		/* hold a reference, so it doesn't disappear under us: */
-		fd_batch_reference(&write_batch, rsc->write_batch);
+		fd_context_lock(ctx);
+		fd_batch_reference_locked(&write_batch, rsc->write_batch);
+		fd_context_unlock(ctx);
 
 		if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
 				write_batch->back_blit) {