freedreno: rework fence tracking

ctx->last_fence isn't such a terribly clever idea if batches can be
flushed out of order.  Instead, each batch now holds a fence, which is
created before the batch is flushed (useful for the next patch) and
populated after the batch is actually flushed.

Signed-off-by: Rob Clark <robdclark@gmail.com>
Author: Rob Clark <robdclark@gmail.com>
Date:   2017-11-19 10:36:19 -05:00
commit 15ebf387fc (parent deb57fb237)
12 changed files with 117 additions and 69 deletions
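
In outline, the scheme this patch introduces: a fence object is created
alongside the batch, callers may take references to it immediately, and the
fence only becomes waitable once the batch's submit populates it with a
timestamp.  A minimal stand-alone model of that lifecycle (simplified types
and names, not the driver's actual structs):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct batch_model;

/* The fence holds a weak batch pointer until populated; after that the
 * timestamp is valid and the batch pointer is NULL. */
struct fence_model {
	struct batch_model *batch;
	uint32_t timestamp;
};

struct batch_model {
	struct fence_model *fence;
};

/* batch_init() counterpart: the fence exists before any flush. */
static struct batch_model *batch_model_create(void)
{
	struct batch_model *b = calloc(1, sizeof(*b));
	b->fence = calloc(1, sizeof(*b->fence));
	b->fence->batch = b;
	return b;
}

/* fd_fence_populate() counterpart: fill in the timestamp and drop the
 * weak reference; populating twice is a no-op. */
static void fence_model_populate(struct fence_model *f, uint32_t ts)
{
	if (!f->batch)
		return;
	f->timestamp = ts;
	f->batch = NULL;
}

/* flush_ring() counterpart: the kernel submit produces the timestamp. */
static void batch_model_flush(struct batch_model *b, uint32_t kernel_ts)
{
	fence_model_populate(b->fence, kernel_ts);
}

int main(void)
{
	struct batch_model *b = batch_model_create();
	struct fence_model *f = b->fence;   /* grab the fence before flush */

	batch_model_flush(b, 42);
	assert(!f->batch);                  /* weak ref dropped */
	printf("timestamp: %u\n", f->timestamp);

	free(f);
	free(b);
	return 0;
}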

src/gallium/drivers/freedreno/freedreno_batch.c

@@ -31,6 +31,7 @@
#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"
@@ -62,6 +63,7 @@ batch_init(struct fd_batch *batch)
fd_ringbuffer_set_parent(batch->binning, batch->gmem);
batch->in_fence_fd = -1;
batch->fence = fd_fence_create(batch);
batch->cleared = batch->partial_cleared = 0;
batch->restore = batch->resolve = 0;
@@ -115,6 +117,11 @@ batch_fini(struct fd_batch *batch)
if (batch->in_fence_fd != -1)
close(batch->in_fence_fd);
/* in case batch wasn't flushed but fence was created: */
fd_fence_populate(batch->fence, 0, -1);
fd_fence_ref(NULL, &batch->fence, NULL);
fd_ringbuffer_del(batch->draw);
fd_ringbuffer_del(batch->binning);
fd_ringbuffer_del(batch->gmem);
@@ -147,7 +154,7 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
foreach_batch(dep, cache, batch->dependents_mask) {
if (flush)
fd_batch_flush(dep, false);
fd_batch_flush(dep, false, false);
fd_batch_reference(&dep, NULL);
}
@@ -254,12 +261,17 @@ batch_cleanup_func(void *job, int id)
}
static void
batch_flush(struct fd_batch *batch)
batch_flush(struct fd_batch *batch, bool force)
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
if (!batch->needs_flush)
if (!batch->needs_flush) {
if (force) {
fd_gmem_render_noop(batch);
goto out;
}
return;
}
batch->needs_flush = false;
@@ -288,6 +300,7 @@ batch_flush(struct fd_batch *batch)
debug_assert(batch->reference.count > 0);
out:
if (batch == batch->ctx->batch) {
batch_reset(batch);
} else {
@@ -297,9 +310,16 @@ batch_flush(struct fd_batch *batch)
}
}
/* NOTE: could drop the last ref to batch */
/* NOTE: could drop the last ref to batch
*
* @sync: synchronize with flush_queue, ensures batch is *actually* flushed
* to kernel before this returns, as opposed to just being queued to be
* flushed
* @force: force a flush even if no rendering, mostly useful if you need
* a fence to sync on
*/
void
fd_batch_flush(struct fd_batch *batch, bool sync)
fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
/* NOTE: we need to hold an extra ref across the body of flush,
* since the last ref to this batch could be dropped when cleaning
@@ -307,7 +327,7 @@ fd_batch_flush(struct fd_batch *batch, bool sync)
*/
struct fd_batch *tmp = NULL;
fd_batch_reference(&tmp, batch);
batch_flush(tmp);
batch_flush(tmp, force);
if (sync)
fd_batch_sync(tmp);
fd_batch_reference(&tmp, NULL);
@@ -342,7 +362,7 @@ batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
if (batch_depends_on(dep, batch)) {
DBG("%p: flush forced on %p!", batch, dep);
mtx_unlock(&batch->ctx->screen->lock);
fd_batch_flush(dep, false);
fd_batch_flush(dep, false, false);
mtx_lock(&batch->ctx->screen->lock);
} else {
struct fd_batch *other = NULL;
@@ -409,7 +429,7 @@ fd_batch_check_size(struct fd_batch *batch)
struct fd_ringbuffer *ring = batch->draw;
if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
(fd_mesa_debug & FD_DBG_FLUSH))
fd_batch_flush(batch, true);
fd_batch_flush(batch, true, false);
}
/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
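
The @sync/@force comment above fd_batch_flush() documents the new signature;
the patch's call sites reduce to three patterns, shown here as a hypothetical
caller for illustration (assumes the freedreno headers, not code from the
patch):

static void flush_patterns(struct fd_batch *batch)
{
	/* Common case: queue the batch, don't wait. */
	fd_batch_flush(batch, false, false);

	/* Wait until the kernel has actually consumed the commands. */
	fd_batch_flush(batch, true, false);

	/* Need a waitable fence even with no rendering queued: force a
	 * no-op submit so fd_fence_populate() runs. */
	fd_batch_flush(batch, true, true);
}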

src/gallium/drivers/freedreno/freedreno_batch.h

@@ -70,6 +70,7 @@ struct fd_batch {
int in_fence_fd;
bool needs_out_fence_fd;
struct pipe_fence_handle *fence;
struct fd_context *ctx;
@@ -205,7 +206,7 @@ struct fd_batch * fd_batch_create(struct fd_context *ctx);
void fd_batch_reset(struct fd_batch *batch);
void fd_batch_sync(struct fd_batch *batch);
void fd_batch_flush(struct fd_batch *batch, bool sync);
void fd_batch_flush(struct fd_batch *batch, bool sync, bool force);
void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
void fd_batch_check_size(struct fd_batch *batch);

src/gallium/drivers/freedreno/freedreno_batch_cache.c

@@ -134,11 +134,12 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
hash_table_foreach(cache->ht, entry) {
struct fd_batch *batch = NULL;
/* hold a reference since we can drop screen->lock: */
fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
if (batch->ctx == ctx) {
mtx_unlock(&ctx->screen->lock);
fd_batch_reference(&last_batch, batch);
fd_batch_flush(batch, false);
fd_batch_flush(batch, false, false);
mtx_lock(&ctx->screen->lock);
}
fd_batch_reference_locked(&batch, NULL);
@@ -265,7 +266,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
*/
mtx_unlock(&ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
fd_batch_flush(flush_batch, true);
fd_batch_flush(flush_batch, true, false);
mtx_lock(&ctx->screen->lock);
/* While the resources get cleaned up automatically, the flush_batch
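
Both fd_bc_flush() and fd_bc_alloc_batch() lean on the same
pin/unlock/relock pattern around fd_batch_flush(), since flushing can sleep
and take other locks.  In isolation (the wrapper function is hypothetical;
the calls are the ones used above):

static void flush_batch_dropping_lock(struct fd_context *ctx,
		struct fd_batch *entry)
{
	struct fd_batch *batch = NULL;

	/* Pin the batch while still holding the lock, so another thread
	 * can't destroy it once the lock is released: */
	fd_batch_reference_locked(&batch, entry);

	mtx_unlock(&ctx->screen->lock);
	fd_batch_flush(batch, false, false);   /* may sleep / take locks */
	mtx_lock(&ctx->screen->lock);

	fd_batch_reference_locked(&batch, NULL);  /* drop the pin */
}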

src/gallium/drivers/freedreno/freedreno_context.c

@@ -40,31 +40,28 @@
#include "util/u_upload_mgr.h"
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
unsigned flags)
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_fence_handle *fence = NULL;
/* Take a ref to the batch's fence (batch can be unref'd when flushed): */
fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);
if (flags & PIPE_FLUSH_FENCE_FD)
ctx->batch->needs_out_fence_fd = true;
if (!ctx->screen->reorder) {
fd_batch_flush(ctx->batch, true);
fd_batch_flush(ctx->batch, true, false);
} else {
fd_bc_flush(&ctx->screen->batch_cache, ctx);
}
if (fence) {
/* if there hasn't been any rendering submitted yet, we might not
* have actually created a fence
*/
if (!ctx->last_fence || ctx->batch->needs_out_fence_fd) {
ctx->batch->needs_flush = true;
fd_gmem_render_noop(ctx->batch);
fd_batch_reset(ctx->batch);
}
fd_fence_ref(pctx->screen, fence, ctx->last_fence);
}
if (fencep)
fd_fence_ref(pctx->screen, fencep, fence);
fd_fence_ref(pctx->screen, &fence, NULL);
}
static void
@@ -129,8 +126,6 @@ fd_context_destroy(struct pipe_context *pctx)
fd_batch_reference(&ctx->batch, NULL); /* unref current batch */
fd_bc_invalidate_context(ctx);
fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);
fd_prog_fini(pctx);
if (ctx->blitter)
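
The rework of fd_context_flush() boils down to one rule: take a reference to
the batch's fence before flushing, because the flush may drop the last
reference to the batch itself.  A hypothetical reduced form of that pattern
(using only calls that appear in the hunk above):

static struct pipe_fence_handle *
flush_current_batch(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_fence_handle *fence = NULL;

	/* Pin the fence first; the batch may not survive the flush: */
	fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);
	fd_batch_flush(ctx->batch, true, false);

	return fence;   /* caller owns a reference now */
}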

src/gallium/drivers/freedreno/freedreno_context.h

@@ -226,8 +226,6 @@ struct fd_context {
*/
struct fd_batch *batch;
struct pipe_fence_handle *last_fence;
/* Are we in process of shadowing a resource? Used to detect recursion
* in transfer_map, and skip unneeded synchronization.
*/

src/gallium/drivers/freedreno/freedreno_fence.c

@@ -36,12 +36,34 @@
struct pipe_fence_handle {
struct pipe_reference reference;
/* fence holds a weak reference to the batch until the batch is flushed,
* at which point fd_fence_populate() is called and timestamp and possibly
* fence_fd become valid and the weak reference is dropped.
*/
struct fd_batch *batch;
struct fd_context *ctx;
struct fd_screen *screen;
int fence_fd;
uint32_t timestamp;
};
static void fence_flush(struct pipe_fence_handle *fence)
{
if (fence->batch)
fd_batch_flush(fence->batch, true, true);
debug_assert(!fence->batch);
}
void fd_fence_populate(struct pipe_fence_handle *fence,
uint32_t timestamp, int fence_fd)
{
if (!fence->batch)
return;
fence->timestamp = timestamp;
fence->fence_fd = fence_fd;
fence->batch = NULL;
}
static void fd_fence_destroy(struct pipe_fence_handle *fence)
{
if (fence->fence_fd != -1)
@@ -64,6 +86,8 @@ boolean fd_fence_finish(struct pipe_screen *pscreen,
struct pipe_fence_handle *fence,
uint64_t timeout)
{
fence_flush(fence);
if (fence->fence_fd != -1) {
int ret = sync_wait(fence->fence_fd, timeout / 1000000);
return ret == 0;
@@ -75,31 +99,8 @@ boolean fd_fence_finish(struct pipe_screen *pscreen,
return true;
}
void fd_create_fence_fd(struct pipe_context *pctx,
struct pipe_fence_handle **pfence, int fd)
{
*pfence = fd_fence_create(fd_context(pctx), 0, dup(fd));
}
void fd_fence_server_sync(struct pipe_context *pctx,
struct pipe_fence_handle *fence)
{
struct fd_context *ctx = fd_context(pctx);
struct fd_batch *batch = ctx->batch;
if (sync_accumulate("freedreno", &batch->in_fence_fd, fence->fence_fd)) {
/* error */
}
}
int fd_fence_get_fd(struct pipe_screen *pscreen,
struct pipe_fence_handle *fence)
{
return dup(fence->fence_fd);
}
struct pipe_fence_handle * fd_fence_create(struct fd_context *ctx,
uint32_t timestamp, int fence_fd)
static struct pipe_fence_handle * fence_create(struct fd_context *ctx,
struct fd_batch *batch, uint32_t timestamp, int fence_fd)
{
struct pipe_fence_handle *fence;
@@ -109,6 +110,7 @@ struct pipe_fence_handle * fd_fence_create(struct fd_context *ctx,
pipe_reference_init(&fence->reference, 1);
fence->batch = batch;
fence->ctx = ctx;
fence->screen = ctx->screen;
fence->timestamp = timestamp;
@@ -116,3 +118,34 @@ struct pipe_fence_handle * fd_fence_create(struct fd_context *ctx,
return fence;
}
void fd_create_fence_fd(struct pipe_context *pctx,
struct pipe_fence_handle **pfence, int fd)
{
*pfence = fence_create(fd_context(pctx), NULL, 0, dup(fd));
}
void fd_fence_server_sync(struct pipe_context *pctx,
struct pipe_fence_handle *fence)
{
struct fd_context *ctx = fd_context(pctx);
struct fd_batch *batch = ctx->batch;
fence_flush(fence);
if (sync_accumulate("freedreno", &batch->in_fence_fd, fence->fence_fd)) {
/* error */
}
}
int fd_fence_get_fd(struct pipe_screen *pscreen,
struct pipe_fence_handle *fence)
{
fence_flush(fence);
return dup(fence->fence_fd);
}
struct pipe_fence_handle * fd_fence_create(struct fd_batch *batch)
{
return fence_create(batch->ctx, batch, 0, -1);
}
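
All three wait/export paths (fd_fence_finish(), fd_fence_server_sync(),
fd_fence_get_fd()) now funnel through fence_flush(), which establishes the
invariant sketched below.  The helper is hypothetical; the assert mirrors
the debug_assert() in fence_flush() above:

static void wait_entry_point(struct pipe_fence_handle *fence)
{
	/* Lazily force the batch out; a fence created before its batch
	 * was flushed becomes waitable here: */
	fence_flush(fence);
	debug_assert(!fence->batch);  /* timestamp/fence_fd now valid */

	/* ... safe to sync_wait(fence->fence_fd, ...) or to wait on
	 * fence->timestamp ... */
}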

src/gallium/drivers/freedreno/freedreno_fence.h

@@ -31,6 +31,8 @@
#include "pipe/p_context.h"
void fd_fence_populate(struct pipe_fence_handle *fence,
uint32_t timestamp, int fence_fd);
void fd_fence_ref(struct pipe_screen *pscreen,
struct pipe_fence_handle **ptr,
struct pipe_fence_handle *pfence);
@@ -45,8 +47,7 @@ void fd_fence_server_sync(struct pipe_context *pctx,
int fd_fence_get_fd(struct pipe_screen *pscreen,
struct pipe_fence_handle *pfence);
struct fd_context;
struct pipe_fence_handle * fd_fence_create(struct fd_context *ctx,
uint32_t timestamp, int fence_fd);
struct fd_batch;
struct pipe_fence_handle * fd_fence_create(struct fd_batch *batch);
#endif /* FREEDRENO_FENCE_H_ */

src/gallium/drivers/freedreno/freedreno_gmem.c

@@ -372,15 +372,14 @@ render_sysmem(struct fd_batch *batch)
static void
flush_ring(struct fd_batch *batch)
{
struct fd_context *ctx = batch->ctx;
uint32_t timestamp;
int out_fence_fd = -1;
fd_ringbuffer_flush2(batch->gmem, batch->in_fence_fd,
batch->needs_out_fence_fd ? &out_fence_fd : NULL);
fd_fence_ref(&ctx->screen->base, &ctx->last_fence, NULL);
ctx->last_fence = fd_fence_create(ctx,
fd_ringbuffer_timestamp(batch->gmem), out_fence_fd);
timestamp = fd_ringbuffer_timestamp(batch->gmem);
fd_fence_populate(batch->fence, timestamp, out_fence_fd);
}
void

src/gallium/drivers/freedreno/freedreno_query_acc.c

@@ -138,7 +138,7 @@ fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
* spin forever:
*/
if (aq->no_wait_cnt++ > 5)
fd_batch_flush(rsc->write_batch, false);
fd_batch_flush(rsc->write_batch, false, false);
return false;
}
@@ -151,7 +151,7 @@ fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
}
if (rsc->write_batch)
fd_batch_flush(rsc->write_batch, true);
fd_batch_flush(rsc->write_batch, true, false);
/* get the result: */
fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
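
The no-wait path above implements a small back-off heuristic: poll a handful
of times without forcing a flush, then flush the writer so a result can
eventually land.  Reduced to a sketch (the wrapper and result_ready() are
hypothetical; the threshold of 5 and the fd_batch_flush() call come from the
code above):

static bool poll_query_result(struct fd_acc_query *aq,
		struct fd_resource *rsc)
{
	if (!result_ready(rsc)) {
		/* Don't force a flush immediately, but don't let a
		 * never-flushed writer make us spin forever either: */
		if (aq->no_wait_cnt++ > 5)
			fd_batch_flush(rsc->write_batch, false, false);
		return false;   /* caller retries */
	}
	return true;
}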

src/gallium/drivers/freedreno/freedreno_query_hw.c

@@ -211,7 +211,7 @@ fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
* spin forever:
*/
if (hq->no_wait_cnt++ > 5)
fd_batch_flush(rsc->write_batch, false);
fd_batch_flush(rsc->write_batch, false, false);
return false;
}
@@ -239,7 +239,7 @@ fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
struct fd_resource *rsc = fd_resource(start->prsc);
if (rsc->write_batch)
fd_batch_flush(rsc->write_batch, true);
fd_batch_flush(rsc->write_batch, true, false);
/* some piglit tests at least do query with no draws, I guess: */
if (!rsc->bo)

src/gallium/drivers/freedreno/freedreno_resource.c

@@ -546,7 +546,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
mtx_unlock(&ctx->screen->lock);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
fd_batch_flush(batch, false);
fd_batch_flush(batch, false, false);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
fd_batch_sync(batch);
@@ -554,7 +554,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
}
assert(rsc->batch_mask == 0);
} else {
fd_batch_flush(write_batch, true);
fd_batch_flush(write_batch, true, false);
}
assert(!rsc->write_batch);
}
@@ -1166,7 +1166,7 @@ fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
struct fd_resource *rsc = fd_resource(prsc);
if (rsc->write_batch)
fd_batch_flush(rsc->write_batch, true);
fd_batch_flush(rsc->write_batch, true, false);
assert(!rsc->write_batch);
}
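
Both transfer-map paths enforce the same ordering before CPU access: any
batch still writing the resource is flushed (and, for a blocking map,
synced) first.  In reduced, hypothetical form, borrowing the cpu_prep call
seen in the query code:

static void sync_for_cpu(struct fd_context *ctx, struct fd_resource *rsc)
{
	/* The GPU writer must be flushed before the CPU is allowed to
	 * see the memory: */
	if (rsc->write_batch)
		fd_batch_flush(rsc->write_batch, true, false);
	assert(!rsc->write_batch);

	fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
}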

src/gallium/drivers/freedreno/freedreno_state.c

@@ -235,14 +235,14 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
* multiple times to the same surface), so we might as
* well go ahead and flush this one:
*/
fd_batch_flush(old_batch, false);
fd_batch_flush(old_batch, false, false);
}
fd_batch_reference(&old_batch, NULL);
} else {
DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
framebuffer->cbufs[0], framebuffer->zsbuf);
fd_batch_flush(ctx->batch, false);
fd_batch_flush(ctx->batch, false, false);
}
cso = &ctx->batch->framebuffer;