freedreno: remove flush-queue
Signed-off-by: Rob Clark <robdclark@chromium.org>
Tested-by: Marge Bot <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3503>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3503>
commit 5b9fe18485
parent b3b1fa5e2b
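This change removes the optional flush-queue: fd_batch_flush() loses its bool sync parameter, and flushing always happens synchronously on the calling thread. A minimal before/after sketch of the API, reconstructed from the hunks below:

	/* before: with ctx->screen->reorder set, a flush could be handed off
	 * to a "flush_queue" worker thread; sync=true waited for that worker:
	 */
	void fd_batch_flush(struct fd_batch *batch, bool sync);
	void fd_batch_sync(struct fd_batch *batch);  /* wait on batch->flush_fence */

	/* after: every flush submits before returning, so the sync parameter
	 * and the separate wait entry point go away:
	 */
	void fd_batch_flush(struct fd_batch *batch);

The rest of the diff is the mechanical fallout: every call site drops the boolean, and the util_queue plumbing (fence, queue, worker callbacks) is deleted.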
@@ -479,7 +479,7 @@ fd5_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
 	fd_resource(info->dst.resource)->valid = true;
 	batch->needs_flush = true;
 
-	fd_batch_flush(batch, false);
+	fd_batch_flush(batch);
 	fd_batch_reference(&batch, NULL);
 
 	return true;
@@ -668,7 +668,7 @@ handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
 	fd_resource(info->dst.resource)->valid = true;
 	batch->needs_flush = true;
 
-	fd_batch_flush(batch, false);
+	fd_batch_flush(batch);
 	fd_batch_reference(&batch, NULL);
 
 	return true;
@@ -1432,7 +1432,7 @@ emit_sysmem_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
 		}
 	}
 
-	fd6_event_write(batch, ring, 0x1d, true);
+	fd6_event_write(batch, ring, UNK_1D, true);
 }
 
 static void
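Aside from the flush-queue removal, this hunk also swaps a magic number for a named constant. UNK_1D presumably lives in the generated a6xx register headers; a hedged sketch of the assumed definition (the enum name here is an assumption, not part of this diff):

	/* assumed: the named event matches the raw value it replaces */
	enum vgt_event_type {
		/* ... */
		UNK_1D = 0x1d,
		/* ... */
	};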
@@ -41,9 +41,6 @@ batch_init(struct fd_batch *batch)
 	struct fd_context *ctx = batch->ctx;
 	unsigned size = 0;
 
-	if (ctx->screen->reorder)
-		util_queue_fence_init(&batch->flush_fence);
-
 	/* if kernel is too old to support unlimited # of cmd buffers, we
 	 * have no option but to allocate large worst-case sizes so that
 	 * we don't need to grow the ringbuffer.  Performance is likely to
@@ -191,9 +188,6 @@ batch_fini(struct fd_batch *batch)
 		fd_hw_sample_reference(batch->ctx, &samp, NULL);
 	}
 	util_dynarray_fini(&batch->samples);
-
-	if (batch->ctx->screen->reorder)
-		util_queue_fence_destroy(&batch->flush_fence);
 }
 
 static void
@@ -204,7 +198,7 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
 
 	foreach_batch(dep, cache, batch->dependents_mask) {
 		if (flush)
-			fd_batch_flush(dep, false);
+			fd_batch_flush(dep);
 		fd_batch_reference(&dep, NULL);
 	}
 
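foreach_batch(), used here and again in the batch-cache and resource hunks below, is defined elsewhere; a plausible sketch of the macro, assuming it visits each batch in the cache whose bit is set in the mask:

	/* assumed shape of the iterator (u_bit_scan() pops the lowest set bit) */
	#define foreach_batch(batch, cache, mask) \
		for (uint32_t _m = (mask); \
		     _m && ((batch) = (cache)->batches[u_bit_scan(&_m)]); )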
@@ -239,8 +233,6 @@ batch_reset(struct fd_batch *batch)
 {
 	DBG("%p", batch);
 
-	fd_batch_sync(batch);
-
 	batch_flush_reset_dependencies(batch, false);
 	batch_reset_resources(batch);
 
@@ -286,32 +278,6 @@ __fd_batch_describe(char* buf, const struct fd_batch *batch)
 	sprintf(buf, "fd_batch<%u>", batch->seqno);
 }
 
-void
-fd_batch_sync(struct fd_batch *batch)
-{
-	if (!batch->ctx->screen->reorder)
-		return;
-	util_queue_fence_wait(&batch->flush_fence);
-}
-
-static void
-batch_flush_func(void *job, int id)
-{
-	struct fd_batch *batch = job;
-
-	DBG("%p", batch);
-
-	fd_gmem_render_tiles(batch);
-	batch_reset_resources(batch);
-}
-
-static void
-batch_cleanup_func(void *job, int id)
-{
-	struct fd_batch *batch = job;
-	fd_batch_reference(&batch, NULL);
-}
-
 static void
 batch_flush(struct fd_batch *batch)
 {
@@ -333,20 +299,8 @@ batch_flush(struct fd_batch *batch)
 
 	fd_fence_ref(&batch->ctx->last_fence, batch->fence);
 
-	if (batch->ctx->screen->reorder) {
-		struct fd_batch *tmp = NULL;
-		fd_batch_reference(&tmp, batch);
-
-		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
-			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);
-
-		util_queue_add_job(&batch->ctx->flush_queue,
-				batch, &batch->flush_fence,
-				batch_flush_func, batch_cleanup_func, 0);
-	} else {
-		fd_gmem_render_tiles(batch);
-		batch_reset_resources(batch);
-	}
+	fd_gmem_render_tiles(batch);
+	batch_reset_resources(batch);
 
 	debug_assert(batch->reference.count > 0);
 
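Taken together, the two hunks above delete the asynchronous path: batch_flush_func() ran fd_gmem_render_tiles() on a util_queue worker thread, and fd_batch_sync() waited on batch->flush_fence. A condensed sketch of the old flow versus the new one, reconstructed from the removed code:

	/* before, with reorder enabled: queue the flush and return immediately */
	util_queue_add_job(&ctx->flush_queue, batch, &batch->flush_fence,
			batch_flush_func, batch_cleanup_func, 0);
	/* ... and callers that needed completion had to wait explicitly: */
	fd_batch_sync(batch);  /* util_queue_fence_wait(&batch->flush_fence) */

	/* after: render and reset happen inline, so a returned flush is done */
	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);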
@@ -364,7 +318,7 @@ batch_flush(struct fd_batch *batch)
  * a fence to sync on
  */
 void
-fd_batch_flush(struct fd_batch *batch, bool sync)
+fd_batch_flush(struct fd_batch *batch)
 {
 	struct fd_batch *tmp = NULL;
 	bool newbatch = false;
@@ -401,9 +355,6 @@ fd_batch_flush(struct fd_batch *batch, bool sync)
 		fd_context_all_dirty(ctx);
 	}
 
-	if (sync)
-		fd_batch_sync(tmp);
-
 	fd_batch_reference(&tmp, NULL);
 }
 
@@ -445,7 +396,7 @@ flush_write_batch(struct fd_resource *rsc)
 	fd_batch_reference_locked(&b, rsc->write_batch);
 
 	mtx_unlock(&b->ctx->screen->lock);
-	fd_batch_flush(b, true);
+	fd_batch_flush(b);
 	mtx_lock(&b->ctx->screen->lock);
 
 	fd_bc_invalidate_batch(b, false);
@@ -519,7 +470,7 @@ fd_batch_check_size(struct fd_batch *batch)
 	debug_assert(!batch->flushed);
 
 	if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
-		fd_batch_flush(batch, true);
+		fd_batch_flush(batch);
 		return;
 	}
 
@@ -528,7 +479,7 @@ fd_batch_check_size(struct fd_batch *batch)
 
 	struct fd_ringbuffer *ring = batch->draw;
 	if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
-		fd_batch_flush(batch, true);
+		fd_batch_flush(batch);
 }
 
 /* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
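For context on the size check above: ring->cur and ring->start are dword pointers while ring->size is in bytes, so the batch is flushed once fewer than 0x1000 dwords of headroom remain in the draw ring. Spelled out with illustrative variable names (the temporaries are mine, not the driver's):

	uint32_t used     = ring->cur - ring->start;  /* dwords emitted so far */
	uint32_t capacity = ring->size / 4;           /* bytes -> dwords */
	if (used > capacity - 0x1000)                 /* under 4096 dwords left */
		fd_batch_flush(batch);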
@@ -74,8 +74,6 @@ struct fd_batch {
 
 	struct fd_context *ctx;
 
-	struct util_queue_fence flush_fence;
-
 	/* do we need to mem2gmem before rendering.  We don't, if for example,
 	 * there was a glClear() that invalidated the entire previous buffer
 	 * contents.  Keep track of which buffer(s) are cleared, or needs
@@ -247,8 +245,7 @@ struct fd_batch {
 struct fd_batch * fd_batch_create(struct fd_context *ctx, bool nondraw);
 
 void fd_batch_reset(struct fd_batch *batch);
-void fd_batch_sync(struct fd_batch *batch);
-void fd_batch_flush(struct fd_batch *batch, bool sync);
+void fd_batch_flush(struct fd_batch *batch);
 void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
 void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
 void fd_batch_check_size(struct fd_batch *batch);
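With the prototype trimmed as above, every remaining hunk is the same mechanical rewrite at a call site. A representative caller under the new API (pattern taken from the query hunks further down): any pending rendering is submitted before fd_batch_flush() returns, so blocking on the BO immediately afterwards is safe.

	if (rsc->write_batch)
		fd_batch_flush(rsc->write_batch);
	fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);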
@@ -159,7 +159,7 @@ bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
 		fd_context_unlock(ctx);
 
 		for (unsigned i = 0; i < n; i++) {
-			fd_batch_flush(batches[i], false);
+			fd_batch_flush(batches[i]);
 		}
 	}
 
@@ -307,7 +307,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool non
 		 */
 		mtx_unlock(&ctx->screen->lock);
 		DBG("%p: too many batches! flush forced!", flush_batch);
-		fd_batch_flush(flush_batch, true);
+		fd_batch_flush(flush_batch);
 		mtx_lock(&ctx->screen->lock);
 
 		/* While the resources get cleaned up automatically, the flush_batch
@@ -75,7 +75,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
 		batch->needs_out_fence_fd = true;
 
 	if (!ctx->screen->reorder) {
-		fd_batch_flush(batch, true);
+		fd_batch_flush(batch);
 	} else if (flags & PIPE_FLUSH_DEFERRED) {
 		fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
 	} else {
@@ -170,9 +170,6 @@ fd_context_destroy(struct pipe_context *pctx)
 
 	fd_fence_ref(&ctx->last_fence, NULL);
 
-	if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
-		util_queue_destroy(&ctx->flush_queue);
-
 	util_copy_framebuffer_state(&ctx->framebuffer, NULL);
 	fd_batch_reference(&ctx->batch, NULL);  /* unref current batch */
 	fd_bc_invalidate_context(ctx);
@@ -173,8 +173,6 @@ struct fd_context {
 	struct fd_screen *screen;
 	struct fd_pipe *pipe;
 
-	struct util_queue flush_queue;
-
 	struct blitter_context *blitter;
 	void *clear_rs_state;
 	struct primconvert_context *primconvert;
@@ -464,7 +464,7 @@ fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
 	batch->needs_flush = true;
 	ctx->launch_grid(ctx, info);
 
-	fd_batch_flush(batch, false);
+	fd_batch_flush(batch);
 
 	fd_batch_reference(&ctx->batch, save_batch);
 	fd_context_all_dirty(ctx);
@@ -48,7 +48,7 @@ struct pipe_fence_handle {
 static void fence_flush(struct pipe_fence_handle *fence)
 {
 	if (fence->batch)
-		fd_batch_flush(fence->batch, true);
+		fd_batch_flush(fence->batch);
 	debug_assert(!fence->batch);
 }
 
@@ -139,7 +139,7 @@ fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
 		 * spin forever:
 		 */
 		if (aq->no_wait_cnt++ > 5)
-			fd_batch_flush(rsc->write_batch, false);
+			fd_batch_flush(rsc->write_batch);
 		return false;
 	}
 
@@ -152,7 +152,7 @@ fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
 	}
 
 	if (rsc->write_batch)
-		fd_batch_flush(rsc->write_batch, true);
+		fd_batch_flush(rsc->write_batch);
 
 	/* get the result: */
 	fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
@@ -209,7 +209,7 @@ fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
 		 * spin forever:
 		 */
 		if (hq->no_wait_cnt++ > 5)
-			fd_batch_flush(rsc->write_batch, false);
+			fd_batch_flush(rsc->write_batch);
 		return false;
 	}
 
@@ -237,7 +237,7 @@ fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
 		struct fd_resource *rsc = fd_resource(start->prsc);
 
 		if (rsc->write_batch)
-			fd_batch_flush(rsc->write_batch, true);
+			fd_batch_flush(rsc->write_batch);
 
 		/* some piglit tests at least do query with no draws, I guess: */
 		if (!rsc->bo)
@@ -450,15 +450,14 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
 		mtx_unlock(&ctx->screen->lock);
 
 		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
-			fd_batch_flush(batch, false);
+			fd_batch_flush(batch);
 
 		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
-			fd_batch_sync(batch);
 			fd_batch_reference(&batches[batch->idx], NULL);
 		}
 		assert(rsc->batch_mask == 0);
 	} else if (write_batch) {
-		fd_batch_flush(write_batch, true);
+		fd_batch_flush(write_batch);
 	}
 
 	fd_batch_reference(&write_batch, NULL);
@@ -560,21 +559,6 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 			if (usage & PIPE_TRANSFER_READ) {
 				fd_blit_to_staging(ctx, trans);
 
-				struct fd_batch *batch = NULL;
-
-				fd_context_lock(ctx);
-				fd_batch_reference_locked(&batch, staging_rsc->write_batch);
-				fd_context_unlock(ctx);
-
-				/* we can't fd_bo_cpu_prep() until the blit to staging
-				 * is submitted to kernel.. in that case write_batch
-				 * wouldn't be NULL yet:
-				 */
-				if (batch) {
-					fd_batch_sync(batch);
-					fd_batch_reference(&batch, NULL);
-				}
-
 				fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
 						DRM_FREEDRENO_PREP_READ);
 			}
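This hunk is the main payoff on the transfer-map path: since a flush now submits on the calling thread, the staging blit no longer needs an explicit wait before the CPU can prep the BO. Condensed before/after, reconstructed from the removed lines:

	/* before: wait for the flush-queue worker to submit the staging blit */
	fd_blit_to_staging(ctx, trans);
	if (batch) {  /* staging_rsc->write_batch, if any */
		fd_batch_sync(batch);
		fd_batch_reference(&batch, NULL);
	}
	fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);

	/* after: submission already happened on this thread */
	fd_blit_to_staging(ctx, trans);
	fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);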
@@ -248,14 +248,14 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
 			 * multiple times to the same surface), so we might as
 			 * well go ahead and flush this one:
 			 */
-			fd_batch_flush(old_batch, false);
+			fd_batch_flush(old_batch);
 		}
 
 		fd_batch_reference(&old_batch, NULL);
 	} else {
 		DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
 				framebuffer->cbufs[0], framebuffer->zsbuf);
-		fd_batch_flush(ctx->batch, false);
+		fd_batch_flush(ctx->batch);
 		util_copy_framebuffer_state(&ctx->batch->framebuffer, cso);
 	}
 