freedreno: allocate ctx's batch on demand
Don't fall over when the app wants more than 32 contexts. Instead, allocate batches on demand.

Signed-off-by: Rob Clark <robdclark@gmail.com>
commit c3d9f29b78
parent a122118c14
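In short: with batch reordering enabled, a context no longer pre-allocates its batch at creation; the batch is created lazily, from framebuffer state now tracked on the context itself, the first time something actually needs it (see the new fd_context_batch() helper below). A minimal stand-alone sketch of that on-demand pattern, using hypothetical types and names rather than the real freedreno code:

#include <stdio.h>
#include <stdlib.h>

struct batch {
	int id;
};

struct context {
	struct batch *batch;	/* NULL until rendering actually needs one */
	int next_batch_id;
};

/* Rough analogue of fd_context_batch(): allocate the batch on first use, so
 * contexts that never render never consume a batch-cache slot.
 */
static struct batch *
context_batch(struct context *ctx)
{
	if (!ctx->batch) {
		ctx->batch = calloc(1, sizeof(*ctx->batch));
		ctx->batch->id = ctx->next_batch_id++;
	}
	return ctx->batch;
}

int
main(void)
{
	struct context ctx = { 0 };

	/* context creation no longer allocates a batch... */
	printf("batch before first draw: %p\n", (void *)ctx.batch);

	/* ...the first draw-like call allocates it on demand */
	printf("batch id after first draw: %d\n", context_batch(&ctx)->id);

	free(ctx.batch);
	return 0;
}

In the diff itself this corresponds to fd_context_init() only calling fd_bc_alloc_batch() when reordering is disabled, plus the new lazy path in fd_context_batch().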
--- a/src/gallium/drivers/freedreno/freedreno_batch.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch.c
@@ -296,7 +296,6 @@ batch_flush(struct fd_batch *batch, bool force)
 	 */
 	fd_batch_set_stage(batch, FD_STAGE_NULL);
 
-	fd_context_all_dirty(batch->ctx);
 	batch_flush_reset_dependencies(batch, true);
 
 	batch->flushed = true;
@@ -355,14 +354,15 @@ fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
 		struct fd_batch *new_batch;
 
 		if (ctx->screen->reorder) {
-			new_batch = fd_batch_from_fb(&ctx->screen->batch_cache,
-					ctx, &batch->framebuffer);
+			/* defer allocating new batch until one is needed for rendering
+			 * to avoid unused batches for apps that create many contexts
+			 */
+			new_batch = NULL;
 		} else {
 			new_batch = fd_batch_create(ctx, false);
+			util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
 		}
 
-		util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
-
 		fd_batch_reference(&batch, NULL);
 		ctx->batch = new_batch;
 	}
--- a/src/gallium/drivers/freedreno/freedreno_context.c
+++ b/src/gallium/drivers/freedreno/freedreno_context.c
@@ -46,20 +46,22 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
 {
 	struct fd_context *ctx = fd_context(pctx);
 	struct pipe_fence_handle *fence = NULL;
+	// TODO we want to lookup batch if it exists, but not create one if not.
+	struct fd_batch *batch = fd_context_batch(ctx);
 
 	DBG("%p: flush: flags=%x\n", ctx->batch, flags);
 
-	if (!ctx->batch)
+	if (!batch)
 		return;
 
 	/* Take a ref to the batch's fence (batch can be unref'd when flushed: */
-	fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);
+	fd_fence_ref(pctx->screen, &fence, batch->fence);
 
 	if (flags & PIPE_FLUSH_FENCE_FD)
-		ctx->batch->needs_out_fence_fd = true;
+		batch->needs_out_fence_fd = true;
 
 	if (!ctx->screen->reorder) {
-		fd_batch_flush(ctx->batch, true, false);
+		fd_batch_flush(batch, true, false);
 	} else if (flags & PIPE_FLUSH_DEFERRED) {
 		fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
 	} else {
@@ -140,6 +142,7 @@ fd_context_destroy(struct pipe_context *pctx)
 	if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
 		util_queue_destroy(&ctx->flush_queue);
 
+	util_copy_framebuffer_state(&ctx->framebuffer, NULL);
 	fd_batch_reference(&ctx->batch, NULL);  /* unref current batch */
 	fd_bc_invalidate_context(ctx);
 
@@ -312,7 +315,8 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
 		goto fail;
 	pctx->const_uploader = pctx->stream_uploader;
 
-	ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx);
+	if (!ctx->screen->reorder)
+		ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx);
 
 	slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
 
--- a/src/gallium/drivers/freedreno/freedreno_context.h
+++ b/src/gallium/drivers/freedreno/freedreno_context.h
@@ -281,6 +281,8 @@ struct fd_context {
 	struct pipe_blend_color blend_color;
 	struct pipe_stencil_ref stencil_ref;
 	unsigned sample_mask;
+	/* local context fb state, for when ctx->batch is null: */
+	struct pipe_framebuffer_state framebuffer;
 	struct pipe_poly_stipple stipple;
 	struct pipe_viewport_state viewport;
 	struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
@@ -431,6 +433,13 @@ fd_supported_prim(struct fd_context *ctx, unsigned prim)
 static inline struct fd_batch *
 fd_context_batch(struct fd_context *ctx)
 {
+	if (unlikely(!ctx->batch)) {
+		struct fd_batch *batch =
+			fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
+		util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
+		ctx->batch = batch;
+		fd_context_all_dirty(ctx);
+	}
 	return ctx->batch;
 }
 
--- a/src/gallium/drivers/freedreno/freedreno_state.c
+++ b/src/gallium/drivers/freedreno/freedreno_state.c
@@ -212,17 +212,14 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
 	struct pipe_framebuffer_state *cso;
 
 	if (ctx->screen->reorder) {
-		struct fd_batch *batch, *old_batch = NULL;
+		struct fd_batch *old_batch = NULL;
 
 		fd_batch_reference(&old_batch, ctx->batch);
 
 		if (likely(old_batch))
 			fd_batch_set_stage(old_batch, FD_STAGE_NULL);
 
-		batch = fd_batch_from_fb(&ctx->screen->batch_cache, ctx, framebuffer);
 		fd_batch_reference(&ctx->batch, NULL);
-		fd_reset_wfi(batch);
-		ctx->batch = batch;
 		fd_context_all_dirty(ctx);
 
 		if (old_batch && old_batch->blit && !old_batch->back_blit) {
@@ -241,7 +238,7 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
 		fd_batch_flush(ctx->batch, false, false);
 	}
 
-	cso = &ctx->batch->framebuffer;
+	cso = &ctx->framebuffer;
 
 	util_copy_framebuffer_state(cso, framebuffer);
 