From e1ba1b5b25c7905e34a924c78c0be2969bfef70c Mon Sep 17 00:00:00 2001
From: Mike Blumenkrantz
Date: Fri, 7 May 2021 09:37:04 -0400
Subject: [PATCH] zink: split batch state work_count into separate vars

this has better struct packing

Reviewed-by: Dave Airlie
Part-of:
---
 src/gallium/drivers/zink/zink_batch.c   | 2 +-
 src/gallium/drivers/zink/zink_batch.h   | 4 +++-
 src/gallium/drivers/zink/zink_context.c | 2 +-
 src/gallium/drivers/zink/zink_draw.c    | 4 ++--
 4 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/src/gallium/drivers/zink/zink_batch.c b/src/gallium/drivers/zink/zink_batch.c
index f2d370ad8f0..6c658ed01fc 100644
--- a/src/gallium/drivers/zink/zink_batch.c
+++ b/src/gallium/drivers/zink/zink_batch.c
@@ -92,7 +92,7 @@ zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
    bs->has_barriers = false;
    zink_screen_update_last_finished(screen, bs->fence.batch_id);
    bs->fence.batch_id = 0;
-   bs->work_count[0] = bs->work_count[1] = 0;
+   bs->draw_count = bs->compute_count = 0;
 }
 
 void
diff --git a/src/gallium/drivers/zink/zink_batch.h b/src/gallium/drivers/zink/zink_batch.h
index dc8ef09e9f1..bdfadea7fcb 100644
--- a/src/gallium/drivers/zink/zink_batch.h
+++ b/src/gallium/drivers/zink/zink_batch.h
@@ -56,6 +56,8 @@ batch_ptr_add_usage(struct zink_batch *batch, struct set *s, void *ptr, struct z
 struct zink_batch_state {
    struct zink_fence fence;
    struct pipe_reference reference;
+   unsigned draw_count;
+
    struct zink_context *ctx;
    VkCommandPool cmdpool;
    VkCommandBuffer cmdbuf;
@@ -66,6 +68,7 @@ struct zink_batch_state {
    VkSemaphore sem;
 
    struct util_queue_fence flush_completed;
+   unsigned compute_count;
 
    struct zink_resource *flush_res;
 
@@ -86,7 +89,6 @@ struct zink_batch_state {
 
    bool is_device_lost;
    bool have_timelines;
-   unsigned work_count[2];
 };
 
 struct zink_batch {
diff --git a/src/gallium/drivers/zink/zink_context.c b/src/gallium/drivers/zink/zink_context.c
index f65fe7ee226..75b6f2399da 100644
--- a/src/gallium/drivers/zink/zink_context.c
+++ b/src/gallium/drivers/zink/zink_context.c
@@ -2458,7 +2458,7 @@ zink_maybe_flush_or_stall(struct zink_context *ctx)
    /* flush anytime our total batch memory usage is potentially >= 50% of total video memory */
    if (ctx->batch.state->resource_size >= screen->total_video_mem / 2 ||
        /* or if there's >100k draws+computes */
-       ctx->batch.state->work_count[0] + ctx->batch.state->work_count[1] >= 100000)
+       ctx->batch.state->draw_count + ctx->batch.state->compute_count >= 100000)
       flush_batch(ctx, true);
 
    if (ctx->resource_size >= screen->total_video_mem / 2 || _mesa_hash_table_num_entries(&ctx->batch_states) > 100) {
diff --git a/src/gallium/drivers/zink/zink_draw.c b/src/gallium/drivers/zink/zink_draw.c
index fc5da158bed..d9f80aa4004 100644
--- a/src/gallium/drivers/zink/zink_draw.c
+++ b/src/gallium/drivers/zink/zink_draw.c
@@ -695,7 +695,7 @@ zink_draw_vbo(struct pipe_context *pctx,
 
    unsigned draw_id = drawid_offset;
    bool needs_drawid = ctx->drawid_broken;
-   batch->state->work_count[0] += num_draws;
+   batch->state->draw_count += num_draws;
    if (dinfo->index_size > 0) {
       VkIndexType index_type;
       unsigned index_size = dinfo->index_size;
@@ -819,7 +819,7 @@ zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
                          offsetof(struct zink_cs_push_constant, work_dim), sizeof(uint32_t),
                          &info->work_dim);
 
-   batch->state->work_count[1]++;
+   batch->state->compute_count++;
    if (info->indirect) {
       vkCmdDispatchIndirect(batch->state->cmdbuf, zink_resource(info->indirect)->obj->buffer, info->indirect_offset);
       zink_batch_reference_resource_rw(batch, zink_resource(info->indirect), false);
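
The struct-packing rationale in the commit message can be seen with a small standalone program. This is an illustrative sketch only: the members below are simplified, hypothetical stand-ins for a few zink_batch_state fields (the real struct is much larger), and the quoted sizes assume a typical LP64 ABI such as x86-64 Linux. Splitting work_count[2] into two separate unsigned fields lets each counter drop into an existing 4-byte padding hole instead of appending an 8-byte array at the tail.

/* Minimal, self-contained sketch (not part of the patch): hypothetical
 * stand-in members, sizes assume a typical LP64 ABI (e.g. x86-64 Linux). */
#include <stdbool.h>
#include <stdio.h>

/* Before: both counters live in an array at the tail of the struct, after
 * the bools, so the 8-byte array can only grow the struct. */
struct counters_at_tail {
   int refcount;              /* 4 bytes, then a 4-byte hole before the pointer */
   void *ctx;                 /* 8-byte aligned */
   bool is_device_lost;
   bool have_timelines;
   unsigned work_count[2];    /* appended after the bools */
};                            /* 32 bytes on LP64 */

/* After: each counter is a separate 4-byte member placed where padding
 * already existed, so no extra 8-byte slot is needed. */
struct counters_in_holes {
   int refcount;
   unsigned draw_count;       /* fills the hole before the 8-byte-aligned pointer */
   void *ctx;
   unsigned compute_count;    /* shares an 8-byte slot with the bools below */
   bool is_device_lost;
   bool have_timelines;
};                            /* 24 bytes on LP64 */

int main(void)
{
   printf("counters_at_tail:  %zu bytes\n", sizeof(struct counters_at_tail));
   printf("counters_in_holes: %zu bytes\n", sizeof(struct counters_in_holes));
   return 0;
}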