zink: abstract zink_get_resource_usage() and move it to be internal

I'll be rewriting how resource tracking works, so abstracting it and removing
direct uses will reduce the chances of breaking things, as well as code churn.

plus it's a bit easier to use

downside is that until that rewrite happens, this will be a (very small) perf hit, and
there are some kinda gross macros involved to consolidate code

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9626>
Mike Blumenkrantz 2020-10-28 13:21:34 -04:00
parent 62b10ad1b8
commit 870e0e73d7
4 changed files with 54 additions and 38 deletions

src/gallium/drivers/zink/zink_batch.c

@@ -180,22 +180,18 @@ zink_batch_reference_resource_rw(struct zink_batch *batch, struct zink_resource
    zink_get_depth_stencil_resources((struct pipe_resource*)res, NULL, &stencil);
-   uint32_t cur_uses = zink_get_resource_usage(res);
-   uint32_t uses_check = cur_uses;
-   cur_uses &= ~(ZINK_RESOURCE_ACCESS_READ << batch->batch_id);
-   cur_uses &= ~(ZINK_RESOURCE_ACCESS_WRITE << batch->batch_id);
    if (batch->batch_id == ZINK_COMPUTE_BATCH_ID) {
-      if (cur_uses >= ZINK_RESOURCE_ACCESS_WRITE || (write && cur_uses))
+      if ((write && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW, ZINK_QUEUE_GFX)) ||
+          (!write && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_GFX)))
         batch_to_flush = 0;
    } else {
-      if (cur_uses & (ZINK_RESOURCE_ACCESS_WRITE << ZINK_COMPUTE_BATCH_ID) ||
-          (write && cur_uses & (ZINK_RESOURCE_ACCESS_READ << ZINK_COMPUTE_BATCH_ID)))
+      if ((write && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE)) ||
+          zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_COMPUTE))
         batch_to_flush = ZINK_COMPUTE_BATCH_ID;
    }
 
    /* if the resource already has usage of any sort set for this batch, we can skip hashing */
-   uint32_t check_mask = (ZINK_RESOURCE_ACCESS_READ | ZINK_RESOURCE_ACCESS_WRITE) << batch->batch_id;
-   if (!(uses_check & check_mask)) {
+   if (!zink_resource_has_usage_for_id(res, batch->batch_id)) {
       bool found = false;
       _mesa_set_search_and_add(batch->resources, res->obj, &found);
       if (!found) {

src/gallium/drivers/zink/zink_query.c

@@ -454,10 +454,7 @@ copy_results_to_buffer(struct zink_context *ctx, struct zink_query *query, struc
    if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
       result_size += base_result_size;
    if (is_cs_query(query)) {
-      uint32_t batch_uses = zink_get_resource_usage(res);
-      batch_uses &= ~(ZINK_RESOURCE_ACCESS_READ << ZINK_COMPUTE_BATCH_ID);
-      batch_uses &= ~(ZINK_RESOURCE_ACCESS_WRITE << ZINK_COMPUTE_BATCH_ID);
-      if (batch_uses >= ZINK_RESOURCE_ACCESS_WRITE)
+      if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_GFX))
         ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
    }
 
    /* if it's a single query that doesn't need special handling, we can copy it and be done */

src/gallium/drivers/zink/zink_resource.c

@@ -53,11 +53,19 @@ debug_describe_zink_resource_object(char *buf, const struct zink_resource_object
    sprintf(buf, "zink_resource_object");
 }
 
+static uint32_t
+get_resource_usage(struct zink_resource *res)
+{
+   uint32_t batch_uses = 0;
+   for (unsigned i = 0; i < ARRAY_SIZE(res->obj->batch_uses); i++)
+      batch_uses |= p_atomic_read(&res->obj->batch_uses[i]) << i;
+   return batch_uses;
+}
+
 static void
 resource_sync_writes_from_batch_usage(struct zink_context *ctx, struct zink_resource *res)
 {
-   uint32_t batch_uses = zink_get_resource_usage(res);
+   uint32_t batch_uses = get_resource_usage(res);
    batch_uses &= ~(ZINK_RESOURCE_ACCESS_READ << ZINK_COMPUTE_BATCH_ID);
 
    uint32_t write_mask = 0;
@@ -68,7 +76,7 @@ resource_sync_writes_from_batch_usage(struct zink_context *ctx, struct zink_reso
       if (batch_id == -1)
         break;
      zink_wait_on_batch(ctx, batch_id);
-      batch_uses &= ~((ZINK_RESOURCE_ACCESS_READ | ZINK_RESOURCE_ACCESS_WRITE) << batch_id);
+      batch_uses &= ~((ZINK_RESOURCE_ACCESS_RW) << batch_id);
    }
 }
@@ -595,7 +603,7 @@ zink_resource_invalidate(struct pipe_context *pctx, struct pipe_resource *pres)
    res->bind_history &= ~ZINK_RESOURCE_USAGE_STREAMOUT;
    util_range_set_empty(&res->valid_buffer_range);
-   if (!zink_get_resource_usage(res))
+   if (!get_resource_usage(res))
       return;
 
    struct zink_resource_object *old_obj = res->obj;
@@ -632,13 +640,32 @@ zink_transfer_copy_bufimage(struct zink_context *ctx,
                             box.y, box.z, trans->base.level, &box, trans->base.usage);
 }
 
-uint32_t
-zink_get_resource_usage(struct zink_resource *res)
+#define ALL_GFX_USAGE(batch_uses, usage) (batch_uses & ((usage << ZINK_NUM_GFX_BATCHES) - ((usage & ZINK_RESOURCE_ACCESS_RW))))
+#define ALL_COMPUTE_USAGE(batch_uses, usage) (batch_uses & (usage << ZINK_COMPUTE_BATCH_ID))
+
+bool
+zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage, enum zink_queue queue)
 {
-   uint32_t batch_uses = 0;
-   for (unsigned i = 0; i < ARRAY_SIZE(res->obj->batch_uses); i++)
-      batch_uses |= p_atomic_read(&res->obj->batch_uses[i]) << i;
-   return batch_uses;
+   uint32_t batch_uses = get_resource_usage(res);
+   switch (queue) {
+   case ZINK_QUEUE_COMPUTE:
+      return ALL_COMPUTE_USAGE(batch_uses, usage);
+   case ZINK_QUEUE_GFX:
+      return ALL_GFX_USAGE(batch_uses, usage);
+   case ZINK_QUEUE_ANY:
+      return ALL_GFX_USAGE(batch_uses, usage) || ALL_COMPUTE_USAGE(batch_uses, usage);
+   default:
+      break;
+   }
+   unreachable("unknown queue type");
+   return false;
 }
 
+bool
+zink_resource_has_usage_for_id(struct zink_resource *res, uint32_t id)
+{
+   uint32_t batch_uses = get_resource_usage(res);
+   return batch_uses & (ZINK_RESOURCE_ACCESS_RW) << id;
+}
+
 static void *
@@ -652,7 +679,6 @@ zink_transfer_map(struct pipe_context *pctx,
    struct zink_context *ctx = zink_context(pctx);
    struct zink_screen *screen = zink_screen(pctx->screen);
    struct zink_resource *res = zink_resource(pres);
-   uint32_t batch_uses = zink_get_resource_usage(res);
 
    struct zink_transfer *trans = slab_alloc(&ctx->transfer_pool);
    if (!trans)
@@ -679,12 +705,11 @@ zink_transfer_map(struct pipe_context *pctx,
       }
 
       if (util_ranges_intersect(&res->valid_buffer_range, box->x, box->x + box->width)) {
         /* special case compute reads since they aren't handled by zink_fence_wait() */
-        if (usage & PIPE_MAP_WRITE && (batch_uses & (ZINK_RESOURCE_ACCESS_READ << ZINK_COMPUTE_BATCH_ID)))
+        if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE))
            zink_wait_on_batch(ctx, ZINK_COMPUTE_BATCH_ID);
-        batch_uses &= ~(ZINK_RESOURCE_ACCESS_READ << ZINK_COMPUTE_BATCH_ID);
-        if (usage & PIPE_MAP_READ && batch_uses >= ZINK_RESOURCE_ACCESS_WRITE)
+        if (usage & PIPE_MAP_READ && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_ANY))
            resource_sync_writes_from_batch_usage(ctx, res);
-        else if (usage & PIPE_MAP_WRITE && batch_uses) {
+        else if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW, ZINK_QUEUE_ANY)) {
            /* need to wait for all rendering to finish
            * TODO: optimize/fix this to be much less obtrusive
            * mesa/mesa#2966
@@ -762,7 +787,7 @@ zink_transfer_map(struct pipe_context *pctx,
 
      if (usage & PIPE_MAP_READ) {
         /* TODO: can probably just do a full cs copy if it's already in a cs batch */
-        if (batch_uses & (ZINK_RESOURCE_ACCESS_WRITE << ZINK_COMPUTE_BATCH_ID))
+        if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_COMPUTE))
           /* don't actually have to stall here, only ensure batch is submitted */
           zink_flush_compute(ctx);
        struct zink_context *ctx = zink_context(pctx);
@@ -780,10 +805,9 @@ zink_transfer_map(struct pipe_context *pctx,
    } else {
       assert(!res->optimal_tiling);
       /* special case compute reads since they aren't handled by zink_fence_wait() */
-      if (batch_uses & (ZINK_RESOURCE_ACCESS_READ << ZINK_COMPUTE_BATCH_ID))
+      if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE))
         zink_wait_on_batch(ctx, ZINK_COMPUTE_BATCH_ID);
-      batch_uses &= ~(ZINK_RESOURCE_ACCESS_READ << ZINK_COMPUTE_BATCH_ID);
-      if (batch_uses >= ZINK_RESOURCE_ACCESS_WRITE) {
+      if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW, ZINK_QUEUE_ANY)) {
         if (usage & PIPE_MAP_READ)
            resource_sync_writes_from_batch_usage(ctx, res);
         else
@@ -828,13 +852,9 @@ zink_transfer_flush_region(struct pipe_context *pctx,
    if (trans->base.usage & PIPE_MAP_WRITE) {
       if (trans->staging_res) {
         struct zink_resource *staging_res = zink_resource(trans->staging_res);
-        uint32_t batch_uses = zink_get_resource_usage(res) | zink_get_resource_usage(staging_res);
-        if (batch_uses & (ZINK_RESOURCE_ACCESS_WRITE << ZINK_COMPUTE_BATCH_ID)) {
+        if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_COMPUTE))
           /* don't actually have to stall here, only ensure batch is submitted */
           zink_flush_compute(ctx);
-          batch_uses &= ~(ZINK_RESOURCE_ACCESS_WRITE << ZINK_COMPUTE_BATCH_ID);
-          batch_uses &= ~(ZINK_RESOURCE_ACCESS_READ << ZINK_COMPUTE_BATCH_ID);
-        }
 
        if (ptrans->resource->target == PIPE_BUFFER)
           zink_copy_buffer(ctx, NULL, res, staging_res, box->x, box->x, box->width);

src/gallium/drivers/zink/zink_resource.h

@@ -124,8 +124,11 @@ zink_resource_setup_transfer_layouts(struct zink_context *ctx, struct zink_resou
 int
 zink_get_resource_latest_batch_usage(struct zink_context *ctx, uint32_t batch_uses);
-uint32_t
-zink_get_resource_usage(struct zink_resource *res);
+bool
+zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage, enum zink_queue queue);
+bool
+zink_resource_has_usage_for_id(struct zink_resource *res, uint32_t id);
 
 void
 zink_resource_desc_set_add(struct zink_resource *res, struct zink_descriptor_set *zds, unsigned idx);