etnaviv: rework resource status tracking (again)

While a resource might be shared across different contexts, all synchronization
of commands is the responsibility of the user (OpenGL spec Chapter 5 "Shared
Objects and Multiple Contexts").

Currently etnaviv tries to be extremely helpful by flushing foreign contexts
when using a resource that is still pending there. This introduces a lot of
issues, as context flushes can then happen at basically any time, and it also
adds a lot of overhead due to the required tracking and locking. Get rid of
all this cross-context tracking and flushing.

The only real requirement here is that we need to track pending resources
without mutating the etna_resource state, as the resource might be shared
across multiple contexts and thus be used by multiple threads concurrently.
Introduce a hash table to track the currently pending resources and their
states in the local context.
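
To make the scheme concrete, here is a minimal, self-contained sketch of the
idea (not driver code). The fixed-size pending_map below is a hypothetical
stand-in for the pointer hash table the driver actually uses (Mesa's
_mesa_pointer_hash_table_create() and friends, visible in the diff), and the
enum values are simplified; the key point is that the read/write status lives
entirely in per-context storage, so the shared etna_resource is never mutated.

#include <stdio.h>

/* Simplified stand-ins for the driver types. */
enum etna_resource_status {
   ETNA_PENDING_READ  = 0x01,
   ETNA_PENDING_WRITE = 0x02,
};

struct etna_resource { int dummy; };   /* shared object, never mutated below */

#define MAX_PENDING 64

/* Per-context map: resource pointer -> accumulated pending status. */
struct pending_map {
   const struct etna_resource *key[MAX_PENDING];
   unsigned status[MAX_PENDING];
   unsigned count;
};

/* Mark a resource as pending in the local context only, merging the
 * READ/WRITE bits the way the real etna_resource_used() does. */
static void
resource_used(struct pending_map *m, const struct etna_resource *rsc,
              unsigned status)
{
   for (unsigned i = 0; i < m->count; i++) {
      if (m->key[i] == rsc) {
         m->status[i] |= status;
         return;
      }
   }
   if (m->count < MAX_PENDING) {
      m->key[m->count] = rsc;
      m->status[m->count] = status;
      m->count++;
   }
}

/* Ask what the local context still has queued for this resource. */
static unsigned
resource_status(const struct pending_map *m, const struct etna_resource *rsc)
{
   for (unsigned i = 0; i < m->count; i++)
      if (m->key[i] == rsc)
         return m->status[i];
   return 0;
}

int
main(void)
{
   struct etna_resource rt = {0}, tex = {0};
   struct pending_map pending = { .count = 0 };

   resource_used(&pending, &rt, ETNA_PENDING_WRITE);   /* render target written */
   resource_used(&pending, &tex, ETNA_PENDING_READ);   /* texture sampled */

   printf("rt: %u, tex: %u\n",
          resource_status(&pending, &rt), resource_status(&pending, &tex));

   pending.count = 0;   /* context flush: _mesa_hash_table_clear() in the driver */
   printf("rt after flush: %u\n", resource_status(&pending, &rt));

   return 0;
}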

A side effect of this change is that we don't need to keep all used
resources referenced until context flush time, as we no longer need to mutate
them to track their status. This allows some of them to be freed a bit
earlier. Note that this introduces a small possibility of a new resource
matching the key of an already destroyed resource that is still in the
pending_resources hash table, but even if such a collision happens the worst
outcome is an unnecessary flush of the local context, which is acceptable as
long as it doesn't happen very often.
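
The sketch below continues the one above and illustrates why such a collision
is harmless. map_resource() and flush_local_context() are hypothetical
placeholders for the etna_transfer_map() logic and pctx->flush(): the only
action ever taken on a pending status is a flush of the local context, so a
stale entry left behind by a recycled resource pointer costs at most one
redundant flush.

#include <stdbool.h>

/* Hypothetical placeholder for pctx->flush(pctx, NULL, 0): submit the command
 * stream, then forget all pending resources, as etna_flush() does with
 * _mesa_hash_table_clear(). */
static void
flush_local_context(struct pending_map *m)
{
   m->count = 0;
}

/* CPU-mapping a resource, modeled on the etna_transfer_map() hunk further
 * down: reads must wait for pending GPU writes, writes need exclusive access. */
static void
map_resource(struct pending_map *m, const struct etna_resource *rsc,
             bool map_read, bool map_write)
{
   unsigned status = resource_status(m, rsc);

   /* If this entry is stale because a destroyed resource's pointer was reused,
    * status is a false positive and the flush below is merely redundant;
    * correctness is unaffected. */
   if ((map_read && (status & ETNA_PENDING_WRITE)) ||
       (map_write && status != 0))
      flush_local_context(m);

   /* ...etna_bo_cpu_prep() and the actual CPU mapping would follow here... */
}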

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian Gmeiner <christian.gmeiner@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14466>
Lucas Stach, 2022-01-07 21:18:31 +01:00, committed by Marge Bot
parent a11fa44b2d
commit 0993473054
8 changed files with 32 additions and 212 deletions

View File

@@ -343,7 +343,6 @@ etna_clear_blt(struct pipe_context *pctx, unsigned buffers, const struct pipe_sc
const union pipe_color_union *color, double depth, unsigned stencil)
{
struct etna_context *ctx = etna_context(pctx);
mtx_lock(&ctx->lock);
etna_set_state(ctx->stream, VIVS_GL_FLUSH_CACHE, 0x00000c23);
etna_set_state(ctx->stream, VIVS_TS_FLUSH_CACHE, VIVS_TS_FLUSH_CACHE_FLUSH);
@@ -364,7 +363,6 @@ etna_clear_blt(struct pipe_context *pctx, unsigned buffers, const struct pipe_sc
etna_set_state(ctx->stream, VIVS_GL_FLUSH_CACHE, 0x00000c23);
else
etna_set_state(ctx->stream, VIVS_GL_FLUSH_CACHE, 0x00000002);
mtx_unlock(&ctx->lock);
}
static bool
@@ -435,7 +433,6 @@ etna_try_blt_blit(struct pipe_context *pctx,
return true;
}
mtx_lock(&ctx->lock);
/* Kick off BLT here */
if (src == dst && src_lev->ts_compress_fmt < 0) {
/* Resolve-in-place */
@@ -527,7 +524,6 @@ etna_try_blt_blit(struct pipe_context *pctx,
dst->seqno++;
dst_lev->ts_valid = false;
mtx_unlock(&ctx->lock);
return true;
}

View File

@@ -102,47 +102,12 @@ etna_context_destroy(struct pipe_context *pctx)
{
struct etna_context *ctx = etna_context(pctx);
mtx_lock(&ctx->lock);
if (ctx->pending_resources)
_mesa_hash_table_destroy(ctx->pending_resources, NULL);
if (ctx->used_resources_read) {
/*
* There should be no resources tracked in the context when it's being
* destroyed. Be sure there are none to avoid memory leaks on buggy
* programs.
*/
set_foreach(ctx->used_resources_read, entry) {
struct etna_resource *rsc = (struct etna_resource *)entry->key;
mtx_lock(&rsc->lock);
_mesa_set_remove_key(rsc->pending_ctx, ctx);
mtx_unlock(&rsc->lock);
}
_mesa_set_destroy(ctx->used_resources_read, NULL);
}
if (ctx->used_resources_write) {
/*
* There should be no resources tracked in the context when it's being
* destroyed. Be sure there are none to avoid memory leaks on buggy
* programs.
*/
set_foreach(ctx->used_resources_write, entry) {
struct etna_resource *rsc = (struct etna_resource *)entry->key;
mtx_lock(&rsc->lock);
_mesa_set_remove_key(rsc->pending_ctx, ctx);
mtx_unlock(&rsc->lock);
}
_mesa_set_destroy(ctx->used_resources_write, NULL);
}
if (ctx->flush_resources)
_mesa_set_destroy(ctx->flush_resources, NULL);
mtx_unlock(&ctx->lock);
if (ctx->dummy_desc_bo)
etna_bo_del(ctx->dummy_desc_bo);
@@ -165,8 +130,6 @@ etna_context_destroy(struct pipe_context *pctx)
if (ctx->in_fence_fd != -1)
close(ctx->in_fence_fd);
mtx_destroy(&ctx->lock);
FREE(pctx);
}
@@ -346,8 +309,6 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
if (!etna_state_update(ctx))
return;
mtx_lock(&ctx->lock);
/*
* Figure out the buffers/features we need:
*/
@@ -450,7 +411,6 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
* draw op has caused the hang. */
etna_stall(ctx->stream, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
}
mtx_unlock(&ctx->lock);
if (DBG_ENABLED(ETNA_DBG_FLUSH_ALL))
pctx->flush(pctx, NULL, 0);
@@ -553,8 +513,6 @@ etna_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
struct etna_context *ctx = etna_context(pctx);
int out_fence_fd = -1;
mtx_lock(&ctx->lock);
list_for_each_entry(struct etna_acc_query, aq, &ctx->active_acc_queries, node)
etna_acc_query_suspend(aq, ctx);
@@ -576,46 +534,9 @@ etna_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
if (fence)
*fence = etna_fence_create(pctx, out_fence_fd);
/*
* Go through all _resources_ pending in this _context_ and mark them as
* not pending in this _context_ anymore, since they were just flushed.
*/
set_foreach(ctx->used_resources_read, entry) {
struct etna_resource *rsc = (struct etna_resource *)entry->key;
struct pipe_resource *referenced = &rsc->base;
mtx_lock(&rsc->lock);
_mesa_set_remove_key(rsc->pending_ctx, ctx);
/* if resource has no pending ctx's reset its status */
if (_mesa_set_next_entry(rsc->pending_ctx, NULL) == NULL)
rsc->status &= ~ETNA_PENDING_READ;
mtx_unlock(&rsc->lock);
pipe_resource_reference(&referenced, NULL);
}
_mesa_set_clear(ctx->used_resources_read, NULL);
set_foreach(ctx->used_resources_write, entry) {
struct etna_resource *rsc = (struct etna_resource *)entry->key;
struct pipe_resource *referenced = &rsc->base;
mtx_lock(&rsc->lock);
_mesa_set_remove_key(rsc->pending_ctx, ctx);
/* if resource has no pending ctx's reset its status */
if (_mesa_set_next_entry(rsc->pending_ctx, NULL) == NULL)
rsc->status &= ~ETNA_PENDING_WRITE;
mtx_unlock(&rsc->lock);
pipe_resource_reference(&referenced, NULL);
}
_mesa_set_clear(ctx->used_resources_write, NULL);
_mesa_hash_table_clear(ctx->pending_resources, NULL);
etna_reset_gpu_state(ctx);
mtx_unlock(&ctx->lock);
}
static void
@@ -666,14 +587,8 @@ etna_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
if (ctx->stream == NULL)
goto fail;
ctx->used_resources_read = _mesa_set_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
if (!ctx->used_resources_read)
goto fail;
ctx->used_resources_write = _mesa_set_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
if (!ctx->used_resources_write)
ctx->pending_resources = _mesa_pointer_hash_table_create(NULL);
if (!ctx->pending_resources)
goto fail;
ctx->flush_resources = _mesa_set_create(NULL, _mesa_hash_pointer,
@@ -681,8 +596,6 @@ etna_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
if (!ctx->flush_resources)
goto fail;
mtx_init(&ctx->lock, mtx_recursive);
/* context ctxate setup */
ctx->screen = screen;
/* need some sane default in case gallium frontends don't set some state: */

View File

@@ -199,15 +199,12 @@ struct etna_context {
struct etna_reloc DUMMY_DESC_ADDR;
/* set of resources used by currently-unsubmitted renders */
struct set *used_resources_read;
struct set *used_resources_write;
struct hash_table *pending_resources;
/* resources that must be flushed implicitly at the context flush time */
struct set *flush_resources;
bool is_noop;
mtx_t lock;
};
static inline struct etna_context *

View File

@@ -305,12 +305,6 @@ etna_resource_alloc(struct pipe_screen *pscreen, unsigned layout,
etna_bo_cpu_fini(rsc->bo);
}
mtx_init(&rsc->lock, mtx_recursive);
rsc->pending_ctx = _mesa_set_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
if (!rsc->pending_ctx)
goto free_rsc;
return &rsc->base;
free_rsc:
@@ -450,11 +444,6 @@ etna_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
{
struct etna_resource *rsc = etna_resource(prsc);
mtx_lock(&rsc->lock);
assert(!_mesa_set_next_entry(rsc->pending_ctx, NULL));
_mesa_set_destroy(rsc->pending_ctx, NULL);
mtx_unlock(&rsc->lock);
if (rsc->bo)
etna_bo_del(rsc->bo);
@@ -472,8 +461,6 @@ etna_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
for (unsigned i = 0; i < ETNA_NUM_LOD; i++)
FREE(rsc->levels[i].patch_offsets);
mtx_destroy(&rsc->lock);
FREE(rsc);
}
@@ -554,12 +541,6 @@ etna_resource_from_handle(struct pipe_screen *pscreen,
goto fail;
}
mtx_init(&rsc->lock, mtx_recursive);
rsc->pending_ctx = _mesa_set_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
if (!rsc->pending_ctx)
goto fail;
if (screen->ro) {
struct pipe_resource *imp_prsc = prsc;
do {
@@ -669,78 +650,37 @@ void
etna_resource_used(struct etna_context *ctx, struct pipe_resource *prsc,
enum etna_resource_status status)
{
struct pipe_resource *referenced = NULL;
struct etna_resource *rsc;
struct hash_entry *entry;
uint32_t hash;
if (!prsc)
return;
mtx_lock(&ctx->lock);
rsc = etna_resource(prsc);
again:
mtx_lock(&rsc->lock);
hash = _mesa_hash_pointer(rsc);
entry = _mesa_hash_table_search_pre_hashed(ctx->pending_resources,
hash, rsc);
set_foreach(rsc->pending_ctx, entry) {
struct etna_context *extctx = (struct etna_context *)entry->key;
struct pipe_context *pctx = &extctx->base;
bool need_flush = false;
if (mtx_trylock(&extctx->lock) != thrd_success) {
/*
* The other context could be locked in etna_flush() and
* stuck waiting for the resource lock, so release the
* resource lock here, let etna_flush() finish, and try
* again.
*/
mtx_unlock(&rsc->lock);
thrd_yield();
goto again;
}
set_foreach(extctx->used_resources_read, entry2) {
struct etna_resource *rsc2 = (struct etna_resource *)entry2->key;
if (ctx == extctx || rsc2 != rsc)
continue;
if (status & ETNA_PENDING_WRITE) {
need_flush = true;
break;
}
}
if (need_flush) {
pctx->flush(pctx, NULL, 0);
mtx_unlock(&extctx->lock);
continue;
}
set_foreach(extctx->used_resources_write, entry2) {
struct etna_resource *rsc2 = (struct etna_resource *)entry2->key;
if (ctx == extctx || rsc2 != rsc)
continue;
need_flush = true;
break;
}
if (need_flush)
pctx->flush(pctx, NULL, 0);
mtx_unlock(&extctx->lock);
if (entry) {
enum etna_resource_status tmp = (uintptr_t)entry->data;
tmp |= status;
entry->data = (void *)(uintptr_t)tmp;
} else {
_mesa_hash_table_insert_pre_hashed(ctx->pending_resources, hash, rsc,
(void *)(uintptr_t)status);
}
}
rsc->status = status;
enum etna_resource_status
etna_resource_status(struct etna_context *ctx, struct etna_resource *res)
{
struct hash_entry *entry = _mesa_hash_table_search(ctx->pending_resources, res);
if (!_mesa_set_search(rsc->pending_ctx, ctx)) {
pipe_resource_reference(&referenced, prsc);
_mesa_set_add((status & ETNA_PENDING_READ) ?
ctx->used_resources_read : ctx->used_resources_write, rsc);
_mesa_set_add(rsc->pending_ctx, ctx);
}
mtx_unlock(&rsc->lock);
mtx_unlock(&ctx->lock);
if (entry)
return (enum etna_resource_status)(uintptr_t)entry->data;
else
return 0;
}
bool

View File

@@ -32,7 +32,7 @@
#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_helpers.h"
#include "util/u_range.h"
@@ -95,11 +95,6 @@ struct etna_resource {
struct pipe_resource *render;
/* frontend flushes resource via an explicit call to flush_resource */
bool explicit_flush;
enum etna_resource_status status;
mtx_t lock; /* Lock to protect pending_ctx */
struct set *pending_ctx;
};
/* returns TRUE if a is newer than b */
@@ -169,11 +164,8 @@ resource_written(struct etna_context *ctx, struct pipe_resource *prsc)
etna_resource_used(ctx, prsc, ETNA_PENDING_WRITE);
}
static inline enum etna_resource_status
etna_resource_status(struct etna_context *ctx, struct etna_resource *res)
{
return res->status;
}
enum etna_resource_status
etna_resource_status(struct etna_context *ctx, struct etna_resource *res);
/* Allocate Tile Status for an etna resource.
* Tile status is a cache of the clear status per tile. This means a smaller

View File

@@ -420,7 +420,6 @@ etna_clear_rs(struct pipe_context *pctx, unsigned buffers, const struct pipe_sci
const union pipe_color_union *color, double depth, unsigned stencil)
{
struct etna_context *ctx = etna_context(pctx);
mtx_lock(&ctx->lock);
/* Flush color and depth cache before clearing anything.
* This is especially important when coming from another surface, as
@@ -466,7 +465,6 @@ etna_clear_rs(struct pipe_context *pctx, unsigned buffers, const struct pipe_sci
etna_blit_clear_zs_rs(pctx, ctx->framebuffer_s.zsbuf, buffers, depth, stencil);
etna_stall(ctx->stream, SYNC_RECIPIENT_RA, SYNC_RECIPIENT_PE);
mtx_unlock(&ctx->lock);
}
static bool
@@ -705,8 +703,6 @@ etna_try_rs_blit(struct pipe_context *pctx,
width & (w_align - 1) || height & (h_align - 1))
goto manual;
mtx_lock(&ctx->lock);
/* Always flush color and depth cache together before resolving. This works
* around artifacts that appear in some cases when scanning out a texture
* directly after it has been rendered to, such as rendering an animated web
@@ -799,7 +795,6 @@ etna_try_rs_blit(struct pipe_context *pctx,
dst->seqno++;
dst_lev->ts_valid = false;
ctx->dirty |= ETNA_DIRTY_DERIVE_TS;
mtx_unlock(&ctx->lock);
return true;

View File

@@ -326,9 +326,7 @@ etna_texture_barrier(struct pipe_context *pctx, unsigned flags)
struct etna_context *ctx = etna_context(pctx);
/* clear color and texture cache to make sure that texture unit reads
* what has been written */
mtx_lock(&ctx->lock);
etna_set_state(ctx->stream, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_COLOR | VIVS_GL_FLUSH_CACHE_TEXTURE);
mtx_unlock(&ctx->lock);
}
uint32_t

View File

@@ -374,7 +374,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
* transfers without a temporary resource.
*/
if (trans->rsc || !(usage & PIPE_MAP_UNSYNCHRONIZED)) {
enum etna_resource_status status = etna_resource_status(ctx, rsc);
enum etna_resource_status status = etna_resource_status(ctx, rsc);
uint32_t prep_flags = 0;
/*
@@ -383,24 +383,13 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
* current GPU usage (reads must wait for GPU writes, writes must have
* exclusive access to the buffer).
*/
mtx_lock(&ctx->lock);
if ((trans->rsc && (status & ETNA_PENDING_WRITE)) ||
(!trans->rsc &&
(((usage & PIPE_MAP_READ) && (status & ETNA_PENDING_WRITE)) ||
((usage & PIPE_MAP_WRITE) && status)))) {
mtx_lock(&rsc->lock);
set_foreach(rsc->pending_ctx, entry) {
struct etna_context *pend_ctx = (struct etna_context *)entry->key;
struct pipe_context *pend_pctx = &pend_ctx->base;
pend_pctx->flush(pend_pctx, NULL, 0);
}
mtx_unlock(&rsc->lock);
pctx->flush(pctx, NULL, 0);
}
mtx_unlock(&ctx->lock);
if (usage & PIPE_MAP_READ)
prep_flags |= DRM_ETNA_PREP_READ;
if (usage & PIPE_MAP_WRITE)