winsys/amdgpu: reduce amdgpu_cs size

buffer_indices_hashlist is only used by the current
amdgpu_cs_context (= amdgpu_cs.csc).

So store a single 4096-entry int array (16 KB) instead of
two, and switch the owner when flushing the cs.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11010>
Author: Pierre-Eric Pelloux-Prayer
Date: 2021-05-26 12:24:31 +02:00 (committed by Marge Bot)
Parent: 74c67f2b72
Commit: a981105d90
2 changed files with 17 additions and 8 deletions

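The pattern behind the change, as a minimal standalone sketch (cs_context, cs and cs_init are illustrative stand-ins, not the actual mesa structs or functions): each submission context keeps only a pointer, while the parent command-stream object owns the single backing array and hands it to both contexts, knowing only the current one will touch it.

#include <string.h>

#define BUFFER_HASHLIST_SIZE 4096   /* power of two, so "id & (SIZE-1)" works as a mask */

struct cs_context {                 /* stand-in for amdgpu_cs_context */
   int *buffer_indices_hashlist;    /* borrowed from the parent cs, not owned */
};

struct cs {                         /* stand-in for amdgpu_cs */
   struct cs_context ctx1, ctx2;
   struct cs_context *current;      /* plays the role of cs->csc */
   struct cs_context *submitted;    /* plays the role of cs->cst */
   int buffer_indices_hashlist[BUFFER_HASHLIST_SIZE];  /* the only copy */
};

static void cs_init(struct cs *cs)
{
   /* -1 marks an empty slot, like the memset added to amdgpu_cs_create() below. */
   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));

   cs->current = &cs->ctx1;
   cs->submitted = &cs->ctx2;

   /* Both contexts point at the same array; only `current` ever reads it. */
   cs->ctx1.buffer_indices_hashlist = cs->buffer_indices_hashlist;
   cs->ctx2.buffer_indices_hashlist = cs->buffer_indices_hashlist;
}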
amdgpu_cs.c

@@ -425,7 +425,7 @@ static inline unsigned amdgpu_cs_epilog_dws(struct amdgpu_cs *cs)
 static int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo,
                                 struct amdgpu_cs_buffer *buffers, unsigned num_buffers)
 {
-   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
+   unsigned hash = bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
    int i = cs->buffer_indices_hashlist[hash];
 
    /* not found or found */
@@ -522,7 +522,7 @@ amdgpu_lookup_or_add_real_buffer(struct radeon_cmdbuf *rcs, struct amdgpu_cs *ac
       idx = amdgpu_do_add_real_buffer(acs->ws, cs, bo);
 
-   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
+   hash = bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
    cs->buffer_indices_hashlist[hash] = idx;
 
    if (bo->base.placement & RADEON_DOMAIN_VRAM)
@@ -577,7 +577,7 @@ static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_winsys *ws,
    buffer->u.slab.real_idx = real_idx;
    cs->num_slab_buffers++;
 
-   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
+   hash = bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
    cs->buffer_indices_hashlist[hash] = idx;
 
    return idx;
@@ -621,7 +621,7 @@ static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_winsys *ws,
    amdgpu_winsys_bo_reference(ws, &buffer->bo, bo);
    cs->num_sparse_buffers++;
 
-   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
+   hash = bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
    cs->buffer_indices_hashlist[hash] = idx;
 
    /* We delay adding the backing buffers until we really have to. However,
@@ -911,7 +911,6 @@ static bool amdgpu_init_cs_context(struct amdgpu_winsys *ws,
    cs->ib[IB_PARALLEL_COMPUTE].ip_type = AMDGPU_HW_IP_COMPUTE;
    cs->ib[IB_PARALLEL_COMPUTE].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;
 
-   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
    cs->last_added_bo = NULL;
    return true;
 }
@@ -946,8 +945,6 @@ static void amdgpu_cs_context_cleanup(struct amdgpu_winsys *ws, struct amdgpu_cs
    cs->num_slab_buffers = 0;
    cs->num_sparse_buffers = 0;
    amdgpu_fence_reference(&cs->fence, NULL);
 
-   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
-
    cs->last_added_bo = NULL;
 }
@@ -1013,10 +1010,16 @@ amdgpu_cs_create(struct radeon_cmdbuf *rcs,
       return false;
    }
 
+   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
+
    /* Set the first submission context as current. */
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
 
+   /* Assign to both amdgpu_cs_context; only csc will use it. */
+   cs->csc1.buffer_indices_hashlist = cs->buffer_indices_hashlist;
+   cs->csc2.buffer_indices_hashlist = cs->buffer_indices_hashlist;
+
    cs->main.rcs = rcs;
    rcs->priv = cs;
@@ -1892,6 +1895,8 @@ static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
       amdgpu_cs_context_cleanup(ws, cs->csc);
    }
 
+   memset(cs->csc->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
+
    amdgpu_get_new_ib(ws, rcs, &cs->main, cs);
 
    if (cs->compute_ib.ib_mapped)
       amdgpu_get_new_ib(ws, cs->compute_ib.rcs, &cs->compute_ib, cs);

amdgpu_cs.h

@@ -104,7 +104,7 @@ struct amdgpu_cs_context {
    unsigned max_sparse_buffers;
    struct amdgpu_cs_buffer *sparse_buffers;
 
-   int buffer_indices_hashlist[4096];
+   int *buffer_indices_hashlist;
 
    struct amdgpu_winsys_bo *last_added_bo;
    unsigned last_added_bo_index;
@@ -128,6 +128,8 @@ struct amdgpu_cs_context {
    bool secure;
 };
 
+#define BUFFER_HASHLIST_SIZE 4096
+
 struct amdgpu_cs {
    struct amdgpu_ib main; /* must be first because this is inherited */
    struct amdgpu_ib compute_ib; /* optional parallel compute IB */
@@ -145,6 +147,8 @@ struct amdgpu_cs {
    struct amdgpu_cs_context *csc;
    /* The CS being currently-owned by the other thread. */
    struct amdgpu_cs_context *cst;
+   /* This is only used by csc, not cst */
+   int buffer_indices_hashlist[BUFFER_HASHLIST_SIZE];
 
    /* Flush CS. */
    void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
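For context, here is how such a hashlist is typically consulted, modeled loosely on the amdgpu_lookup_buffer() hunk above and reusing the stand-in cs_context type; the buffer type and helper name are illustrative, and the real function additionally falls back to a linear search (refreshing the slot) when the hashed entry does not match.

struct buffer { unsigned unique_id; };

static int lookup_buffer(struct cs_context *ctx, struct buffer *bo,
                         struct buffer *const *buffers, unsigned num_buffers)
{
   /* The table size is a power of two, so masking the id gives a slot index. */
   unsigned hash = bo->unique_id & (BUFFER_HASHLIST_SIZE - 1);
   int i = ctx->buffer_indices_hashlist[hash];

   /* Either the slot is empty (-1) or it holds a candidate index that still
    * has to be verified against the buffer list. */
   if (i >= 0 && (unsigned)i < num_buffers && buffers[i] == bo)
      return i;

   return -1;
}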