radeonsi: set a per-buffer flag that disables inter-process sharing (v4)

For lower overhead in the CS ioctl.
Winsys allocators are not used with interprocess-sharable resources.

v2: It shouldn't crash anymore, but the kernel will reject the new flag.
v3 (christian): Rename the flag, avoid sending those buffers in the BO list.
v4 (christian): Remove setting the kernel flag for now.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Author:    Marek Olšák
Date:      2017-07-18 16:08:44 -04:00
Committer: Christian König
Parent:    5ae2de81c8
Commit:    8b3a257851

4 changed files with 56 additions and 28 deletions
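To make the intended usage concrete, here is a minimal sketch (not part of the commit). It assumes the radeon_winsys buffer_create() entry point of this era, taking (ws, size, alignment, domain, flags); only the flag and domain names come from this patch.

    /* A buffer created WITHOUT RADEON_FLAG_NO_INTERPROCESS_SHARING may be
     * exported to another process later, so it must bypass the winsys
     * allocators (slabs, pb_cache) and always be a fresh kernel BO.
     * A buffer created WITH the flag stays private to this process and is
     * allowed to use the cheap reusable pools. */
    struct pb_buffer *private_buf =
       ws->buffer_create(ws, 64 * 1024, 4096, RADEON_DOMAIN_VRAM,
                         RADEON_FLAG_GTT_WC |
                         RADEON_FLAG_NO_INTERPROCESS_SHARING);

    struct pb_buffer *shareable_buf =
       ws->buffer_create(ws, 64 * 1024, 4096, RADEON_DOMAIN_VRAM,
                         RADEON_FLAG_GTT_WC); /* may be exported later */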

src/gallium/drivers/radeon/r600_buffer_common.c

@@ -167,6 +167,13 @@ void r600_init_resource_fields(struct r600_common_screen *rscreen,
               RADEON_FLAG_GTT_WC;
    }
 
+   /* Only displayable single-sample textures can be shared between
+    * processes. */
+   if (res->b.b.target == PIPE_BUFFER ||
+       res->b.b.nr_samples >= 2 ||
+       rtex->surface.micro_tile_mode != RADEON_MICRO_MODE_DISPLAY)
+      res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
+
    /* If VRAM is just stolen system memory, allow both VRAM and
     * GTT, whichever has free space. If a buffer is evicted from
     * VRAM to GTT, it will stay there.

src/gallium/drivers/radeon/radeon_winsys.h

@@ -54,6 +54,7 @@ enum radeon_bo_flag { /* bitfield */
     RADEON_FLAG_NO_CPU_ACCESS = (1 << 1),
     RADEON_FLAG_NO_SUBALLOC = (1 << 2),
     RADEON_FLAG_SPARSE = (1 << 3),
+    RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4),
 };
 
 enum radeon_bo_usage { /* bitfield */
@@ -661,14 +662,19 @@ static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
 {
    switch (heap) {
    case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
-      return RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_CPU_ACCESS;
+      return RADEON_FLAG_GTT_WC |
+             RADEON_FLAG_NO_CPU_ACCESS |
+             RADEON_FLAG_NO_INTERPROCESS_SHARING;
    case RADEON_HEAP_VRAM:
    case RADEON_HEAP_VRAM_GTT:
    case RADEON_HEAP_GTT_WC:
-      return RADEON_FLAG_GTT_WC;
+      return RADEON_FLAG_GTT_WC |
+             RADEON_FLAG_NO_INTERPROCESS_SHARING;
    case RADEON_HEAP_GTT:
    default:
-      return 0;
+      return RADEON_FLAG_NO_INTERPROCESS_SHARING;
    }
 }
@@ -700,8 +706,14 @@ static inline int radeon_get_heap_index(enum radeon_bo_domain domain,
    /* NO_CPU_ACCESS implies VRAM only. */
    assert(!(flags & RADEON_FLAG_NO_CPU_ACCESS) || domain == RADEON_DOMAIN_VRAM);
 
+   /* Resources with interprocess sharing don't use any winsys allocators. */
+   if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
+      return -1;
+
    /* Unsupported flags: NO_SUBALLOC, SPARSE. */
-   if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_CPU_ACCESS))
+   if (flags & ~(RADEON_FLAG_GTT_WC |
+                 RADEON_FLAG_NO_CPU_ACCESS |
+                 RADEON_FLAG_NO_INTERPROCESS_SHARING))
       return -1;
 
    switch (domain) {
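Taken together, the two helpers above now maintain an invariant: every cached heap maps to a flag set that includes RADEON_FLAG_NO_INTERPROCESS_SHARING, and radeon_get_heap_index() returns -1 for any flag combination without it. A hypothetical sanity check (not in the patch, using only names it introduces or already relies on) spells this out:

    /* A shareable buffer (flag not set) can never be assigned a cached
     * heap, because radeon_flags_from_heap() includes the new bit for
     * every heap and radeon_get_heap_index() rejects its absence. */
    static inline void radeon_check_heaps(void)
    {
       for (unsigned h = 0; h < RADEON_MAX_CACHED_HEAPS; h++) {
          assert(radeon_flags_from_heap((enum radeon_heap)h) &
                 RADEON_FLAG_NO_INTERPROCESS_SHARING);
       }
    }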

src/gallium/winsys/amdgpu/drm/amdgpu_bo.c

@@ -1138,7 +1138,7 @@ amdgpu_bo_create(struct radeon_winsys *rws,
 {
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    struct amdgpu_winsys_bo *bo;
-   unsigned usage = 0, pb_cache_bucket;
+   unsigned usage = 0, pb_cache_bucket = 0;
 
    /* VRAM implies WC. This is not optional. */
    assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
@@ -1193,19 +1193,23 @@ no_slab:
    size = align64(size, ws->info.gart_page_size);
    alignment = align(alignment, ws->info.gart_page_size);
 
-   int heap = radeon_get_heap_index(domain, flags);
-   assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
-   usage = 1 << heap; /* Only set one usage bit for each heap. */
-   pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
-   assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
-
-   /* Get a buffer from the cache. */
-   bo = (struct amdgpu_winsys_bo*)
-        pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
-                                pb_cache_bucket);
-   if (bo)
-      return &bo->base;
+   bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
+
+   if (use_reusable_pool) {
+      int heap = radeon_get_heap_index(domain, flags);
+      assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
+      usage = 1 << heap; /* Only set one usage bit for each heap. */
+      pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
+      assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
+
+      /* Get a buffer from the cache. */
+      bo = (struct amdgpu_winsys_bo*)
+           pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
+                                   pb_cache_bucket);
+      if (bo)
+         return &bo->base;
+   }
 
    /* Create a new one. */
    bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
@@ -1220,7 +1224,7 @@ no_slab:
       return NULL;
    }
 
-   bo->u.real.use_reusable_pool = true;
+   bo->u.real.use_reusable_pool = use_reusable_pool;
    return &bo->base;
 }
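The same value now gates both ends of the buffer's lifetime: allocation above, and the release path, where use_reusable_pool decides whether the BO is recycled through pb_cache or freed immediately. A sketch of that release side (assumed from the existing meaning of use_reusable_pool in this winsys; the helper name here is hypothetical):

    static void bo_destroy_or_cache(struct pb_buffer *_buf)
    {
       struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo *)_buf;

       if (bo->u.real.use_reusable_pool)
          pb_cache_add_buffer(&bo->u.real.cache_entry); /* recycle later */
       else
          amdgpu_bo_destroy(_buf); /* shareable BOs are freed immediately */
    }

The radeon winsys below applies the identical pattern to radeon_winsys_bo_create().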

src/gallium/winsys/radeon/drm/radeon_drm_bo.c

@@ -914,7 +914,7 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
 {
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
-   unsigned usage = 0, pb_cache_bucket;
+   unsigned usage = 0, pb_cache_bucket = 0;
 
    assert(!(flags & RADEON_FLAG_SPARSE)); /* not supported */
@@ -969,17 +969,22 @@ no_slab:
    size = align(size, ws->info.gart_page_size);
    alignment = align(alignment, ws->info.gart_page_size);
 
-   int heap = radeon_get_heap_index(domain, flags);
-   assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
-   usage = 1 << heap; /* Only set one usage bit for each heap. */
-   pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
-   assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
-
-   bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
-                                          usage, pb_cache_bucket));
-   if (bo)
-      return &bo->base;
+   bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
+
+   /* Shared resources don't use cached heaps. */
+   if (use_reusable_pool) {
+      int heap = radeon_get_heap_index(domain, flags);
+      assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
+      usage = 1 << heap; /* Only set one usage bit for each heap. */
+
+      pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
+      assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
+
+      bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
+                                             usage, pb_cache_bucket));
+      if (bo)
+         return &bo->base;
+   }
 
    bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
                          pb_cache_bucket);
@@ -994,7 +999,7 @@ no_slab:
       return NULL;
    }
 
-   bo->u.real.use_reusable_pool = true;
+   bo->u.real.use_reusable_pool = use_reusable_pool;
 
    mtx_lock(&ws->bo_handles_mutex);
    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);