iris: Rework the DEVICE_LOCAL heap

Split it into a local-only heap (which keeps the original enum) and a
local-preferred heap (which has a new enum).

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14012>
Author:    Nanley Chery
Date:      2021-12-01 15:45:25 -05:00
Committer: Marge Bot
Parent:    305677e242
Commit:    9a188b10a5

3 changed files with 45 additions and 4 deletions
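
In placement terms, the split works like this (see the alloc_fresh_bo() hunk below): a local-only BO may be placed in VRAM only, a local-preferred BO lists VRAM first with system memory as a fallback, and a system-memory BO stays in SMEM. The following is a minimal C sketch of that mapping, not Mesa code; the enum values are copied from the iris_bufmgr.h hunk in this commit, while the heap_placement struct and the placement_for_heap() helper are purely illustrative.

   #include <stdbool.h>

   /* Heap values as defined in the iris_bufmgr.h hunk of this commit. */
   enum iris_heap {
      IRIS_HEAP_SYSTEM_MEMORY,
      IRIS_HEAP_DEVICE_LOCAL,
      IRIS_HEAP_DEVICE_LOCAL_PREFERRED,
      IRIS_HEAP_MAX,
   };

   /* Illustrative only: which memory regions each heap asks the kernel to
    * consider, mirroring the switch added to alloc_fresh_bo() below. */
   struct heap_placement {
      bool vram;   /* device-local memory */
      bool smem;   /* system memory; a fallback when set together with vram */
   };

   static struct heap_placement
   placement_for_heap(enum iris_heap heap)
   {
      switch (heap) {
      case IRIS_HEAP_DEVICE_LOCAL:
         /* local-only: VRAM, no system-memory fallback */
         return (struct heap_placement){ .vram = true, .smem = false };
      case IRIS_HEAP_DEVICE_LOCAL_PREFERRED:
         /* local-preferred: VRAM preferred, system memory as a fallback */
         return (struct heap_placement){ .vram = true, .smem = true };
      case IRIS_HEAP_SYSTEM_MEMORY:
      default:
         return (struct heap_placement){ .vram = false, .smem = true };
      }
   }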

@@ -107,7 +107,7 @@ dump_bo_list(struct iris_batch *batch)
       bool exported = iris_bo_is_exported(bo);
       bool imported = iris_bo_is_imported(bo);
-      fprintf(stderr, "[%2d]: %3d (%3d) %-14s @ 0x%016"PRIx64" (%-6s %8"PRIu64"B) %2d refs %s%s%s\n",
+      fprintf(stderr, "[%2d]: %3d (%3d) %-14s @ 0x%016"PRIx64" (%-15s %8"PRIu64"B) %2d refs %s%s%s\n",
               i,
               bo->gem_handle,
               backing->gem_handle,

@@ -206,6 +206,10 @@ struct iris_bufmgr {
    struct bo_cache_bucket local_cache_bucket[BUCKET_ARRAY_SIZE];
    int num_local_buckets;
+   /** Same as cache_bucket, but for local-preferred memory gem objects */
+   struct bo_cache_bucket local_preferred_cache_bucket[BUCKET_ARRAY_SIZE];
+   int num_local_preferred_buckets;
    time_t time;
    struct hash_table *name_table;
@@ -282,6 +286,10 @@ bucket_info_for_heap(struct iris_bufmgr *bufmgr, enum iris_heap heap,
       *cache_bucket = bufmgr->local_cache_bucket;
       *num_buckets = &bufmgr->num_local_buckets;
       break;
+   case IRIS_HEAP_DEVICE_LOCAL_PREFERRED:
+      *cache_bucket = bufmgr->local_preferred_cache_bucket;
+      *num_buckets = &bufmgr->num_local_preferred_buckets;
+      break;
    case IRIS_HEAP_MAX:
    default:
       *cache_bucket = NULL;
@@ -641,7 +649,8 @@ iris_slab_alloc(void *priv,
 {
    struct iris_bufmgr *bufmgr = priv;
    struct iris_slab *slab = calloc(1, sizeof(struct iris_slab));
-   unsigned flags = heap == IRIS_HEAP_SYSTEM_MEMORY ? BO_ALLOC_SMEM : 0;
+   unsigned flags = heap == IRIS_HEAP_SYSTEM_MEMORY ? BO_ALLOC_SMEM :
+                    heap == IRIS_HEAP_DEVICE_LOCAL ? BO_ALLOC_LMEM : 0;
    unsigned slab_size = 0;
    /* We only support slab allocation for IRIS_MEMZONE_OTHER */
    enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;
@@ -746,8 +755,10 @@ flags_to_heap(struct iris_bufmgr *bufmgr, unsigned flags)
    if (bufmgr->vram.size > 0 &&
        !(flags & BO_ALLOC_SMEM) &&
        !(flags & BO_ALLOC_COHERENT)) {
-      return IRIS_HEAP_DEVICE_LOCAL;
+      return flags & BO_ALLOC_LMEM ? IRIS_HEAP_DEVICE_LOCAL :
+                                     IRIS_HEAP_DEVICE_LOCAL_PREFERRED;
    } else {
+      assert(!(flags & BO_ALLOC_LMEM));
       return IRIS_HEAP_SYSTEM_MEMORY;
    }
 }
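
On the allocation-flag side, flags_to_heap() above now distinguishes the two local heaps: BO_ALLOC_SMEM or BO_ALLOC_COHERENT still force system memory, the new BO_ALLOC_LMEM selects the local-only heap, and any other allocation on a device with VRAM lands in the local-preferred heap. The sketch below is a self-contained restatement of that decision; the heap_for_alloc() name and the bool parameters (one per BO_ALLOC_* bit tested above, plus has_vram standing in for bufmgr->vram.size > 0) are illustrative, not Mesa API.

   #include <assert.h>
   #include <stdbool.h>

   enum iris_heap { IRIS_HEAP_SYSTEM_MEMORY, IRIS_HEAP_DEVICE_LOCAL,
                    IRIS_HEAP_DEVICE_LOCAL_PREFERRED, IRIS_HEAP_MAX };

   /* Illustrative restatement of flags_to_heap() after this change. */
   static enum iris_heap
   heap_for_alloc(bool has_vram, bool wants_smem, bool wants_coherent,
                  bool wants_lmem)
   {
      if (has_vram && !wants_smem && !wants_coherent) {
         /* BO_ALLOC_LMEM pins the BO to VRAM; otherwise VRAM is merely
          * preferred and system memory remains a valid placement. */
         return wants_lmem ? IRIS_HEAP_DEVICE_LOCAL
                           : IRIS_HEAP_DEVICE_LOCAL_PREFERRED;
      }
      /* Integrated GPUs and coherent or explicitly system-memory
       * allocations never use a local heap. */
      assert(!wants_lmem);
      return IRIS_HEAP_SYSTEM_MEMORY;
   }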
@@ -948,11 +959,14 @@ alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size, unsigned flags)
    struct drm_i915_gem_memory_class_instance regions[2];
    uint32_t nregions = 0;
    switch (bo->real.heap) {
-   case IRIS_HEAP_DEVICE_LOCAL:
+   case IRIS_HEAP_DEVICE_LOCAL_PREFERRED:
       /* For vram allocations, still use system memory as a fallback. */
       regions[nregions++] = bufmgr->vram.region;
       regions[nregions++] = bufmgr->sys.region;
       break;
+   case IRIS_HEAP_DEVICE_LOCAL:
+      regions[nregions++] = bufmgr->vram.region;
+      break;
    case IRIS_HEAP_SYSTEM_MEMORY:
       regions[nregions++] = bufmgr->sys.region;
       break;
@@ -1018,6 +1032,7 @@ const char *
 iris_heap_to_string[IRIS_HEAP_MAX] = {
    [IRIS_HEAP_SYSTEM_MEMORY] = "system",
    [IRIS_HEAP_DEVICE_LOCAL] = "local",
+   [IRIS_HEAP_DEVICE_LOCAL_PREFERRED] = "local-preferred",
 };
 struct iris_bo *
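
A small knock-on change shows up in the first hunk of this commit: "local-preferred" is 15 characters, so dump_bo_list() widens its heap column from %-6s to %-15s to keep the table aligned. Below is a standalone sketch of that formatting; the heap_names array mirrors the iris_heap_to_string[] table above, and the print_heap_name() helper itself is illustrative.

   #include <stdio.h>

   /* Same names and order as iris_heap_to_string[] above. */
   static const char *heap_names[] = { "system", "local", "local-preferred" };

   /* Illustrative: pad the heap name to the 15-character column that
    * dump_bo_list() now uses. */
   static void
   print_heap_name(FILE *out, int heap)
   {
      fprintf(out, "(%-15s)", heap_names[heap]);
   }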
@@ -1366,6 +1381,19 @@ cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
       }
    }
+   for (i = 0; i < bufmgr->num_local_preferred_buckets; i++) {
+      struct bo_cache_bucket *bucket = &bufmgr->local_preferred_cache_bucket[i];
+      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
+         if (time - bo->real.free_time <= 1)
+            break;
+         list_del(&bo->head);
+         bo_free(bo);
+      }
+   }
    list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
       /* Stop once we reach a busy BO - all others past this point were
        * freed more recently so are likely also busy.
@@ -1711,6 +1739,16 @@ iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
       }
    }
+   for (int i = 0; i < bufmgr->num_local_preferred_buckets; i++) {
+      struct bo_cache_bucket *bucket = &bufmgr->local_preferred_cache_bucket[i];
+      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
+         list_del(&bo->head);
+         bo_free(bo);
+      }
+   }
    /* Close any buffer objects on the dead list. */
    list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
       list_del(&bo->head);
@@ -2330,6 +2368,7 @@ iris_bufmgr_create(struct intel_device_info *devinfo, int fd, bool bo_reuse)
    init_cache_buckets(bufmgr, IRIS_HEAP_SYSTEM_MEMORY);
    init_cache_buckets(bufmgr, IRIS_HEAP_DEVICE_LOCAL);
+   init_cache_buckets(bufmgr, IRIS_HEAP_DEVICE_LOCAL_PREFERRED);
    unsigned min_slab_order = 8; /* 256 bytes */
    unsigned max_slab_order = 20; /* 1 MB (slab size = 2 MB) */

@@ -140,6 +140,7 @@ enum iris_mmap_mode {
 enum iris_heap {
    IRIS_HEAP_SYSTEM_MEMORY,
    IRIS_HEAP_DEVICE_LOCAL,
+   IRIS_HEAP_DEVICE_LOCAL_PREFERRED,
    IRIS_HEAP_MAX,
 };
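
Note that the new value is inserted before IRIS_HEAP_MAX, so tables dimensioned by IRIS_HEAP_MAX (such as iris_heap_to_string[] earlier in this commit) gain a slot for the new heap without a separate size change. If one wanted to state that assumption explicitly in iris_bufmgr.h, a compile-time check along these lines would do; it is illustrative and not part of this commit.

   /* Assumes the enum iris_heap definition above is in scope. */
   _Static_assert(IRIS_HEAP_DEVICE_LOCAL_PREFERRED < IRIS_HEAP_MAX,
                  "heap values must index IRIS_HEAP_MAX-sized tables");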
@@ -279,6 +280,7 @@ struct iris_bo {
 #define BO_ALLOC_SMEM (1<<2)
 #define BO_ALLOC_SCANOUT (1<<3)
 #define BO_ALLOC_NO_SUBALLOC (1<<4)
+#define BO_ALLOC_LMEM (1<<5)
 /**
  * Allocate a buffer object.