iris/bufmgr: Stop changing mapping modes on buffers

With discrete GPUs, we aren't able to change mapping modes after we've
created a buffer.  This is a limitation of TTM.  However, we already
have a buffer cache and it's pretty likely that stuff in any given
memzone will end up with just the one mapping type anyway so this
shouldn't have much of a cost.

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11583>
This commit is contained in:
Jason Ekstrand 2021-06-24 11:26:05 -05:00 committed by Marge Bot
parent b18cf54f0d
commit dd890f520e
1 changed file with 14 additions and 8 deletions

View File

@@ -432,6 +432,7 @@ alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
struct bo_cache_bucket *bucket,
uint32_t alignment,
enum iris_memory_zone memzone,
enum iris_mmap_mode mmap_mode,
unsigned flags,
bool match_zone)
{
@@ -441,6 +442,12 @@ alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
struct iris_bo *bo = NULL;
list_for_each_entry_safe(struct iris_bo, cur, &bucket->head, head) {
/* Find one that's got the right mapping type. We used to swap maps
* around but the kernel doesn't allow this on discrete GPUs.
*/
if (mmap_mode != cur->mmap_mode)
continue;
/* Try a little harder to find one that's already in the right memzone */
if (match_zone && memzone != iris_memzone_for_address(cur->gtt_offset))
continue;
@@ -607,7 +614,7 @@ iris_bo_alloc(struct iris_bufmgr *bufmgr,
bucket ? bucket->size : MAX2(ALIGN(size, page_size), page_size);
bool is_coherent = bufmgr->has_llc || (flags & BO_ALLOC_COHERENT);
enum iris_mmap_mode desired_mmap_mode =
enum iris_mmap_mode mmap_mode =
!local && is_coherent ? IRIS_MMAP_WB : IRIS_MMAP_WC;
mtx_lock(&bufmgr->lock);
@@ -615,12 +622,13 @@ iris_bo_alloc(struct iris_bufmgr *bufmgr,
/* Get a buffer out of the cache if available. First, we try to find
* one with a matching memory zone so we can avoid reallocating VMA.
*/
bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags, true);
bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, mmap_mode,
flags, true);
/* If that fails, we try for any cached BO, without matching memzone. */
if (!bo) {
bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags,
false);
bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, mmap_mode,
flags, false);
}
mtx_unlock(&bufmgr->lock);
@@ -653,10 +661,8 @@ iris_bo_alloc(struct iris_bufmgr *bufmgr,
if (memzone < IRIS_MEMZONE_OTHER)
bo->kflags |= EXEC_OBJECT_CAPTURE;
if (bo->mmap_mode != desired_mmap_mode && bo->map)
bo_unmap(bo);
bo->mmap_mode = desired_mmap_mode;
assert(bo->map == NULL || bo->mmap_mode == mmap_mode);
bo->mmap_mode = mmap_mode;
if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
struct drm_i915_gem_caching arg = {