panfrost: Keep track of active BOs

If two jobs use the same GEM object at the same time, the job that
finishes first would (prior to this commit) close the GEM object, even
if another job was still referencing it.

To prevent this, have all jobs use the same panfrost_bo for a given GEM
object, so it's only closed once the last job is done with it.

Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Daniel Stone <daniels@collabora.com>
Reviewed-by: Rohan Garg <rohan.garg@collabora.com>
Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Author:    Tomeu Vizoso, 2019-10-09 10:10:44 +02:00
Committer: Daniel Stone
Commit:    82f18b713a
Parent:    730f06a44d

3 changed files with 86 additions and 20 deletions
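
Before the diff, a minimal standalone sketch of the pattern this patch
introduces: a screen-wide table, keyed by GEM handle, that hands every
importer the same wrapper object. All names here (bo_import,
bo_unreference, the fixed-size toy table) are illustrative, not the
Mesa API, and the refcount is manipulated entirely under the table lock
for simplicity, whereas the real patch uses an atomic pipe_reference
and only re-checks it under the lock.

    #include <pthread.h>
    #include <stdlib.h>

    /* One wrapper object per GEM handle. */
    struct bo {
            unsigned gem_handle;
            unsigned refcnt;        /* protected by table_lock */
    };

    #define MAX_BOS 64
    static struct bo *table[MAX_BOS];   /* toy table; ignores collisions */
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct bo *bo_import(unsigned gem_handle)
    {
            pthread_mutex_lock(&table_lock);
            struct bo *bo = table[gem_handle % MAX_BOS];
            if (!bo || bo->gem_handle != gem_handle) {
                    /* First importer: create the shared wrapper. */
                    bo = calloc(1, sizeof(*bo));
                    bo->gem_handle = gem_handle;
                    table[gem_handle % MAX_BOS] = bo;
            }
            /* Every later importer gets the SAME wrapper, so the handle
             * is only closed once the last user drops its reference. */
            bo->refcnt++;
            pthread_mutex_unlock(&table_lock);
            return bo;
    }

    static void bo_unreference(struct bo *bo)
    {
            pthread_mutex_lock(&table_lock);
            if (--bo->refcnt == 0) {
                    table[bo->gem_handle % MAX_BOS] = NULL;
                    free(bo);   /* the real code would also close the handle */
            }
            pthread_mutex_unlock(&table_lock);
    }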


@@ -371,6 +371,11 @@ panfrost_bo_create(struct panfrost_screen *screen, size_t size,
         }

         pipe_reference_init(&bo->reference, 1);
+
+        pthread_mutex_lock(&screen->active_bos_lock);
+        _mesa_set_add(bo->screen->active_bos, bo);
+        pthread_mutex_unlock(&screen->active_bos_lock);
+
         return bo;
 }
@@ -390,43 +395,79 @@ panfrost_bo_unreference(struct panfrost_bo *bo)
         if (!pipe_reference(&bo->reference, NULL))
                 return;

-        /* When the reference count goes to zero, we need to cleanup */
-        panfrost_bo_munmap(bo);
+        struct panfrost_screen *screen = bo->screen;

-        /* Rather than freeing the BO now, we'll cache the BO for later
-         * allocations if we're allowed to.
-         */
-        if (panfrost_bo_cache_put(bo))
-                return;
-
-        panfrost_bo_free(bo);
+        pthread_mutex_lock(&screen->active_bos_lock);
+        /* Someone might have imported this BO while we were waiting for the
+         * lock, let's make sure it's still not referenced before freeing it.
+         */
+        if (!pipe_is_referenced(&bo->reference)) {
+                _mesa_set_remove_key(bo->screen->active_bos, bo);
+
+                /* When the reference count goes to zero, we need to cleanup */
+                panfrost_bo_munmap(bo);
+
+                /* Rather than freeing the BO now, we'll cache the BO for later
+                 * allocations if we're allowed to.
+                 */
+                if (!panfrost_bo_cache_put(bo))
+                        panfrost_bo_free(bo);
+        }
+        pthread_mutex_unlock(&screen->active_bos_lock);
 }

 struct panfrost_bo *
 panfrost_bo_import(struct panfrost_screen *screen, int fd)
 {
-        struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
+        struct panfrost_bo *bo, *newbo = rzalloc(screen, struct panfrost_bo);
         struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
+        struct set_entry *entry;
         ASSERTED int ret;
         unsigned gem_handle;

+        newbo->screen = screen;
+
         ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
         assert(!ret);

-        get_bo_offset.handle = gem_handle;
-        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
-        assert(!ret);
+        newbo->gem_handle = gem_handle;

-        bo->screen = screen;
-        bo->gem_handle = gem_handle;
-        bo->gpu = (mali_ptr) get_bo_offset.offset;
-        bo->size = lseek(fd, 0, SEEK_END);
-        bo->flags |= PAN_BO_DONT_REUSE | PAN_BO_IMPORTED;
-        assert(bo->size > 0);
-        pipe_reference_init(&bo->reference, 1);
+        pthread_mutex_lock(&screen->active_bos_lock);
+        entry = _mesa_set_search_or_add(screen->active_bos, newbo);
+        assert(entry);
+        bo = (struct panfrost_bo *)entry->key;
+        if (newbo == bo) {
+                get_bo_offset.handle = gem_handle;
+                ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
+                assert(!ret);
+
+                newbo->gpu = (mali_ptr) get_bo_offset.offset;
+                newbo->size = lseek(fd, 0, SEEK_END);
+                newbo->flags |= PAN_BO_DONT_REUSE | PAN_BO_IMPORTED;
+                assert(newbo->size > 0);
+                pipe_reference_init(&newbo->reference, 1);
+
+                // TODO map and unmap on demand?
+                panfrost_bo_mmap(newbo);
+        } else {
+                ralloc_free(newbo);
+                /* !pipe_is_referenced(&bo->reference) can happen if the BO
+                 * was being released but panfrost_bo_import() acquired the
+                 * lock before panfrost_bo_unreference(). In that case, refcnt
+                 * is 0 and we can't use panfrost_bo_reference() directly, we
+                 * have to re-initialize it with pipe_reference_init().
+                 * Note that panfrost_bo_unreference() checks
+                 * pipe_is_referenced() value just after acquiring the lock to
+                 * make sure the object is not freed if panfrost_bo_import()
+                 * acquired it in the meantime.
+                 */
+                if (!pipe_is_referenced(&bo->reference))
+                        pipe_reference_init(&bo->reference, 1);
+                else
+                        panfrost_bo_reference(bo);
+                assert(bo->cpu);
+        }
+        pthread_mutex_unlock(&screen->active_bos_lock);

-        // TODO map and unmap on demand?
-        panfrost_bo_mmap(bo);
-
         return bo;
 }
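
The else branch above is the heart of the change, so here is a
compressed, self-contained model of the import-vs-unreference race it
handles. struct obj, release() and revive_or_ref() are invented for
illustration (the table lookup is elided); the locking and the re-check
mirror the patch, assuming an atomic refcount like pipe_reference().

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
            atomic_uint refcnt;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Releaser: decrements outside the lock (as pipe_reference() does),
     * then re-checks under the lock before actually freeing. */
    static void release(struct obj *o)
    {
            if (atomic_fetch_sub(&o->refcnt, 1) != 1)
                    return;   /* other references remain */

            pthread_mutex_lock(&lock);
            /* An importer may have taken the lock first, found the object
             * in the table, and bumped refcnt back from 0; in that case we
             * must not free it. */
            if (atomic_load(&o->refcnt) == 0)
                    free(o);  /* real code also removes it from active_bos */
            pthread_mutex_unlock(&lock);
    }

    /* Importer: under the same lock, a refcnt of 0 just means the
     * releaser lost the race; setting it back to 1 revives the object.
     * The table lookup (elided) happens under this lock too, so a freed
     * object can never be found here. */
    static struct obj *revive_or_ref(struct obj *o)
    {
            pthread_mutex_lock(&lock);
            if (atomic_load(&o->refcnt) == 0)
                    atomic_store(&o->refcnt, 1);
            else
                    atomic_fetch_add(&o->refcnt, 1);
            pthread_mutex_unlock(&lock);
            return o;
    }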


@@ -548,6 +548,7 @@ panfrost_destroy_screen(struct pipe_screen *pscreen)
         struct panfrost_screen *screen = pan_screen(pscreen);
         panfrost_bo_cache_evict_all(screen);
         pthread_mutex_destroy(&screen->bo_cache_lock);
+        pthread_mutex_destroy(&screen->active_bos_lock);
         drmFreeVersion(screen->kernel_version);
         ralloc_free(screen);
 }
@@ -681,6 +682,22 @@ panfrost_query_gpu_version(struct panfrost_screen *screen)
         return get_param.value;
 }

+static uint32_t
+panfrost_active_bos_hash(const void *key)
+{
+        const struct panfrost_bo *bo = key;
+
+        return _mesa_hash_data(&bo->gem_handle, sizeof(bo->gem_handle));
+}
+
+static bool
+panfrost_active_bos_cmp(const void *keya, const void *keyb)
+{
+        const struct panfrost_bo *a = keya, *b = keyb;
+
+        return a->gem_handle == b->gem_handle;
+}
+
 struct pipe_screen *
 panfrost_create_screen(int fd, struct renderonly *ro)
 {
@@ -733,6 +750,10 @@ panfrost_create_screen(int fd, struct renderonly *ro)
                 return NULL;
         }

+        pthread_mutex_init(&screen->active_bos_lock, NULL);
+        screen->active_bos = _mesa_set_create(screen, panfrost_active_bos_hash,
+                                              panfrost_active_bos_cmp);
+
         pthread_mutex_init(&screen->bo_cache_lock, NULL);
         for (unsigned i = 0; i < ARRAY_SIZE(screen->bo_cache); ++i)
                 list_inithead(&screen->bo_cache[i]);
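
A design note on the two callbacks above: since both the hash and the
compare read nothing but bo->gem_handle, the set behaves as a map from
GEM handle to BO, and a zero-initialized BO with only the handle filled
in is a valid lookup key. That is why panfrost_bo_import() can pass the
half-constructed newbo to _mesa_set_search_or_add(). Deduplication
works because, on a given DRM device fd, importing the same underlying
buffer always yields the same GEM handle. Below is a hedged sketch of a
lookup helper built on that property; panfrost_active_bo_lookup() is
hypothetical, and _mesa_set_search() is assumed from Mesa's util/set:

    static struct panfrost_bo *
    panfrost_active_bo_lookup(struct panfrost_screen *screen,
                              unsigned gem_handle)
    {
            /* A stack "key" BO: only gem_handle matters to hash/cmp. */
            struct panfrost_bo key = { .gem_handle = gem_handle };
            struct set_entry *entry;

            pthread_mutex_lock(&screen->active_bos_lock);
            entry = _mesa_set_search(screen->active_bos, &key);
            pthread_mutex_unlock(&screen->active_bos_lock);

            return entry ? (struct panfrost_bo *)entry->key : NULL;
    }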


@@ -35,6 +35,7 @@
 #include "renderonly/renderonly.h"
 #include "util/u_dynarray.h"
 #include "util/bitset.h"
+#include "util/set.h"

 #include <panfrost-misc.h>
 #include "pan_allocate.h"
@@ -85,6 +86,9 @@ struct panfrost_screen {
         struct renderonly *ro;

+        pthread_mutex_t active_bos_lock;
+        struct set *active_bos;
+
         pthread_mutex_t bo_cache_lock;

         /* The BO cache is a set of buckets with power-of-two sizes ranging