panfrost: Fork pan_pool for Gallium and Vulkan

This commit adds the actual implementations, allowing the Gallium and Vulkan
pools to diverge while still sharing the code that depends on pool
functionality.

Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11695>
Tomeu Vizoso 2021-07-02 10:41:02 +02:00 committed by Marge Bot
parent deb4074a54
commit d33a3fad64
16 changed files with 496 additions and 239 deletions
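
For orientation before the diff: after this change the two drivers initialize
their pools through diverged entry points. A minimal sketch, with batch, dev
and device as placeholders for whatever context the caller already holds:

/* Gallium fork (pan_mempool.h): keeps the memctx and the prealloc/owned
 * flags of the shared pool it replaces. */
struct panfrost_pool gl_pool;
panfrost_pool_init(&gl_pool, batch, dev, 0, 64 * 1024,
                   "Batch pool", true, true);

/* Vulkan fork (panvk_mempool.h): always owned, so memctx and owned drop. */
struct panvk_pool vk_pool;
panvk_pool_init(&vk_pool, &device->physical_device->pdev, 0, 64 * 1024,
                "Command buffer descriptor pool", true);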


@@ -10,6 +10,8 @@ C_SOURCES := \
pan_context.h \
pan_job.c \
pan_job.h \
pan_mempool.c \
pan_mempool.h \
pan_public.h \
pan_resource.c \
pan_resource.h \


@@ -32,6 +32,8 @@ files_panfrost = files(
'pan_blend_cso.c',
'pan_cmdstream.c',
'pan_compute.c',
'pan_mempool.c',
'pan_mempool.h',
)
panfrost_includes = [


@@ -29,7 +29,7 @@
#include "util/u_dynarray.h"
#include "pipe/p_state.h"
#include "pan_cs.h"
#include "pan_pool.h"
#include "pan_mempool.h"
#include "pan_resource.h"
#include "pan_scoreboard.h"


@@ -0,0 +1,143 @@
/*
* © Copyright 2018 Alyssa Rosenzweig
* Copyright (C) 2019 Collabora, Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "pan_device.h"
#include "pan_mempool.h"
/* Knockoff u_upload_mgr. Uploads wherever we left off, allocating new entries
* when needed.
*
* In "owned" mode, a single parent owns the entire pool, and the pool owns all
* created BOs. All BOs are tracked and addable as
* panfrost_pool_get_bo_handles. Freeing occurs at the level of an entire pool.
* This is useful for streaming uploads, where the batch owns the pool.
*
* In "unowned" mode, the pool is freestanding. It does not track created BOs
* or hold references. Instead, the consumer must manage the created BOs. This
* is more flexible, enabling non-transient CSO state or shader code to be
* packed with conservative lifetime handling.
*/
static struct panfrost_bo *
panfrost_pool_alloc_backing(struct panfrost_pool *pool, size_t bo_sz)
{
/* We don't know what the BO will be used for, so let's flag it
* RW and attach it to both the fragment and vertex/tiler jobs.
* TODO: if we want fine grained BO assignment we should pass
* flags to this function and keep the read/write,
* fragment/vertex+tiler pools separate.
*/
struct panfrost_bo *bo = panfrost_bo_create(pool->base.dev, bo_sz,
pool->base.create_flags, pool->base.label);
if (pool->owned)
util_dynarray_append(&pool->bos, struct panfrost_bo *, bo);
else
panfrost_bo_unreference(pool->transient_bo);
pool->transient_bo = bo;
pool->transient_offset = 0;
return bo;
}
void
panfrost_pool_init(struct panfrost_pool *pool, void *memctx,
struct panfrost_device *dev,
unsigned create_flags, size_t slab_size, const char *label,
bool prealloc, bool owned)
{
memset(pool, 0, sizeof(*pool));
pan_pool_init(&pool->base, dev, create_flags, slab_size, label);
pool->owned = owned;
if (owned)
util_dynarray_init(&pool->bos, memctx);
if (prealloc)
panfrost_pool_alloc_backing(pool, pool->base.slab_size);
}
void
panfrost_pool_cleanup(struct panfrost_pool *pool)
{
if (!pool->owned) {
panfrost_bo_unreference(pool->transient_bo);
return;
}
util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo)
panfrost_bo_unreference(*bo);
util_dynarray_fini(&pool->bos);
}
void
panfrost_pool_get_bo_handles(struct panfrost_pool *pool, uint32_t *handles)
{
assert(pool->owned && "pool does not track BOs in unowned mode");
unsigned idx = 0;
util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo) {
assert((*bo)->gem_handle > 0);
handles[idx++] = (*bo)->gem_handle;
/* Update the BO access flags so that panfrost_bo_wait() knows
* about all pending accesses.
* We only keep the READ/WRITE info since this is all the BO
* wait logic cares about.
* We also preserve existing flags as this batch might not
* be the first one to access the BO.
*/
(*bo)->gpu_access |= PAN_BO_ACCESS_RW;
}
}
static struct panfrost_ptr
panfrost_pool_alloc_aligned(struct panfrost_pool *pool, size_t sz, unsigned alignment)
{
assert(alignment == util_next_power_of_two(alignment));
/* Find or create a suitable BO */
struct panfrost_bo *bo = pool->transient_bo;
unsigned offset = ALIGN_POT(pool->transient_offset, alignment);
/* If we don't fit, allocate a new backing */
if (unlikely(bo == NULL || (offset + sz) >= pool->base.slab_size)) {
bo = panfrost_pool_alloc_backing(pool,
ALIGN_POT(MAX2(pool->base.slab_size, sz), 4096));
offset = 0;
}
pool->transient_offset = offset + sz;
struct panfrost_ptr ret = {
.cpu = bo->ptr.cpu + offset,
.gpu = bo->ptr.gpu + offset,
};
return ret;
}
PAN_POOL_ALLOCATOR(struct panfrost_pool, panfrost_pool_alloc_aligned)
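
A hedged sketch of what the two modes above mean for the consumer; batch_pool
stands for an owned pool and shader_pool for an unowned one, both assumed to
have been set up with panfrost_pool_init() as shown:

/* Owned pool: the parent exports every tracked BO at submit time and frees
 * them all in one go. Sizing the handle array is the caller's job. */
uint32_t handles[panfrost_pool_num_bos(&batch_pool)];
panfrost_pool_get_bo_handles(&batch_pool, handles);
panfrost_pool_cleanup(&batch_pool);   /* unreferences every tracked BO */

/* Unowned pool: nothing is tracked, so cleanup only drops the current
 * transient BO; each allocation must be pinned by its consumer with
 * panfrost_pool_take_ref() (declared in pan_mempool.h below). */
panfrost_pool_cleanup(&shader_pool);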


@@ -0,0 +1,101 @@
/*
* © Copyright 2017-2018 Alyssa Rosenzweig
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __PAN_MEMPOOL_H__
#define __PAN_MEMPOOL_H__
#include "pan_pool.h"
/* Represents grow-only memory. It may be owned by the batch (OpenGL), or may
be unowned for persistent uploads. */
struct panfrost_pool {
/* Inherit from pan_pool */
struct pan_pool base;
/* BOs allocated by this pool */
struct util_dynarray bos;
/* Current transient BO */
struct panfrost_bo *transient_bo;
/* Within the topmost transient BO, how much has been used? */
unsigned transient_offset;
/* Mode of the pool. BO management is in the pool for owned mode, but
* the consumer for unowned mode. */
bool owned;
};
static inline struct panfrost_pool *
to_panfrost_pool(struct pan_pool *pool)
{
return container_of(pool, struct panfrost_pool, base);
}
/* Reference to pool allocated memory for an unowned pool */
struct panfrost_pool_ref {
/* Owning BO */
struct panfrost_bo *bo;
/* Mapped GPU VA */
mali_ptr gpu;
};
/* Take a reference to an allocation pool. Call directly after allocating from
* an unowned pool for correct operation. */
static inline struct panfrost_pool_ref
panfrost_pool_take_ref(struct panfrost_pool *pool, mali_ptr ptr)
{
if (!pool->owned)
panfrost_bo_reference(pool->transient_bo);
return (struct panfrost_pool_ref) {
.bo = pool->transient_bo,
.gpu = ptr
};
}
void
panfrost_pool_init(struct panfrost_pool *pool, void *memctx,
struct panfrost_device *dev, unsigned create_flags,
size_t slab_size, const char *label, bool prealloc, bool
owned);
void
panfrost_pool_cleanup(struct panfrost_pool *pool);
static inline unsigned
panfrost_pool_num_bos(struct panfrost_pool *pool)
{
assert(pool->owned && "pool does not track BOs in unowned mode");
return util_dynarray_num_elements(&pool->bos, struct panfrost_bo *);
}
void
panfrost_pool_get_bo_handles(struct panfrost_pool *pool, uint32_t *handles);
#endif
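
A sketch of the take-ref pattern the comment above asks for; the helper name
is hypothetical, and pan_pool_upload() is the shared uploader that stays in
pan_pool.c:

/* Hypothetical helper: upload a blob through an unowned pool and pin it. */
static struct panfrost_pool_ref
upload_and_pin(struct panfrost_pool *pool, const void *data, size_t sz)
{
   mali_ptr gpu = pan_pool_upload(&pool->base, data, sz);

   /* The unowned pool holds no reference on the backing BO, so take one
    * immediately after allocating. */
   return panfrost_pool_take_ref(pool, gpu);
}

/* The consumer later drops the pin with panfrost_bo_unreference(ref.bo). */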


@@ -28,7 +28,6 @@
#include <midgard_pack.h>
#include "pan_screen.h"
#include "pan_pool.h"
#include "pan_minmax_cache.h"
#include "pan_texture.h"
#include "drm-uapi/drm.h"


@@ -38,7 +38,7 @@
#include "util/set.h"
#include "pan_device.h"
#include "pan_pool.h"
#include "pan_mempool.h"
struct panfrost_batch;
struct panfrost_context;


@@ -29,120 +29,8 @@
/* Knockoff u_upload_mgr. Uploads wherever we left off, allocating new entries
* when needed.
*
* In "owned" mode, a single parent owns the entire pool, and the pool owns all
* created BOs. All BOs are tracked and addable as
* panfrost_pool_get_bo_handles. Freeing occurs at the level of an entire pool.
* This is useful for streaming uploads, where the batch owns the pool.
*
* In "unowned" mode, the pool is freestanding. It does not track created BOs
* or hold references. Instead, the consumer must manage the created BOs. This
* is more flexible, enabling non-transient CSO state or shader code to be
* packed with conservative lifetime handling.
*/
static struct panfrost_bo *
panfrost_pool_alloc_backing(struct panfrost_pool *pool, size_t bo_sz)
{
/* We don't know what the BO will be used for, so let's flag it
* RW and attach it to both the fragment and vertex/tiler jobs.
* TODO: if we want fine grained BO assignment we should pass
* flags to this function and keep the read/write,
* fragment/vertex+tiler pools separate.
*/
struct panfrost_bo *bo = panfrost_bo_create(pool->base.dev, bo_sz,
pool->base.create_flags, pool->base.label);
if (pool->owned)
util_dynarray_append(&pool->bos, struct panfrost_bo *, bo);
else
panfrost_bo_unreference(pool->transient_bo);
pool->transient_bo = bo;
pool->transient_offset = 0;
return bo;
}
void
panfrost_pool_init(struct panfrost_pool *pool, void *memctx,
struct panfrost_device *dev,
unsigned create_flags, size_t slab_size, const char *label,
bool prealloc, bool owned)
{
memset(pool, 0, sizeof(*pool));
pan_pool_init(&pool->base, dev, create_flags, slab_size, label);
pool->owned = owned;
if (owned)
util_dynarray_init(&pool->bos, memctx);
if (prealloc)
panfrost_pool_alloc_backing(pool, pool->base.slab_size);
}
void
panfrost_pool_cleanup(struct panfrost_pool *pool)
{
if (!pool->owned) {
panfrost_bo_unreference(pool->transient_bo);
return;
}
util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo)
panfrost_bo_unreference(*bo);
util_dynarray_fini(&pool->bos);
}
void
panfrost_pool_get_bo_handles(struct panfrost_pool *pool, uint32_t *handles)
{
assert(pool->owned && "pool does not track BOs in unowned mode");
unsigned idx = 0;
util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo) {
assert((*bo)->gem_handle > 0);
handles[idx++] = (*bo)->gem_handle;
/* Update the BO access flags so that panfrost_bo_wait() knows
* about all pending accesses.
* We only keep the READ/WRITE info since this is all the BO
* wait logic cares about.
* We also preserve existing flags as this batch might not
* be the first one to access the BO.
*/
(*bo)->gpu_access |= PAN_BO_ACCESS_RW;
}
}
static struct panfrost_ptr
panfrost_pool_alloc_aligned(struct panfrost_pool *pool, size_t sz, unsigned alignment)
{
assert(alignment == util_next_power_of_two(alignment));
/* Find or create a suitable BO */
struct panfrost_bo *bo = pool->transient_bo;
unsigned offset = ALIGN_POT(pool->transient_offset, alignment);
/* If we don't fit, allocate a new backing */
if (unlikely(bo == NULL || (offset + sz) >= pool->base.slab_size)) {
bo = panfrost_pool_alloc_backing(pool,
ALIGN_POT(MAX2(pool->base.slab_size, sz), 4096));
offset = 0;
}
pool->transient_offset = offset + sz;
struct panfrost_ptr ret = {
.cpu = bo->ptr.cpu + offset,
.gpu = bo->ptr.gpu + offset,
};
return ret;
}
PAN_POOL_ALLOCATOR(struct panfrost_pool, panfrost_pool_alloc_aligned)
mali_ptr
pan_pool_upload(struct pan_pool *pool, const void *data, size_t sz)
{


@@ -31,8 +31,7 @@
#include "util/u_dynarray.h"
/* Represents grow-only memory. It may be owned by the batch (OpenGL) or
* command pool (Vulkan), or may be unowned for persistent uploads. */
/* Represents grow-only memory. */
struct pan_pool {
/* Parent device for allocation */
@@ -48,55 +47,6 @@ struct pan_pool {
size_t slab_size;
};
struct panfrost_pool {
/* Inherit from pan_pool */
struct pan_pool base;
/* BOs allocated by this pool */
struct util_dynarray bos;
/* Current transient BO */
struct panfrost_bo *transient_bo;
/* Within the topmost transient BO, how much has been used? */
unsigned transient_offset;
/* Mode of the pool. BO management is in the pool for owned mode, but
* the consumer for unowned mode. */
bool owned;
};
static inline struct panfrost_pool *
to_panfrost_pool(struct pan_pool *pool)
{
return container_of(pool, struct panfrost_pool, base);
}
/* Reference to pool allocated memory for an unowned pool */
struct panfrost_pool_ref {
/* Owning BO */
struct panfrost_bo *bo;
/* Mapped GPU VA */
mali_ptr gpu;
};
/* Take a reference to an allocation pool. Call directly after allocating from
* an unowned pool for correct operation. */
static inline struct panfrost_pool_ref
panfrost_pool_take_ref(struct panfrost_pool *pool, mali_ptr ptr)
{
if (!pool->owned)
panfrost_bo_reference(pool->transient_bo);
return (struct panfrost_pool_ref) {
.bo = pool->transient_bo,
.gpu = ptr
};
}
static inline void
pan_pool_init(struct pan_pool *pool, struct panfrost_device *dev,
unsigned create_flags, size_t slab_size, const char *label)
@@ -107,25 +57,6 @@ pan_pool_init(struct pan_pool *pool, struct panfrost_device *dev,
pool->label = label;
}
void
panfrost_pool_init(struct panfrost_pool *pool, void *memctx,
struct panfrost_device *dev, unsigned create_flags,
size_t slab_size, const char *label, bool prealloc, bool
owned);
void
panfrost_pool_cleanup(struct panfrost_pool *pool);
static inline unsigned
panfrost_pool_num_bos(struct panfrost_pool *pool)
{
assert(pool->owned && "pool does not track BOs in unowned mode");
return util_dynarray_num_elements(&pool->bos, struct panfrost_bo *);
}
void
panfrost_pool_get_bo_handles(struct panfrost_pool *pool, uint32_t *handles);
/* Represents a fat pointer for GPU-mapped memory, returned from the transient
* allocator and not used for much else */


@@ -38,6 +38,7 @@ libpanvk_files = files(
'panvk_descriptor_set.c',
'panvk_formats.c',
'panvk_image.c',
'panvk_mempool.c',
'panvk_meta.c',
'panvk_pass.c',
'panvk_pipeline.c',


@@ -53,16 +53,16 @@ panvk_reset_cmdbuf(struct panvk_cmd_buffer *cmdbuf)
vk_free(&cmdbuf->pool->alloc, batch);
}
panfrost_pool_cleanup(&cmdbuf->desc_pool);
panfrost_pool_cleanup(&cmdbuf->tls_pool);
panfrost_pool_cleanup(&cmdbuf->varying_pool);
panfrost_pool_init(&cmdbuf->desc_pool, NULL, &device->physical_device->pdev,
0, 64 * 1024, "Command buffer descriptor pool",
true, true);
panfrost_pool_init(&cmdbuf->tls_pool, NULL, &device->physical_device->pdev,
PAN_BO_INVISIBLE, 64 * 1024, "TLS pool", false, true);
panfrost_pool_init(&cmdbuf->varying_pool, NULL, &device->physical_device->pdev,
PAN_BO_INVISIBLE, 64 * 1024, "Varyings pool", false, true);
panvk_pool_cleanup(&cmdbuf->desc_pool);
panvk_pool_cleanup(&cmdbuf->tls_pool);
panvk_pool_cleanup(&cmdbuf->varying_pool);
panvk_pool_init(&cmdbuf->desc_pool, &device->physical_device->pdev,
0, 64 * 1024, "Command buffer descriptor pool",
true);
panvk_pool_init(&cmdbuf->tls_pool, &device->physical_device->pdev,
PAN_BO_INVISIBLE, 64 * 1024, "TLS pool", false);
panvk_pool_init(&cmdbuf->varying_pool, &device->physical_device->pdev,
PAN_BO_INVISIBLE, 64 * 1024, "Varyings pool", false);
cmdbuf->status = PANVK_CMD_BUFFER_STATUS_INITIAL;
for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
@@ -87,13 +87,13 @@ panvk_create_cmdbuf(struct panvk_device *device,
cmdbuf->device = device;
cmdbuf->level = level;
cmdbuf->pool = pool;
panfrost_pool_init(&cmdbuf->desc_pool, NULL, &device->physical_device->pdev,
0, 64 * 1024, "Command buffer descriptor pool",
true, true);
panfrost_pool_init(&cmdbuf->tls_pool, NULL, &device->physical_device->pdev,
PAN_BO_INVISIBLE, 64 * 1024, "TLS pool", false, true);
panfrost_pool_init(&cmdbuf->varying_pool, NULL, &device->physical_device->pdev,
PAN_BO_INVISIBLE, 64 * 1024, "Varyings pool", false, true);
panvk_pool_init(&cmdbuf->desc_pool, &device->physical_device->pdev,
0, 64 * 1024, "Command buffer descriptor pool",
true);
panvk_pool_init(&cmdbuf->tls_pool, &device->physical_device->pdev,
PAN_BO_INVISIBLE, 64 * 1024, "TLS pool", false);
panvk_pool_init(&cmdbuf->varying_pool, &device->physical_device->pdev,
PAN_BO_INVISIBLE, 64 * 1024, "Varyings pool", false);
list_inithead(&cmdbuf->batches);
cmdbuf->status = PANVK_CMD_BUFFER_STATUS_INITIAL;
*cmdbuf_out = cmdbuf;
@@ -114,9 +114,9 @@ panvk_destroy_cmdbuf(struct panvk_cmd_buffer *cmdbuf)
vk_free(&cmdbuf->pool->alloc, batch);
}
panfrost_pool_cleanup(&cmdbuf->desc_pool);
panfrost_pool_cleanup(&cmdbuf->tls_pool);
panfrost_pool_cleanup(&cmdbuf->varying_pool);
panvk_pool_cleanup(&cmdbuf->desc_pool);
panvk_pool_cleanup(&cmdbuf->tls_pool);
panvk_pool_cleanup(&cmdbuf->varying_pool);
vk_object_free(&device->vk, NULL, cmdbuf);
}


@@ -1215,9 +1215,9 @@ panvk_QueueSubmit(VkQueue _queue,
list_for_each_entry(struct panvk_batch, batch, &cmdbuf->batches, node) {
/* FIXME: should be done at the batch level */
unsigned nr_bos =
util_dynarray_num_elements(&cmdbuf->desc_pool.bos, struct panfrost_bo *) +
util_dynarray_num_elements(&cmdbuf->varying_pool.bos, struct panfrost_bo *) +
util_dynarray_num_elements(&cmdbuf->tls_pool.bos, struct panfrost_bo *) +
panvk_pool_num_bos(&cmdbuf->desc_pool) +
panvk_pool_num_bos(&cmdbuf->varying_pool) +
panvk_pool_num_bos(&cmdbuf->tls_pool) +
(batch->fb.info ? batch->fb.info->attachment_count : 0) +
(batch->blit.src ? 1 : 0) +
(batch->blit.dst ? 1 : 0) +
@@ -1225,17 +1225,14 @@ panvk_QueueSubmit(VkQueue _queue,
unsigned bo_idx = 0;
uint32_t bos[nr_bos];
util_dynarray_foreach(&cmdbuf->desc_pool.bos, struct panfrost_bo *, bo) {
bos[bo_idx++] = (*bo)->gem_handle;
}
panvk_pool_get_bo_handles(&cmdbuf->desc_pool, &bos[bo_idx]);
bo_idx += panvk_pool_num_bos(&cmdbuf->desc_pool);
util_dynarray_foreach(&cmdbuf->varying_pool.bos, struct panfrost_bo *, bo) {
bos[bo_idx++] = (*bo)->gem_handle;
}
panvk_pool_get_bo_handles(&cmdbuf->varying_pool, &bos[bo_idx]);
bo_idx += panvk_pool_num_bos(&cmdbuf->varying_pool);
util_dynarray_foreach(&cmdbuf->tls_pool.bos, struct panfrost_bo *, bo) {
bos[bo_idx++] = (*bo)->gem_handle;
}
panvk_pool_get_bo_handles(&cmdbuf->tls_pool, &bos[bo_idx]);
bo_idx += panvk_pool_num_bos(&cmdbuf->tls_pool);
if (batch->fb.info) {
for (unsigned i = 0; i < batch->fb.info->attachment_count; i++) {


@@ -0,0 +1,122 @@
/*
* © Copyright 2018 Alyssa Rosenzweig
* Copyright (C) 2019 Collabora, Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "pan_device.h"
#include "panvk_mempool.h"
/* Knockoff u_upload_mgr. Uploads wherever we left off, allocating new entries
* when needed.
*
* In "owned" mode, a single parent owns the entire pool, and the pool owns all
* created BOs. All BOs are tracked and addable as
* panvk_pool_get_bo_handles. Freeing occurs at the level of an entire pool.
* This is useful for streaming uploads, where the batch owns the pool.
*
* In "unowned" mode, the pool is freestanding. It does not track created BOs
* or hold references. Instead, the consumer must manage the created BOs. This
* is more flexible, enabling non-transient CSO state or shader code to be
* packed with conservative lifetime handling.
*/
static struct panfrost_bo *
panvk_pool_alloc_backing(struct panvk_pool *pool, size_t bo_sz)
{
/* We don't know what the BO will be used for, so let's flag it
* RW and attach it to both the fragment and vertex/tiler jobs.
* TODO: if we want fine grained BO assignment we should pass
* flags to this function and keep the read/write,
* fragment/vertex+tiler pools separate.
*/
struct panfrost_bo *bo = panfrost_bo_create(pool->base.dev, bo_sz,
pool->base.create_flags,
pool->base.label);
util_dynarray_append(&pool->bos, struct panfrost_bo *, bo);
pool->transient_bo = bo;
pool->transient_offset = 0;
return bo;
}
static struct panfrost_ptr
panvk_pool_alloc_aligned(struct panvk_pool *pool, size_t sz, unsigned alignment)
{
assert(alignment == util_next_power_of_two(alignment));
/* Find or create a suitable BO */
struct panfrost_bo *bo = pool->transient_bo;
unsigned offset = ALIGN_POT(pool->transient_offset, alignment);
/* If we don't fit, allocate a new backing */
if (unlikely(bo == NULL || (offset + sz) >= pool->base.slab_size)) {
bo = panvk_pool_alloc_backing(pool,
ALIGN_POT(MAX2(pool->base.slab_size, sz),
4096));
offset = 0;
}
pool->transient_offset = offset + sz;
struct panfrost_ptr ret = {
.cpu = bo->ptr.cpu + offset,
.gpu = bo->ptr.gpu + offset,
};
return ret;
}
PAN_POOL_ALLOCATOR(struct panvk_pool, panvk_pool_alloc_aligned)
void
panvk_pool_init(struct panvk_pool *pool, struct panfrost_device *dev,
unsigned create_flags, size_t slab_size, const char *label,
bool prealloc)
{
memset(pool, 0, sizeof(*pool));
pan_pool_init(&pool->base, dev, create_flags, slab_size, label);
util_dynarray_init(&pool->bos, NULL);
if (prealloc)
panvk_pool_alloc_backing(pool, pool->base.slab_size);
}
void
panvk_pool_cleanup(struct panvk_pool *pool)
{
util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo)
panfrost_bo_unreference(*bo);
util_dynarray_fini(&pool->bos);
}
void
panvk_pool_get_bo_handles(struct panvk_pool *pool, uint32_t *handles)
{
unsigned idx = 0;
util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo) {
assert((*bo)->gem_handle > 0);
handles[idx++] = (*bo)->gem_handle;
}
}


@@ -0,0 +1,71 @@
/*
* © Copyright 2017-2018 Alyssa Rosenzweig
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __PANVK_POOL_H__
#define __PANVK_POOL_H__
#include "pan_pool.h"
/* Represents grow-only memory. It may be owned by the batch (OpenGL), or may
be unowned for persistent uploads. */
struct panvk_pool {
/* Inherit from pan_pool */
struct pan_pool base;
/* BOs allocated by this pool */
struct util_dynarray bos;
/* Current transient BO */
struct panfrost_bo *transient_bo;
/* Within the topmost transient BO, how much has been used? */
unsigned transient_offset;
};
static inline struct panvk_pool *
to_panvk_pool(struct pan_pool *pool)
{
return container_of(pool, struct panvk_pool, base);
}
void
panvk_pool_init(struct panvk_pool *pool, struct panfrost_device *dev,
unsigned create_flags, size_t slab_size, const char *label,
bool prealloc);
void
panvk_pool_cleanup(struct panvk_pool *pool);
static inline unsigned
panvk_pool_num_bos(struct panvk_pool *pool)
{
return util_dynarray_num_elements(&pool->bos, struct panfrost_bo *);
}
void
panvk_pool_get_bo_handles(struct panvk_pool *pool, uint32_t *handles);
#endif
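
On the Vulkan side the pool is always owned; a condensed sketch of the
lifecycle as the command-buffer and QueueSubmit hunks above use it:

/* Creation (panvk_create_cmdbuf): */
panvk_pool_init(&cmdbuf->desc_pool, &device->physical_device->pdev, 0,
                64 * 1024, "Command buffer descriptor pool", true);

/* Submission (panvk_QueueSubmit): size and fill the handle array through the
 * pool API instead of walking pool->bos directly. */
uint32_t bos[panvk_pool_num_bos(&cmdbuf->desc_pool)];
panvk_pool_get_bo_handles(&cmdbuf->desc_pool, bos);

/* Teardown (panvk_destroy_cmdbuf): */
panvk_pool_cleanup(&cmdbuf->desc_pool);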


@@ -154,16 +154,16 @@ panvk_CmdClearAttachments(VkCommandBuffer commandBuffer,
void
panvk_meta_init(struct panvk_physical_device *dev)
{
panfrost_pool_init(&dev->meta.bin_pool, NULL, &dev->pdev, PAN_BO_EXECUTE,
16 * 1024, "panvk_meta binary pool", false, true);
panfrost_pool_init(&dev->meta.desc_pool, NULL, &dev->pdev, 0,
16 * 1024, "panvk_meta descriptor pool", false, true);
panfrost_pool_init(&dev->meta.blitter.bin_pool, NULL, &dev->pdev,
PAN_BO_EXECUTE, 16 * 1024,
"panvk_meta blitter binary pool", false, true);
panfrost_pool_init(&dev->meta.blitter.desc_pool, NULL, &dev->pdev,
0, 16 * 1024, "panvk_meta blitter descriptor pool",
false, true);
panvk_pool_init(&dev->meta.bin_pool, &dev->pdev, PAN_BO_EXECUTE,
16 * 1024, "panvk_meta binary pool", false);
panvk_pool_init(&dev->meta.desc_pool, &dev->pdev, 0,
16 * 1024, "panvk_meta descriptor pool", false);
panvk_pool_init(&dev->meta.blitter.bin_pool, &dev->pdev,
PAN_BO_EXECUTE, 16 * 1024,
"panvk_meta blitter binary pool", false);
panvk_pool_init(&dev->meta.blitter.desc_pool, &dev->pdev,
0, 16 * 1024, "panvk_meta blitter descriptor pool",
false);
pan_blitter_init(&dev->pdev, &dev->meta.blitter.bin_pool.base,
&dev->meta.blitter.desc_pool.base);
}
@@ -172,8 +172,8 @@ void
panvk_meta_cleanup(struct panvk_physical_device *dev)
{
pan_blitter_cleanup(&dev->pdev);
panfrost_pool_cleanup(&dev->meta.blitter.desc_pool);
panfrost_pool_cleanup(&dev->meta.blitter.bin_pool);
panfrost_pool_cleanup(&dev->meta.desc_pool);
panfrost_pool_cleanup(&dev->meta.bin_pool);
panvk_pool_cleanup(&dev->meta.blitter.desc_pool);
panvk_pool_cleanup(&dev->meta.blitter.bin_pool);
panvk_pool_cleanup(&dev->meta.desc_pool);
panvk_pool_cleanup(&dev->meta.bin_pool);
}


@@ -63,7 +63,7 @@
#include "pan_blitter.h"
#include "pan_cs.h"
#include "pan_device.h"
#include "pan_pool.h"
#include "panvk_mempool.h"
#include "pan_texture.h"
#include "pan_scoreboard.h"
#include "pan_shader.h"
@@ -133,16 +133,16 @@ panvk_logi_v(const char *format, va_list va);
#define panvk_stub() assert(!"stub")
struct panvk_meta {
struct panfrost_pool bin_pool;
struct panfrost_pool desc_pool;
struct panvk_pool bin_pool;
struct panvk_pool desc_pool;
/* Access to the blitter pools are protected by the blitter
* shader/rsd locks. They can't be merged with other binary/desc
* pools unless we patch pan_blitter.c to external pool locks.
*/
struct {
struct panfrost_pool bin_pool;
struct panfrost_pool desc_pool;
struct panvk_pool bin_pool;
struct panvk_pool desc_pool;
} blitter;
};
@@ -618,9 +618,9 @@ struct panvk_cmd_buffer {
struct panvk_device *device;
struct panvk_cmd_pool *pool;
struct panfrost_pool desc_pool;
struct panfrost_pool varying_pool;
struct panfrost_pool tls_pool;
struct panvk_pool desc_pool;
struct panvk_pool varying_pool;
struct panvk_pool tls_pool;
struct list_head batches;
VkCommandBufferUsageFlags usage_flags;