venus: use vn_renderer_shmem

vn_renderer_bo_create_cpu becomes unused and is removed.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10437>
This commit is contained in:
Chia-I Wu 2021-04-22 13:33:59 -07:00
parent 452a49fe19
commit b54a262421
9 changed files with 105 additions and 171 deletions

View File

@@ -33,7 +33,7 @@ vn_cs_encoder_sanity_check(struct vn_cs_encoder *enc)
static void
vn_cs_encoder_add_buffer(struct vn_cs_encoder *enc,
struct vn_renderer_bo *bo,
struct vn_renderer_shmem *shmem,
size_t offset,
void *base,
size_t size)
@@ -41,8 +41,8 @@ vn_cs_encoder_add_buffer(struct vn_cs_encoder *enc,
/* add a buffer and make it current */
assert(enc->buffer_count < enc->buffer_max);
struct vn_cs_encoder_buffer *cur_buf = &enc->buffers[enc->buffer_count++];
/* bo ownership transferred */
cur_buf->bo = bo;
/* shmem ownership transferred */
cur_buf->shmem = shmem;
cur_buf->offset = offset;
cur_buf->base = base;
cur_buf->committed_size = 0;
@@ -75,12 +75,12 @@ vn_cs_encoder_gc_buffers(struct vn_cs_encoder *enc)
struct vn_cs_encoder_buffer *cur_buf =
&enc->buffers[enc->buffer_count - 1];
for (uint32_t i = 0; i < enc->buffer_count - 1; i++)
vn_renderer_bo_unref(enc->buffers[i].bo);
vn_renderer_shmem_unref(enc->instance->renderer, enc->buffers[i].shmem);
/* move the current buffer to the beginning, skipping the used part */
const size_t used = cur_buf->offset + cur_buf->committed_size;
enc->buffer_count = 0;
vn_cs_encoder_add_buffer(enc, cur_buf->bo, used,
vn_cs_encoder_add_buffer(enc, cur_buf->shmem, used,
cur_buf->base + cur_buf->committed_size,
enc->current_buffer_size - used);
@@ -105,7 +105,7 @@ vn_cs_encoder_fini(struct vn_cs_encoder *enc)
return;
for (uint32_t i = 0; i < enc->buffer_count; i++)
vn_renderer_bo_unref(enc->buffers[i].bo);
vn_renderer_shmem_unref(enc->instance->renderer, enc->buffers[i].shmem);
if (enc->buffers)
free(enc->buffers);
}
@@ -190,26 +190,19 @@ vn_cs_encoder_reserve_internal(struct vn_cs_encoder *enc, size_t size)
return false;
}
struct vn_renderer_bo *bo;
VkResult result =
vn_renderer_bo_create_cpu(enc->instance->renderer, buf_size, &bo);
if (result != VK_SUCCESS)
struct vn_renderer_shmem *shmem =
vn_renderer_shmem_create(enc->instance->renderer, buf_size);
if (!shmem)
return false;
void *base = vn_renderer_bo_map(bo);
if (!base) {
vn_renderer_bo_unref(bo);
return false;
}
uint32_t roundtrip;
result = vn_instance_submit_roundtrip(enc->instance, &roundtrip);
VkResult result = vn_instance_submit_roundtrip(enc->instance, &roundtrip);
if (result != VK_SUCCESS) {
vn_renderer_bo_unref(bo);
vn_renderer_shmem_unref(enc->instance->renderer, shmem);
return false;
}
vn_cs_encoder_add_buffer(enc, bo, 0, base, buf_size);
vn_cs_encoder_add_buffer(enc, shmem, 0, shmem->mmap_ptr, buf_size);
enc->current_buffer_size = buf_size;
enc->current_buffer_roundtrip = roundtrip;

View File

@@ -26,14 +26,14 @@
}
struct vn_cs_encoder_buffer {
struct vn_renderer_bo *bo;
struct vn_renderer_shmem *shmem;
size_t offset;
void *base;
size_t committed_size;
};
struct vn_cs_encoder {
struct vn_instance *instance; /* TODO bo cache */
struct vn_instance *instance; /* TODO shmem cache */
size_t min_buffer_size;
bool indirect;
@@ -44,7 +44,7 @@ struct vn_cs_encoder {
uint32_t buffer_max;
size_t total_committed_size;
/* the current buffer is buffers[buffer_count - 1].bo */
/* the current buffer is buffers[buffer_count - 1].shmem */
size_t current_buffer_size;
uint32_t current_buffer_roundtrip;

View File

@@ -117,31 +117,26 @@ vn_instance_init_ring(struct vn_instance *instance)
struct vn_ring_layout layout;
vn_ring_get_layout(extra_size, &layout);
void *ring_ptr;
VkResult result = vn_renderer_bo_create_cpu(
instance->renderer, layout.bo_size, &instance->ring.bo);
if (result == VK_SUCCESS) {
ring_ptr = vn_renderer_bo_map(instance->ring.bo);
if (!ring_ptr)
result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
if (result != VK_SUCCESS) {
instance->ring.shmem =
vn_renderer_shmem_create(instance->renderer, layout.shmem_size);
if (!instance->ring.shmem) {
if (VN_DEBUG(INIT))
vn_log(instance, "failed to allocate/map ring bo");
return result;
vn_log(instance, "failed to allocate/map ring shmem");
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
mtx_init(&instance->ring.mutex, mtx_plain);
struct vn_ring *ring = &instance->ring.ring;
vn_ring_init(ring, &layout, ring_ptr);
vn_ring_init(ring, instance->renderer, &layout,
instance->ring.shmem->mmap_ptr);
instance->ring.id = (uintptr_t)ring;
const struct VkRingCreateInfoMESA info = {
.sType = VK_STRUCTURE_TYPE_RING_CREATE_INFO_MESA,
.resourceId = instance->ring.bo->res_id,
.size = layout.bo_size,
.resourceId = instance->ring.shmem->res_id,
.size = layout.shmem_size,
.idleTimeout = 50ull * 1000 * 1000,
.headOffset = layout.head_offset,
.tailOffset = layout.tail_offset,
@@ -298,7 +293,7 @@ vn_instance_submission_indirect_cs(struct vn_instance_submission *submit,
const struct vn_cs_encoder_buffer *buf = &cs->buffers[i];
if (buf->committed_size) {
descs[desc_count++] = (VkCommandStreamDescriptionMESA){
.resourceId = buf->bo->res_id,
.resourceId = buf->shmem->res_id,
.offset = buf->offset,
.size = buf->committed_size,
};
@@ -353,22 +348,26 @@ vn_instance_submission_direct_cs(struct vn_instance_submission *submit,
static struct vn_ring_submit *
vn_instance_submission_get_ring_submit(struct vn_ring *ring,
const struct vn_cs_encoder *cs,
struct vn_renderer_bo *extra_bo,
struct vn_renderer_shmem *extra_shmem,
bool direct)
{
const uint32_t bo_count =
(direct ? 0 : cs->buffer_count) + (extra_bo ? 1 : 0);
struct vn_ring_submit *submit = vn_ring_get_submit(ring, bo_count);
const uint32_t shmem_count =
(direct ? 0 : cs->buffer_count) + (extra_shmem ? 1 : 0);
struct vn_ring_submit *submit = vn_ring_get_submit(ring, shmem_count);
if (!submit)
return NULL;
submit->bo_count = bo_count;
submit->shmem_count = shmem_count;
if (!direct) {
for (uint32_t i = 0; i < cs->buffer_count; i++)
submit->bos[i] = vn_renderer_bo_ref(cs->buffers[i].bo);
for (uint32_t i = 0; i < cs->buffer_count; i++) {
submit->shmems[i] =
vn_renderer_shmem_ref(ring->renderer, cs->buffers[i].shmem);
}
}
if (extra_shmem) {
submit->shmems[shmem_count - 1] =
vn_renderer_shmem_ref(ring->renderer, extra_shmem);
}
if (extra_bo)
submit->bos[bo_count - 1] = vn_renderer_bo_ref(extra_bo);
return submit;
}
@@ -386,7 +385,7 @@ static VkResult
vn_instance_submission_prepare(struct vn_instance_submission *submit,
const struct vn_cs_encoder *cs,
struct vn_ring *ring,
struct vn_renderer_bo *extra_bo,
struct vn_renderer_shmem *extra_shmem,
bool direct)
{
if (direct) {
@@ -400,7 +399,7 @@ vn_instance_submission_prepare(struct vn_instance_submission *submit,
return VK_ERROR_OUT_OF_HOST_MEMORY;
submit->submit =
vn_instance_submission_get_ring_submit(ring, cs, extra_bo, direct);
vn_instance_submission_get_ring_submit(ring, cs, extra_shmem, direct);
if (!submit->submit) {
vn_instance_submission_cleanup(submit, cs);
return VK_ERROR_OUT_OF_HOST_MEMORY;
@@ -441,7 +440,7 @@ vn_instance_ring_cs_upload_locked(struct vn_instance *instance,
static VkResult
vn_instance_ring_submit_locked(struct vn_instance *instance,
const struct vn_cs_encoder *cs,
struct vn_renderer_bo *extra_bo,
struct vn_renderer_shmem *extra_shmem,
uint32_t *ring_seqno)
{
struct vn_ring *ring = &instance->ring.ring;
@@ -456,7 +455,7 @@ vn_instance_ring_submit_locked(struct vn_instance *instance,
struct vn_instance_submission submit;
VkResult result =
vn_instance_submission_prepare(&submit, cs, ring, extra_bo, direct);
vn_instance_submission_prepare(&submit, cs, ring, extra_shmem, direct);
if (result != VK_SUCCESS)
return result;
@@ -492,46 +491,40 @@ vn_instance_ring_submit(struct vn_instance *instance,
}
static bool
vn_instance_grow_reply_bo_locked(struct vn_instance *instance, size_t size)
vn_instance_grow_reply_shmem_locked(struct vn_instance *instance, size_t size)
{
const size_t min_bo_size = 1 << 20;
const size_t min_shmem_size = 1 << 20;
size_t bo_size = instance->reply.size ? instance->reply.size : min_bo_size;
while (bo_size < size) {
bo_size <<= 1;
if (!bo_size)
size_t shmem_size =
instance->reply.size ? instance->reply.size : min_shmem_size;
while (shmem_size < size) {
shmem_size <<= 1;
if (!shmem_size)
return false;
}
struct vn_renderer_bo *bo;
VkResult result =
vn_renderer_bo_create_cpu(instance->renderer, bo_size, &bo);
if (result != VK_SUCCESS)
struct vn_renderer_shmem *shmem =
vn_renderer_shmem_create(instance->renderer, shmem_size);
if (!shmem)
return false;
void *ptr = vn_renderer_bo_map(bo);
if (!ptr) {
vn_renderer_bo_unref(bo);
return false;
}
if (instance->reply.bo)
vn_renderer_bo_unref(instance->reply.bo);
instance->reply.bo = bo;
instance->reply.size = bo_size;
if (instance->reply.shmem)
vn_renderer_shmem_unref(instance->renderer, instance->reply.shmem);
instance->reply.shmem = shmem;
instance->reply.size = shmem_size;
instance->reply.used = 0;
instance->reply.ptr = ptr;
instance->reply.ptr = shmem->mmap_ptr;
return true;
}
static struct vn_renderer_bo *
vn_instance_get_reply_bo_locked(struct vn_instance *instance,
size_t size,
void **ptr)
static struct vn_renderer_shmem *
vn_instance_get_reply_shmem_locked(struct vn_instance *instance,
size_t size,
void **ptr)
{
if (unlikely(instance->reply.used + size > instance->reply.size)) {
if (!vn_instance_grow_reply_bo_locked(instance, size))
if (!vn_instance_grow_reply_shmem_locked(instance, size))
return NULL;
uint32_t set_reply_command_stream_data[16];
@@ -539,7 +532,7 @@ vn_instance_get_reply_bo_locked(struct vn_instance *instance,
set_reply_command_stream_data,
sizeof(set_reply_command_stream_data));
const struct VkCommandStreamDescriptionMESA stream = {
.resourceId = instance->reply.bo->res_id,
.resourceId = instance->reply.shmem->res_id,
.size = instance->reply.size,
};
vn_encode_vkSetReplyCommandStreamMESA(&local_enc, 0, &stream);
@@ -561,7 +554,7 @@ vn_instance_get_reply_bo_locked(struct vn_instance *instance,
*ptr = instance->reply.ptr + offset;
instance->reply.used += size;
return vn_renderer_bo_ref(instance->reply.bo);
return vn_renderer_shmem_ref(instance->renderer, instance->reply.shmem);
}
void
@@ -569,7 +562,7 @@ vn_instance_submit_command(struct vn_instance *instance,
struct vn_instance_submit_command *submit)
{
void *reply_ptr;
submit->reply_bo = NULL;
submit->reply_shmem = NULL;
mtx_lock(&instance->ring.mutex);
@@ -578,15 +571,15 @@ vn_instance_submit_command(struct vn_instance *instance,
vn_cs_encoder_commit(&submit->command);
if (submit->reply_size) {
submit->reply_bo = vn_instance_get_reply_bo_locked(
submit->reply_shmem = vn_instance_get_reply_shmem_locked(
instance, submit->reply_size, &reply_ptr);
if (!submit->reply_bo)
if (!submit->reply_shmem)
goto fail;
}
uint32_t ring_seqno;
VkResult result = vn_instance_ring_submit_locked(
instance, &submit->command, submit->reply_bo, &ring_seqno);
instance, &submit->command, submit->reply_shmem, &ring_seqno);
mtx_unlock(&instance->ring.mutex);
@@ -1870,10 +1863,10 @@ vn_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
return VK_SUCCESS;
fail:
if (instance->reply.bo)
vn_renderer_bo_unref(instance->reply.bo);
if (instance->reply.shmem)
vn_renderer_shmem_unref(instance->renderer, instance->reply.shmem);
if (instance->ring.bo) {
if (instance->ring.shmem) {
uint32_t destroy_ring_data[4];
struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
destroy_ring_data, sizeof(destroy_ring_data));
@@ -1882,7 +1875,7 @@ fail:
vn_cs_encoder_get_len(&local_enc));
vn_cs_encoder_fini(&instance->ring.upload);
vn_renderer_bo_unref(instance->ring.bo);
vn_renderer_shmem_unref(instance->renderer, instance->ring.shmem);
vn_ring_fini(&instance->ring.ring);
mtx_destroy(&instance->ring.mutex);
}
@@ -1919,7 +1912,7 @@ vn_DestroyInstance(VkInstance _instance,
vn_call_vkDestroyInstance(instance, _instance, NULL);
vn_renderer_bo_unref(instance->reply.bo);
vn_renderer_shmem_unref(instance->renderer, instance->reply.shmem);
uint32_t destroy_ring_data[4];
struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
@@ -1931,7 +1924,7 @@ vn_DestroyInstance(VkInstance _instance,
vn_cs_encoder_fini(&instance->ring.upload);
vn_ring_fini(&instance->ring.ring);
mtx_destroy(&instance->ring.mutex);
vn_renderer_bo_unref(instance->ring.bo);
vn_renderer_shmem_unref(instance->renderer, instance->ring.shmem);
mtx_destroy(&instance->roundtrip_mutex);
vn_renderer_destroy(instance->renderer, alloc);

View File

@@ -35,7 +35,7 @@ struct vn_instance {
struct {
mtx_t mutex;
struct vn_renderer_bo *bo;
struct vn_renderer_shmem *shmem;
struct vn_ring ring;
uint64_t id;
@@ -44,7 +44,7 @@ struct vn_instance {
} ring;
struct {
struct vn_renderer_bo *bo;
struct vn_renderer_shmem *shmem;
size_t size;
size_t used;
void *ptr;
@@ -141,7 +141,7 @@ struct vn_instance_submit_command {
size_t reply_size;
/* when reply_size is non-zero, NULL can be returned on errors */
struct vn_renderer_bo *reply_bo;
struct vn_renderer_shmem *reply_shmem;
struct vn_cs_decoder reply;
};
@@ -158,7 +158,7 @@ vn_instance_submit_command_init(struct vn_instance *instance,
submit->command.buffers = &submit->buffer;
submit->reply_size = reply_size;
submit->reply_bo = NULL;
submit->reply_shmem = NULL;
return &submit->command;
}
@@ -171,15 +171,15 @@ static inline struct vn_cs_decoder *
vn_instance_get_command_reply(struct vn_instance *instance,
struct vn_instance_submit_command *submit)
{
return submit->reply_bo ? &submit->reply : NULL;
return submit->reply_shmem ? &submit->reply : NULL;
}
static inline void
vn_instance_free_command_reply(struct vn_instance *instance,
struct vn_instance_submit_command *submit)
{
assert(submit->reply_bo);
vn_renderer_bo_unref(submit->reply_bo);
assert(submit->reply_shmem);
vn_renderer_shmem_unref(instance->renderer, submit->reply_shmem);
}
#endif /* VN_DEVICE_H */

View File

@@ -18,9 +18,6 @@ struct vn_renderer_shmem {
struct vn_renderer_bo_ops {
void (*destroy)(struct vn_renderer_bo *bo);
/* allocate a CPU shared memory as the storage */
VkResult (*init_cpu)(struct vn_renderer_bo *bo, VkDeviceSize size);
/* import a VkDeviceMemory as the storage */
VkResult (*init_gpu)(struct vn_renderer_bo *bo,
VkDeviceSize size,
@@ -319,27 +316,6 @@ vn_renderer_shmem_unref(struct vn_renderer *renderer,
}
}
static inline VkResult
vn_renderer_bo_create_cpu(struct vn_renderer *renderer,
VkDeviceSize size,
struct vn_renderer_bo **_bo)
{
struct vn_renderer_bo *bo = renderer->ops.bo_create(renderer);
if (!bo)
return VK_ERROR_OUT_OF_HOST_MEMORY;
VkResult result = bo->ops.init_cpu(bo, size);
if (result != VK_SUCCESS) {
bo->ops.destroy(bo);
return result;
}
atomic_init(&bo->refcount, 1);
*_bo = bo;
return VK_SUCCESS;
}
static inline VkResult
vn_renderer_bo_create_gpu(struct vn_renderer *renderer,
VkDeviceSize size,

View File

@@ -1181,22 +1181,6 @@ virtgpu_bo_init_gpu(struct vn_renderer_bo *_bo,
return bo->gem_handle ? VK_SUCCESS : VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
static VkResult
virtgpu_bo_init_cpu(struct vn_renderer_bo *_bo, VkDeviceSize size)
{
struct virtgpu_bo *bo = (struct virtgpu_bo *)_bo;
struct virtgpu *gpu = bo->gpu;
bo->blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
bo->size = size;
bo->gem_handle = virtgpu_ioctl_resource_create_blob(
gpu, VIRTGPU_BLOB_MEM_GUEST, bo->blob_flags, bo->size, 0,
&bo->base.res_id);
return bo->gem_handle ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
}
static void
virtgpu_bo_destroy(struct vn_renderer_bo *_bo)
{
@@ -1224,7 +1208,6 @@ virtgpu_bo_create(struct vn_renderer *renderer)
bo->gpu = gpu;
bo->base.ops.destroy = virtgpu_bo_destroy;
bo->base.ops.init_cpu = virtgpu_bo_init_cpu;
bo->base.ops.init_gpu = virtgpu_bo_init_gpu;
bo->base.ops.init_dmabuf = virtgpu_bo_init_dmabuf;
bo->base.ops.export_dmabuf = virtgpu_bo_export_dmabuf;

View File

@@ -726,7 +726,6 @@ static int
vtest_bo_export_dmabuf(struct vn_renderer_bo *_bo)
{
const struct vtest_bo *bo = (struct vtest_bo *)_bo;
/* this suffices because vtest_bo_init_cpu does not set the bit */
const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;
return shareable ? os_dupfd_cloexec(bo->res_fd) : -1;
}
@@ -768,23 +767,6 @@ vtest_bo_init_gpu(struct vn_renderer_bo *_bo,
return VK_SUCCESS;
}
static VkResult
vtest_bo_init_cpu(struct vn_renderer_bo *_bo, VkDeviceSize size)
{
struct vtest_bo *bo = (struct vtest_bo *)_bo;
struct vtest *vtest = bo->vtest;
bo->blob_flags = VCMD_BLOB_FLAG_MAPPABLE;
bo->size = size;
mtx_lock(&vtest->sock_mutex);
bo->base.res_id = vtest_vcmd_resource_create_blob(
vtest, VCMD_BLOB_TYPE_GUEST, bo->blob_flags, bo->size, 0, &bo->res_fd);
mtx_unlock(&vtest->sock_mutex);
return VK_SUCCESS;
}
static void
vtest_bo_destroy(struct vn_renderer_bo *_bo)
{
@@ -818,7 +800,6 @@ vtest_bo_create(struct vn_renderer *renderer)
bo->res_fd = -1;
bo->base.ops.destroy = vtest_bo_destroy;
bo->base.ops.init_cpu = vtest_bo_init_cpu;
bo->base.ops.init_gpu = vtest_bo_init_gpu;
bo->base.ops.init_dmabuf = NULL;
bo->base.ops.export_dmabuf = vtest_bo_export_dmabuf;

View File

@@ -81,8 +81,8 @@ vn_ring_retire_submits(struct vn_ring *ring, uint32_t seqno)
if (!vn_ring_ge_seqno(ring, seqno, submit->seqno))
break;
for (uint32_t i = 0; i < submit->bo_count; i++)
vn_renderer_bo_unref(submit->bos[i]);
for (uint32_t i = 0; i < submit->shmem_count; i++)
vn_renderer_shmem_unref(ring->renderer, submit->shmems[i]);
list_del(&submit->head);
list_add(&submit->head, &ring->free_submits);
@@ -144,16 +144,19 @@ vn_ring_get_layout(size_t extra_size, struct vn_ring_layout *layout)
layout->extra_offset = layout->buffer_offset + layout->buffer_size;
layout->extra_size = extra_size;
layout->bo_size = layout->extra_offset + layout->extra_size;
layout->shmem_size = layout->extra_offset + layout->extra_size;
}
void
vn_ring_init(struct vn_ring *ring,
struct vn_renderer *renderer,
const struct vn_ring_layout *layout,
void *shared)
{
memset(ring, 0, sizeof(*ring));
memset(shared, 0, layout->bo_size);
memset(shared, 0, layout->shmem_size);
ring->renderer = renderer;
ring->shared.head = shared + layout->head_offset;
ring->shared.tail = shared + layout->tail_offset;
@@ -177,19 +180,21 @@ vn_ring_fini(struct vn_ring *ring)
}
struct vn_ring_submit *
vn_ring_get_submit(struct vn_ring *ring, uint32_t bo_count)
vn_ring_get_submit(struct vn_ring *ring, uint32_t shmem_count)
{
const uint32_t min_bo_count = 2;
const uint32_t min_shmem_count = 2;
struct vn_ring_submit *submit;
/* TODO this could be simplified if we could omit bo_count */
if (bo_count <= min_bo_count && !list_is_empty(&ring->free_submits)) {
/* TODO this could be simplified if we could omit shmem_count */
if (shmem_count <= min_shmem_count &&
!list_is_empty(&ring->free_submits)) {
submit =
list_first_entry(&ring->free_submits, struct vn_ring_submit, head);
list_del(&submit->head);
} else {
bo_count = MAX2(bo_count, min_bo_count);
submit = malloc(sizeof(*submit) + sizeof(submit->bos[0]) * bo_count);
shmem_count = MAX2(shmem_count, min_shmem_count);
submit =
malloc(sizeof(*submit) + sizeof(submit->shmems[0]) * shmem_count);
}
return submit;

View File

@@ -22,7 +22,7 @@
* also venus commands that facilitate polling or waiting for ongoing works.
*/
/* the layout of a ring in a BO */
/* the layout of a ring in a shmem */
struct vn_ring_layout {
size_t head_offset;
size_t tail_offset;
@@ -34,7 +34,7 @@ struct vn_ring_layout {
size_t extra_offset;
size_t extra_size;
size_t bo_size;
size_t shmem_size;
};
static_assert(ATOMIC_INT_LOCK_FREE == 2 && sizeof(atomic_uint) == 4,
@@ -54,12 +54,14 @@ struct vn_ring_submit {
struct list_head head;
/* BOs to keep alive (TODO make sure BOs are pinned) */
uint32_t bo_count;
struct vn_renderer_bo *bos[];
/* BOs to keep alive (TODO make sure shmems are pinned) */
uint32_t shmem_count;
struct vn_renderer_shmem *shmems[];
};
struct vn_ring {
struct vn_renderer *renderer;
struct vn_ring_shared shared;
uint32_t cur;
@@ -72,6 +74,7 @@ vn_ring_get_layout(size_t extra_size, struct vn_ring_layout *layout);
void
vn_ring_init(struct vn_ring *ring,
struct vn_renderer *renderer,
const struct vn_ring_layout *layout,
void *shared);
@@ -79,7 +82,7 @@ void
vn_ring_fini(struct vn_ring *ring);
struct vn_ring_submit *
vn_ring_get_submit(struct vn_ring *ring, uint32_t bo_count);
vn_ring_get_submit(struct vn_ring *ring, uint32_t shmem_count);
bool
vn_ring_submit(struct vn_ring *ring,