/*
 * Copyright 2021 Google LLC
 * SPDX-License-Identifier: MIT
 */
|
#ifndef VN_RING_H
#define VN_RING_H
#include "vn_common.h"
/**
 * A ring is a single-producer and single-consumer circular buffer.  The data
 * in the buffer are produced and consumed in order.  An externally-defined
 * mechanism is required for ring setup and notifications in both directions.
 * Notifications for new data from the producer are needed only when the
 * consumer is not actively polling, which is indicated by the ring status.
 *
 * For venus, the data are plain venus commands.  When a venus command is
 * consumed from the ring's perspective, there can still be ongoing CPU
 * and/or GPU work.  This is not an issue when the work generated by
 * following venus commands is correctly queued after the ongoing work.
 * There are also venus commands that facilitate polling or waiting for
 * ongoing work.
 */

/* the layout of a ring in a shmem */
|
2021-03-01 19:54:21 +00:00
|
|
|
struct vn_ring_layout {
|
|
|
|
size_t head_offset;
|
|
|
|
size_t tail_offset;
|
|
|
|
size_t status_offset;
|
|
|
|
|
|
|
|
size_t buffer_offset;
|
|
|
|
size_t buffer_size;
|
|
|
|
|
|
|
|
size_t extra_offset;
|
|
|
|
size_t extra_size;
|
|
|
|
|
2021-04-22 21:33:59 +01:00
|
|
|
size_t shmem_size;
|
2021-03-01 19:54:21 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static_assert(ATOMIC_INT_LOCK_FREE == 2 && sizeof(atomic_uint) == 4,
|
|
|
|
"vn_ring_shared requires lock-free 32-bit atomic_uint");
|
|
|
|
|
|
|
|
/* pointers to a ring in a BO */
|
|
|
|
struct vn_ring_shared {
|
|
|
|
const volatile atomic_uint *head;
|
|
|
|
volatile atomic_uint *tail;
|
|
|
|
const volatile atomic_uint *status;
|
|
|
|
void *buffer;
|
|
|
|
void *extra;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct vn_ring_submit {
|
|
|
|
uint32_t seqno;
|
|
|
|
|
|
|
|
struct list_head head;
|
|
|
|
|
2021-04-22 21:33:59 +01:00
|
|
|
/* BOs to keep alive (TODO make sure shmems are pinned) */
|
|
|
|
uint32_t shmem_count;
|
|
|
|
struct vn_renderer_shmem *shmems[];
|
2021-03-01 19:54:21 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
struct vn_ring {
|
2021-04-22 21:33:59 +01:00
|
|
|
struct vn_renderer *renderer;
|
|
|
|
|
2021-08-26 19:30:18 +01:00
|
|
|
/* TODO assume large ring support and use fixed size */
|
|
|
|
uint32_t buffer_size;
|
|
|
|
uint32_t buffer_mask;
|
|
|
|
|
2021-03-01 19:54:21 +00:00
|
|
|
struct vn_ring_shared shared;
|
|
|
|
uint32_t cur;
|
|
|
|
|
|
|
|
struct list_head submits;
|
|
|
|
struct list_head free_submits;
|
|
|
|
};
|
|
|
|
|
|
|
|
void
|
2021-08-26 19:30:18 +01:00
|
|
|
vn_ring_get_layout(size_t buf_size,
|
|
|
|
size_t extra_size,
|
|
|
|
struct vn_ring_layout *layout);
|
2021-03-01 19:54:21 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
vn_ring_init(struct vn_ring *ring,
|
2021-04-22 21:33:59 +01:00
|
|
|
struct vn_renderer *renderer,
|
2021-03-01 19:54:21 +00:00
|
|
|
const struct vn_ring_layout *layout,
|
|
|
|
void *shared);
|
|
|
|
|
|
|
|
void
|
|
|
|
vn_ring_fini(struct vn_ring *ring);
|
|
|
|
|
|
|
|
struct vn_ring_submit *
|
2021-04-22 21:33:59 +01:00
|
|
|
vn_ring_get_submit(struct vn_ring *ring, uint32_t shmem_count);
|
2021-03-01 19:54:21 +00:00
|
|
|
|
|
|
|
bool
|
|
|
|
vn_ring_submit(struct vn_ring *ring,
|
|
|
|
struct vn_ring_submit *submit,
|
2021-08-30 17:44:29 +01:00
|
|
|
const struct vn_cs_encoder *cs,
|
2021-03-01 19:54:21 +00:00
|
|
|
uint32_t *seqno);
|
|
|
|
|
|
|
|
void
|
|
|
|
vn_ring_wait(const struct vn_ring *ring, uint32_t seqno);
|
|
|
|
|
|
|
|
#endif /* VN_RING_H */