freedreno/drm: Move submit_queue to base

The virtio backend will want this too.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14900>
Rob Clark, 2022-03-14 16:23:08 -07:00 (committed by Marge Bot)
parent 88a10c6216
commit 2ac9b23f78
5 changed files with 21 additions and 23 deletions
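
For illustration only (this sketch is not part of the commit): once the queue lives in struct fd_device and the flush/enqueue fence seqnos live in struct fd_pipe, a backend other than msm (e.g. the planned virtio backend) can reach them without a backend-specific cast. In the sketch below, everything other than the fd_pipe/fd_device fields and the util_queue API is an assumed, hypothetical name:

    #include <assert.h>
    #include "util/u_queue.h"
    #include "freedreno_priv.h"

    /* Hypothetical backend-neutral enqueue helper (illustrative only). */
    static void
    backend_enqueue_submit(struct fd_submit *submit,
                           struct util_queue_fence *fence,
                           util_queue_execute_func execute,
                           util_queue_execute_func cleanup)
    {
       struct fd_pipe *pipe = submit->pipe;

       /* Fence seqno bookkeeping is now on the shared fd_pipe... */
       assert(fd_fence_before(pipe->last_enqueue_fence, submit->fence));
       pipe->last_enqueue_fence = submit->fence;

       /* ...and the queue is on the shared fd_device, so any backend can
        * enqueue a deferred flush on it.
        */
       util_queue_add_job(&pipe->dev->submit_queue, submit, fence,
                          execute, cleanup, 0);
    }

The msm backend's enqueue_submit_list()/msm_submit_sp_flush() changes in the last file below are the real instances of this pattern.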

src/freedreno/drm/freedreno_device.c

@@ -152,6 +152,8 @@ fd_device_del_impl(struct fd_device *dev)
    fd_bo_cache_cleanup(&dev->ring_cache, 0);
    _mesa_hash_table_destroy(dev->handle_table, NULL);
    _mesa_hash_table_destroy(dev->name_table, NULL);
+   if (util_queue_is_initialized(&dev->submit_queue))
+      util_queue_destroy(&dev->submit_queue);
    dev->funcs->destroy(dev);
    if (close_fd >= 0)
       close(close_fd);

src/freedreno/drm/freedreno_priv.h

@@ -171,6 +171,8 @@ struct fd_device {
    struct fd_bo *suballoc_bo;
    uint32_t suballoc_offset;
    simple_mtx_t suballoc_lock;
+   struct util_queue submit_queue;
 };
 #define foreach_submit(name, list) \
@@ -237,6 +239,14 @@ struct fd_pipe {
     */
    uint32_t last_fence;
+   /**
+    * The last fence seqno that was flushed to kernel (doesn't mean that it
+    * is complete, just that the kernel knows about it)
+    */
+   uint32_t last_submit_fence;
+   uint32_t last_enqueue_fence; /* just for debugging */
    struct fd_bo *control_mem;
    volatile struct fd_pipe_control *control;

src/freedreno/drm/msm/msm_device.c

@@ -34,9 +34,6 @@ static void
 msm_device_destroy(struct fd_device *dev)
 {
    struct msm_device *msm_dev = to_msm_device(dev);
-   if (util_queue_is_initialized(&msm_dev->submit_queue)) {
-      util_queue_destroy(&msm_dev->submit_queue);
-   }
    free(msm_dev);
 }
@@ -70,7 +67,7 @@ msm_device_new(int fd, drmVersionPtr version)
        * thread's comm truncating the interesting part of the
        * process name.
        */
-      util_queue_init(&msm_dev->submit_queue, "sq", 8, 1, 0, NULL);
+      util_queue_init(&dev->submit_queue, "sq", 8, 1, 0, NULL);
    }
    if (version->version_minor >= FD_VERSION_CACHED_COHERENT) {

src/freedreno/drm/msm/msm_priv.h

@@ -42,7 +42,6 @@
 struct msm_device {
    struct fd_device base;
-   struct util_queue submit_queue;
 };
 FD_DEFINE_CAST(fd_device, msm_device);
@@ -58,14 +57,6 @@ struct msm_pipe {
    uint32_t queue_id;
    struct slab_parent_pool ring_pool;
-   /**
-    * The last fence seqno that was flushed to kernel (doesn't mean that it
-    * is complete, just that the kernel knows about it)
-    */
-   uint32_t last_submit_fence;
-   uint32_t last_enqueue_fence; /* just for debugging */
    /**
     * If we *ever* see an in-fence-fd, assume that userspace is
     * not relying on implicit fences.

src/freedreno/drm/msm/msm_ringbuffer_sp.c

@@ -393,8 +393,8 @@ flush_submit_list(struct list_head *submit_list)
    free(submit_bos);
    pthread_mutex_lock(&flush_mtx);
-   assert(fd_fence_before(msm_pipe->last_submit_fence, msm_submit->base.fence));
-   msm_pipe->last_submit_fence = msm_submit->base.fence;
+   assert(fd_fence_before(msm_pipe->base.last_submit_fence, msm_submit->base.fence));
+   msm_pipe->base.last_submit_fence = msm_submit->base.fence;
    pthread_cond_broadcast(&flush_cnd);
    pthread_mutex_unlock(&flush_mtx);
@@ -427,7 +427,6 @@ enqueue_submit_list(struct list_head *submit_list)
 {
    struct fd_submit *submit = last_submit(submit_list);
    struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);
-   struct msm_device *msm_dev = to_msm_device(submit->pipe->dev);
    list_replace(submit_list, &msm_submit->submit_list);
    list_inithead(submit_list);
@@ -442,7 +441,7 @@ enqueue_submit_list(struct list_head *submit_list)
    DEBUG_MSG("enqueue: %u", submit->fence);
-   util_queue_add_job(&msm_dev->submit_queue,
+   util_queue_add_job(&submit->pipe->dev->submit_queue,
                       submit, fence,
                       msm_submit_sp_flush_execute,
                       msm_submit_sp_flush_cleanup,
@@ -476,7 +475,7 @@ msm_submit_sp_flush(struct fd_submit *submit, int in_fence_fd,
                     struct fd_submit_fence *out_fence)
 {
    struct fd_device *dev = submit->pipe->dev;
-   struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
+   struct fd_pipe *pipe = submit->pipe;
    /* Acquire lock before flush_prep() because it is possible to race between
     * this and pipe->flush():
@@ -502,8 +501,8 @@ msm_submit_sp_flush(struct fd_submit *submit, int in_fence_fd,
    bool has_shared = msm_submit_sp_flush_prep(submit, in_fence_fd, out_fence);
-   assert(fd_fence_before(msm_pipe->last_enqueue_fence, submit->fence));
-   msm_pipe->last_enqueue_fence = submit->fence;
+   assert(fd_fence_before(pipe->last_enqueue_fence, submit->fence));
+   pipe->last_enqueue_fence = submit->fence;
    /* If we don't need an out-fence, we can defer the submit.
     *
@@ -534,7 +533,6 @@ msm_submit_sp_flush(struct fd_submit *submit, int in_fence_fd,
 void
 msm_pipe_sp_flush(struct fd_pipe *pipe, uint32_t fence)
 {
-   struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
    struct fd_device *dev = pipe->dev;
    struct list_head submit_list;
@@ -544,7 +542,7 @@ msm_pipe_sp_flush(struct fd_pipe *pipe, uint32_t fence)
    simple_mtx_lock(&dev->submit_lock);
-   assert(!fd_fence_after(fence, msm_pipe->last_enqueue_fence));
+   assert(!fd_fence_after(fence, pipe->last_enqueue_fence));
    foreach_submit_safe (deferred_submit, &dev->deferred_submits) {
       /* We should never have submits from multiple pipes in the deferred
@@ -577,7 +575,7 @@ flush_sync:
     * them to the kernel
     */
    pthread_mutex_lock(&flush_mtx);
-   while (fd_fence_before(msm_pipe->last_submit_fence, fence)) {
+   while (fd_fence_before(pipe->last_submit_fence, fence)) {
       pthread_cond_wait(&flush_cnd, &flush_mtx);
    }
    pthread_mutex_unlock(&flush_mtx);