freedreno/drm/virtio: Protocol updates

This syncs up with the protocol that eventually landed in virglrenderer.

1) Move all static params to the capset to avoid having to query the
   host (reducing synchronous round trips at startup)
2) Use res_id instead of host_handle.. costs extra hashtable lookups in
   the host during submit, but this lets us (with userspace-allocated
   IOVA) make bo alloc and import completely async.
3) Require userspace-allocated IOVA to simplify the protocol and not
   have to deal with GEM_NEW/GEM_INFO potentially being synchronous.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16086>
Rob Clark 2022-04-16 12:11:31 -07:00 committed by Marge Bot
parent fa23ddf258
commit cb5f25ea71
7 changed files with 141 additions and 310 deletions

View File

@@ -28,20 +28,33 @@ struct msm_shmem {
*/
uint32_t seqno;
/* TODO maybe separate flags for host to increment when:
* a) host CPU error (like async SUBMIT failed, etc)
* b) global reset count, if it hasn't incremented guest
* can skip synchronous getparam..
/**
* Offset to the start of rsp memory region in the shmem buffer. This
* is set by the host when the shmem buffer is allocated, to allow for
* extending the shmem buffer with new fields. The size of the rsp
* memory region is the size of the shmem buffer (controlled by the
* guest) minus rsp_mem_offset.
*
* The guest should use the msm_shmem_has_field() macro to determine
* if the host supports a given field, ie. to handle compatibility of
* newer guest vs older host.
*
* Making the guest userspace responsible for backwards compatibility
* simplifies the host VMM.
*/
uint16_t error;
uint16_t rsp_mem_len;
uint32_t rsp_mem_offset;
#define msm_shmem_has_field(shmem, field) ({ \
struct msm_shmem *_shmem = (shmem); \
(_shmem->rsp_mem_offset > offsetof(struct msm_shmem, field)); \
})
/**
* Memory to use for response messages. The offset to use for the
* response message is allocated by the guest, and specified by
* msm_ccmd_req:rsp_off.
* Counter that is incremented on asynchronous errors, like SUBMIT
* or GEM_NEW failures. The guest should treat errors as context-
* lost.
*/
uint8_t rsp_mem[0x4000-8];
uint32_t async_error;
};
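A minimal guest-side sketch of how rsp_mem_offset and async_error are meant to be used together (the helper name is made up for illustration); against an older host whose shmem layout predates async_error, the field check simply fails and no error is ever reported:

static bool
guest_context_lost(struct msm_shmem *shmem)
{
   /* Older host: rsp_mem starts before this field, so it isn't present. */
   if (!msm_shmem_has_field(shmem, async_error))
      return false;

   /* Non-zero means an async request (SUBMIT, GEM_NEW, ..) failed on the
    * host; the guest should treat the context as lost.
    */
   return shmem->async_error != 0;
}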
#define DEFINE_CAST(parent, child) \
@@ -57,7 +70,7 @@ enum msm_ccmd {
MSM_CCMD_NOP = 1, /* No payload, can be used to sync with host */
MSM_CCMD_IOCTL_SIMPLE,
MSM_CCMD_GEM_NEW,
MSM_CCMD_GEM_INFO,
MSM_CCMD_GEM_SET_IOVA,
MSM_CCMD_GEM_CPU_PREP,
MSM_CCMD_GEM_SET_NAME,
MSM_CCMD_GEM_SUBMIT,
@@ -65,7 +78,6 @@ enum msm_ccmd {
MSM_CCMD_SUBMITQUEUE_QUERY,
MSM_CCMD_WAIT_FENCE,
MSM_CCMD_SET_DEBUGINFO,
MSM_CCMD_GEM_CLOSE,
MSM_CCMD_LAST,
};
@@ -131,53 +143,33 @@ struct msm_ccmd_ioctl_simple_rsp {
* MSM_CCMD_GEM_NEW
*
* GEM buffer allocation, maps to DRM_MSM_GEM_NEW plus DRM_MSM_GEM_INFO to
* get the BO's iova (to avoid extra guest<->host round trip)
* set the BO's iova (to avoid extra guest -> host trip)
*
* No response.
*/
struct msm_ccmd_gem_new_req {
struct msm_ccmd_req hdr;
uint64_t iova;
uint64_t size;
uint32_t flags;
uint32_t blob_id;
uint64_t iova; /* non-zero for guest userspace iova allocation */
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_new_req)
struct msm_ccmd_gem_new_rsp {
struct msm_ccmd_rsp hdr;
int32_t ret;
uint32_t host_handle; /* host side GEM handle, used for cmdstream submit */
uint64_t iova;
};
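To make the "no response" part concrete, here is a condensed sketch of the new allocation path (the full version is the virtio_bo_new() rework further down; eb_lock/seqno bookkeeping, FD_BO_* flag translation and some error paths are trimmed, and the helper name is made up):

static struct fd_bo *
gem_new_sketch(struct fd_device *dev, uint32_t size)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);

   /* Guest-side VA allocation, no host involvement: */
   uint64_t iova = virtio_dev_alloc_iova(dev, size);
   if (!iova)
      return NULL;

   struct msm_ccmd_gem_new_req req = {
      .hdr     = MSM_CCMD(GEM_NEW, sizeof(req)),
      .iova    = iova,
      .size    = size,
      .blob_id = p_atomic_inc_return(&virtio_dev->next_blob_id),
   };

   /* The request rides along with blob creation and has no response,
    * so the guest never waits on the host here:
    */
   struct drm_virtgpu_resource_create_blob args = {
      .blob_mem   = VIRTGPU_BLOB_MEM_HOST3D,
      .blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
      .size       = size,
      .blob_id    = req.blob_id,
      .cmd        = (uintptr_t)&req,
      .cmd_size   = sizeof(req),
   };

   if (drmIoctl(dev->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &args)) {
      virtio_dev_free_iova(dev, iova, size);
      return NULL;
   }

   struct fd_bo *bo = bo_from_handle(dev, size, args.bo_handle);
   if (bo)
      bo->iova = iova;
   return bo;
}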
/*
* MSM_CCMD_GEM_INFO
* MSM_CCMD_GEM_SET_IOVA
*
* Returns similar information as MSM_CCMD_GEM_NEW, but for imported BO's,
* which don't have a blob_id in our context, but do have a resource-id
* Set the buffer iova (for imported BOs). Also used to release the iova
* (by setting it to zero) when a BO is freed.
*/
struct msm_ccmd_gem_info_req {
struct msm_ccmd_gem_set_iova_req {
struct msm_ccmd_req hdr;
uint32_t res_id;
uint32_t blob_mem; // TODO do we need this?
uint32_t blob_id; // TODO do we need this?
uint64_t iova; /* non-zero for guest userspace iova allocation */
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_info_req)
struct msm_ccmd_gem_info_rsp {
struct msm_ccmd_rsp hdr;
int32_t ret;
uint32_t host_handle; /* host side GEM handle, used for cmdstream submit */
uint64_t iova;
uint32_t pad;
uint32_t size; /* true size of bo on host side */
uint32_t res_id;
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_set_iova_req)
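For imports, this pairs with DRM_VIRTGPU_RESOURCE_INFO: the guest looks up the buffer's res_id, picks an iova from its own heap, and tells the host; writing zero on free releases the host-side mapping. A minimal sketch mirroring the set_iova() helper added to virtio_bo.c below:

static void
gem_set_iova_sketch(struct fd_device *dev, uint32_t res_id, uint64_t iova)
{
   struct msm_ccmd_gem_set_iova_req req = {
      .hdr    = MSM_CCMD(GEM_SET_IOVA, sizeof(req)),
      .res_id = res_id,
      .iova   = iova,   /* zero to release the mapping when the bo is freed */
   };

   /* Fire-and-forget, no response, so import stays fully async: */
   virtio_execbuf(dev, &req.hdr, false);
}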
/*
* MSM_CCMD_GEM_CPU_PREP
@@ -191,7 +183,7 @@ struct msm_ccmd_gem_info_rsp {
struct msm_ccmd_gem_cpu_prep_req {
struct msm_ccmd_req hdr;
uint32_t host_handle;
uint32_t res_id;
uint32_t op;
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_cpu_prep_req)
@@ -212,7 +204,7 @@ struct msm_ccmd_gem_cpu_prep_rsp {
struct msm_ccmd_gem_set_name_req {
struct msm_ccmd_req hdr;
uint32_t host_handle;
uint32_t res_id;
/* Note: packet size aligned to 4 bytes, so the string name may
* be shorter than the packet header indicates.
*/
@@ -280,7 +272,7 @@ DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_submit_req)
struct msm_ccmd_gem_upload_req {
struct msm_ccmd_req hdr;
uint32_t host_handle;
uint32_t res_id;
uint32_t pad;
uint32_t off;
@@ -362,19 +354,4 @@ struct msm_ccmd_set_debuginfo_req {
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_set_debuginfo_req)
/*
* MSM_CCMD_GEM_CLOSE
*
* If guest userspace allocated iova's are used, this request can be used
* to clear the vma when the guest bo is deleted.
*
* No response.
*/
struct msm_ccmd_gem_close_req {
struct msm_ccmd_req hdr;
uint32_t host_handle;
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_close_req)
#endif /* MSM_PROTO_H_ */

View File

@@ -104,7 +104,7 @@ virtio_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
struct msm_ccmd_gem_cpu_prep_req req = {
.hdr = MSM_CCMD(GEM_CPU_PREP, sizeof(req)),
.host_handle = virtio_bo_host_handle(bo),
.res_id = to_virtio_bo(bo)->res_id,
.op = op,
};
struct msm_ccmd_gem_cpu_prep_rsp *rsp;
@@ -174,7 +174,7 @@ virtio_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
struct msm_ccmd_gem_set_name_req *req = (void *)buf;
req->hdr = MSM_CCMD(GEM_SET_NAME, req_len);
req->host_handle = virtio_bo_host_handle(bo);
req->res_id = to_virtio_bo(bo)->res_id;
req->len = sz;
memcpy(req->payload, name, sz);
@@ -191,7 +191,7 @@ virtio_bo_upload(struct fd_bo *bo, void *src, unsigned len)
struct msm_ccmd_gem_upload_req *req = (void *)buf;
req->hdr = MSM_CCMD(GEM_UPLOAD, req_len);
req->host_handle = virtio_bo_host_handle(bo);
req->res_id = to_virtio_bo(bo)->res_id;
req->pad = 0;
req->off = 0;
req->len = len;
@@ -201,23 +201,28 @@ virtio_bo_upload(struct fd_bo *bo, void *src, unsigned len)
virtio_execbuf(bo->dev, &req->hdr, false);
}
static void
set_iova(struct fd_bo *bo, uint64_t iova)
{
struct msm_ccmd_gem_set_iova_req req = {
.hdr = MSM_CCMD(GEM_SET_IOVA, sizeof(req)),
.res_id = to_virtio_bo(bo)->res_id,
.iova = iova,
};
virtio_execbuf(bo->dev, &req.hdr, false);
}
static void
virtio_bo_destroy(struct fd_bo *bo)
{
struct virtio_bo *virtio_bo = to_virtio_bo(bo);
struct virtio_device *virtio_dev = to_virtio_device(bo->dev);
if (virtio_dev->userspace_allocates_iova && bo->iova) {
struct msm_ccmd_gem_close_req req = {
.hdr = MSM_CCMD(GEM_CLOSE, sizeof(req)),
.host_handle = virtio_bo_host_handle(bo),
};
virtio_execbuf(bo->dev, &req.hdr, false);
/* Release iova by setting to zero: */
if (bo->iova) {
set_iova(bo, 0);
virtio_dev_free_iova(bo->dev, bo->iova, bo->size);
}
free(virtio_bo);
}
@@ -232,50 +237,6 @@ static const struct fd_bo_funcs funcs = {
.destroy = virtio_bo_destroy,
};
struct allocation_wait {
struct fd_bo *bo;
int fence_fd;
struct msm_ccmd_gem_new_rsp *new_rsp;
struct msm_ccmd_gem_info_rsp *info_rsp;
};
static void
allocation_wait_execute(void *job, void *gdata, int thread_index)
{
struct allocation_wait *wait = job;
struct virtio_bo *virtio_bo = to_virtio_bo(wait->bo);
sync_wait(wait->fence_fd, -1);
close(wait->fence_fd);
if (wait->new_rsp) {
virtio_bo->host_handle = wait->new_rsp->host_handle;
} else {
virtio_bo->host_handle = wait->info_rsp->host_handle;
wait->bo->size = wait->info_rsp->size;
}
fd_bo_del(wait->bo);
free(wait);
}
static void
enqueue_allocation_wait(struct fd_bo *bo, int fence_fd,
struct msm_ccmd_gem_new_rsp *new_rsp,
struct msm_ccmd_gem_info_rsp *info_rsp)
{
struct allocation_wait *wait = malloc(sizeof(*wait));
wait->bo = fd_bo_ref(bo);
wait->fence_fd = fence_fd;
wait->new_rsp = new_rsp;
wait->info_rsp = info_rsp;
util_queue_add_job(&bo->dev->submit_queue,
wait, &to_virtio_bo(bo)->fence,
allocation_wait_execute,
NULL, 0);
}
static struct fd_bo *
bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
@@ -286,8 +247,6 @@ bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
if (!virtio_bo)
return NULL;
util_queue_fence_init(&virtio_bo->fence);
bo = &virtio_bo->base;
/* Note we need to set these because allocation_wait_execute() could
@@ -300,6 +259,20 @@ bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
bo->funcs = &funcs;
bo->handle = handle;
struct drm_virtgpu_resource_info args = {
.bo_handle = handle,
};
int ret;
ret = drmCommandWriteRead(dev->fd, DRM_VIRTGPU_RESOURCE_INFO, &args, sizeof(args));
if (ret) {
INFO_MSG("failed to get resource info: %s", strerror(errno));
free(virtio_bo);
return NULL;
}
virtio_bo->res_id = args.res_handle;
fd_bo_init_common(bo, dev);
return bo;
@@ -309,76 +282,16 @@ bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
struct fd_bo *
virtio_bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
struct virtio_device *virtio_dev = to_virtio_device(dev);
struct fd_bo *bo = bo_from_handle(dev, size, handle);
struct drm_virtgpu_resource_info args = {
.bo_handle = handle,
};
int ret;
ret = drmCommandWriteRead(dev->fd, DRM_VIRTGPU_RESOURCE_INFO, &args, sizeof(args));
if (ret) {
INFO_MSG("failed to get resource info: %s", strerror(errno));
if (!bo)
return NULL;
bo->iova = virtio_dev_alloc_iova(dev, size);
if (!bo->iova)
goto fail;
}
struct msm_ccmd_gem_info_req req = {
.hdr = MSM_CCMD(GEM_INFO, sizeof(req)),
.res_id = args.res_handle,
.blob_mem = args.blob_mem,
.blob_id = p_atomic_inc_return(&virtio_dev->next_blob_id),
};
if (virtio_dev->userspace_allocates_iova) {
req.iova = virtio_dev_alloc_iova(dev, size);
if (!req.iova) {
virtio_dev_free_iova(dev, req.iova, size);
ret = -ENOMEM;
goto fail;
}
}
struct msm_ccmd_gem_info_rsp *rsp =
virtio_alloc_rsp(dev, &req.hdr, sizeof(*rsp));
struct virtio_bo *virtio_bo = to_virtio_bo(bo);
virtio_bo->blob_id = req.blob_id;
if (virtio_dev->userspace_allocates_iova) {
int fence_fd;
ret = virtio_execbuf_fenced(dev, &req.hdr, -1, &fence_fd, 0);
if (ret) {
INFO_MSG("failed to get gem info: %s", strerror(errno));
goto fail;
}
bo->iova = req.iova;
enqueue_allocation_wait(bo, fence_fd, NULL, rsp);
} else {
ret = virtio_execbuf(dev, &req.hdr, true);
if (ret) {
INFO_MSG("failed to get gem info: %s", strerror(errno));
goto fail;
}
if (rsp->ret) {
INFO_MSG("failed (on host) to get gem info: %s", strerror(rsp->ret));
goto fail;
}
virtio_bo->host_handle = rsp->host_handle;
bo->iova = rsp->iova;
/* If the imported buffer is allocated via virgl context (for example
* minigbm/arc-cros-gralloc) then the guest gem object size is fake,
* potentially not accounting for UBWC meta data, required pitch
* alignment, etc. But in the import path the gallium driver checks
* that the size matches the minimum size based on layout. So replace
* the guest potentially-fake size with the real size from the host:
*/
bo->size = rsp->size;
}
set_iova(bo, bo->iova);
return bo;
@@ -400,7 +313,6 @@ virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
.hdr = MSM_CCMD(GEM_NEW, sizeof(req)),
.size = size,
};
struct msm_ccmd_gem_new_rsp *rsp = NULL;
int ret;
if (flags & FD_BO_SCANOUT)
@@ -437,20 +349,15 @@ virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
* is used to link the created bo to the get_blob() call
*/
req.blob_id = args.blob_id;
rsp = virtio_alloc_rsp(dev, &req.hdr, sizeof(*rsp));
if (virtio_dev->userspace_allocates_iova) {
req.iova = virtio_dev_alloc_iova(dev, size);
if (!req.iova) {
ret = -ENOMEM;
goto fail;
}
req.iova = virtio_dev_alloc_iova(dev, size);
if (!req.iova) {
ret = -ENOMEM;
goto fail;
}
}
simple_mtx_lock(&virtio_dev->eb_lock);
if (rsp)
if (args.cmd)
req.hdr.seqno = ++virtio_dev->next_seqno;
ret = drmIoctl(dev->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &args);
simple_mtx_unlock(&virtio_dev->eb_lock);
@@ -461,54 +368,13 @@ virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
struct virtio_bo *virtio_bo = to_virtio_bo(bo);
virtio_bo->blob_id = args.blob_id;
if (rsp) {
if (virtio_dev->userspace_allocates_iova) {
int fence_fd;
/* We can't get a fence fd from RESOURCE_CREATE_BLOB, so send
* a NOP packet just for that purpose:
*/
struct msm_ccmd_nop_req nop = {
.hdr = MSM_CCMD(NOP, sizeof(nop)),
};
ret = virtio_execbuf_fenced(dev, &nop.hdr, -1, &fence_fd, 0);
if (ret) {
INFO_MSG("failed to get gem info: %s", strerror(errno));
goto fail;
}
bo->iova = req.iova;
enqueue_allocation_wait(bo, fence_fd, rsp, NULL);
} else {
/* RESOURCE_CREATE_BLOB is async, so we need to wait for host..
* which is a bit unfortunate, but better to sync here than
* add extra code to check if we need to wait each time we
* emit a reloc.
*/
virtio_host_sync(dev, &req.hdr);
virtio_bo->host_handle = rsp->host_handle;
bo->iova = rsp->iova;
}
}
bo->iova = req.iova;
return bo;
fail:
if (req.iova) {
assert(virtio_dev->userspace_allocates_iova);
virtio_dev_free_iova(dev, req.iova, size);
}
return NULL;
}
uint32_t
virtio_bo_host_handle(struct fd_bo *bo)
{
struct virtio_bo *virtio_bo = to_virtio_bo(bo);
util_queue_fence_wait(&virtio_bo->fence);
return virtio_bo->host_handle;
}

View File

@@ -36,10 +36,7 @@ virtio_device_destroy(struct fd_device *dev)
struct virtio_device *virtio_dev = to_virtio_device(dev);
fd_bo_del_locked(virtio_dev->shmem_bo);
if (virtio_dev->userspace_allocates_iova) {
util_vma_heap_finish(&virtio_dev->address_space);
}
util_vma_heap_finish(&virtio_dev->address_space);
}
static const struct fd_device_funcs funcs = {
@@ -156,8 +153,13 @@ virtio_device_new(int fd, drmVersionPtr version)
INFO_MSG("has_cached_coherent: %u", caps.u.msm.has_cached_coherent);
INFO_MSG("va_start: 0x%0"PRIx64, caps.u.msm.va_start);
INFO_MSG("va_size: 0x%0"PRIx64, caps.u.msm.va_size);
INFO_MSG("gpu_id: %u", caps.u.msm.gpu_id);
INFO_MSG("gmem_size: %u", caps.u.msm.gmem_size);
INFO_MSG("gmem_base: 0x%0" PRIx64, caps.u.msm.gmem_base);
INFO_MSG("chip_id: 0x%0" PRIx64, caps.u.msm.chip_id);
INFO_MSG("max_freq: %u", caps.u.msm.max_freq);
if (caps.wire_format_version != 1) {
if (caps.wire_format_version != 2) {
ERROR_MSG("Unsupported protocol version: %u", caps.wire_format_version);
return NULL;
}
@@ -168,6 +170,11 @@ virtio_device_new(int fd, drmVersionPtr version)
return NULL;
}
if (!caps.u.msm.va_size) {
ERROR_MSG("No address space");
return NULL;
}
ret = set_context(fd);
if (ret) {
INFO_MSG("Could not set context type: %s", strerror(errno));
@ -186,6 +193,8 @@ virtio_device_new(int fd, drmVersionPtr version)
p_atomic_set(&virtio_dev->next_blob_id, 1);
virtio_dev->caps = caps;
util_queue_init(&dev->submit_queue, "sq", 8, 1, 0, NULL);
dev->bo_size = sizeof(struct virtio_bo);
@@ -195,14 +204,10 @@ virtio_device_new(int fd, drmVersionPtr version)
set_debuginfo(dev);
if (caps.u.msm.va_start && caps.u.msm.va_size) {
virtio_dev->userspace_allocates_iova = true;
util_vma_heap_init(&virtio_dev->address_space,
caps.u.msm.va_start,
caps.u.msm.va_size);
simple_mtx_init(&virtio_dev->address_space_lock, mtx_plain);
}
util_vma_heap_init(&virtio_dev->address_space,
caps.u.msm.va_start,
caps.u.msm.va_size);
simple_mtx_init(&virtio_dev->address_space_lock, mtx_plain);
return dev;
}
@@ -217,7 +222,7 @@ virtio_alloc_rsp(struct fd_device *dev, struct msm_ccmd_req *req, uint32_t sz)
sz = align(sz, 8);
if ((virtio_dev->next_rsp_off + sz) >= sizeof(virtio_dev->shmem->rsp_mem))
if ((virtio_dev->next_rsp_off + sz) >= virtio_dev->rsp_mem_len)
virtio_dev->next_rsp_off = 0;
off = virtio_dev->next_rsp_off;
@@ -227,7 +232,7 @@ virtio_alloc_rsp(struct fd_device *dev, struct msm_ccmd_req *req, uint32_t sz)
req->rsp_off = off;
struct msm_ccmd_rsp *rsp = (void *)&virtio_dev->shmem->rsp_mem[off];
struct msm_ccmd_rsp *rsp = (void *)&virtio_dev->rsp_mem[off];
rsp->len = sz;
return rsp;

View File

@@ -76,6 +76,8 @@ virtio_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
uint64_t *value)
{
struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
struct virtio_device *virtio_dev = to_virtio_device(pipe->dev);
switch (param) {
case FD_DEVICE_ID: // XXX probably get rid of this..
case FD_GPU_ID:
@@ -91,11 +93,13 @@
*value = virtio_pipe->chip_id;
return 0;
case FD_MAX_FREQ:
return query_param(pipe, MSM_PARAM_MAX_FREQ, value);
*value = virtio_dev->caps.u.msm.max_freq;
return 0;
case FD_TIMESTAMP:
return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
case FD_NR_RINGS:
return query_param(pipe, MSM_PARAM_NR_RINGS, value);
*value = virtio_dev->caps.u.msm.priorities;
return 0;
case FD_CTX_FAULTS:
return query_queue_param(pipe, MSM_SUBMITQUEUE_PARAM_FAULTS, value);
case FD_GLOBAL_FAULTS:
@@ -194,18 +198,6 @@ static const struct fd_pipe_funcs funcs = {
.destroy = virtio_pipe_destroy,
};
static uint64_t
get_param(struct fd_pipe *pipe, uint32_t param)
{
uint64_t value;
int ret = query_param(pipe, param, &value);
if (ret) {
ERROR_MSG("get-param failed! %d (%s)", ret, strerror(errno));
return 0;
}
return value;
}
static void
init_shmem(struct fd_device *dev)
{
@@ -217,11 +209,14 @@ init_shmem(struct fd_device *dev)
* have to bypass/reinvent fd_bo_new()..
*/
if (unlikely(!virtio_dev->shmem)) {
virtio_dev->shmem_bo = fd_bo_new(dev, sizeof(*virtio_dev->shmem),
virtio_dev->shmem_bo = fd_bo_new(dev, 0x4000,
_FD_BO_VIRTIO_SHM, "shmem");
virtio_dev->shmem = fd_bo_map(virtio_dev->shmem_bo);
virtio_dev->shmem_bo->bo_reuse = NO_CACHE;
uint32_t offset = virtio_dev->shmem->rsp_mem_offset;
virtio_dev->rsp_mem_len = fd_bo_size(virtio_dev->shmem_bo) - offset;
virtio_dev->rsp_mem = &((uint8_t *)virtio_dev->shmem)[offset];
}
simple_mtx_unlock(&virtio_dev->rsp_lock);
@@ -234,6 +229,7 @@ virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
[FD_PIPE_3D] = MSM_PIPE_3D0,
[FD_PIPE_2D] = MSM_PIPE_2D0,
};
struct virtio_device *virtio_dev = to_virtio_device(dev);
struct virtio_pipe *virtio_pipe = NULL;
struct fd_pipe *pipe = NULL;
@@ -253,21 +249,17 @@ virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
pipe->dev = dev;
virtio_pipe->pipe = pipe_id[id];
/* these params should be supported since the first version of drm/msm: */
virtio_pipe->gpu_id = get_param(pipe, MSM_PARAM_GPU_ID);
virtio_pipe->gmem = get_param(pipe, MSM_PARAM_GMEM_SIZE);
virtio_pipe->chip_id = get_param(pipe, MSM_PARAM_CHIP_ID);
virtio_pipe->gpu_id = virtio_dev->caps.u.msm.gpu_id;
virtio_pipe->gmem = virtio_dev->caps.u.msm.gmem_size;
virtio_pipe->gmem_base = virtio_dev->caps.u.msm.gmem_base;
virtio_pipe->chip_id = virtio_dev->caps.u.msm.chip_id;
if (fd_device_version(pipe->dev) >= FD_VERSION_GMEM_BASE)
virtio_pipe->gmem_base = get_param(pipe, MSM_PARAM_GMEM_BASE);
if (!(virtio_pipe->gpu_id || virtio_pipe->chip_id))
goto fail;
if (to_virtio_device(dev)->userspace_allocates_iova) {
util_queue_init(&virtio_pipe->retire_queue, "rq", 8, 1,
UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);
}
util_queue_init(&virtio_pipe->retire_queue, "rq", 8, 1,
UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);
INFO_MSG("Pipe Info:");
INFO_MSG(" GPU-id: %d", virtio_pipe->gpu_id);

View File

@@ -48,6 +48,8 @@ struct virtio_device {
struct fd_bo *shmem_bo;
struct msm_shmem *shmem;
uint8_t *rsp_mem;
uint32_t rsp_mem_len;
uint32_t next_rsp_off;
simple_mtx_t rsp_lock;
simple_mtx_t eb_lock;
@@ -55,7 +57,7 @@ struct virtio_device {
uint32_t next_blob_id;
uint32_t next_seqno;
bool userspace_allocates_iova;
struct virgl_renderer_capset_drm caps;
/*
* Notes on address space allocation:
@@ -137,7 +139,7 @@ struct virtio_pipe {
int32_t next_submit_fence;
/**
* When userspace_allocates_iova, we need to defer deleting bo's (and
* When userspace allocates iova, we need to defer deleting bo's (and
* therefore releasing their address) until submits referencing them
* have completed. This is accomplished by enqueueing a job, holding
* a reference to the submit, that waits on the submit's out-fence
@@ -157,17 +159,7 @@ struct fd_submit *virtio_submit_new(struct fd_pipe *pipe);
struct virtio_bo {
struct fd_bo base;
uint64_t offset;
struct util_queue_fence fence;
/*
* Note: all access to host_handle must wait on fence, *other* than
* access from the submit_queue thread (because async bo allocations
* are retired on the submit_queue, guaranteeing that the fence is
* signaled before host_handle is accessed). All other access must
* use virtio_bo_host_handle().
*/
uint32_t host_handle;
uint32_t res_id;
uint32_t blob_id;
};
FD_DEFINE_CAST(fd_bo, virtio_bo);
@@ -176,8 +168,6 @@ struct fd_bo *virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags
struct fd_bo *virtio_bo_from_handle(struct fd_device *dev, uint32_t size,
uint32_t handle);
uint32_t virtio_bo_host_handle(struct fd_bo *bo);
/*
* Internal helpers:
*/

View File

@@ -53,7 +53,6 @@ flush_submit_list(struct list_head *submit_list)
struct fd_submit_sp *fd_submit = to_fd_submit_sp(last_submit(submit_list));
struct virtio_pipe *virtio_pipe = to_virtio_pipe(fd_submit->base.pipe);
struct fd_device *dev = virtio_pipe->base.dev;
struct virtio_device *virtio_dev = to_virtio_device(dev);
unsigned nr_cmds = 0;
@@ -136,10 +135,8 @@ flush_submit_list(struct list_head *submit_list)
for (unsigned i = 0; i < fd_submit->nr_bos; i++) {
struct virtio_bo *virtio_bo = to_virtio_bo(fd_submit->bos[i]);
assert(util_queue_fence_is_signalled(&virtio_bo->fence));
submit_bos[i].flags = fd_submit->bos[i]->reloc_flags;
submit_bos[i].handle = virtio_bo->host_handle;
submit_bos[i].handle = virtio_bo->res_id;
submit_bos[i].presumed = 0;
}
@@ -178,7 +175,7 @@ flush_submit_list(struct list_head *submit_list)
*/
out_fence->use_fence_fd = true;
out_fence_fd = &out_fence->fence_fd;
} else if (virtio_dev->userspace_allocates_iova) {
} else {
/* we are using retire_queue, so we need an out-fence for each
* submit.. we can just re-use fd_submit->out_fence_fd for temporary
* storage.
@@ -205,19 +202,18 @@ flush_submit_list(struct list_head *submit_list)
if (fd_submit->in_fence_fd != -1)
close(fd_submit->in_fence_fd);
if (virtio_dev->userspace_allocates_iova) {
if (out_fence_fd != &fd_submit->out_fence_fd)
fd_submit->out_fence_fd = os_dupfd_cloexec(*out_fence_fd);
fd_submit_ref(&fd_submit->base);
if (out_fence_fd != &fd_submit->out_fence_fd)
fd_submit->out_fence_fd = os_dupfd_cloexec(*out_fence_fd);
util_queue_fence_init(&fd_submit->retire_fence);
fd_submit_ref(&fd_submit->base);
util_queue_add_job(&virtio_pipe->retire_queue,
fd_submit, &fd_submit->retire_fence,
retire_execute,
retire_cleanup,
0);
}
util_queue_fence_init(&fd_submit->retire_fence);
util_queue_add_job(&virtio_pipe->retire_queue,
fd_submit, &fd_submit->retire_fence,
retire_execute,
retire_cleanup,
0);
return 0;
}

View File

@@ -21,6 +21,11 @@ struct virgl_renderer_capset_drm {
uint32_t priorities;
uint64_t va_start;
uint64_t va_size;
uint32_t gpu_id;
uint32_t gmem_size;
uint64_t gmem_base;
uint64_t chip_id;
uint32_t max_freq;
} msm; /* context_type == VIRTGPU_DRM_CONTEXT_MSM */
} u;
};
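For reference, a sketch of how the guest can fill this capset once at startup, replacing the per-param MSM_PARAM_* round trips to the host; the capset id constant name and include paths are assumptions here, not taken from this commit:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "drm-uapi/virtgpu_drm.h"

static int
query_drm_capset(int fd, struct virgl_renderer_capset_drm *caps)
{
   struct drm_virtgpu_get_caps args = {
      .cap_set_id  = VIRGL_RENDERER_CAPSET_DRM,   /* assumed constant name */
      .cap_set_ver = 0,
      .addr        = (uintptr_t)caps,
      .size        = sizeof(*caps),
   };

   memset(caps, 0, sizeof(*caps));

   /* One synchronous ioctl at startup replaces the GPU_ID / GMEM_SIZE /
    * CHIP_ID / MAX_FREQ getparam queries to the host.
    */
   return drmIoctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
}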