virgl/drm: New optimization for uploading textures

1. We can create resource with size of "1" on drm, because size
   is not passed to the renderer.
2. We can't create resource with size of "1" on vtest, because shmem
   is created based on that.
3. If renderer supports copy_transfer_from_host, then use staging
   buffer for transfer in both ways to and from host.

This will allow to reduce memory consumption in the guest.

v2:
   - add inline function for checking if we can use this optimization
   - add check in readback path. If renderer doesn't support
     copy transfer from host, then we need to go with previous
     path in readback (through transfer_get ioctl)

v3:
   - fix logic for readback

v4:
   - refactor the implementation to integrate it more to
     existing code base

v5:
   - reuse COPY_TRANSFER3D in both directions

v6:
   - encode direction in COPY_TRANSFER3D if host supports it

v7:
   - renamed cap bit
   - introduced COPY_TRANSFER3D_SIZE_LEGACY define

Reviewed-by: Gert Wollny <gert.wollny@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13689>
This commit is contained in:
mwezdeck 2021-11-05 13:13:01 +01:00 committed by Marge Bot
parent 420170fabc
commit cdc480585c
8 changed files with 116 additions and 12 deletions

View File

@ -48,9 +48,12 @@ void virgl_buffer_transfer_unmap(struct pipe_context *ctx,
trans->offset = transfer->box.x;
}
if (trans->copy_src_hw_res) {
if (trans->copy_src_hw_res && trans->direction == VIRGL_TRANSFER_TO_HOST) {
virgl_encode_copy_transfer(vctx, trans);
virgl_resource_destroy_transfer(vctx, trans);
} else if (trans->copy_src_hw_res && trans->direction == VIRGL_TRANSFER_FROM_HOST) {
// if it is readback, then we have already encoded transfer
virgl_resource_destroy_transfer(vctx, trans);
} else {
virgl_transfer_queue_unmap(&vctx->queue, trans);
}

View File

@ -1515,10 +1515,22 @@ void virgl_encode_copy_transfer(struct virgl_context *ctx,
{
uint32_t command;
struct virgl_screen *vs = virgl_screen(ctx->base.screen);
// set always synchronized to 1, second bit is used for direction
uint32_t direction_and_synchronized = 1;
if (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS) {
if (trans->direction == VIRGL_TRANSFER_TO_HOST) {
// do nothing, as 0 means transfer to host
} else if (trans->direction == VIRGL_TRANSFER_FROM_HOST) {
direction_and_synchronized |= 1 << 1;
} else {
// something wrong happened here
assert(0);
}
}
assert(trans->copy_src_hw_res);
command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);
virgl_encoder_write_cmd_dword(ctx, command);
/* Copy transfers need to explicitly specify the stride, since it may differ
* from the image stride.
@ -1526,8 +1538,7 @@ void virgl_encode_copy_transfer(struct virgl_context *ctx,
virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
vs->vws->emit_res(vs->vws, ctx->cbuf, trans->copy_src_hw_res, TRUE);
virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
/* At the moment all copy transfers are synchronized. */
virgl_encoder_write_dword(ctx->cbuf, 1);
virgl_encoder_write_dword(ctx->cbuf, direction_and_synchronized);
}
void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)

View File

@ -28,6 +28,7 @@
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"
#include "virgl_encode.h" // for declaration of virgl_encode_copy_transfer
/* A (soft) limit for the amount of memory we want to allow for queued staging
* resources. This is used to decide when we should force a flush, in order to
@ -42,12 +43,29 @@ enum virgl_transfer_map_type {
/* Map a range of a staging buffer. The updated contents should be transferred
* with a copy transfer.
*/
VIRGL_TRANSFER_MAP_STAGING,
VIRGL_TRANSFER_MAP_WRITE_TO_STAGING,
/* Reallocate the underlying virgl_hw_res. */
VIRGL_TRANSFER_MAP_REALLOC,
/* Map type for read of texture data from host to guest
* using staging buffer. */
VIRGL_TRANSFER_MAP_READ_FROM_STAGING,
};
/* Whether the copy-transfer-from-host optimization applies to this resource:
 * only non-buffer resources (textures) qualify, and only when the renderer
 * advertises support for copy transfers in both directions.
 */
static bool virgl_can_copy_transfer_from_host(struct virgl_screen *vs,
                                              struct virgl_resource *res)
{
   bool both_directions =
      (vs->caps.caps.v2.capability_bits_v2 &
       VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS) != 0;

   return both_directions && res->b.target != PIPE_BUFFER;
}
/* We need to flush to properly sync the transfer with the current cmdbuf.
* But there are cases where the flushing can be skipped:
*
@ -172,7 +190,7 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
if (wait) {
map_type = (can_realloc) ?
VIRGL_TRANSFER_MAP_REALLOC :
VIRGL_TRANSFER_MAP_STAGING;
VIRGL_TRANSFER_MAP_WRITE_TO_STAGING;
wait = false;
/* There is normally no need to flush either, unless the amount of
@ -183,11 +201,23 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
flush = (vctx->queued_staging_res_size >
VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT);
}
/* We can use staging buffer for texture uploads from guest to host */
if (can_staging && res->b.target != PIPE_BUFFER) {
map_type = VIRGL_TRANSFER_MAP_WRITE_TO_STAGING;
}
}
}
/* readback has some implications */
if (readback) {
/* If we are performing readback for textures and renderer supports
* copy_transfer_from_host, then we can return here with proper map.
*/
if (virgl_can_copy_transfer_from_host(vs, res) && vctx->supports_staging &&
xfer->base.usage & PIPE_MAP_READ) {
return VIRGL_TRANSFER_MAP_READ_FROM_STAGING;
}
/* Readback is yet another command and is transparent to the state
* trackers. It should be waited for in all cases, including when
* PIPE_MAP_UNSYNCHRONIZED is set.
@ -330,6 +360,36 @@ virgl_staging_map(struct virgl_context *vctx,
return map_addr;
}
/* Service a host-to-guest texture readback by mapping a staging region.
 * Must only be called for texture (non-buffer) readbacks from the host.
 *
 * A readback can be performed either via the transfer_get() ioctl or by
 * submitting a command buffer that carries an encoded copy transfer.
 * This function takes the latter route:
 *   1. reserve an offset in the staging buffer (virgl_staging_map)
 *   2. encode a FROM_HOST copy transfer on the wire
 *   3. flush the execbuffer to the host
 *   4. wait until the host-side copy has completed
 */
static void *
virgl_staging_read_map(struct virgl_context *vctx,
                       struct virgl_transfer *vtransfer)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   void *addr;

   assert(vtransfer->base.resource->target != PIPE_BUFFER);

   addr = virgl_staging_map(vctx, vtransfer);
   vtransfer->direction = VIRGL_TRANSFER_FROM_HOST;
   virgl_encode_copy_transfer(vctx, vtransfer);
   vctx->base.flush(&vctx->base, NULL, 0);
   vws->resource_wait(vws, vtransfer->copy_src_hw_res);

   return addr;
}
static bool
virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
{
@ -380,7 +440,8 @@ virgl_resource_transfer_map(struct pipe_context *ctx,
struct pipe_transfer **transfer)
{
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_winsys *vws = virgl_screen(ctx->screen)->vws;
struct virgl_screen *vscreen = virgl_screen(ctx->screen);
struct virgl_winsys *vws = vscreen->vws;
struct virgl_resource *vres = virgl_resource(resource);
struct virgl_transfer *trans;
enum virgl_transfer_map_type map_type;
@ -408,10 +469,16 @@ virgl_resource_transfer_map(struct pipe_context *ctx,
else
map_addr = NULL;
break;
case VIRGL_TRANSFER_MAP_STAGING:
case VIRGL_TRANSFER_MAP_WRITE_TO_STAGING:
map_addr = virgl_staging_map(vctx, trans);
/* Copy transfers don't make use of hw_res_map at the moment. */
trans->hw_res_map = NULL;
trans->direction = VIRGL_TRANSFER_TO_HOST;
break;
case VIRGL_TRANSFER_MAP_READ_FROM_STAGING:
map_addr = virgl_staging_read_map(vctx, trans);
/* Copy transfers don't make use of hw_res_map at the moment. */
trans->hw_res_map = NULL;
break;
case VIRGL_TRANSFER_MAP_ERROR:
default:
@ -506,6 +573,7 @@ static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
unsigned vbind, vflags;
struct virgl_screen *vs = virgl_screen(screen);
struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
uint32_t alloc_size;
res->b = *templ;
res->b.screen = &vs->base;
@ -523,6 +591,14 @@ static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
vbind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
}
// If the renderer supports copy transfers from the host, then for
// textures we allocate only a minimum-size bo in the guest, since
// the allocation size is not passed to the host anyway.
if (virgl_can_copy_transfer_from_host(vs, res))
alloc_size = 1;
else
alloc_size = res->metadata.total_size;
res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
templ->format, vbind,
templ->width0,
@ -532,7 +608,7 @@ static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
templ->last_level,
templ->nr_samples,
vflags,
res->metadata.total_size);
alloc_size);
if (!res->hw_res) {
FREE(res);
return NULL;

View File

@ -86,6 +86,8 @@ struct virgl_transfer {
struct virgl_hw_res *copy_src_hw_res;
/* The offset in the copy source resource to copy data from. */
uint32_t copy_src_offset;
/* copy transfers can be performed to and from host */
uint32_t direction;
};
void virgl_resource_destroy(struct pipe_screen *screen,

View File

@ -291,9 +291,12 @@ void virgl_texture_transfer_unmap(struct pipe_context *ctx,
}
if (queue_unmap) {
if (trans->copy_src_hw_res) {
if (trans->copy_src_hw_res && trans->direction == VIRGL_TRANSFER_TO_HOST) {
virgl_encode_copy_transfer(vctx, trans);
virgl_resource_destroy_transfer(vctx, trans);
} else if (trans->copy_src_hw_res && trans->direction == VIRGL_TRANSFER_FROM_HOST) {
// if it is readback, then we have already encoded transfer
virgl_resource_destroy_transfer(vctx, trans);
} else {
virgl_transfer_queue_unmap(&vctx->queue, trans);
}

View File

@ -564,9 +564,14 @@ static int virgl_vtest_get_caps(struct virgl_winsys *vws,
struct virgl_drm_caps *caps)
{
struct virgl_vtest_winsys *vtws = virgl_vtest_winsys(vws);
int ret;
virgl_ws_fill_new_caps_defaults(caps);
return virgl_vtest_send_get_caps(vtws, caps);
ret = virgl_vtest_send_get_caps(vtws, caps);
// vtest doesn't support that
if (caps->caps.v2.capability_bits_v2 & VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS)
caps->caps.v2.capability_bits_v2 &= ~VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS;
return ret;
}
static struct pipe_fence_handle *

View File

@ -443,6 +443,7 @@ enum virgl_formats {
#define VIRGL_CAP_V2_MEMINFO (1 << 3)
#define VIRGL_CAP_V2_STRING_MARKER (1 << 4)
#define VIRGL_CAP_V2_IMPLICIT_MSAA (1 << 6)
#define VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS (1 << 7)
/* virgl bind flags - these are compatible with mesa 10.5 gallium.
* but are fixed, no other should be passed to virgl either.

View File

@ -608,11 +608,14 @@ enum virgl_context_cmd {
#define VIRGL_TRANSFER3D_DATA_OFFSET 12
#define VIRGL_TRANSFER3D_DIRECTION 13
/* Copy transfer */
/* Copy transfer: both to host and from host */
#define VIRGL_COPY_TRANSFER3D_SIZE 14
/* The first 11 dwords are the same as VIRGL_RESOURCE_IW_* */
#define VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE 12
#define VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET 13
/* Bit 1 of this dword encodes the transfer direction:
 * set (1 << 1) means transfer from host; clear means transfer to host.
 */
#define VIRGL_COPY_TRANSFER3D_SYNCHRONIZED 14
/* set tweak flags */