virgl: rework virgl_transfer_queue_extend
Do not take a transfer and do the memcpy internally. Add a _buffer suffix to the function name to make it clear that it is only for buffers.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Gurchetan Singh <gurchetansingh@chromium.org>
This commit is contained in:
parent
2b8ad88078
commit
324c20304e
|
@ -546,7 +546,6 @@ static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
|
|||
struct virgl_resource *vbuf = virgl_resource(resource);
|
||||
struct virgl_transfer dummy_trans = { 0 };
|
||||
bool flush;
|
||||
struct virgl_transfer *queued;
|
||||
|
||||
/*
|
||||
* Attempts to short circuit the entire process of mapping and unmapping
|
||||
|
@ -566,11 +565,10 @@ static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
|
|||
box->x, box->x + box->width))
|
||||
return false;
|
||||
|
||||
queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
|
||||
if (!queued || !queued->hw_res_map)
|
||||
if (!virgl_transfer_queue_extend_buffer(&vctx->queue,
|
||||
vbuf->hw_res, box->x, box->width, data))
|
||||
return false;
|
||||
|
||||
memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
|
||||
util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);
|
||||
|
||||
return true;
|
||||
|
|
|
@ -355,27 +355,26 @@ bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue,
|
|||
false);
|
||||
}
|
||||
|
||||
struct virgl_transfer *
|
||||
virgl_transfer_queue_extend(struct virgl_transfer_queue *queue,
|
||||
struct virgl_transfer *transfer)
|
||||
bool
|
||||
virgl_transfer_queue_extend_buffer(struct virgl_transfer_queue *queue,
|
||||
const struct virgl_hw_res *hw_res,
|
||||
unsigned offset, unsigned size,
|
||||
const void *data)
|
||||
{
|
||||
struct virgl_transfer *queued = NULL;
|
||||
struct virgl_transfer *queued;
|
||||
struct pipe_box box;
|
||||
|
||||
/* We don't support extending from copy transfers. */
|
||||
assert(!transfer->copy_src_hw_res);
|
||||
u_box_1d(offset, size, &box);
|
||||
queued = virgl_transfer_queue_find_overlap(queue, hw_res, 0, &box, true);
|
||||
if (!queued)
|
||||
return false;
|
||||
|
||||
if (transfer->base.resource->target == PIPE_BUFFER) {
|
||||
queued = virgl_transfer_queue_find_overlap(queue,
|
||||
transfer->hw_res,
|
||||
transfer->base.level,
|
||||
&transfer->base.box,
|
||||
true);
|
||||
}
|
||||
assert(queued->base.resource->target == PIPE_BUFFER);
|
||||
assert(queued->hw_res_map);
|
||||
|
||||
if (queued) {
|
||||
u_box_union_2d(&queued->base.box, &queued->base.box, &transfer->base.box);
|
||||
queued->offset = queued->base.box.x;
|
||||
}
|
||||
memcpy(queued->hw_res_map + offset, data, size);
|
||||
u_box_union_2d(&queued->base.box, &queued->base.box, &box);
|
||||
queued->offset = queued->base.box.x;
|
||||
|
||||
return queued;
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -56,9 +56,11 @@ bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue,
|
|||
|
||||
/*
|
||||
* Search the transfer queue for a transfer suitable for extension and
|
||||
* extend it to include the new transfer.
|
||||
* extend it to include the specified data.
|
||||
*/
|
||||
struct virgl_transfer * virgl_transfer_queue_extend(
|
||||
struct virgl_transfer_queue *queue, struct virgl_transfer *transfer);
|
||||
bool virgl_transfer_queue_extend_buffer(struct virgl_transfer_queue *queue,
|
||||
const struct virgl_hw_res *hw_res,
|
||||
unsigned offset, unsigned size,
|
||||
const void *data);
|
||||
|
||||
#endif /* VIRGL_TRANSFER_QUEUE_H */
|
||||
|
|
Loading…
Reference in New Issue