gallium: rename PIPE_TRANSFER_* -> PIPE_MAP_*

Acked-by: Eric Anholt <eric@anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5749>
Marek Olšák 2020-07-01 08:16:12 -04:00 committed by Marge Bot
parent 44f1b1be7a
commit 22253e6b65
173 changed files with 865 additions and 865 deletions

View File

@ -754,49 +754,49 @@ the last (partial) page requires a box that ends at the end of the buffer
.. _pipe_transfer:
PIPE_TRANSFER
PIPE_MAP
^^^^^^^^^^^^^
These flags control the behavior of a transfer object.
``PIPE_TRANSFER_READ``
``PIPE_MAP_READ``
Resource contents read back (or accessed directly) at transfer create time.
``PIPE_TRANSFER_WRITE``
``PIPE_MAP_WRITE``
Resource contents will be written back at transfer_unmap time (or modified
as a result of being accessed directly).
``PIPE_TRANSFER_MAP_DIRECTLY``
``PIPE_MAP_DIRECTLY``
A transfer should directly map the resource. May return NULL if not supported.
``PIPE_TRANSFER_DISCARD_RANGE``
``PIPE_MAP_DISCARD_RANGE``
The memory within the mapped region is discarded. Cannot be used with
``PIPE_TRANSFER_READ``.
``PIPE_MAP_READ``.
``PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE``
``PIPE_MAP_DISCARD_WHOLE_RESOURCE``
Discards all memory backing the resource. It should not be used with
``PIPE_TRANSFER_READ``.
``PIPE_MAP_READ``.
``PIPE_TRANSFER_DONTBLOCK``
``PIPE_MAP_DONTBLOCK``
Fail if the resource cannot be mapped immediately.
``PIPE_TRANSFER_UNSYNCHRONIZED``
``PIPE_MAP_UNSYNCHRONIZED``
Do not synchronize pending operations on the resource when mapping. The
interaction of any writes to the map and any operations pending on the
resource is undefined. Cannot be used with ``PIPE_TRANSFER_READ``.
resource is undefined. Cannot be used with ``PIPE_MAP_READ``.
``PIPE_TRANSFER_FLUSH_EXPLICIT``
``PIPE_MAP_FLUSH_EXPLICIT``
Written ranges will be notified later with :ref:`transfer_flush_region`.
Cannot be used with ``PIPE_TRANSFER_READ``.
Cannot be used with ``PIPE_MAP_READ``.
``PIPE_TRANSFER_PERSISTENT``
``PIPE_MAP_PERSISTENT``
Allows the resource to be used for rendering while mapped.
PIPE_RESOURCE_FLAG_MAP_PERSISTENT must be set when creating
the resource.
If COHERENT is not set, memory_barrier(PIPE_BARRIER_MAPPED_BUFFER)
must be called to ensure the device can see what the CPU has written.
``PIPE_TRANSFER_COHERENT``
``PIPE_MAP_COHERENT``
If PERSISTENT is set, this ensures any writes done by the device are
immediately visible to the CPU and vice versa.
PIPE_RESOURCE_FLAG_MAP_COHERENT must be set when creating
@ -909,4 +909,4 @@ uploaded data, unless:
mapping, memory_barrier(PIPE_BARRIER_MAPPED_BUFFER) should be called on the
context that has mapped the resource. No flush is required.
* Mapping the resource with PIPE_TRANSFER_MAP_DIRECTLY.
* Mapping the resource with PIPE_MAP_DIRECTLY.
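
As a rough illustration of how the renamed flags combine in practice, here is a minimal sketch. The upload_range helper is hypothetical; pipe_buffer_map_range and pipe_buffer_unmap are the u_inlines helpers touched later in this commit.

#include <string.h>
#include "util/u_inlines.h"

/* Overwrite a sub-range of a buffer. PIPE_MAP_DISCARD_RANGE tells the
 * driver the previous contents of the mapped range are not needed, so it
 * can avoid stalling on the GPU. */
static void
upload_range(struct pipe_context *pipe, struct pipe_resource *buf,
             unsigned offset, unsigned size, const void *data)
{
   struct pipe_transfer *xfer;
   void *map = pipe_buffer_map_range(pipe, buf, offset, size,
                                     PIPE_MAP_WRITE |
                                     PIPE_MAP_DISCARD_RANGE,
                                     &xfer);
   if (!map)
      return;

   memcpy(map, data, size);
   pipe_buffer_unmap(pipe, xfer); /* contents are written back at unmap time */
}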

View File

@ -210,7 +210,7 @@ The integer capabilities:
hardware implements the SM5 features, component selection,
shadow comparison, and run-time offsets.
* ``PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT``: Whether
PIPE_TRANSFER_PERSISTENT and PIPE_TRANSFER_COHERENT are supported
PIPE_MAP_PERSISTENT and PIPE_MAP_COHERENT are supported
for buffers.
* ``PIPE_CAP_TEXTURE_QUERY_LOD``: Whether the ``LODQ`` instruction is
supported.
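
For illustration, a minimal sketch of how a helper might key its mapping flags off this cap. The choose_map_flags function is hypothetical; the branch mirrors the u_upload_mgr change later in this commit.

#include "pipe/p_defines.h"
#include "pipe/p_screen.h"

/* Pick map flags for a streaming upload buffer. */
static unsigned
choose_map_flags(struct pipe_screen *screen)
{
   if (screen->get_param(screen, PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT)) {
      /* Keep the buffer mapped for its lifetime; writes become visible to
       * the device without explicit flushes. */
      return PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED |
             PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT;
   }
   /* Otherwise flush written ranges explicitly via transfer_flush_region. */
   return PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED |
          PIPE_MAP_FLUSH_EXPLICIT;
}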

View File

@ -272,7 +272,7 @@ rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
tex = tr_tex->resource;
map = pipe_transfer_map(context, tex,
gptr->level, gptr->face + gptr->zslice,
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
gptr->x, gptr->y, gptr->w, gptr->h, &t);
rbug_send_texture_read_reply(tr_rbug->con, serial,

View File

@ -1440,7 +1440,7 @@ trace_context_transfer_map(struct pipe_context *_context,
*transfer = trace_transfer_create(tr_context, resource, result);
if (map) {
if (usage & PIPE_TRANSFER_WRITE) {
if (usage & PIPE_MAP_WRITE) {
trace_transfer(*transfer)->map = map;
}
}

View File

@ -417,7 +417,7 @@ util_font_create_fixed_8x13(struct pipe_context *pipe,
return FALSE;
}
map = pipe_transfer_map(pipe, tex, 0, 0, PIPE_TRANSFER_WRITE, 0, 0,
map = pipe_transfer_map(pipe, tex, 0, 0, PIPE_MAP_WRITE, 0, 0,
tex->width0, tex->height0, &transfer);
if (!map) {
pipe_resource_reference(&tex, NULL);

View File

@ -130,7 +130,7 @@ util_primconvert_draw_vbo(struct primconvert_context *pc,
src = info->has_user_indices ? info->index.user : NULL;
if (!src) {
src = pipe_buffer_map(pc->pipe, info->index.resource,
PIPE_TRANSFER_READ, &src_transfer);
PIPE_MAP_READ, &src_transfer);
}
src = (const uint8_t *)src;
}

View File

@ -256,7 +256,7 @@ pp_jimenezmlaa_init_run(struct pp_queue_t *ppq, unsigned int n,
u_box_2d(0, 0, 165, 165, &box);
ppq->p->pipe->texture_subdata(ppq->p->pipe, ppq->areamaptex, 0,
PIPE_TRANSFER_WRITE, &box,
PIPE_MAP_WRITE, &box,
areamap, 165 * 2, sizeof(areamap));
ppq->shaders[n][1] = pp_tgsi_to_state(ppq->p->pipe, offsetvs, true,

View File

@ -216,9 +216,9 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
return;
mtx_lock(&fbuf->mutex);
map_sync = !(flags & PIPE_TRANSFER_UNSYNCHRONIZED);
map_sync = !(flags & PIPE_MAP_UNSYNCHRONIZED);
persistent = !map_sync || fbuf->supports_persistent ||
!!(flags & PIPE_TRANSFER_PERSISTENT);
!!(flags & PIPE_MAP_PERSISTENT);
/* Recursive maps are allowed if previous maps are persistent,
* or if the current map is unsync. In other cases we might flush

View File

@ -115,7 +115,7 @@ debug_dump_surface(struct pipe_context *pipe,
data = pipe_transfer_map(pipe, texture, surface->u.tex.level,
surface->u.tex.first_layer,
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
0, 0, surface->width, surface->height, &transfer);
if (!data)
return;
@ -193,7 +193,7 @@ debug_dump_surface_bmp(struct pipe_context *pipe,
void *ptr;
ptr = pipe_transfer_map(pipe, texture, surface->u.tex.level,
surface->u.tex.first_layer, PIPE_TRANSFER_READ,
surface->u.tex.first_layer, PIPE_MAP_READ,
0, 0, surface->width, surface->height, &transfer);
debug_dump_transfer_bmp(pipe, filename, transfer, ptr);

View File

@ -150,7 +150,7 @@ util_draw_indirect(struct pipe_context *pipe,
uint32_t *dc_param = pipe_buffer_map_range(pipe,
info_in->indirect->indirect_draw_count,
info_in->indirect->indirect_draw_count_offset,
4, PIPE_TRANSFER_READ, &dc_transfer);
4, PIPE_MAP_READ, &dc_transfer);
if (!dc_transfer) {
debug_printf("%s: failed to map indirect draw count buffer\n", __FUNCTION__);
return;
@ -167,7 +167,7 @@ util_draw_indirect(struct pipe_context *pipe,
info_in->indirect->buffer,
info_in->indirect->offset,
(num_params * info_in->indirect->draw_count) * sizeof(uint32_t),
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
&transfer);
if (!transfer) {
debug_printf("%s: failed to map indirect buffer\n", __FUNCTION__);

View File

@ -512,16 +512,16 @@ util_dump_query_value_type(FILE *stream, unsigned value)
static const char * const
util_transfer_usage_names[] = {
"PIPE_TRANSFER_READ",
"PIPE_TRANSFER_WRITE",
"PIPE_TRANSFER_MAP_DIRECTLY",
"PIPE_TRANSFER_DISCARD_RANGE",
"PIPE_TRANSFER_DONTBLOCK",
"PIPE_TRANSFER_UNSYNCHRONIZED",
"PIPE_TRANSFER_FLUSH_EXPLICIT",
"PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE",
"PIPE_TRANSFER_PERSISTENT",
"PIPE_TRANSFER_COHERENT",
"PIPE_MAP_READ",
"PIPE_MAP_WRITE",
"PIPE_MAP_DIRECTLY",
"PIPE_MAP_DISCARD_RANGE",
"PIPE_MAP_DONTBLOCK",
"PIPE_MAP_UNSYNCHRONIZED",
"PIPE_MAP_FLUSH_EXPLICIT",
"PIPE_MAP_DISCARD_WHOLE_RESOURCE",
"PIPE_MAP_PERSISTENT",
"PIPE_MAP_COHERENT",
};
DEFINE_UTIL_DUMP_FLAGS_CONTINUOUS(transfer_usage)

View File

@ -43,7 +43,7 @@ void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
in_map = info->index.user;
} else {
in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ |
PIPE_MAP_READ |
add_transfer_flags,
&src_transfer);
}
@ -77,7 +77,7 @@ void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
in_map = info->index.user;
} else {
in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ |
PIPE_MAP_READ |
add_transfer_flags,
&in_transfer);
}
@ -111,7 +111,7 @@ void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
in_map = info->index.user;
} else {
in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ |
PIPE_MAP_READ |
add_transfer_flags,
&in_transfer);
}

View File

@ -321,7 +321,7 @@ pipe_buffer_create_const0(struct pipe_screen *screen,
* Map a range of a resource.
* \param offset start of region, in bytes
* \param length size of region, in bytes
* \param access bitmask of PIPE_TRANSFER_x flags
* \param access bitmask of PIPE_MAP_x flags
* \param transfer returns a transfer object
*/
static inline void *
@ -352,7 +352,7 @@ pipe_buffer_map_range(struct pipe_context *pipe,
/**
* Map whole resource.
* \param access bitmask of PIPE_TRANSFER_x flags
* \param access bitmask of PIPE_MAP_x flags
* \param transfer returns a transfer object
*/
static inline void *
@ -405,7 +405,7 @@ pipe_buffer_write(struct pipe_context *pipe,
const void *data)
{
/* Don't set any other usage bits. Drivers should derive them. */
pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
pipe->buffer_subdata(pipe, buf, PIPE_MAP_WRITE, offset, size, data);
}
/**
@ -421,8 +421,8 @@ pipe_buffer_write_nooverlap(struct pipe_context *pipe,
const void *data)
{
pipe->buffer_subdata(pipe, buf,
(PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_UNSYNCHRONIZED),
(PIPE_MAP_WRITE |
PIPE_MAP_UNSYNCHRONIZED),
offset, size, data);
}
@ -458,7 +458,7 @@ pipe_buffer_read(struct pipe_context *pipe,
map = (ubyte *) pipe_buffer_map_range(pipe,
buf,
offset, size,
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
&src_transfer);
if (!map)
return;
@ -470,7 +470,7 @@ pipe_buffer_read(struct pipe_context *pipe,
/**
* Map a resource for reading/writing.
* \param access bitmask of PIPE_TRANSFER_x flags
* \param access bitmask of PIPE_MAP_x flags
*/
static inline void *
pipe_transfer_map(struct pipe_context *context,
@ -493,7 +493,7 @@ pipe_transfer_map(struct pipe_context *context,
/**
* Map a 3D (texture) resource for reading/writing.
* \param access bitmask of PIPE_TRANSFER_x flags
* \param access bitmask of PIPE_MAP_x flags
*/
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
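
A hedged usage sketch for the texture-mapping helper documented above. The read_first_row function is hypothetical; pipe_transfer_map and pipe_transfer_unmap are the real helpers.

#include <string.h>
#include "util/u_inlines.h"

/* Read back the first row of level 0 of a 2D texture. */
static void
read_first_row(struct pipe_context *pipe, struct pipe_resource *tex,
               void *dst, unsigned row_bytes)
{
   struct pipe_transfer *xfer;
   void *map = pipe_transfer_map(pipe, tex, 0 /* level */, 0 /* layer */,
                                 PIPE_MAP_READ,
                                 0, 0, tex->width0, tex->height0, &xfer);
   if (!map)
      return;

   memcpy(dst, map, row_bytes); /* the pitch for later rows is xfer->stride */
   pipe_transfer_unmap(pipe, xfer);
}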

View File

@ -49,7 +49,7 @@ read_indirect_elements(struct pipe_context *context, struct pipe_draw_indirect_i
map = pipe_buffer_map_range(context, indirect->buffer,
indirect->offset,
read_size,
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
&transfer);
assert(map);
memcpy(&ret, map, read_size);
@ -129,7 +129,7 @@ util_translate_prim_restart_ib(struct pipe_context *context,
/* Map new / dest index buffer */
dst_map = pipe_buffer_map(context, *dst_buffer,
PIPE_TRANSFER_WRITE, &dst_transfer);
PIPE_MAP_WRITE, &dst_transfer);
if (!dst_map)
goto error;
@ -140,7 +140,7 @@ util_translate_prim_restart_ib(struct pipe_context *context,
src_map = pipe_buffer_map_range(context, info->index.resource,
start * src_index_size,
count * src_index_size,
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
&src_transfer);
if (!src_map)
goto error;
@ -248,7 +248,7 @@ util_draw_vbo_without_prim_restart(struct pipe_context *context,
src_map = pipe_buffer_map_range(context, info->index.resource,
info_start * info->index_size,
info_count * info->index_size,
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
&src_transfer);
if (!src_map) {
return PIPE_ERROR_OUT_OF_MEMORY;

View File

@ -70,7 +70,7 @@ util_pstipple_update_stipple_texture(struct pipe_context *pipe,
/* map texture memory */
data = pipe_transfer_map(pipe, tex, 0, 0,
PIPE_TRANSFER_WRITE, 0, 0, 32, 32, &transfer);
PIPE_MAP_WRITE, 0, 0, 32, 32, &transfer);
/*
* Load alpha texture.

View File

@ -131,7 +131,7 @@ u_suballocator_alloc(struct u_suballocator *allocator, unsigned size,
} else {
struct pipe_transfer *transfer = NULL;
void *ptr = pipe_buffer_map(pipe, allocator->buffer,
PIPE_TRANSFER_WRITE, &transfer);
PIPE_MAP_WRITE, &transfer);
memset(ptr, 0, allocator->size);
pipe_buffer_unmap(pipe, transfer);
}

View File

@ -285,7 +285,7 @@ util_resource_copy_region(struct pipe_context *pipe,
src_map = pipe->transfer_map(pipe,
src,
src_level,
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
&src_box, &src_trans);
assert(src_map);
if (!src_map) {
@ -295,8 +295,8 @@ util_resource_copy_region(struct pipe_context *pipe,
dst_map = pipe->transfer_map(pipe,
dst,
dst_level,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE, &dst_box,
PIPE_MAP_WRITE |
PIPE_MAP_DISCARD_RANGE, &dst_box,
&dst_trans);
assert(dst_map);
if (!dst_map) {
@ -358,7 +358,7 @@ util_clear_color_texture(struct pipe_context *pipe,
dst_map = pipe_transfer_map_3d(pipe,
texture,
level,
PIPE_TRANSFER_WRITE,
PIPE_MAP_WRITE,
dstx, dsty, dstz,
width, height, depth,
&dst_trans);
@ -410,7 +410,7 @@ util_clear_render_target(struct pipe_context *pipe,
dst_map = pipe_transfer_map(pipe,
dst->texture,
0, 0,
PIPE_TRANSFER_WRITE,
PIPE_MAP_WRITE,
dx, 0, w, 1,
&dst_trans);
if (dst_map) {
@ -561,8 +561,8 @@ util_clear_depth_stencil_texture(struct pipe_context *pipe,
dst_map = pipe_transfer_map_3d(pipe,
texture,
level,
(need_rmw ? PIPE_TRANSFER_READ_WRITE :
PIPE_TRANSFER_WRITE),
(need_rmw ? PIPE_MAP_READ_WRITE :
PIPE_MAP_WRITE),
dstx, dsty, dstz,
width, height, depth, &dst_trans);
assert(dst_map);

View File

@ -227,7 +227,7 @@ util_probe_rect_rgba_multi(struct pipe_context *ctx, struct pipe_resource *tex,
unsigned x,y,e,c;
bool pass = true;
map = pipe_transfer_map(ctx, tex, 0, 0, PIPE_TRANSFER_READ,
map = pipe_transfer_map(ctx, tex, 0, 0, PIPE_MAP_READ,
offx, offy, w, h, &transfer);
pipe_get_tile_rgba(transfer, map, 0, 0, w, h, tex->format, pixels);
pipe_transfer_unmap(ctx, transfer);

View File

@ -1359,17 +1359,17 @@ tc_improve_map_buffer_flags(struct threaded_context *tc,
return usage;
/* Use the staging upload if it's preferred. */
if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
!(usage & PIPE_TRANSFER_PERSISTENT) &&
if (usage & (PIPE_MAP_DISCARD_RANGE |
PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
!(usage & PIPE_MAP_PERSISTENT) &&
/* Try not to decrement the counter if it's not positive. Still racy,
* but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
tres->max_forced_staging_uploads > 0 &&
p_atomic_dec_return(&tres->max_forced_staging_uploads) >= 0) {
usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
PIPE_TRANSFER_UNSYNCHRONIZED);
usage &= ~(PIPE_MAP_DISCARD_WHOLE_RESOURCE |
PIPE_MAP_UNSYNCHRONIZED);
return usage | tc_flags | PIPE_TRANSFER_DISCARD_RANGE;
return usage | tc_flags | PIPE_MAP_DISCARD_RANGE;
}
/* Sparse buffers can't be mapped directly and can't be reallocated
@ -1380,8 +1380,8 @@ tc_improve_map_buffer_flags(struct threaded_context *tc,
/* We can use DISCARD_RANGE instead of full discard. This is the only
* fast path for sparse buffers that doesn't need thread synchronization.
*/
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
usage |= PIPE_TRANSFER_DISCARD_RANGE;
if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE)
usage |= PIPE_MAP_DISCARD_RANGE;
/* Allow DISCARD_WHOLE_RESOURCE and inferring UNSYNCHRONIZED in drivers.
* The threaded context doesn't do unsynchronized mappings and invalida-
@ -1394,50 +1394,50 @@ tc_improve_map_buffer_flags(struct threaded_context *tc,
usage |= tc_flags;
/* Handle CPU reads trivially. */
if (usage & PIPE_TRANSFER_READ) {
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
if (usage & PIPE_MAP_READ) {
if (usage & PIPE_MAP_UNSYNCHRONIZED)
usage |= TC_TRANSFER_MAP_THREADED_UNSYNC; /* don't sync */
/* Drivers aren't allowed to do buffer invalidations. */
return usage & ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
return usage & ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
/* See if the buffer range being mapped has never been initialized,
* in which case it can be mapped unsynchronized. */
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
!tres->is_shared &&
!util_ranges_intersect(&tres->valid_buffer_range, offset, offset + size))
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
/* If discarding the entire range, discard the whole resource instead. */
if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
if (usage & PIPE_MAP_DISCARD_RANGE &&
offset == 0 && size == tres->b.width0)
usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
/* Discard the whole resource if needed. */
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
if (tc_invalidate_buffer(tc, tres))
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
else
usage |= PIPE_TRANSFER_DISCARD_RANGE; /* fallback */
usage |= PIPE_MAP_DISCARD_RANGE; /* fallback */
}
}
/* We won't need this flag anymore. */
/* TODO: We might not need TC_TRANSFER_MAP_NO_INVALIDATE with this. */
usage &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
usage &= ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
/* GL_AMD_pinned_memory and persistent mappings can't use staging
* buffers. */
if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_PERSISTENT) ||
if (usage & (PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_PERSISTENT) ||
tres->is_user_ptr)
usage &= ~PIPE_TRANSFER_DISCARD_RANGE;
usage &= ~PIPE_MAP_DISCARD_RANGE;
/* Unsynchronized buffer mappings don't have to synchronize the thread. */
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
usage &= ~PIPE_TRANSFER_DISCARD_RANGE;
if (usage & PIPE_MAP_UNSYNCHRONIZED) {
usage &= ~PIPE_MAP_DISCARD_RANGE;
usage |= TC_TRANSFER_MAP_THREADED_UNSYNC; /* notify the driver */
}
@ -1460,7 +1460,7 @@ tc_transfer_map(struct pipe_context *_pipe,
/* Do a staging transfer within the threaded context. The driver should
* only get resource_copy_region.
*/
if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
if (usage & PIPE_MAP_DISCARD_RANGE) {
struct threaded_transfer *ttrans = slab_alloc(&tc->pool_transfers);
uint8_t *map;
@ -1488,8 +1488,8 @@ tc_transfer_map(struct pipe_context *_pipe,
/* Unsynchronized buffer mappings don't have to synchronize the thread. */
if (!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC))
tc_sync_msg(tc, resource->target != PIPE_BUFFER ? " texture" :
usage & PIPE_TRANSFER_DISCARD_RANGE ? " discard_range" :
usage & PIPE_TRANSFER_READ ? " read" : " ??");
usage & PIPE_MAP_DISCARD_RANGE ? " discard_range" :
usage & PIPE_MAP_READ ? " read" : " ??");
tc->bytes_mapped_estimate += box->width;
@ -1559,8 +1559,8 @@ tc_transfer_flush_region(struct pipe_context *_pipe,
struct threaded_context *tc = threaded_context(_pipe);
struct threaded_transfer *ttrans = threaded_transfer(transfer);
struct threaded_resource *tres = threaded_resource(transfer->resource);
unsigned required_usage = PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_FLUSH_EXPLICIT;
unsigned required_usage = PIPE_MAP_WRITE |
PIPE_MAP_FLUSH_EXPLICIT;
if (tres->b.target == PIPE_BUFFER) {
if ((transfer->usage & required_usage) == required_usage) {
@ -1599,13 +1599,13 @@ tc_transfer_unmap(struct pipe_context *_pipe, struct pipe_transfer *transfer)
struct threaded_transfer *ttrans = threaded_transfer(transfer);
struct threaded_resource *tres = threaded_resource(transfer->resource);
/* PIPE_TRANSFER_THREAD_SAFE is only valid with UNSYNCHRONIZED. It can be
/* PIPE_MAP_THREAD_SAFE is only valid with UNSYNCHRONIZED. It can be
* called from any thread and bypasses all multithreaded queues.
*/
if (transfer->usage & PIPE_TRANSFER_THREAD_SAFE) {
assert(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED);
assert(!(transfer->usage & (PIPE_TRANSFER_FLUSH_EXPLICIT |
PIPE_TRANSFER_DISCARD_RANGE)));
if (transfer->usage & PIPE_MAP_THREAD_SAFE) {
assert(transfer->usage & PIPE_MAP_UNSYNCHRONIZED);
assert(!(transfer->usage & (PIPE_MAP_FLUSH_EXPLICIT |
PIPE_MAP_DISCARD_RANGE)));
struct pipe_context *pipe = tc->pipe;
pipe->transfer_unmap(pipe, transfer);
@ -1615,8 +1615,8 @@ tc_transfer_unmap(struct pipe_context *_pipe, struct pipe_transfer *transfer)
}
if (tres->b.target == PIPE_BUFFER) {
if (transfer->usage & PIPE_TRANSFER_WRITE &&
!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
if (transfer->usage & PIPE_MAP_WRITE &&
!(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
tc_buffer_do_flush_region(tc, ttrans, &transfer->box);
/* Staging transfers don't send the call to the driver. */
@ -1669,19 +1669,19 @@ tc_buffer_subdata(struct pipe_context *_pipe,
if (!size)
return;
usage |= PIPE_TRANSFER_WRITE;
usage |= PIPE_MAP_WRITE;
/* PIPE_TRANSFER_MAP_DIRECTLY suppresses implicit DISCARD_RANGE. */
if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY))
usage |= PIPE_TRANSFER_DISCARD_RANGE;
/* PIPE_MAP_DIRECTLY suppresses implicit DISCARD_RANGE. */
if (!(usage & PIPE_MAP_DIRECTLY))
usage |= PIPE_MAP_DISCARD_RANGE;
usage = tc_improve_map_buffer_flags(tc, tres, usage, offset, size);
/* Unsynchronized and big transfers should use transfer_map. Also handle
* full invalidations, because drivers aren't allowed to do them.
*/
if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) ||
if (usage & (PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_DISCARD_WHOLE_RESOURCE) ||
size > TC_MAX_SUBDATA_BYTES) {
struct pipe_transfer *transfer;
struct pipe_box box;

View File

@ -85,9 +85,9 @@
* Transfer_map rules for buffer mappings
* --------------------------------------
*
* 1) If transfer_map has PIPE_TRANSFER_UNSYNCHRONIZED, the call is made
* 1) If transfer_map has PIPE_MAP_UNSYNCHRONIZED, the call is made
* in the non-driver thread without flushing the queue. The driver will
* receive TC_TRANSFER_MAP_THREADED_UNSYNC in addition to PIPE_TRANSFER_-
* receive TC_TRANSFER_MAP_THREADED_UNSYNC in addition to PIPE_MAP_-
* UNSYNCHRONIZED to indicate this.
* Note that transfer_unmap is always enqueued and called from the driver
* thread.
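
A small sketch of the rule above, assuming a driver that only wants to know whether its transfer_map is allowed to wait on the GPU. The map_may_stall helper is hypothetical; TC_TRANSFER_MAP_THREADED_UNSYNC and PIPE_MAP_UNSYNCHRONIZED are the real flags.

#include <stdbool.h>
#include "pipe/p_defines.h"
#include "util/u_threaded_context.h"

/* Unsynchronized maps, including the threaded-unsync case forwarded from
 * the application thread, must never wait for the GPU. */
static bool
map_may_stall(unsigned usage)
{
   return !(usage & (PIPE_MAP_UNSYNCHRONIZED |
                     TC_TRANSFER_MAP_THREADED_UNSYNC));
}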

View File

@ -13,19 +13,19 @@ void u_default_buffer_subdata(struct pipe_context *pipe,
struct pipe_box box;
uint8_t *map = NULL;
assert(!(usage & PIPE_TRANSFER_READ));
assert(!(usage & PIPE_MAP_READ));
/* the write flag is implicit by the nature of buffer_subdata */
usage |= PIPE_TRANSFER_WRITE;
usage |= PIPE_MAP_WRITE;
/* buffer_subdata implicitly discards the rewritten buffer range.
* PIPE_TRANSFER_MAP_DIRECTLY suppresses that.
* PIPE_MAP_DIRECTLY suppresses that.
*/
if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
if (!(usage & PIPE_MAP_DIRECTLY)) {
if (offset == 0 && size == resource->width0) {
usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
} else {
usage |= PIPE_TRANSFER_DISCARD_RANGE;
usage |= PIPE_MAP_DISCARD_RANGE;
}
}
@ -52,13 +52,13 @@ void u_default_texture_subdata(struct pipe_context *pipe,
const uint8_t *src_data = data;
uint8_t *map = NULL;
assert(!(usage & PIPE_TRANSFER_READ));
assert(!(usage & PIPE_MAP_READ));
/* the write flag is implicit by the nature of texture_subdata */
usage |= PIPE_TRANSFER_WRITE;
usage |= PIPE_MAP_WRITE;
/* texture_subdata implicitly discards the rewritten buffer range */
usage |= PIPE_TRANSFER_DISCARD_RANGE;
usage |= PIPE_MAP_DISCARD_RANGE;
map = pipe->transfer_map(pipe,
resource,

View File

@ -148,8 +148,8 @@ u_transfer_helper_resource_destroy(struct pipe_screen *pscreen,
static bool needs_pack(unsigned usage)
{
return (usage & PIPE_TRANSFER_READ) &&
!(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE | PIPE_TRANSFER_DISCARD_RANGE));
return (usage & PIPE_MAP_READ) &&
!(usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_DISCARD_RANGE));
}
/* In the case of transfer_map of a multi-sample resource, call back into
@ -358,7 +358,7 @@ flush_region(struct pipe_context *pctx, struct pipe_transfer *ptrans,
unsigned height = box->height;
void *src, *dst;
if (!(ptrans->usage & PIPE_TRANSFER_WRITE))
if (!(ptrans->usage & PIPE_MAP_WRITE))
return;
if (trans->ss) {
@ -495,7 +495,7 @@ u_transfer_helper_transfer_unmap(struct pipe_context *pctx,
if (handle_transfer(ptrans->resource)) {
struct u_transfer *trans = u_transfer(ptrans);
if (!(ptrans->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
if (!(ptrans->usage & PIPE_MAP_FLUSH_EXPLICIT)) {
struct pipe_box box;
u_box_2d(0, 0, ptrans->box.width, ptrans->box.height, &box);
flush_region(pctx, ptrans, &box);
@ -589,13 +589,13 @@ u_transfer_helper_deinterleave_transfer_map(struct pipe_context *pctx,
if (!trans->staging)
goto fail;
trans->ptr = helper->vtbl->transfer_map(pctx, prsc, level, usage | PIPE_TRANSFER_DEPTH_ONLY, box,
trans->ptr = helper->vtbl->transfer_map(pctx, prsc, level, usage | PIPE_MAP_DEPTH_ONLY, box,
&trans->trans);
if (!trans->ptr)
goto fail;
trans->ptr2 = helper->vtbl->transfer_map(pctx, prsc, level,
usage | PIPE_TRANSFER_STENCIL_ONLY, box, &trans->trans2);
usage | PIPE_MAP_STENCIL_ONLY, box, &trans->trans2);
if (needs_pack(usage)) {
switch (prsc->format) {
case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
@ -649,7 +649,7 @@ u_transfer_helper_deinterleave_transfer_unmap(struct pipe_context *pctx,
(format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT && helper->separate_z32s8)) {
struct u_transfer *trans = (struct u_transfer *)ptrans;
if (!(ptrans->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
if (!(ptrans->usage & PIPE_MAP_FLUSH_EXPLICIT)) {
struct pipe_box box;
u_box_2d(0, 0, ptrans->box.width, ptrans->box.height, &box);
flush_region(pctx, ptrans, &box);

View File

@ -45,7 +45,7 @@ struct u_upload_mgr {
unsigned bind; /* Bitmask of PIPE_BIND_* flags. */
enum pipe_resource_usage usage;
unsigned flags;
unsigned map_flags; /* Bitmask of PIPE_TRANSFER_* flags. */
unsigned map_flags; /* Bitmask of PIPE_MAP_* flags. */
boolean map_persistent; /* If persistent mappings are supported. */
struct pipe_resource *buffer; /* Upload buffer. */
@ -77,15 +77,15 @@ u_upload_create(struct pipe_context *pipe, unsigned default_size,
PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT);
if (upload->map_persistent) {
upload->map_flags = PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_PERSISTENT |
PIPE_TRANSFER_COHERENT;
upload->map_flags = PIPE_MAP_WRITE |
PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_PERSISTENT |
PIPE_MAP_COHERENT;
}
else {
upload->map_flags = PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_FLUSH_EXPLICIT;
upload->map_flags = PIPE_MAP_WRITE |
PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_FLUSH_EXPLICIT;
}
return upload;
@ -110,7 +110,7 @@ u_upload_clone(struct pipe_context *pipe, struct u_upload_mgr *upload)
if (!upload->map_persistent && result->map_persistent)
u_upload_disable_persistent(result);
else if (upload->map_persistent &&
upload->map_flags & PIPE_TRANSFER_FLUSH_EXPLICIT)
upload->map_flags & PIPE_MAP_FLUSH_EXPLICIT)
u_upload_enable_flush_explicit(result);
return result;
@ -120,16 +120,16 @@ void
u_upload_enable_flush_explicit(struct u_upload_mgr *upload)
{
assert(upload->map_persistent);
upload->map_flags &= ~PIPE_TRANSFER_COHERENT;
upload->map_flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;
upload->map_flags &= ~PIPE_MAP_COHERENT;
upload->map_flags |= PIPE_MAP_FLUSH_EXPLICIT;
}
void
u_upload_disable_persistent(struct u_upload_mgr *upload)
{
upload->map_persistent = FALSE;
upload->map_flags &= ~(PIPE_TRANSFER_COHERENT | PIPE_TRANSFER_PERSISTENT);
upload->map_flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;
upload->map_flags &= ~(PIPE_MAP_COHERENT | PIPE_MAP_PERSISTENT);
upload->map_flags |= PIPE_MAP_FLUSH_EXPLICIT;
}
static void
@ -138,7 +138,7 @@ upload_unmap_internal(struct u_upload_mgr *upload, boolean destroying)
if (!upload->transfer)
return;
if (upload->map_flags & PIPE_TRANSFER_FLUSH_EXPLICIT) {
if (upload->map_flags & PIPE_MAP_FLUSH_EXPLICIT) {
struct pipe_box *box = &upload->transfer->box;
unsigned flush_offset = box->x + upload->flushed_size;

View File

@ -461,7 +461,7 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
}
map = pipe_buffer_map_range(mgr->pipe, vb->buffer.resource, offset, size,
PIPE_TRANSFER_READ, &vb_transfer[i]);
PIPE_MAP_READ, &vb_transfer[i]);
}
/* Subtract min_index so that indexing with the index buffer works. */
@ -491,7 +491,7 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
} else {
map = pipe_buffer_map_range(mgr->pipe, info->index.resource, offset,
info->count * info->index_size,
PIPE_TRANSFER_READ, &transfer);
PIPE_MAP_READ, &transfer);
}
switch (info->index_size) {
@ -1228,7 +1228,7 @@ void u_vbuf_get_minmax_index(struct pipe_context *pipe,
indices = pipe_buffer_map_range(pipe, info->index.resource,
info->start * info->index_size,
info->count * info->index_size,
PIPE_TRANSFER_READ, &transfer);
PIPE_MAP_READ, &transfer);
}
u_vbuf_get_minmax_index_mapped(info, indices, out_min_index, out_max_index);
@ -1386,7 +1386,7 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
indices = (uint8_t*)info->index.user;
} else {
indices = (uint8_t*)pipe_buffer_map(pipe, info->index.resource,
PIPE_TRANSFER_READ, &transfer);
PIPE_MAP_READ, &transfer);
}
for (unsigned i = 0; i < draw_count; i++) {

View File

@ -475,7 +475,7 @@ vl_compositor_set_csc_matrix(struct vl_compositor_state *s,
assert(s);
float *ptr = pipe_buffer_map(s->pipe, s->shader_params,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buf_transfer);
if (!ptr)

View File

@ -654,7 +654,7 @@ set_viewport(struct vl_compositor_state *s,
assert(s && drawn);
void *ptr = pipe_buffer_map(s->pipe, s->shader_params,
PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE,
PIPE_MAP_READ | PIPE_MAP_WRITE,
&buf_transfer);
if (!ptr)

View File

@ -711,8 +711,8 @@ vl_idct_upload_matrix(struct pipe_context *pipe, float scale)
goto error_matrix;
f = pipe->transfer_map(pipe, matrix, 0,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE,
PIPE_MAP_WRITE |
PIPE_MAP_DISCARD_RANGE,
&rect, &buf_transfer);
if (!f)
goto error_map;

View File

@ -629,8 +629,8 @@ vl_mpeg12_begin_frame(struct pipe_video_codec *decoder,
buf->texels =
dec->context->transfer_map(dec->context, tex, 0,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE,
PIPE_MAP_WRITE |
PIPE_MAP_DISCARD_RANGE,
&rect, &buf->tex_transfer);
buf->block_num = 0;

View File

@ -66,7 +66,7 @@ vl_vb_upload_quads(struct pipe_context *pipe)
(
pipe,
quad.buffer.resource,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buf_transfer
);
@ -111,7 +111,7 @@ vl_vb_upload_pos(struct pipe_context *pipe, unsigned width, unsigned height)
(
pipe,
pos.buffer.resource,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buf_transfer
);
@ -301,7 +301,7 @@ vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
(
pipe,
buffer->ycbcr[i].resource,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buffer->ycbcr[i].transfer
);
}
@ -311,7 +311,7 @@ vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
(
pipe,
buffer->mv[i].resource,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buffer->mv[i].transfer
);
}

View File

@ -410,7 +410,7 @@ vl_zscan_layout(struct pipe_context *pipe, const int layout[64], unsigned blocks
goto error_resource;
f = pipe->transfer_map(pipe, res,
0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
0, PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&rect, &buf_transfer);
if (!f)
goto error_map;
@ -576,8 +576,8 @@ vl_zscan_upload_quant(struct vl_zscan *zscan, struct vl_zscan_buffer *buffer,
rect.width *= zscan->blocks_per_line;
data = pipe->transfer_map(pipe, buffer->quant->texture,
0, PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE,
0, PIPE_MAP_WRITE |
PIPE_MAP_DISCARD_RANGE,
&rect, &buf_transfer);
if (!data)
return;

View File

@ -123,7 +123,7 @@ etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
if (trans->rsc)
etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);
if (ptrans->usage & PIPE_TRANSFER_WRITE) {
if (ptrans->usage & PIPE_MAP_WRITE) {
if (trans->rsc) {
/* We have a temporary resource due to either tile status or
* tiling format. Write back the updated buffer contents.
@ -171,11 +171,11 @@ etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
* are not mapped unsynchronized. If they are, must push them back into GPU
* domain after CPU access is finished.
*/
if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
if (!trans->rsc && !(ptrans->usage & PIPE_MAP_UNSYNCHRONIZED))
etna_bo_cpu_fini(rsc->bo);
if ((ptrans->resource->target == PIPE_BUFFER) &&
(ptrans->usage & PIPE_TRANSFER_WRITE)) {
(ptrans->usage & PIPE_MAP_WRITE)) {
util_range_add(&rsc->base,
&rsc->valid_buffer_range,
ptrans->box.x,
@ -211,26 +211,26 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
/*
* Upgrade to UNSYNCHRONIZED if target is PIPE_BUFFER and range is uninitialized.
*/
if ((usage & PIPE_TRANSFER_WRITE) &&
if ((usage & PIPE_MAP_WRITE) &&
(prsc->target == PIPE_BUFFER) &&
!util_ranges_intersect(&rsc->valid_buffer_range,
box->x,
box->x + box->width)) {
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
}
/* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
* being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
* check needs to be extended to coherent mappings and shared resources.
*/
if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
if ((usage & PIPE_MAP_DISCARD_RANGE) &&
!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
prsc->last_level == 0 &&
prsc->width0 == box->width &&
prsc->height0 == box->height &&
prsc->depth0 == box->depth &&
prsc->array_size == 1) {
usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
ptrans = &trans->base;
@ -268,7 +268,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
* depth buffer, filling in the "holes" where the tile status
* indicates that it's clear. We also do this for tiled
* resources, but only if the RS can blit them. */
if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
if (usage & PIPE_MAP_DIRECTLY) {
slab_free(&ctx->transfer_pool, trans);
BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
return NULL;
@ -313,7 +313,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
ptrans->box.height = align(ptrans->box.height, ETNA_RS_HEIGHT_MASK + 1);
}
if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
if (!(usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE))
etna_copy_resource_box(pctx, trans->rsc, &rsc->base, level, &ptrans->box);
/* Switch to using the temporary resource instead */
@ -322,7 +322,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
struct etna_resource_level *res_level = &rsc->levels[level];
/* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be ignored
/* XXX we don't handle PIPE_MAP_FLUSH_EXPLICIT; this flag can be ignored
* when mapping in-place,
* but when not in place we need to fire off the copy operation in
* transfer_flush_region (currently
@ -345,7 +345,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
pipeline?
Is it necessary at all? Only in case we want to provide a fast path and
map the resource directly
(and for PIPE_TRANSFER_MAP_DIRECTLY) and we don't want to force a sync.
(and for PIPE_MAP_DIRECTLY) and we don't want to force a sync.
We also need to know whether the resource is in use to determine if a sync
is needed (or just do it
always, but that comes at the expense of performance).
@ -356,8 +356,8 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
resources that have
been bound but are no longer in use for a while still carry a performance
penalty. On the other hand,
the program could be using PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or
PIPE_TRANSFER_UNSYNCHRONIZED to
the program could be using PIPE_MAP_DISCARD_WHOLE_RESOURCE or
PIPE_MAP_UNSYNCHRONIZED to
avoid this in the first place...
A) We use an in-pipe copy engine, and queue the copy operation after unmap
@ -365,18 +365,18 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
will be performed when all current commands have been executed.
Using the RS is possible, not sure if always efficient. This can also
do any kind of tiling for us.
Only possible when PIPE_TRANSFER_DISCARD_RANGE is set.
Only possible when PIPE_MAP_DISCARD_RANGE is set.
B) We discard the entire resource (or at least, the mipmap level) and
allocate new memory for it.
Only possible when mapping the entire resource or
PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
PIPE_MAP_DISCARD_WHOLE_RESOURCE is set.
*/
/*
* Pull resources into the CPU domain. Only skipped for unsynchronized
* transfers without a temporary resource.
*/
if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (trans->rsc || !(usage & PIPE_MAP_UNSYNCHRONIZED)) {
uint32_t prep_flags = 0;
/*
@ -389,8 +389,8 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
(!trans->rsc &&
(((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
((usage & PIPE_TRANSFER_WRITE) && rsc->status)))) {
(((usage & PIPE_MAP_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
((usage & PIPE_MAP_WRITE) && rsc->status)))) {
mtx_lock(&rsc->lock);
set_foreach(rsc->pending_ctx, entry) {
struct etna_context *pend_ctx = (struct etna_context *)entry->key;
@ -403,9 +403,9 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
mtx_unlock(&ctx->lock);
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
prep_flags |= DRM_ETNA_PREP_READ;
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
prep_flags |= DRM_ETNA_PREP_WRITE;
/*
@ -413,7 +413,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
* get written even on read-only transfers. This blocks the GPU to sample
* from this resource.
*/
if ((usage & PIPE_TRANSFER_READ) && etna_etc2_needs_patching(prsc))
if ((usage & PIPE_MAP_READ) && etna_etc2_needs_patching(prsc))
prep_flags |= DRM_ETNA_PREP_WRITE;
if (etna_bo_cpu_prep(rsc->bo, prep_flags))
@ -436,7 +436,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
res_level->layer_stride);
/* We need to have the unpatched data ready for the gfx stack. */
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
etna_unpatch_data(trans->mapped, ptrans);
return trans->mapped;
@ -447,7 +447,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
/* No direct mappings of tiled, since we need to manually
* tile/untile.
*/
if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
if (usage & PIPE_MAP_DIRECTLY)
goto fail;
trans->mapped += res_level->offset;
@ -459,7 +459,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
if (!trans->staging)
goto fail;
if (usage & PIPE_TRANSFER_READ) {
if (usage & PIPE_MAP_READ) {
if (rsc->layout == ETNA_LAYOUT_TILED) {
for (unsigned z = 0; z < ptrans->box.depth; z++) {
etna_texture_untile(trans->staging + z * ptrans->layer_stride,

View File

@ -524,7 +524,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
fd_batch_reference_locked(&write_batch, rsc->write_batch);
fd_screen_unlock(ctx->screen);
if (usage & PIPE_TRANSFER_WRITE) {
if (usage & PIPE_MAP_WRITE) {
struct fd_batch *batch, *batches[32] = {};
uint32_t batch_mask;
@ -558,7 +558,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_MAP_READ);
}
static void
@ -570,12 +570,12 @@ fd_resource_transfer_unmap(struct pipe_context *pctx,
struct fd_transfer *trans = fd_transfer(ptrans);
if (trans->staging_prsc) {
if (ptrans->usage & PIPE_TRANSFER_WRITE)
if (ptrans->usage & PIPE_MAP_WRITE)
fd_blit_from_staging(ctx, trans);
pipe_resource_reference(&trans->staging_prsc, NULL);
}
if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (!(ptrans->usage & PIPE_MAP_UNSYNCHRONIZED)) {
fd_bo_cpu_fini(rsc->bo);
}
@ -607,7 +607,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
box->width, box->height, box->x, box->y);
if ((usage & PIPE_TRANSFER_MAP_DIRECTLY) && rsc->layout.tile_mode) {
if ((usage & PIPE_MAP_DIRECTLY) && rsc->layout.tile_mode) {
DBG("CANNOT MAP DIRECTLY!\n");
return NULL;
}
@ -638,7 +638,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
if (staging_rsc) {
// TODO for PIPE_TRANSFER_READ, need to do untiling blit..
// TODO for PIPE_MAP_READ, need to do untiling blit..
trans->staging_prsc = &staging_rsc->base;
trans->base.stride = fd_resource_pitch(staging_rsc, 0);
trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
@ -647,7 +647,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
trans->staging_box.y = 0;
trans->staging_box.z = 0;
if (usage & PIPE_TRANSFER_READ) {
if (usage & PIPE_MAP_READ) {
fd_blit_to_staging(ctx, trans);
fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
@ -665,30 +665,30 @@ fd_resource_transfer_map(struct pipe_context *pctx,
}
}
if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
if (ctx->in_shadow && !(usage & PIPE_MAP_READ))
usage |= PIPE_MAP_UNSYNCHRONIZED;
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
op |= DRM_FREEDRENO_PREP_READ;
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
op |= DRM_FREEDRENO_PREP_WRITE;
bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
bool needs_flush = pending(rsc, !!(usage & PIPE_MAP_WRITE));
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
if (needs_flush || fd_resource_busy(rsc, op)) {
rebind_resource(rsc);
realloc_bo(rsc, fd_bo_size(rsc->bo));
}
} else if ((usage & PIPE_TRANSFER_WRITE) &&
} else if ((usage & PIPE_MAP_WRITE) &&
prsc->target == PIPE_BUFFER &&
!util_ranges_intersect(&rsc->valid_buffer_range,
box->x, box->x + box->width)) {
/* We are trying to write to a previously uninitialized range. No need
* to wait.
*/
} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
} else if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
struct fd_batch *write_batch = NULL;
/* hold a reference, so it doesn't disappear under us: */
@ -696,7 +696,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
fd_batch_reference_locked(&write_batch, rsc->write_batch);
fd_context_unlock(ctx);
if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
if ((usage & PIPE_MAP_WRITE) && write_batch &&
write_batch->back_blit) {
/* if only thing pending is a back-blit, we can discard it: */
fd_batch_reset(write_batch);
@ -714,8 +714,8 @@ fd_resource_transfer_map(struct pipe_context *pctx,
* ie. we only *don't* want to go down this path if the blit
* will trigger a flush!
*/
if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
(usage & PIPE_TRANSFER_DISCARD_RANGE)) {
if (ctx->screen->reorder && busy && !(usage & PIPE_MAP_READ) &&
(usage & PIPE_MAP_DISCARD_RANGE)) {
/* try shadowing only if it avoids a flush, otherwise staging would
* be better:
*/
@ -784,7 +784,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
fd_resource_offset(rsc, level, box->z);
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
rsc->valid = true;
*pptrans = ptrans;

View File

@ -744,8 +744,8 @@ i915_texture_transfer_map(struct pipe_context *pipe,
* because we need that for u_blitter */
if (i915->blitter &&
util_blitter_is_copy_supported(i915->blitter, resource, resource) &&
(usage & PIPE_TRANSFER_WRITE) &&
!(usage & (PIPE_TRANSFER_READ | PIPE_TRANSFER_DONTBLOCK | PIPE_TRANSFER_UNSYNCHRONIZED)))
(usage & PIPE_MAP_WRITE) &&
!(usage & (PIPE_MAP_READ | PIPE_MAP_DONTBLOCK | PIPE_MAP_UNSYNCHRONIZED)))
use_staging_texture = TRUE;
use_staging_texture = FALSE;
@ -773,7 +773,7 @@ i915_texture_transfer_map(struct pipe_context *pipe,
offset = i915_texture_offset(tex, transfer->b.level, box->z);
map = iws->buffer_map(iws, tex->buffer,
(transfer->b.usage & PIPE_TRANSFER_WRITE) ? TRUE : FALSE);
(transfer->b.usage & PIPE_MAP_WRITE) ? TRUE : FALSE);
if (!map) {
pipe_resource_reference(&transfer->staging_texture, NULL);
FREE(transfer);
@ -802,7 +802,7 @@ i915_texture_transfer_unmap(struct pipe_context *pipe,
iws->buffer_unmap(iws, tex->buffer);
if ((itransfer->staging_texture) &&
(transfer->usage & PIPE_TRANSFER_WRITE)) {
(transfer->usage & PIPE_MAP_WRITE)) {
struct pipe_box sbox;
u_box_origin_2d(itransfer->b.box.width, itransfer->b.box.height, &sbox);

View File

@ -292,11 +292,11 @@ iris_bo_reference(struct iris_bo *bo)
*/
void iris_bo_unreference(struct iris_bo *bo);
#define MAP_READ PIPE_TRANSFER_READ
#define MAP_WRITE PIPE_TRANSFER_WRITE
#define MAP_ASYNC PIPE_TRANSFER_UNSYNCHRONIZED
#define MAP_PERSISTENT PIPE_TRANSFER_PERSISTENT
#define MAP_COHERENT PIPE_TRANSFER_COHERENT
#define MAP_READ PIPE_MAP_READ
#define MAP_WRITE PIPE_MAP_WRITE
#define MAP_ASYNC PIPE_MAP_UNSYNCHRONIZED
#define MAP_PERSISTENT PIPE_MAP_PERSISTENT
#define MAP_COHERENT PIPE_MAP_COHERENT
/* internal */
#define MAP_INTERNAL_MASK (0xffu << 24)
#define MAP_RAW (0x01 << 24)

View File

@ -1351,7 +1351,7 @@ static void
iris_flush_staging_region(struct pipe_transfer *xfer,
const struct pipe_box *flush_box)
{
if (!(xfer->usage & PIPE_TRANSFER_WRITE))
if (!(xfer->usage & PIPE_MAP_WRITE))
return;
struct iris_transfer *map = (void *) xfer;
@ -1422,7 +1422,7 @@ iris_map_copy_region(struct iris_transfer *map)
xfer->layer_stride = isl_surf_get_array_pitch(surf);
}
if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
if (!(xfer->usage & PIPE_MAP_DISCARD_RANGE)) {
iris_copy_region(map->blorp, map->batch, map->staging, 0, extra, 0, 0,
xfer->resource, xfer->level, box);
/* Ensure writes to the staging BO land before we map it below. */
@ -1610,7 +1610,7 @@ iris_unmap_s8(struct iris_transfer *map)
struct iris_resource *res = (struct iris_resource *) xfer->resource;
struct isl_surf *surf = &res->surf;
if (xfer->usage & PIPE_TRANSFER_WRITE) {
if (xfer->usage & PIPE_MAP_WRITE) {
uint8_t *untiled_s8_map = map->ptr;
uint8_t *tiled_s8_map =
iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
@ -1657,7 +1657,7 @@ iris_map_s8(struct iris_transfer *map)
* invalidate is set, since we'll be writing the whole rectangle from our
* temporary buffer back out.
*/
if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
if (!(xfer->usage & PIPE_MAP_DISCARD_RANGE)) {
uint8_t *untiled_s8_map = map->ptr;
uint8_t *tiled_s8_map =
iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
@ -1716,7 +1716,7 @@ iris_unmap_tiled_memcpy(struct iris_transfer *map)
const bool has_swizzling = false;
if (xfer->usage & PIPE_TRANSFER_WRITE) {
if (xfer->usage & PIPE_MAP_WRITE) {
char *dst =
iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
@ -1760,7 +1760,7 @@ iris_map_tiled_memcpy(struct iris_transfer *map)
const bool has_swizzling = false;
if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
if (!(xfer->usage & PIPE_MAP_DISCARD_RANGE)) {
char *src =
iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
@ -1819,7 +1819,7 @@ can_promote_to_async(const struct iris_resource *res,
* initialized with useful data, then we can safely promote this write
* to be unsynchronized. This helps the common pattern of appending data.
*/
return res->base.target == PIPE_BUFFER && (usage & PIPE_TRANSFER_WRITE) &&
return res->base.target == PIPE_BUFFER && (usage & PIPE_MAP_WRITE) &&
!(usage & TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED) &&
!util_ranges_intersect(&res->valid_buffer_range, box->x,
box->x + box->width);
@ -1840,35 +1840,35 @@ iris_transfer_map(struct pipe_context *ctx,
if (iris_resource_unfinished_aux_import(res))
iris_resource_finish_aux_import(ctx->screen, res);
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
/* Replace the backing storage with a fresh buffer for non-async maps */
if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
if (!(usage & (PIPE_MAP_UNSYNCHRONIZED |
TC_TRANSFER_MAP_NO_INVALIDATE)))
iris_invalidate_resource(ctx, resource);
/* If we can discard the whole resource, we can discard the range. */
usage |= PIPE_TRANSFER_DISCARD_RANGE;
usage |= PIPE_MAP_DISCARD_RANGE;
}
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
can_promote_to_async(res, box, usage)) {
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
}
bool map_would_stall = false;
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
map_would_stall =
resource_is_busy(ice, res) ||
iris_has_invalid_primary(res, level, 1, box->z, box->depth);
if (map_would_stall && (usage & PIPE_TRANSFER_DONTBLOCK) &&
(usage & PIPE_TRANSFER_MAP_DIRECTLY))
if (map_would_stall && (usage & PIPE_MAP_DONTBLOCK) &&
(usage & PIPE_MAP_DIRECTLY))
return NULL;
}
if (surf->tiling != ISL_TILING_LINEAR &&
(usage & PIPE_TRANSFER_MAP_DIRECTLY))
(usage & PIPE_MAP_DIRECTLY))
return NULL;
struct iris_transfer *map = slab_alloc(&ice->transfer_pool);
@ -1890,7 +1890,7 @@ iris_transfer_map(struct pipe_context *ctx,
util_ranges_intersect(&res->valid_buffer_range, box->x,
box->x + box->width);
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
util_range_add(&res->base, &res->valid_buffer_range, box->x, box->x + box->width);
/* Avoid using GPU copies for persistent/coherent buffers, as the idea
@ -1899,9 +1899,9 @@ iris_transfer_map(struct pipe_context *ctx,
* contain state we're constructing for a GPU draw call, which would
* kill us with infinite stack recursion.
*/
bool no_gpu = usage & (PIPE_TRANSFER_PERSISTENT |
PIPE_TRANSFER_COHERENT |
PIPE_TRANSFER_MAP_DIRECTLY);
bool no_gpu = usage & (PIPE_MAP_PERSISTENT |
PIPE_MAP_COHERENT |
PIPE_MAP_DIRECTLY);
/* GPU copies are not useful for buffer reads. Instead of stalling to
* read from the original buffer, we'd simply copy it to a temporary...
@ -1912,7 +1912,7 @@ iris_transfer_map(struct pipe_context *ctx,
* temporary and map that, to avoid the resolve. (It might be better to
* allocate a tiled temporary and use the tiled_memcpy paths...)
*/
if (!(usage & PIPE_TRANSFER_DISCARD_RANGE) &&
if (!(usage & PIPE_MAP_DISCARD_RANGE) &&
!iris_has_invalid_primary(res, level, 1, box->z, box->depth)) {
no_gpu = true;
}
@ -1939,10 +1939,10 @@ iris_transfer_map(struct pipe_context *ctx,
if (resource->target != PIPE_BUFFER) {
iris_resource_access_raw(ice, res, level, box->z, box->depth,
usage & PIPE_TRANSFER_WRITE);
usage & PIPE_MAP_WRITE);
}
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
if (iris_batch_references(&ice->batches[i], res->bo))
iris_batch_flush(&ice->batches[i]);
@ -2010,8 +2010,8 @@ iris_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *xfer)
struct iris_context *ice = (struct iris_context *)ctx;
struct iris_transfer *map = (void *) xfer;
if (!(xfer->usage & (PIPE_TRANSFER_FLUSH_EXPLICIT |
PIPE_TRANSFER_COHERENT))) {
if (!(xfer->usage & (PIPE_MAP_FLUSH_EXPLICIT |
PIPE_MAP_COHERENT))) {
struct pipe_box flush_box = {
.x = 0, .y = 0, .z = 0,
.width = xfer->box.width,
@ -2068,7 +2068,7 @@ iris_texture_subdata(struct pipe_context *ctx,
data, stride, layer_stride);
}
/* No state trackers pass any flags other than PIPE_TRANSFER_WRITE */
/* No state trackers pass any flags other than PIPE_MAP_WRITE */
iris_resource_access_raw(ice, res, level, box->z, box->depth, true);

View File

@ -559,12 +559,12 @@ lima_transfer_map(struct pipe_context *pctx,
/* No direct mappings of tiled, since we need to manually
* tile/untile.
*/
if (res->tiled && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
if (res->tiled && (usage & PIPE_MAP_DIRECTLY))
return NULL;
/* bo might be in use in a previous stream draw. Allocate a new
* one for the resource to avoid overwriting data in use. */
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
struct lima_bo *new_bo;
assert(res->bo && res->bo->size);
@ -580,13 +580,13 @@ lima_transfer_map(struct pipe_context *pctx,
bo = res->bo;
}
else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
(usage & PIPE_TRANSFER_READ_WRITE)) {
else if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
(usage & PIPE_MAP_READ_WRITE)) {
/* use-once buffers are guaranteed not to read/write an overlapped
* range, so no need to sync */
lima_flush_job_accessing_bo(ctx, bo, usage & PIPE_TRANSFER_WRITE);
lima_flush_job_accessing_bo(ctx, bo, usage & PIPE_MAP_WRITE);
unsigned op = usage & PIPE_TRANSFER_WRITE ?
unsigned op = usage & PIPE_MAP_WRITE ?
LIMA_GEM_WAIT_WRITE : LIMA_GEM_WAIT_READ;
lima_bo_wait(bo, op, PIPE_TIMEOUT_INFINITE);
}
@ -614,7 +614,7 @@ lima_transfer_map(struct pipe_context *pctx,
trans->staging = malloc(ptrans->stride * ptrans->box.height * ptrans->box.depth);
if (usage & PIPE_TRANSFER_READ) {
if (usage & PIPE_MAP_READ) {
unsigned i;
for (i = 0; i < ptrans->box.depth; i++)
panfrost_load_tiled_image(
@ -629,15 +629,15 @@ lima_transfer_map(struct pipe_context *pctx,
return trans->staging;
} else {
unsigned dpw = PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_PERSISTENT;
unsigned dpw = PIPE_MAP_DIRECTLY | PIPE_MAP_WRITE |
PIPE_MAP_PERSISTENT;
if ((usage & dpw) == dpw && res->index_cache)
return NULL;
ptrans->stride = res->levels[level].stride;
ptrans->layer_stride = res->levels[level].layer_stride;
if ((usage & PIPE_TRANSFER_WRITE) && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
if ((usage & PIPE_MAP_WRITE) && (usage & PIPE_MAP_DIRECTLY))
panfrost_minmax_cache_invalidate(res->index_cache, ptrans);
return bo->map + res->levels[level].offset +
@ -668,7 +668,7 @@ lima_transfer_unmap_inner(struct lima_context *ctx,
if (trans->staging) {
pres = &res->base;
if (trans->base.usage & PIPE_TRANSFER_WRITE) {
if (trans->base.usage & PIPE_MAP_WRITE) {
unsigned i;
for (i = 0; i < trans->base.box.depth; i++)
panfrost_store_tiled_image(
@ -779,12 +779,12 @@ lima_texture_subdata(struct pipe_context *pctx,
return;
}
assert(!(usage & PIPE_TRANSFER_READ));
assert(!(usage & PIPE_MAP_READ));
struct lima_transfer t = {
.base = {
.resource = prsc,
.usage = PIPE_TRANSFER_WRITE,
.usage = PIPE_MAP_WRITE,
.level = level,
.box = *box,
.stride = stride,

View File

@ -1006,7 +1006,7 @@ lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
struct sw_winsys *winsys = screen->winsys;
jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
PIPE_TRANSFER_READ);
PIPE_MAP_READ);
jit_tex->row_stride[0] = lp_tex->row_stride[0];
jit_tex->img_stride[0] = lp_tex->img_stride[0];
jit_tex->mip_offsets[0] = 0;

View File

@ -993,7 +993,7 @@ lp_csctx_set_sampler_views(struct lp_cs_context *csctx,
struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
struct sw_winsys *winsys = screen->winsys;
jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
PIPE_TRANSFER_READ);
PIPE_MAP_READ);
jit_tex->row_stride[0] = lp_tex->row_stride[0];
jit_tex->img_stride[0] = lp_tex->img_stride[0];
jit_tex->mip_offsets[0] = 0;
@ -1300,7 +1300,7 @@ fill_grid_size(struct pipe_context *pipe,
params = pipe_buffer_map_range(pipe, info->indirect,
info->indirect_offset,
3 * sizeof(uint32_t),
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
&transfer);
if (!transfer)

View File

@ -333,7 +333,7 @@ prepare_shader_sampling(
struct llvmpipe_screen *screen = llvmpipe_screen(tex->screen);
struct sw_winsys *winsys = screen->winsys;
addr = winsys->displaytarget_map(winsys, lp_tex->dt,
PIPE_TRANSFER_READ);
PIPE_MAP_READ);
row_stride[0] = lp_tex->row_stride[0];
img_stride[0] = lp_tex->img_stride[0];
mip_offsets[0] = 0;
@ -474,7 +474,7 @@ prepare_shader_images(
struct llvmpipe_screen *screen = llvmpipe_screen(img->screen);
struct sw_winsys *winsys = screen->winsys;
addr = winsys->displaytarget_map(winsys, lp_img->dt,
PIPE_TRANSFER_READ);
PIPE_MAP_READ);
row_stride = lp_img->row_stride[0];
img_stride = lp_img->img_stride[0];
sample_stride = 0;

View File

@ -53,14 +53,14 @@ lp_resource_copy_ms(struct pipe_context *pipe,
for (unsigned i = 0; i < src->nr_samples; i++) {
struct pipe_transfer *src_trans, *dst_trans;
const uint8_t *src_map = llvmpipe_transfer_map_ms(pipe,
src, 0, PIPE_TRANSFER_READ, i,
src, 0, PIPE_MAP_READ, i,
src_box,
&src_trans);
if (!src_map)
return;
uint8_t *dst_map = llvmpipe_transfer_map_ms(pipe,
dst, 0, PIPE_TRANSFER_WRITE, i,
dst, 0, PIPE_MAP_WRITE, i,
&dst_box,
&dst_trans);
if (!dst_map) {
@ -285,7 +285,7 @@ lp_clear_color_texture_msaa(struct pipe_context *pipe,
struct pipe_transfer *dst_trans;
ubyte *dst_map;
dst_map = llvmpipe_transfer_map_ms(pipe, texture, 0, PIPE_TRANSFER_WRITE,
dst_map = llvmpipe_transfer_map_ms(pipe, texture, 0, PIPE_MAP_WRITE,
sample, box, &dst_trans);
if (!dst_map)
return;
@ -347,8 +347,8 @@ lp_clear_depth_stencil_texture_msaa(struct pipe_context *pipe,
dst_map = llvmpipe_transfer_map_ms(pipe,
texture,
0,
(need_rmw ? PIPE_TRANSFER_READ_WRITE :
PIPE_TRANSFER_WRITE),
(need_rmw ? PIPE_MAP_READ_WRITE :
PIPE_MAP_WRITE),
sample, box, &dst_trans);
assert(dst_map);
if (!dst_map)

View File

@ -230,7 +230,7 @@ llvmpipe_displaytarget_layout(struct llvmpipe_screen *screen,
if (!map_front_private) {
void *map = winsys->displaytarget_map(winsys, lpr->dt,
PIPE_TRANSFER_WRITE);
PIPE_MAP_WRITE);
if (map)
memset(map, 0, height * lpr->row_stride[0]);
@ -408,10 +408,10 @@ llvmpipe_resource_map(struct pipe_resource *resource,
unsigned dt_usage;
if (tex_usage == LP_TEX_USAGE_READ) {
dt_usage = PIPE_TRANSFER_READ;
dt_usage = PIPE_MAP_READ;
}
else {
dt_usage = PIPE_TRANSFER_READ_WRITE;
dt_usage = PIPE_MAP_READ_WRITE;
}
assert(level == 0);
@ -566,9 +566,9 @@ llvmpipe_transfer_map_ms( struct pipe_context *pipe,
* Transfers, like other pipe operations, must happen in order, so flush the
* context if necessary.
*/
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
boolean read_only = !(usage & PIPE_TRANSFER_WRITE);
boolean do_not_block = !!(usage & PIPE_TRANSFER_DONTBLOCK);
if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
boolean read_only = !(usage & PIPE_MAP_WRITE);
boolean do_not_block = !!(usage & PIPE_MAP_DONTBLOCK);
if (!llvmpipe_flush_resource(pipe, resource,
level,
read_only,
@ -584,7 +584,7 @@ llvmpipe_transfer_map_ms( struct pipe_context *pipe,
}
/* Check if we're mapping a current constant buffer */
if ((usage & PIPE_TRANSFER_WRITE) &&
if ((usage & PIPE_MAP_WRITE) &&
(resource->bind & PIPE_BIND_CONSTANT_BUFFER)) {
unsigned i;
for (i = 0; i < ARRAY_SIZE(llvmpipe->constants[PIPE_SHADER_FRAGMENT]); ++i) {
@ -618,7 +618,7 @@ llvmpipe_transfer_map_ms( struct pipe_context *pipe,
transfer->usage);
*/
if (usage == PIPE_TRANSFER_READ) {
if (usage == PIPE_MAP_READ) {
tex_usage = LP_TEX_USAGE_READ;
mode = "read";
}
@ -642,7 +642,7 @@ llvmpipe_transfer_map_ms( struct pipe_context *pipe,
/* May want to do different things here depending on read/write nature
* of the map:
*/
if (usage & PIPE_TRANSFER_WRITE) {
if (usage & PIPE_MAP_WRITE) {
/* Do something to notify sharing contexts of a texture change.
*/
screen->timestamp++;


@ -228,7 +228,7 @@ static inline bool
nouveau_buffer_sync(struct nouveau_context *nv,
struct nv04_resource *buf, unsigned rw)
{
if (rw == PIPE_TRANSFER_READ) {
if (rw == PIPE_MAP_READ) {
if (!buf->fence_wr)
return true;
NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
@ -253,7 +253,7 @@ nouveau_buffer_sync(struct nouveau_context *nv,
static inline bool
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
if (rw == PIPE_TRANSFER_READ)
if (rw == PIPE_MAP_READ)
return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
else
return (buf->fence && !nouveau_fence_signalled(buf->fence));
@ -331,7 +331,7 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
#define NOUVEAU_TRANSFER_DISCARD \
(PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
(PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE)
/* Checks whether it is possible to completely discard the memory backing this
* resource. This can be useful if we would otherwise have to wait for a read
@ -340,13 +340,13 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
static inline bool
nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
{
if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
if (!(usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE))
return false;
if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
return false;
if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
if (unlikely(usage & PIPE_MAP_PERSISTENT))
return false;
return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
return buf->mm && nouveau_buffer_busy(buf, PIPE_MAP_WRITE);
}
/* Returns a pointer to a memory area representing a window into the
@ -390,9 +390,9 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
nouveau_buffer_transfer_init(tx, resource, box, usage);
*ptransfer = &tx->base;
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);
/* If we are trying to write to an uninitialized range, the user shouldn't
@ -402,15 +402,15 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
* uninitialized, the GPU can't care what was there, and so we can treat
* the write as being unsynchronized.
*/
if ((usage & PIPE_TRANSFER_WRITE) &&
if ((usage & PIPE_MAP_WRITE) &&
!util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_DISCARD_RANGE | PIPE_MAP_UNSYNCHRONIZED;
if (buf->domain == NOUVEAU_BO_VRAM) {
if (usage & NOUVEAU_TRANSFER_DISCARD) {
/* Set up a staging area for the user to write to. It will be copied
* back into VRAM on unmap. */
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE)
buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
nouveau_transfer_staging(nv, tx, true);
} else {
@ -428,7 +428,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
} else {
/* The buffer is currently idle. Create a staging area for writes,
* and make sure that the cached data is up-to-date. */
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
nouveau_transfer_staging(nv, tx, true);
if (!buf->data)
nouveau_buffer_cache(nv, buf);
@ -465,31 +465,31 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
map = (uint8_t *)buf->bo->map + buf->offset + box->x;
/* using kernel fences only if !buf->mm */
if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
if ((usage & PIPE_MAP_UNSYNCHRONIZED) || !buf->mm)
return map;
/* If the GPU is currently reading/writing this buffer, we shouldn't
* interfere with its progress. So instead we either wait for the GPU to
* complete its operation, or set up a staging area to perform our work in.
*/
if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
if (unlikely(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
PIPE_TRANSFER_PERSISTENT))) {
if (nouveau_buffer_busy(buf, usage & PIPE_MAP_READ_WRITE)) {
if (unlikely(usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE |
PIPE_MAP_PERSISTENT))) {
/* Discarding was not possible, must sync because
* subsequent transfers might use UNSYNCHRONIZED. */
nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
nouveau_buffer_sync(nv, buf, usage & PIPE_MAP_READ_WRITE);
} else
if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
if (usage & PIPE_MAP_DISCARD_RANGE) {
/* The whole range is being discarded, so it doesn't matter what was
* there before. No need to copy anything over. */
nouveau_transfer_staging(nv, tx, true);
map = tx->map;
} else
if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
if (usage & PIPE_TRANSFER_DONTBLOCK)
if (nouveau_buffer_busy(buf, PIPE_MAP_READ)) {
if (usage & PIPE_MAP_DONTBLOCK)
map = NULL;
else
nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
nouveau_buffer_sync(nv, buf, usage & PIPE_MAP_READ_WRITE);
} else {
/* It is expected that the returned buffer be a representation of the
* data in question, so we must copy it over from the buffer. */
@ -536,8 +536,8 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
struct nouveau_transfer *tx = nouveau_transfer(transfer);
struct nv04_resource *buf = nv04_resource(transfer->resource);
if (tx->base.usage & PIPE_TRANSFER_WRITE) {
if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
if (tx->base.usage & PIPE_MAP_WRITE) {
if (!(tx->base.usage & PIPE_MAP_FLUSH_EXPLICIT)) {
if (tx->map)
nouveau_transfer_write(nv, tx, 0, tx->base.box.width);
@ -553,7 +553,7 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
}
}
if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
if (!tx->bo && (tx->base.usage & PIPE_MAP_WRITE))
NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);
nouveau_buffer_transfer_del(nv, tx);
@ -617,7 +617,7 @@ nouveau_resource_map_offset(struct nouveau_context *nv,
if (res->mm) {
unsigned rw;
rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
rw = (flags & NOUVEAU_BO_WR) ? PIPE_MAP_WRITE : PIPE_MAP_READ;
nouveau_buffer_sync(nv, res, rw);
if (nouveau_bo_map(res->bo, 0, NULL))
return NULL;
@ -931,7 +931,7 @@ nouveau_buffer_invalidate(struct pipe_context *pipe,
* wipe the valid buffer range. Otherwise we have to create fresh
* storage. (We don't keep track of fences for non-sub-allocated BO's.)
*/
if (buf->mm && !nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE)) {
if (buf->mm && !nouveau_buffer_busy(buf, PIPE_MAP_WRITE)) {
util_range_set_empty(&buf->valid_buffer_range);
} else {
nouveau_buffer_reallocate(nv->screen, buf, buf->domain);


@ -75,12 +75,12 @@ nouveau_screen_transfer_flags(unsigned pipe)
{
uint32_t flags = 0;
if (!(pipe & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (pipe & PIPE_TRANSFER_READ)
if (!(pipe & PIPE_MAP_UNSYNCHRONIZED)) {
if (pipe & PIPE_MAP_READ)
flags |= NOUVEAU_BO_RD;
if (pipe & PIPE_TRANSFER_WRITE)
if (pipe & PIPE_MAP_WRITE)
flags |= NOUVEAU_BO_WR;
if (pipe & PIPE_TRANSFER_DONTBLOCK)
if (pipe & PIPE_MAP_DONTBLOCK)
flags |= NOUVEAU_BO_NOBLOCK;
}


@ -94,8 +94,8 @@ nv30_render_map_vertices(struct vbuf_render *render)
char *map = pipe_buffer_map_range(
&r->nv30->base.pipe, r->buffer,
r->offset, r->length,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE,
PIPE_MAP_WRITE |
PIPE_MAP_DISCARD_RANGE,
&r->transfer);
assert(map);
return map;
@ -424,8 +424,8 @@ nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
if (!map) {
if (nv30->vtxbuf[i].buffer.resource)
map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer.resource,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ, &transfer[i]);
PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_READ, &transfer[i]);
}
draw_set_mapped_vertex_buffer(draw, i, map, ~0);
}
@ -434,8 +434,8 @@ nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
const void *map = info->has_user_indices ? info->index.user : NULL;
if (!map)
map = pipe_buffer_map(pipe, info->index.resource,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ, &transferi);
PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_READ, &transferi);
draw_set_indexes(draw,
(ubyte *) map,
info->index_size, ~0);


@ -50,7 +50,7 @@ nv30_fragprog_upload(struct nv30_context *nv30)
int i;
map = pipe_buffer_map(pipe, fp->buffer,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
PIPE_MAP_WRITE | PIPE_MAP_DISCARD_WHOLE_RESOURCE,
&transfer);
for (i = 0; i < fp->insn_len; i++)
*map++ = (fp->insn[i] >> 16) | (fp->insn[i] << 16);


@ -324,7 +324,7 @@ nv30_miptree_transfer_map(struct pipe_context *pipe, struct pipe_resource *pt,
tx->tmp.y1 = tx->tmp.h;
tx->tmp.z = 0;
if (usage & PIPE_TRANSFER_READ) {
if (usage & PIPE_MAP_READ) {
bool is_3d = mt->base.base.target == PIPE_TEXTURE_3D;
unsigned offset = tx->img.offset;
unsigned z = tx->img.z;
@ -349,9 +349,9 @@ nv30_miptree_transfer_map(struct pipe_context *pipe, struct pipe_resource *pt,
return tx->tmp.bo->map;
}
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
access |= NOUVEAU_BO_RD;
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
access |= NOUVEAU_BO_WR;
ret = nouveau_bo_map(tx->tmp.bo, access, nv30->base.client);
@ -374,7 +374,7 @@ nv30_miptree_transfer_unmap(struct pipe_context *pipe,
struct nv30_miptree *mt = nv30_miptree(tx->base.resource);
unsigned i;
if (ptx->usage & PIPE_TRANSFER_WRITE) {
if (ptx->usage & PIPE_MAP_WRITE) {
bool is_3d = mt->base.base.target == PIPE_TEXTURE_3D;
for (i = 0; i < tx->base.box.depth; ++i) {
nv30_transfer_rect(nv30, NEAREST, &tx->tmp, &tx->img);


@ -120,7 +120,7 @@ nv30_transfer_rect_fragprog(struct nv30_context *nv30)
if (nv30->blit_fp) {
struct pipe_transfer *transfer;
u32 *map = pipe_buffer_map(pipe, nv30->blit_fp,
PIPE_TRANSFER_WRITE, &transfer);
PIPE_MAP_WRITE, &transfer);
if (map) {
map[0] = 0x17009e00; /* texr r0, i[tex0], texture[0]; end; */
map[1] = 0x1c9dc801;


@ -258,7 +258,7 @@ nv50_miptree_transfer_map(struct pipe_context *pctx,
int ret;
unsigned flags = 0;
if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
if (usage & PIPE_MAP_DIRECTLY)
return NULL;
tx = CALLOC_STRUCT(nv50_transfer);
@ -300,7 +300,7 @@ nv50_miptree_transfer_map(struct pipe_context *pctx,
tx->rect[1].pitch = tx->base.stride;
tx->rect[1].domain = NOUVEAU_BO_GART;
if (usage & PIPE_TRANSFER_READ) {
if (usage & PIPE_MAP_READ) {
unsigned base = tx->rect[0].base;
unsigned z = tx->rect[0].z;
unsigned i;
@ -323,9 +323,9 @@ nv50_miptree_transfer_map(struct pipe_context *pctx,
return tx->rect[1].bo->map;
}
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
flags = NOUVEAU_BO_RD;
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
flags |= NOUVEAU_BO_WR;
ret = nouveau_bo_map(tx->rect[1].bo, flags, screen->base.client);
@ -348,7 +348,7 @@ nv50_miptree_transfer_unmap(struct pipe_context *pctx,
struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
unsigned i;
if (tx->base.usage & PIPE_TRANSFER_WRITE) {
if (tx->base.usage & PIPE_MAP_WRITE) {
for (i = 0; i < tx->base.box.depth; ++i) {
nv50_m2mf_transfer_rect(nv50, &tx->rect[0], &tx->rect[1],
tx->nblocksx, tx->nblocksy);


@ -360,11 +360,11 @@ static inline bool
nvc0_mt_sync(struct nvc0_context *nvc0, struct nv50_miptree *mt, unsigned usage)
{
if (!mt->base.mm) {
uint32_t access = (usage & PIPE_TRANSFER_WRITE) ?
uint32_t access = (usage & PIPE_MAP_WRITE) ?
NOUVEAU_BO_WR : NOUVEAU_BO_RD;
return !nouveau_bo_wait(mt->base.bo, access, nvc0->base.client);
}
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
return !mt->base.fence || nouveau_fence_wait(mt->base.fence, &nvc0->base.debug);
return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, &nvc0->base.debug);
}
@ -390,12 +390,12 @@ nvc0_miptree_transfer_map(struct pipe_context *pctx,
if (!ret)
ret = nouveau_bo_map(mt->base.bo, 0, NULL);
if (ret &&
(usage & PIPE_TRANSFER_MAP_DIRECTLY))
(usage & PIPE_MAP_DIRECTLY))
return NULL;
if (!ret)
usage |= PIPE_TRANSFER_MAP_DIRECTLY;
usage |= PIPE_MAP_DIRECTLY;
} else
if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
if (usage & PIPE_MAP_DIRECTLY)
return NULL;
tx = CALLOC_STRUCT(nvc0_transfer);
@ -417,7 +417,7 @@ nvc0_miptree_transfer_map(struct pipe_context *pctx,
}
tx->nlayers = box->depth;
if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
if (usage & PIPE_MAP_DIRECTLY) {
tx->base.stride = mt->level[level].pitch;
tx->base.layer_stride = mt->layer_stride;
uint32_t offset = box->y * tx->base.stride +
@ -452,7 +452,7 @@ nvc0_miptree_transfer_map(struct pipe_context *pctx,
tx->rect[1].pitch = tx->base.stride;
tx->rect[1].domain = NOUVEAU_BO_GART;
if (usage & PIPE_TRANSFER_READ) {
if (usage & PIPE_MAP_READ) {
unsigned base = tx->rect[0].base;
unsigned z = tx->rect[0].z;
unsigned i;
@ -475,9 +475,9 @@ nvc0_miptree_transfer_map(struct pipe_context *pctx,
return tx->rect[1].bo->map;
}
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
flags = NOUVEAU_BO_RD;
if (usage & PIPE_TRANSFER_WRITE)
if (usage & PIPE_MAP_WRITE)
flags |= NOUVEAU_BO_WR;
ret = nouveau_bo_map(tx->rect[1].bo, flags, nvc0->screen->base.client);
@ -501,14 +501,14 @@ nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
unsigned i;
if (tx->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
if (tx->base.usage & PIPE_MAP_DIRECTLY) {
pipe_resource_reference(&transfer->resource, NULL);
FREE(tx);
return;
}
if (tx->base.usage & PIPE_TRANSFER_WRITE) {
if (tx->base.usage & PIPE_MAP_WRITE) {
for (i = 0; i < tx->nlayers; ++i) {
nvc0->m2mf_copy_rect(nvc0, &tx->rect[0], &tx->rect[1],
tx->nblocksx, tx->nblocksy);
@ -526,7 +526,7 @@ nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
} else {
nouveau_bo_ref(NULL, &tx->rect[1].bo);
}
if (tx->base.usage & PIPE_TRANSFER_READ)
if (tx->base.usage & PIPE_MAP_READ)
NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_rd, 1);
pipe_resource_reference(&transfer->resource, NULL);


@ -772,7 +772,7 @@ static void *
panfrost_transfer_map(struct pipe_context *pctx,
struct pipe_resource *resource,
unsigned level,
unsigned usage, /* a combination of PIPE_TRANSFER_x */
unsigned usage, /* a combination of PIPE_MAP_x */
const struct pipe_box *box,
struct pipe_transfer **out_transfer)
{
@ -783,7 +783,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
struct panfrost_bo *bo = rsrc->bo;
/* Can't map tiled/compressed directly */
if ((usage & PIPE_TRANSFER_MAP_DIRECTLY) && rsrc->modifier != DRM_FORMAT_MOD_LINEAR)
if ((usage & PIPE_MAP_DIRECTLY) && rsrc->modifier != DRM_FORMAT_MOD_LINEAR)
return NULL;
struct panfrost_gtransfer *transfer = rzalloc(pctx, struct panfrost_gtransfer);
@ -814,7 +814,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
* from a pending batch XXX */
panfrost_flush_batches_accessing_bo(ctx, rsrc->bo, true);
if ((usage & PIPE_TRANSFER_READ) && rsrc->slices[level].initialized) {
if ((usage & PIPE_MAP_READ) && rsrc->slices[level].initialized) {
pan_blit_to_staging(pctx, transfer);
panfrost_flush_batches_accessing_bo(ctx, staging->bo, true);
panfrost_bo_wait(staging->bo, INT64_MAX, false);
@ -830,12 +830,12 @@ panfrost_transfer_map(struct pipe_context *pctx,
if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
bool create_new_bo = usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
bool create_new_bo = usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE;
bool copy_resource = false;
if (!create_new_bo &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
(usage & PIPE_TRANSFER_WRITE) &&
!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
(usage & PIPE_MAP_WRITE) &&
!(resource->target == PIPE_BUFFER
&& !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) &&
panfrost_pending_batches_access_bo(ctx, bo)) {
@ -887,15 +887,15 @@ panfrost_transfer_map(struct pipe_context *pctx,
panfrost_bo_wait(bo, INT64_MAX, true);
}
}
} else if ((usage & PIPE_TRANSFER_WRITE)
} else if ((usage & PIPE_MAP_WRITE)
&& resource->target == PIPE_BUFFER
&& !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) {
/* No flush for writes to uninitialized */
} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (usage & PIPE_TRANSFER_WRITE) {
} else if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
if (usage & PIPE_MAP_WRITE) {
panfrost_flush_batches_accessing_bo(ctx, bo, true);
panfrost_bo_wait(bo, INT64_MAX, true);
} else if (usage & PIPE_TRANSFER_READ) {
} else if (usage & PIPE_MAP_READ) {
panfrost_flush_batches_accessing_bo(ctx, bo, false);
panfrost_bo_wait(bo, INT64_MAX, false);
}
@ -907,7 +907,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
transfer->map = ralloc_size(transfer, transfer->base.layer_stride * box->depth);
assert(box->depth == 1);
if ((usage & PIPE_TRANSFER_READ) && rsrc->slices[level].initialized) {
if ((usage & PIPE_MAP_READ) && rsrc->slices[level].initialized) {
panfrost_load_tiled_image(
transfer->map,
bo->cpu + rsrc->slices[level].offset,
@ -925,7 +925,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
* caching... I don't know if this is actually possible but we
* should still get it right */
unsigned dpw = PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_WRITE | PIPE_TRANSFER_PERSISTENT;
unsigned dpw = PIPE_MAP_DIRECTLY | PIPE_MAP_WRITE | PIPE_MAP_PERSISTENT;
if ((usage & dpw) == dpw && rsrc->index_cache)
return NULL;
@ -938,7 +938,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
/* By mapping direct-write, we're implicitly already
* initialized (maybe), so be conservative */
if (usage & PIPE_TRANSFER_WRITE) {
if (usage & PIPE_MAP_WRITE) {
rsrc->slices[level].initialized = true;
panfrost_minmax_cache_invalidate(rsrc->index_cache, &transfer->base);
}
@ -994,7 +994,7 @@ panfrost_transfer_unmap(struct pipe_context *pctx,
* malformed AFBC data if uninitialized */
if (trans->staging.rsrc) {
if (transfer->usage & PIPE_TRANSFER_WRITE) {
if (transfer->usage & PIPE_MAP_WRITE) {
if (panfrost_should_linear_convert(prsrc, transfer)) {
panfrost_bo_unreference(prsrc->bo);
@ -1018,7 +1018,7 @@ panfrost_transfer_unmap(struct pipe_context *pctx,
if (trans->map) {
struct panfrost_bo *bo = prsrc->bo;
if (transfer->usage & PIPE_TRANSFER_WRITE) {
if (transfer->usage & PIPE_MAP_WRITE) {
prsrc->slices[transfer->level].initialized = true;
if (prsrc->modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED) {


@ -160,8 +160,8 @@ static bool r300_get_query_result(struct pipe_context* pipe,
}
map = r300->rws->buffer_map(q->buf, r300->cs,
PIPE_TRANSFER_READ |
(!wait ? PIPE_TRANSFER_DONTBLOCK : 0));
PIPE_MAP_READ |
(!wait ? PIPE_MAP_DONTBLOCK : 0));
if (!map)
return FALSE;


@ -374,7 +374,7 @@ static void r300_draw_arrays_immediate(struct r300_context *r300,
if (!map[vbi]) {
map[vbi] = (uint32_t*)r300->rws->buffer_map(
r300_resource(vbuf->buffer.resource)->buf,
r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
r300->cs, PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
}
mapelem[i] = map[vbi] + (velem->src_offset / 4);
@ -609,8 +609,8 @@ static void r300_draw_elements(struct r300_context *r300,
/* If we got here, then orgIndexBuffer == indexBuffer. */
uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->buf,
r300->cs,
PIPE_TRANSFER_READ |
PIPE_TRANSFER_UNSYNCHRONIZED);
PIPE_MAP_READ |
PIPE_MAP_UNSYNCHRONIZED);
if (info->mode == PIPE_PRIM_TRIANGLES) {
memcpy(indices3, ptr + start, 6);
@ -922,7 +922,7 @@ static boolean r300_render_allocate_vertices(struct vbuf_render* render,
}
r300->draw_vbo_offset = 0;
r300render->vbo_ptr = rws->buffer_map(r300->vbo, r300->cs,
PIPE_TRANSFER_WRITE);
PIPE_MAP_WRITE);
}
r300render->vertex_size = vertex_size;


@ -41,7 +41,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
&out_offset, out_buffer, &ptr);
util_shorten_ubyte_elts_to_userptr(
&r300->context, info, PIPE_TRANSFER_UNSYNCHRONIZED, index_offset,
&r300->context, info, PIPE_MAP_UNSYNCHRONIZED, index_offset,
*start, count, ptr);
*index_size = 2;
@ -55,7 +55,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
&out_offset, out_buffer, &ptr);
util_rebuild_ushort_elts_to_userptr(&r300->context, info,
PIPE_TRANSFER_UNSYNCHRONIZED,
PIPE_MAP_UNSYNCHRONIZED,
index_offset, *start,
count, ptr);
@ -70,7 +70,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
&out_offset, out_buffer, &ptr);
util_rebuild_uint_elts_to_userptr(&r300->context, info,
PIPE_TRANSFER_UNSYNCHRONIZED,
PIPE_MAP_UNSYNCHRONIZED,
index_offset, *start,
count, ptr);


@ -90,9 +90,9 @@ r300_buffer_transfer_map( struct pipe_context *context,
return rbuf->malloced_buffer + box->x;
}
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
assert(usage & PIPE_TRANSFER_WRITE);
if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
assert(usage & PIPE_MAP_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
@ -123,8 +123,8 @@ r300_buffer_transfer_map( struct pipe_context *context,
/* Buffers are never used for write, therefore mapping for read can be
* unsynchronized. */
if (!(usage & PIPE_TRANSFER_WRITE)) {
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
if (!(usage & PIPE_MAP_WRITE)) {
usage |= PIPE_MAP_UNSYNCHRONIZED;
}
map = rws->buffer_map(rbuf->buf, r300->cs, usage);


@ -135,7 +135,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
* for this transfer.
* Also make write transfers pipelined. */
if (tex->tex.microtile || tex->tex.macrotile[level] ||
(referenced_hw && !(usage & PIPE_TRANSFER_READ) &&
(referenced_hw && !(usage & PIPE_MAP_READ) &&
r300_is_blit_supported(texture->format))) {
struct pipe_resource base;
@ -194,7 +194,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
trans->transfer.layer_stride =
trans->linear_texture->tex.layer_size_in_bytes[0];
if (usage & PIPE_TRANSFER_READ) {
if (usage & PIPE_MAP_READ) {
/* We cannot map a tiled texture directly because the data is
* in a different order, therefore we do detiling using a blit. */
r300_copy_from_tiled_texture(ctx, trans);
@ -209,7 +209,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
trans->offset = r300_texture_get_offset(tex, level, box->z);
if (referenced_cs &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
r300_flush(ctx, 0, NULL);
}
}
@ -249,7 +249,7 @@ void r300_texture_transfer_unmap(struct pipe_context *ctx,
struct r300_transfer *trans = r300_transfer(transfer);
if (trans->linear_texture) {
if (transfer->usage & PIPE_TRANSFER_WRITE) {
if (transfer->usage & PIPE_MAP_WRITE) {
r300_copy_into_tiled_texture(ctx, trans);
}


@ -479,7 +479,7 @@ static void compute_memory_move_item(struct compute_memory_pool *pool,
u_box_1d(new_start_in_dw * 4, (offset + item->size_in_dw) * 4, &box);
map = pipe->transfer_map(pipe, src, 0, PIPE_TRANSFER_READ_WRITE,
map = pipe->transfer_map(pipe, src, 0, PIPE_MAP_READ_WRITE,
&box, &trans);
assert(map);
@ -614,7 +614,7 @@ static void compute_memory_transfer(
offset_in_chunk, size);
if (device_to_host) {
map = pipe->transfer_map(pipe, gart, 0, PIPE_TRANSFER_READ,
map = pipe->transfer_map(pipe, gart, 0, PIPE_MAP_READ,
&(struct pipe_box) { .width = aligned_size * 4,
.height = 1, .depth = 1 }, &xfer);
assert(xfer);
@ -622,7 +622,7 @@ static void compute_memory_transfer(
memcpy(data, map + internal_offset, size);
pipe->transfer_unmap(pipe, xfer);
} else {
map = pipe->transfer_map(pipe, gart, 0, PIPE_TRANSFER_WRITE,
map = pipe->transfer_map(pipe, gart, 0, PIPE_MAP_WRITE,
&(struct pipe_box) { .width = aligned_size * 4,
.height = 1, .depth = 1 }, &xfer);
assert(xfer);


@ -334,8 +334,8 @@ static void eg_dump_last_ib(struct r600_context *rctx, FILE *f)
*/
uint32_t *map = rctx->b.ws->buffer_map(rctx->last_trace_buf->buf,
NULL,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ);
PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_READ);
if (map)
last_trace_id = *map;
}


@ -458,7 +458,7 @@ static void *evergreen_create_compute_state(struct pipe_context *ctx,
shader->bc.ndw * 4);
p = r600_buffer_map_sync_with_rings(
&rctx->b, shader->code_bo,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
//TODO: use util_memcpy_cpu_to_le32 ?
memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
rctx->b.ws->buffer_unmap(shader->code_bo->buf);
@ -557,7 +557,7 @@ static void evergreen_compute_upload_input(struct pipe_context *ctx,
u_box_1d(0, input_size, &box);
num_work_groups_start = ctx->transfer_map(ctx,
(struct pipe_resource*)shader->kernel_param,
0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
0, PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&box, &transfer);
global_size_start = num_work_groups_start + (3 * (sizeof(uint) /4));
local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
@ -758,7 +758,7 @@ static void compute_emit_cs(struct r600_context *rctx,
if (info->indirect) {
struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect;
unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource, PIPE_TRANSFER_READ);
unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource, PIPE_MAP_READ);
unsigned offset = info->indirect_offset / 4;
indirect_grid[0] = data[offset];
indirect_grid[1] = data[offset + 1];
@ -1258,7 +1258,7 @@ static void *r600_compute_global_transfer_map(struct pipe_context *ctx,
dst = (struct pipe_resource*)item->real_buffer;
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
buffer->chunk->status |= ITEM_MAPPED_FOR_READING;
COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"


@ -2776,7 +2776,7 @@ void *r600_create_vertex_fetch_shader(struct pipe_context *ctx,
bytecode = r600_buffer_map_sync_with_rings
(&rctx->b, shader->buffer,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
bytecode += shader->offset / 4;
if (R600_BIG_ENDIAN) {


@ -661,7 +661,7 @@ static void r600_clear_buffer(struct pipe_context *ctx, struct pipe_resource *ds
r600_blitter_end(ctx);
} else {
uint32_t *map = r600_buffer_map_sync_with_rings(&rctx->b, r600_resource(dst),
PIPE_TRANSFER_WRITE);
PIPE_MAP_WRITE);
map += offset / 4;
size /= 4;
for (unsigned i = 0; i < size; i++)


@ -53,11 +53,11 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
assert(!(resource->flags & RADEON_FLAG_SPARSE));
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
if (usage & PIPE_MAP_UNSYNCHRONIZED) {
return ctx->ws->buffer_map(resource->buf, NULL, usage);
}
if (!(usage & PIPE_TRANSFER_WRITE)) {
if (!(usage & PIPE_MAP_WRITE)) {
/* have to wait for the last write */
rusage = RADEON_USAGE_WRITE;
}
@ -65,7 +65,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (usage & PIPE_MAP_DONTBLOCK) {
ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
@ -76,7 +76,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
if (radeon_emitted(ctx->dma.cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (usage & PIPE_MAP_DONTBLOCK) {
ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
@ -86,7 +86,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
}
if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (usage & PIPE_MAP_DONTBLOCK) {
return NULL;
} else {
/* We will be wait for the GPU. Wait for any offloaded
@ -365,45 +365,45 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
* So don't ever use staging buffers.
*/
if (rbuffer->b.is_user_ptr)
usage |= PIPE_TRANSFER_PERSISTENT;
usage |= PIPE_MAP_PERSISTENT;
/* See if the buffer range being mapped has never been initialized,
* in which case it can be mapped unsynchronized. */
if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
if (!(usage & (PIPE_MAP_UNSYNCHRONIZED |
TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
usage & PIPE_TRANSFER_WRITE &&
usage & PIPE_MAP_WRITE &&
!rbuffer->b.is_shared &&
!util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
}
/* If discarding the entire range, discard the whole resource instead. */
if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
if (usage & PIPE_MAP_DISCARD_RANGE &&
box->x == 0 && box->width == resource->width0) {
usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
!(usage & (PIPE_MAP_UNSYNCHRONIZED |
TC_TRANSFER_MAP_NO_INVALIDATE))) {
assert(usage & PIPE_TRANSFER_WRITE);
assert(usage & PIPE_MAP_WRITE);
if (r600_invalidate_buffer(rctx, rbuffer)) {
/* At this point, the buffer is always idle. */
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
} else {
/* Fall back to a temporary buffer. */
usage |= PIPE_TRANSFER_DISCARD_RANGE;
usage |= PIPE_MAP_DISCARD_RANGE;
}
}
if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
if ((usage & PIPE_MAP_DISCARD_RANGE) &&
!(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_PERSISTENT)) &&
((!(usage & (PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_PERSISTENT)) &&
r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
(rbuffer->flags & RADEON_FLAG_SPARSE))) {
assert(usage & PIPE_TRANSFER_WRITE);
assert(usage & PIPE_MAP_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU.
*/
@ -429,12 +429,12 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
}
} else {
/* At this point, the buffer is always idle (we checked it above). */
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
}
}
/* Use a staging buffer in cached GTT for reads. */
else if (((usage & PIPE_TRANSFER_READ) &&
!(usage & PIPE_TRANSFER_PERSISTENT) &&
else if (((usage & PIPE_MAP_READ) &&
!(usage & PIPE_MAP_PERSISTENT) &&
(rbuffer->domains & RADEON_DOMAIN_VRAM ||
rbuffer->flags & RADEON_FLAG_GTT_WC) &&
r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
@ -452,7 +452,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
0, 0, resource, 0, box);
data = r600_buffer_map_sync_with_rings(rctx, staging,
usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
usage & ~PIPE_MAP_UNSYNCHRONIZED);
if (!data) {
r600_resource_reference(&staging, NULL);
return NULL;
@ -506,8 +506,8 @@ static void r600_buffer_flush_region(struct pipe_context *ctx,
struct pipe_transfer *transfer,
const struct pipe_box *rel_box)
{
unsigned required_usage = PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_FLUSH_EXPLICIT;
unsigned required_usage = PIPE_MAP_WRITE |
PIPE_MAP_FLUSH_EXPLICIT;
if ((transfer->usage & required_usage) == required_usage) {
struct pipe_box box;
@ -523,8 +523,8 @@ static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
if (transfer->usage & PIPE_TRANSFER_WRITE &&
!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
if (transfer->usage & PIPE_MAP_WRITE &&
!(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
r600_buffer_do_flush_region(ctx, transfer, &transfer->box);
r600_resource_reference(&rtransfer->staging, NULL);
@ -545,10 +545,10 @@ void r600_buffer_subdata(struct pipe_context *ctx,
struct pipe_box box;
uint8_t *map = NULL;
usage |= PIPE_TRANSFER_WRITE;
usage |= PIPE_MAP_WRITE;
if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY))
usage |= PIPE_TRANSFER_DISCARD_RANGE;
if (!(usage & PIPE_MAP_DIRECTLY))
usage |= PIPE_MAP_DISCARD_RANGE;
u_box_1d(offset, size, &box);
map = r600_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);


@ -811,7 +811,7 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws,
templ.usage = PIPE_USAGE_DEFAULT;
struct r600_resource *res = r600_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
unsigned char *map = ws->buffer_map(res->buf, NULL, PIPE_TRANSFER_WRITE);
unsigned char *map = ws->buffer_map(res->buf, NULL, PIPE_MAP_WRITE);
memset(map, 0, 256);


@ -527,8 +527,8 @@ static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
{
/* Callers ensure that the buffer is currently unused by the GPU. */
uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_UNSYNCHRONIZED);
PIPE_MAP_WRITE |
PIPE_MAP_UNSYNCHRONIZED);
if (!results)
return false;
@ -1337,8 +1337,8 @@ bool r600_query_hw_get_result(struct r600_common_context *rctx,
query->ops->clear_result(query, result);
for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
unsigned usage = PIPE_TRANSFER_READ |
(wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
unsigned usage = PIPE_MAP_READ |
(wait ? 0 : PIPE_MAP_DONTBLOCK);
unsigned results_base = 0;
void *map;
@ -1896,7 +1896,7 @@ void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
return;
/* initialize buffer with zeroes */
results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_WRITE);
if (results) {
memset(results, 0, max_rbs * 4 * 4);
@ -1910,7 +1910,7 @@ void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
/* analyze results */
results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_READ);
if (results) {
for(i = 0; i < max_rbs; i++) {
/* at least highest bit will be set if backend is used */


@ -149,7 +149,7 @@ static int store_shader(struct pipe_context *ctx,
}
ptr = r600_buffer_map_sync_with_rings(
&rctx->b, shader->bo,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (R600_BIG_ENDIAN) {
for (i = 0; i < shader->shader.bc.ndw; ++i) {
ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);


@ -998,7 +998,7 @@ static void r600_init_color_surface(struct r600_context *rctx,
}
/* Set the contents to 0xCC. */
ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer);
ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_MAP_WRITE, &transfer);
memset(ptr, 0xCC, cmask.size);
pipe_buffer_unmap(&rctx->b.b, transfer);
}


@ -2143,7 +2143,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
/* Have to get start/count from indirect buffer, slow path ahead... */
struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect->buffer;
unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
PIPE_TRANSFER_READ);
PIPE_MAP_READ);
if (data) {
data += info->indirect->offset / sizeof(unsigned);
start = data[2] * index_size;


@ -59,7 +59,7 @@ static void set_random_pixels(struct pipe_context *ctx,
uint8_t *map;
unsigned x,y,z;
map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_WRITE,
map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_WRITE,
0, 0, 0, tex->width0, tex->height0,
tex->array_size, &t);
assert(map);
@ -94,7 +94,7 @@ static bool compare_textures(struct pipe_context *ctx,
int y,z;
bool pass = true;
map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_READ,
map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_READ,
0, 0, 0, tex->width0, tex->height0,
tex->array_size, &t);
assert(map);


@ -1259,7 +1259,7 @@ static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
/* r600g doesn't react to dirty_tex_descriptor_counter */
return rscreen->chip_class >= GFX6 &&
!rtex->resource.b.is_shared &&
!(transfer_usage & PIPE_TRANSFER_READ) &&
!(transfer_usage & PIPE_MAP_READ) &&
rtex->resource.b.b.last_level == 0 &&
util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
box->x, box->y, box->z,
@ -1336,7 +1336,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
*/
if (!rtex->surface.is_linear)
use_staging_texture = true;
else if (usage & PIPE_TRANSFER_READ)
else if (usage & PIPE_MAP_READ)
use_staging_texture =
rtex->resource.domains & RADEON_DOMAIN_VRAM ||
rtex->resource.flags & RADEON_FLAG_GTT_WC;
@ -1386,7 +1386,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
return NULL;
}
if (usage & PIPE_TRANSFER_READ) {
if (usage & PIPE_MAP_READ) {
struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
if (!temp) {
R600_ERR("failed to create a temporary depth texture\n");
@ -1432,7 +1432,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
r600_init_temp_resource_from_box(&resource, texture, box, level,
R600_RESOURCE_FLAG_TRANSFER);
resource.usage = (usage & PIPE_TRANSFER_READ) ?
resource.usage = (usage & PIPE_MAP_READ) ?
PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
/* Create the temporary texture. */
@ -1449,10 +1449,10 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
&trans->b.b.stride,
&trans->b.b.layer_stride);
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
r600_copy_to_staging_texture(ctx, trans);
else
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
buf = trans->staging;
} else {
@ -1481,7 +1481,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
struct pipe_resource *texture = transfer->resource;
struct r600_texture *rtex = (struct r600_texture*)texture;
if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
if ((transfer->usage & PIPE_MAP_WRITE) && rtransfer->staging) {
if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
ctx->resource_copy_region(ctx, texture, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,


@ -153,7 +153,7 @@ static void map_msg_fb_it_buf(struct ruvd_decoder *dec)
/* and map it for CPU access */
ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
/* calc buffer offsets */
dec->msg = (struct ruvd_msg *)ptr;
@ -842,7 +842,7 @@ static void ruvd_begin_frame(struct pipe_video_codec *decoder,
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(
dec->bs_buffers[dec->cur_buffer].res->buf,
dec->cs, PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
dec->cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
}
/**
@ -896,7 +896,7 @@ static void ruvd_decode_bitstream(struct pipe_video_codec *decoder,
}
dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
PIPE_TRANSFER_WRITE |
PIPE_MAP_WRITE |
RADEON_TRANSFER_TEMPORARY);
if (!dec->bs_ptr)
return;


@ -71,7 +71,7 @@ static void flush(struct rvce_encoder *enc)
#if 0
static void dump_feedback(struct rvce_encoder *enc, struct rvid_buffer *fb)
{
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_MAP_READ_WRITE);
unsigned i = 0;
fprintf(stderr, "\n");
fprintf(stderr, "encStatus:\t\t\t%08x\n", ptr[i++]);
@ -359,7 +359,7 @@ static void rvce_get_feedback(struct pipe_video_codec *encoder,
if (size) {
uint32_t *ptr = enc->ws->buffer_map(
fb->res->buf, enc->cs,
PIPE_TRANSFER_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
if (ptr[1]) {
*size = ptr[4] - ptr[9];


@ -98,12 +98,12 @@ bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
goto error;
src = ws->buffer_map(old_buf.res->buf, cs,
PIPE_TRANSFER_READ | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_READ | RADEON_TRANSFER_TEMPORARY);
if (!src)
goto error;
dst = ws->buffer_map(new_buf->res->buf, cs,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!dst)
goto error;


@ -144,7 +144,7 @@ static void map_msg_fb_it_buf(struct ruvd_decoder *dec)
/* and map it for CPU access */
ptr =
dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
/* calc buffer offsets */
dec->msg = (struct ruvd_msg *)ptr;
@ -1014,7 +1014,7 @@ static void ruvd_begin_frame(struct pipe_video_codec *decoder, struct pipe_video
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(dec->bs_buffers[dec->cur_buffer].res->buf, dec->cs,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
}
/**
@ -1058,7 +1058,7 @@ static void ruvd_decode_bitstream(struct pipe_video_codec *decoder,
}
dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!dec->bs_ptr)
return;


@ -247,7 +247,7 @@ static void radeon_uvd_enc_get_feedback(struct pipe_video_codec *encoder, void *
if (NULL != size) {
radeon_uvd_enc_feedback_t *fb_data = (radeon_uvd_enc_feedback_t *)enc->ws->buffer_map(
fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
fb->res->buf, enc->cs, PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!fb_data->status)
*size = fb_data->bitstream_size;


@ -59,7 +59,7 @@ static void flush(struct rvce_encoder *enc)
#if 0
static void dump_feedback(struct rvce_encoder *enc, struct rvid_buffer *fb)
{
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_MAP_READ_WRITE);
unsigned i = 0;
fprintf(stderr, "\n");
fprintf(stderr, "encStatus:\t\t\t%08x\n", ptr[i++]);
@ -346,7 +346,7 @@ static void rvce_get_feedback(struct pipe_video_codec *encoder, void *feedback,
if (size) {
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs,
PIPE_TRANSFER_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
if (ptr[1]) {
*size = ptr[4] - ptr[9];


@ -961,7 +961,7 @@ static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec,
/* ctx needs probs table */
ptr = dec->ws->buffer_map(dec->ctx.res->buf, dec->cs,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
fill_probs_table(ptr);
dec->ws->buffer_unmap(dec->ctx.res->buf);
dec->bs_ptr = NULL;
@ -1052,7 +1052,7 @@ static void map_msg_fb_it_probs_buf(struct radeon_decoder *dec)
/* and map it for CPU access */
ptr =
dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
/* calc buffer offsets */
dec->msg = ptr;
@ -1331,7 +1331,7 @@ static void radeon_dec_begin_frame(struct pipe_video_codec *decoder,
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(dec->bs_buffers[dec->cur_buffer].res->buf, dec->cs,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
}
/**
@ -1376,7 +1376,7 @@ static void radeon_dec_decode_bitstream(struct pipe_video_codec *decoder,
}
dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!dec->bs_ptr)
return;
@ -1559,7 +1559,7 @@ struct pipe_video_codec *radeon_create_decoder(struct pipe_context *context,
buf = &dec->msg_fb_it_probs_buffers[i];
ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
ptr += FB_BUFFER_OFFSET + FB_BUFFER_SIZE;
fill_probs_table(ptr);
dec->ws->buffer_unmap(buf->res->buf);


@ -363,7 +363,7 @@ static void radeon_enc_get_feedback(struct pipe_video_codec *encoder, void *feed
if (size) {
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs,
PIPE_TRANSFER_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
if (ptr[1])
*size = ptr[6];
else


@ -86,11 +86,11 @@ bool si_vid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
if (!si_vid_create_buffer(screen, new_buf, new_size, new_buf->usage))
goto error;
src = ws->buffer_map(old_buf.res->buf, cs, PIPE_TRANSFER_READ | RADEON_TRANSFER_TEMPORARY);
src = ws->buffer_map(old_buf.res->buf, cs, PIPE_MAP_READ | RADEON_TRANSFER_TEMPORARY);
if (!src)
goto error;
dst = ws->buffer_map(new_buf->res->buf, cs, PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
dst = ws->buffer_map(new_buf->res->buf, cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!dst)
goto error;


@ -101,7 +101,7 @@ enum radeon_transfer_flags
* Not unmapping buffers is an important performance optimization for
* OpenGL (avoids kernel overhead for frequently mapped buffers).
*/
RADEON_TRANSFER_TEMPORARY = (PIPE_TRANSFER_DRV_PRV << 0),
RADEON_TRANSFER_TEMPORARY = (PIPE_MAP_DRV_PRV << 0),
};
#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)
@ -310,7 +310,7 @@ struct radeon_winsys {
*
* \param buf A winsys buffer object to map.
* \param cs A command stream to flush if the buffer is referenced by it.
* \param usage A bitmask of the PIPE_TRANSFER_* and RADEON_TRANSFER_* flags.
* \param usage A bitmask of the PIPE_MAP_* and RADEON_TRANSFER_* flags.
* \return The pointer at the beginning of the buffer.
*/
void *(*buffer_map)(struct pb_buffer *buf, struct radeon_cmdbuf *cs,


@ -156,7 +156,7 @@ static bool gfx10_alloc_query_buffer(struct si_context *sctx)
* compatibility with the SET_PREDICATION packet.
*/
uint64_t *results = sctx->ws->buffer_map(qbuf->buf->buf, NULL,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED);
PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED);
assert(results);
for (unsigned i = 0, e = qbuf->buf->b.b.width0 / sizeof(struct gfx10_sh_query_buffer_mem); i < e;
@ -292,7 +292,7 @@ static bool gfx10_sh_query_get_result(struct si_context *sctx, struct si_query *
for (struct gfx10_sh_query_buffer *qbuf = query->last;;
qbuf = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.prev, list)) {
unsigned usage = PIPE_TRANSFER_READ | (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
unsigned usage = PIPE_MAP_READ | (wait ? 0 : PIPE_MAP_DONTBLOCK);
void *map;
if (rquery->b.flushed)


@ -51,18 +51,18 @@ void *si_buffer_map_sync_with_rings(struct si_context *sctx, struct si_resource
assert(!(resource->flags & RADEON_FLAG_SPARSE));
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
if (usage & PIPE_MAP_UNSYNCHRONIZED) {
return sctx->ws->buffer_map(resource->buf, NULL, usage);
}
if (!(usage & PIPE_TRANSFER_WRITE)) {
if (!(usage & PIPE_MAP_WRITE)) {
/* have to wait for the last write */
rusage = RADEON_USAGE_WRITE;
}
if (radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size) &&
sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (usage & PIPE_MAP_DONTBLOCK) {
si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return NULL;
} else {
@ -72,7 +72,7 @@ void *si_buffer_map_sync_with_rings(struct si_context *sctx, struct si_resource
}
if (radeon_emitted(sctx->sdma_cs, 0) &&
sctx->ws->cs_is_buffer_referenced(sctx->sdma_cs, resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (usage & PIPE_MAP_DONTBLOCK) {
si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
@ -82,7 +82,7 @@ void *si_buffer_map_sync_with_rings(struct si_context *sctx, struct si_resource
}
if (busy || !sctx->ws->buffer_wait(resource->buf, 0, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (usage & PIPE_MAP_DONTBLOCK) {
return NULL;
} else {
/* We will be wait for the GPU. Wait for any offloaded
@ -339,7 +339,7 @@ static void *si_buffer_get_transfer(struct pipe_context *ctx, struct pipe_resour
struct si_context *sctx = (struct si_context *)ctx;
struct si_transfer *transfer;
if (usage & PIPE_TRANSFER_THREAD_SAFE)
if (usage & PIPE_MAP_THREAD_SAFE)
transfer = malloc(sizeof(*transfer));
else if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
transfer = slab_alloc(&sctx->pool_transfers_unsync);
@ -382,60 +382,60 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resour
* So don't ever use staging buffers.
*/
if (buf->b.is_user_ptr)
usage |= PIPE_TRANSFER_PERSISTENT;
usage |= PIPE_MAP_PERSISTENT;
/* See if the buffer range being mapped has never been initialized,
* in which case it can be mapped unsynchronized. */
if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
usage & PIPE_TRANSFER_WRITE && !buf->b.is_shared &&
if (!(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
usage & PIPE_MAP_WRITE && !buf->b.is_shared &&
!util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) {
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
}
/* If discarding the entire range, discard the whole resource instead. */
if (usage & PIPE_TRANSFER_DISCARD_RANGE && box->x == 0 && box->width == resource->width0) {
usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
if (usage & PIPE_MAP_DISCARD_RANGE && box->x == 0 && box->width == resource->width0) {
usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
/* If a buffer in VRAM is too large and the range is discarded, don't
* map it directly. This makes sure that the buffer stays in VRAM.
*/
bool force_discard_range = false;
if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE | PIPE_TRANSFER_DISCARD_RANGE) &&
!(usage & PIPE_TRANSFER_PERSISTENT) &&
if (usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_DISCARD_RANGE) &&
!(usage & PIPE_MAP_PERSISTENT) &&
/* Try not to decrement the counter if it's not positive. Still racy,
* but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
buf->max_forced_staging_uploads > 0 &&
p_atomic_dec_return(&buf->max_forced_staging_uploads) >= 0) {
usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE | PIPE_TRANSFER_UNSYNCHRONIZED);
usage |= PIPE_TRANSFER_DISCARD_RANGE;
usage &= ~(PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_UNSYNCHRONIZED);
usage |= PIPE_MAP_DISCARD_RANGE;
force_discard_range = true;
}
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))) {
assert(usage & PIPE_TRANSFER_WRITE);
if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
!(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))) {
assert(usage & PIPE_MAP_WRITE);
if (si_invalidate_buffer(sctx, buf)) {
/* At this point, the buffer is always idle. */
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
} else {
/* Fall back to a temporary buffer. */
usage |= PIPE_TRANSFER_DISCARD_RANGE;
usage |= PIPE_MAP_DISCARD_RANGE;
}
}
if (usage & PIPE_TRANSFER_FLUSH_EXPLICIT &&
if (usage & PIPE_MAP_FLUSH_EXPLICIT &&
buf->b.b.flags & SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA) {
usage &= ~(PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_PERSISTENT);
usage |= PIPE_TRANSFER_DISCARD_RANGE;
usage &= ~(PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_PERSISTENT);
usage |= PIPE_MAP_DISCARD_RANGE;
force_discard_range = true;
}
if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_PERSISTENT))) ||
if (usage & PIPE_MAP_DISCARD_RANGE &&
((!(usage & (PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_PERSISTENT))) ||
(buf->flags & RADEON_FLAG_SPARSE))) {
assert(usage & PIPE_TRANSFER_WRITE);
assert(usage & PIPE_MAP_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU.
*/
@ -469,16 +469,16 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resour
}
} else {
/* At this point, the buffer is always idle (we checked it above). */
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
}
}
/* Use a staging buffer in cached GTT for reads. */
else if (((usage & PIPE_TRANSFER_READ) && !(usage & PIPE_TRANSFER_PERSISTENT) &&
else if (((usage & PIPE_MAP_READ) && !(usage & PIPE_MAP_PERSISTENT) &&
(buf->domains & RADEON_DOMAIN_VRAM || buf->flags & RADEON_FLAG_GTT_WC)) ||
(buf->flags & RADEON_FLAG_SPARSE)) {
struct si_resource *staging;
assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_TRANSFER_THREAD_SAFE)));
assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_MAP_THREAD_SAFE)));
staging = si_aligned_buffer_create(ctx->screen, SI_RESOURCE_FLAG_UNCACHED,
PIPE_USAGE_STAGING,
box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT), 256);
@ -487,7 +487,7 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resour
si_sdma_copy_buffer(sctx, &staging->b.b, resource, box->x % SI_MAP_BUFFER_ALIGNMENT,
box->x, box->width);
data = si_buffer_map_sync_with_rings(sctx, staging, usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
data = si_buffer_map_sync_with_rings(sctx, staging, usage & ~PIPE_MAP_UNSYNCHRONIZED);
if (!data) {
si_resource_reference(&staging, NULL);
return NULL;
@ -570,7 +570,7 @@ static void si_buffer_do_flush_region(struct pipe_context *ctx, struct pipe_tran
static void si_buffer_flush_region(struct pipe_context *ctx, struct pipe_transfer *transfer,
const struct pipe_box *rel_box)
{
unsigned required_usage = PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT;
unsigned required_usage = PIPE_MAP_WRITE | PIPE_MAP_FLUSH_EXPLICIT;
if ((transfer->usage & required_usage) == required_usage) {
struct pipe_box box;
@ -585,14 +585,14 @@ static void si_buffer_transfer_unmap(struct pipe_context *ctx, struct pipe_trans
struct si_context *sctx = (struct si_context *)ctx;
struct si_transfer *stransfer = (struct si_transfer *)transfer;
if (transfer->usage & PIPE_TRANSFER_WRITE && !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
if (transfer->usage & PIPE_MAP_WRITE && !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
si_buffer_do_flush_region(ctx, transfer, &transfer->box);
si_resource_reference(&stransfer->staging, NULL);
assert(stransfer->b.staging == NULL); /* for threaded context only */
pipe_resource_reference(&transfer->resource, NULL);
if (transfer->usage & PIPE_TRANSFER_THREAD_SAFE) {
if (transfer->usage & PIPE_MAP_THREAD_SAFE) {
free(transfer);
} else {
/* Don't use pool_transfers_unsync. We are always in the driver
@ -609,10 +609,10 @@ static void si_buffer_subdata(struct pipe_context *ctx, struct pipe_resource *bu
struct pipe_box box;
uint8_t *map = NULL;
usage |= PIPE_TRANSFER_WRITE;
usage |= PIPE_MAP_WRITE;
if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY))
usage |= PIPE_TRANSFER_DISCARD_RANGE;
if (!(usage & PIPE_MAP_DIRECTLY))
usage |= PIPE_MAP_DISCARD_RANGE;
u_box_1d(offset, size, &box);
map = si_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);


@ -109,7 +109,7 @@ static void si_dump_shader(struct si_screen *sscreen, struct si_shader *shader,
const char *mapped = sscreen->ws->buffer_map(
shader->bo->buf, NULL,
PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_READ | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ | RADEON_TRANSFER_TEMPORARY);
for (unsigned i = 0; i < size; i += 4) {
fprintf(f, " %4x: %08x\n", i, *(uint32_t *)(mapped + i));
@ -403,7 +403,7 @@ static void si_log_chunk_type_cs_print(void *data, FILE *f)
* If the GPU is hung, there is no point in waiting for it.
*/
uint32_t *map = ctx->ws->buffer_map(scs->trace_buf->buf, NULL,
PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_READ);
PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ);
if (map) {
last_trace_id = map[0];
last_compute_trace_id = map[1];


@ -222,7 +222,7 @@ struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
static bool si_fine_fence_signaled(struct radeon_winsys *rws, const struct si_fine_fence *fine)
{
char *map =
rws->buffer_map(fine->buf->buf, NULL, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
rws->buffer_map(fine->buf->buf, NULL, PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED);
if (!map)
return false;


@ -1053,7 +1053,7 @@ static bool si_pc_query_get_result(struct si_context *sctx, struct si_query *squ
memset(result, 0, sizeof(result->batch[0]) * query->num_counters);
for (struct si_query_buffer *qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
unsigned usage = PIPE_TRANSFER_READ | (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
unsigned usage = PIPE_MAP_READ | (wait ? 0 : PIPE_MAP_DONTBLOCK);
unsigned results_base = 0;
void *map;


@ -525,7 +525,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
goto fail;
sctx->border_color_map =
ws->buffer_map(sctx->border_color_buffer->buf, NULL, PIPE_TRANSFER_WRITE);
ws->buffer_map(sctx->border_color_buffer->buf, NULL, PIPE_MAP_WRITE);
if (!sctx->border_color_map)
goto fail;


@ -669,7 +669,7 @@ static bool si_query_hw_prepare_buffer(struct si_context *sctx, struct si_query_
/* The caller ensures that the buffer is currently unused by the GPU. */
uint32_t *results = screen->ws->buffer_map(qbuf->buf->buf, NULL,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED);
PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED);
if (!results)
return false;
@ -1408,7 +1408,7 @@ bool si_query_hw_get_result(struct si_context *sctx, struct si_query *squery, bo
query->ops->clear_result(query, result);
for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
unsigned usage = PIPE_TRANSFER_READ | (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
unsigned usage = PIPE_MAP_READ | (wait ? 0 : PIPE_MAP_DONTBLOCK);
unsigned results_base = 0;
void *map;


@ -899,7 +899,7 @@ bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader
u.rx_va = shader->bo->gpu_address;
u.rx_ptr = sscreen->ws->buffer_map(
shader->bo->buf, NULL,
PIPE_TRANSFER_READ_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
PIPE_MAP_READ_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
if (!u.rx_ptr)
return false;


@ -4727,7 +4727,7 @@ static void *si_create_vertex_elements(struct pipe_context *ctx, unsigned count,
return NULL;
}
void *map =
sscreen->ws->buffer_map(v->instance_divisor_factor_buffer->buf, NULL, PIPE_TRANSFER_WRITE);
sscreen->ws->buffer_map(v->instance_divisor_factor_buffer->buf, NULL, PIPE_MAP_WRITE);
memcpy(map, divisor_factors, num_divisors * sizeof(divisor_factors[0]));
}
return v;


@ -1425,7 +1425,7 @@ static void si_get_draw_start_count(struct si_context *sctx, const struct pipe_d
if (indirect->indirect_draw_count) {
data = pipe_buffer_map_range(&sctx->b, indirect->indirect_draw_count,
indirect->indirect_draw_count_offset, sizeof(unsigned),
PIPE_TRANSFER_READ, &transfer);
PIPE_MAP_READ, &transfer);
indirect_count = *data;
@ -1441,7 +1441,7 @@ static void si_get_draw_start_count(struct si_context *sctx, const struct pipe_d
map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
data = pipe_buffer_map_range(&sctx->b, indirect->buffer, indirect->offset, map_size,
PIPE_TRANSFER_READ, &transfer);
PIPE_MAP_READ, &transfer);
begin = UINT_MAX;
end = 0;
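
The reads of the indirect draw parameters above follow the same shape; a hedged sketch with a hypothetical read_u32() helper, again using only the u_inlines.h wrappers and PIPE_MAP_READ:

```c
#include <stdint.h>
#include "util/u_inlines.h"   /* pipe_buffer_map_range(), pipe_buffer_unmap() */

/* Hypothetical helper: read one uint32_t at `offset`, the same kind of
 * access si_get_draw_start_count does for indirect_draw_count above. */
static uint32_t
read_u32(struct pipe_context *ctx, struct pipe_resource *buf, unsigned offset)
{
   struct pipe_transfer *transfer;
   const uint32_t *map = pipe_buffer_map_range(ctx, buf, offset,
                                               sizeof(uint32_t),
                                               PIPE_MAP_READ, &transfer);
   uint32_t value = 0;

   if (map) {
      value = *map;
      pipe_buffer_unmap(ctx, transfer);
   }
   return value;
}
```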


@ -58,7 +58,7 @@ static void set_random_pixels(struct pipe_context *ctx, struct pipe_resource *te
uint8_t *map;
int x, y, z;
map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_WRITE, 0, 0, 0, tex->width0, tex->height0,
map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_WRITE, 0, 0, 0, tex->width0, tex->height0,
tex->array_size, &t);
assert(map);
@ -89,7 +89,7 @@ static bool compare_textures(struct pipe_context *ctx, struct pipe_resource *tex
bool pass = true;
unsigned stride = util_format_get_stride(tex->format, tex->width0);
map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_READ, 0, 0, 0, tex->width0, tex->height0,
map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_READ, 0, 0, 0, tex->width0, tex->height0,
tex->array_size, &t);
assert(map);
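
The test code above depends on the stride and layer_stride reported by the transfer; a rough write-side sketch of the same walk, with a made-up clear_level0() helper and assuming a non-block-compressed format:

```c
#include <stdint.h>
#include <string.h>
#include "util/u_inlines.h"          /* pipe_transfer_map_3d(), pipe_transfer_unmap() */
#include "util/format/u_format.h"    /* util_format_get_stride() */

/* Hypothetical helper: set every texel of level 0 (all layers) to a
 * constant byte, honoring the per-row stride and per-layer stride the
 * transfer reports. */
static void
clear_level0(struct pipe_context *ctx, struct pipe_resource *tex, uint8_t value)
{
   struct pipe_transfer *t;
   uint8_t *map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_WRITE,
                                       0, 0, 0, tex->width0, tex->height0,
                                       tex->array_size, &t);
   if (!map)
      return;

   for (unsigned z = 0; z < tex->array_size; z++) {
      for (unsigned y = 0; y < tex->height0; y++) {
         memset(map + z * t->layer_stride + y * t->stride, value,
                util_format_get_stride(tex->format, tex->width0));
      }
   }
   pipe_transfer_unmap(ctx, t);
}
```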


@ -1171,7 +1171,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
struct si_resource *buf = si_aligned_buffer_create(screen, 0, PIPE_USAGE_STREAM,
dcc_retile_map_size,
sscreen->info.tcc_cache_line_size);
void *map = sscreen->ws->buffer_map(buf->buf, NULL, PIPE_TRANSFER_WRITE);
void *map = sscreen->ws->buffer_map(buf->buf, NULL, PIPE_MAP_WRITE);
/* Upload the retile map into the staging buffer. */
memcpy(map, tex->surface.u.gfx9.dcc_retile_map, dcc_retile_map_size);
@ -1593,7 +1593,7 @@ static bool si_can_invalidate_texture(struct si_screen *sscreen, struct si_textu
unsigned transfer_usage, const struct pipe_box *box)
{
return !tex->buffer.b.is_shared && !(tex->surface.flags & RADEON_SURF_IMPORTED) &&
!(transfer_usage & PIPE_TRANSFER_READ) && tex->buffer.b.b.last_level == 0 &&
!(transfer_usage & PIPE_MAP_READ) && tex->buffer.b.b.last_level == 0 &&
util_texrange_covers_whole_level(&tex->buffer.b.b, 0, box->x, box->y, box->z, box->width,
box->height, box->depth);
}
@ -1658,7 +1658,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx, struct pipe_resou
*/
if (!tex->surface.is_linear || (tex->buffer.flags & RADEON_FLAG_ENCRYPTED))
use_staging_texture = true;
else if (usage & PIPE_TRANSFER_READ)
else if (usage & PIPE_MAP_READ)
use_staging_texture =
tex->buffer.domains & RADEON_DOMAIN_VRAM || tex->buffer.flags & RADEON_FLAG_GTT_WC;
/* Write & linear only: */
@ -1683,7 +1683,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx, struct pipe_resou
if (use_staging_texture) {
struct pipe_resource resource;
struct si_texture *staging;
unsigned bo_usage = usage & PIPE_TRANSFER_READ ? PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
unsigned bo_usage = usage & PIPE_MAP_READ ? PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
unsigned bo_flags = SI_RESOURCE_FLAG_FORCE_LINEAR;
/* The pixel shader has a bad access pattern for linear textures.
@ -1696,7 +1696,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx, struct pipe_resou
!tex->is_depth &&
!util_format_is_compressed(texture->format) &&
/* Texture uploads with DCC use the pixel shader to blit */
(!(usage & PIPE_TRANSFER_WRITE) || !vi_dcc_enabled(tex, level)))
(!(usage & PIPE_MAP_WRITE) || !vi_dcc_enabled(tex, level)))
bo_flags |= SI_RESOURCE_FLAG_UNCACHED;
si_init_temp_resource_from_box(&resource, texture, box, level, bo_usage,
@ -1721,10 +1721,10 @@ static void *si_texture_transfer_map(struct pipe_context *ctx, struct pipe_resou
si_texture_get_offset(sctx->screen, staging, 0, NULL, &trans->b.b.stride,
&trans->b.b.layer_stride);
if (usage & PIPE_TRANSFER_READ)
if (usage & PIPE_MAP_READ)
si_copy_to_staging_texture(ctx, trans);
else
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
usage |= PIPE_MAP_UNSYNCHRONIZED;
buf = trans->staging;
} else {
@ -1769,7 +1769,7 @@ static void si_texture_transfer_unmap(struct pipe_context *ctx, struct pipe_tran
sctx->ws->buffer_unmap(buf->buf);
}
if ((transfer->usage & PIPE_TRANSFER_WRITE) && stransfer->staging)
if ((transfer->usage & PIPE_MAP_WRITE) && stransfer->staging)
si_copy_from_staging_texture(ctx, stransfer);
if (stransfer->staging) {


@ -152,7 +152,7 @@ fill_grid_size(struct pipe_context *context,
params = pipe_buffer_map_range(context, info->indirect,
info->indirect_offset,
3 * sizeof(uint32_t),
PIPE_TRANSFER_READ,
PIPE_MAP_READ,
&transfer);
if (!transfer)


@ -254,7 +254,7 @@ prepare_shader_sampling(
struct softpipe_screen *screen = softpipe_screen(tex->screen);
struct sw_winsys *winsys = screen->winsys;
addr = winsys->displaytarget_map(winsys, sp_tex->dt,
PIPE_TRANSFER_READ);
PIPE_MAP_READ);
row_stride[0] = sp_tex->stride[0];
img_stride[0] = sp_tex->img_stride[0];
mip_offsets[0] = 0;


@ -249,7 +249,7 @@ sp_find_cached_tile_tex(struct softpipe_tex_tile_cache *tc,
pipe_transfer_map(tc->pipe, tc->texture,
addr.bits.level,
layer,
PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED,
PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED,
0, 0, width, height, &tc->tex_trans);
tc->tex_level = addr.bits.level;


@ -348,7 +348,7 @@ softpipe_surface_destroy(struct pipe_context *pipe,
* \param pipe rendering context
* \param resource the resource to transfer in/out of
* \param level which mipmap level
* \param usage bitmask of PIPE_TRANSFER_x flags
* \param usage bitmask of PIPE_MAP_x flags
* \param box the 1D/2D/3D region of interest
*/
static void *
@ -394,9 +394,9 @@ softpipe_transfer_map(struct pipe_context *pipe,
* Transfers, like other pipe operations, must happen in order, so flush the
* context if necessary.
*/
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
boolean read_only = !(usage & PIPE_TRANSFER_WRITE);
boolean do_not_block = !!(usage & PIPE_TRANSFER_DONTBLOCK);
if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
boolean read_only = !(usage & PIPE_MAP_WRITE);
boolean do_not_block = !!(usage & PIPE_MAP_DONTBLOCK);
if (!softpipe_flush_resource(pipe, resource,
level, box->depth > 1 ? -1 : box->z,
0, /* flush_flags */
@ -468,7 +468,7 @@ softpipe_transfer_unmap(struct pipe_context *pipe,
winsys->displaytarget_unmap(winsys, spr->dt);
}
if (transfer->usage & PIPE_TRANSFER_WRITE) {
if (transfer->usage & PIPE_MAP_WRITE) {
/* Mark the texture as dirty to expire the tile caches. */
spr->timestamp++;
}


@ -202,8 +202,8 @@ sp_tile_cache_set_surface(struct softpipe_tile_cache *tc,
for (i = 0; i < tc->num_maps; i++) {
tc->transfer_map[i] = pipe_transfer_map(pipe, ps->texture,
ps->u.tex.level, ps->u.tex.first_layer + i,
PIPE_TRANSFER_READ_WRITE |
PIPE_TRANSFER_UNSYNCHRONIZED,
PIPE_MAP_READ_WRITE |
PIPE_MAP_UNSYNCHRONIZED,
0, 0, ps->width, ps->height,
&tc->transfer[i]);
}
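
Both tile caches above can pass PIPE_MAP_UNSYNCHRONIZED because they track themselves when the mapped level is idle; a small caller-side sketch of that pattern (map_level_for_read() and the already_idle parameter are illustrative, not Mesa API):

```c
#include <stdbool.h>
#include "util/u_inlines.h"   /* pipe_transfer_map() */
#include "util/u_math.h"      /* u_minify() */

/* Hypothetical helper: map one 2D mip level for CPU reads.  Passing
 * PIPE_MAP_UNSYNCHRONIZED skips the wait on pending GPU work, which is
 * only safe when the caller already knows the level is idle. */
static void *
map_level_for_read(struct pipe_context *ctx, struct pipe_resource *tex,
                   unsigned level, bool already_idle,
                   struct pipe_transfer **transfer)
{
   unsigned usage = PIPE_MAP_READ |
                    (already_idle ? PIPE_MAP_UNSYNCHRONIZED : 0);

   return pipe_transfer_map(ctx, tex, level, 0 /* layer */, usage,
                            0, 0,
                            u_minify(tex->width0, level),
                            u_minify(tex->height0, level),
                            transfer);
}
```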


@ -56,7 +56,7 @@ generate_indices(struct svga_hwtnl *hwtnl,
if (!dst)
goto fail;
dst_map = pipe_buffer_map(pipe, dst, PIPE_TRANSFER_WRITE, &transfer);
dst_map = pipe_buffer_map(pipe, dst, PIPE_MAP_WRITE, &transfer);
if (!dst_map)
goto fail;


@ -115,14 +115,14 @@ translate_indices(struct svga_hwtnl *hwtnl,
if (!dst)
goto fail;
dst_map = pipe_buffer_map(pipe, dst, PIPE_TRANSFER_WRITE, &dst_transfer);
dst_map = pipe_buffer_map(pipe, dst, PIPE_MAP_WRITE, &dst_transfer);
if (!dst_map)
goto fail;
*out_offset = 0;
src_map = pipe_buffer_map(pipe, info->index.resource,
PIPE_TRANSFER_READ |
PIPE_TRANSFER_UNSYNCHRONIZED,
PIPE_MAP_READ |
PIPE_MAP_UNSYNCHRONIZED,
&src_transfer);
if (!src_map)
goto fail;


@ -101,7 +101,7 @@ define_query_vgpu9(struct svga_context *svga,
return PIPE_ERROR_OUT_OF_MEMORY;
sq->queryResult = (SVGA3dQueryResult *)
sws->buffer_map(sws, sq->hwbuf, PIPE_TRANSFER_WRITE);
sws->buffer_map(sws, sq->hwbuf, PIPE_MAP_WRITE);
if (!sq->queryResult) {
sws->buffer_destroy(sws, sq->hwbuf);
return PIPE_ERROR_OUT_OF_MEMORY;


@ -92,7 +92,7 @@ svga_define_stream_output(struct svga_context *svga,
bufSize);
if (!declBuf)
return PIPE_ERROR;
map = sws->buffer_map(sws, declBuf, PIPE_TRANSFER_WRITE);
map = sws->buffer_map(sws, declBuf, PIPE_MAP_WRITE);
if (!map) {
sws->buffer_destroy(sws, declBuf);
return PIPE_ERROR;


@ -122,14 +122,14 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
transfer->stride = 0;
transfer->layer_stride = 0;
if (usage & PIPE_TRANSFER_WRITE) {
if (usage & PIPE_MAP_WRITE) {
/* If we write to the buffer for any reason, free any saved translated
* vertices.
*/
pipe_resource_reference(&sbuf->translated_indices.buffer, NULL);
}
if ((usage & PIPE_TRANSFER_READ) && sbuf->dirty &&
if ((usage & PIPE_MAP_READ) && sbuf->dirty &&
!sbuf->key.coherent && !svga->swc->force_coherent) {
/* Host-side buffers can only be dirtied with vgpu10 features
@ -157,8 +157,8 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
sbuf->dirty = FALSE;
}
if (usage & PIPE_TRANSFER_WRITE) {
if ((usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
if (usage & PIPE_MAP_WRITE) {
if ((usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
!(resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)) {
/*
* Flush any pending primitives, finish writing any pending DMA
@ -175,7 +175,7 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
* Instead of flushing the context command buffer, simply discard
* the current hwbuf, and start a new one.
* With GB objects, the map operation takes care of this
* if passed the PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE flag,
* if passed the PIPE_MAP_DISCARD_WHOLE_RESOURCE flag,
* and the old backing store is busy.
*/
@ -187,7 +187,7 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
sbuf->dma.flags.discard = TRUE;
}
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
if (usage & PIPE_MAP_UNSYNCHRONIZED) {
if (!sbuf->map.num_ranges) {
/*
* No pending ranges to upload so far, so we can tell the host to
@ -223,7 +223,7 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
* without having to do a DMA download from the host.
*/
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (usage & PIPE_MAP_DONTBLOCK) {
/*
* Flushing the command buffer here will most likely cause
* the map of the hwbuf below to block, so preemptively
@ -316,8 +316,8 @@ svga_buffer_transfer_flush_region(struct pipe_context *pipe,
unsigned offset = transfer->box.x + box->x;
unsigned length = box->width;
assert(transfer->usage & PIPE_TRANSFER_WRITE);
assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
assert(transfer->usage & PIPE_MAP_WRITE);
assert(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT);
if (!(svga->swc->force_coherent || sbuf->key.coherent) || sbuf->swbuf) {
mtx_lock(&ss->swc_mutex);
@ -352,8 +352,8 @@ svga_buffer_transfer_unmap(struct pipe_context *pipe,
svga_buffer_hw_storage_unmap(svga, sbuf);
}
if (transfer->usage & PIPE_TRANSFER_WRITE) {
if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
if (transfer->usage & PIPE_MAP_WRITE) {
if (!(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT)) {
/*
* Mapped range not flushed explicitly, so flush the whole buffer,
* and tell the host to discard the contents when processing the DMA
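
The svga asserts above encode the PIPE_MAP_FLUSH_EXPLICIT contract: only ranges the caller explicitly flushes have to reach the device. A hedged sketch of a caller honoring that contract (patch_two_ranges() is a hypothetical helper built on the u_inlines.h wrappers; flush offsets are relative to the start of the buffer):

```c
#include <stdint.h>
#include <string.h>
#include "util/u_inlines.h"   /* pipe_buffer_map(), pipe_buffer_flush_mapped_range() */

/* Hypothetical helper: rewrite two disjoint ranges of a buffer and flush
 * only those ranges.  With PIPE_MAP_FLUSH_EXPLICIT, nothing outside the
 * flushed regions is required to be written back to the resource. */
static void
patch_two_ranges(struct pipe_context *ctx, struct pipe_resource *buf,
                 const void *a, unsigned a_off, unsigned a_len,
                 const void *b, unsigned b_off, unsigned b_len)
{
   struct pipe_transfer *transfer;
   uint8_t *map = pipe_buffer_map(ctx, buf,
                                  PIPE_MAP_WRITE | PIPE_MAP_FLUSH_EXPLICIT,
                                  &transfer);
   if (!map)
      return;

   memcpy(map + a_off, a, a_len);
   pipe_buffer_flush_mapped_range(ctx, transfer, a_off, a_len);

   memcpy(map + b_off, b, b_len);
   pipe_buffer_flush_mapped_range(ctx, transfer, b_off, b_len);

   pipe_buffer_unmap(ctx, transfer);
}
```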

Some files were not shown because too many files have changed in this diff Show More