gallium/util: replace pipe_mutex_lock() with mtx_lock()

pipe_mutex_lock() was made unnecessary with fd33a6bcd7.

Replaced using:
find ./src -type f -exec sed -i -- \
's:pipe_mutex_lock(\([^)]*\)):mtx_lock(\&\1):g' {} \;
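
For example, the \(...\) capture group grabs whatever expression sits between
the parentheses, and the escaped \& re-emits it behind a literal ampersand
(unescaped, & in a sed replacement means the whole match). So a call such as

    pipe_mutex_lock(screen->lock);

becomes

    mtx_lock(&screen->lock);

Any expression inside the parentheses is rewritten the same way, which is why
the change could be applied mechanically across the tree.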

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Timothy Arceri 2017-03-05 12:12:30 +11:00
parent be188289e1
commit ba72554f3e
87 changed files with 602 additions and 605 deletions


@ -189,7 +189,7 @@ hud_get_num_cpufreq(bool displayhelp)
int cpu_index;
/* Return the number of CPU metrics we support. */
pipe_mutex_lock(gcpufreq_mutex);
mtx_lock(&gcpufreq_mutex);
if (gcpufreq_count) {
pipe_mutex_unlock(gcpufreq_mutex);
return gcpufreq_count;


@ -246,7 +246,7 @@ hud_get_num_disks(bool displayhelp)
char name[64];
/* Return the number of block devices and partitions. */
pipe_mutex_lock(gdiskstat_mutex);
mtx_lock(&gdiskstat_mutex);
if (gdiskstat_count) {
pipe_mutex_unlock(gdiskstat_mutex);
return gdiskstat_count;


@ -331,7 +331,7 @@ hud_get_num_nics(bool displayhelp)
char name[64];
/* Return the number of network interfaces. */
pipe_mutex_lock(gnic_mutex);
mtx_lock(&gnic_mutex);
if (gnic_count) {
pipe_mutex_unlock(gnic_mutex);
return gnic_count;


@ -324,7 +324,7 @@ int
hud_get_num_sensors(bool displayhelp)
{
/* Return the number of sensors detected. */
pipe_mutex_lock(gsensor_temp_mutex);
mtx_lock(&gsensor_temp_mutex);
if (gsensors_temp_count) {
pipe_mutex_unlock(gsensor_temp_mutex);
return gsensors_temp_count;


@ -108,9 +108,6 @@ static inline int pipe_thread_is_self( pipe_thread thread )
return 0;
}
#define pipe_mutex_lock(mutex) \
(void) mtx_lock(&(mutex))
#define pipe_mutex_unlock(mutex) \
(void) mtx_unlock(&(mutex))
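
This hunk deletes the wrapper macros themselves; as the removed definitions
show, each was a one-line indirection over the C11 mutex API, which is why
the sed rewrite is a drop-in replacement. A minimal sketch of the underlying
C11 calls (assuming Mesa's c11/threads.h compatibility layer, or <threads.h>
where available):

    #include "c11/threads.h"

    static mtx_t lock;

    static void example(void)
    {
       (void) mtx_init(&lock, mtx_plain);
       mtx_lock(&lock);    /* takes a pointer, hence the '&' the sed adds */
       /* ... critical section ... */
       mtx_unlock(&lock);
       mtx_destroy(&lock);
    }
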
@ -188,7 +185,7 @@ static inline void pipe_barrier_destroy(pipe_barrier *barrier)
static inline void pipe_barrier_wait(pipe_barrier *barrier)
{
pipe_mutex_lock(barrier->mutex);
mtx_lock(&barrier->mutex);
assert(barrier->waiters < barrier->count);
barrier->waiters++;
@ -243,7 +240,7 @@ pipe_semaphore_destroy(pipe_semaphore *sema)
static inline void
pipe_semaphore_signal(pipe_semaphore *sema)
{
pipe_mutex_lock(sema->mutex);
mtx_lock(&sema->mutex);
sema->counter++;
cnd_signal(&sema->cond);
pipe_mutex_unlock(sema->mutex);
@ -253,7 +250,7 @@ pipe_semaphore_signal(pipe_semaphore *sema)
static inline void
pipe_semaphore_wait(pipe_semaphore *sema)
{
pipe_mutex_lock(sema->mutex);
mtx_lock(&sema->mutex);
while (sema->counter <= 0) {
cnd_wait(&sema->cond, &sema->mutex);
}
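
The wait path above is the condition-variable idiom these files use
throughout: cnd_wait() atomically releases the mutex while blocking and
re-acquires it before returning, and the enclosing while loop re-tests the
predicate because wakeups can be spurious. A condensed sketch (field names
from the context lines; the final decrement is assumed from the usual
counting-semaphore shape, as it falls outside the excerpt):

    mtx_lock(&sema->mutex);
    while (sema->counter <= 0)                /* a loop, not an if: wakeups may be spurious */
       cnd_wait(&sema->cond, &sema->mutex);   /* unlocks while blocked, relocks on return */
    sema->counter--;                          /* consume one signal */
    mtx_unlock(&sema->mutex);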


@ -352,7 +352,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
@ -652,7 +652,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
assert(!pipe_is_referenced(&fenced_buf->base.reference));
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
@ -669,7 +669,7 @@ fenced_buffer_map(struct pb_buffer *buf,
struct pb_fence_ops *ops = fenced_mgr->ops;
void *map = NULL;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
assert(!(flags & PB_USAGE_GPU_READ_WRITE));
@ -721,7 +721,7 @@ fenced_buffer_unmap(struct pb_buffer *buf)
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->mapcount);
if (fenced_buf->mapcount) {
@ -745,7 +745,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
enum pipe_error ret;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
if (!vl) {
/* Invalidate. */
@ -816,7 +816,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
@ -853,7 +853,7 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
/* This should only be called when the buffer is validated. Typically
* when processing relocations.
@ -917,7 +917,7 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
fenced_buf->base.vtbl = &fenced_buffer_vtbl;
fenced_buf->mgr = fenced_mgr;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
/* Try to create GPU storage without stalling. */
ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
@ -958,7 +958,7 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
@ -974,7 +974,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
/* Wait on outstanding fences. */
while (fenced_mgr->num_fenced) {
@ -982,7 +982,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
}


@ -236,7 +236,7 @@ pb_debug_buffer_destroy(struct pb_buffer *_buf)
pb_debug_buffer_check(buf);
pipe_mutex_lock(mgr->mutex);
mtx_lock(&mgr->mutex);
LIST_DEL(&buf->head);
pipe_mutex_unlock(mgr->mutex);
@ -260,7 +260,7 @@ pb_debug_buffer_map(struct pb_buffer *_buf,
if (!map)
return NULL;
pipe_mutex_lock(buf->mutex);
mtx_lock(&buf->mutex);
++buf->map_count;
debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
pipe_mutex_unlock(buf->mutex);
@ -274,7 +274,7 @@ pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
pipe_mutex_lock(buf->mutex);
mtx_lock(&buf->mutex);
assert(buf->map_count);
if(buf->map_count)
--buf->map_count;
@ -304,7 +304,7 @@ pb_debug_buffer_validate(struct pb_buffer *_buf,
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
pipe_mutex_lock(buf->mutex);
mtx_lock(&buf->mutex);
if(buf->map_count) {
debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
debug_printf("last map backtrace is\n");
@ -388,7 +388,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
if(!buf->buffer) {
FREE(buf);
#if 0
pipe_mutex_lock(mgr->mutex);
mtx_lock(&mgr->mutex);
debug_printf("%s: failed to create buffer\n", __FUNCTION__);
if(!LIST_IS_EMPTY(&mgr->list))
pb_debug_manager_dump_locked(mgr);
@ -419,7 +419,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
(void) mtx_init(&buf->mutex, mtx_plain);
pipe_mutex_lock(mgr->mutex);
mtx_lock(&mgr->mutex);
LIST_ADDTAIL(&buf->head, &mgr->list);
pipe_mutex_unlock(mgr->mutex);
@ -442,7 +442,7 @@ pb_debug_manager_destroy(struct pb_manager *_mgr)
{
struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
pipe_mutex_lock(mgr->mutex);
mtx_lock(&mgr->mutex);
if(!LIST_IS_EMPTY(&mgr->list)) {
debug_printf("%s: unfreed buffers\n", __FUNCTION__);
pb_debug_manager_dump_locked(mgr);


@ -99,7 +99,7 @@ mm_buffer_destroy(struct pb_buffer *buf)
assert(!pipe_is_referenced(&mm_buf->base.reference));
pipe_mutex_lock(mm->mutex);
mtx_lock(&mm->mutex);
u_mmFreeMem(mm_buf->block);
FREE(mm_buf);
pipe_mutex_unlock(mm->mutex);
@ -184,7 +184,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2))
return NULL;
pipe_mutex_lock(mm->mutex);
mtx_lock(&mm->mutex);
mm_buf = CALLOC_STRUCT(mm_buffer);
if (!mm_buf) {
@ -233,7 +233,7 @@ mm_bufmgr_destroy(struct pb_manager *mgr)
{
struct mm_pb_manager *mm = mm_pb_manager(mgr);
pipe_mutex_lock(mm->mutex);
mtx_lock(&mm->mutex);
u_mmDestroy(mm->heap);


@ -110,7 +110,7 @@ pool_buffer_destroy(struct pb_buffer *buf)
assert(!pipe_is_referenced(&pool_buf->base.reference));
pipe_mutex_lock(pool->mutex);
mtx_lock(&pool->mutex);
LIST_ADD(&pool_buf->head, &pool->free);
pool->numFree++;
pipe_mutex_unlock(pool->mutex);
@ -126,7 +126,7 @@ pool_buffer_map(struct pb_buffer *buf, unsigned flags, void *flush_ctx)
/* XXX: it will be necessary to remap here to propagate flush_ctx */
pipe_mutex_lock(pool->mutex);
mtx_lock(&pool->mutex);
map = (unsigned char *) pool->map + pool_buf->start;
pipe_mutex_unlock(pool->mutex);
return map;
@ -196,7 +196,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
assert(size == pool->bufSize);
assert(pool->bufAlign % desc->alignment == 0);
pipe_mutex_lock(pool->mutex);
mtx_lock(&pool->mutex);
if (pool->numFree == 0) {
pipe_mutex_unlock(pool->mutex);
@ -238,7 +238,7 @@ static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
struct pool_pb_manager *pool = pool_pb_manager(mgr);
pipe_mutex_lock(pool->mutex);
mtx_lock(&pool->mutex);
FREE(pool->bufs);


@ -199,7 +199,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
struct pb_slab_manager *mgr = slab->mgr;
struct list_head *list = &buf->head;
pipe_mutex_lock(mgr->mutex);
mtx_lock(&mgr->mutex);
assert(!pipe_is_referenced(&buf->base.reference));
@ -396,7 +396,7 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
if(!pb_check_usage(desc->usage, mgr->desc.usage))
return NULL;
pipe_mutex_lock(mgr->mutex);
mtx_lock(&mgr->mutex);
/* Create a new slab, if we run out of partial slabs */
if (mgr->slabs.next == &mgr->slabs) {


@ -89,7 +89,7 @@ pb_cache_add_buffer(struct pb_cache_entry *entry)
struct pb_buffer *buf = entry->buffer;
unsigned i;
pipe_mutex_lock(mgr->mutex);
mtx_lock(&mgr->mutex);
assert(!pipe_is_referenced(&buf->reference));
for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
@ -155,7 +155,7 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
int ret = 0;
struct list_head *cache = &mgr->buckets[bucket_index];
pipe_mutex_lock(mgr->mutex);
mtx_lock(&mgr->mutex);
entry = NULL;
cur = cache->next;
@ -228,7 +228,7 @@ pb_cache_release_all_buffers(struct pb_cache *mgr)
struct pb_cache_entry *buf;
unsigned i;
pipe_mutex_lock(mgr->mutex);
mtx_lock(&mgr->mutex);
for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
struct list_head *cache = &mgr->buckets[i];


@ -109,7 +109,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
group_index = heap * slabs->num_orders + (order - slabs->min_order);
group = &slabs->groups[group_index];
pipe_mutex_lock(slabs->mutex);
mtx_lock(&slabs->mutex);
/* If there is no candidate slab at all, or the first slab has no free
* entries, try reclaiming entries.
@ -139,7 +139,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
if (!slab)
return NULL;
pipe_mutex_lock(slabs->mutex);
mtx_lock(&slabs->mutex);
LIST_ADD(&slab->head, &group->slabs);
}
@ -162,7 +162,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
void
pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
{
pipe_mutex_lock(slabs->mutex);
mtx_lock(&slabs->mutex);
LIST_ADDTAIL(&entry->head, &slabs->reclaim);
pipe_mutex_unlock(slabs->mutex);
}
@ -176,7 +176,7 @@ pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
pipe_mutex_lock(slabs->mutex);
mtx_lock(&slabs->mutex);
pb_slabs_reclaim_locked(slabs);
pipe_mutex_unlock(slabs->mutex);
}


@ -90,7 +90,7 @@ rtasm_exec_malloc(size_t size)
struct mem_block *block = NULL;
void *addr = NULL;
pipe_mutex_lock(exec_mutex);
mtx_lock(&exec_mutex);
if (!init_heap())
goto bail;
@ -115,7 +115,7 @@ bail:
void
rtasm_exec_free(void *addr)
{
pipe_mutex_lock(exec_mutex);
mtx_lock(&exec_mutex);
if (exec_heap) {
struct mem_block *block = u_mmFindBlock(exec_heap, (unsigned char *)addr - exec_mem);


@ -165,7 +165,7 @@ debug_flush_ctx_create(boolean catch_reference_of_mapped, unsigned bt_depth)
goto out_no_ref_hash;
fctx->bt_depth = bt_depth;
pipe_mutex_lock(list_mutex);
mtx_lock(&list_mutex);
list_addtail(&fctx->head, &ctx_list);
pipe_mutex_unlock(list_mutex);
@ -215,7 +215,7 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
if (!fbuf)
return;
pipe_mutex_lock(fbuf->mutex);
mtx_lock(&fbuf->mutex);
if (fbuf->mapped) {
debug_flush_alert("Recursive map detected.", "Map",
2, fbuf->bt_depth, TRUE, TRUE, NULL);
@ -232,7 +232,7 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
if (mapped_sync) {
struct debug_flush_ctx *fctx;
pipe_mutex_lock(list_mutex);
mtx_lock(&list_mutex);
LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
struct debug_flush_item *item =
util_hash_table_get(fctx->ref_hash, fbuf);
@ -254,7 +254,7 @@ debug_flush_unmap(struct debug_flush_buf *fbuf)
if (!fbuf)
return;
pipe_mutex_lock(fbuf->mutex);
mtx_lock(&fbuf->mutex);
if (!fbuf->mapped)
debug_flush_alert("Unmap not previously mapped detected.", "Map",
2, fbuf->bt_depth, FALSE, TRUE, NULL);
@ -277,7 +277,7 @@ debug_flush_cb_reference(struct debug_flush_ctx *fctx,
item = util_hash_table_get(fctx->ref_hash, fbuf);
pipe_mutex_lock(fbuf->mutex);
mtx_lock(&fbuf->mutex);
if (fbuf->mapped_sync) {
debug_flush_alert("Reference of mapped buffer detected.", "Reference",
2, fctx->bt_depth, TRUE, TRUE, NULL);
@ -320,7 +320,7 @@ debug_flush_might_flush_cb(void *key, void *value, void *data)
util_snprintf(message, sizeof(message),
"%s referenced mapped buffer detected.", reason);
pipe_mutex_lock(fbuf->mutex);
mtx_lock(&fbuf->mutex);
if (fbuf->mapped_sync) {
debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,


@ -153,7 +153,7 @@ debug_malloc(const char *file, unsigned line, const char *function,
ftr = footer_from_header(hdr);
ftr->magic = DEBUG_MEMORY_MAGIC;
pipe_mutex_lock(list_mutex);
mtx_lock(&list_mutex);
LIST_ADDTAIL(&hdr->head, &list);
pipe_mutex_unlock(list_mutex);
@ -198,7 +198,7 @@ debug_free(const char *file, unsigned line, const char *function,
/* set freed memory to special value */
memset(ptr, DEBUG_FREED_BYTE, hdr->size);
#else
pipe_mutex_lock(list_mutex);
mtx_lock(&list_mutex);
LIST_DEL(&hdr->head);
pipe_mutex_unlock(list_mutex);
hdr->magic = 0;
@ -273,7 +273,7 @@ debug_realloc(const char *file, unsigned line, const char *function,
new_ftr = footer_from_header(new_hdr);
new_ftr->magic = DEBUG_MEMORY_MAGIC;
pipe_mutex_lock(list_mutex);
mtx_lock(&list_mutex);
LIST_REPLACE(&old_hdr->head, &new_hdr->head);
pipe_mutex_unlock(list_mutex);


@ -94,7 +94,7 @@ debug_serial(void *p, unsigned *pserial)
}
#endif
pipe_mutex_lock(serials_mutex);
mtx_lock(&serials_mutex);
if (!serials_hash)
serials_hash = util_hash_table_create(hash_ptr, compare_ptr);
@ -126,7 +126,7 @@ debug_serial(void *p, unsigned *pserial)
static void
debug_serial_delete(void *p)
{
pipe_mutex_lock(serials_mutex);
mtx_lock(&serials_mutex);
util_hash_table_remove(serials_hash, p);
pipe_mutex_unlock(serials_mutex);
}


@ -301,7 +301,7 @@ debug_symbol_name_cached(const void *addr)
}
#endif
pipe_mutex_lock(symbols_mutex);
mtx_lock(&symbols_mutex);
if(!symbols_hash)
symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
name = util_hash_table_get(symbols_hash, (void*)addr);


@ -47,7 +47,7 @@ atexit_handler(void)
{
struct util_queue *iter;
pipe_mutex_lock(exit_mutex);
mtx_lock(&exit_mutex);
/* Wait for all queues to assert idle. */
LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
util_queue_killall_and_wait(iter);
@ -67,7 +67,7 @@ add_to_atexit_list(struct util_queue *queue)
{
call_once(&atexit_once_flag, global_init);
pipe_mutex_lock(exit_mutex);
mtx_lock(&exit_mutex);
LIST_ADD(&queue->head, &queue_list);
pipe_mutex_unlock(exit_mutex);
}
@ -77,7 +77,7 @@ remove_from_atexit_list(struct util_queue *queue)
{
struct util_queue *iter, *tmp;
pipe_mutex_lock(exit_mutex);
mtx_lock(&exit_mutex);
LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
if (iter == queue) {
LIST_DEL(&iter->head);
@ -94,7 +94,7 @@ remove_from_atexit_list(struct util_queue *queue)
static void
util_queue_fence_signal(struct util_queue_fence *fence)
{
pipe_mutex_lock(fence->mutex);
mtx_lock(&fence->mutex);
fence->signalled = true;
cnd_broadcast(&fence->cond);
pipe_mutex_unlock(fence->mutex);
@ -103,7 +103,7 @@ util_queue_fence_signal(struct util_queue_fence *fence)
void
util_queue_fence_wait(struct util_queue_fence *fence)
{
pipe_mutex_lock(fence->mutex);
mtx_lock(&fence->mutex);
while (!fence->signalled)
cnd_wait(&fence->cond, &fence->mutex);
pipe_mutex_unlock(fence->mutex);
@ -151,7 +151,7 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
while (1) {
struct util_queue_job job;
pipe_mutex_lock(queue->lock);
mtx_lock(&queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* wait if the queue is empty */
@ -180,7 +180,7 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
}
/* signal remaining jobs before terminating */
pipe_mutex_lock(queue->lock);
mtx_lock(&queue->lock);
while (queue->jobs[queue->read_idx].job) {
util_queue_fence_signal(queue->jobs[queue->read_idx].fence);
@ -265,7 +265,7 @@ util_queue_killall_and_wait(struct util_queue *queue)
unsigned i;
/* Signal all threads to terminate. */
pipe_mutex_lock(queue->lock);
mtx_lock(&queue->lock);
queue->kill_threads = 1;
cnd_broadcast(&queue->has_queued_cond);
pipe_mutex_unlock(queue->lock);
@ -300,7 +300,7 @@ util_queue_add_job(struct util_queue *queue,
assert(fence->signalled);
fence->signalled = false;
pipe_mutex_lock(queue->lock);
mtx_lock(&queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* if the queue is full, wait until there is space */


@ -59,7 +59,7 @@ static inline void
util_range_add(struct util_range *range, unsigned start, unsigned end)
{
if (start < range->start || end > range->end) {
pipe_mutex_lock(range->write_mutex);
mtx_lock(&range->write_mutex);
range->start = MIN2(start, range->start);
range->end = MAX2(end, range->end);
pipe_mutex_unlock(range->write_mutex);


@ -76,7 +76,7 @@ void util_ringbuffer_enqueue( struct util_ringbuffer *ring,
/* XXX: over-reliance on mutexes, etc:
*/
pipe_mutex_lock(ring->mutex);
mtx_lock(&ring->mutex);
/* make sure we don't request an impossible amount of space
*/
@ -117,7 +117,7 @@ enum pipe_error util_ringbuffer_dequeue( struct util_ringbuffer *ring,
/* XXX: over-reliance on mutexes, etc:
*/
pipe_mutex_lock(ring->mutex);
mtx_lock(&ring->mutex);
/* Get next ring entry:
*/


@ -594,7 +594,7 @@ dd_context_destroy(struct pipe_context *_pipe)
struct pipe_context *pipe = dctx->pipe;
if (dctx->thread) {
pipe_mutex_lock(dctx->mutex);
mtx_lock(&dctx->mutex);
dctx->kill_thread = 1;
pipe_mutex_unlock(dctx->mutex);
pipe_thread_wait(dctx->thread);


@ -904,7 +904,7 @@ PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect, input)
struct dd_context *dctx = (struct dd_context *)input;
struct dd_screen *dscreen = dd_screen(dctx->base.screen);
pipe_mutex_lock(dctx->mutex);
mtx_lock(&dctx->mutex);
while (!dctx->kill_thread) {
struct dd_draw_record **record = &dctx->records;
@ -944,7 +944,7 @@ PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect, input)
/* Unlock and sleep before starting all over again. */
pipe_mutex_unlock(dctx->mutex);
os_time_sleep(10000); /* 10 ms */
pipe_mutex_lock(dctx->mutex);
mtx_lock(&dctx->mutex);
}
/* Thread termination. */
@ -1041,7 +1041,7 @@ dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
/* Add the record to the list. */
pipe_mutex_lock(dctx->mutex);
mtx_lock(&dctx->mutex);
record->next = dctx->records;
dctx->records = record;
pipe_mutex_unlock(dctx->mutex);


@ -170,7 +170,7 @@ batch_reset_resources_locked(struct fd_batch *batch)
static void
batch_reset_resources(struct fd_batch *batch)
{
pipe_mutex_lock(batch->ctx->screen->lock);
mtx_lock(&batch->ctx->screen->lock);
batch_reset_resources_locked(batch);
pipe_mutex_unlock(batch->ctx->screen->lock);
}
@ -203,7 +203,7 @@ __fd_batch_destroy(struct fd_batch *batch)
util_copy_framebuffer_state(&batch->framebuffer, NULL);
pipe_mutex_lock(batch->ctx->screen->lock);
mtx_lock(&batch->ctx->screen->lock);
fd_bc_invalidate_batch(batch, true);
pipe_mutex_unlock(batch->ctx->screen->lock);
@ -287,7 +287,7 @@ batch_flush(struct fd_batch *batch)
if (batch == batch->ctx->batch) {
batch_reset(batch);
} else {
pipe_mutex_lock(batch->ctx->screen->lock);
mtx_lock(&batch->ctx->screen->lock);
fd_bc_invalidate_batch(batch, false);
pipe_mutex_unlock(batch->ctx->screen->lock);
}
@ -339,7 +339,7 @@ batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
DBG("%p: flush forced on %p!", batch, dep);
pipe_mutex_unlock(batch->ctx->screen->lock);
fd_batch_flush(dep, false);
pipe_mutex_lock(batch->ctx->screen->lock);
mtx_lock(&batch->ctx->screen->lock);
} else {
struct fd_batch *other = NULL;
fd_batch_reference_locked(&other, dep);
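
Note the unlock/flush/relock bracket in the branch above: fd_batch_flush()
evidently takes screen->lock itself (the fd_bc_flush hunk below drops the
lock around the same call), and on a plain, non-recursive mutex holding
screen->lock across the call would self-deadlock. In outline:

    mtx_lock(&batch->ctx->screen->lock);
    /* ... found a dependency that must be flushed first ... */
    mtx_unlock(&batch->ctx->screen->lock);   /* flush re-takes screen->lock */
    fd_batch_flush(dep, false);
    mtx_lock(&batch->ctx->screen->lock);     /* shared state may have changed meanwhile */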


@ -130,7 +130,7 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
struct hash_entry *entry;
struct fd_batch *last_batch = NULL;
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
hash_table_foreach(cache->ht, entry) {
struct fd_batch *batch = NULL;
@ -139,7 +139,7 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
pipe_mutex_unlock(ctx->screen->lock);
fd_batch_reference(&last_batch, batch);
fd_batch_flush(batch, false);
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
}
fd_batch_reference_locked(&batch, NULL);
}
@ -158,7 +158,7 @@ fd_bc_invalidate_context(struct fd_context *ctx)
struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch;
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx)
@ -207,7 +207,7 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
struct fd_screen *screen = fd_screen(rsc->base.b.screen);
struct fd_batch *batch;
pipe_mutex_lock(screen->lock);
mtx_lock(&screen->lock);
if (destroy) {
foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
@ -233,7 +233,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
struct fd_batch *batch;
uint32_t idx;
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
while ((idx = ffs(~cache->batch_mask)) == 0) {
#if 0
@ -266,7 +266,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
pipe_mutex_unlock(ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
fd_batch_flush(flush_batch, true);
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
/* While the resources get cleaned up automatically, the flush_batch
* doesn't get removed from the dependencies of other batches, so
@ -338,7 +338,7 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
if (!batch)
return NULL;
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
batch->key = key;


@ -316,7 +316,7 @@ fd_context_assert_locked(struct fd_context *ctx)
static inline void
fd_context_lock(struct fd_context *ctx)
{
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
}
static inline void


@ -110,7 +110,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
* Figure out the buffers/features we need:
*/
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
if (fd_depth_enabled(ctx)) {
buffers |= FD_BUFFER_DEPTH;
@ -332,7 +332,7 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
batch->resolve |= buffers;
batch->needs_flush = true;
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
if (buffers & PIPE_CLEAR_COLOR)
for (i = 0; i < pfb->nr_cbufs; i++)


@ -179,7 +179,7 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
*/
fd_bc_invalidate_resource(rsc, false);
pipe_mutex_lock(ctx->screen->lock);
mtx_lock(&ctx->screen->lock);
/* Swap the backing bo's, so shadow becomes the old buffer,
* blit from shadow to new buffer. From here on out, we


@ -88,7 +88,7 @@ lp_fence_signal(struct lp_fence *fence)
if (LP_DEBUG & DEBUG_FENCE)
debug_printf("%s %d\n", __FUNCTION__, fence->id);
pipe_mutex_lock(fence->mutex);
mtx_lock(&fence->mutex);
fence->count++;
assert(fence->count <= fence->rank);
@ -116,7 +116,7 @@ lp_fence_wait(struct lp_fence *f)
if (LP_DEBUG & DEBUG_FENCE)
debug_printf("%s %d\n", __FUNCTION__, f->id);
pipe_mutex_lock(f->mutex);
mtx_lock(&f->mutex);
assert(f->issued);
while (f->count < f->rank) {
cnd_wait(&f->signalled, &f->mutex);


@ -484,7 +484,7 @@ lp_scene_bin_iter_next( struct lp_scene *scene , int *x, int *y)
{
struct cmd_bin *bin = NULL;
pipe_mutex_lock(scene->mutex);
mtx_lock(&scene->mutex);
if (scene->curr_x < 0) {
/* first bin */


@ -165,7 +165,7 @@ lp_setup_rasterize_scene( struct lp_setup_context *setup )
if (setup->last_fence)
setup->last_fence->issued = TRUE;
pipe_mutex_lock(screen->rast_mutex);
mtx_lock(&screen->rast_mutex);
/* FIXME: We enqueue the scene then wait on the rasterizer to finish.
* This means we never actually run any vertex stuff in parallel to


@ -1078,7 +1078,7 @@ nv50_blit_select_fp(struct nv50_blitctx *ctx, const struct pipe_blit_info *info)
const unsigned mode = ctx->mode;
if (!blitter->fp[targ][mode]) {
pipe_mutex_lock(blitter->mutex);
mtx_lock(&blitter->mutex);
if (!blitter->fp[targ][mode])
blitter->fp[targ][mode] =
nv50_blitter_make_fp(&ctx->nv50->base.pipe, mode, ptarg);


@ -918,7 +918,7 @@ nvc0_blit_select_fp(struct nvc0_blitctx *ctx, const struct pipe_blit_info *info)
const unsigned mode = ctx->mode;
if (!blitter->fp[targ][mode]) {
pipe_mutex_lock(blitter->mutex);
mtx_lock(&blitter->mutex);
if (!blitter->fp[targ][mode])
blitter->fp[targ][mode] =
nv50_blitter_make_fp(&ctx->nvc0->base.pipe, mode, ptarg);


@ -328,7 +328,7 @@ static void r300_clear(struct pipe_context* pipe,
/* Pair the resource with the CMASK to avoid other resources
* accessing it. */
if (!r300->screen->cmask_resource) {
pipe_mutex_lock(r300->screen->cmask_mutex);
mtx_lock(&r300->screen->cmask_mutex);
/* Double checking (first unlocked, then locked). */
if (!r300->screen->cmask_resource) {
/* Don't reference this, so that the texture can be
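
This is the double-checked locking idiom the comment spells out: test the
pointer once without the lock as a fast path, then re-test under cmask_mutex
before doing the one-time setup, since another thread may have raced in
between. In outline (create_cmask() is a hypothetical stand-in for the
elided body):

    if (!screen->cmask_resource) {                 /* unlocked fast path */
       mtx_lock(&screen->cmask_mutex);
       if (!screen->cmask_resource)                /* re-check under the lock */
          screen->cmask_resource = create_cmask(screen);   /* hypothetical helper */
       mtx_unlock(&screen->cmask_mutex);
    }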


@ -1030,7 +1030,7 @@ static void r300_texture_destroy(struct pipe_screen *screen,
struct r300_resource* tex = (struct r300_resource*)texture;
if (tex->tex.cmask_dwords) {
pipe_mutex_lock(rscreen->cmask_mutex);
mtx_lock(&rscreen->cmask_mutex);
if (texture == rscreen->cmask_resource) {
rscreen->cmask_resource = NULL;
}


@ -178,7 +178,7 @@ static uint64_t r600_read_mmio_counter(struct r600_common_screen *rscreen,
{
/* Start the thread if needed. */
if (!rscreen->gpu_load_thread) {
pipe_mutex_lock(rscreen->gpu_load_mutex);
mtx_lock(&rscreen->gpu_load_mutex);
/* Check again inside the mutex. */
if (!rscreen->gpu_load_thread)
rscreen->gpu_load_thread =


@ -1403,7 +1403,7 @@ void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_re
{
struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;
pipe_mutex_lock(rscreen->aux_context_lock);
mtx_lock(&rscreen->aux_context_lock);
rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
pipe_mutex_unlock(rscreen->aux_context_lock);


@ -305,7 +305,7 @@ static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
struct pipe_context *ctx = &rctx->b;
if (ctx == rscreen->aux_context)
pipe_mutex_lock(rscreen->aux_context_lock);
mtx_lock(&rscreen->aux_context_lock);
ctx->flush_resource(ctx, &rtex->resource.b.b);
ctx->flush(ctx, NULL, 0);
@ -394,7 +394,7 @@ bool r600_texture_disable_dcc(struct r600_common_context *rctx,
return false;
if (&rctx->b == rscreen->aux_context)
pipe_mutex_lock(rscreen->aux_context_lock);
mtx_lock(&rscreen->aux_context_lock);
/* Decompress DCC. */
rctx->decompress_dcc(&rctx->b, rtex);


@ -7467,7 +7467,7 @@ si_get_shader_part(struct si_screen *sscreen,
{
struct si_shader_part *result;
pipe_mutex_lock(sscreen->shader_parts_mutex);
mtx_lock(&sscreen->shader_parts_mutex);
/* Find existing. */
for (result = *list; result; result = result->next) {


@ -1256,7 +1256,7 @@ again:
if (thread_index < 0)
util_queue_fence_wait(&sel->ready);
pipe_mutex_lock(sel->mutex);
mtx_lock(&sel->mutex);
/* Find the shader variant. */
for (iter = sel->first_variant; iter; iter = iter->next_variant) {
@ -1457,7 +1457,7 @@ void si_init_shader_selector_async(void *job, int thread_index)
tgsi_binary = si_get_tgsi_binary(sel);
/* Try to load the shader from the shader cache. */
pipe_mutex_lock(sscreen->shader_cache_mutex);
mtx_lock(&sscreen->shader_cache_mutex);
if (tgsi_binary &&
si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
@ -1475,7 +1475,7 @@ void si_init_shader_selector_async(void *job, int thread_index)
}
if (tgsi_binary) {
pipe_mutex_lock(sscreen->shader_cache_mutex);
mtx_lock(&sscreen->shader_cache_mutex);
if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
FREE(tgsi_binary);
pipe_mutex_unlock(sscreen->shader_cache_mutex);


@ -46,7 +46,7 @@ rbug_destroy(struct pipe_context *_pipe)
rbug_screen_remove_from_list(rb_screen, contexts, rb_pipe);
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->destroy(pipe);
rb_pipe->pipe = NULL;
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -119,10 +119,10 @@ rbug_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info)
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->draw_mutex);
mtx_lock(&rb_pipe->draw_mutex);
rbug_draw_block_locked(rb_pipe, RBUG_BLOCK_BEFORE);
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
/* XXX loop over PIPE_SHADER_x here */
if (!(rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT] && rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT]->disabled) &&
!(rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY] && rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY]->disabled) &&
@ -143,7 +143,7 @@ rbug_create_query(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_query *query;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
query = pipe->create_query(pipe,
query_type,
index);
@ -158,7 +158,7 @@ rbug_destroy_query(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->destroy_query(pipe,
query);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -172,7 +172,7 @@ rbug_begin_query(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
boolean ret;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->begin_query(pipe, query);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
@ -186,7 +186,7 @@ rbug_end_query(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
bool ret;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->end_query(pipe,
query);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -204,7 +204,7 @@ rbug_get_query_result(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
boolean ret;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->get_query_result(pipe,
query,
wait,
@ -220,7 +220,7 @@ rbug_set_active_query_state(struct pipe_context *_pipe, boolean enable)
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_active_query_state(pipe, enable);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -233,7 +233,7 @@ rbug_create_blend_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -248,7 +248,7 @@ rbug_bind_blend_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -261,7 +261,7 @@ rbug_delete_blend_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -275,7 +275,7 @@ rbug_create_sampler_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_sampler_state(pipe,
sampler);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -292,7 +292,7 @@ rbug_bind_sampler_states(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_sampler_states(pipe, shader, start, count, samplers);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -304,7 +304,7 @@ rbug_delete_sampler_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_sampler_state(pipe,
sampler);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -318,7 +318,7 @@ rbug_create_rasterizer_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -333,7 +333,7 @@ rbug_bind_rasterizer_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -346,7 +346,7 @@ rbug_delete_rasterizer_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -360,7 +360,7 @@ rbug_create_depth_stencil_alpha_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -375,7 +375,7 @@ rbug_bind_depth_stencil_alpha_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -388,7 +388,7 @@ rbug_delete_depth_stencil_alpha_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -402,7 +402,7 @@ rbug_create_fs_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *result;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_fs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -420,7 +420,7 @@ rbug_bind_fs_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *fs;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
fs = rbug_shader_unwrap(_fs);
rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT] = rbug_shader(_fs);
@ -437,7 +437,7 @@ rbug_delete_fs_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_shader *rb_shader = rbug_shader(_fs);
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
rbug_shader_destroy(rb_pipe, rb_shader);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -450,7 +450,7 @@ rbug_create_vs_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *result;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_vs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -468,7 +468,7 @@ rbug_bind_vs_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *vs;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
vs = rbug_shader_unwrap(_vs);
rb_pipe->curr.shader[PIPE_SHADER_VERTEX] = rbug_shader(_vs);
@ -498,7 +498,7 @@ rbug_create_gs_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *result;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_gs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -516,7 +516,7 @@ rbug_bind_gs_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *gs;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
gs = rbug_shader_unwrap(_gs);
rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY] = rbug_shader(_gs);
@ -533,7 +533,7 @@ rbug_delete_gs_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_shader *rb_shader = rbug_shader(_gs);
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
rbug_shader_destroy(rb_pipe, rb_shader);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -547,7 +547,7 @@ rbug_create_vertex_elements_state(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_vertex_elements_state(pipe,
num_elements,
vertex_elements);
@ -563,7 +563,7 @@ rbug_bind_vertex_elements_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_vertex_elements_state(pipe,
velems);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -576,7 +576,7 @@ rbug_delete_vertex_elements_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_vertex_elements_state(pipe,
velems);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -589,7 +589,7 @@ rbug_set_blend_color(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_blend_color(pipe,
blend_color);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -602,7 +602,7 @@ rbug_set_stencil_ref(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_stencil_ref(pipe,
stencil_ref);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -615,7 +615,7 @@ rbug_set_clip_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_clip_state(pipe,
clip);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -637,7 +637,7 @@ rbug_set_constant_buffer(struct pipe_context *_pipe,
cb.buffer = rbug_resource_unwrap(_cb->buffer);
}
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_constant_buffer(pipe,
shader,
index,
@ -656,7 +656,7 @@ rbug_set_framebuffer_state(struct pipe_context *_pipe,
unsigned i;
/* must protect curr status */
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
rb_pipe->curr.nr_cbufs = 0;
memset(rb_pipe->curr.cbufs, 0, sizeof(rb_pipe->curr.cbufs));
@ -691,7 +691,7 @@ rbug_set_polygon_stipple(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_polygon_stipple(pipe,
poly_stipple);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -706,7 +706,7 @@ rbug_set_scissor_states(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_scissor_states(pipe, start_slot, num_scissors, scissor);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -720,7 +720,7 @@ rbug_set_viewport_states(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_viewport_states(pipe, start_slot, num_viewports, viewport);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -741,7 +741,7 @@ rbug_set_sampler_views(struct pipe_context *_pipe,
assert(start == 0); /* XXX fix */
/* must protect curr status */
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
rb_pipe->curr.num_views[shader] = 0;
memset(rb_pipe->curr.views[shader], 0, sizeof(rb_pipe->curr.views[shader]));
@ -774,7 +774,7 @@ rbug_set_vertex_buffers(struct pipe_context *_pipe,
struct pipe_vertex_buffer *buffers = NULL;
unsigned i;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
if (num_buffers && _buffers) {
memcpy(unwrapped_buffers, _buffers, num_buffers * sizeof(*_buffers));
@ -804,7 +804,7 @@ rbug_set_index_buffer(struct pipe_context *_pipe,
ib = &unwrapped_ib;
}
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_index_buffer(pipe, ib);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -816,7 +816,7 @@ rbug_set_sample_mask(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_sample_mask(pipe, sample_mask);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -831,7 +831,7 @@ rbug_create_stream_output_target(struct pipe_context *_pipe,
struct pipe_resource *res = rbug_resource_unwrap(_res);
struct pipe_stream_output_target *target;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
target = pipe->create_stream_output_target(pipe, res, buffer_offset,
buffer_size);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -845,7 +845,7 @@ rbug_stream_output_target_destroy(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->stream_output_target_destroy(pipe, target);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -859,7 +859,7 @@ rbug_set_stream_output_targets(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_stream_output_targets(pipe, num_targets, targets, offsets);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -882,7 +882,7 @@ rbug_resource_copy_region(struct pipe_context *_pipe,
struct pipe_resource *dst = rb_resource_dst->resource;
struct pipe_resource *src = rb_resource_src->resource;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->resource_copy_region(pipe,
dst,
dst_level,
@ -910,7 +910,7 @@ rbug_blit(struct pipe_context *_pipe, const struct pipe_blit_info *_blit_info)
blit_info.dst.resource = dst;
blit_info.src.resource = src;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->blit(pipe, &blit_info);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -924,7 +924,7 @@ rbug_flush_resource(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_resource *res = rb_resource_res->resource;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->flush_resource(pipe, res);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -939,7 +939,7 @@ rbug_clear(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->clear(pipe,
buffers,
color,
@ -961,7 +961,7 @@ rbug_clear_render_target(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_surface *dst = rb_surface_dst->surface;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->clear_render_target(pipe,
dst,
color,
@ -988,7 +988,7 @@ rbug_clear_depth_stencil(struct pipe_context *_pipe,
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_surface *dst = rb_surface_dst->surface;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->clear_depth_stencil(pipe,
dst,
clear_flags,
@ -1010,7 +1010,7 @@ rbug_flush(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
pipe->flush(pipe, fence, flags);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -1026,7 +1026,7 @@ rbug_context_create_sampler_view(struct pipe_context *_pipe,
struct pipe_resource *resource = rb_resource->resource;
struct pipe_sampler_view *result;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_sampler_view(pipe,
resource,
templ);
@ -1056,7 +1056,7 @@ rbug_context_create_surface(struct pipe_context *_pipe,
struct pipe_resource *resource = rb_resource->resource;
struct pipe_surface *result;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_surface(pipe,
resource,
surf_tmpl);
@ -1074,7 +1074,7 @@ rbug_context_surface_destroy(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_surface *rb_surface = rbug_surface(_surface);
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
rbug_surface_destroy(rb_pipe,
rb_surface);
pipe_mutex_unlock(rb_pipe->call_mutex);
@ -1097,7 +1097,7 @@ rbug_context_transfer_map(struct pipe_context *_context,
struct pipe_transfer *result;
void *map;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
map = context->transfer_map(context,
resource,
level,
@ -1119,7 +1119,7 @@ rbug_context_transfer_flush_region(struct pipe_context *_context,
struct pipe_context *context = rb_pipe->pipe;
struct pipe_transfer *transfer = rb_transfer->transfer;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
context->transfer_flush_region(context,
transfer,
box);
@ -1136,7 +1136,7 @@ rbug_context_transfer_unmap(struct pipe_context *_context,
struct pipe_context *context = rb_pipe->pipe;
struct pipe_transfer *transfer = rb_transfer->transfer;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
context->transfer_unmap(context,
transfer);
rbug_transfer_destroy(rb_pipe,
@ -1156,7 +1156,7 @@ rbug_context_buffer_subdata(struct pipe_context *_context,
struct pipe_context *context = rb_pipe->pipe;
struct pipe_resource *resource = rb_resource->resource;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
context->buffer_subdata(context, resource, usage, offset, size, data);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@ -1177,7 +1177,7 @@ rbug_context_texture_subdata(struct pipe_context *_context,
struct pipe_context *context = rb_pipe->pipe;
struct pipe_resource *resource = rb_resource->resource;
pipe_mutex_lock(rb_pipe->call_mutex);
mtx_lock(&rb_pipe->call_mutex);
context->texture_subdata(context,
resource,
level,


@ -182,7 +182,7 @@ rbug_texture_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
rbug_texture_t *texs;
int i = 0;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
texs = MALLOC(rb_screen->num_resources * sizeof(rbug_texture_t));
foreach(ptr, &rb_screen->resources) {
tr_tex = container_of(ptr, struct rbug_resource, list);
@ -206,7 +206,7 @@ rbug_texture_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
struct pipe_resource *t;
unsigned num_layers;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
foreach(ptr, &rb_screen->resources) {
tr_tex = container_of(ptr, struct rbug_resource, list);
if (gpti->texture == VOID2U64(tr_tex))
@ -255,7 +255,7 @@ rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
void *map;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
foreach(ptr, &rb_screen->resources) {
tr_tex = container_of(ptr, struct rbug_resource, list);
if (gptr->texture == VOID2U64(tr_tex))
@ -301,7 +301,7 @@ rbug_context_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
rbug_context_t *ctxs;
int i = 0;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
ctxs = MALLOC(rb_screen->num_contexts * sizeof(rbug_context_t));
foreach(ptr, &rb_screen->contexts) {
rb_context = container_of(ptr, struct rbug_context, list);
@ -326,7 +326,7 @@ rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
rbug_texture_t texs[PIPE_MAX_SHADER_SAMPLER_VIEWS];
unsigned i;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, info->context);
if (!rb_context) {
@ -335,8 +335,8 @@ rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
}
/* protect the pipe context */
pipe_mutex_lock(rb_context->draw_mutex);
pipe_mutex_lock(rb_context->call_mutex);
mtx_lock(&rb_context->draw_mutex);
mtx_lock(&rb_context->call_mutex);
for (i = 0; i < rb_context->curr.nr_cbufs; i++)
cbufs[i] = VOID2U64(rb_context->curr.cbufs[i]);
@ -367,7 +367,7 @@ rbug_context_draw_block(struct rbug_rbug *tr_rbug, struct rbug_header *header, u
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, block->context);
if (!rb_context) {
@ -375,7 +375,7 @@ rbug_context_draw_block(struct rbug_rbug *tr_rbug, struct rbug_header *header, u
return -ESRCH;
}
pipe_mutex_lock(rb_context->draw_mutex);
mtx_lock(&rb_context->draw_mutex);
rb_context->draw_blocker |= block->block;
pipe_mutex_unlock(rb_context->draw_mutex);
@ -392,7 +392,7 @@ rbug_context_draw_step(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, step->context);
if (!rb_context) {
@ -400,7 +400,7 @@ rbug_context_draw_step(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
return -ESRCH;
}
pipe_mutex_lock(rb_context->draw_mutex);
mtx_lock(&rb_context->draw_mutex);
if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
if (step->step & RBUG_BLOCK_RULE)
rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
@ -424,7 +424,7 @@ rbug_context_draw_unblock(struct rbug_rbug *tr_rbug, struct rbug_header *header,
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, unblock->context);
if (!rb_context) {
@ -432,7 +432,7 @@ rbug_context_draw_unblock(struct rbug_rbug *tr_rbug, struct rbug_header *header,
return -ESRCH;
}
pipe_mutex_lock(rb_context->draw_mutex);
mtx_lock(&rb_context->draw_mutex);
if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
if (unblock->unblock & RBUG_BLOCK_RULE)
rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
@ -457,7 +457,7 @@ rbug_context_draw_rule(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, rule->context);
if (!rb_context) {
@ -465,7 +465,7 @@ rbug_context_draw_rule(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
return -ESRCH;
}
pipe_mutex_lock(rb_context->draw_mutex);
mtx_lock(&rb_context->draw_mutex);
rb_context->draw_rule.shader[PIPE_SHADER_VERTEX] = U642VOID(rule->vertex);
rb_context->draw_rule.shader[PIPE_SHADER_FRAGMENT] = U642VOID(rule->fragment);
rb_context->draw_rule.texture = U642VOID(rule->texture);
@ -489,7 +489,7 @@ rbug_context_flush(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, flush->context);
if (!rb_context) {
@ -498,7 +498,7 @@ rbug_context_flush(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32
}
/* protect the pipe context */
pipe_mutex_lock(rb_context->call_mutex);
mtx_lock(&rb_context->call_mutex);
rb_context->pipe->flush(rb_context->pipe, NULL, 0);
@ -520,7 +520,7 @@ rbug_shader_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
rbug_shader_t *shdrs;
int i = 0;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, list->context);
if (!rb_context) {
@ -528,7 +528,7 @@ rbug_shader_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
return -ESRCH;
}
pipe_mutex_lock(rb_context->list_mutex);
mtx_lock(&rb_context->list_mutex);
shdrs = MALLOC(rb_context->num_shaders * sizeof(rbug_shader_t));
foreach(ptr, &rb_context->shaders) {
tr_shdr = container_of(ptr, struct rbug_shader, list);
@ -555,7 +555,7 @@ rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
unsigned original_len;
unsigned replaced_len;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, info->context);
if (!rb_context) {
@ -563,7 +563,7 @@ rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
return -ESRCH;
}
pipe_mutex_lock(rb_context->list_mutex);
mtx_lock(&rb_context->list_mutex);
tr_shdr = rbug_get_shader_locked(rb_context, info->shader);
@ -603,7 +603,7 @@ rbug_shader_disable(struct rbug_rbug *tr_rbug, struct rbug_header *header)
struct rbug_context *rb_context = NULL;
struct rbug_shader *tr_shdr = NULL;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, dis->context);
if (!rb_context) {
@ -611,7 +611,7 @@ rbug_shader_disable(struct rbug_rbug *tr_rbug, struct rbug_header *header)
return -ESRCH;
}
pipe_mutex_lock(rb_context->list_mutex);
mtx_lock(&rb_context->list_mutex);
tr_shdr = rbug_get_shader_locked(rb_context, dis->shader);
@ -640,7 +640,7 @@ rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
struct pipe_context *pipe = NULL;
void *state;
pipe_mutex_lock(rb_screen->list_mutex);
mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, rep->context);
if (!rb_context) {
@ -648,7 +648,7 @@ rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
return -ESRCH;
}
pipe_mutex_lock(rb_context->list_mutex);
mtx_lock(&rb_context->list_mutex);
tr_shdr = rbug_get_shader_locked(rb_context, rep->shader);
@ -659,7 +659,7 @@ rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
}
/* protect the pipe context */
pipe_mutex_lock(rb_context->call_mutex);
mtx_lock(&rb_context->call_mutex);
pipe = rb_context->pipe;


@ -68,7 +68,7 @@ rbug_screen(struct pipe_screen *screen)
#define rbug_screen_add_to_list(scr, name, obj) \
do { \
pipe_mutex_lock(scr->list_mutex); \
mtx_lock(&scr->list_mutex); \
insert_at_head(&scr->name, &obj->list); \
scr->num_##name++; \
pipe_mutex_unlock(scr->list_mutex); \
@ -76,7 +76,7 @@ rbug_screen(struct pipe_screen *screen)
#define rbug_screen_remove_from_list(scr, name, obj) \
do { \
pipe_mutex_lock(scr->list_mutex); \
mtx_lock(&scr->list_mutex); \
remove_from_list(&obj->list); \
scr->num_##name--; \
pipe_mutex_unlock(scr->list_mutex); \
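
These two macros wrap a list update and a counter bump in one lock/unlock pair; the diff context cuts them off before their closing, conventionally } while (0). A sketch of the completed shape (the list helpers are stand-ins, not quotes from the tree):

/* do { ... } while (0) keeps the multi-statement body safe behind an
 * unbraced if; insert_at_head() is a hypothetical list helper. */
#define locked_list_add(scr, name, obj)                  \
   do {                                                  \
      mtx_lock(&(scr)->list_mutex);                      \
      insert_at_head(&(scr)->name, &(obj)->list);        \
      (scr)->num_##name++;                               \
      mtx_unlock(&(scr)->list_mutex);                    \
   } while (0)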

View File

@ -296,7 +296,7 @@ svga_buffer_transfer_flush_region( struct pipe_context *pipe,
assert(transfer->usage & PIPE_TRANSFER_WRITE);
assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
pipe_mutex_lock(ss->swc_mutex);
mtx_lock(&ss->swc_mutex);
svga_buffer_add_range(sbuf, offset, offset + length);
pipe_mutex_unlock(ss->swc_mutex);
}
@ -312,7 +312,7 @@ svga_buffer_transfer_unmap( struct pipe_context *pipe,
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERUNMAP);
pipe_mutex_lock(ss->swc_mutex);
mtx_lock(&ss->swc_mutex);
assert(sbuf->map.count);
if (sbuf->map.count) {

View File

@ -641,7 +641,7 @@ svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
if (ret != PIPE_OK)
return ret;
pipe_mutex_lock(ss->swc_mutex);
mtx_lock(&ss->swc_mutex);
map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
assert(map);
assert(!retry);

View File

@ -91,7 +91,7 @@ svga_get_tex_sampler_view(struct pipe_context *pipe,
/* First try the cache */
if (view) {
pipe_mutex_lock(ss->tex_mutex);
mtx_lock(&ss->tex_mutex);
if (tex->cached_view &&
tex->cached_view->min_lod == min_lod &&
tex->cached_view->max_lod == max_lod) {
@ -163,7 +163,7 @@ svga_get_tex_sampler_view(struct pipe_context *pipe,
return sv;
}
pipe_mutex_lock(ss->tex_mutex);
mtx_lock(&ss->tex_mutex);
svga_sampler_view_reference(&tex->cached_view, sv);
pipe_mutex_unlock(ss->tex_mutex);
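
This is a double-checked cache: the lookup and the publish each take ss->tex_mutex, but the expensive view creation happens unlocked in between. A compact sketch of the idiom (every type and helper below is a hypothetical stand-in):

#include <threads.h>

struct view { int min_lod, max_lod; };
struct tex { mtx_t tex_mutex; struct view *cached; };

struct view *view_ref(struct view *v);
struct view *view_create(struct tex *t, int min_lod, int max_lod);
void view_assign(struct view **dst, struct view *src);

struct view *lookup_or_create(struct tex *t, int min_lod, int max_lod)
{
   struct view *v = NULL;

   mtx_lock(&t->tex_mutex);
   if (t->cached && t->cached->min_lod == min_lod &&
       t->cached->max_lod == max_lod)
      v = view_ref(t->cached);            /* fast path: cache hit */
   mtx_unlock(&t->tex_mutex);
   if (v)
      return v;

   v = view_create(t, min_lod, max_lod);  /* slow path, done unlocked */

   mtx_lock(&t->tex_mutex);
   view_assign(&t->cached, v);            /* publish for the next lookup */
   mtx_unlock(&t->tex_mutex);
   return v;
}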

View File

@ -104,7 +104,7 @@ svga_screen_cache_lookup(struct svga_screen *svgascreen,
bucket = svga_screen_cache_bucket(key);
pipe_mutex_lock(cache->mutex);
mtx_lock(&cache->mutex);
curr = cache->bucket[bucket].next;
next = curr->next;
@ -226,7 +226,7 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
surf_size = surface_size(key);
*p_handle = NULL;
pipe_mutex_lock(cache->mutex);
mtx_lock(&cache->mutex);
if (surf_size >= SVGA_HOST_SURFACE_CACHE_BYTES) {
/* this surface is too large to cache, just free it */
@ -318,7 +318,7 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
struct list_head *curr, *next;
unsigned bucket;
pipe_mutex_lock(cache->mutex);
mtx_lock(&cache->mutex);
/* Loop over entries in the invalidated list */
curr = cache->invalidated.next;

View File

@ -302,7 +302,7 @@ boolean trace_dump_trace_enabled(void)
void trace_dump_call_lock(void)
{
pipe_mutex_lock(call_mutex);
mtx_lock(&call_mutex);
}
void trace_dump_call_unlock(void)
@ -331,14 +331,14 @@ boolean trace_dumping_enabled_locked(void)
void trace_dumping_start(void)
{
pipe_mutex_lock(call_mutex);
mtx_lock(&call_mutex);
trace_dumping_start_locked();
pipe_mutex_unlock(call_mutex);
}
void trace_dumping_stop(void)
{
pipe_mutex_lock(call_mutex);
mtx_lock(&call_mutex);
trace_dumping_stop_locked();
pipe_mutex_unlock(call_mutex);
}
@ -346,7 +346,7 @@ void trace_dumping_stop(void)
boolean trace_dumping_enabled(void)
{
boolean ret;
pipe_mutex_lock(call_mutex);
mtx_lock(&call_mutex);
ret = trace_dumping_enabled_locked();
pipe_mutex_unlock(call_mutex);
return ret;
@ -395,7 +395,7 @@ void trace_dump_call_end_locked(void)
void trace_dump_call_begin(const char *klass, const char *method)
{
pipe_mutex_lock(call_mutex);
mtx_lock(&call_mutex);
trace_dump_call_begin_locked(klass, method);
}
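
tr_dump pairs every public entry point with a *_locked variant: the public function only takes call_mutex, delegates, and releases. Reduced to a sketch (names assumed):

#include <stdbool.h>
#include <threads.h>

static mtx_t call_mutex;   /* assumed to be initialized once at startup */
static bool dumping;       /* all access guarded by call_mutex */

static bool dumping_enabled_locked(void)
{
   return dumping;
}

bool dumping_enabled(void)
{
   bool ret;

   mtx_lock(&call_mutex);
   ret = dumping_enabled_locked();   /* the _locked variant does the work */
   mtx_unlock(&call_mutex);
   return ret;
}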

View File

@ -97,7 +97,7 @@ vc4_bo_from_cache(struct vc4_screen *screen, uint32_t size, const char *name)
return NULL;
struct vc4_bo *bo = NULL;
pipe_mutex_lock(cache->lock);
mtx_lock(&cache->lock);
if (!list_empty(&cache->size_list[page_index])) {
bo = LIST_ENTRY(struct vc4_bo, cache->size_list[page_index].next,
size_list);
@ -188,7 +188,7 @@ vc4_bo_last_unreference(struct vc4_bo *bo)
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
pipe_mutex_lock(screen->bo_cache.lock);
mtx_lock(&screen->bo_cache.lock);
vc4_bo_last_unreference_locked_timed(bo, time.tv_sec);
pipe_mutex_unlock(screen->bo_cache.lock);
}
@ -261,7 +261,7 @@ free_stale_bos(struct vc4_screen *screen, time_t time)
static void
vc4_bo_cache_free_all(struct vc4_bo_cache *cache)
{
pipe_mutex_lock(cache->lock);
mtx_lock(&cache->lock);
list_for_each_entry_safe(struct vc4_bo, bo, &cache->time_list,
time_list) {
vc4_bo_remove_from_cache(cache, bo);
@ -322,7 +322,7 @@ vc4_bo_open_handle(struct vc4_screen *screen,
assert(size);
pipe_mutex_lock(screen->bo_handles_mutex);
mtx_lock(&screen->bo_handles_mutex);
bo = util_hash_table_get(screen->bo_handles, (void*)(uintptr_t)handle);
if (bo) {
@ -401,7 +401,7 @@ vc4_bo_get_dmabuf(struct vc4_bo *bo)
return -1;
}
pipe_mutex_lock(bo->screen->bo_handles_mutex);
mtx_lock(&bo->screen->bo_handles_mutex);
bo->private = false;
util_hash_table_set(bo->screen->bo_handles, (void *)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(bo->screen->bo_handles_mutex);

View File

@ -93,7 +93,7 @@ vc4_bo_unreference(struct vc4_bo **bo)
vc4_bo_last_unreference(*bo);
} else {
screen = (*bo)->screen;
pipe_mutex_lock(screen->bo_handles_mutex);
mtx_lock(&screen->bo_handles_mutex);
if (pipe_reference(&(*bo)->reference, NULL)) {
util_hash_table_remove(screen->bo_handles,

View File

@ -1431,7 +1431,7 @@ dri2_load_opencl_interop(struct dri_screen *screen)
#if defined(RTLD_DEFAULT)
bool success;
pipe_mutex_lock(screen->opencl_func_mutex);
mtx_lock(&screen->opencl_func_mutex);
if (dri2_is_opencl_interop_loaded_locked(screen)) {
pipe_mutex_unlock(screen->opencl_func_mutex);

View File

@ -197,7 +197,7 @@ xmesa_init_display( Display *display )
return NULL;
}
pipe_mutex_lock(init_mutex);
mtx_lock(&init_mutex);
/* Look for XMesaDisplay which corresponds to this display */
info = MesaExtInfo.head;
@ -372,7 +372,7 @@ xmesa_get_window_size(Display *dpy, XMesaBuffer b,
XMesaDisplay xmdpy = xmesa_init_display(dpy);
Status stat;
pipe_mutex_lock(xmdpy->mutex);
mtx_lock(&xmdpy->mutex);
stat = get_drawable_size(dpy, b->ws.drawable, width, height);
pipe_mutex_unlock(xmdpy->mutex);

View File

@ -66,7 +66,7 @@ hgl_st_framebuffer_flush_front(struct st_context_iface *stctxi,
#if 0
struct stw_st_framebuffer *stwfb = stw_st_framebuffer(stfb);
pipe_mutex_lock(stwfb->fb->mutex);
mtx_lock(&stwfb->fb->mutex);
struct pipe_resource* resource = textures[statt];
if (resource)

File diff suppressed because it is too large

View File

@ -85,7 +85,7 @@ nine_queue_wait_flush(struct nine_queue_pool* ctx)
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];
/* wait for cmdbuf full */
pipe_mutex_lock(ctx->mutex_push);
mtx_lock(&ctx->mutex_push);
while (!cmdbuf->full)
{
DBG("waiting for full cmdbuf\n");
@ -111,7 +111,7 @@ nine_queue_get(struct nine_queue_pool* ctx)
if (ctx->cur_instr == cmdbuf->num_instr) {
/* signal waiting producer */
pipe_mutex_lock(ctx->mutex_pop);
mtx_lock(&ctx->mutex_pop);
DBG("freeing cmdbuf=%p\n", cmdbuf);
cmdbuf->full = 0;
cnd_signal(&ctx->event_pop);
@ -148,7 +148,7 @@ nine_queue_flush(struct nine_queue_pool* ctx)
return;
/* signal waiting worker */
pipe_mutex_lock(ctx->mutex_push);
mtx_lock(&ctx->mutex_push);
cmdbuf->full = 1;
cnd_signal(&ctx->event_push);
pipe_mutex_unlock(ctx->mutex_push);
@ -158,7 +158,7 @@ nine_queue_flush(struct nine_queue_pool* ctx)
cmdbuf = &ctx->pool[ctx->head];
/* wait for queue empty */
pipe_mutex_lock(ctx->mutex_pop);
mtx_lock(&ctx->mutex_pop);
while (cmdbuf->full)
{
DBG("waiting for empty cmdbuf\n");

View File

@ -79,7 +79,7 @@ struct csmt_context {
static void
nine_csmt_wait_processed(struct csmt_context *ctx)
{
pipe_mutex_lock(ctx->mutex_processed);
mtx_lock(&ctx->mutex_processed);
while (!p_atomic_read(&ctx->processed)) {
cnd_wait(&ctx->event_processed, &ctx->mutex_processed);
}
@ -98,7 +98,7 @@ PIPE_THREAD_ROUTINE(nine_csmt_worker, arg)
while (1) {
nine_queue_wait_flush(ctx->pool);
pipe_mutex_lock(ctx->thread_running);
mtx_lock(&ctx->thread_running);
/* Get instruction. NULL on empty cmdbuf. */
while (!p_atomic_read(&ctx->terminate) &&
@ -106,7 +106,7 @@ PIPE_THREAD_ROUTINE(nine_csmt_worker, arg)
/* decode */
if (instr->func(ctx->device, instr)) {
pipe_mutex_lock(ctx->mutex_processed);
mtx_lock(&ctx->mutex_processed);
p_atomic_set(&ctx->processed, TRUE);
cnd_signal(&ctx->event_processed);
pipe_mutex_unlock(ctx->mutex_processed);
@ -114,15 +114,15 @@ PIPE_THREAD_ROUTINE(nine_csmt_worker, arg)
if (p_atomic_read(&ctx->toPause)) {
pipe_mutex_unlock(ctx->thread_running);
/* will wait here until the thread can be resumed */

pipe_mutex_lock(ctx->thread_resume);
pipe_mutex_lock(ctx->thread_running);
mtx_lock(&ctx->thread_resume);
mtx_lock(&ctx->thread_running);
pipe_mutex_unlock(ctx->thread_resume);
}
}
pipe_mutex_unlock(ctx->thread_running);
if (p_atomic_read(&ctx->terminate)) {
pipe_mutex_lock(ctx->mutex_processed);
mtx_lock(&ctx->mutex_processed);
p_atomic_set(&ctx->processed, TRUE);
cnd_signal(&ctx->event_processed);
pipe_mutex_unlock(ctx->mutex_processed);
@ -252,11 +252,11 @@ nine_csmt_pause( struct NineDevice9 *device )
if (nine_queue_no_flushed_work(ctx->pool))
return;
pipe_mutex_lock(ctx->thread_resume);
mtx_lock(&ctx->thread_resume);
p_atomic_set(&ctx->toPause, TRUE);
/* Wait until the thread is paused */
pipe_mutex_lock(ctx->thread_running);
mtx_lock(&ctx->thread_running);
ctx->hasPaused = TRUE;
p_atomic_set(&ctx->toPause, FALSE);
}
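
The pause handshake above uses two mutexes as gates: the pauser closes thread_resume, raises toPause, then acquires thread_running, which the worker only gives up at a checkpoint; the worker then parks on the closed gate until resume. A sketch of that protocol, assuming pause and resume are issued by the same control thread so mutex ownership stays valid (names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <threads.h>

static mtx_t running;       /* held by the worker while it processes work */
static mtx_t resume_gate;   /* closed by the pauser, reopened on resume */
static atomic_bool to_pause;

void worker_checkpoint(void)             /* worker thread, owns "running" */
{
   if (atomic_load(&to_pause)) {
      mtx_unlock(&running);              /* hand "running" to the pauser */
      mtx_lock(&resume_gate);            /* blocks until resume_worker() */
      mtx_lock(&running);
      mtx_unlock(&resume_gate);
   }
}

void pause_worker(void)                  /* control thread */
{
   mtx_lock(&resume_gate);               /* close the gate first */
   atomic_store(&to_pause, true);
   mtx_lock(&running);                   /* returns once the worker has parked */
   atomic_store(&to_pause, false);
}

void resume_worker(void)                 /* same control thread */
{
   mtx_unlock(&running);
   mtx_unlock(&resume_gate);             /* worker passes the gate */
}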

View File

@ -75,7 +75,7 @@ int omx_component_library_Setup(stLoaderComponentType **stComponents)
struct vl_screen *omx_get_screen(void)
{
static bool first_time = true;
pipe_mutex_lock(omx_lock);
mtx_lock(&omx_lock);
if (!omx_screen) {
if (first_time) {
@ -117,7 +117,7 @@ error:
void omx_put_screen(void)
{
pipe_mutex_lock(omx_lock);
mtx_lock(&omx_lock);
if ((--omx_usecount) == 0) {
omx_screen->destroy(omx_screen);
omx_screen = NULL;
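
omx_get_screen/omx_put_screen implement a refcounted singleton: the first caller creates the screen, the last one tears it down, all under one global lock. Sketch (make_screen/free_screen are assumed placeholders):

#include <threads.h>

struct screen;
struct screen *make_screen(void);    /* hypothetical */
void free_screen(struct screen *s);  /* hypothetical */

static mtx_t lock;
static struct screen *scr;
static int usecount;

struct screen *get_screen(void)
{
   mtx_lock(&lock);
   if (!scr)
      scr = make_screen();      /* first user creates the shared screen */
   if (scr)
      usecount++;
   mtx_unlock(&lock);
   return scr;
}

void put_screen(void)
{
   mtx_lock(&lock);
   if (--usecount == 0) {       /* last user tears it down */
      free_screen(scr);
      scr = NULL;
   }
   mtx_unlock(&lock);
}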

View File

@ -64,7 +64,7 @@ vlVaCreateBuffer(VADriverContextP ctx, VAContextID context, VABufferType type,
memcpy(buf->data, data, size * num_elements);
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
*buf_id = handle_table_add(drv->htab, buf);
pipe_mutex_unlock(drv->mutex);
@ -82,7 +82,7 @@ vlVaBufferSetNumElements(VADriverContextP ctx, VABufferID buf_id,
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
if (!buf)
@ -117,7 +117,7 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
if (!pbuff)
return VA_STATUS_ERROR_INVALID_PARAMETER;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf || buf->export_refcount > 0) {
pipe_mutex_unlock(drv->mutex);
@ -160,7 +160,7 @@ vlVaUnmapBuffer(VADriverContextP ctx, VABufferID buf_id)
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf || buf->export_refcount > 0) {
pipe_mutex_unlock(drv->mutex);
@ -191,7 +191,7 @@ vlVaDestroyBuffer(VADriverContextP ctx, VABufferID buf_id)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf) {
pipe_mutex_unlock(drv->mutex);
@ -220,7 +220,7 @@ vlVaBufferInfo(VADriverContextP ctx, VABufferID buf_id, VABufferType *type,
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
if (!buf)
@ -254,7 +254,7 @@ vlVaAcquireBufferHandle(VADriverContextP ctx, VABufferID buf_id,
drv = VL_VA_DRIVER(ctx);
screen = VL_VA_PSCREEN(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
buf = handle_table_get(VL_VA_DRIVER(ctx)->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
@ -295,7 +295,7 @@ vlVaAcquireBufferHandle(VADriverContextP ctx, VABufferID buf_id,
case VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME: {
struct winsys_handle whandle;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
drv->pipe->flush(drv->pipe, NULL, 0);
memset(&whandle, 0, sizeof(whandle));
@ -339,7 +339,7 @@ vlVaReleaseBufferHandle(VADriverContextP ctx, VABufferID buf_id)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);

View File

@ -200,7 +200,7 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
if (!config->rt_format)
config->rt_format = VA_RT_FORMAT_YUV420 | VA_RT_FORMAT_RGB32;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
*config_id = handle_table_add(drv->htab, config);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_SUCCESS;
@ -265,7 +265,7 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
if (!config->rt_format)
config->rt_format = VA_RT_FORMAT_YUV420;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
*config_id = handle_table_add(drv->htab, config);
pipe_mutex_unlock(drv->mutex);
@ -286,7 +286,7 @@ vlVaDestroyConfig(VADriverContextP ctx, VAConfigID config_id)
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
if (!config)
@ -314,7 +314,7 @@ vlVaQueryConfigAttributes(VADriverContextP ctx, VAConfigID config_id, VAProfile
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);

View File

@ -214,7 +214,7 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
@ -287,7 +287,7 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
if (config->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)
context->desc.h264enc.rate_ctrl.rate_ctrl_method = config->rc;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
*context_id = handle_table_add(drv->htab, context);
pipe_mutex_unlock(drv->mutex);
@ -304,7 +304,7 @@ vlVaDestroyContext(VADriverContextP ctx, VAContextID context_id)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);

View File

@ -114,7 +114,7 @@ vlVaCreateImage(VADriverContextP ctx, VAImageFormat *format, int width, int heig
img = CALLOC(1, sizeof(VAImage));
if (!img)
return VA_STATUS_ERROR_ALLOCATION_FAILED;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
img->image_id = handle_table_add(drv->htab, img);
pipe_mutex_unlock(drv->mutex);
@ -258,7 +258,7 @@ vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
img->image_id = handle_table_add(drv->htab, img);
img_buf->type = VAImageBufferType;
@ -286,7 +286,7 @@ vlVaDestroyImage(VADriverContextP ctx, VAImageID image)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
vaimage = handle_table_get(drv->htab, image);
if (!vaimage) {
pipe_mutex_unlock(drv->mutex);
@ -328,7 +328,7 @@ vlVaGetImage(VADriverContextP ctx, VASurfaceID surface, int x, int y,
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface);
if (!surf || !surf->buffer) {
pipe_mutex_unlock(drv->mutex);
@ -438,7 +438,7 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface);
if (!surf || !surf->buffer) {

View File

@ -50,7 +50,7 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
@ -494,7 +494,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
@ -569,7 +569,7 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
pipe_mutex_unlock(drv->mutex);
if (!context)
@ -583,7 +583,7 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
return VA_STATUS_SUCCESS;
}
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, context->target_id);
context->mpeg4.frame_num++;

View File

@ -73,7 +73,7 @@ vlVaCreateSubpicture(VADriverContextP ctx, VAImageID image,
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
img = handle_table_get(drv->htab, image);
if (!img) {
pipe_mutex_unlock(drv->mutex);
@ -103,7 +103,7 @@ vlVaDestroySubpicture(VADriverContextP ctx, VASubpictureID subpicture)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
@ -129,7 +129,7 @@ vlVaSubpictureImage(VADriverContextP ctx, VASubpictureID subpicture, VAImageID i
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
img = handle_table_get(drv->htab, image);
if (!img) {
@ -186,7 +186,7 @@ vlVaAssociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
@ -256,7 +256,7 @@ vlVaDeassociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {

View File

@ -70,7 +70,7 @@ vlVaDestroySurfaces(VADriverContextP ctx, VASurfaceID *surface_list, int num_sur
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
for (i = 0; i < num_surfaces; ++i) {
vlVaSurface *surf = handle_table_get(drv->htab, surface_list[i]);
if (!surf) {
@ -102,7 +102,7 @@ vlVaSyncSurface(VADriverContextP ctx, VASurfaceID render_target)
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, render_target);
if (!surf || !surf->buffer) {
@ -288,7 +288,7 @@ vlVaPutSurface(VADriverContextP ctx, VASurfaceID surface_id, void* draw, short s
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface_id);
if (!surf) {
pipe_mutex_unlock(drv->mutex);
@ -399,7 +399,7 @@ vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config_id,
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
@ -686,7 +686,7 @@ vlVaCreateSurfaces2(VADriverContextP ctx, unsigned int format,
memset(surfaces, VA_INVALID_ID, num_surfaces * sizeof(VASurfaceID));
pipe_mutex_lock(drv->mutex);
mtx_lock(&drv->mutex);
for (i = 0; i < num_surfaces; i++) {
vlVaSurface *surf = CALLOC(1, sizeof(vlVaSurface));
if (!surf)

View File

@ -79,7 +79,7 @@ vlVdpBitmapSurfaceCreate(VdpDevice device,
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
res_tmpl.usage = frequently_accessed ? PIPE_USAGE_DYNAMIC : PIPE_USAGE_DEFAULT;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
if (!CheckSurfaceParams(pipe->screen, &res_tmpl)) {
ret = VDP_STATUS_RESOURCES;
@ -106,7 +106,7 @@ vlVdpBitmapSurfaceCreate(VdpDevice device,
*surface = vlAddDataHTAB(vlsurface);
if (*surface == 0) {
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
ret = VDP_STATUS_ERROR;
goto err_sampler;
}
@ -134,7 +134,7 @@ vlVdpBitmapSurfaceDestroy(VdpBitmapSurface surface)
if (!vlsurface)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
pipe_mutex_unlock(vlsurface->device->mutex);
@ -196,7 +196,7 @@ vlVdpBitmapSurfacePutBitsNative(VdpBitmapSurface surface,
pipe = vlsurface->device->context;
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
dst_box = RectToPipeBox(destination_rect, vlsurface->sampler_view->texture);
pipe->texture_subdata(pipe, vlsurface->sampler_view->texture, 0,

View File

@ -71,7 +71,7 @@ vlVdpDecoderCreate(VdpDevice device,
pipe = dev->context;
screen = dev->vscreen->pscreen;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
supported = screen->get_video_param
(
@ -163,7 +163,7 @@ vlVdpDecoderDestroy(VdpDecoder decoder)
if (!vldecoder)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(vldecoder->mutex);
mtx_lock(&vldecoder->mutex);
vldecoder->decoder->destroy(vldecoder->decoder);
pipe_mutex_unlock(vldecoder->mutex);
mtx_destroy(&vldecoder->mutex);
@ -614,7 +614,7 @@ vlVdpDecoderRender(VdpDecoder decoder,
dec->profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM) ||
!buffer_support[vlsurf->video_buffer->interlaced]) {
pipe_mutex_lock(vlsurf->device->mutex);
mtx_lock(&vlsurf->device->mutex);
/* destroy the old one */
if (vlsurf->video_buffer)
@ -674,7 +674,7 @@ vlVdpDecoderRender(VdpDecoder decoder,
if (ret != VDP_STATUS_OK)
return ret;
pipe_mutex_lock(vldecoder->mutex);
mtx_lock(&vldecoder->mutex);
dec->begin_frame(dec, vlsurf->video_buffer, &desc.base);
dec->decode_bitstream(dec, vlsurf->video_buffer, &desc.base, bitstream_buffer_count, buffers, sizes);
dec->end_frame(dec, vlsurf->video_buffer, &desc.base);

View File

@ -38,7 +38,7 @@ boolean vlCreateHTAB(void)
/* Make sure handle table handles match VDPAU handles. */
assert(sizeof(unsigned) <= sizeof(vlHandle));
pipe_mutex_lock(htab_lock);
mtx_lock(&htab_lock);
if (!htab)
htab = handle_table_create();
ret = htab != NULL;
@ -48,7 +48,7 @@ boolean vlCreateHTAB(void)
void vlDestroyHTAB(void)
{
pipe_mutex_lock(htab_lock);
mtx_lock(&htab_lock);
if (htab && !handle_table_get_first_handle(htab)) {
handle_table_destroy(htab);
htab = NULL;
@ -61,7 +61,7 @@ vlHandle vlAddDataHTAB(void *data)
vlHandle handle = 0;
assert(data);
pipe_mutex_lock(htab_lock);
mtx_lock(&htab_lock);
if (htab)
handle = handle_table_add(htab, data);
pipe_mutex_unlock(htab_lock);
@ -73,7 +73,7 @@ void* vlGetDataHTAB(vlHandle handle)
void *data = NULL;
assert(handle);
pipe_mutex_lock(htab_lock);
mtx_lock(&htab_lock);
if (htab)
data = handle_table_get(htab, handle);
pipe_mutex_unlock(htab_lock);
@ -82,7 +82,7 @@ void* vlGetDataHTAB(vlHandle handle)
void vlRemoveDataHTAB(vlHandle handle)
{
pipe_mutex_lock(htab_lock);
mtx_lock(&htab_lock);
if (htab)
handle_table_remove(htab, handle);
pipe_mutex_unlock(htab_lock);
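
Every vl*HTAB wrapper above has the same shape: take the global htab_lock, re-check that the table still exists, do one table operation, unlock. A sketch of one such wrapper (handle_table_* is a stand-in):

#include <threads.h>

struct handle_table;
static mtx_t htab_lock;
static struct handle_table *htab;

unsigned handle_table_add(struct handle_table *t, void *data);  /* hypothetical */

unsigned add_data(void *data)
{
   unsigned handle = 0;

   mtx_lock(&htab_lock);
   if (htab)                            /* the table may have been torn down */
      handle = handle_table_add(htab, data);
   mtx_unlock(&htab_lock);
   return handle;
}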

View File

@ -63,7 +63,7 @@ vlVdpVideoMixerCreate(VdpDevice device,
DeviceReference(&vmixer->device, dev);
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
if (!vl_compositor_init_state(&vmixer->cstate, dev->context)) {
ret = VDP_STATUS_ERROR;
@ -191,7 +191,7 @@ vlVdpVideoMixerDestroy(VdpVideoMixer mixer)
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(vmixer->device->mutex);
mtx_lock(&vmixer->device->mutex);
vlRemoveDataHTAB(mixer);
@ -290,7 +290,7 @@ VdpStatus vlVdpVideoMixerRender(VdpVideoMixer mixer,
return VDP_STATUS_INVALID_HANDLE;
}
pipe_mutex_lock(vmixer->device->mutex);
mtx_lock(&vmixer->device->mutex);
vl_compositor_clear_layers(&vmixer->cstate);
@ -658,7 +658,7 @@ vlVdpVideoMixerSetFeatureEnables(VdpVideoMixer mixer,
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(vmixer->device->mutex);
mtx_lock(&vmixer->device->mutex);
for (i = 0; i < feature_count; ++i) {
switch (features[i]) {
/* they are valid, but we don't support them */
@ -796,7 +796,7 @@ vlVdpVideoMixerSetAttributeValues(VdpVideoMixer mixer,
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(vmixer->device->mutex);
mtx_lock(&vmixer->device->mutex);
for (i = 0; i < attribute_count; ++i) {
switch (attributes[i]) {
case VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR:
@ -955,7 +955,7 @@ vlVdpVideoMixerGetAttributeValues(VdpVideoMixer mixer,
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(vmixer->device->mutex);
mtx_lock(&vmixer->device->mutex);
for (i = 0; i < attribute_count; ++i) {
switch (attributes[i]) {
case VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR:

View File

@ -92,7 +92,7 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
PIPE_BIND_SHARED | PIPE_BIND_SCANOUT;
res_tmpl.usage = PIPE_USAGE_DEFAULT;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
if (!CheckSurfaceParams(pipe->screen, &res_tmpl))
goto err_unlock;
@ -152,7 +152,7 @@ vlVdpOutputSurfaceDestroy(VdpOutputSurface surface)
pipe = vlsurface->device->context;
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
pipe_surface_reference(&vlsurface->surface, NULL);
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
@ -216,7 +216,7 @@ vlVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface,
if (!destination_data || !destination_pitches)
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
res = vlsurface->sampler_view->texture;
box = RectToPipeBox(source_rect, res);
@ -260,7 +260,7 @@ vlVdpOutputSurfacePutBitsNative(VdpOutputSurface surface,
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
dst_box = RectToPipeBox(destination_rect, vlsurface->sampler_view->texture);
@ -344,7 +344,7 @@ vlVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface,
res_tmpl.usage = PIPE_USAGE_STAGING;
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
if (!CheckSurfaceParams(context->screen, &res_tmpl))
goto error_resource;
@ -461,7 +461,7 @@ vlVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface,
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
memset(&vtmpl, 0, sizeof(vtmpl));
vtmpl.buffer_format = format;
vtmpl.chroma_format = FormatYCBCRToPipeChroma(source_ycbcr_format);
@ -679,7 +679,7 @@ vlVdpOutputSurfaceRenderOutputSurface(VdpOutputSurface destination_surface,
src_sv = src_vlsurface->sampler_view;
}
pipe_mutex_lock(dst_vlsurface->device->mutex);
mtx_lock(&dst_vlsurface->device->mutex);
context = dst_vlsurface->device->context;
compositor = &dst_vlsurface->device->compositor;
@ -753,7 +753,7 @@ vlVdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface,
compositor = &dst_vlsurface->device->compositor;
cstate = &dst_vlsurface->cstate;
pipe_mutex_lock(dst_vlsurface->device->mutex);
mtx_lock(&dst_vlsurface->device->mutex);
blend = BlenderToPipe(context, blend_state);
@ -780,7 +780,7 @@ struct pipe_resource *vlVdpOutputSurfaceGallium(VdpOutputSurface surface)
if (!vlsurface || !vlsurface->surface)
return NULL;
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
vlsurface->device->context->flush(vlsurface->device->context, NULL, 0);
pipe_mutex_unlock(vlsurface->device->mutex);
@ -801,7 +801,7 @@ VdpStatus vlVdpOutputSurfaceDMABuf(VdpOutputSurface surface,
if (!vlsurface || !vlsurface->surface)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
vlsurface->device->context->flush(vlsurface->device->context, NULL, 0);
memset(&whandle, 0, sizeof(struct winsys_handle));

View File

@ -65,7 +65,7 @@ vlVdpPresentationQueueCreate(VdpDevice device,
DeviceReference(&pq->device, dev);
pq->drawable = pqt->drawable;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
if (!vl_compositor_init_state(&pq->cstate, dev->context)) {
pipe_mutex_unlock(dev->mutex);
ret = VDP_STATUS_ERROR;
@ -100,7 +100,7 @@ vlVdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue)
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(pq->device->mutex);
mtx_lock(&pq->device->mutex);
vl_compositor_cleanup_state(&pq->cstate);
pipe_mutex_unlock(pq->device->mutex);
@ -133,7 +133,7 @@ vlVdpPresentationQueueSetBackgroundColor(VdpPresentationQueue presentation_queue
color.f[2] = background_color->blue;
color.f[3] = background_color->alpha;
pipe_mutex_lock(pq->device->mutex);
mtx_lock(&pq->device->mutex);
vl_compositor_set_clear_color(&pq->cstate, &color);
pipe_mutex_unlock(pq->device->mutex);
@ -157,7 +157,7 @@ vlVdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(pq->device->mutex);
mtx_lock(&pq->device->mutex);
vl_compositor_get_clear_color(&pq->cstate, &color);
pipe_mutex_unlock(pq->device->mutex);
@ -185,7 +185,7 @@ vlVdpPresentationQueueGetTime(VdpPresentationQueue presentation_queue,
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(pq->device->mutex);
mtx_lock(&pq->device->mutex);
*current_time = pq->device->vscreen->get_timestamp(pq->device->vscreen,
(void *)pq->drawable);
pipe_mutex_unlock(pq->device->mutex);
@ -230,7 +230,7 @@ vlVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
cstate = &pq->cstate;
vscreen = pq->device->vscreen;
pipe_mutex_lock(pq->device->mutex);
mtx_lock(&pq->device->mutex);
if (vscreen->set_back_texture_from_output && surf->send_to_X)
vscreen->set_back_texture_from_output(vscreen, surf->surface->texture, clip_width, clip_height);
tex = vscreen->texture_from_drawable(vscreen, (void *)pq->drawable);
@ -321,7 +321,7 @@ vlVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_qu
if (!surf)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(pq->device->mutex);
mtx_lock(&pq->device->mutex);
if (surf->fence) {
screen = pq->device->vscreen->pscreen;
screen->fence_finish(screen, NULL, surf->fence, PIPE_TIMEOUT_INFINITE);
@ -364,7 +364,7 @@ vlVdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue
else
*status = VDP_PRESENTATION_QUEUE_STATUS_IDLE;
} else {
pipe_mutex_lock(pq->device->mutex);
mtx_lock(&pq->device->mutex);
screen = pq->device->vscreen->pscreen;
if (screen->fence_finish(screen, NULL, surf->fence, 0)) {
screen->fence_reference(screen, &surf->fence, NULL);

View File

@ -82,7 +82,7 @@ vlVdpVideoSurfaceQueryCapabilities(VdpDevice device, VdpChromaType surface_chrom
if (!pscreen)
return VDP_STATUS_RESOURCES;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
/* XXX: Current limits */
*is_supported = true;
@ -119,7 +119,7 @@ vlVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaTyp
if (!pscreen)
return VDP_STATUS_RESOURCES;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
switch(bits_ycbcr_format) {
case VDP_YCBCR_FORMAT_NV12:
@ -196,7 +196,7 @@ vlVdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile,
return VDP_STATUS_OK;
}
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
*is_supported = pscreen->get_video_param(pscreen, p_profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_SUPPORTED);
if (*is_supported) {
@ -244,7 +244,7 @@ vlVdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
if (!(is_supported && max_width && max_height))
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, format, PIPE_TEXTURE_3D, 1,
@ -296,7 +296,7 @@ vlVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device, VdpRGBAFor
if (!is_supported)
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, format, PIPE_TEXTURE_2D, 1,
@ -345,7 +345,7 @@ vlVdpOutputSurfaceQueryPutBitsIndexedCapabilities(VdpDevice device,
if (!is_supported)
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, rgba_format, PIPE_TEXTURE_2D, 1,
@ -400,7 +400,7 @@ vlVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device, VdpRGBAFormat
if (!is_supported)
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, rgba_format, PIPE_TEXTURE_2D, 1,
@ -444,7 +444,7 @@ vlVdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
if (!(is_supported && max_width && max_height))
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, format, PIPE_TEXTURE_3D, 1,
@ -533,7 +533,7 @@ vlVdpVideoMixerQueryParameterValueRange(VdpDevice device, VdpVideoMixerParameter
if (!(min_value && max_value))
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
screen = dev->vscreen->pscreen;
switch (parameter) {
case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH:

View File

@ -80,7 +80,7 @@ vlVdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type,
DeviceReference(&p_surf->device, dev);
pipe = dev->context;
pipe_mutex_lock(dev->mutex);
mtx_lock(&dev->mutex);
memset(&p_surf->templat, 0, sizeof(p_surf->templat));
p_surf->templat.buffer_format = pipe->screen->get_video_param
(
@ -138,7 +138,7 @@ vlVdpVideoSurfaceDestroy(VdpVideoSurface surface)
if (!p_surf)
return VDP_STATUS_INVALID_HANDLE;
pipe_mutex_lock(p_surf->device->mutex);
mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer)
p_surf->video_buffer->destroy(p_surf->video_buffer);
pipe_mutex_unlock(p_surf->device->mutex);
@ -238,7 +238,7 @@ vlVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
return VDP_STATUS_NO_IMPLEMENTATION;
}
pipe_mutex_lock(vlsurface->device->mutex);
mtx_lock(&vlsurface->device->mutex);
sampler_views = vlsurface->video_buffer->get_sampler_view_planes(vlsurface->video_buffer);
if (!sampler_views) {
pipe_mutex_unlock(vlsurface->device->mutex);
@ -321,7 +321,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
pipe_mutex_lock(p_surf->device->mutex);
mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer == NULL ||
((pformat != p_surf->video_buffer->buffer_format))) {
@ -465,7 +465,7 @@ struct pipe_video_buffer *vlVdpVideoSurfaceGallium(VdpVideoSurface surface)
if (!p_surf)
return NULL;
pipe_mutex_lock(p_surf->device->mutex);
mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer == NULL) {
struct pipe_context *pipe = p_surf->device->context;
@ -500,7 +500,7 @@ VdpStatus vlVdpVideoSurfaceDMABuf(VdpVideoSurface surface,
memset(result, 0, sizeof(*result));
result->handle = -1;
pipe_mutex_lock(p_surf->device->mutex);
mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer == NULL) {
struct pipe_context *pipe = p_surf->device->context;

View File

@ -406,7 +406,7 @@ void
GalliumContext::Lock()
{
CALLED();
pipe_mutex_lock(fMutex);
mtx_lock(&fMutex);
}

View File

@ -83,7 +83,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
unsigned idle_fences;
bool buffer_idle;
pipe_mutex_lock(ws->bo_fence_lock);
mtx_lock(&ws->bo_fence_lock);
for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
@ -105,7 +105,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
} else {
bool buffer_idle = true;
pipe_mutex_lock(ws->bo_fence_lock);
mtx_lock(&ws->bo_fence_lock);
while (bo->num_fences && buffer_idle) {
struct pipe_fence_handle *fence = NULL;
bool fence_idle = false;
@ -118,7 +118,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
fence_idle = true;
else
buffer_idle = false;
pipe_mutex_lock(ws->bo_fence_lock);
mtx_lock(&ws->bo_fence_lock);
/* Release an idle fence to avoid checking it again later, keeping in
* mind that the fence array may have been modified by other threads.
@ -160,7 +160,7 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)
assert(bo->bo && "must not be called for slab entries");
pipe_mutex_lock(bo->ws->global_bo_list_lock);
mtx_lock(&bo->ws->global_bo_list_lock);
LIST_DEL(&bo->u.real.global_list_item);
bo->ws->num_buffers--;
pipe_mutex_unlock(bo->ws->global_bo_list_lock);
@ -349,7 +349,7 @@ static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
assert(bo->bo);
pipe_mutex_lock(ws->global_bo_list_lock);
mtx_lock(&ws->global_bo_list_lock);
LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
ws->num_buffers++;
pipe_mutex_unlock(ws->global_bo_list_lock);
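
amdgpu_bo_wait never sleeps on a fence while holding bo_fence_lock: it references the head fence, drops the lock, waits, then re-takes the lock and re-validates, since other threads may have modified the fence array in the meantime. A sketch of that loop (types and helpers assumed):

#include <threads.h>

struct fence;
struct bo { unsigned num_fences; struct fence **fences; };

struct fence *fence_ref(struct fence *f);   /* hypothetical helpers */
void fence_unref(struct fence *f);
void fence_wait(struct fence *f);
void retire_head_fence(struct bo *bo);      /* drops fences[0], shifts the array */

void wait_buffer_idle(struct bo *bo, mtx_t *fence_lock)
{
   mtx_lock(fence_lock);
   while (bo->num_fences) {
      struct fence *f = fence_ref(bo->fences[0]);

      mtx_unlock(fence_lock);
      fence_wait(f);                     /* sleep without holding the lock */
      mtx_lock(fence_lock);

      /* Another thread may have retired or replaced the fence meanwhile;
       * only retire it if it is still the head. */
      if (bo->num_fences && bo->fences[0] == f)
         retire_head_fence(bo);
      fence_unref(f);
   }
   mtx_unlock(fence_lock);
}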

View File

@ -1037,7 +1037,7 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
amdgpu_bo_handle *handles;
unsigned num = 0;
pipe_mutex_lock(ws->global_bo_list_lock);
mtx_lock(&ws->global_bo_list_lock);
handles = malloc(sizeof(handles[0]) * ws->num_buffers);
if (!handles) {
@ -1211,7 +1211,7 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
* that the order of fence dependency updates matches the order of
* submissions.
*/
pipe_mutex_lock(ws->bo_fence_lock);
mtx_lock(&ws->bo_fence_lock);
amdgpu_add_fence_dependencies(cs);
/* Swap command streams. "cst" is going to be submitted. */

View File

@ -500,7 +500,7 @@ static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
* This must happen while the mutex is locked, so that
* amdgpu_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
pipe_mutex_lock(dev_tab_mutex);
mtx_lock(&dev_tab_mutex);
destroy = pipe_reference(&ws->reference, NULL);
if (destroy && dev_tab)
@ -526,7 +526,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
drmFreeVersion(version);
/* Look up the winsys from the dev table. */
pipe_mutex_lock(dev_tab_mutex);
mtx_lock(&dev_tab_mutex);
if (!dev_tab)
dev_tab = util_hash_table_create(hash_dev, compare_dev);
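
The comment above names the race this guards against: the last reference must be dropped while dev_tab_mutex is held, or a concurrent amdgpu_winsys_create could fetch a winsys that is about to be freed. Sketched (hash helpers are stand-ins):

#include <stdbool.h>
#include <threads.h>

struct hash_table;
struct winsys { int ref; void *dev; };
static mtx_t dev_tab_mutex;
static struct hash_table *dev_tab;

bool refcount_dec_and_test(int *ref);                     /* hypothetical */
void hash_table_remove(struct hash_table *t, void *key);  /* hypothetical */

bool winsys_unref(struct winsys *ws)
{
   bool destroy;

   mtx_lock(&dev_tab_mutex);
   destroy = refcount_dec_and_test(&ws->ref);
   if (destroy && dev_tab)
      hash_table_remove(dev_tab, ws->dev);   /* unpublish while still locked */
   mtx_unlock(&dev_tab_mutex);

   return destroy;   /* caller frees the winsys outside the lock */
}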

View File

@ -77,7 +77,7 @@ etna_drm_screen_destroy(struct pipe_screen *pscreen)
struct etna_screen *screen = etna_screen(pscreen);
boolean destroy;
pipe_mutex_lock(etna_screen_mutex);
mtx_lock(&etna_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = etna_device_fd(screen->dev);
@ -120,7 +120,7 @@ etna_drm_screen_create_renderonly(struct renderonly *ro)
{
struct pipe_screen *pscreen = NULL;
pipe_mutex_lock(etna_screen_mutex);
mtx_lock(&etna_screen_mutex);
if (!etna_tab) {
etna_tab = util_hash_table_create(hash_fd, compare_fd);
if (!etna_tab)

View File

@ -50,7 +50,7 @@ fd_drm_screen_destroy(struct pipe_screen *pscreen)
struct fd_screen *screen = fd_screen(pscreen);
boolean destroy;
pipe_mutex_lock(fd_screen_mutex);
mtx_lock(&fd_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = fd_device_fd(screen->dev);
@ -91,7 +91,7 @@ fd_drm_screen_create(int fd)
{
struct pipe_screen *pscreen = NULL;
pipe_mutex_lock(fd_screen_mutex);
mtx_lock(&fd_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab)
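
The fd table itself is created lazily under the same mutex that guards lookups, so two racing first calls cannot both allocate it. Sketch (helpers assumed):

#include <threads.h>

struct pipe_screen;
struct hash_table;
static mtx_t fd_screen_mutex;
static struct hash_table *fd_tab;

struct hash_table *hash_table_create(void);        /* hypothetical */
void *hash_table_get(struct hash_table *t, int key);

struct pipe_screen *screen_lookup(int fd)
{
   struct pipe_screen *s = NULL;

   mtx_lock(&fd_screen_mutex);
   if (!fd_tab)
      fd_tab = hash_table_create();      /* first caller allocates the table */
   if (fd_tab)
      s = hash_table_get(fd_tab, fd);    /* reuse an existing screen */
   /* ...on a miss, create the screen and insert it, still under the lock... */
   mtx_unlock(&fd_screen_mutex);
   return s;
}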

View File

@ -27,7 +27,7 @@ bool nouveau_drm_screen_unref(struct nouveau_screen *screen)
if (screen->refcount == -1)
return true;
pipe_mutex_lock(nouveau_screen_mutex);
mtx_lock(&nouveau_screen_mutex);
ret = --screen->refcount;
assert(ret >= 0);
if (ret == 0)
@ -67,7 +67,7 @@ nouveau_drm_screen_create(int fd)
struct nouveau_screen *screen = NULL;
int ret, dupfd;
pipe_mutex_lock(nouveau_screen_mutex);
mtx_lock(&nouveau_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab) {

View File

@ -77,7 +77,7 @@ static bool radeon_bo_is_busy(struct radeon_bo *bo)
if (bo->handle)
return radeon_real_bo_is_busy(bo);
pipe_mutex_lock(bo->rws->bo_fence_lock);
mtx_lock(&bo->rws->bo_fence_lock);
for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {
if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
busy = true;
@ -107,7 +107,7 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
if (bo->handle) {
radeon_real_bo_wait_idle(bo);
} else {
pipe_mutex_lock(bo->rws->bo_fence_lock);
mtx_lock(&bo->rws->bo_fence_lock);
while (bo->u.slab.num_fences) {
struct radeon_bo *fence = NULL;
radeon_bo_reference(&fence, bo->u.slab.fences[0]);
@ -116,7 +116,7 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
/* Wait without holding the fence lock. */
radeon_real_bo_wait_idle(fence);
pipe_mutex_lock(bo->rws->bo_fence_lock);
mtx_lock(&bo->rws->bo_fence_lock);
if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
radeon_bo_reference(&bo->u.slab.fences[0], NULL);
memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
@ -204,7 +204,7 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
*/
size = align(size, rws->info.gart_page_size);
pipe_mutex_lock(rws->bo_va_mutex);
mtx_lock(&rws->bo_va_mutex);
/* first look for a hole */
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
offset = hole->offset;
@ -262,7 +262,7 @@ static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
size = align(size, rws->info.gart_page_size);
pipe_mutex_lock(rws->bo_va_mutex);
mtx_lock(&rws->bo_va_mutex);
if ((va + size) == rws->va_offset) {
rws->va_offset = va;
/* Delete uppermost hole if it reaches the new top */
@ -331,7 +331,7 @@ void radeon_bo_destroy(struct pb_buffer *_buf)
memset(&args, 0, sizeof(args));
pipe_mutex_lock(rws->bo_handles_mutex);
mtx_lock(&rws->bo_handles_mutex);
util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
if (bo->flink_name) {
util_hash_table_remove(rws->bo_names,
@ -418,7 +418,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
}
/* Map the buffer. */
pipe_mutex_lock(bo->u.real.map_mutex);
mtx_lock(&bo->u.real.map_mutex);
/* Return the pointer if it's already mapped. */
if (bo->u.real.ptr) {
bo->u.real.map_count++;
@ -553,7 +553,7 @@ static void radeon_bo_unmap(struct pb_buffer *_buf)
if (!bo->handle)
bo = bo->u.slab.real;
pipe_mutex_lock(bo->u.real.map_mutex);
mtx_lock(&bo->u.real.map_mutex);
if (!bo->u.real.ptr) {
pipe_mutex_unlock(bo->u.real.map_mutex);
return; /* it hasn't been mapped */
@ -665,7 +665,7 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
radeon_bo_destroy(&bo->base);
return NULL;
}
pipe_mutex_lock(rws->bo_handles_mutex);
mtx_lock(&rws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
@ -1030,7 +1030,7 @@ no_slab:
bo->u.real.use_reusable_pool = true;
pipe_mutex_lock(ws->bo_handles_mutex);
mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
@ -1063,7 +1063,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
assert(args.handle != 0);
pipe_mutex_lock(ws->bo_handles_mutex);
mtx_lock(&ws->bo_handles_mutex);
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
@ -1101,7 +1101,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
radeon_bo_destroy(&bo->base);
return NULL;
}
pipe_mutex_lock(ws->bo_handles_mutex);
mtx_lock(&ws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
@ -1144,7 +1144,7 @@ static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
* we would hit a deadlock in the kernel.
*
* The list of pairs is guarded by a mutex, of course. */
pipe_mutex_lock(ws->bo_handles_mutex);
mtx_lock(&ws->bo_handles_mutex);
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
/* First check if there already is an existing bo for the handle. */
@ -1244,7 +1244,7 @@ done:
radeon_bo_destroy(&bo->base);
return NULL;
}
pipe_mutex_lock(ws->bo_handles_mutex);
mtx_lock(&ws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
@ -1301,7 +1301,7 @@ static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
bo->flink_name = flink.name;
pipe_mutex_lock(ws->bo_handles_mutex);
mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
}
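
The radeon export/import paths all funnel through bo_handles_mutex: a buffer becomes visible to other threads only once its handle is inserted into the table, and importers take a reference before dropping the same lock. Sketch (types and helpers assumed):

#include <stdint.h>
#include <threads.h>

struct bo { unsigned handle; };
struct hash_table;
struct winsys { mtx_t bo_handles_mutex; struct hash_table *bo_handles; };

void hash_table_set(struct hash_table *t, void *key, void *value);  /* hypothetical */
void *hash_table_get(struct hash_table *t, void *key);
void bo_ref(struct bo *bo);

void publish_bo(struct winsys *ws, struct bo *bo)
{
   mtx_lock(&ws->bo_handles_mutex);
   hash_table_set(ws->bo_handles, (void *)(uintptr_t)bo->handle, bo);
   mtx_unlock(&ws->bo_handles_mutex);
}

struct bo *lookup_bo(struct winsys *ws, unsigned handle)
{
   struct bo *bo;

   mtx_lock(&ws->bo_handles_mutex);
   bo = hash_table_get(ws->bo_handles, (void *)(uintptr_t)handle);
   if (bo)
      bo_ref(bo);          /* take a reference before dropping the lock */
   mtx_unlock(&ws->bo_handles_mutex);
   return bo;
}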

View File

@ -596,7 +596,7 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
if (pfence)
radeon_fence_reference(pfence, fence);
pipe_mutex_lock(cs->ws->bo_fence_lock);
mtx_lock(&cs->ws->bo_fence_lock);
for (unsigned i = 0; i < cs->csc->num_slab_buffers; ++i) {
struct radeon_bo *bo = cs->csc->slab_buffers[i].bo;
p_atomic_inc(&bo->num_active_ioctls);

View File

@ -66,7 +66,7 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
memset(&info, 0, sizeof(info));
pipe_mutex_lock(*mutex);
mtx_lock(&*mutex);
/* Early exit if we are sure the request will fail. */
if (enable) {
@ -709,7 +709,7 @@ static bool radeon_winsys_unref(struct radeon_winsys *ws)
* This must happen while the mutex is locked, so that
* radeon_drm_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
pipe_mutex_lock(fd_tab_mutex);
mtx_lock(&fd_tab_mutex);
destroy = pipe_reference(&rws->reference, NULL);
if (destroy && fd_tab)
@ -736,7 +736,7 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
struct radeon_drm_winsys *ws;
pipe_mutex_lock(fd_tab_mutex);
mtx_lock(&fd_tab_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
}

View File

@ -315,7 +315,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
@ -508,7 +508,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
assert(!pipe_is_referenced(&fenced_buf->base.reference));
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
@ -525,7 +525,7 @@ fenced_buffer_map(struct pb_buffer *buf,
struct pb_fence_ops *ops = fenced_mgr->ops;
void *map = NULL;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
assert(!(flags & PB_USAGE_GPU_READ_WRITE));
@ -576,7 +576,7 @@ fenced_buffer_unmap(struct pb_buffer *buf)
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->mapcount);
if(fenced_buf->mapcount) {
@ -600,7 +600,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
enum pipe_error ret;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
if(!vl) {
/* invalidate */
@ -649,7 +649,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
@ -688,7 +688,7 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->buffer);
@ -739,7 +739,7 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
fenced_buf->base.vtbl = &fenced_buffer_vtbl;
fenced_buf->mgr = fenced_mgr;
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
/*
* Try to create GPU storage without stalling,
@ -775,7 +775,7 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
@ -791,7 +791,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
/* Wait on outstanding fences */
while (fenced_mgr->num_fenced) {
@ -799,7 +799,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
pipe_mutex_lock(fenced_mgr->mutex);
mtx_lock(&fenced_mgr->mutex);
while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
}

View File

@ -528,7 +528,7 @@ vmw_swc_surface_relocation(struct svga_winsys_context *swc,
* Make sure backup buffer ends up fenced.
*/
pipe_mutex_lock(vsurf->mutex);
mtx_lock(&vsurf->mutex);
assert(vsurf->buf != NULL);
vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)

View File

@ -101,7 +101,7 @@ vmw_fences_release(struct vmw_fence_ops *ops)
{
struct vmw_fence *fence, *n;
pipe_mutex_lock(ops->mutex);
mtx_lock(&ops->mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
LIST_DELINIT(&fence->ops_list);
pipe_mutex_unlock(ops->mutex);
@ -130,7 +130,7 @@ vmw_fences_signal(struct pb_fence_ops *fence_ops,
return;
ops = vmw_fence_ops(fence_ops);
pipe_mutex_lock(ops->mutex);
mtx_lock(&ops->mutex);
if (!has_emitted) {
emitted = ops->last_emitted;
@ -193,7 +193,7 @@ vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
fence->mask = mask;
fence->seqno = seqno;
p_atomic_set(&fence->signalled, 0);
pipe_mutex_lock(ops->mutex);
mtx_lock(&ops->mutex);
if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
p_atomic_set(&fence->signalled, 1);
@ -229,7 +229,7 @@ vmw_fence_reference(struct vmw_winsys_screen *vws,
vmw_ioctl_fence_unref(vws, vfence->handle);
pipe_mutex_lock(ops->mutex);
mtx_lock(&ops->mutex);
LIST_DELINIT(&vfence->ops_list);
pipe_mutex_unlock(ops->mutex);

View File

@ -48,7 +48,7 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
*retry = FALSE;
assert((flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE)) != 0);
pipe_mutex_lock(vsrf->mutex);
mtx_lock(&vsrf->mutex);
if (vsrf->mapcount) {
/*
@ -165,7 +165,7 @@ vmw_svga_winsys_surface_unmap(struct svga_winsys_context *swc,
boolean *rebind)
{
struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
pipe_mutex_lock(vsrf->mutex);
mtx_lock(&vsrf->mutex);
if (--vsrf->mapcount == 0) {
*rebind = vsrf->rebind;
vsrf->rebind = FALSE;

View File

@ -54,14 +54,14 @@ static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
struct drm_gem_close args;
if (res->flinked) {
pipe_mutex_lock(qdws->bo_handles_mutex);
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_names,
(void *)(uintptr_t)res->flink);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
if (res->bo_handle) {
pipe_mutex_lock(qdws->bo_handles_mutex);
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_handles,
(void *)(uintptr_t)res->bo_handle);
pipe_mutex_unlock(qdws->bo_handles_mutex);
@ -98,7 +98,7 @@ virgl_cache_flush(struct virgl_drm_winsys *qdws)
struct list_head *curr, *next;
struct virgl_hw_res *res;
pipe_mutex_lock(qdws->mutex);
mtx_lock(&qdws->mutex);
curr = qdws->delayed.next;
next = curr->next;
@ -158,7 +158,7 @@ static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
if (!can_cache_resource(old)) {
virgl_hw_res_destroy(qdws, old);
} else {
pipe_mutex_lock(qdws->mutex);
mtx_lock(&qdws->mutex);
virgl_cache_list_check_free(qdws);
old->start = os_time_get();
@ -310,7 +310,7 @@ virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
goto alloc;
pipe_mutex_lock(qdws->mutex);
mtx_lock(&qdws->mutex);
res = NULL;
curr = qdws->delayed.next;
@ -386,7 +386,7 @@ virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
return NULL;
}
pipe_mutex_lock(qdws->bo_handles_mutex);
mtx_lock(&qdws->bo_handles_mutex);
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
@ -479,7 +479,7 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
res->flinked = TRUE;
res->flink = flink.name;
pipe_mutex_lock(qdws->bo_handles_mutex);
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
@ -489,7 +489,7 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
return FALSE;
pipe_mutex_lock(qdws->bo_handles_mutex);
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
@ -814,7 +814,7 @@ virgl_drm_screen_destroy(struct pipe_screen *pscreen)
struct virgl_screen *screen = virgl_screen(pscreen);
boolean destroy;
pipe_mutex_lock(virgl_screen_mutex);
mtx_lock(&virgl_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = virgl_drm_winsys(screen->vws)->fd;
@ -855,7 +855,7 @@ virgl_drm_screen_create(int fd)
{
struct pipe_screen *pscreen = NULL;
pipe_mutex_lock(virgl_screen_mutex);
mtx_lock(&virgl_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab)

View File

@ -144,7 +144,7 @@ virgl_cache_flush(struct virgl_vtest_winsys *vtws)
struct list_head *curr, *next;
struct virgl_hw_res *res;
pipe_mutex_lock(vtws->mutex);
mtx_lock(&vtws->mutex);
curr = vtws->delayed.next;
next = curr->next;
@ -189,7 +189,7 @@ static void virgl_vtest_resource_reference(struct virgl_vtest_winsys *vtws,
if (!can_cache_resource(old)) {
virgl_hw_res_destroy(vtws, old);
} else {
pipe_mutex_lock(vtws->mutex);
mtx_lock(&vtws->mutex);
virgl_cache_list_check_free(vtws);
old->start = os_time_get();
@ -333,7 +333,7 @@ virgl_vtest_winsys_resource_cache_create(struct virgl_winsys *vws,
bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
goto alloc;
pipe_mutex_lock(vtws->mutex);
mtx_lock(&vtws->mutex);
res = NULL;
curr = vtws->delayed.next;