gallium/util: replace pipe_mutex_unlock() with mtx_unlock()

pipe_mutex_unlock() was made unnecessary by commit fd33a6bcd7.

Replaced using:
find ./src -type f -exec sed -i -- \
's:pipe_mutex_unlock(\([^)]*\)):mtx_unlock(\&\1):g' {} \;
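
For context, pipe_mutex_unlock() was a thin wrapper that expanded to (void) mtx_unlock(&(mutex)) (see the header hunk below that removes the macro definition), so the substitution is purely textual and behavior-preserving. A minimal sketch of a call site in the new style, assuming Mesa's "c11/threads.h" wrapper and hypothetical names:

  #include "c11/threads.h"      /* provides mtx_t, mtx_init(), mtx_lock(), mtx_unlock() */

  static mtx_t example_mutex;   /* hypothetical mutex, for illustration only */
  static int example_count;

  static void example_init(void)
  {
     (void) mtx_init(&example_mutex, mtx_plain);
  }

  static void example_bump(void)
  {
     mtx_lock(&example_mutex);
     example_count++;
     /* formerly: pipe_mutex_unlock(example_mutex); */
     mtx_unlock(&example_mutex);
  }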

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Author: Timothy Arceri
Date:   2017-03-05 12:32:06 +11:00
parent ba72554f3e
commit 628e84a58f
86 changed files with 772 additions and 775 deletions


@ -191,7 +191,7 @@ hud_get_num_cpufreq(bool displayhelp)
/* Return the number of CPU metrics we support. */
mtx_lock(&gcpufreq_mutex);
if (gcpufreq_count) {
pipe_mutex_unlock(gcpufreq_mutex);
mtx_unlock(&gcpufreq_mutex);
return gcpufreq_count;
}
@ -201,7 +201,7 @@ hud_get_num_cpufreq(bool displayhelp)
list_inithead(&gcpufreq_list);
DIR *dir = opendir("/sys/devices/system/cpu");
if (!dir) {
pipe_mutex_unlock(gcpufreq_mutex);
mtx_unlock(&gcpufreq_mutex);
return 0;
}
@ -247,7 +247,7 @@ hud_get_num_cpufreq(bool displayhelp)
}
}
pipe_mutex_unlock(gcpufreq_mutex);
mtx_unlock(&gcpufreq_mutex);
return gcpufreq_count;
}


@ -248,7 +248,7 @@ hud_get_num_disks(bool displayhelp)
/* Return the number of block devices and partitions. */
mtx_lock(&gdiskstat_mutex);
if (gdiskstat_count) {
pipe_mutex_unlock(gdiskstat_mutex);
mtx_unlock(&gdiskstat_mutex);
return gdiskstat_count;
}
@ -258,7 +258,7 @@ hud_get_num_disks(bool displayhelp)
list_inithead(&gdiskstat_list);
DIR *dir = opendir("/sys/block/");
if (!dir) {
pipe_mutex_unlock(gdiskstat_mutex);
mtx_unlock(&gdiskstat_mutex);
return 0;
}
@ -285,7 +285,7 @@ hud_get_num_disks(bool displayhelp)
struct dirent *dpart;
DIR *pdir = opendir(basename);
if (!pdir) {
pipe_mutex_unlock(gdiskstat_mutex);
mtx_unlock(&gdiskstat_mutex);
closedir(dir);
return 0;
}
@ -320,7 +320,7 @@ hud_get_num_disks(bool displayhelp)
puts(line);
}
}
pipe_mutex_unlock(gdiskstat_mutex);
mtx_unlock(&gdiskstat_mutex);
return gdiskstat_count;
}


@ -333,7 +333,7 @@ hud_get_num_nics(bool displayhelp)
/* Return the number of network interfaces. */
mtx_lock(&gnic_mutex);
if (gnic_count) {
pipe_mutex_unlock(gnic_mutex);
mtx_unlock(&gnic_mutex);
return gnic_count;
}
@ -343,7 +343,7 @@ hud_get_num_nics(bool displayhelp)
list_inithead(&gnic_list);
DIR *dir = opendir("/sys/class/net/");
if (!dir) {
pipe_mutex_unlock(gnic_mutex);
mtx_unlock(&gnic_mutex);
return 0;
}
@ -419,7 +419,7 @@ hud_get_num_nics(bool displayhelp)
}
pipe_mutex_unlock(gnic_mutex);
mtx_unlock(&gnic_mutex);
return gnic_count;
}


@ -326,13 +326,13 @@ hud_get_num_sensors(bool displayhelp)
/* Return the number of sensors detected. */
mtx_lock(&gsensor_temp_mutex);
if (gsensors_temp_count) {
pipe_mutex_unlock(gsensor_temp_mutex);
mtx_unlock(&gsensor_temp_mutex);
return gsensors_temp_count;
}
int ret = sensors_init(NULL);
if (ret) {
pipe_mutex_unlock(gsensor_temp_mutex);
mtx_unlock(&gsensor_temp_mutex);
return 0;
}
@ -368,7 +368,7 @@ hud_get_num_sensors(bool displayhelp)
}
}
pipe_mutex_unlock(gsensor_temp_mutex);
mtx_unlock(&gsensor_temp_mutex);
return gsensors_temp_count;
}


@ -108,9 +108,6 @@ static inline int pipe_thread_is_self( pipe_thread thread )
return 0;
}
#define pipe_mutex_unlock(mutex) \
(void) mtx_unlock(&(mutex))
#define pipe_mutex_assert_locked(mutex) \
__pipe_mutex_assert_locked(&(mutex))
@ -202,7 +199,7 @@ static inline void pipe_barrier_wait(pipe_barrier *barrier)
cnd_broadcast(&barrier->condvar);
}
pipe_mutex_unlock(barrier->mutex);
mtx_unlock(&barrier->mutex);
}
@ -243,7 +240,7 @@ pipe_semaphore_signal(pipe_semaphore *sema)
mtx_lock(&sema->mutex);
sema->counter++;
cnd_signal(&sema->cond);
pipe_mutex_unlock(sema->mutex);
mtx_unlock(&sema->mutex);
}
/** Wait for semaphore counter to be greater than zero */
@ -255,7 +252,7 @@ pipe_semaphore_wait(pipe_semaphore *sema)
cnd_wait(&sema->cond, &sema->mutex);
}
sema->counter--;
pipe_mutex_unlock(sema->mutex);
mtx_unlock(&sema->mutex);
}


@ -348,7 +348,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
ops->fence_reference(ops, &fence, fenced_buf->fence);
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
@ -656,7 +656,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
}
@ -709,7 +709,7 @@ fenced_buffer_map(struct pb_buffer *buf,
}
done:
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
return map;
}
@ -732,7 +732,7 @@ fenced_buffer_unmap(struct pb_buffer *buf)
fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
}
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
}
@ -802,7 +802,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
fenced_buf->validation_flags |= flags;
done:
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
return ret;
}
@ -841,7 +841,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
fenced_buf->validation_flags = 0;
}
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
}
@ -868,7 +868,7 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
*offset = 0;
}
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
}
@ -941,12 +941,12 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
++fenced_mgr->num_unfenced;
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
return &fenced_buf->base;
no_storage:
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
FREE(fenced_buf);
no_buffer:
return NULL;
@ -961,7 +961,7 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
assert(fenced_mgr->provider->flush);
if (fenced_mgr->provider->flush)
@ -978,7 +978,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
/* Wait on outstanding fences. */
while (fenced_mgr->num_fenced) {
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
@ -991,7 +991,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
/* assert(!fenced_mgr->num_unfenced); */
#endif
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
mtx_destroy(&fenced_mgr->mutex);
if (fenced_mgr->provider)


@ -238,7 +238,7 @@ pb_debug_buffer_destroy(struct pb_buffer *_buf)
mtx_lock(&mgr->mutex);
LIST_DEL(&buf->head);
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
mtx_destroy(&buf->mutex);
@ -263,7 +263,7 @@ pb_debug_buffer_map(struct pb_buffer *_buf,
mtx_lock(&buf->mutex);
++buf->map_count;
debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
pipe_mutex_unlock(buf->mutex);
mtx_unlock(&buf->mutex);
return (uint8_t *)map + buf->underflow_size;
}
@ -278,7 +278,7 @@ pb_debug_buffer_unmap(struct pb_buffer *_buf)
assert(buf->map_count);
if(buf->map_count)
--buf->map_count;
pipe_mutex_unlock(buf->mutex);
mtx_unlock(&buf->mutex);
pb_unmap(buf->buffer);
@ -310,7 +310,7 @@ pb_debug_buffer_validate(struct pb_buffer *_buf,
debug_printf("last map backtrace is\n");
debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
}
pipe_mutex_unlock(buf->mutex);
mtx_unlock(&buf->mutex);
pb_debug_buffer_check(buf);
@ -392,7 +392,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
debug_printf("%s: failed to create buffer\n", __FUNCTION__);
if(!LIST_IS_EMPTY(&mgr->list))
pb_debug_manager_dump_locked(mgr);
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
#endif
return NULL;
}
@ -421,7 +421,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
mtx_lock(&mgr->mutex);
LIST_ADDTAIL(&buf->head, &mgr->list);
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
return &buf->base;
}
@ -447,7 +447,7 @@ pb_debug_manager_destroy(struct pb_manager *_mgr)
debug_printf("%s: unfreed buffers\n", __FUNCTION__);
pb_debug_manager_dump_locked(mgr);
}
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
mtx_destroy(&mgr->mutex);
mgr->provider->destroy(mgr->provider);


@ -102,7 +102,7 @@ mm_buffer_destroy(struct pb_buffer *buf)
mtx_lock(&mm->mutex);
u_mmFreeMem(mm_buf->block);
FREE(mm_buf);
pipe_mutex_unlock(mm->mutex);
mtx_unlock(&mm->mutex);
}
@ -188,7 +188,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
mm_buf = CALLOC_STRUCT(mm_buffer);
if (!mm_buf) {
pipe_mutex_unlock(mm->mutex);
mtx_unlock(&mm->mutex);
return NULL;
}
@ -208,7 +208,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
mmDumpMemInfo(mm->heap);
#endif
FREE(mm_buf);
pipe_mutex_unlock(mm->mutex);
mtx_unlock(&mm->mutex);
return NULL;
}
@ -216,7 +216,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
assert(0 <= (pb_size)mm_buf->block->ofs && (pb_size)mm_buf->block->ofs < mm->size);
assert(size <= (pb_size)mm_buf->block->size && (pb_size)mm_buf->block->ofs + (pb_size)mm_buf->block->size <= mm->size);
pipe_mutex_unlock(mm->mutex);
mtx_unlock(&mm->mutex);
return SUPER(mm_buf);
}
@ -240,7 +240,7 @@ mm_bufmgr_destroy(struct pb_manager *mgr)
pb_unmap(mm->buffer);
pb_reference(&mm->buffer, NULL);
pipe_mutex_unlock(mm->mutex);
mtx_unlock(&mm->mutex);
FREE(mgr);
}


@ -113,7 +113,7 @@ pool_buffer_destroy(struct pb_buffer *buf)
mtx_lock(&pool->mutex);
LIST_ADD(&pool_buf->head, &pool->free);
pool->numFree++;
pipe_mutex_unlock(pool->mutex);
mtx_unlock(&pool->mutex);
}
@ -128,7 +128,7 @@ pool_buffer_map(struct pb_buffer *buf, unsigned flags, void *flush_ctx)
mtx_lock(&pool->mutex);
map = (unsigned char *) pool->map + pool_buf->start;
pipe_mutex_unlock(pool->mutex);
mtx_unlock(&pool->mutex);
return map;
}
@ -199,7 +199,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
mtx_lock(&pool->mutex);
if (pool->numFree == 0) {
pipe_mutex_unlock(pool->mutex);
mtx_unlock(&pool->mutex);
debug_printf("warning: out of fixed size buffer objects\n");
return NULL;
}
@ -207,7 +207,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
item = pool->free.next;
if (item == &pool->free) {
pipe_mutex_unlock(pool->mutex);
mtx_unlock(&pool->mutex);
debug_printf("error: fixed size buffer pool corruption\n");
return NULL;
}
@ -215,7 +215,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
LIST_DEL(item);
--pool->numFree;
pipe_mutex_unlock(pool->mutex);
mtx_unlock(&pool->mutex);
pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
assert(!pipe_is_referenced(&pool_buf->base.reference));
@ -245,7 +245,7 @@ pool_bufmgr_destroy(struct pb_manager *mgr)
pb_unmap(pool->buffer);
pb_reference(&pool->buffer, NULL);
pipe_mutex_unlock(pool->mutex);
mtx_unlock(&pool->mutex);
FREE(mgr);
}


@ -221,7 +221,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
FREE(slab);
}
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
}
@ -402,7 +402,7 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
if (mgr->slabs.next == &mgr->slabs) {
(void) pb_slab_create(mgr);
if (mgr->slabs.next == &mgr->slabs) {
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
return NULL;
}
}
@ -418,7 +418,7 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
list = slab->freeBuffers.next;
LIST_DELINIT(list);
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
pipe_reference_init(&buf->base.reference, 1);


@ -98,7 +98,7 @@ pb_cache_add_buffer(struct pb_cache_entry *entry)
/* Directly release any buffer that exceeds the limit. */
if (mgr->cache_size + buf->size > mgr->max_cache_size) {
mgr->destroy_buffer(buf);
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
return;
}
@ -107,7 +107,7 @@ pb_cache_add_buffer(struct pb_cache_entry *entry)
LIST_ADDTAIL(&entry->head, cache);
++mgr->num_buffers;
mgr->cache_size += buf->size;
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
}
/**
@ -208,13 +208,13 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
mgr->cache_size -= buf->size;
LIST_DEL(&entry->head);
--mgr->num_buffers;
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
/* Increase refcount */
pipe_reference_init(&buf->reference, 1);
return buf;
}
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
return NULL;
}
@ -241,7 +241,7 @@ pb_cache_release_all_buffers(struct pb_cache *mgr)
next = curr->next;
}
}
pipe_mutex_unlock(mgr->mutex);
mtx_unlock(&mgr->mutex);
}
void


@ -135,7 +135,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
* There's a chance that racing threads will end up allocating multiple
* slabs for the same group, but that doesn't hurt correctness.
*/
pipe_mutex_unlock(slabs->mutex);
mtx_unlock(&slabs->mutex);
slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
if (!slab)
return NULL;
@ -148,7 +148,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
LIST_DEL(&entry->head);
slab->num_free--;
pipe_mutex_unlock(slabs->mutex);
mtx_unlock(&slabs->mutex);
return entry;
}
@ -164,7 +164,7 @@ pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
{
mtx_lock(&slabs->mutex);
LIST_ADDTAIL(&entry->head, &slabs->reclaim);
pipe_mutex_unlock(slabs->mutex);
mtx_unlock(&slabs->mutex);
}
/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
@ -178,7 +178,7 @@ pb_slabs_reclaim(struct pb_slabs *slabs)
{
mtx_lock(&slabs->mutex);
pb_slabs_reclaim_locked(slabs);
pipe_mutex_unlock(slabs->mutex);
mtx_unlock(&slabs->mutex);
}
/* Initialize the slabs manager.


@ -106,7 +106,7 @@ rtasm_exec_malloc(size_t size)
debug_printf("rtasm_exec_malloc failed\n");
bail:
pipe_mutex_unlock(exec_mutex);
mtx_unlock(&exec_mutex);
return addr;
}
@ -124,7 +124,7 @@ rtasm_exec_free(void *addr)
u_mmFreeMem(block);
}
pipe_mutex_unlock(exec_mutex);
mtx_unlock(&exec_mutex);
}


@ -167,7 +167,7 @@ debug_flush_ctx_create(boolean catch_reference_of_mapped, unsigned bt_depth)
fctx->bt_depth = bt_depth;
mtx_lock(&list_mutex);
list_addtail(&fctx->head, &ctx_list);
pipe_mutex_unlock(list_mutex);
mtx_unlock(&list_mutex);
return fctx;
@ -227,7 +227,7 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
}
fbuf->map_frame = debug_flush_capture_frame(1, fbuf->bt_depth);
fbuf->mapped = TRUE;
pipe_mutex_unlock(fbuf->mutex);
mtx_unlock(&fbuf->mutex);
if (mapped_sync) {
struct debug_flush_ctx *fctx;
@ -244,7 +244,7 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
FALSE, FALSE, item->ref_frame);
}
}
pipe_mutex_unlock(list_mutex);
mtx_unlock(&list_mutex);
}
}
@ -263,7 +263,7 @@ debug_flush_unmap(struct debug_flush_buf *fbuf)
fbuf->mapped = FALSE;
FREE(fbuf->map_frame);
fbuf->map_frame = NULL;
pipe_mutex_unlock(fbuf->mutex);
mtx_unlock(&fbuf->mutex);
}
void
@ -284,7 +284,7 @@ debug_flush_cb_reference(struct debug_flush_ctx *fctx,
debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
FALSE, fbuf->map_frame);
}
pipe_mutex_unlock(fbuf->mutex);
mtx_unlock(&fbuf->mutex);
if (!item) {
item = CALLOC_STRUCT(debug_flush_item);
@ -328,7 +328,7 @@ debug_flush_might_flush_cb(void *key, void *value, void *data)
debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
FALSE, item->ref_frame);
}
pipe_mutex_unlock(fbuf->mutex);
mtx_unlock(&fbuf->mutex);
return PIPE_OK;
}


@ -155,7 +155,7 @@ debug_malloc(const char *file, unsigned line, const char *function,
mtx_lock(&list_mutex);
LIST_ADDTAIL(&hdr->head, &list);
pipe_mutex_unlock(list_mutex);
mtx_unlock(&list_mutex);
return data_from_header(hdr);
}
@ -200,7 +200,7 @@ debug_free(const char *file, unsigned line, const char *function,
#else
mtx_lock(&list_mutex);
LIST_DEL(&hdr->head);
pipe_mutex_unlock(list_mutex);
mtx_unlock(&list_mutex);
hdr->magic = 0;
ftr->magic = 0;
@ -275,7 +275,7 @@ debug_realloc(const char *file, unsigned line, const char *function,
mtx_lock(&list_mutex);
LIST_REPLACE(&old_hdr->head, &new_hdr->head);
pipe_mutex_unlock(list_mutex);
mtx_unlock(&list_mutex);
/* copy data */
new_ptr = data_from_header(new_hdr);


@ -112,7 +112,7 @@ debug_serial(void *p, unsigned *pserial)
util_hash_table_set(serials_hash, p, (void *) (uintptr_t) serial);
found = FALSE;
}
pipe_mutex_unlock(serials_mutex);
mtx_unlock(&serials_mutex);
*pserial = serial;
@ -128,7 +128,7 @@ debug_serial_delete(void *p)
{
mtx_lock(&serials_mutex);
util_hash_table_remove(serials_hash, p);
pipe_mutex_unlock(serials_mutex);
mtx_unlock(&serials_mutex);
}


@ -313,6 +313,6 @@ debug_symbol_name_cached(const void *addr)
util_hash_table_set(symbols_hash, (void*)addr, (void*)name);
}
pipe_mutex_unlock(symbols_mutex);
mtx_unlock(&symbols_mutex);
return name;
}


@ -52,7 +52,7 @@ atexit_handler(void)
LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
util_queue_killall_and_wait(iter);
}
pipe_mutex_unlock(exit_mutex);
mtx_unlock(&exit_mutex);
}
static void
@ -69,7 +69,7 @@ add_to_atexit_list(struct util_queue *queue)
mtx_lock(&exit_mutex);
LIST_ADD(&queue->head, &queue_list);
pipe_mutex_unlock(exit_mutex);
mtx_unlock(&exit_mutex);
}
static void
@ -84,7 +84,7 @@ remove_from_atexit_list(struct util_queue *queue)
break;
}
}
pipe_mutex_unlock(exit_mutex);
mtx_unlock(&exit_mutex);
}
/****************************************************************************
@ -97,7 +97,7 @@ util_queue_fence_signal(struct util_queue_fence *fence)
mtx_lock(&fence->mutex);
fence->signalled = true;
cnd_broadcast(&fence->cond);
pipe_mutex_unlock(fence->mutex);
mtx_unlock(&fence->mutex);
}
void
@ -106,7 +106,7 @@ util_queue_fence_wait(struct util_queue_fence *fence)
mtx_lock(&fence->mutex);
while (!fence->signalled)
cnd_wait(&fence->cond, &fence->mutex);
pipe_mutex_unlock(fence->mutex);
mtx_unlock(&fence->mutex);
}
void
@ -159,7 +159,7 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
cnd_wait(&queue->has_queued_cond, &queue->lock);
if (queue->kill_threads) {
pipe_mutex_unlock(queue->lock);
mtx_unlock(&queue->lock);
break;
}
@ -169,7 +169,7 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
queue->num_queued--;
cnd_signal(&queue->has_space_cond);
pipe_mutex_unlock(queue->lock);
mtx_unlock(&queue->lock);
if (job.job) {
job.execute(job.job, thread_index);
@ -188,7 +188,7 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
}
queue->num_queued = 0; /* reset this when exiting the thread */
pipe_mutex_unlock(queue->lock);
mtx_unlock(&queue->lock);
return 0;
}
@ -268,7 +268,7 @@ util_queue_killall_and_wait(struct util_queue *queue)
mtx_lock(&queue->lock);
queue->kill_threads = 1;
cnd_broadcast(&queue->has_queued_cond);
pipe_mutex_unlock(queue->lock);
mtx_unlock(&queue->lock);
for (i = 0; i < queue->num_threads; i++)
pipe_thread_wait(queue->threads[i]);
@ -317,7 +317,7 @@ util_queue_add_job(struct util_queue *queue,
queue->num_queued++;
cnd_signal(&queue->has_queued_cond);
pipe_mutex_unlock(queue->lock);
mtx_unlock(&queue->lock);
}
int64_t


@ -62,7 +62,7 @@ util_range_add(struct util_range *range, unsigned start, unsigned end)
mtx_lock(&range->write_mutex);
range->start = MIN2(start, range->start);
range->end = MAX2(end, range->end);
pipe_mutex_unlock(range->write_mutex);
mtx_unlock(&range->write_mutex);
}
}


@ -103,7 +103,7 @@ void util_ringbuffer_enqueue( struct util_ringbuffer *ring,
/* Signal change:
*/
cnd_signal(&ring->change);
pipe_mutex_unlock(ring->mutex);
mtx_unlock(&ring->mutex);
}
enum pipe_error util_ringbuffer_dequeue( struct util_ringbuffer *ring,
@ -155,6 +155,6 @@ out:
/* Signal change:
*/
cnd_signal(&ring->change);
pipe_mutex_unlock(ring->mutex);
mtx_unlock(&ring->mutex);
return ret;
}


@ -596,7 +596,7 @@ dd_context_destroy(struct pipe_context *_pipe)
if (dctx->thread) {
mtx_lock(&dctx->mutex);
dctx->kill_thread = 1;
pipe_mutex_unlock(dctx->mutex);
mtx_unlock(&dctx->mutex);
pipe_thread_wait(dctx->thread);
mtx_destroy(&dctx->mutex);
assert(!dctx->records);


@ -942,7 +942,7 @@ PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect, input)
}
/* Unlock and sleep before starting all over again. */
pipe_mutex_unlock(dctx->mutex);
mtx_unlock(&dctx->mutex);
os_time_sleep(10000); /* 10 ms */
mtx_lock(&dctx->mutex);
}
@ -951,7 +951,7 @@ PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect, input)
while (dctx->records)
dd_free_record(&dctx->records);
pipe_mutex_unlock(dctx->mutex);
mtx_unlock(&dctx->mutex);
return 0;
}
@ -1044,7 +1044,7 @@ dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
mtx_lock(&dctx->mutex);
record->next = dctx->records;
dctx->records = record;
pipe_mutex_unlock(dctx->mutex);
mtx_unlock(&dctx->mutex);
}
static void


@ -172,7 +172,7 @@ batch_reset_resources(struct fd_batch *batch)
{
mtx_lock(&batch->ctx->screen->lock);
batch_reset_resources_locked(batch);
pipe_mutex_unlock(batch->ctx->screen->lock);
mtx_unlock(&batch->ctx->screen->lock);
}
static void
@ -205,7 +205,7 @@ __fd_batch_destroy(struct fd_batch *batch)
mtx_lock(&batch->ctx->screen->lock);
fd_bc_invalidate_batch(batch, true);
pipe_mutex_unlock(batch->ctx->screen->lock);
mtx_unlock(&batch->ctx->screen->lock);
batch_fini(batch);
@ -289,7 +289,7 @@ batch_flush(struct fd_batch *batch)
} else {
mtx_lock(&batch->ctx->screen->lock);
fd_bc_invalidate_batch(batch, false);
pipe_mutex_unlock(batch->ctx->screen->lock);
mtx_unlock(&batch->ctx->screen->lock);
}
}
@ -337,7 +337,7 @@ batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
*/
if (batch_depends_on(dep, batch)) {
DBG("%p: flush forced on %p!", batch, dep);
pipe_mutex_unlock(batch->ctx->screen->lock);
mtx_unlock(&batch->ctx->screen->lock);
fd_batch_flush(dep, false);
mtx_lock(&batch->ctx->screen->lock);
} else {


@ -136,7 +136,7 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
struct fd_batch *batch = NULL;
fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
if (batch->ctx == ctx) {
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
fd_batch_reference(&last_batch, batch);
fd_batch_flush(batch, false);
mtx_lock(&ctx->screen->lock);
@ -144,7 +144,7 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
fd_batch_reference_locked(&batch, NULL);
}
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
if (last_batch) {
fd_batch_sync(last_batch);
@ -165,7 +165,7 @@ fd_bc_invalidate_context(struct fd_context *ctx)
fd_batch_reference_locked(&batch, NULL);
}
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
}
void
@ -224,7 +224,7 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
rsc->bc_batch_mask = 0;
pipe_mutex_unlock(screen->lock);
mtx_unlock(&screen->lock);
}
struct fd_batch *
@ -263,7 +263,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
/* we can drop lock temporarily here, since we hold a ref,
* flush_batch won't disappear under us.
*/
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
fd_batch_flush(flush_batch, true);
mtx_lock(&ctx->screen->lock);
@ -303,7 +303,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
cache->batches[idx] = batch;
out:
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
return batch;
}
@ -349,7 +349,7 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
rsc->bc_batch_mask = (1 << batch->idx);
}
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
return batch;
}


@ -322,7 +322,7 @@ fd_context_lock(struct fd_context *ctx)
static inline void
fd_context_unlock(struct fd_context *ctx)
{
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
}
static inline struct pipe_scissor_state *


@ -172,7 +172,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
resource_written(batch, batch->query_buf);
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
batch->num_draws++;
@ -346,7 +346,7 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
resource_written(batch, batch->query_buf);
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
pfb->width, pfb->height, depth, stencil,


@ -212,7 +212,7 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
}
swap(rsc->batch_mask, shadow->batch_mask);
pipe_mutex_unlock(ctx->screen->lock);
mtx_unlock(&ctx->screen->lock);
struct pipe_blit_info blit = {0};
blit.dst.resource = prsc;


@ -101,7 +101,7 @@ lp_fence_signal(struct lp_fence *fence)
*/
cnd_broadcast(&fence->signalled);
pipe_mutex_unlock(fence->mutex);
mtx_unlock(&fence->mutex);
}
boolean
@ -121,7 +121,7 @@ lp_fence_wait(struct lp_fence *f)
while (f->count < f->rank) {
cnd_wait(&f->signalled, &f->mutex);
}
pipe_mutex_unlock(f->mutex);
mtx_unlock(&f->mutex);
}


@ -502,7 +502,7 @@ lp_scene_bin_iter_next( struct lp_scene *scene , int *x, int *y)
end:
/*printf("return bin %p at %d, %d\n", (void *) bin, *bin_x, *bin_y);*/
pipe_mutex_unlock(scene->mutex);
mtx_unlock(&scene->mutex);
return bin;
}


@ -179,7 +179,7 @@ lp_setup_rasterize_scene( struct lp_setup_context *setup )
*/
lp_rast_queue_scene(screen->rast, scene);
lp_rast_finish(screen->rast);
pipe_mutex_unlock(screen->rast_mutex);
mtx_unlock(&screen->rast_mutex);
lp_scene_end_rasterization(setup->scene);
lp_setup_reset( setup );


@ -1082,7 +1082,7 @@ nv50_blit_select_fp(struct nv50_blitctx *ctx, const struct pipe_blit_info *info)
if (!blitter->fp[targ][mode])
blitter->fp[targ][mode] =
nv50_blitter_make_fp(&ctx->nv50->base.pipe, mode, ptarg);
pipe_mutex_unlock(blitter->mutex);
mtx_unlock(&blitter->mutex);
}
ctx->fp = blitter->fp[targ][mode];
}


@ -922,7 +922,7 @@ nvc0_blit_select_fp(struct nvc0_blitctx *ctx, const struct pipe_blit_info *info)
if (!blitter->fp[targ][mode])
blitter->fp[targ][mode] =
nv50_blitter_make_fp(&ctx->nvc0->base.pipe, mode, ptarg);
pipe_mutex_unlock(blitter->mutex);
mtx_unlock(&blitter->mutex);
}
ctx->fp = blitter->fp[targ][mode];
}


@ -336,7 +336,7 @@ static void r300_clear(struct pipe_context* pipe,
* Then in texture_destroy, we set cmask_resource to NULL. */
r300->screen->cmask_resource = fb->cbufs[0]->texture;
}
pipe_mutex_unlock(r300->screen->cmask_mutex);
mtx_unlock(&r300->screen->cmask_mutex);
}
if (r300->screen->cmask_resource == fb->cbufs[0]->texture) {


@ -1034,7 +1034,7 @@ static void r300_texture_destroy(struct pipe_screen *screen,
if (texture == rscreen->cmask_resource) {
rscreen->cmask_resource = NULL;
}
pipe_mutex_unlock(rscreen->cmask_mutex);
mtx_unlock(&rscreen->cmask_mutex);
}
pb_reference(&tex->buf, NULL);
FREE(tex);


@ -183,7 +183,7 @@ static uint64_t r600_read_mmio_counter(struct r600_common_screen *rscreen,
if (!rscreen->gpu_load_thread)
rscreen->gpu_load_thread =
pipe_thread_create(r600_gpu_load_thread, rscreen);
pipe_mutex_unlock(rscreen->gpu_load_mutex);
mtx_unlock(&rscreen->gpu_load_mutex);
}
unsigned busy = p_atomic_read(&rscreen->mmio_counters.array[busy_index]);


@ -1406,5 +1406,5 @@ void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_re
mtx_lock(&rscreen->aux_context_lock);
rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
pipe_mutex_unlock(rscreen->aux_context_lock);
mtx_unlock(&rscreen->aux_context_lock);
}


@ -311,7 +311,7 @@ static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
ctx->flush(ctx, NULL, 0);
if (ctx == rscreen->aux_context)
pipe_mutex_unlock(rscreen->aux_context_lock);
mtx_unlock(&rscreen->aux_context_lock);
}
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
@ -401,7 +401,7 @@ bool r600_texture_disable_dcc(struct r600_common_context *rctx,
rctx->b.flush(&rctx->b, NULL, 0);
if (&rctx->b == rscreen->aux_context)
pipe_mutex_unlock(rscreen->aux_context_lock);
mtx_unlock(&rscreen->aux_context_lock);
return r600_texture_discard_dcc(rscreen, rtex);
}


@ -7472,7 +7472,7 @@ si_get_shader_part(struct si_screen *sscreen,
/* Find existing. */
for (result = *list; result; result = result->next) {
if (memcmp(&result->key, key, sizeof(*key)) == 0) {
pipe_mutex_unlock(sscreen->shader_parts_mutex);
mtx_unlock(&sscreen->shader_parts_mutex);
return result;
}
}
@ -7526,7 +7526,7 @@ si_get_shader_part(struct si_screen *sscreen,
out:
si_llvm_dispose(&ctx);
pipe_mutex_unlock(sscreen->shader_parts_mutex);
mtx_unlock(&sscreen->shader_parts_mutex);
return result;
}


@ -1270,17 +1270,17 @@ again:
if (iter->is_optimized &&
!util_queue_fence_is_signalled(&iter->optimized_ready)) {
memset(&key->opt, 0, sizeof(key->opt));
pipe_mutex_unlock(sel->mutex);
mtx_unlock(&sel->mutex);
goto again;
}
if (iter->compilation_failed) {
pipe_mutex_unlock(sel->mutex);
mtx_unlock(&sel->mutex);
return -1; /* skip the draw call */
}
state->current = iter;
pipe_mutex_unlock(sel->mutex);
mtx_unlock(&sel->mutex);
return 0;
}
}
@ -1288,7 +1288,7 @@ again:
/* Build a new shader. */
shader = CALLOC_STRUCT(si_shader);
if (!shader) {
pipe_mutex_unlock(sel->mutex);
mtx_unlock(&sel->mutex);
return -ENOMEM;
}
shader->selector = sel;
@ -1307,7 +1307,7 @@ again:
if (!main_part) {
FREE(shader);
pipe_mutex_unlock(sel->mutex);
mtx_unlock(&sel->mutex);
return -ENOMEM; /* skip the draw call */
}
@ -1320,7 +1320,7 @@ again:
&compiler_state->debug) != 0) {
FREE(main_part);
FREE(shader);
pipe_mutex_unlock(sel->mutex);
mtx_unlock(&sel->mutex);
return -ENOMEM; /* skip the draw call */
}
*mainp = main_part;
@ -1357,7 +1357,7 @@ again:
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
pipe_mutex_unlock(sel->mutex);
mtx_unlock(&sel->mutex);
goto again;
}
@ -1367,7 +1367,7 @@ again:
if (!shader->compilation_failed)
state->current = shader;
pipe_mutex_unlock(sel->mutex);
mtx_unlock(&sel->mutex);
return shader->compilation_failed ? -1 : 0;
}
@ -1461,9 +1461,9 @@ void si_init_shader_selector_async(void *job, int thread_index)
if (tgsi_binary &&
si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
pipe_mutex_unlock(sscreen->shader_cache_mutex);
mtx_unlock(&sscreen->shader_cache_mutex);
} else {
pipe_mutex_unlock(sscreen->shader_cache_mutex);
mtx_unlock(&sscreen->shader_cache_mutex);
/* Compile the shader if it hasn't been loaded from the cache. */
if (si_compile_tgsi_shader(sscreen, tm, shader, false,
@ -1478,7 +1478,7 @@ void si_init_shader_selector_async(void *job, int thread_index)
mtx_lock(&sscreen->shader_cache_mutex);
if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
FREE(tgsi_binary);
pipe_mutex_unlock(sscreen->shader_cache_mutex);
mtx_unlock(&sscreen->shader_cache_mutex);
}
}


@ -49,7 +49,7 @@ rbug_destroy(struct pipe_context *_pipe)
mtx_lock(&rb_pipe->call_mutex);
pipe->destroy(pipe);
rb_pipe->pipe = NULL;
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
FREE(rb_pipe);
}
@ -128,10 +128,10 @@ rbug_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info)
!(rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY] && rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY]->disabled) &&
!(rb_pipe->curr.shader[PIPE_SHADER_VERTEX] && rb_pipe->curr.shader[PIPE_SHADER_VERTEX]->disabled))
pipe->draw_vbo(pipe, info);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
rbug_draw_block_locked(rb_pipe, RBUG_BLOCK_AFTER);
pipe_mutex_unlock(rb_pipe->draw_mutex);
mtx_unlock(&rb_pipe->draw_mutex);
}
static struct pipe_query *
@ -147,7 +147,7 @@ rbug_create_query(struct pipe_context *_pipe,
query = pipe->create_query(pipe,
query_type,
index);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return query;
}
@ -161,7 +161,7 @@ rbug_destroy_query(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->destroy_query(pipe,
query);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static boolean
@ -174,7 +174,7 @@ rbug_begin_query(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->begin_query(pipe, query);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return ret;
}
@ -189,7 +189,7 @@ rbug_end_query(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->end_query(pipe,
query);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return ret;
}
@ -209,7 +209,7 @@ rbug_get_query_result(struct pipe_context *_pipe,
query,
wait,
result);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return ret;
}
@ -222,7 +222,7 @@ rbug_set_active_query_state(struct pipe_context *_pipe, boolean enable)
mtx_lock(&rb_pipe->call_mutex);
pipe->set_active_query_state(pipe, enable);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void *
@ -236,7 +236,7 @@ rbug_create_blend_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return ret;
}
@ -251,7 +251,7 @@ rbug_bind_blend_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -264,7 +264,7 @@ rbug_delete_blend_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void *
@ -278,7 +278,7 @@ rbug_create_sampler_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_sampler_state(pipe,
sampler);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return ret;
}
@ -294,7 +294,7 @@ rbug_bind_sampler_states(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_sampler_states(pipe, shader, start, count, samplers);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -307,7 +307,7 @@ rbug_delete_sampler_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_sampler_state(pipe,
sampler);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void *
@ -321,7 +321,7 @@ rbug_create_rasterizer_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return ret;
}
@ -336,7 +336,7 @@ rbug_bind_rasterizer_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -349,7 +349,7 @@ rbug_delete_rasterizer_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void *
@ -363,7 +363,7 @@ rbug_create_depth_stencil_alpha_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return ret;
}
@ -378,7 +378,7 @@ rbug_bind_depth_stencil_alpha_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -391,7 +391,7 @@ rbug_delete_depth_stencil_alpha_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void *
@ -404,7 +404,7 @@ rbug_create_fs_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_fs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
if (!result)
return NULL;
@ -427,7 +427,7 @@ rbug_bind_fs_state(struct pipe_context *_pipe,
pipe->bind_fs_state(pipe,
fs);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -439,7 +439,7 @@ rbug_delete_fs_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
rbug_shader_destroy(rb_pipe, rb_shader);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void *
@ -452,7 +452,7 @@ rbug_create_vs_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_vs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
if (!result)
return NULL;
@ -475,7 +475,7 @@ rbug_bind_vs_state(struct pipe_context *_pipe,
pipe->bind_vs_state(pipe,
vs);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -485,9 +485,9 @@ rbug_delete_vs_state(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_shader *rb_shader = rbug_shader(_vs);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
rbug_shader_destroy(rb_pipe, rb_shader);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void *
@ -500,7 +500,7 @@ rbug_create_gs_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_gs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
if (!result)
return NULL;
@ -523,7 +523,7 @@ rbug_bind_gs_state(struct pipe_context *_pipe,
pipe->bind_gs_state(pipe,
gs);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -535,7 +535,7 @@ rbug_delete_gs_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
rbug_shader_destroy(rb_pipe, rb_shader);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void *
@ -551,7 +551,7 @@ rbug_create_vertex_elements_state(struct pipe_context *_pipe,
ret = pipe->create_vertex_elements_state(pipe,
num_elements,
vertex_elements);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return ret;
}
@ -566,7 +566,7 @@ rbug_bind_vertex_elements_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->bind_vertex_elements_state(pipe,
velems);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -579,7 +579,7 @@ rbug_delete_vertex_elements_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->delete_vertex_elements_state(pipe,
velems);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -592,7 +592,7 @@ rbug_set_blend_color(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->set_blend_color(pipe,
blend_color);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -605,7 +605,7 @@ rbug_set_stencil_ref(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->set_stencil_ref(pipe,
stencil_ref);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -618,7 +618,7 @@ rbug_set_clip_state(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->set_clip_state(pipe,
clip);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -642,7 +642,7 @@ rbug_set_constant_buffer(struct pipe_context *_pipe,
shader,
index,
_cb ? &cb : NULL);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -681,7 +681,7 @@ rbug_set_framebuffer_state(struct pipe_context *_pipe,
pipe->set_framebuffer_state(pipe,
state);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -694,7 +694,7 @@ rbug_set_polygon_stipple(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->set_polygon_stipple(pipe,
poly_stipple);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -708,7 +708,7 @@ rbug_set_scissor_states(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->set_scissor_states(pipe, start_slot, num_scissors, scissor);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -722,7 +722,7 @@ rbug_set_viewport_states(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->set_viewport_states(pipe, start_slot, num_viewports, viewport);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -760,7 +760,7 @@ rbug_set_sampler_views(struct pipe_context *_pipe,
pipe->set_sampler_views(pipe, shader, start, num, views);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -787,7 +787,7 @@ rbug_set_vertex_buffers(struct pipe_context *_pipe,
num_buffers,
buffers);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -806,7 +806,7 @@ rbug_set_index_buffer(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->set_index_buffer(pipe, ib);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -818,7 +818,7 @@ rbug_set_sample_mask(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->set_sample_mask(pipe, sample_mask);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static struct pipe_stream_output_target *
@ -834,7 +834,7 @@ rbug_create_stream_output_target(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
target = pipe->create_stream_output_target(pipe, res, buffer_offset,
buffer_size);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
return target;
}
@ -847,7 +847,7 @@ rbug_stream_output_target_destroy(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->stream_output_target_destroy(pipe, target);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -861,7 +861,7 @@ rbug_set_stream_output_targets(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->set_stream_output_targets(pipe, num_targets, targets, offsets);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -892,7 +892,7 @@ rbug_resource_copy_region(struct pipe_context *_pipe,
src,
src_level,
src_box);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -912,7 +912,7 @@ rbug_blit(struct pipe_context *_pipe, const struct pipe_blit_info *_blit_info)
mtx_lock(&rb_pipe->call_mutex);
pipe->blit(pipe, &blit_info);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -926,7 +926,7 @@ rbug_flush_resource(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->flush_resource(pipe, res);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -945,7 +945,7 @@ rbug_clear(struct pipe_context *_pipe,
color,
depth,
stencil);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -970,7 +970,7 @@ rbug_clear_render_target(struct pipe_context *_pipe,
width,
height,
render_condition_enabled);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -999,7 +999,7 @@ rbug_clear_depth_stencil(struct pipe_context *_pipe,
width,
height,
render_condition_enabled);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
@ -1012,7 +1012,7 @@ rbug_flush(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
pipe->flush(pipe, fence, flags);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
static struct pipe_sampler_view *
@ -1030,7 +1030,7 @@ rbug_context_create_sampler_view(struct pipe_context *_pipe,
result = pipe->create_sampler_view(pipe,
resource,
templ);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
if (result)
return rbug_sampler_view_create(rb_pipe, rb_resource, result);
@ -1060,7 +1060,7 @@ rbug_context_create_surface(struct pipe_context *_pipe,
result = pipe->create_surface(pipe,
resource,
surf_tmpl);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
if (result)
return rbug_surface_create(rb_pipe, rb_resource, result);
@ -1077,7 +1077,7 @@ rbug_context_surface_destroy(struct pipe_context *_pipe,
mtx_lock(&rb_pipe->call_mutex);
rbug_surface_destroy(rb_pipe,
rb_surface);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
@ -1103,7 +1103,7 @@ rbug_context_transfer_map(struct pipe_context *_context,
level,
usage,
box, &result);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
*transfer = rbug_transfer_create(rb_pipe, rb_resource, result);
return *transfer ? map : NULL;
@ -1123,7 +1123,7 @@ rbug_context_transfer_flush_region(struct pipe_context *_context,
context->transfer_flush_region(context,
transfer,
box);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
@ -1141,7 +1141,7 @@ rbug_context_transfer_unmap(struct pipe_context *_context,
transfer);
rbug_transfer_destroy(rb_pipe,
rb_transfer);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
@ -1158,7 +1158,7 @@ rbug_context_buffer_subdata(struct pipe_context *_context,
mtx_lock(&rb_pipe->call_mutex);
context->buffer_subdata(context, resource, usage, offset, size, data);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}
@ -1186,7 +1186,7 @@ rbug_context_texture_subdata(struct pipe_context *_context,
data,
stride,
layer_stride);
pipe_mutex_unlock(rb_pipe->call_mutex);
mtx_unlock(&rb_pipe->call_mutex);
}


@ -188,7 +188,7 @@ rbug_texture_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
tr_tex = container_of(ptr, struct rbug_resource, list);
texs[i++] = VOID2U64(tr_tex);
}
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
rbug_send_texture_list_reply(tr_rbug->con, serial, texs, i, NULL);
FREE(texs);
@ -215,7 +215,7 @@ rbug_texture_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
}
if (!tr_tex) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -235,7 +235,7 @@ rbug_texture_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
t->bind,
NULL);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -264,7 +264,7 @@ rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
}
if (!tr_tex) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -287,7 +287,7 @@ rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
context->transfer_unmap(context, t);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -307,7 +307,7 @@ rbug_context_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
rb_context = container_of(ptr, struct rbug_context, list);
ctxs[i++] = VOID2U64(rb_context);
}
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
rbug_send_context_list_reply(tr_rbug->con, serial, ctxs, i, NULL);
FREE(ctxs);
@ -330,7 +330,7 @@ rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
rb_context = rbug_get_context_locked(rb_screen, info->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -352,9 +352,9 @@ rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
VOID2U64(rb_context->curr.zsbuf),
rb_context->draw_blocker, rb_context->draw_blocked, NULL);
pipe_mutex_unlock(rb_context->call_mutex);
pipe_mutex_unlock(rb_context->draw_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->call_mutex);
mtx_unlock(&rb_context->draw_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -371,15 +371,15 @@ rbug_context_draw_block(struct rbug_rbug *tr_rbug, struct rbug_header *header, u
rb_context = rbug_get_context_locked(rb_screen, block->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
mtx_lock(&rb_context->draw_mutex);
rb_context->draw_blocker |= block->block;
pipe_mutex_unlock(rb_context->draw_mutex);
mtx_unlock(&rb_context->draw_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -396,7 +396,7 @@ rbug_context_draw_step(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
rb_context = rbug_get_context_locked(rb_screen, step->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -407,11 +407,11 @@ rbug_context_draw_step(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
} else {
rb_context->draw_blocked &= ~step->step;
}
pipe_mutex_unlock(rb_context->draw_mutex);
mtx_unlock(&rb_context->draw_mutex);
cnd_broadcast(&rb_context->draw_cond);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -428,7 +428,7 @@ rbug_context_draw_unblock(struct rbug_rbug *tr_rbug, struct rbug_header *header,
rb_context = rbug_get_context_locked(rb_screen, unblock->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -440,11 +440,11 @@ rbug_context_draw_unblock(struct rbug_rbug *tr_rbug, struct rbug_header *header,
rb_context->draw_blocked &= ~unblock->unblock;
}
rb_context->draw_blocker &= ~unblock->unblock;
pipe_mutex_unlock(rb_context->draw_mutex);
mtx_unlock(&rb_context->draw_mutex);
cnd_broadcast(&rb_context->draw_cond);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -461,7 +461,7 @@ rbug_context_draw_rule(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
rb_context = rbug_get_context_locked(rb_screen, rule->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -472,11 +472,11 @@ rbug_context_draw_rule(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
rb_context->draw_rule.surf = U642VOID(rule->surface);
rb_context->draw_rule.blocker = rule->block;
rb_context->draw_blocker |= RBUG_BLOCK_RULE;
pipe_mutex_unlock(rb_context->draw_mutex);
mtx_unlock(&rb_context->draw_mutex);
cnd_broadcast(&rb_context->draw_cond);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -493,7 +493,7 @@ rbug_context_flush(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32
rb_context = rbug_get_context_locked(rb_screen, flush->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -502,8 +502,8 @@ rbug_context_flush(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32
rb_context->pipe->flush(rb_context->pipe, NULL, 0);
pipe_mutex_unlock(rb_context->call_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->call_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -524,7 +524,7 @@ rbug_shader_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
rb_context = rbug_get_context_locked(rb_screen, list->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -535,8 +535,8 @@ rbug_shader_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
shdrs[i++] = VOID2U64(tr_shdr);
}
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
rbug_send_shader_list_reply(tr_rbug->con, serial, shdrs, i, NULL);
FREE(shdrs);
@ -559,7 +559,7 @@ rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
rb_context = rbug_get_context_locked(rb_screen, info->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -568,8 +568,8 @@ rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
tr_shdr = rbug_get_shader_locked(rb_context, info->shader);
if (!tr_shdr) {
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -588,8 +588,8 @@ rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
tr_shdr->disabled,
NULL);
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -607,7 +607,7 @@ rbug_shader_disable(struct rbug_rbug *tr_rbug, struct rbug_header *header)
rb_context = rbug_get_context_locked(rb_screen, dis->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -616,15 +616,15 @@ rbug_shader_disable(struct rbug_rbug *tr_rbug, struct rbug_header *header)
tr_shdr = rbug_get_shader_locked(rb_context, dis->shader);
if (!tr_shdr) {
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
tr_shdr->disabled = dis->disable;
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
}
@ -644,7 +644,7 @@ rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
rb_context = rbug_get_context_locked(rb_screen, rep->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -653,8 +653,8 @@ rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
tr_shdr = rbug_get_shader_locked(rb_context, rep->shader);
if (!tr_shdr) {
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -ESRCH;
}
@ -695,9 +695,9 @@ rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
tr_shdr->replaced_shader = state;
out:
pipe_mutex_unlock(rb_context->call_mutex);
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->call_mutex);
mtx_unlock(&rb_context->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return 0;
@ -706,9 +706,9 @@ err:
tr_shdr->replaced_shader = NULL;
tr_shdr->replaced_tokens = NULL;
pipe_mutex_unlock(rb_context->call_mutex);
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
mtx_unlock(&rb_context->call_mutex);
mtx_unlock(&rb_context->list_mutex);
mtx_unlock(&rb_screen->list_mutex);
return -EINVAL;
}


@ -71,7 +71,7 @@ rbug_screen(struct pipe_screen *screen)
mtx_lock(&scr->list_mutex); \
insert_at_head(&scr->name, &obj->list); \
scr->num_##name++; \
pipe_mutex_unlock(scr->list_mutex); \
mtx_unlock(&scr->list_mutex); \
} while (0)
#define rbug_screen_remove_from_list(scr, name, obj) \
@ -79,7 +79,7 @@ rbug_screen(struct pipe_screen *screen)
mtx_lock(&scr->list_mutex); \
remove_from_list(&obj->list); \
scr->num_##name--; \
pipe_mutex_unlock(scr->list_mutex); \
mtx_unlock(&scr->list_mutex); \
} while (0)


@ -298,7 +298,7 @@ svga_buffer_transfer_flush_region( struct pipe_context *pipe,
mtx_lock(&ss->swc_mutex);
svga_buffer_add_range(sbuf, offset, offset + length);
pipe_mutex_unlock(ss->swc_mutex);
mtx_unlock(&ss->swc_mutex);
}
@ -339,7 +339,7 @@ svga_buffer_transfer_unmap( struct pipe_context *pipe,
}
}
pipe_mutex_unlock(ss->swc_mutex);
mtx_unlock(&ss->swc_mutex);
FREE(transfer);
SVGA_STATS_TIME_POP(svga_sws(svga));
}

View File

@ -646,7 +646,7 @@ svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
assert(map);
assert(!retry);
if (!map) {
pipe_mutex_unlock(ss->swc_mutex);
mtx_unlock(&ss->swc_mutex);
svga_buffer_destroy_hw_storage(ss, sbuf);
return PIPE_ERROR;
}
@ -670,7 +670,7 @@ svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
sbuf->swbuf = NULL;
}
pipe_mutex_unlock(ss->swc_mutex);
mtx_unlock(&ss->swc_mutex);
}
return PIPE_OK;


@ -96,13 +96,13 @@ svga_get_tex_sampler_view(struct pipe_context *pipe,
tex->cached_view->min_lod == min_lod &&
tex->cached_view->max_lod == max_lod) {
svga_sampler_view_reference(&sv, tex->cached_view);
pipe_mutex_unlock(ss->tex_mutex);
mtx_unlock(&ss->tex_mutex);
SVGA_DBG(DEBUG_VIEWS, "svga: Sampler view: reuse %p, %u %u, last %u\n",
pt, min_lod, max_lod, pt->last_level);
svga_validate_sampler_view(svga_context(pipe), sv);
return sv;
}
pipe_mutex_unlock(ss->tex_mutex);
mtx_unlock(&ss->tex_mutex);
}
sv = CALLOC_STRUCT(svga_sampler_view);
@ -165,7 +165,7 @@ svga_get_tex_sampler_view(struct pipe_context *pipe,
mtx_lock(&ss->tex_mutex);
svga_sampler_view_reference(&tex->cached_view, sv);
pipe_mutex_unlock(ss->tex_mutex);
mtx_unlock(&ss->tex_mutex);
debug_reference(&sv->reference,
(debug_reference_descriptor)

@ -154,7 +154,7 @@ svga_screen_cache_lookup(struct svga_screen *svgascreen,
next = curr->next;
}
pipe_mutex_unlock(cache->mutex);
mtx_unlock(&cache->mutex);
if (SVGA_DEBUG & DEBUG_DMA)
debug_printf("%s: cache %s after %u tries (bucket %d)\n", __FUNCTION__,
@ -231,7 +231,7 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
if (surf_size >= SVGA_HOST_SURFACE_CACHE_BYTES) {
/* this surface is too large to cache, just free it */
sws->surface_reference(sws, &handle, NULL);
pipe_mutex_unlock(cache->mutex);
mtx_unlock(&cache->mutex);
return;
}
@ -249,7 +249,7 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
* just discard this surface.
*/
sws->surface_reference(sws, &handle, NULL);
pipe_mutex_unlock(cache->mutex);
mtx_unlock(&cache->mutex);
return;
}
}
@ -300,7 +300,7 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
sws->surface_reference(sws, &handle, NULL);
}
pipe_mutex_unlock(cache->mutex);
mtx_unlock(&cache->mutex);
}
@ -368,7 +368,7 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
next = curr->next;
}
pipe_mutex_unlock(cache->mutex);
mtx_unlock(&cache->mutex);
}

@ -307,7 +307,7 @@ void trace_dump_call_lock(void)
void trace_dump_call_unlock(void)
{
pipe_mutex_unlock(call_mutex);
mtx_unlock(&call_mutex);
}
/*
@ -333,14 +333,14 @@ void trace_dumping_start(void)
{
mtx_lock(&call_mutex);
trace_dumping_start_locked();
pipe_mutex_unlock(call_mutex);
mtx_unlock(&call_mutex);
}
void trace_dumping_stop(void)
{
mtx_lock(&call_mutex);
trace_dumping_stop_locked();
pipe_mutex_unlock(call_mutex);
mtx_unlock(&call_mutex);
}
boolean trace_dumping_enabled(void)
@ -348,7 +348,7 @@ boolean trace_dumping_enabled(void)
boolean ret;
mtx_lock(&call_mutex);
ret = trace_dumping_enabled_locked();
pipe_mutex_unlock(call_mutex);
mtx_unlock(&call_mutex);
return ret;
}
@ -402,7 +402,7 @@ void trace_dump_call_begin(const char *klass, const char *method)
void trace_dump_call_end(void)
{
trace_dump_call_end_locked();
pipe_mutex_unlock(call_mutex);
mtx_unlock(&call_mutex);
}
void trace_dump_arg_begin(const char *name)

@ -107,7 +107,7 @@ vc4_bo_from_cache(struct vc4_screen *screen, uint32_t size, const char *name)
* user will proceed to CPU map it and fill it with stuff.
*/
if (!vc4_bo_wait(bo, 0, NULL)) {
pipe_mutex_unlock(cache->lock);
mtx_unlock(&cache->lock);
return NULL;
}
@ -116,7 +116,7 @@ vc4_bo_from_cache(struct vc4_screen *screen, uint32_t size, const char *name)
bo->name = name;
}
pipe_mutex_unlock(cache->lock);
mtx_unlock(&cache->lock);
return bo;
}
@ -190,7 +190,7 @@ vc4_bo_last_unreference(struct vc4_bo *bo)
clock_gettime(CLOCK_MONOTONIC, &time);
mtx_lock(&screen->bo_cache.lock);
vc4_bo_last_unreference_locked_timed(bo, time.tv_sec);
pipe_mutex_unlock(screen->bo_cache.lock);
mtx_unlock(&screen->bo_cache.lock);
}
static void
@ -267,7 +267,7 @@ vc4_bo_cache_free_all(struct vc4_bo_cache *cache)
vc4_bo_remove_from_cache(cache, bo);
vc4_bo_free(bo);
}
pipe_mutex_unlock(cache->lock);
mtx_unlock(&cache->lock);
}
void
@ -347,7 +347,7 @@ vc4_bo_open_handle(struct vc4_screen *screen,
util_hash_table_set(screen->bo_handles, (void *)(uintptr_t)handle, bo);
done:
pipe_mutex_unlock(screen->bo_handles_mutex);
mtx_unlock(&screen->bo_handles_mutex);
return bo;
}
@ -404,7 +404,7 @@ vc4_bo_get_dmabuf(struct vc4_bo *bo)
mtx_lock(&bo->screen->bo_handles_mutex);
bo->private = false;
util_hash_table_set(bo->screen->bo_handles, (void *)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(bo->screen->bo_handles_mutex);
mtx_unlock(&bo->screen->bo_handles_mutex);
return fd;
}

@ -101,7 +101,7 @@ vc4_bo_unreference(struct vc4_bo **bo)
vc4_bo_last_unreference(*bo);
}
pipe_mutex_unlock(screen->bo_handles_mutex);
mtx_unlock(&screen->bo_handles_mutex);
}
*bo = NULL;

@ -1434,7 +1434,7 @@ dri2_load_opencl_interop(struct dri_screen *screen)
mtx_lock(&screen->opencl_func_mutex);
if (dri2_is_opencl_interop_loaded_locked(screen)) {
pipe_mutex_unlock(screen->opencl_func_mutex);
mtx_unlock(&screen->opencl_func_mutex);
return true;
}
@ -1448,7 +1448,7 @@ dri2_load_opencl_interop(struct dri_screen *screen)
dlsym(RTLD_DEFAULT, "opencl_dri_event_get_fence");
success = dri2_is_opencl_interop_loaded_locked(screen);
pipe_mutex_unlock(screen->opencl_func_mutex);
mtx_unlock(&screen->opencl_func_mutex);
return success;
#else
return false;

@ -204,7 +204,7 @@ xmesa_init_display( Display *display )
while(info) {
if (info->display == display) {
/* Found it */
pipe_mutex_unlock(init_mutex);
mtx_unlock(&init_mutex);
return &info->mesaDisplay;
}
info = info->next;
@ -216,7 +216,7 @@ xmesa_init_display( Display *display )
/* allocate mesa display info */
info = (XMesaExtDisplayInfo *) Xmalloc(sizeof(XMesaExtDisplayInfo));
if (info == NULL) {
pipe_mutex_unlock(init_mutex);
mtx_unlock(&init_mutex);
return NULL;
}
info->display = display;
@ -255,7 +255,7 @@ xmesa_init_display( Display *display )
xmdpy->display = NULL;
}
pipe_mutex_unlock(init_mutex);
mtx_unlock(&init_mutex);
return xmdpy;
}
@ -374,7 +374,7 @@ xmesa_get_window_size(Display *dpy, XMesaBuffer b,
mtx_lock(&xmdpy->mutex);
stat = get_drawable_size(dpy, b->ws.drawable, width, height);
pipe_mutex_unlock(xmdpy->mutex);
mtx_unlock(&xmdpy->mutex);
if (!stat) {
/* probably querying a window that's recently been destroyed */

File diff suppressed because it is too large

@ -92,7 +92,7 @@ nine_queue_wait_flush(struct nine_queue_pool* ctx)
cnd_wait(&ctx->event_push, &ctx->mutex_push);
}
DBG("got cmdbuf=%p\n", cmdbuf);
pipe_mutex_unlock(ctx->mutex_push);
mtx_unlock(&ctx->mutex_push);
cmdbuf->offset = 0;
ctx->cur_instr = 0;
@ -115,7 +115,7 @@ nine_queue_get(struct nine_queue_pool* ctx)
DBG("freeing cmdbuf=%p\n", cmdbuf);
cmdbuf->full = 0;
cnd_signal(&ctx->event_pop);
pipe_mutex_unlock(ctx->mutex_pop);
mtx_unlock(&ctx->mutex_pop);
ctx->tail = (ctx->tail + 1) & NINE_CMD_BUFS_MASK;
@ -151,7 +151,7 @@ nine_queue_flush(struct nine_queue_pool* ctx)
mtx_lock(&ctx->mutex_push);
cmdbuf->full = 1;
cnd_signal(&ctx->event_push);
pipe_mutex_unlock(ctx->mutex_push);
mtx_unlock(&ctx->mutex_push);
ctx->head = (ctx->head + 1) & NINE_CMD_BUFS_MASK;
@ -165,7 +165,7 @@ nine_queue_flush(struct nine_queue_pool* ctx)
cnd_wait(&ctx->event_pop, &ctx->mutex_pop);
}
DBG("got empty cmdbuf=%p\n", cmdbuf);
pipe_mutex_unlock(ctx->mutex_pop);
mtx_unlock(&ctx->mutex_pop);
cmdbuf->offset = 0;
cmdbuf->num_instr = 0;
}

@ -83,7 +83,7 @@ nine_csmt_wait_processed(struct csmt_context *ctx)
while (!p_atomic_read(&ctx->processed)) {
cnd_wait(&ctx->event_processed, &ctx->mutex_processed);
}
pipe_mutex_unlock(ctx->mutex_processed);
mtx_unlock(&ctx->mutex_processed);
}
/* CSMT worker thread */
@ -109,23 +109,23 @@ PIPE_THREAD_ROUTINE(nine_csmt_worker, arg)
mtx_lock(&ctx->mutex_processed);
p_atomic_set(&ctx->processed, TRUE);
cnd_signal(&ctx->event_processed);
pipe_mutex_unlock(ctx->mutex_processed);
mtx_unlock(&ctx->mutex_processed);
}
if (p_atomic_read(&ctx->toPause)) {
pipe_mutex_unlock(ctx->thread_running);
mtx_unlock(&ctx->thread_running);
/* will wait here the thread can be resumed */
mtx_lock(&ctx->thread_resume);
mtx_lock(&ctx->thread_running);
pipe_mutex_unlock(ctx->thread_resume);
mtx_unlock(&ctx->thread_resume);
}
}
pipe_mutex_unlock(ctx->thread_running);
mtx_unlock(&ctx->thread_running);
if (p_atomic_read(&ctx->terminate)) {
mtx_lock(&ctx->mutex_processed);
p_atomic_set(&ctx->processed, TRUE);
cnd_signal(&ctx->event_processed);
pipe_mutex_unlock(ctx->mutex_processed);
mtx_unlock(&ctx->mutex_processed);
break;
}
}
@ -273,8 +273,8 @@ nine_csmt_resume( struct NineDevice9 *device )
return;
ctx->hasPaused = FALSE;
pipe_mutex_unlock(ctx->thread_running);
pipe_mutex_unlock(ctx->thread_resume);
mtx_unlock(&ctx->thread_running);
mtx_unlock(&ctx->thread_resume);
}
struct pipe_context *

@ -107,11 +107,11 @@ struct vl_screen *omx_get_screen(void)
++omx_usecount;
pipe_mutex_unlock(omx_lock);
mtx_unlock(&omx_lock);
return omx_screen;
error:
pipe_mutex_unlock(omx_lock);
mtx_unlock(&omx_lock);
return NULL;
}
@ -127,7 +127,7 @@ void omx_put_screen(void)
else
XCloseDisplay(omx_display);
}
pipe_mutex_unlock(omx_lock);
mtx_unlock(&omx_lock);
}
OMX_ERRORTYPE omx_workaround_Destructor(OMX_COMPONENTTYPE *comp)

@ -66,7 +66,7 @@ vlVaCreateBuffer(VADriverContextP ctx, VAContextID context, VABufferType type,
drv = VL_VA_DRIVER(ctx);
mtx_lock(&drv->mutex);
*buf_id = handle_table_add(drv->htab, buf);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -84,7 +84,7 @@ vlVaBufferSetNumElements(VADriverContextP ctx, VABufferID buf_id,
drv = VL_VA_DRIVER(ctx);
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!buf)
return VA_STATUS_ERROR_INVALID_BUFFER;
@ -120,7 +120,7 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf || buf->export_refcount > 0) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
@ -128,7 +128,7 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
*pbuff = pipe_buffer_map(drv->pipe, buf->derived_surface.resource,
PIPE_TRANSFER_WRITE,
&buf->derived_surface.transfer);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!buf->derived_surface.transfer || !*pbuff)
return VA_STATUS_ERROR_INVALID_BUFFER;
@ -140,7 +140,7 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
*pbuff = buf->data;
}
} else {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
*pbuff = buf->data;
}
@ -163,20 +163,20 @@ vlVaUnmapBuffer(VADriverContextP ctx, VABufferID buf_id)
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf || buf->export_refcount > 0) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
if (buf->derived_surface.resource) {
if (!buf->derived_surface.transfer) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
pipe_buffer_unmap(drv->pipe, buf->derived_surface.transfer);
buf->derived_surface.transfer = NULL;
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -194,7 +194,7 @@ vlVaDestroyBuffer(VADriverContextP ctx, VABufferID buf_id)
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
@ -204,7 +204,7 @@ vlVaDestroyBuffer(VADriverContextP ctx, VABufferID buf_id)
FREE(buf->data);
FREE(buf);
handle_table_remove(VL_VA_DRIVER(ctx)->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -222,7 +222,7 @@ vlVaBufferInfo(VADriverContextP ctx, VABufferID buf_id, VABufferType *type,
drv = VL_VA_DRIVER(ctx);
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!buf)
return VA_STATUS_ERROR_INVALID_BUFFER;
@ -256,7 +256,7 @@ vlVaAcquireBufferHandle(VADriverContextP ctx, VABufferID buf_id,
screen = VL_VA_PSCREEN(ctx);
mtx_lock(&drv->mutex);
buf = handle_table_get(VL_VA_DRIVER(ctx)->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!buf)
return VA_STATUS_ERROR_INVALID_BUFFER;
@ -304,11 +304,11 @@ vlVaAcquireBufferHandle(VADriverContextP ctx, VABufferID buf_id,
if (!screen->resource_get_handle(screen, drv->pipe,
buf->derived_surface.resource,
&whandle, PIPE_HANDLE_USAGE_READ_WRITE)) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
buf_info->handle = (intptr_t)whandle.handle;
break;
@ -341,7 +341,7 @@ vlVaReleaseBufferHandle(VADriverContextP ctx, VABufferID buf_id)
drv = VL_VA_DRIVER(ctx);
mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!buf)
return VA_STATUS_ERROR_INVALID_BUFFER;

@ -202,7 +202,7 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
mtx_lock(&drv->mutex);
*config_id = handle_table_add(drv->htab, config);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -267,7 +267,7 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
mtx_lock(&drv->mutex);
*config_id = handle_table_add(drv->htab, config);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -294,7 +294,7 @@ vlVaDestroyConfig(VADriverContextP ctx, VAConfigID config_id)
FREE(config);
handle_table_remove(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -316,7 +316,7 @@ vlVaQueryConfigAttributes(VADriverContextP ctx, VAConfigID config_id, VAProfile
mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!config)
return VA_STATUS_ERROR_INVALID_CONFIG;

@ -216,7 +216,7 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
drv = VL_VA_DRIVER(ctx);
mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
is_vpp = config->profile == PIPE_VIDEO_PROFILE_UNKNOWN && !picture_width &&
!picture_height && !flag && !render_targets && !num_render_targets;
@ -289,7 +289,7 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
mtx_lock(&drv->mutex);
*context_id = handle_table_add(drv->htab, context);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -307,7 +307,7 @@ vlVaDestroyContext(VADriverContextP ctx, VAContextID context_id)
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
@ -332,7 +332,7 @@ vlVaDestroyContext(VADriverContextP ctx, VAContextID context_id)
}
FREE(context);
handle_table_remove(drv->htab, context_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}

@ -116,7 +116,7 @@ vlVaCreateImage(VADriverContextP ctx, VAImageFormat *format, int width, int heig
return VA_STATUS_ERROR_ALLOCATION_FAILED;
mtx_lock(&drv->mutex);
img->image_id = handle_table_add(drv->htab, img);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
img->format = *format;
img->width = width;
@ -268,7 +268,7 @@ vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
pipe_resource_reference(&img_buf->derived_surface.resource, surfaces[0]->texture);
img->buf = handle_table_add(VL_VA_DRIVER(ctx)->htab, img_buf);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
*image = *img;
@ -289,12 +289,12 @@ vlVaDestroyImage(VADriverContextP ctx, VAImageID image)
mtx_lock(&drv->mutex);
vaimage = handle_table_get(drv->htab, image);
if (!vaimage) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_IMAGE;
}
handle_table_remove(VL_VA_DRIVER(ctx)->htab, image);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
status = vlVaDestroyBuffer(ctx, vaimage->buf);
FREE(vaimage);
return status;
@ -331,25 +331,25 @@ vlVaGetImage(VADriverContextP ctx, VASurfaceID surface, int x, int y,
mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface);
if (!surf || !surf->buffer) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
vaimage = handle_table_get(drv->htab, image);
if (!vaimage) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_IMAGE;
}
img_buf = handle_table_get(drv->htab, vaimage->buf);
if (!img_buf) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
format = VaFourccToPipeFormat(vaimage->format.fourcc);
if (format == PIPE_FORMAT_NONE) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_OPERATION_FAILED;
}
@ -361,14 +361,14 @@ vlVaGetImage(VADriverContextP ctx, VASurfaceID surface, int x, int y,
surf->buffer->buffer_format == PIPE_FORMAT_NV12))
convert = true;
else {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_OPERATION_FAILED;
}
}
views = surf->buffer->get_sampler_view_planes(surf->buffer);
if (!views) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_OPERATION_FAILED;
}
@ -398,7 +398,7 @@ vlVaGetImage(VADriverContextP ctx, VASurfaceID surface, int x, int y,
map = drv->pipe->transfer_map(drv->pipe, views[i]->texture, 0,
PIPE_TRANSFER_READ, &box, &transfer);
if (!map) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_OPERATION_FAILED;
}
@ -415,7 +415,7 @@ vlVaGetImage(VADriverContextP ctx, VASurfaceID surface, int x, int y,
pipe_transfer_unmap(drv->pipe, transfer);
}
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -442,32 +442,32 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
surf = handle_table_get(drv->htab, surface);
if (!surf || !surf->buffer) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
vaimage = handle_table_get(drv->htab, image);
if (!vaimage) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_IMAGE;
}
img_buf = handle_table_get(drv->htab, vaimage->buf);
if (!img_buf) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
if (img_buf->derived_surface.resource) {
/* Attempting to transfer derived image to surface */
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_UNIMPLEMENTED;
}
format = VaFourccToPipeFormat(vaimage->format.fourcc);
if (format == PIPE_FORMAT_NONE) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_OPERATION_FAILED;
}
@ -481,7 +481,7 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
tmp_buf = drv->pipe->create_video_buffer(drv->pipe, &templat);
if (!tmp_buf) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
@ -492,7 +492,7 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
views = surf->buffer->get_sampler_view_planes(surf->buffer);
if (!views) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_OPERATION_FAILED;
}
@ -549,7 +549,7 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
}
}
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}

@ -53,12 +53,12 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
surf = handle_table_get(drv->htab, render_target);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!surf || !surf->buffer)
return VA_STATUS_ERROR_INVALID_SURFACE;
@ -497,14 +497,14 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
for (i = 0; i < num_buffers; ++i) {
vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
if (!buf) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
@ -548,7 +548,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
break;
}
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return vaStatus;
}
@ -571,7 +571,7 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!context)
return VA_STATUS_ERROR_INVALID_CONTEXT;
@ -619,6 +619,6 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
surf->force_flushed = true;
}
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}

@ -76,19 +76,19 @@ vlVaCreateSubpicture(VADriverContextP ctx, VAImageID image,
mtx_lock(&drv->mutex);
img = handle_table_get(drv->htab, image);
if (!img) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_IMAGE;
}
sub = CALLOC(1, sizeof(*sub));
if (!sub) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
sub->image = img;
*subpicture = handle_table_add(VL_VA_DRIVER(ctx)->htab, sub);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -107,13 +107,13 @@ vlVaDestroySubpicture(VADriverContextP ctx, VASubpictureID subpicture)
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SUBPICTURE;
}
FREE(sub);
handle_table_remove(drv->htab, subpicture);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -133,12 +133,12 @@ vlVaSubpictureImage(VADriverContextP ctx, VASubpictureID subpicture, VAImageID i
img = handle_table_get(drv->htab, image);
if (!img) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_IMAGE;
}
sub = handle_table_get(drv->htab, subpicture);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!sub)
return VA_STATUS_ERROR_INVALID_SUBPICTURE;
@ -190,14 +190,14 @@ vlVaAssociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SUBPICTURE;
}
for (i = 0; i < num_surfaces; i++) {
surf = handle_table_get(drv->htab, target_surfaces[i]);
if (!surf) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
}
@ -219,7 +219,7 @@ vlVaAssociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
if (!drv->pipe->screen->is_format_supported(
drv->pipe->screen, tex_temp.format, tex_temp.target,
tex_temp.nr_samples, tex_temp.bind)) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
@ -230,7 +230,7 @@ vlVaAssociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
sub->sampler = drv->pipe->create_sampler_view(drv->pipe, tex, &sampler_templ);
pipe_resource_reference(&tex, NULL);
if (!sub->sampler) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
@ -238,7 +238,7 @@ vlVaAssociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
surf = handle_table_get(drv->htab, target_surfaces[i]);
util_dynarray_append(&surf->subpics, vlVaSubpicture *, sub);
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -260,14 +260,14 @@ vlVaDeassociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SUBPICTURE;
}
for (i = 0; i < num_surfaces; i++) {
surf = handle_table_get(drv->htab, target_surfaces[i]);
if (!surf) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
@ -283,7 +283,7 @@ vlVaDeassociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
while (surf->subpics.size && util_dynarray_top(&surf->subpics, vlVaSubpicture *) == NULL)
(void)util_dynarray_pop(&surf->subpics, vlVaSubpicture *);
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}

@ -74,7 +74,7 @@ vlVaDestroySurfaces(VADriverContextP ctx, VASurfaceID *surface_list, int num_sur
for (i = 0; i < num_surfaces; ++i) {
vlVaSurface *surf = handle_table_get(drv->htab, surface_list[i]);
if (!surf) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
if (surf->buffer)
@ -83,7 +83,7 @@ vlVaDestroySurfaces(VADriverContextP ctx, VASurfaceID *surface_list, int num_sur
FREE(surf);
handle_table_remove(drv->htab, surface_list[i]);
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -106,19 +106,19 @@ vlVaSyncSurface(VADriverContextP ctx, VASurfaceID render_target)
surf = handle_table_get(drv->htab, render_target);
if (!surf || !surf->buffer) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
if (!surf->feedback) {
// No outstanding operation: nothing to do.
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
context = handle_table_get(drv->htab, surf->ctx);
if (!context) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
@ -137,7 +137,7 @@ vlVaSyncSurface(VADriverContextP ctx, VASurfaceID render_target)
context->decoder->get_feedback(context->decoder, surf->feedback, &(surf->coded_buf->coded_size));
surf->feedback = NULL;
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -291,7 +291,7 @@ vlVaPutSurface(VADriverContextP ctx, VASurfaceID surface_id, void* draw, short s
mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface_id);
if (!surf) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
@ -300,7 +300,7 @@ vlVaPutSurface(VADriverContextP ctx, VASurfaceID surface_id, void* draw, short s
tex = vscreen->texture_from_drawable(vscreen, draw);
if (!tex) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_DISPLAY;
}
@ -311,7 +311,7 @@ vlVaPutSurface(VADriverContextP ctx, VASurfaceID surface_id, void* draw, short s
surf_draw = drv->pipe->create_surface(drv->pipe, tex, &surf_templ);
if (!surf_draw) {
pipe_resource_reference(&tex, NULL);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_DISPLAY;
}
@ -327,7 +327,7 @@ vlVaPutSurface(VADriverContextP ctx, VASurfaceID surface_id, void* draw, short s
status = vlVaPutSubpictures(surf, drv, surf_draw, dirty_area, &src_rect, &dst_rect);
if (status) {
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return status;
}
@ -342,7 +342,7 @@ vlVaPutSurface(VADriverContextP ctx, VASurfaceID surface_id, void* draw, short s
pipe_resource_reference(&tex, NULL);
pipe_surface_reference(&surf_draw, NULL);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
@ -401,7 +401,7 @@ vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config_id,
mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (!config)
return VA_STATUS_ERROR_INVALID_CONFIG;
@ -723,12 +723,12 @@ vlVaCreateSurfaces2(VADriverContextP ctx, unsigned int format,
assert(0);
}
}
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
no_res:
pipe_mutex_unlock(drv->mutex);
mtx_unlock(&drv->mutex);
if (i)
vlVaDestroySurfaces(ctx, surfaces, i);

@ -102,7 +102,7 @@ vlVdpBitmapSurfaceCreate(VdpDevice device,
goto err_unlock;
}
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
*surface = vlAddDataHTAB(vlsurface);
if (*surface == 0) {
@ -116,7 +116,7 @@ vlVdpBitmapSurfaceCreate(VdpDevice device,
err_sampler:
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
err_unlock:
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
DeviceReference(&vlsurface->device, NULL);
FREE(vlsurface);
return ret;
@ -136,7 +136,7 @@ vlVdpBitmapSurfaceDestroy(VdpBitmapSurface surface)
mtx_lock(&vlsurface->device->mutex);
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
vlRemoveDataHTAB(surface);
DeviceReference(&vlsurface->device, NULL);
@ -203,7 +203,7 @@ vlVdpBitmapSurfacePutBitsNative(VdpBitmapSurface surface,
PIPE_TRANSFER_WRITE, &dst_box, *source_data,
*source_pitches, 0);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_OK;
}

@ -81,7 +81,7 @@ vlVdpDecoderCreate(VdpDevice device,
PIPE_VIDEO_CAP_SUPPORTED
);
if (!supported) {
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_INVALID_DECODER_PROFILE;
}
@ -100,13 +100,13 @@ vlVdpDecoderCreate(VdpDevice device,
PIPE_VIDEO_CAP_MAX_HEIGHT
);
if (width > maxwidth || height > maxheight) {
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_INVALID_SIZE;
}
vldecoder = CALLOC(1,sizeof(vlVdpDecoder));
if (!vldecoder) {
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_RESOURCES;
}
@ -137,7 +137,7 @@ vlVdpDecoderCreate(VdpDevice device,
}
(void) mtx_init(&vldecoder->mutex, mtx_plain);
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
@ -145,7 +145,7 @@ error_handle:
vldecoder->decoder->destroy(vldecoder->decoder);
error_decoder:
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
DeviceReference(&vldecoder->device, NULL);
FREE(vldecoder);
return ret;
@ -165,7 +165,7 @@ vlVdpDecoderDestroy(VdpDecoder decoder)
mtx_lock(&vldecoder->mutex);
vldecoder->decoder->destroy(vldecoder->decoder);
pipe_mutex_unlock(vldecoder->mutex);
mtx_unlock(&vldecoder->mutex);
mtx_destroy(&vldecoder->mutex);
vlRemoveDataHTAB(decoder);
@ -633,11 +633,11 @@ vlVdpDecoderRender(VdpDecoder decoder,
/* still no luck? get me out of here... */
if (!vlsurf->video_buffer) {
pipe_mutex_unlock(vlsurf->device->mutex);
mtx_unlock(&vlsurf->device->mutex);
return VDP_STATUS_NO_IMPLEMENTATION;
}
vlVdpVideoSurfaceClear(vlsurf);
pipe_mutex_unlock(vlsurf->device->mutex);
mtx_unlock(&vlsurf->device->mutex);
}
for (i = 0; i < bitstream_buffer_count; ++i) {
@ -678,6 +678,6 @@ vlVdpDecoderRender(VdpDecoder decoder,
dec->begin_frame(dec, vlsurf->video_buffer, &desc.base);
dec->decode_bitstream(dec, vlsurf->video_buffer, &desc.base, bitstream_buffer_count, buffers, sizes);
dec->end_frame(dec, vlsurf->video_buffer, &desc.base);
pipe_mutex_unlock(vldecoder->mutex);
mtx_unlock(&vldecoder->mutex);
return ret;
}

@ -42,7 +42,7 @@ boolean vlCreateHTAB(void)
if (!htab)
htab = handle_table_create();
ret = htab != NULL;
pipe_mutex_unlock(htab_lock);
mtx_unlock(&htab_lock);
return ret;
}
@ -53,7 +53,7 @@ void vlDestroyHTAB(void)
handle_table_destroy(htab);
htab = NULL;
}
pipe_mutex_unlock(htab_lock);
mtx_unlock(&htab_lock);
}
vlHandle vlAddDataHTAB(void *data)
@ -64,7 +64,7 @@ vlHandle vlAddDataHTAB(void *data)
mtx_lock(&htab_lock);
if (htab)
handle = handle_table_add(htab, data);
pipe_mutex_unlock(htab_lock);
mtx_unlock(&htab_lock);
return handle;
}
@ -76,7 +76,7 @@ void* vlGetDataHTAB(vlHandle handle)
mtx_lock(&htab_lock);
if (htab)
data = handle_table_get(htab, handle);
pipe_mutex_unlock(htab_lock);
mtx_unlock(&htab_lock);
return data;
}
@ -85,5 +85,5 @@ void vlRemoveDataHTAB(vlHandle handle)
mtx_lock(&htab_lock);
if (htab)
handle_table_remove(htab, handle);
pipe_mutex_unlock(htab_lock);
mtx_unlock(&htab_lock);
}

@ -162,7 +162,7 @@ vlVdpVideoMixerCreate(VdpDevice device,
}
vmixer->luma_key.luma_min = 1.0f;
vmixer->luma_key.luma_max = 0.0f;
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
@ -173,7 +173,7 @@ no_handle:
err_csc_matrix:
vl_compositor_cleanup_state(&vmixer->cstate);
no_compositor_state:
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
DeviceReference(&vmixer->device, NULL);
FREE(vmixer);
return ret;
@ -216,7 +216,7 @@ vlVdpVideoMixerDestroy(VdpVideoMixer mixer)
vl_bicubic_filter_cleanup(vmixer->bicubic.filter);
FREE(vmixer->bicubic.filter);
}
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
DeviceReference(&vmixer->device, NULL);
FREE(vmixer);
@ -312,7 +312,7 @@ VdpStatus vlVdpVideoMixerRender(VdpVideoMixer mixer,
break;
default:
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE;
}
@ -387,7 +387,7 @@ VdpStatus vlVdpVideoMixerRender(VdpVideoMixer mixer,
for (i = 0; i < layer_count; ++i) {
vlVdpOutputSurface *src = vlGetDataHTAB(layers->source_surface);
if (!src) {
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return VDP_STATUS_INVALID_HANDLE;
}
@ -454,7 +454,7 @@ VdpStatus vlVdpVideoMixerRender(VdpVideoMixer mixer,
pipe_sampler_view_reference(&sampler_view, NULL);
pipe_surface_reference(&surface, NULL);
}
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return VDP_STATUS_OK;
}
@ -694,7 +694,7 @@ vlVdpVideoMixerSetFeatureEnables(VdpVideoMixer mixer,
if (!debug_get_bool_option("G3DVL_NO_CSC", FALSE))
if (!vl_compositor_set_csc_matrix(&vmixer->cstate, (const vl_csc_matrix *)&vmixer->csc,
vmixer->luma_key.luma_min, vmixer->luma_key.luma_max)) {
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return VDP_STATUS_ERROR;
}
break;
@ -705,11 +705,11 @@ vlVdpVideoMixerSetFeatureEnables(VdpVideoMixer mixer,
break;
default:
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE;
}
}
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return VDP_STATUS_OK;
}
@ -889,11 +889,11 @@ vlVdpVideoMixerSetAttributeValues(VdpVideoMixer mixer,
goto fail;
}
}
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return VDP_STATUS_OK;
fail:
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return ret;
}
@ -987,11 +987,11 @@ vlVdpVideoMixerGetAttributeValues(VdpVideoMixer mixer,
*(uint8_t*)attribute_values[i] = vmixer->skip_chroma_deint;
break;
default:
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE;
}
}
pipe_mutex_unlock(vmixer->device->mutex);
mtx_unlock(&vmixer->device->mutex);
return VDP_STATUS_OK;
}

@ -122,7 +122,7 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
goto err_resource;
vl_compositor_reset_dirty_area(&vlsurface->dirty_area);
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
@ -131,7 +131,7 @@ err_resource:
pipe_surface_reference(&vlsurface->surface, NULL);
pipe_resource_reference(&res, NULL);
err_unlock:
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
DeviceReference(&vlsurface->device, NULL);
FREE(vlsurface);
return VDP_STATUS_ERROR;
@ -158,7 +158,7 @@ vlVdpOutputSurfaceDestroy(VdpOutputSurface surface)
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
pipe->screen->fence_reference(pipe->screen, &vlsurface->fence, NULL);
vl_compositor_cleanup_state(&vlsurface->cstate);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
vlRemoveDataHTAB(surface);
DeviceReference(&vlsurface->device, NULL);
@ -222,7 +222,7 @@ vlVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface,
box = RectToPipeBox(source_rect, res);
map = pipe->transfer_map(pipe, res, 0, PIPE_TRANSFER_READ, &box, &transfer);
if (!map) {
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
}
@ -230,7 +230,7 @@ vlVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface,
box.width, box.height, map, transfer->stride, 0, 0);
pipe_transfer_unmap(pipe, transfer);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_OK;
}
@ -266,14 +266,14 @@ vlVdpOutputSurfacePutBitsNative(VdpOutputSurface surface,
/* Check for a no-op. (application bug?) */
if (!dst_box.width || !dst_box.height) {
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_OK;
}
pipe->texture_subdata(pipe, vlsurface->sampler_view->texture, 0,
PIPE_TRANSFER_WRITE, &dst_box, *source_data,
*source_pitches, 0);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_OK;
}
@ -410,14 +410,14 @@ vlVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface,
pipe_sampler_view_reference(&sv_idx, NULL);
pipe_sampler_view_reference(&sv_tbl, NULL);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_OK;
error_resource:
pipe_sampler_view_reference(&sv_idx, NULL);
pipe_sampler_view_reference(&sv_tbl, NULL);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
}
@ -476,14 +476,14 @@ vlVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface,
vbuffer = pipe->create_video_buffer(pipe, &vtmpl);
if (!vbuffer) {
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
}
sampler_views = vbuffer->get_sampler_view_planes(vbuffer);
if (!sampler_views) {
vbuffer->destroy(vbuffer);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
}
@ -516,12 +516,12 @@ vlVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface,
vl_compositor_render(cstate, compositor, vlsurface->surface, &vlsurface->dirty_area, false);
vbuffer->destroy(vbuffer);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_OK;
err_csc_matrix:
vbuffer->destroy(vbuffer);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_ERROR;
}
@ -701,7 +701,7 @@ vlVdpOutputSurfaceRenderOutputSurface(VdpOutputSurface destination_surface,
vl_compositor_render(cstate, compositor, dst_vlsurface->surface, &dst_vlsurface->dirty_area, false);
context->delete_blend_state(context, blend);
pipe_mutex_unlock(dst_vlsurface->device->mutex);
mtx_unlock(&dst_vlsurface->device->mutex);
return VDP_STATUS_OK;
}
@ -767,7 +767,7 @@ vlVdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface,
vl_compositor_render(cstate, compositor, dst_vlsurface->surface, &dst_vlsurface->dirty_area, false);
context->delete_blend_state(context, blend);
pipe_mutex_unlock(dst_vlsurface->device->mutex);
mtx_unlock(&dst_vlsurface->device->mutex);
return VDP_STATUS_OK;
}
@ -782,7 +782,7 @@ struct pipe_resource *vlVdpOutputSurfaceGallium(VdpOutputSurface surface)
mtx_lock(&vlsurface->device->mutex);
vlsurface->device->context->flush(vlsurface->device->context, NULL, 0);
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return vlsurface->surface->texture;
}
@ -811,11 +811,11 @@ VdpStatus vlVdpOutputSurfaceDMABuf(VdpOutputSurface surface,
if (!pscreen->resource_get_handle(pscreen, vlsurface->device->context,
vlsurface->surface->texture, &whandle,
PIPE_HANDLE_USAGE_READ_WRITE)) {
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_NO_IMPLEMENTATION;
}
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
result->handle = whandle.handle;
result->width = vlsurface->surface->width;

@ -67,11 +67,11 @@ vlVdpPresentationQueueCreate(VdpDevice device,
mtx_lock(&dev->mutex);
if (!vl_compositor_init_state(&pq->cstate, dev->context)) {
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
ret = VDP_STATUS_ERROR;
goto no_compositor;
}
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
*presentation_queue = vlAddDataHTAB(pq);
if (*presentation_queue == 0) {
@ -102,7 +102,7 @@ vlVdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue)
mtx_lock(&pq->device->mutex);
vl_compositor_cleanup_state(&pq->cstate);
pipe_mutex_unlock(pq->device->mutex);
mtx_unlock(&pq->device->mutex);
vlRemoveDataHTAB(presentation_queue);
DeviceReference(&pq->device, NULL);
@ -135,7 +135,7 @@ vlVdpPresentationQueueSetBackgroundColor(VdpPresentationQueue presentation_queue
mtx_lock(&pq->device->mutex);
vl_compositor_set_clear_color(&pq->cstate, &color);
pipe_mutex_unlock(pq->device->mutex);
mtx_unlock(&pq->device->mutex);
return VDP_STATUS_OK;
}
@ -159,7 +159,7 @@ vlVdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue
mtx_lock(&pq->device->mutex);
vl_compositor_get_clear_color(&pq->cstate, &color);
pipe_mutex_unlock(pq->device->mutex);
mtx_unlock(&pq->device->mutex);
background_color->red = color.f[0];
background_color->green = color.f[1];
@ -188,7 +188,7 @@ vlVdpPresentationQueueGetTime(VdpPresentationQueue presentation_queue,
mtx_lock(&pq->device->mutex);
*current_time = pq->device->vscreen->get_timestamp(pq->device->vscreen,
(void *)pq->drawable);
pipe_mutex_unlock(pq->device->mutex);
mtx_unlock(&pq->device->mutex);
return VDP_STATUS_OK;
}
@ -235,7 +235,7 @@ vlVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
vscreen->set_back_texture_from_output(vscreen, surf->surface->texture, clip_width, clip_height);
tex = vscreen->texture_from_drawable(vscreen, (void *)pq->drawable);
if (!tex) {
pipe_mutex_unlock(pq->device->mutex);
mtx_unlock(&pq->device->mutex);
return VDP_STATUS_INVALID_HANDLE;
}
@ -293,7 +293,7 @@ vlVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
pipe_resource_reference(&tex, NULL);
pipe_surface_reference(&surf_draw, NULL);
}
pipe_mutex_unlock(pq->device->mutex);
mtx_unlock(&pq->device->mutex);
return VDP_STATUS_OK;
}
@ -327,7 +327,7 @@ vlVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_qu
screen->fence_finish(screen, NULL, surf->fence, PIPE_TIMEOUT_INFINITE);
screen->fence_reference(screen, &surf->fence, NULL);
}
pipe_mutex_unlock(pq->device->mutex);
mtx_unlock(&pq->device->mutex);
return vlVdpPresentationQueueGetTime(presentation_queue, first_presentation_time);
}
@ -369,14 +369,14 @@ vlVdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue
if (screen->fence_finish(screen, NULL, surf->fence, 0)) {
screen->fence_reference(screen, &surf->fence, NULL);
*status = VDP_PRESENTATION_QUEUE_STATUS_VISIBLE;
pipe_mutex_unlock(pq->device->mutex);
mtx_unlock(&pq->device->mutex);
// We actually need to query the timestamp of the last VSYNC event from the hardware
vlVdpPresentationQueueGetTime(presentation_queue, first_presentation_time);
*first_presentation_time += 1;
} else {
*status = VDP_PRESENTATION_QUEUE_STATUS_QUEUED;
pipe_mutex_unlock(pq->device->mutex);
mtx_unlock(&pq->device->mutex);
}
}

@ -87,7 +87,7 @@ vlVdpVideoSurfaceQueryCapabilities(VdpDevice device, VdpChromaType surface_chrom
/* XXX: Current limits */
*is_supported = true;
max_2d_texture_level = pscreen->get_param(pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
if (!max_2d_texture_level)
return VDP_STATUS_RESOURCES;
@ -135,7 +135,7 @@ vlVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaTyp
PIPE_FORMAT_NV12,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM)) {
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
}
break;
@ -162,7 +162,7 @@ vlVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaTyp
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM
);
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
}
@ -213,7 +213,7 @@ vlVdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile,
*max_level = 0;
*max_macroblocks = 0;
}
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
}
@ -255,7 +255,7 @@ vlVdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
if (!max_2d_texture_level) {
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_ERROR;
}
@ -264,7 +264,7 @@ vlVdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
*max_width = 0;
*max_height = 0;
}
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
}
@ -302,7 +302,7 @@ vlVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device, VdpRGBAFor
pscreen, format, PIPE_TEXTURE_2D, 1,
PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
);
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
}
@ -363,7 +363,7 @@ vlVdpOutputSurfaceQueryPutBitsIndexedCapabilities(VdpDevice device,
pscreen, colortbl_format, PIPE_TEXTURE_1D, 1,
PIPE_BIND_SAMPLER_VIEW
);
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
}
@ -413,7 +413,7 @@ vlVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device, VdpRGBAFormat
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM
);
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
}
@ -455,7 +455,7 @@ vlVdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
if (!max_2d_texture_level) {
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_ERROR;
}
@ -464,7 +464,7 @@ vlVdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
*max_width = 0;
*max_height = 0;
}
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
}
@ -556,10 +556,10 @@ vlVdpVideoMixerQueryParameterValueRange(VdpDevice device, VdpVideoMixerParameter
case VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE:
default:
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER;
}
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
return VDP_STATUS_OK;
}

@ -104,7 +104,7 @@ vlVdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type,
/* do not mandate early allocation of a video buffer */
vlVdpVideoSurfaceClear(p_surf);
pipe_mutex_unlock(dev->mutex);
mtx_unlock(&dev->mutex);
*surface = vlAddDataHTAB(p_surf);
if (*surface == 0) {
@ -141,7 +141,7 @@ vlVdpVideoSurfaceDestroy(VdpVideoSurface surface)
mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer)
p_surf->video_buffer->destroy(p_surf->video_buffer);
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
vlRemoveDataHTAB(surface);
DeviceReference(&p_surf->device, NULL);
@ -241,7 +241,7 @@ vlVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
mtx_lock(&vlsurface->device->mutex);
sampler_views = vlsurface->video_buffer->get_sampler_view_planes(vlsurface->video_buffer);
if (!sampler_views) {
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
}
@ -263,7 +263,7 @@ vlVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
map = pipe->transfer_map(pipe, sv->texture, 0,
PIPE_TRANSFER_READ, &box, &transfer);
if (!map) {
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
}
@ -288,7 +288,7 @@ vlVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
pipe_transfer_unmap(pipe, transfer);
}
}
pipe_mutex_unlock(vlsurface->device->mutex);
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_OK;
}
@ -337,7 +337,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_PREFERED_FORMAT);
if (nformat == PIPE_FORMAT_NONE) {
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
return VDP_STATUS_NO_IMPLEMENTATION;
}
}
@ -356,7 +356,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
/* stil no luck? ok forget it we don't support it */
if (!p_surf->video_buffer) {
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
return VDP_STATUS_NO_IMPLEMENTATION;
}
vlVdpVideoSurfaceClear(p_surf);
@ -373,7 +373,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
sampler_views = p_surf->video_buffer->get_sampler_view_planes(p_surf->video_buffer);
if (!sampler_views) {
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
return VDP_STATUS_RESOURCES;
}
@ -399,7 +399,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
map = pipe->transfer_map(pipe, tex, 0, usage,
&dst_box, &transfer);
if (!map) {
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
return VDP_STATUS_RESOURCES;
}
@ -422,7 +422,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
}
}
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
return VDP_STATUS_OK;
}
@ -472,7 +472,7 @@ struct pipe_video_buffer *vlVdpVideoSurfaceGallium(VdpVideoSurface surface)
/* try to create a video buffer if we don't already have one */
p_surf->video_buffer = pipe->create_video_buffer(pipe, &p_surf->templat);
}
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
return p_surf->video_buffer;
}
@ -511,13 +511,13 @@ VdpStatus vlVdpVideoSurfaceDMABuf(VdpVideoSurface surface,
/* Check if surface match interop requirements */
if (p_surf->video_buffer == NULL || !p_surf->video_buffer->interlaced ||
p_surf->video_buffer->buffer_format != PIPE_FORMAT_NV12) {
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
return VDP_STATUS_NO_IMPLEMENTATION;
}
surf = p_surf->video_buffer->get_surfaces(p_surf->video_buffer)[plane];
if (!surf) {
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
return VDP_STATUS_RESOURCES;
}
@ -529,11 +529,11 @@ VdpStatus vlVdpVideoSurfaceDMABuf(VdpVideoSurface surface,
if (!pscreen->resource_get_handle(pscreen, p_surf->device->context,
surf->texture, &whandle,
PIPE_HANDLE_USAGE_READ_WRITE)) {
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
return VDP_STATUS_NO_IMPLEMENTATION;
}
pipe_mutex_unlock(p_surf->device->mutex);
mtx_unlock(&p_surf->device->mutex);
result->handle = whandle.handle;
result->width = surf->width;

@ -414,6 +414,6 @@ void
GalliumContext::Unlock()
{
CALLED();
pipe_mutex_unlock(fMutex);
mtx_unlock(&fMutex);
}
/* vim: set tabstop=4: */

@ -99,7 +99,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
bo->num_fences -= idle_fences;
buffer_idle = !bo->num_fences;
pipe_mutex_unlock(ws->bo_fence_lock);
mtx_unlock(&ws->bo_fence_lock);
return buffer_idle;
} else {
@ -113,7 +113,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
amdgpu_fence_reference(&fence, bo->fences[0]);
/* Wait for the fence. */
pipe_mutex_unlock(ws->bo_fence_lock);
mtx_unlock(&ws->bo_fence_lock);
if (amdgpu_fence_wait(fence, abs_timeout, true))
fence_idle = true;
else
@ -132,7 +132,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
amdgpu_fence_reference(&fence, NULL);
}
pipe_mutex_unlock(ws->bo_fence_lock);
mtx_unlock(&ws->bo_fence_lock);
return buffer_idle;
}
@ -163,7 +163,7 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)
mtx_lock(&bo->ws->global_bo_list_lock);
LIST_DEL(&bo->u.real.global_list_item);
bo->ws->num_buffers--;
pipe_mutex_unlock(bo->ws->global_bo_list_lock);
mtx_unlock(&bo->ws->global_bo_list_lock);
amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_va_range_free(bo->u.real.va_handle);
@ -352,7 +352,7 @@ static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
mtx_lock(&ws->global_bo_list_lock);
LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
ws->num_buffers++;
pipe_mutex_unlock(ws->global_bo_list_lock);
mtx_unlock(&ws->global_bo_list_lock);
}
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,

@ -1041,7 +1041,7 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
handles = malloc(sizeof(handles[0]) * ws->num_buffers);
if (!handles) {
pipe_mutex_unlock(ws->global_bo_list_lock);
mtx_unlock(&ws->global_bo_list_lock);
amdgpu_cs_context_cleanup(cs);
cs->error_code = -ENOMEM;
return;
@ -1056,7 +1056,7 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
handles, NULL,
&cs->request.resources);
free(handles);
pipe_mutex_unlock(ws->global_bo_list_lock);
mtx_unlock(&ws->global_bo_list_lock);
} else {
r = amdgpu_bo_list_create(ws->dev, cs->num_real_buffers,
cs->handles, cs->flags,
@ -1222,7 +1222,7 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
amdgpu_cs_submit_ib, NULL);
/* The submission has been queued, unlock the fence now. */
pipe_mutex_unlock(ws->bo_fence_lock);
mtx_unlock(&ws->bo_fence_lock);
if (!(flags & RADEON_FLUSH_ASYNC)) {
amdgpu_cs_sync_flush(rcs);

@ -506,7 +506,7 @@ static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
if (destroy && dev_tab)
util_hash_table_remove(dev_tab, ws->dev);
pipe_mutex_unlock(dev_tab_mutex);
mtx_unlock(&dev_tab_mutex);
return destroy;
}
@ -534,7 +534,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
* for the same fd. */
r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
if (r) {
pipe_mutex_unlock(dev_tab_mutex);
mtx_unlock(&dev_tab_mutex);
fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
return NULL;
}
@ -543,7 +543,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
ws = util_hash_table_get(dev_tab, dev);
if (ws) {
pipe_reference(NULL, &ws->reference);
pipe_mutex_unlock(dev_tab_mutex);
mtx_unlock(&dev_tab_mutex);
return &ws->base;
}
@ -596,7 +596,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1)) {
amdgpu_winsys_destroy(&ws->base);
pipe_mutex_unlock(dev_tab_mutex);
mtx_unlock(&dev_tab_mutex);
return NULL;
}
@ -608,7 +608,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
ws->base.screen = screen_create(&ws->base);
if (!ws->base.screen) {
amdgpu_winsys_destroy(&ws->base);
pipe_mutex_unlock(dev_tab_mutex);
mtx_unlock(&dev_tab_mutex);
return NULL;
}
@ -617,7 +617,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
/* We must unlock the mutex once the winsys is fully initialized, so that
* other threads attempting to create the winsys from the same fd will
* get a fully initialized winsys and not just half-way initialized. */
pipe_mutex_unlock(dev_tab_mutex);
mtx_unlock(&dev_tab_mutex);
return &ws->base;
@ -627,6 +627,6 @@ fail_cache:
fail_alloc:
FREE(ws);
fail:
pipe_mutex_unlock(dev_tab_mutex);
mtx_unlock(&dev_tab_mutex);
return NULL;
}

@ -83,7 +83,7 @@ etna_drm_screen_destroy(struct pipe_screen *pscreen)
int fd = etna_device_fd(screen->dev);
util_hash_table_remove(etna_tab, intptr_to_pointer(fd));
}
pipe_mutex_unlock(etna_screen_mutex);
mtx_unlock(&etna_screen_mutex);
if (destroy) {
pscreen->destroy = screen->winsys_priv;
@ -145,7 +145,7 @@ etna_drm_screen_create_renderonly(struct renderonly *ro)
}
unlock:
pipe_mutex_unlock(etna_screen_mutex);
mtx_unlock(&etna_screen_mutex);
return pscreen;
}

@ -56,7 +56,7 @@ fd_drm_screen_destroy(struct pipe_screen *pscreen)
int fd = fd_device_fd(screen->dev);
util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
}
pipe_mutex_unlock(fd_screen_mutex);
mtx_unlock(&fd_screen_mutex);
if (destroy) {
pscreen->destroy = screen->winsys_priv;
@ -122,6 +122,6 @@ fd_drm_screen_create(int fd)
}
unlock:
pipe_mutex_unlock(fd_screen_mutex);
mtx_unlock(&fd_screen_mutex);
return pscreen;
}

@ -32,7 +32,7 @@ bool nouveau_drm_screen_unref(struct nouveau_screen *screen)
assert(ret >= 0);
if (ret == 0)
util_hash_table_remove(fd_tab, intptr_to_pointer(screen->drm->fd));
pipe_mutex_unlock(nouveau_screen_mutex);
mtx_unlock(&nouveau_screen_mutex);
return ret == 0;
}
@ -71,7 +71,7 @@ nouveau_drm_screen_create(int fd)
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab) {
pipe_mutex_unlock(nouveau_screen_mutex);
mtx_unlock(&nouveau_screen_mutex);
return NULL;
}
}
@ -79,7 +79,7 @@ nouveau_drm_screen_create(int fd)
screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (screen) {
screen->refcount++;
pipe_mutex_unlock(nouveau_screen_mutex);
mtx_unlock(&nouveau_screen_mutex);
return &screen->base;
}
@ -143,7 +143,7 @@ nouveau_drm_screen_create(int fd)
*/
util_hash_table_set(fd_tab, intptr_to_pointer(dupfd), screen);
screen->refcount = 1;
pipe_mutex_unlock(nouveau_screen_mutex);
mtx_unlock(&nouveau_screen_mutex);
return &screen->base;
err:
@ -154,6 +154,6 @@ err:
nouveau_drm_del(&drm);
close(dupfd);
}
pipe_mutex_unlock(nouveau_screen_mutex);
mtx_unlock(&nouveau_screen_mutex);
return NULL;
}

@ -88,7 +88,7 @@ static bool radeon_bo_is_busy(struct radeon_bo *bo)
memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
(bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
bo->u.slab.num_fences -= num_idle;
pipe_mutex_unlock(bo->rws->bo_fence_lock);
mtx_unlock(&bo->rws->bo_fence_lock);
return busy;
}
@ -111,7 +111,7 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
while (bo->u.slab.num_fences) {
struct radeon_bo *fence = NULL;
radeon_bo_reference(&fence, bo->u.slab.fences[0]);
pipe_mutex_unlock(bo->rws->bo_fence_lock);
mtx_unlock(&bo->rws->bo_fence_lock);
/* Wait without holding the fence lock. */
radeon_real_bo_wait_idle(fence);
@ -125,7 +125,7 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
}
radeon_bo_reference(&fence, NULL);
}
pipe_mutex_unlock(bo->rws->bo_fence_lock);
mtx_unlock(&bo->rws->bo_fence_lock);
}
}
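
The "Wait without holding the fence lock" comment in radeon_bo_wait_idle() above describes a drop-lock-wait-reacquire loop: the fence lock is released around the blocking wait so other threads can still queue or signal fences, then retaken to re-check the list. A minimal sketch of that loop follows; take_first_fence(), wait_fence() and fence_unref() are hypothetical placeholders for the radeon_bo_reference()/radeon_real_bo_wait_idle() calls, and fence_lock is assumed to be initialized with mtx_init().

#include <threads.h>

struct fence;
struct fence *take_first_fence(void);   /* hypothetical: returns a new reference, or NULL if idle */
void wait_fence(struct fence *f);       /* hypothetical: may block for a long time */
void fence_unref(struct fence *f);

static mtx_t fence_lock;                /* assumed: initialized elsewhere with mtx_init() */

void wait_idle(void)
{
   mtx_lock(&fence_lock);
   for (;;) {
      struct fence *f = take_first_fence();
      if (!f)
         break;

      /* Drop the lock so other threads can add or signal fences while
       * we block on this one. */
      mtx_unlock(&fence_lock);
      wait_fence(f);
      fence_unref(f);

      /* Retake the lock and re-check: the list may have changed. */
      mtx_lock(&fence_lock);
   }
   mtx_unlock(&fence_lock);
}
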
@ -218,7 +218,7 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
offset = hole->offset;
list_del(&hole->list);
FREE(hole);
pipe_mutex_unlock(rws->bo_va_mutex);
mtx_unlock(&rws->bo_va_mutex);
return offset;
}
if ((hole->size - waste) > size) {
@ -230,12 +230,12 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
}
hole->size -= (size + waste);
hole->offset += size + waste;
pipe_mutex_unlock(rws->bo_va_mutex);
mtx_unlock(&rws->bo_va_mutex);
return offset;
}
if ((hole->size - waste) == size) {
hole->size = waste;
pipe_mutex_unlock(rws->bo_va_mutex);
mtx_unlock(&rws->bo_va_mutex);
return offset;
}
}
@ -251,7 +251,7 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
}
offset += waste;
rws->va_offset += size + waste;
pipe_mutex_unlock(rws->bo_va_mutex);
mtx_unlock(&rws->bo_va_mutex);
return offset;
}
@ -318,7 +318,7 @@ static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
}
}
out:
pipe_mutex_unlock(rws->bo_va_mutex);
mtx_unlock(&rws->bo_va_mutex);
}
void radeon_bo_destroy(struct pb_buffer *_buf)
@ -337,7 +337,7 @@ void radeon_bo_destroy(struct pb_buffer *_buf)
util_hash_table_remove(rws->bo_names,
(void*)(uintptr_t)bo->flink_name);
}
pipe_mutex_unlock(rws->bo_handles_mutex);
mtx_unlock(&rws->bo_handles_mutex);
if (bo->u.real.ptr)
os_munmap(bo->u.real.ptr, bo->base.size);
@ -422,7 +422,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
/* Return the pointer if it's already mapped. */
if (bo->u.real.ptr) {
bo->u.real.map_count++;
pipe_mutex_unlock(bo->u.real.map_mutex);
mtx_unlock(&bo->u.real.map_mutex);
return (uint8_t*)bo->u.real.ptr + offset;
}
args.handle = bo->handle;
@ -432,7 +432,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
DRM_RADEON_GEM_MMAP,
&args,
sizeof(args))) {
pipe_mutex_unlock(bo->u.real.map_mutex);
mtx_unlock(&bo->u.real.map_mutex);
fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
bo, bo->handle);
return NULL;
@ -447,7 +447,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
bo->rws->fd, args.addr_ptr);
if (ptr == MAP_FAILED) {
pipe_mutex_unlock(bo->u.real.map_mutex);
mtx_unlock(&bo->u.real.map_mutex);
fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
return NULL;
}
@ -461,7 +461,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
bo->rws->mapped_gtt += bo->base.size;
bo->rws->num_mapped_buffers++;
pipe_mutex_unlock(bo->u.real.map_mutex);
mtx_unlock(&bo->u.real.map_mutex);
return (uint8_t*)bo->u.real.ptr + offset;
}
@ -555,13 +555,13 @@ static void radeon_bo_unmap(struct pb_buffer *_buf)
mtx_lock(&bo->u.real.map_mutex);
if (!bo->u.real.ptr) {
pipe_mutex_unlock(bo->u.real.map_mutex);
mtx_unlock(&bo->u.real.map_mutex);
return; /* it's not been mapped */
}
assert(bo->u.real.map_count);
if (--bo->u.real.map_count) {
pipe_mutex_unlock(bo->u.real.map_mutex);
mtx_unlock(&bo->u.real.map_mutex);
return; /* it's been mapped multiple times */
}
@ -574,7 +574,7 @@ static void radeon_bo_unmap(struct pb_buffer *_buf)
bo->rws->mapped_gtt -= bo->base.size;
bo->rws->num_mapped_buffers--;
pipe_mutex_unlock(bo->u.real.map_mutex);
mtx_unlock(&bo->u.real.map_mutex);
}
static const struct pb_vtbl radeon_bo_vtbl = {
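
The radeon_bo_do_map()/radeon_bo_unmap() hunks above keep a map_count under a per-buffer map_mutex: a second map of an already-mapped buffer just bumps the count, and only the unmap that drops it to zero actually releases the mapping. Here is a minimal sketch of that reference-counted mapping, assuming do_mmap()/do_munmap() as hypothetical stand-ins for the GEM mmap ioctl and os_munmap(), with the mutex initialized elsewhere via mtx_init().

#include <stddef.h>
#include <threads.h>

struct buf {
   mtx_t map_mutex;     /* assumed: mtx_init() on creation */
   void *ptr;           /* CPU mapping, NULL while unmapped */
   int   map_count;
};

void *do_mmap(struct buf *b);   /* hypothetical: create the CPU mapping */
void  do_munmap(struct buf *b); /* hypothetical: tear it down */

void *buf_map(struct buf *b)
{
   mtx_lock(&b->map_mutex);
   if (b->ptr) {                        /* already mapped: just add a reference */
      b->map_count++;
      mtx_unlock(&b->map_mutex);
      return b->ptr;
   }
   b->ptr = do_mmap(b);
   if (b->ptr)
      b->map_count = 1;
   mtx_unlock(&b->map_mutex);
   return b->ptr;
}

void buf_unmap(struct buf *b)
{
   mtx_lock(&b->map_mutex);
   if (b->ptr && --b->map_count == 0) { /* last user tears the mapping down */
      do_munmap(b);
      b->ptr = NULL;
   }
   mtx_unlock(&b->map_mutex);
}
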
@ -671,13 +671,13 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
struct radeon_bo *old_bo =
util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);
pipe_mutex_unlock(rws->bo_handles_mutex);
mtx_unlock(&rws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return radeon_bo(b);
}
util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
pipe_mutex_unlock(rws->bo_handles_mutex);
mtx_unlock(&rws->bo_handles_mutex);
}
if (initial_domains & RADEON_DOMAIN_VRAM)
@ -1032,7 +1032,7 @@ no_slab:
mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
mtx_unlock(&ws->bo_handles_mutex);
return &bo->base;
}
@ -1080,7 +1080,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
mtx_unlock(&ws->bo_handles_mutex);
if (ws->info.has_virtual_memory) {
struct drm_radeon_gem_va va;
@ -1107,13 +1107,13 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
struct radeon_bo *old_bo =
util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
pipe_mutex_unlock(ws->bo_handles_mutex);
mtx_unlock(&ws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return b;
}
util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
mtx_unlock(&ws->bo_handles_mutex);
}
ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
@ -1218,7 +1218,7 @@ static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
done:
pipe_mutex_unlock(ws->bo_handles_mutex);
mtx_unlock(&ws->bo_handles_mutex);
if (stride)
*stride = whandle->stride;
@ -1250,13 +1250,13 @@ done:
struct radeon_bo *old_bo =
util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
pipe_mutex_unlock(ws->bo_handles_mutex);
mtx_unlock(&ws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return b;
}
util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
mtx_unlock(&ws->bo_handles_mutex);
}
bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
@ -1269,7 +1269,7 @@ done:
return (struct pb_buffer*)bo;
fail:
pipe_mutex_unlock(ws->bo_handles_mutex);
mtx_unlock(&ws->bo_handles_mutex);
return NULL;
}
@ -1303,7 +1303,7 @@ static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
mtx_unlock(&ws->bo_handles_mutex);
}
whandle->handle = bo->flink_name;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {

View File

@ -602,7 +602,7 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
p_atomic_inc(&bo->num_active_ioctls);
radeon_bo_slab_fence(bo, (struct radeon_bo *)fence);
}
pipe_mutex_unlock(cs->ws->bo_fence_lock);
mtx_unlock(&cs->ws->bo_fence_lock);
radeon_fence_reference(&fence, NULL);
} else {

View File

@ -71,12 +71,12 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
/* Early exit if we are sure the request will fail. */
if (enable) {
if (*owner) {
pipe_mutex_unlock(*mutex);
mtx_unlock(&*mutex);
return false;
}
} else {
if (*owner != applier) {
pipe_mutex_unlock(*mutex);
mtx_unlock(&*mutex);
return false;
}
}
@ -86,7 +86,7 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
info.request = request;
if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
&info, sizeof(info)) != 0) {
pipe_mutex_unlock(*mutex);
mtx_unlock(&*mutex);
return false;
}
@ -94,14 +94,14 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
if (enable) {
if (value) {
*owner = applier;
pipe_mutex_unlock(*mutex);
mtx_unlock(&*mutex);
return true;
}
} else {
*owner = NULL;
}
pipe_mutex_unlock(*mutex);
mtx_unlock(&*mutex);
return false;
}
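
In radeon_set_fd_access() the old macro was passed a dereferenced pointer (*mutex), so the mechanical substitution produces mtx_unlock(&*mutex) on every early-return path. In C, &*p is defined to be the same as p, so the call is merely redundant, not wrong; a tiny illustration (unlock_via_pointer is a hypothetical helper, not part of the patch):

#include <threads.h>

void unlock_via_pointer(mtx_t *mutex)
{
   mtx_unlock(&*mutex);   /* identical to mtx_unlock(mutex) */
}
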
@ -715,7 +715,7 @@ static bool radeon_winsys_unref(struct radeon_winsys *ws)
if (destroy && fd_tab)
util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));
pipe_mutex_unlock(fd_tab_mutex);
mtx_unlock(&fd_tab_mutex);
return destroy;
}
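
The radeon_winsys_unref() hunk above shows the destroy side of the per-fd table: the last-reference decision and the removal from fd_tab happen under the same fd_tab_mutex, so a concurrent *_winsys_create() either finds a live winsys or none at all. A simplified sketch, with a plain integer refcount and table_remove() as hypothetical stand-ins for pipe_reference and util_hash_table_remove():

#include <stdbool.h>
#include <threads.h>

struct winsys { int refcount; int fd; };
void table_remove(int fd);              /* hypothetical: drop the fd -> winsys entry */

static mtx_t fd_tab_mutex;              /* assumed: initialized elsewhere with mtx_init() */

bool winsys_unref(struct winsys *ws)
{
   bool destroy;

   mtx_lock(&fd_tab_mutex);
   destroy = --ws->refcount == 0;
   if (destroy)
      table_remove(ws->fd);             /* remove while still holding the lock */
   mtx_unlock(&fd_tab_mutex);

   return destroy;                      /* caller frees the winsys outside the lock */
}
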
@ -744,13 +744,13 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (ws) {
pipe_reference(NULL, &ws->reference);
pipe_mutex_unlock(fd_tab_mutex);
mtx_unlock(&fd_tab_mutex);
return &ws->base;
}
ws = CALLOC_STRUCT(radeon_drm_winsys);
if (!ws) {
pipe_mutex_unlock(fd_tab_mutex);
mtx_unlock(&fd_tab_mutex);
return NULL;
}
@ -830,7 +830,7 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
ws->base.screen = screen_create(&ws->base);
if (!ws->base.screen) {
radeon_winsys_destroy(&ws->base);
pipe_mutex_unlock(fd_tab_mutex);
mtx_unlock(&fd_tab_mutex);
return NULL;
}
@ -839,7 +839,7 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
/* We must unlock the mutex once the winsys is fully initialized, so that
* other threads attempting to create the winsys from the same fd will
* get a fully initialized winsys and not just half-way initialized. */
pipe_mutex_unlock(fd_tab_mutex);
mtx_unlock(&fd_tab_mutex);
return &ws->base;
@ -849,7 +849,7 @@ fail_slab:
fail_cache:
pb_cache_deinit(&ws->bo_cache);
fail1:
pipe_mutex_unlock(fd_tab_mutex);
mtx_unlock(&fd_tab_mutex);
if (ws->surf_man)
radeon_surface_manager_free(ws->surf_man);
if (ws->fd >= 0)

View File

@ -311,7 +311,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
ops->fence_reference(ops, &fence, fenced_buf->fence);
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
@ -512,7 +512,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
}
@ -564,7 +564,7 @@ fenced_buffer_map(struct pb_buffer *buf,
}
done:
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
return map;
}
@ -587,7 +587,7 @@ fenced_buffer_unmap(struct pb_buffer *buf)
fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
}
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
}
@ -635,7 +635,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
fenced_buf->validation_flags |= flags;
done:
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
return ret;
}
@ -676,7 +676,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
fenced_buf->validation_flags = 0;
}
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
}
@ -699,7 +699,7 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
*offset = 0;
}
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
}
@ -758,12 +758,12 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
++fenced_mgr->num_unfenced;
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
return &fenced_buf->base;
no_storage:
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
FREE(fenced_buf);
no_buffer:
return NULL;
@ -778,7 +778,7 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
mtx_lock(&fenced_mgr->mutex);
while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
assert(fenced_mgr->provider->flush);
if(fenced_mgr->provider->flush)
@ -795,7 +795,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
/* Wait on outstanding fences */
while (fenced_mgr->num_fenced) {
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
@ -808,7 +808,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
/*assert(!fenced_mgr->num_unfenced);*/
#endif
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_unlock(&fenced_mgr->mutex);
mtx_destroy(&fenced_mgr->mutex);
FREE(fenced_mgr);

View File

@ -533,7 +533,7 @@ vmw_swc_surface_relocation(struct svga_winsys_context *swc,
vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
vsurf->buf, 0, flags);
pipe_mutex_unlock(vsurf->mutex);
mtx_unlock(&vsurf->mutex);
}
}

View File

@ -104,7 +104,7 @@ vmw_fences_release(struct vmw_fence_ops *ops)
mtx_lock(&ops->mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
LIST_DELINIT(&fence->ops_list);
pipe_mutex_unlock(ops->mutex);
mtx_unlock(&ops->mutex);
}
/**
@ -152,7 +152,7 @@ vmw_fences_signal(struct pb_fence_ops *fence_ops,
ops->last_emitted = emitted;
out_unlock:
pipe_mutex_unlock(ops->mutex);
mtx_unlock(&ops->mutex);
}
@ -203,7 +203,7 @@ vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
}
pipe_mutex_unlock(ops->mutex);
mtx_unlock(&ops->mutex);
return (struct pipe_fence_handle *) fence;
}
@ -231,7 +231,7 @@ vmw_fence_reference(struct vmw_winsys_screen *vws,
mtx_lock(&ops->mutex);
LIST_DELINIT(&vfence->ops_list);
pipe_mutex_unlock(ops->mutex);
mtx_unlock(&ops->mutex);
FREE(vfence);
}

View File

@ -154,7 +154,7 @@ out_mapped:
vsrf->data = data;
vsrf->map_mode = flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE);
out_unlock:
pipe_mutex_unlock(vsrf->mutex);
mtx_unlock(&vsrf->mutex);
return data;
}
@ -173,7 +173,7 @@ vmw_svga_winsys_surface_unmap(struct svga_winsys_context *swc,
} else {
*rebind = FALSE;
}
pipe_mutex_unlock(vsrf->mutex);
mtx_unlock(&vsrf->mutex);
}
void

View File

@ -57,14 +57,14 @@ static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_names,
(void *)(uintptr_t)res->flink);
pipe_mutex_unlock(qdws->bo_handles_mutex);
mtx_unlock(&qdws->bo_handles_mutex);
}
if (res->bo_handle) {
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_handles,
(void *)(uintptr_t)res->bo_handle);
pipe_mutex_unlock(qdws->bo_handles_mutex);
mtx_unlock(&qdws->bo_handles_mutex);
}
if (res->ptr)
@ -109,7 +109,7 @@ virgl_cache_flush(struct virgl_drm_winsys *qdws)
curr = next;
next = curr->next;
}
pipe_mutex_unlock(qdws->mutex);
mtx_unlock(&qdws->mutex);
}
static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
@ -165,7 +165,7 @@ static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
old->end = old->start + qdws->usecs;
LIST_ADDTAIL(&old->head, &qdws->delayed);
qdws->num_delayed++;
pipe_mutex_unlock(qdws->mutex);
mtx_unlock(&qdws->mutex);
}
}
*dres = sres;
@ -353,12 +353,12 @@ virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
if (res) {
LIST_DEL(&res->head);
--qdws->num_delayed;
pipe_mutex_unlock(qdws->mutex);
mtx_unlock(&qdws->mutex);
pipe_reference_init(&res->reference, 1);
return res;
}
pipe_mutex_unlock(qdws->mutex);
mtx_unlock(&qdws->mutex);
alloc:
res = virgl_drm_winsys_resource_create(qws, target, format, bind,
@ -453,7 +453,7 @@ virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);
done:
pipe_mutex_unlock(qdws->bo_handles_mutex);
mtx_unlock(&qdws->bo_handles_mutex);
return res;
}
@ -481,7 +481,7 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
mtx_unlock(&qdws->bo_handles_mutex);
}
whandle->handle = res->flink;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
@ -491,7 +491,7 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
return FALSE;
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
mtx_unlock(&qdws->bo_handles_mutex);
}
whandle->stride = stride;
return TRUE;
@ -820,7 +820,7 @@ virgl_drm_screen_destroy(struct pipe_screen *pscreen)
int fd = virgl_drm_winsys(screen->vws)->fd;
util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
}
pipe_mutex_unlock(virgl_screen_mutex);
mtx_unlock(&virgl_screen_mutex);
if (destroy) {
pscreen->destroy = screen->winsys_priv;
@ -885,6 +885,6 @@ virgl_drm_screen_create(int fd)
}
unlock:
pipe_mutex_unlock(virgl_screen_mutex);
mtx_unlock(&virgl_screen_mutex);
return pscreen;
}

View File

@ -155,7 +155,7 @@ virgl_cache_flush(struct virgl_vtest_winsys *vtws)
curr = next;
next = curr->next;
}
pipe_mutex_unlock(vtws->mutex);
mtx_unlock(&vtws->mutex);
}
static void
@ -196,7 +196,7 @@ static void virgl_vtest_resource_reference(struct virgl_vtest_winsys *vtws,
old->end = old->start + vtws->usecs;
LIST_ADDTAIL(&old->head, &vtws->delayed);
vtws->num_delayed++;
pipe_mutex_unlock(vtws->mutex);
mtx_unlock(&vtws->mutex);
}
}
*dres = sres;
@ -376,12 +376,12 @@ virgl_vtest_winsys_resource_cache_create(struct virgl_winsys *vws,
if (res) {
LIST_DEL(&res->head);
--vtws->num_delayed;
pipe_mutex_unlock(vtws->mutex);
mtx_unlock(&vtws->mutex);
pipe_reference_init(&res->reference, 1);
return res;
}
pipe_mutex_unlock(vtws->mutex);
mtx_unlock(&vtws->mutex);
alloc:
res = virgl_vtest_winsys_resource_create(vws, target, format, bind,