gallium: thread wrapper clean-up

In p_thread.h, replace the _glthread_* functions with new pipe_* functions.
Remove other old cruft.
Brian Paul 2008-08-26 16:35:12 -06:00
parent b5ab3b7dfc
commit 0bb852fa49
17 changed files with 349 additions and 474 deletions
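
For orientation, a minimal sketch of a hypothetical caller after the rename (the pipe_* names are the ones introduced in p_thread.h below; the caller itself is illustrative, not part of this commit):

#include "pipe/p_thread.h"

/* One-for-one replacements applied throughout this commit:
 *   _glthread_DECLARE_STATIC_MUTEX -> pipe_static_mutex
 *   _glthread_INIT_MUTEX           -> pipe_mutex_init
 *   _glthread_LOCK_MUTEX           -> pipe_mutex_lock
 *   _glthread_UNLOCK_MUTEX         -> pipe_mutex_unlock
 *   _glthread_INIT_COND / _glthread_COND_* -> pipe_condvar_init / pipe_condvar_*
 */

pipe_static_mutex(counter_mutex);   /* expands to a static, pre-initialized mutex */
static int counter;                 /* hypothetical shared state */

static void
bump_counter(void)
{
   pipe_mutex_lock(counter_mutex);
   counter++;
   pipe_mutex_unlock(counter_mutex);
}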

View File

@ -69,7 +69,7 @@
struct fenced_buffer_list
{
_glthread_Mutex mutex;
pipe_mutex mutex;
struct pipe_winsys *winsys;
@ -240,7 +240,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_buffer_list *fenced_list = fenced_buf->list;
_glthread_LOCK_MUTEX(fenced_list->mutex);
pipe_mutex_lock(fenced_list->mutex);
assert(fenced_buf->base.base.refcount == 0);
if (fenced_buf->fence) {
struct pipe_winsys *winsys = fenced_list->winsys;
@ -263,7 +263,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
else {
_fenced_buffer_destroy(fenced_buf);
}
_glthread_UNLOCK_MUTEX(fenced_list->mutex);
pipe_mutex_unlock(fenced_list->mutex);
}
@ -396,7 +396,7 @@ buffer_fence(struct pb_buffer *buf,
return;
}
_glthread_LOCK_MUTEX(fenced_list->mutex);
pipe_mutex_lock(fenced_list->mutex);
if (fenced_buf->fence)
_fenced_buffer_remove(fenced_list, fenced_buf);
if (fence) {
@ -404,7 +404,7 @@ buffer_fence(struct pb_buffer *buf,
fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
_fenced_buffer_add(fenced_buf);
}
_glthread_UNLOCK_MUTEX(fenced_list->mutex);
pipe_mutex_unlock(fenced_list->mutex);
}
@ -423,7 +423,7 @@ fenced_buffer_list_create(struct pipe_winsys *winsys)
fenced_list->numDelayed = 0;
_glthread_INIT_MUTEX(fenced_list->mutex);
pipe_mutex_init(fenced_list->mutex);
return fenced_list;
}
@ -433,28 +433,28 @@ void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
int wait)
{
_glthread_LOCK_MUTEX(fenced_list->mutex);
pipe_mutex_lock(fenced_list->mutex);
_fenced_buffer_list_check_free(fenced_list, wait);
_glthread_UNLOCK_MUTEX(fenced_list->mutex);
pipe_mutex_unlock(fenced_list->mutex);
}
void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
_glthread_LOCK_MUTEX(fenced_list->mutex);
pipe_mutex_lock(fenced_list->mutex);
/* Wait on outstanding fences */
while (fenced_list->numDelayed) {
_glthread_UNLOCK_MUTEX(fenced_list->mutex);
pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX)
sched_yield();
#endif
_fenced_buffer_list_check_free(fenced_list, 1);
_glthread_LOCK_MUTEX(fenced_list->mutex);
pipe_mutex_lock(fenced_list->mutex);
}
_glthread_UNLOCK_MUTEX(fenced_list->mutex);
pipe_mutex_unlock(fenced_list->mutex);
FREE(fenced_list);
}

View File

@ -79,7 +79,7 @@ struct pb_cache_manager
struct pb_manager *provider;
unsigned usecs;
_glthread_Mutex mutex;
pipe_mutex mutex;
struct list_head delayed;
size_t numDelayed;
@ -153,7 +153,7 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf)
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
struct pb_cache_manager *mgr = buf->mgr;
_glthread_LOCK_MUTEX(mgr->mutex);
pipe_mutex_lock(mgr->mutex);
assert(buf->base.base.refcount == 0);
_pb_cache_buffer_list_check_free(mgr);
@ -162,7 +162,7 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf)
util_time_add(&buf->start, mgr->usecs, &buf->end);
LIST_ADDTAIL(&buf->head, &mgr->delayed);
++mgr->numDelayed;
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
}
@ -235,7 +235,7 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
struct list_head *curr, *next;
struct util_time now;
_glthread_LOCK_MUTEX(mgr->mutex);
pipe_mutex_lock(mgr->mutex);
buf = NULL;
curr = mgr->delayed.next;
@ -264,12 +264,12 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
if(buf) {
LIST_DEL(&buf->head);
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
++buf->base.base.refcount;
return &buf->base;
}
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
buf = CALLOC_STRUCT(pb_cache_buffer);
if(!buf)
@ -305,7 +305,7 @@ pb_cache_flush(struct pb_manager *_mgr)
struct list_head *curr, *next;
struct pb_cache_buffer *buf;
_glthread_LOCK_MUTEX(mgr->mutex);
pipe_mutex_lock(mgr->mutex);
curr = mgr->delayed.next;
next = curr->next;
while(curr != &mgr->delayed) {
@ -314,7 +314,7 @@ pb_cache_flush(struct pb_manager *_mgr)
curr = next;
next = curr->next;
}
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
}
@ -345,7 +345,7 @@ pb_cache_manager_create(struct pb_manager *provider,
mgr->usecs = usecs;
LIST_INITHEAD(&mgr->delayed);
mgr->numDelayed = 0;
_glthread_INIT_MUTEX(mgr->mutex);
pipe_mutex_init(mgr->mutex);
return &mgr->base;
}

View File

@ -53,7 +53,7 @@ struct mm_pb_manager
{
struct pb_manager base;
_glthread_Mutex mutex;
pipe_mutex mutex;
size_t size;
struct mem_block *heap;
@ -99,10 +99,10 @@ mm_buffer_destroy(struct pb_buffer *buf)
assert(buf->base.refcount == 0);
_glthread_LOCK_MUTEX(mm->mutex);
pipe_mutex_lock(mm->mutex);
mmFreeMem(mm_buf->block);
FREE(buf);
_glthread_UNLOCK_MUTEX(mm->mutex);
pipe_mutex_unlock(mm->mutex);
}
@ -158,11 +158,11 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
if(desc->alignment % (1 << mm->align2))
return NULL;
_glthread_LOCK_MUTEX(mm->mutex);
pipe_mutex_lock(mm->mutex);
mm_buf = CALLOC_STRUCT(mm_buffer);
if (!mm_buf) {
_glthread_UNLOCK_MUTEX(mm->mutex);
pipe_mutex_unlock(mm->mutex);
return NULL;
}
@ -185,7 +185,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
mm_buf->block = mmAllocMem(mm->heap, size, mm->align2, 0);
if(!mm_buf->block) {
FREE(mm_buf);
_glthread_UNLOCK_MUTEX(mm->mutex);
pipe_mutex_unlock(mm->mutex);
return NULL;
}
}
@ -194,7 +194,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
assert(0 <= (unsigned)mm_buf->block->ofs && (unsigned)mm_buf->block->ofs < mm->size);
assert(size <= (unsigned)mm_buf->block->size && (unsigned)mm_buf->block->ofs + (unsigned)mm_buf->block->size <= mm->size);
_glthread_UNLOCK_MUTEX(mm->mutex);
pipe_mutex_unlock(mm->mutex);
return SUPER(mm_buf);
}
@ -204,14 +204,14 @@ mm_bufmgr_destroy(struct pb_manager *mgr)
{
struct mm_pb_manager *mm = mm_pb_manager(mgr);
_glthread_LOCK_MUTEX(mm->mutex);
pipe_mutex_lock(mm->mutex);
mmDestroy(mm->heap);
pb_unmap(mm->buffer);
pb_reference(&mm->buffer, NULL);
_glthread_UNLOCK_MUTEX(mm->mutex);
pipe_mutex_unlock(mm->mutex);
FREE(mgr);
}
@ -236,7 +236,7 @@ mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
mm->size = size;
mm->align2 = align2; /* 64-byte alignment */
_glthread_INIT_MUTEX(mm->mutex);
pipe_mutex_init(mm->mutex);
mm->buffer = buffer;

View File

@ -56,7 +56,7 @@ struct pool_pb_manager
{
struct pb_manager base;
_glthread_Mutex mutex;
pipe_mutex mutex;
size_t bufSize;
size_t bufAlign;
@ -110,10 +110,10 @@ pool_buffer_destroy(struct pb_buffer *buf)
assert(pool_buf->base.base.refcount == 0);
_glthread_LOCK_MUTEX(pool->mutex);
pipe_mutex_lock(pool->mutex);
LIST_ADD(&pool_buf->head, &pool->free);
pool->numFree++;
_glthread_UNLOCK_MUTEX(pool->mutex);
pipe_mutex_unlock(pool->mutex);
}
@ -124,9 +124,9 @@ pool_buffer_map(struct pb_buffer *buf, unsigned flags)
struct pool_pb_manager *pool = pool_buf->mgr;
void *map;
_glthread_LOCK_MUTEX(pool->mutex);
pipe_mutex_lock(pool->mutex);
map = (unsigned char *) pool->map + pool_buf->start;
_glthread_UNLOCK_MUTEX(pool->mutex);
pipe_mutex_unlock(pool->mutex);
return map;
}
@ -171,10 +171,10 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
assert(size == pool->bufSize);
assert(pool->bufAlign % desc->alignment == 0);
_glthread_LOCK_MUTEX(pool->mutex);
pipe_mutex_lock(pool->mutex);
if (pool->numFree == 0) {
_glthread_UNLOCK_MUTEX(pool->mutex);
pipe_mutex_unlock(pool->mutex);
debug_printf("warning: out of fixed size buffer objects\n");
return NULL;
}
@ -182,7 +182,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
item = pool->free.next;
if (item == &pool->free) {
_glthread_UNLOCK_MUTEX(pool->mutex);
pipe_mutex_unlock(pool->mutex);
debug_printf("error: fixed size buffer pool corruption\n");
return NULL;
}
@ -190,7 +190,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
LIST_DEL(item);
--pool->numFree;
_glthread_UNLOCK_MUTEX(pool->mutex);
pipe_mutex_unlock(pool->mutex);
pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
assert(pool_buf->base.base.refcount == 0);
@ -206,14 +206,14 @@ static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
struct pool_pb_manager *pool = pool_pb_manager(mgr);
_glthread_LOCK_MUTEX(pool->mutex);
pipe_mutex_lock(pool->mutex);
FREE(pool->bufs);
pb_unmap(pool->buffer);
pb_reference(&pool->buffer, NULL);
_glthread_UNLOCK_MUTEX(pool->mutex);
pipe_mutex_unlock(pool->mutex);
FREE(mgr);
}
@ -246,7 +246,7 @@ pool_bufmgr_create(struct pb_manager *provider,
pool->bufSize = bufSize;
pool->bufAlign = desc->alignment;
_glthread_INIT_MUTEX(pool->mutex);
pipe_mutex_init(pool->mutex);
pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc);
if (!pool->buffer)

View File

@ -57,7 +57,7 @@ struct pb_slab_buffer
struct list_head head;
unsigned mapCount;
size_t start;
_glthread_Cond event;
pipe_condvar event;
};
struct pb_slab
@ -85,7 +85,7 @@ struct pb_slab_manager
struct list_head slabs;
struct list_head freeSlabs;
_glthread_Mutex mutex;
pipe_mutex mutex;
};
/**
@ -143,7 +143,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
struct pb_slab_manager *mgr = slab->mgr;
struct list_head *list = &buf->head;
_glthread_LOCK_MUTEX(mgr->mutex);
pipe_mutex_lock(mgr->mutex);
assert(buf->base.base.refcount == 0);
@ -179,7 +179,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
}
}
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
}
@ -201,7 +201,7 @@ pb_slab_buffer_unmap(struct pb_buffer *_buf)
--buf->mapCount;
if (buf->mapCount == 0)
_glthread_COND_BROADCAST(buf->event);
pipe_condvar_broadcast(buf->event);
}
@ -283,7 +283,7 @@ pb_slab_create(struct pb_slab_manager *mgr)
buf->slab = slab;
buf->start = i* mgr->bufSize;
buf->mapCount = 0;
_glthread_INIT_COND(buf->event);
pipe_condvar_init(buf->event);
LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
slab->numFree++;
buf++;
@ -328,11 +328,11 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
if(!pb_check_usage(desc->usage, mgr->desc.usage))
return NULL;
_glthread_LOCK_MUTEX(mgr->mutex);
pipe_mutex_lock(mgr->mutex);
if (mgr->slabs.next == &mgr->slabs) {
(void) pb_slab_create(mgr);
if (mgr->slabs.next == &mgr->slabs) {
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
return NULL;
}
}
@ -344,7 +344,7 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
list = slab->freeBuffers.next;
LIST_DELINIT(list);
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
++buf->base.base.refcount;
@ -388,7 +388,7 @@ pb_slab_manager_create(struct pb_manager *provider,
LIST_INITHEAD(&mgr->slabs);
LIST_INITHEAD(&mgr->freeSlabs);
_glthread_INIT_MUTEX(mgr->mutex);
pipe_mutex_init(mgr->mutex);
return &mgr->base;
}

View File

@ -47,11 +47,12 @@
#include <unistd.h>
#include <sys/mman.h>
#include "pipe/p_thread.h"
#include "util/u_mm.h"
#define EXEC_HEAP_SIZE (10*1024*1024)
_glthread_DECLARE_STATIC_MUTEX(exec_mutex);
pipe_static_mutex(exec_mutex);
static struct mem_block *exec_heap = NULL;
static unsigned char *exec_mem = NULL;
@ -76,7 +77,7 @@ rtasm_exec_malloc(size_t size)
struct mem_block *block = NULL;
void *addr = NULL;
_glthread_LOCK_MUTEX(exec_mutex);
pipe_mutex_lock(exec_mutex);
init_heap();
@ -90,7 +91,7 @@ rtasm_exec_malloc(size_t size)
else
debug_printf("rtasm_exec_malloc failed\n");
_glthread_UNLOCK_MUTEX(exec_mutex);
pipe_mutex_unlock(exec_mutex);
return addr;
}
@ -99,7 +100,7 @@ rtasm_exec_malloc(size_t size)
void
rtasm_exec_free(void *addr)
{
_glthread_LOCK_MUTEX(exec_mutex);
pipe_mutex_lock(exec_mutex);
if (exec_heap) {
struct mem_block *block = mmFindBlock(exec_heap, (unsigned char *)addr - exec_mem);
@ -108,7 +109,7 @@ rtasm_exec_free(void *addr)
mmFreeMem(block);
}
_glthread_UNLOCK_MUTEX(exec_mutex);
pipe_mutex_unlock(exec_mutex);
}

View File

@ -23,307 +23,199 @@
*
**************************************************************************/
/**
* @file
* Thread
*
* Initial version by John Stone (j.stone@acm.org) (johns@cs.umr.edu)
* and Christoph Poliwoda (poliwoda@volumegraphics.com)
* Revised by Keith Whitwell
* Adapted for new gl dispatcher by Brian Paul
*
*
*
* DOCUMENTATION
*
* This thread module exports the following types:
* _glthread_TSD Thread-specific data area
* _glthread_Thread Thread datatype
* _glthread_Mutex Mutual exclusion lock
*
* Macros:
* _glthread_DECLARE_STATIC_MUTEX(name) Declare a non-local mutex
* _glthread_INIT_MUTEX(name) Initialize a mutex
* _glthread_LOCK_MUTEX(name) Lock a mutex
* _glthread_UNLOCK_MUTEX(name) Unlock a mutex
*
* Functions:
* _glthread_GetID(v) Get integer thread ID
* _glthread_InitTSD() Initialize thread-specific data
* _glthread_GetTSD() Get thread-specific data
* _glthread_SetTSD() Set thread-specific data
*
* If this file is accidentally included by a non-threaded build,
* it should not cause the build to fail, or otherwise cause problems.
* In general, it should only be included when needed however.
* Thread, mutex, condition var and thread-specific data functions.
*/
#ifndef _P_THREAD_H_
#define _P_THREAD_H_
#ifndef _P_THREAD2_H_
#define _P_THREAD2_H_
#if (defined(PTHREADS) || defined(SOLARIS_THREADS) ||\
defined(WIN32_THREADS) || defined(USE_XTHREADS) || defined(BEOS_THREADS)) \
&& !defined(THREADS)
# define THREADS
#endif
#include "pipe/p_compiler.h"
#ifdef VMS
#include <GL/vms_x_fix.h>
#endif
/*
* POSIX threads. This should be your choice in the Unix world
* whenever possible. When building with POSIX threads, be sure
* to enable any compiler flags which will cause the MT-safe
* libc (if one exists) to be used when linking, as well as any
* header macros for MT-safe errno, etc. For Solaris, this is the -mt
* compiler flag. On Solaris with gcc, use -D_REENTRANT to enable
* proper compiling for MT-safe libc etc.
*/
#if defined(PTHREADS)
#if defined(PIPE_OS_LINUX)
#include <pthread.h> /* POSIX threads headers */
#include <stdio.h> /* for perror() */
typedef struct {
pthread_key_t key;
int initMagic;
} _glthread_TSD;
typedef pthread_t _glthread_Thread;
typedef pthread_t pipe_thread;
typedef pthread_mutex_t pipe_mutex;
typedef pthread_cond_t pipe_condvar;
typedef pthread_mutex_t _glthread_Mutex;
#define pipe_static_mutex(mutex) \
static pipe_mutex mutex = PTHREAD_MUTEX_INITIALIZER
#define _glthread_DECLARE_STATIC_MUTEX(name) \
static _glthread_Mutex name = PTHREAD_MUTEX_INITIALIZER
#define pipe_mutex_init(mutex) \
pthread_mutex_init(&(mutex), NULL)
#define _glthread_INIT_MUTEX(name) \
pthread_mutex_init(&(name), NULL)
#define pipe_mutex_destroy(mutex) \
pthread_mutex_destroy(&(mutex))
#define _glthread_DESTROY_MUTEX(name) \
pthread_mutex_destroy(&(name))
#define pipe_mutex_lock(mutex) \
(void) pthread_mutex_lock(&(mutex))
#define _glthread_LOCK_MUTEX(name) \
(void) pthread_mutex_lock(&(name))
#define pipe_mutex_unlock(mutex) \
(void) pthread_mutex_unlock(&(mutex))
#define _glthread_UNLOCK_MUTEX(name) \
(void) pthread_mutex_unlock(&(name))
#define pipe_static_condvar(mutex) \
static pipe_condvar mutex = PTHREAD_COND_INITIALIZER
typedef pthread_cond_t _glthread_Cond;
#define _glthread_DECLARE_STATIC_COND(name) \
static _glthread_Cond name = PTHREAD_COND_INITIALIZER
#define _glthread_INIT_COND(cond) \
#define pipe_condvar_init(cond) \
pthread_cond_init(&(cond), NULL)
#define _glthread_DESTROY_COND(name) \
pthread_cond_destroy(&(name))
#define pipe_condvar_destroy(cond) \
pthread_cond_destroy(&(cond))
#define _glthread_COND_WAIT(cond, mutex) \
#define pipe_condvar_wait(cond, mutex) \
pthread_cond_wait(&(cond), &(mutex))
#define _glthread_COND_SIGNAL(cond) \
#define pipe_condvar_signal(cond) \
pthread_cond_signal(&(cond))
#define _glthread_COND_BROADCAST(cond) \
#define pipe_condvar_broadcast(cond) \
pthread_cond_broadcast(&(cond))
#endif /* PTHREADS */
#elif defined(PIPE_OS_WINDOWS)
/*
* Solaris threads. Use only up to Solaris 2.4.
* Solaris 2.5 and higher provide POSIX threads.
* Be sure to compile with -mt on the Solaris compilers, or
* use -D_REENTRANT if using gcc.
*/
#ifdef SOLARIS_THREADS
#include <thread.h>
typedef struct {
thread_key_t key;
mutex_t keylock;
int initMagic;
} _glthread_TSD;
typedef thread_t _glthread_Thread;
typedef mutex_t _glthread_Mutex;
/* XXX need to really implement mutex-related macros */
#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = 0
#define _glthread_INIT_MUTEX(name) (void) name
#define _glthread_DESTROY_MUTEX(name) (void) name
#define _glthread_LOCK_MUTEX(name) (void) name
#define _glthread_UNLOCK_MUTEX(name) (void) name
#endif /* SOLARIS_THREADS */
/*
* Windows threads. Should work with Windows NT and 95.
* IMPORTANT: Link with multithreaded runtime library when THREADS are
* used!
*/
#ifdef WIN32_THREADS
#include <windows.h>
typedef struct {
DWORD key;
int initMagic;
} _glthread_TSD;
typedef HANDLE pipe_thread;
typedef CRITICAL_SECTION pipe_mutex;
typedef HANDLE _glthread_Thread;
#define pipe_static_mutex(name) \
/*static*/ pipe_mutex name = {0,0,0,0,0,0}
typedef CRITICAL_SECTION _glthread_Mutex;
#define pipe_mutex_init(name) \
InitializeCriticalSection(&name)
#define _glthread_DECLARE_STATIC_MUTEX(name) /*static*/ _glthread_Mutex name = {0,0,0,0,0,0}
#define _glthread_INIT_MUTEX(name) InitializeCriticalSection(&name)
#define _glthread_DESTROY_MUTEX(name) DeleteCriticalSection(&name)
#define _glthread_LOCK_MUTEX(name) EnterCriticalSection(&name)
#define _glthread_UNLOCK_MUTEX(name) LeaveCriticalSection(&name)
#define pipe_mutex_destroy(name) \
DeleteCriticalSection(&name)
#endif /* WIN32_THREADS */
#define pipe_mutex_lock(name) \
EnterCriticalSection(&name)
#define pipe_mutex_unlock(name) \
LeaveCriticalSection(&name)
/*
* XFree86 has its own thread wrapper, Xthreads.h
* We wrap it again for GL.
*/
#ifdef USE_XTHREADS
#include <X11/Xthreads.h>
typedef struct {
xthread_key_t key;
int initMagic;
} _glthread_TSD;
typedef xthread_t _glthread_Thread;
typedef xmutex_rec _glthread_Mutex;
#ifdef XMUTEX_INITIALIZER
#define _glthread_DECLARE_STATIC_MUTEX(name) \
static _glthread_Mutex name = XMUTEX_INITIALIZER
#else
#define _glthread_DECLARE_STATIC_MUTEX(name) \
static _glthread_Mutex name
/** Dummy definitions */
typedef unsigned pipe_thread;
typedef unsigned pipe_mutex;
typedef unsigned pipe_condvar;
typedef unsigned pipe_tsd;
#define pipe_static_mutex(mutex) \
static pipe_mutex mutex = 0
#define pipe_mutex_init(mutex) \
(void) mutex
#define pipe_mutex_destroy(mutex) \
(void) mutex
#define pipe_mutex_lock(mutex) \
(void) mutex
#define pipe_mutex_unlock(mutex) \
(void) mutex
#define pipe_static_condvar(condvar) \
static pipe_condvar condvar = 0
#define pipe_condvar_init(condvar) \
(void) condvar
#define pipe_condvar_destroy(condvar) \
(void) condvar
#define pipe_condvar_wait(condvar, mutex) \
(void) condvar
#define pipe_condvar_signal(condvar) \
(void) condvar
#define pipe_condvar_broadcast(condvar) \
(void) condvar
#endif /* PIPE_OS_? */
/*
* Thread-specific data.
*/
typedef struct {
#if defined(PIPE_OS_LINUX)
pthread_key_t key;
#elif defined(PIPE_OS_WINDOWS)
DWORD key;
#endif
int initMagic;
} pipe_tsd;
#define _glthread_INIT_MUTEX(name) \
xmutex_init(&(name))
#define _glthread_DESTROY_MUTEX(name) \
xmutex_clear(&(name))
#define PIPE_TSD_INIT_MAGIC 0xff8adc98
#define _glthread_LOCK_MUTEX(name) \
(void) xmutex_lock(&(name))
#define _glthread_UNLOCK_MUTEX(name) \
(void) xmutex_unlock(&(name))
static INLINE void
pipe_tsd_init(pipe_tsd *tsd)
{
#if defined(PIPE_OS_LINUX)
if (pthread_key_create(&tsd->key, NULL/*free*/) != 0) {
perror("pthread_key_create(): failed to allocate key for thread specific data");
exit(-1);
}
#elif defined(PIPE_OS_WINDOWS)
assert(0);
#endif
tsd->initMagic = PIPE_TSD_INIT_MAGIC;
}
#endif /* USE_XTHREADS */
static INLINE void *
pipe_tsd_get(pipe_tsd *tsd)
{
if (tsd->initMagic != (int) PIPE_TSD_INIT_MAGIC) {
pipe_tsd_init(tsd);
}
#if defined(PIPE_OS_LINUX)
return pthread_getspecific(tsd->key);
#elif defined(PIPE_OS_WINDOWS)
assert(0);
return NULL;
#else
assert(0);
return NULL;
#endif
}
static INLINE void
pipe_tsd_set(pipe_tsd *tsd, void *value)
{
if (tsd->initMagic != (int) PIPE_TSD_INIT_MAGIC) {
pipe_tsd_init(tsd);
}
#if defined(PIPE_OS_LINUX)
if (pthread_setspecific(tsd->key, value) != 0) {
perror("pthread_set_specific() failed");
exit(-1);
}
#elif defined(PIPE_OS_WINDOWS)
assert(0);
#else
assert(0);
#endif
}
/*
* BeOS threads. R5.x required.
*/
#ifdef BEOS_THREADS
#include <kernel/OS.h>
#include <support/TLS.h>
typedef struct {
int32 key;
int initMagic;
} _glthread_TSD;
typedef thread_id _glthread_Thread;
/* Use Benaphore, aka speeder semaphore */
typedef struct {
int32 lock;
sem_id sem;
} benaphore;
typedef benaphore _glthread_Mutex;
#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = { 0, 0 }
#define _glthread_INIT_MUTEX(name) name.sem = create_sem(0, #name"_benaphore"), name.lock = 0
#define _glthread_DESTROY_MUTEX(name) delete_sem(name.sem), name.lock = 0
#define _glthread_LOCK_MUTEX(name) if (name.sem == 0) _glthread_INIT_MUTEX(name); \
if (atomic_add(&(name.lock), 1) >= 1) acquire_sem(name.sem)
#define _glthread_UNLOCK_MUTEX(name) if (atomic_add(&(name.lock), -1) > 1) release_sem(name.sem)
#endif /* BEOS_THREADS */
#ifndef THREADS
/*
* THREADS not defined
*/
typedef unsigned _glthread_TSD;
typedef unsigned _glthread_Thread;
typedef unsigned _glthread_Mutex;
#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = 0
#define _glthread_INIT_MUTEX(name) (void) name
#define _glthread_DESTROY_MUTEX(name) (void) name
#define _glthread_LOCK_MUTEX(name) (void) name
#define _glthread_UNLOCK_MUTEX(name) (void) name
typedef unsigned _glthread_Cond;
#define _glthread_DECLARE_STATIC_COND(name) static _glthread_Cond name = 0
#define _glthread_INIT_COND(name) (void) name
#define _glthread_DESTROY_COND(name) (void) name
#define _glthread_COND_WAIT(name, mutex) (void) name
#define _glthread_COND_SIGNAL(name) (void) name
#define _glthread_COND_BROADCAST(name) (void) name
#endif /* THREADS */
/*
* Platform independent thread specific data API.
*/
extern unsigned long
_glthread_GetID(void);
extern void
_glthread_InitTSD(_glthread_TSD *);
extern void *
_glthread_GetTSD(_glthread_TSD *);
extern void
_glthread_SetTSD(_glthread_TSD *, void *);
#endif /* _P_THREAD_H_ */
#endif /* _P_THREAD2_H_ */
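
To round out the new header, a small usage sketch of the condition-variable and thread-specific-data helpers defined above (a hypothetical producer/consumer; names such as work_ready and work_count are illustrative only, assuming the PIPE_OS_LINUX path):

#include "pipe/p_thread.h"

static pipe_mutex work_mutex;
static pipe_condvar work_ready;
static int work_count;          /* shared state, protected by work_mutex */
static pipe_tsd worker_name;    /* zero-initialized, so the accessors lazily create the key */

static void
work_init(void)
{
   pipe_mutex_init(work_mutex);
   pipe_condvar_init(work_ready);
}

static void
work_submit(void)
{
   pipe_mutex_lock(work_mutex);
   work_count++;
   pipe_condvar_signal(work_ready);              /* wake one waiting consumer */
   pipe_mutex_unlock(work_mutex);
}

static void
work_consume(void)
{
   pipe_mutex_lock(work_mutex);
   while (work_count == 0)
      pipe_condvar_wait(work_ready, work_mutex); /* atomically drops and reacquires work_mutex */
   work_count--;
   pipe_mutex_unlock(work_mutex);

   /* per-thread data: first use trips the PIPE_TSD_INIT_MAGIC check and creates the key */
   pipe_tsd_set(&worker_name, (void *) "consumer");
   (void) pipe_tsd_get(&worker_name);
}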

View File

@ -33,7 +33,7 @@
#include <xf86drm.h>
#include <stdlib.h>
#include <stdio.h>
#include "glthread.h"
#include "pipe/p_thread.h"
#include "errno.h"
#include "ws_dri_bufmgr.h"
#include "string.h"
@ -51,8 +51,8 @@
* driBufferObject mutex - > this rw lock.
*/
_glthread_DECLARE_STATIC_MUTEX(bmMutex);
_glthread_DECLARE_STATIC_COND(bmCond);
pipe_static_mutex(bmMutex);
pipe_static_condvar(bmCond);
static int kernelReaders = 0;
static int num_buffers = 0;
@ -241,29 +241,29 @@ static int drmBOResetList(drmBOList *list)
void driWriteLockKernelBO(void)
{
_glthread_LOCK_MUTEX(bmMutex);
pipe_mutex_lock(bmMutex);
while(kernelReaders != 0)
_glthread_COND_WAIT(bmCond, bmMutex);
pipe_condvar_wait(bmCond, bmMutex);
}
void driWriteUnlockKernelBO(void)
{
_glthread_UNLOCK_MUTEX(bmMutex);
pipe_mutex_unlock(bmMutex);
}
void driReadLockKernelBO(void)
{
_glthread_LOCK_MUTEX(bmMutex);
pipe_mutex_lock(bmMutex);
kernelReaders++;
_glthread_UNLOCK_MUTEX(bmMutex);
pipe_mutex_unlock(bmMutex);
}
void driReadUnlockKernelBO(void)
{
_glthread_LOCK_MUTEX(bmMutex);
pipe_mutex_lock(bmMutex);
if (--kernelReaders == 0)
_glthread_COND_BROADCAST(bmCond);
_glthread_UNLOCK_MUTEX(bmMutex);
pipe_condvar_broadcast(bmCond);
pipe_mutex_unlock(bmMutex);
}
@ -277,7 +277,7 @@ void driReadUnlockKernelBO(void)
typedef struct _DriBufferObject
{
DriBufferPool *pool;
_glthread_Mutex mutex;
pipe_mutex mutex;
int refCount;
const char *name;
uint64_t flags;
@ -318,12 +318,12 @@ driBOKernel(struct _DriBufferObject *buf)
drmBO *ret;
driReadLockKernelBO();
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
assert(buf->private != NULL);
ret = buf->pool->kernel(buf->pool, buf->private);
if (!ret)
BM_CKFATAL(-EINVAL);
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
driReadUnlockKernelBO();
return ret;
@ -338,9 +338,9 @@ driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
* that time??
*/
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, &buf->mutex, lazy));
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
}
void *
@ -353,11 +353,11 @@ driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
return buf->userData;
}
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
assert(buf->private != NULL);
retval = buf->pool->map(buf->pool, buf->private, flags, hint,
&buf->mutex, &virtual);
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
return retval == 0 ? virtual : NULL;
}
@ -369,9 +369,9 @@ driBOUnmap(struct _DriBufferObject *buf)
return;
assert(buf->private != NULL);
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
}
unsigned long
@ -381,9 +381,9 @@ driBOOffset(struct _DriBufferObject *buf)
assert(buf->private != NULL);
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
ret = buf->pool->offset(buf->pool, buf->private);
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
return ret;
}
@ -394,9 +394,9 @@ driBOPoolOffset(struct _DriBufferObject *buf)
assert(buf->private != NULL);
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
ret = buf->pool->poolOffset(buf->pool, buf->private);
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
return ret;
}
@ -408,9 +408,9 @@ driBOFlags(struct _DriBufferObject *buf)
assert(buf->private != NULL);
driReadLockKernelBO();
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
ret = buf->pool->flags(buf->pool, buf->private);
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
driReadUnlockKernelBO();
return ret;
}
@ -418,12 +418,12 @@ driBOFlags(struct _DriBufferObject *buf)
struct _DriBufferObject *
driBOReference(struct _DriBufferObject *buf)
{
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
if (++buf->refCount == 1) {
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
BM_CKFATAL(-EINVAL);
}
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
return buf;
}
@ -435,10 +435,10 @@ driBOUnReference(struct _DriBufferObject *buf)
if (!buf)
return;
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
tmp = --buf->refCount;
if (!tmp) {
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
if (buf->private) {
if (buf->createdByReference)
buf->pool->unreference(buf->pool, buf->private);
@ -451,7 +451,7 @@ driBOUnReference(struct _DriBufferObject *buf)
num_buffers--;
free(buf);
} else
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
}
@ -469,7 +469,7 @@ driBOData(struct _DriBufferObject *buf,
assert(!buf->userBuffer); /* XXX just do a memcpy? */
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
pool = buf->pool;
if (pool == NULL && newPool != NULL) {
@ -556,7 +556,7 @@ driBOData(struct _DriBufferObject *buf,
}
out:
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
return retval;
}
@ -569,7 +569,7 @@ driBOSubData(struct _DriBufferObject *buf,
assert(!buf->userBuffer); /* XXX just do a memcpy? */
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
if (size && data) {
BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
DRM_BO_FLAG_WRITE, 0, &buf->mutex,
@ -577,7 +577,7 @@ driBOSubData(struct _DriBufferObject *buf,
memcpy((unsigned char *) virtual + offset, data, size);
BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
}
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
}
void
@ -588,21 +588,21 @@ driBOGetSubData(struct _DriBufferObject *buf,
assert(!buf->userBuffer); /* XXX just do a memcpy? */
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
if (size && data) {
BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
DRM_BO_FLAG_READ, 0, &buf->mutex, &virtual));
memcpy(data, (unsigned char *) virtual + offset, size);
BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
}
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
}
void
driBOSetReferenced(struct _DriBufferObject *buf,
unsigned long handle)
{
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
if (buf->private != NULL) {
assert((size_t)"Invalid buffer for setReferenced\n" & 0);
BM_CKFATAL(-EINVAL);
@ -619,7 +619,7 @@ driBOSetReferenced(struct _DriBufferObject *buf,
}
buf->createdByReference = TRUE;
buf->flags = buf->pool->kernel(buf->pool, buf->private)->flags;
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
}
int
@ -644,8 +644,8 @@ driGenBuffers(struct _DriBufferPool *pool,
if (!buf)
return -ENOMEM;
_glthread_INIT_MUTEX(buf->mutex);
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_init(buf->mutex);
pipe_mutex_lock(buf->mutex);
buf->refCount = 1;
buf->flags = flags;
buf->hint = hint;
@ -653,7 +653,7 @@ driGenBuffers(struct _DriBufferPool *pool,
buf->alignment = alignment;
buf->pool = pool;
buf->createdByReference = 0;
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
buffers[i] = buf;
}
return 0;
@ -818,7 +818,7 @@ driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf,
{
int newItem;
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
BM_CKFATAL(driAddValidateItem(&list->drmBuffers,
buf->pool->kernel(buf->pool, buf->private),
flags, mask, itemLoc, node));
@ -827,7 +827,7 @@ driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf,
if (newItem)
buf->refCount++;
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
}
drmBOList *driGetdrmBOList(struct _DriBufferList *list)
@ -845,10 +845,10 @@ void driPutdrmBOList(struct _DriBufferList *list)
void
driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
{
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
if (buf->pool->fence)
BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
}
@ -908,10 +908,10 @@ driBOValidateUserList(struct _DriBufferList * list)
while (curBuf) {
buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
if (buf->pool->validate)
BM_CKFATAL(buf->pool->validate(buf->pool, buf->private, &buf->mutex));
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
curBuf = drmBOListNext(&list->driBuffers, curBuf);
}
}
@ -929,9 +929,9 @@ driBOSize(struct _DriBufferObject *buf)
{
unsigned long size;
_glthread_LOCK_MUTEX(buf->mutex);
pipe_mutex_lock(buf->mutex);
size = buf->pool->size(buf->pool, buf->private);
_glthread_UNLOCK_MUTEX(buf->mutex);
pipe_mutex_unlock(buf->mutex);
return size;

View File

@ -33,14 +33,14 @@
#define _PSB_BUFPOOL_H_
#include <xf86drm.h>
#include <glthread.h>
#include "pipe/p_thread.h"
struct _DriFenceObject;
typedef struct _DriBufferPool
{
int fd;
int (*map) (struct _DriBufferPool * pool, void *private,
unsigned flags, int hint, _glthread_Mutex *mutex,
unsigned flags, int hint, pipe_mutex *mutex,
void **virtual);
int (*unmap) (struct _DriBufferPool * pool, void *private);
int (*destroy) (struct _DriBufferPool * pool, void *private);
@ -55,8 +55,8 @@ typedef struct _DriBufferPool
int (*fence) (struct _DriBufferPool * pool, void *private,
struct _DriFenceObject * fence);
drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
int (*validate) (struct _DriBufferPool * pool, void *private, _glthread_Mutex *mutex);
int (*waitIdle) (struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
int (*validate) (struct _DriBufferPool * pool, void *private, pipe_mutex *mutex);
int (*waitIdle) (struct _DriBufferPool *pool, void *private, pipe_mutex *mutex,
int lazy);
int (*setStatus) (struct _DriBufferPool *pool, void *private,
uint64_t flag_diff, uint64_t old_flags);

View File

@ -113,7 +113,7 @@ pool_unreference(struct _DriBufferPool *pool, void *private)
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
int hint, _glthread_Mutex *mutex, void **virtual)
int hint, pipe_mutex *mutex, void **virtual)
{
drmBO *buf = (drmBO *) private;
int ret;
@ -202,7 +202,7 @@ pool_kernel(struct _DriBufferPool *pool, void *private)
}
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
pool_waitIdle(struct _DriBufferPool *pool, void *private, pipe_mutex *mutex,
int lazy)
{
drmBO *buf = (drmBO *) private;

View File

@ -1,5 +1,5 @@
#include "ws_dri_fencemgr.h"
#include "glthread.h"
#include "pipe/p_thread.h"
#include <xf86mm.h>
#include <string.h>
#include <unistd.h>
@ -20,7 +20,7 @@ struct _DriFenceMgr {
/*
* These members are protected by this->mutex
*/
_glthread_Mutex mutex;
pipe_mutex mutex;
int refCount;
drmMMListHead *heads;
int num_fences;
@ -44,7 +44,7 @@ struct _DriFenceObject {
/*
* These members are protected by this->mutex.
*/
_glthread_Mutex mutex;
pipe_mutex mutex;
uint32_t signaled_type;
void *private;
};
@ -65,8 +65,8 @@ driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
if (!tmp)
return NULL;
_glthread_INIT_MUTEX(tmp->mutex);
_glthread_LOCK_MUTEX(tmp->mutex);
pipe_mutex_init(tmp->mutex);
pipe_mutex_lock(tmp->mutex);
tmp->refCount = 1;
tmp->info = *info;
tmp->num_fences = 0;
@ -77,7 +77,7 @@ driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
for (i=0; i<tmp->info.num_classes; ++i) {
DRMINITLISTHEAD(&tmp->heads[i]);
}
_glthread_UNLOCK_MUTEX(tmp->mutex);
pipe_mutex_unlock(tmp->mutex);
return tmp;
out_err:
@ -95,13 +95,13 @@ driFenceMgrUnrefUnlock(struct _DriFenceMgr **pMgr)
if (--mgr->refCount == 0)
free(mgr);
else
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
}
void
driFenceMgrUnReference(struct _DriFenceMgr **pMgr)
{
_glthread_LOCK_MUTEX((*pMgr)->mutex);
pipe_mutex_lock((*pMgr)->mutex);
driFenceMgrUnrefUnlock(pMgr);
}
@ -143,9 +143,9 @@ driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
*/
++entry->refCount;
_glthread_UNLOCK_MUTEX(mgr->mutex);
_glthread_LOCK_MUTEX(entry->mutex);
_glthread_LOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
pipe_mutex_lock(entry->mutex);
pipe_mutex_lock(mgr->mutex);
prev = list->prev;
@ -157,7 +157,7 @@ driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
* Somebody else removed the entry from the list.
*/
_glthread_UNLOCK_MUTEX(entry->mutex);
pipe_mutex_unlock(entry->mutex);
driFenceUnReferenceLocked(&entry);
return;
}
@ -167,7 +167,7 @@ driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
DRMLISTDELINIT(list);
mgr->info.unreference(mgr, &entry->private);
}
_glthread_UNLOCK_MUTEX(entry->mutex);
pipe_mutex_unlock(entry->mutex);
driFenceUnReferenceLocked(&entry);
list = prev;
}
@ -181,7 +181,7 @@ driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
struct _DriFenceMgr *mgr = fence->mgr;
int ret = 0;
_glthread_LOCK_MUTEX(fence->mutex);
pipe_mutex_lock(fence->mutex);
if ((fence->signaled_type & fence_type) == fence_type)
goto out0;
@ -190,16 +190,16 @@ driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
if (ret)
goto out0;
_glthread_LOCK_MUTEX(mgr->mutex);
_glthread_UNLOCK_MUTEX(fence->mutex);
pipe_mutex_lock(mgr->mutex);
pipe_mutex_unlock(fence->mutex);
driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
fence_type);
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
return 0;
out0:
_glthread_UNLOCK_MUTEX(fence->mutex);
pipe_mutex_unlock(fence->mutex);
return ret;
}
@ -207,9 +207,9 @@ uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence)
{
uint32_t ret;
_glthread_LOCK_MUTEX(fence->mutex);
pipe_mutex_lock(fence->mutex);
ret = fence->signaled_type;
_glthread_UNLOCK_MUTEX(fence->mutex);
pipe_mutex_unlock(fence->mutex);
return ret;
}
@ -221,7 +221,7 @@ driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
int ret = 0;
struct _DriFenceMgr *mgr;
_glthread_LOCK_MUTEX(fence->mutex);
pipe_mutex_lock(fence->mutex);
mgr = fence->mgr;
*signaled = fence->signaled_type;
if ((fence->signaled_type & flush_type) == flush_type)
@ -236,25 +236,25 @@ driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
if ((fence->signaled_type | *signaled) == fence->signaled_type)
goto out0;
_glthread_LOCK_MUTEX(mgr->mutex);
_glthread_UNLOCK_MUTEX(fence->mutex);
pipe_mutex_lock(mgr->mutex);
pipe_mutex_unlock(fence->mutex);
driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
*signaled);
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
return 0;
out0:
_glthread_UNLOCK_MUTEX(fence->mutex);
pipe_mutex_unlock(fence->mutex);
return ret;
}
struct _DriFenceObject *
driFenceReference(struct _DriFenceObject *fence)
{
_glthread_LOCK_MUTEX(fence->mgr->mutex);
pipe_mutex_lock(fence->mgr->mutex);
++fence->refCount;
_glthread_UNLOCK_MUTEX(fence->mgr->mutex);
pipe_mutex_unlock(fence->mgr->mutex);
return fence;
}
@ -267,7 +267,7 @@ driFenceUnReference(struct _DriFenceObject **pFence)
return;
mgr = (*pFence)->mgr;
_glthread_LOCK_MUTEX(mgr->mutex);
pipe_mutex_lock(mgr->mutex);
++mgr->refCount;
driFenceUnReferenceLocked(pFence);
driFenceMgrUnrefUnlock(&mgr);
@ -294,15 +294,15 @@ struct _DriFenceObject
return NULL;
}
_glthread_INIT_MUTEX(fence->mutex);
_glthread_LOCK_MUTEX(fence->mutex);
_glthread_LOCK_MUTEX(mgr->mutex);
pipe_mutex_init(fence->mutex);
pipe_mutex_lock(fence->mutex);
pipe_mutex_lock(mgr->mutex);
fence->refCount = 1;
DRMLISTADDTAIL(&fence->head, &mgr->heads[fence_class]);
fence->mgr = mgr;
++mgr->refCount;
++mgr->num_fences;
_glthread_UNLOCK_MUTEX(mgr->mutex);
pipe_mutex_unlock(mgr->mutex);
fence->fence_class = fence_class;
fence->fence_type = fence_type;
fence->signaled_type = 0;
@ -312,7 +312,7 @@ struct _DriFenceObject
memcpy(fence->private, private, private_size);
}
_glthread_UNLOCK_MUTEX(fence->mutex);
pipe_mutex_unlock(fence->mutex);
return fence;
}

View File

@ -33,7 +33,7 @@
#include <stdlib.h>
#include <errno.h>
#include "pipe/p_debug.h"
#include "glthread.h"
#include "pipe/p_thread.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"
@ -60,14 +60,14 @@ pool_destroy(struct _DriBufferPool *pool, void *private)
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private,
_glthread_Mutex *mutex, int lazy)
pipe_mutex *mutex, int lazy)
{
return 0;
}
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
int hint, _glthread_Mutex *mutex, void **virtual)
int hint, pipe_mutex *mutex, void **virtual)
{
*virtual = (void *)((unsigned long *)private + 2);
return 0;

View File

@ -37,7 +37,7 @@
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"
#include "ws_dri_bufmgr.h"
#include "glthread.h"
#include "pipe/p_thread.h"
#define DRI_SLABPOOL_ALLOC_RETRIES 100
@ -53,7 +53,7 @@ struct _DriSlabBuffer {
uint32_t start;
uint32_t fenceType;
int unFenced;
_glthread_Cond event;
pipe_condvar event;
};
struct _DriKernelBO {
@ -84,7 +84,7 @@ struct _DriSlabSizeHeader {
uint32_t numDelayed;
struct _DriSlabPool *slabPool;
uint32_t bufSize;
_glthread_Mutex mutex;
pipe_mutex mutex;
};
struct _DriFreeSlabManager {
@ -94,7 +94,7 @@ struct _DriFreeSlabManager {
drmMMListHead timeoutList;
drmMMListHead unCached;
drmMMListHead cached;
_glthread_Mutex mutex;
pipe_mutex mutex;
};
@ -196,7 +196,7 @@ driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
{
struct timeval time;
_glthread_LOCK_MUTEX(fMan->mutex);
pipe_mutex_lock(fMan->mutex);
gettimeofday(&time, NULL);
driTimeAdd(&time, &fMan->slabTimeout);
@ -210,7 +210,7 @@ driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);
driFreeTimeoutKBOsLocked(fMan, &time);
_glthread_UNLOCK_MUTEX(fMan->mutex);
pipe_mutex_unlock(fMan->mutex);
}
/*
@ -237,7 +237,7 @@ driAllocKernelBO(struct _DriSlabSizeHeader *header)
size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
_glthread_LOCK_MUTEX(fMan->mutex);
pipe_mutex_lock(fMan->mutex);
kbo = NULL;
@ -269,7 +269,7 @@ driAllocKernelBO(struct _DriSlabSizeHeader *header)
DRMLISTDELINIT(&kbo->timeoutHead);
}
_glthread_UNLOCK_MUTEX(fMan->mutex);
pipe_mutex_unlock(fMan->mutex);
if (kbo) {
uint64_t new_mask = kbo->bo.proposedFlags ^ slabPool->proposedFlags;
@ -360,7 +360,7 @@ driAllocSlab(struct _DriSlabSizeHeader *header)
buf->start = i* header->bufSize;
buf->mapCount = 0;
buf->isSlabBuffer = 1;
_glthread_INIT_COND(buf->event);
pipe_condvar_init(buf->event);
DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
slab->numFree++;
buf++;
@ -494,23 +494,23 @@ driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
drmMMListHead *list;
int count = DRI_SLABPOOL_ALLOC_RETRIES;
_glthread_LOCK_MUTEX(header->mutex);
pipe_mutex_lock(header->mutex);
while(header->slabs.next == &header->slabs && count > 0) {
driSlabCheckFreeLocked(header, 0);
if (header->slabs.next != &header->slabs)
break;
_glthread_UNLOCK_MUTEX(header->mutex);
pipe_mutex_unlock(header->mutex);
if (count != DRI_SLABPOOL_ALLOC_RETRIES)
usleep(1);
_glthread_LOCK_MUTEX(header->mutex);
pipe_mutex_lock(header->mutex);
(void) driAllocSlab(header);
count--;
}
list = header->slabs.next;
if (list == &header->slabs) {
_glthread_UNLOCK_MUTEX(header->mutex);
pipe_mutex_unlock(header->mutex);
return NULL;
}
slab = DRMLISTENTRY(struct _DriSlab, list, head);
@ -520,7 +520,7 @@ driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
list = slab->freeBuffers.next;
DRMLISTDELINIT(list);
_glthread_UNLOCK_MUTEX(header->mutex);
pipe_mutex_unlock(header->mutex);
buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
return buf;
}
@ -618,7 +618,7 @@ pool_destroy(struct _DriBufferPool *driPool, void *private)
slab = buf->parent;
header = slab->header;
_glthread_LOCK_MUTEX(header->mutex);
pipe_mutex_lock(header->mutex);
buf->unFenced = 0;
buf->mapCount = 0;
@ -631,18 +631,18 @@ pool_destroy(struct _DriBufferPool *driPool, void *private)
driSlabFreeBufferLocked(buf);
}
_glthread_UNLOCK_MUTEX(header->mutex);
pipe_mutex_unlock(header->mutex);
return 0;
}
static int
pool_waitIdle(struct _DriBufferPool *driPool, void *private,
_glthread_Mutex *mutex, int lazy)
pipe_mutex *mutex, int lazy)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
while(buf->unFenced)
_glthread_COND_WAIT(buf->event, *mutex);
pipe_condvar_wait(buf->event, *mutex);
if (!buf->fence)
return 0;
@ -655,7 +655,7 @@ pool_waitIdle(struct _DriBufferPool *driPool, void *private,
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
int hint, _glthread_Mutex *mutex, void **virtual)
int hint, pipe_mutex *mutex, void **virtual)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
int busy;
@ -689,7 +689,7 @@ pool_unmap(struct _DriBufferPool *pool, void *private)
--buf->mapCount;
if (buf->mapCount == 0 && buf->isSlabBuffer)
_glthread_COND_BROADCAST(buf->event);
pipe_condvar_broadcast(buf->event);
return 0;
}
@ -760,7 +760,7 @@ pool_fence(struct _DriBufferPool *pool, void *private,
buf->fenceType = bo->fenceFlags;
buf->unFenced = 0;
_glthread_COND_BROADCAST(buf->event);
pipe_condvar_broadcast(buf->event);
return 0;
}
@ -775,7 +775,7 @@ pool_kernel(struct _DriBufferPool *pool, void *private)
static int
pool_validate(struct _DriBufferPool *pool, void *private,
_glthread_Mutex *mutex)
pipe_mutex *mutex)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
@ -783,7 +783,7 @@ pool_validate(struct _DriBufferPool *pool, void *private,
return 0;
while(buf->mapCount != 0)
_glthread_COND_WAIT(buf->event, *mutex);
pipe_condvar_wait(buf->event, *mutex);
buf->unFenced = 1;
return 0;
@ -799,8 +799,8 @@ driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
if (!tmp)
return NULL;
_glthread_INIT_MUTEX(tmp->mutex);
_glthread_LOCK_MUTEX(tmp->mutex);
pipe_mutex_init(tmp->mutex);
pipe_mutex_lock(tmp->mutex);
tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
@ -814,7 +814,7 @@ driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
DRMINITLISTHEAD(&tmp->timeoutList);
DRMINITLISTHEAD(&tmp->unCached);
DRMINITLISTHEAD(&tmp->cached);
_glthread_UNLOCK_MUTEX(tmp->mutex);
pipe_mutex_unlock(tmp->mutex);
return tmp;
}
@ -827,9 +827,9 @@ driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
time = fMan->nextCheck;
driTimeAdd(&time, &fMan->checkInterval);
_glthread_LOCK_MUTEX(fMan->mutex);
pipe_mutex_lock(fMan->mutex);
driFreeTimeoutKBOsLocked(fMan, &time);
_glthread_UNLOCK_MUTEX(fMan->mutex);
pipe_mutex_unlock(fMan->mutex);
assert(fMan->timeoutList.next == &fMan->timeoutList);
assert(fMan->unCached.next == &fMan->unCached);
@ -842,8 +842,8 @@ static void
driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
struct _DriSlabSizeHeader *header)
{
_glthread_INIT_MUTEX(header->mutex);
_glthread_LOCK_MUTEX(header->mutex);
pipe_mutex_init(header->mutex);
pipe_mutex_lock(header->mutex);
DRMINITLISTHEAD(&header->slabs);
DRMINITLISTHEAD(&header->freeSlabs);
@ -853,7 +853,7 @@ driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
header->slabPool = pool;
header->bufSize = size;
_glthread_UNLOCK_MUTEX(header->mutex);
pipe_mutex_unlock(header->mutex);
}
static void
@ -862,7 +862,7 @@ driFinishSizeHeader(struct _DriSlabSizeHeader *header)
drmMMListHead *list, *next;
struct _DriSlabBuffer *buf;
_glthread_LOCK_MUTEX(header->mutex);
pipe_mutex_lock(header->mutex);
for (list = header->delayedBuffers.next, next = list->next;
list != &header->delayedBuffers;
list = next, next = list->next) {
@ -875,7 +875,7 @@ driFinishSizeHeader(struct _DriSlabSizeHeader *header)
header->numDelayed--;
driSlabFreeBufferLocked(buf);
}
_glthread_UNLOCK_MUTEX(header->mutex);
pipe_mutex_unlock(header->mutex);
}
static void

View File

@ -27,7 +27,7 @@
#include "main/glheader.h"
#include "glapi/glthread.h"
#include "pipe/p_lthread.h"
#include <GL/internal/glcore.h>
#include "state_tracker/st_public.h"
#include "intel_context.h"
@ -35,7 +35,7 @@
_glthread_DECLARE_STATIC_MUTEX( lockMutex );
pipe_static_mutex( lockMutex );
static void
@ -72,7 +72,7 @@ void LOCK_HARDWARE( struct intel_context *intel )
{
char __ret = 0;
_glthread_LOCK_MUTEX(lockMutex);
pipe_mutex_lock(lockMutex);
assert(!intel->locked);
DRM_CAS(intel->driHwLock, intel->hHWContext,
@ -96,7 +96,7 @@ void UNLOCK_HARDWARE( struct intel_context *intel )
DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
_glthread_UNLOCK_MUTEX(lockMutex);
pipe_mutex_unlock(lockMutex);
DBG(LOCK, "%s - unlocked\n", __progname);
}

View File

@ -37,6 +37,7 @@
#include "main/glheader.h"
#include "glapi/glapi.h"
#include "glxapi.h"
#include "pipe/p_thread.h"
extern struct _glxapi_table *_real_GetGLXDispatchTable(void);
@ -127,26 +128,13 @@ get_dispatch(Display *dpy)
/**
* GLX API current context.
*/
#if defined(GLX_USE_TLS)
PUBLIC __thread void * CurrentContext
__attribute__((tls_model("initial-exec")));
#elif defined(THREADS)
static _glthread_TSD ContextTSD; /**< Per-thread context pointer */
#else
static GLXContext CurrentContext = 0;
#endif
pipe_tsd ContextTSD;
static void
SetCurrentContext(GLXContext c)
{
#if defined(GLX_USE_TLS)
CurrentContext = c;
#elif defined(THREADS)
_glthread_SetTSD(&ContextTSD, c);
#else
CurrentContext = c;
#endif
pipe_tsd_set(&ContextTSD, c);
}
@ -238,13 +226,7 @@ glXGetConfig(Display *dpy, XVisualInfo *visinfo, int attrib, int *value)
GLXContext PUBLIC
glXGetCurrentContext(void)
{
#if defined(GLX_USE_TLS)
return CurrentContext;
#elif defined(THREADS)
return (GLXContext) _glthread_GetTSD(&ContextTSD);
#else
return CurrentContext;
#endif
return (GLXContext) pipe_tsd_get(&ContextTSD);
}

View File

@ -62,7 +62,6 @@
#include "xmesaP.h"
#include "main/context.h"
#include "main/framebuffer.h"
#include "glapi/glthread.h"
#include "state_tracker/st_public.h"
#include "state_tracker/st_context.h"
@ -75,7 +74,7 @@
/**
* Global X driver lock
*/
_glthread_Mutex _xmesa_lock;
pipe_mutex _xmesa_lock;
int xmesa_mode;
@ -245,10 +244,10 @@ xmesa_get_window_size(XMesaDisplay *dpy, XMesaBuffer b,
#else
Status stat;
_glthread_LOCK_MUTEX(_xmesa_lock);
pipe_mutex_lock(_xmesa_lock);
XSync(b->xm_visual->display, 0); /* added for Chromium */
stat = get_drawable_size(dpy, b->drawable, width, height);
_glthread_UNLOCK_MUTEX(_xmesa_lock);
pipe_mutex_unlock(_xmesa_lock);
if (!stat) {
/* probably querying a window that's recently been destroyed */
@ -779,7 +778,7 @@ XMesaContext XMesaCreateContext( XMesaVisual v, XMesaContext share_list )
uint pf;
if (firstTime) {
_glthread_INIT_MUTEX(_xmesa_lock);
pipe_mutex_init(_xmesa_lock);
firstTime = GL_FALSE;
}

View File

@ -35,9 +35,10 @@
#include "state_tracker/st_context.h"
#include "state_tracker/st_public.h"
#include "pipe/p_thread.h"
extern _glthread_Mutex _xmesa_lock;
extern pipe_mutex _xmesa_lock;
extern XMesaBuffer XMesaBufferList;