radeon: Add protection against recursive DRM locking.

Reference counting protects the DRM lock call from recursive locking, which would
cause a hang. The code also adds optional debugging output for recursive calls,
compiled only if NDEBUG is not defined.
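
A minimal standalone sketch of the scheme (take_hw_lock()/drop_hw_lock() are
hypothetical stand-ins for the real DRM_CAS()/DRM_UNLOCK() calls; only the
hwLockCount counter and the early-return flow mirror the diff below):

    #include <stdio.h>

    /* Hypothetical stubs standing in for the real DRM lock calls. */
    static void take_hw_lock(void) { printf("hardware lock taken\n"); }
    static void drop_hw_lock(void) { printf("hardware lock released\n"); }

    static int hwLockCount = 0;    /* mirrors radeon->dri.hwLockCount */

    static void lock_hardware(void)
    {
        /* Only the outermost caller really takes the lock;
         * nested callers just bump the counter and return. */
        if (++hwLockCount > 1)
            return;
        take_hw_lock();
    }

    static void unlock_hardware(void)
    {
        /* Only the outermost unlock really releases the lock. */
        if (--hwLockCount > 0)
            return;
        drop_hw_lock();
    }

    int main(void)
    {
        lock_hardware();      /* count 0 -> 1: lock taken          */
        lock_hardware();      /* count 1 -> 2: recursive, no-op    */
        unlock_hardware();    /* count 2 -> 1: still held          */
        unlock_hardware();    /* count 1 -> 0: lock released       */
        return 0;
    }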

This code is not 100% thread safe, because Mesa doesn't provide an atomic
increment-and-test operation. GCC has built-in functions for this, but they
are only available from GCC 4.2 onwards.
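
For reference, the GCC built-ins in question are __sync_add_and_fetch() and
__sync_sub_and_fetch(); a minimal sketch of the atomic variant (this is the
#if 0 path in the diff below, not what the commit actually enables):

    /* Build with GCC >= 4.2; the __sync_* builtins need no extra headers. */
    static int hwLockCount = 0;

    /* Atomic read-modify-write; returns the new value. */
    #define ATOMIC_INC_AND_FETCH(atomic) __sync_add_and_fetch(&(atomic), 1)
    #define ATOMIC_DEC_AND_FETCH(atomic) __sync_sub_and_fetch(&(atomic), 1)

    int main(void)
    {
        /* The first caller sees 1 and takes the real lock; a racing second
         * caller sees 2 and backs off, with no lost update in between. */
        if (ATOMIC_INC_AND_FETCH(hwLockCount) == 1) {
            /* ... take and use the hardware lock ... */
        }
        ATOMIC_DEC_AND_FETCH(hwLockCount);
        return 0;
    }
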
Pauli Nieminen, 2009-07-30 20:17:29 +03:00 (committed by Alex Deucher)
parent 5eeb44f398
commit 29173d3d5c
4 changed files with 63 additions and 2 deletions

@@ -211,6 +211,7 @@ GLboolean radeonInitContext(radeonContextPtr radeon,
 	radeon->dri.screen = sPriv;
 	radeon->dri.hwContext = driContextPriv->hHWContext;
 	radeon->dri.hwLock = &sPriv->pSAREA->lock;
+	radeon->dri.hwLockCount = 0;
 	radeon->dri.fd = sPriv->fd;
 	radeon->dri.drmMinor = sPriv->drm_version.minor;

@@ -365,6 +365,7 @@ struct radeon_dri_mirror {
 	drm_context_t hwContext;
 	drm_hw_lock_t *hwLock;
+	int hwLockCount;
 	int fd;
 	int drmMinor;
 };

@@ -86,8 +86,34 @@ void radeonGetLock(radeonContextPtr rmesa, GLuint flags)
 	rmesa->vtbl.get_lock(rmesa);
 }
 
+#ifndef NDEBUG
+struct lock_debug {
+	const char* function;
+	const char* file;
+	int line;
+};
+
+static struct lock_debug ldebug = {0};
+#endif
+
+#if 0
+/** TODO: use atomic operations for reference counting **/
+/** gcc 4.2 has builtin functios for this **/
+#define ATOMIC_INC_AND_FETCH(atomic) __sync_add_and_fetch(&atomic, 1)
+#define ATOMIC_DEC_AND_FETCH(atomic) __sync_sub_and_fetch(&atomic, 1)
+#else
+#define ATOMIC_INC_AND_FETCH(atomic) (++atomic)
+#define ATOMIC_DEC_AND_FETCH(atomic) (--atomic)
+#endif
+
-void radeon_lock_hardware(radeonContextPtr radeon)
+void radeon_lock_hardware(radeonContextPtr radeon
+#ifndef NDEBUG
+	,const char* function
+	,const char* file
+	,const int line
+#endif
+	)
 {
 	char ret = 0;
 	struct radeon_framebuffer *rfb = NULL;
@@ -102,16 +128,39 @@ void radeon_lock_hardware(radeonContextPtr radeon)
 	}
 
 	if (!radeon->radeonScreen->driScreen->dri2.enabled) {
+		if (ATOMIC_INC_AND_FETCH(radeon->dri.hwLockCount) > 1)
+		{
+#ifndef NDEBUG
+			if ( RADEON_DEBUG & DEBUG_SANITY )
+				fprintf(stderr, "*** %d times of recursive call to %s ***\n"
+					"Original call was from %s (file: %s line: %d)\n"
+					"Now call is coming from %s (file: %s line: %d)\n"
+					, radeon->dri.hwLockCount, __FUNCTION__
+					, ldebug.function, ldebug.file, ldebug.line
+					, function, file, line
+					);
+#endif
+			return;
+		}
 		DRM_CAS(radeon->dri.hwLock, radeon->dri.hwContext,
 			(DRM_LOCK_HELD | radeon->dri.hwContext), ret );
 		if (ret)
 			radeonGetLock(radeon, 0);
+#ifndef NDEBUG
+		ldebug.function = function;
+		ldebug.file = file;
+		ldebug.line = line;
+#endif
 	}
 }
 
 void radeon_unlock_hardware(radeonContextPtr radeon)
 {
 	if (!radeon->radeonScreen->driScreen->dri2.enabled) {
+		if (ATOMIC_DEC_AND_FETCH(radeon->dri.hwLockCount) > 0)
+		{
+			return;
+		}
 		DRM_UNLOCK( radeon->dri.fd,
 			    radeon->dri.hwLock,
 			    radeon->dri.hwContext );

@@ -48,12 +48,22 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 extern void radeonGetLock(radeonContextPtr rmesa, GLuint flags);
 
-void radeon_lock_hardware(radeonContextPtr rmesa);
+void radeon_lock_hardware(radeonContextPtr rmesa
+#ifndef NDEBUG
+	,const char* function
+	,const char* file
+	,const int line
+#endif
+	);
 void radeon_unlock_hardware(radeonContextPtr rmesa);
 
 /* Lock the hardware and validate our state.
  */
+#ifdef NDEBUG
 #define LOCK_HARDWARE( rmesa ) radeon_lock_hardware(rmesa)
+#else
+#define LOCK_HARDWARE( rmesa ) radeon_lock_hardware(rmesa, __FUNCTION__, __FILE__, __LINE__)
+#endif
 #define UNLOCK_HARDWARE( rmesa ) radeon_unlock_hardware(rmesa)
 
 #endif