i915/sync: Implement DRI2_Fence extension

This is a port of the corresponding i965 patch, i.e. commit c636284
("i965/sync: Implement DRI2_Fence extension").

Part of the original commit message by Chad Versace follows:

"This enables EGL_KHR_fence_sync and EGL_KHR_wait_sync."

Signed-off-by: Emil Velikov <emil.velikov@collabora.com>
commit d11515ff1b (parent 19fa29a592)
Author:    Mauro Rossi, 2016-07-14 05:33:16 +02:00
Committer: Emil Velikov

3 changed files with 152 additions and 30 deletions
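
For context, EGL_KHR_fence_sync is what applications see on top of this
DRI2_Fence plumbing. Below is a minimal sketch of standard client-side usage
(plain Khronos API; the helper name wait_for_gpu is illustrative and not part
of this patch):

#include <EGL/egl.h>
#include <EGL/eglext.h>

/* Insert a fence behind the GL commands issued so far, then block until
 * the GPU has executed them (or a 1-second timeout expires). */
static EGLBoolean
wait_for_gpu(EGLDisplay dpy)
{
   /* The KHR entry points must be fetched through eglGetProcAddress(). */
   PFNEGLCREATESYNCKHRPROC create_sync =
      (PFNEGLCREATESYNCKHRPROC)eglGetProcAddress("eglCreateSyncKHR");
   PFNEGLCLIENTWAITSYNCKHRPROC client_wait =
      (PFNEGLCLIENTWAITSYNCKHRPROC)eglGetProcAddress("eglClientWaitSyncKHR");
   PFNEGLDESTROYSYNCKHRPROC destroy_sync =
      (PFNEGLDESTROYSYNCKHRPROC)eglGetProcAddress("eglDestroySyncKHR");

   EGLSyncKHR sync = create_sync(dpy, EGL_SYNC_FENCE_KHR, NULL);
   if (sync == EGL_NO_SYNC_KHR)
      return EGL_FALSE;

   /* FLUSH_COMMANDS guarantees the fence is actually submitted. */
   EGLint status = client_wait(dpy, sync, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR,
                               1000000000ull /* timeout in ns */);
   destroy_sync(dpy, sync);

   return status == EGL_CONDITION_SATISFIED_KHR;
}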

--- a/src/mesa/drivers/dri/i915/intel_screen.c
+++ b/src/mesa/drivers/dri/i915/intel_screen.c

@@ -787,6 +787,7 @@ static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
 
 static const __DRIextension *intelScreenExtensions[] = {
    &intelTexBufferExtension.base,
+   &intelFenceExtension.base,
    &intelFlushExtension.base,
    &intelImageExtension.base,
    &intelRendererQueryExtension.base,

--- a/src/mesa/drivers/dri/i915/intel_context.h
+++ b/src/mesa/drivers/dri/i915/intel_context.h

@@ -162,6 +162,7 @@ extern void intelDestroyContext(__DRIcontext * driContextPriv);
 extern GLboolean intelUnbindContext(__DRIcontext * driContextPriv);
 
 const __DRIextension **__driDriverGetExtensions_i915(void);
+extern const __DRI2fenceExtension intelFenceExtension;
 
 extern GLboolean
 intelMakeCurrent(__DRIcontext * driContextPriv,

--- a/src/mesa/drivers/dri/i915/intel_syncobj.c
+++ b/src/mesa/drivers/dri/i915/intel_syncobj.c

@@ -25,11 +25,11 @@
  *
  */
 
-/** @file intel_syncobj.c
+/**
+ * \file
+ * \brief Support for GL_ARB_sync and EGL_KHR_fence_sync.
  *
- * Support for ARB_sync
- *
- * ARB_sync is implemented by flushing the current batchbuffer and keeping a
+ * GL_ARB_sync is implemented by flushing the current batchbuffer and keeping a
  * reference on it. We can then check for completion or wait for completion
  * using the normal buffer object mechanisms. This does mean that if an
  * application is using many sync objects, it will emit small batchbuffers
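
(Aside: the GL_ARB_sync behavior described in the comment above looks like
this from the application's side; a minimal sketch using only core GL entry
points, not part of this patch:)

/* Each glFenceSync() makes the driver above flush the current batchbuffer
 * and hold a reference to it. */
GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

/* Completion is then checked via the normal buffer-object busy/wait paths. */
GLenum result = glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT,
                                 1000000000ull /* 1 s, in ns */);
if (result == GL_ALREADY_SIGNALED || result == GL_CONDITION_SATISFIED) {
   /* The GPU has finished all commands issued before the fence. */
}
glDeleteSync(fence);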
@@ -45,13 +45,94 @@
 #include "intel_batchbuffer.h"
 #include "intel_reg.h"
 
+struct intel_fence {
+   /** The fence waits for completion of this batch. */
+   drm_intel_bo *batch_bo;
+
+   bool signalled;
+};
+
 struct intel_gl_sync_object {
    struct gl_sync_object Base;
 
-   /** Batch associated with this sync object */
-   drm_intel_bo *bo;
+   struct intel_fence fence;
 };
 
+static void
+intel_fence_finish(struct intel_fence *fence)
+{
+   if (fence->batch_bo)
+      drm_intel_bo_unreference(fence->batch_bo);
+}
+
+static void
+intel_fence_insert(struct intel_context *intel, struct intel_fence *fence)
+{
+   assert(!fence->batch_bo);
+   assert(!fence->signalled);
+
+   intel_batchbuffer_emit_mi_flush(intel);
+   fence->batch_bo = intel->batch.bo;
+   drm_intel_bo_reference(fence->batch_bo);
+   intel_batchbuffer_flush(intel);
+}
+
+static bool
+intel_fence_has_completed(struct intel_fence *fence)
+{
+   if (fence->signalled)
+      return true;
+
+   if (fence->batch_bo && !drm_intel_bo_busy(fence->batch_bo)) {
+      drm_intel_bo_unreference(fence->batch_bo);
+      fence->batch_bo = NULL;
+      fence->signalled = true;
+      return true;
+   }
+
+   return false;
+}
+
+/**
+ * Return true if the function successfully signals or has already signalled.
+ * (This matches the behavior expected from __DRI2fence::client_wait_sync).
+ */
+static bool
+intel_fence_client_wait(struct intel_context *intel, struct intel_fence *fence,
+                        uint64_t timeout)
+{
+   if (fence->signalled)
+      return true;
+
+   assert(fence->batch_bo);
+
+   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
+    * immediately for timeouts <= 0. The best we can do is to clamp the
+    * timeout to INT64_MAX. This limits the maximum timeout from 584 years to
+    * 292 years - likely not a big deal.
+    */
+   if (timeout > INT64_MAX)
+      timeout = INT64_MAX;
+
+   if (drm_intel_gem_bo_wait(fence->batch_bo, timeout) != 0)
+      return false;
+
+   fence->signalled = true;
+   drm_intel_bo_unreference(fence->batch_bo);
+   fence->batch_bo = NULL;
+
+   return true;
+}
+
+static void
+intel_fence_server_wait(struct intel_context *intel, struct intel_fence *fence)
+{
+   /* We have nothing to do for WaitSync. Our GL command stream is sequential,
+    * so given that the sync object has already flushed the batchbuffer, any
+    * batchbuffers coming after this waitsync will naturally not occur until
+    * the previous one is done.
+    */
+}
+
 static struct gl_sync_object *
 intel_gl_new_sync_object(struct gl_context *ctx, GLuint id)
 {
@@ -69,9 +150,7 @@ intel_gl_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
 {
    struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
-   if (sync->bo)
-      drm_intel_bo_unreference(sync->bo);
-
+   intel_fence_finish(&sync->fence);
    free(sync);
 }
 
@@ -82,37 +161,28 @@ intel_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
    struct intel_context *intel = intel_context(ctx);
    struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
    assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
-   intel_batchbuffer_emit_mi_flush(intel);
 
-   sync->bo = intel->batch.bo;
-   drm_intel_bo_reference(sync->bo);
-
-   intel_flush(ctx);
+   intel_fence_insert(intel, &sync->fence);
 }
 
 static void
 intel_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
                           GLbitfield flags, GLuint64 timeout)
 {
+   struct intel_context *intel = intel_context(ctx);
    struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
-   if (sync->bo && drm_intel_gem_bo_wait(sync->bo, timeout) == 0) {
+   if (intel_fence_client_wait(intel, &sync->fence, timeout))
       s->StatusFlag = 1;
-      drm_intel_bo_unreference(sync->bo);
-      sync->bo = NULL;
-   }
 }
 
-/* We have nothing to do for WaitSync. Our GL command stream is sequential,
- * so given that the sync object has already flushed the batchbuffer,
- * any batchbuffers coming after this waitsync will naturally not occur until
- * the previous one is done.
- */
 static void
 intel_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
                           GLbitfield flags, GLuint64 timeout)
 {
+   struct intel_context *intel = intel_context(ctx);
+   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
+
+   intel_fence_server_wait(intel, &sync->fence);
 }
 
 static void
@@ -120,11 +190,8 @@ intel_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
 {
    struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
-   if (sync->bo && !drm_intel_bo_busy(sync->bo)) {
-      drm_intel_bo_unreference(sync->bo);
-      sync->bo = NULL;
+   if (intel_fence_has_completed(&sync->fence))
       s->StatusFlag = 1;
-   }
 }
 
 void
@@ -137,3 +204,56 @@ intel_init_syncobj_functions(struct dd_function_table *functions)
    functions->ClientWaitSync = intel_gl_client_wait_sync;
    functions->ServerWaitSync = intel_gl_server_wait_sync;
 }
+
+static void *
+intel_dri_create_fence(__DRIcontext *ctx)
+{
+   struct intel_context *intel = ctx->driverPrivate;
+   struct intel_fence *fence;
+
+   fence = calloc(1, sizeof(*fence));
+   if (!fence)
+      return NULL;
+
+   intel_fence_insert(intel, fence);
+   return fence;
+}
+
+static void
+intel_dri_destroy_fence(__DRIscreen *screen, void *driver_fence)
+{
+   struct intel_fence *fence = driver_fence;
+
+   intel_fence_finish(fence);
+   free(fence);
+}
+
+static GLboolean
+intel_dri_client_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags,
+                           uint64_t timeout)
+{
+   struct intel_context *intel = ctx->driverPrivate;
+   struct intel_fence *fence = driver_fence;
+
+   return intel_fence_client_wait(intel, fence, timeout);
+}
+
+static void
+intel_dri_server_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags)
+{
+   struct intel_context *intel = ctx->driverPrivate;
+   struct intel_fence *fence = driver_fence;
+
+   intel_fence_server_wait(intel, fence);
+}
+
+const __DRI2fenceExtension intelFenceExtension = {
+   .base = { __DRI2_FENCE, 1 },
+
+   .create_fence = intel_dri_create_fence,
+   .destroy_fence = intel_dri_destroy_fence,
+   .client_wait_sync = intel_dri_client_wait_sync,
+   .server_wait_sync = intel_dri_server_wait_sync,
+   .get_fence_from_cl_event = NULL,
+};
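
For reference, the loader side consumes this extension roughly as follows.
This is a hedged sketch: the struct and function names (my_egl_sync,
my_create_fence_sync, my_client_wait_sync, my_destroy_sync) are illustrative,
modeled on how an EGL driver such as egl_dri2 would call through
__DRI2fenceExtension; only the extension's entry points and the
__DRI2_FENCE_FLAG_FLUSH_COMMANDS flag come from dri_interface.h.

#include <stdlib.h>
#include <GL/gl.h>
#include "dri_interface.h"

/* Hypothetical loader-side wrapper around a DRI2 fence. */
struct my_egl_sync {
   void *driver_fence;   /* opaque handle returned by create_fence */
};

static struct my_egl_sync *
my_create_fence_sync(const __DRI2fenceExtension *ext, __DRIcontext *dri_ctx)
{
   struct my_egl_sync *sync = calloc(1, sizeof(*sync));
   if (!sync)
      return NULL;

   /* Lands in intel_dri_create_fence: flushes and references the batch. */
   sync->driver_fence = ext->create_fence(dri_ctx);
   if (!sync->driver_fence) {
      free(sync);
      return NULL;
   }
   return sync;
}

static GLboolean
my_client_wait_sync(const __DRI2fenceExtension *ext, __DRIcontext *dri_ctx,
                    struct my_egl_sync *sync, uint64_t timeout_ns)
{
   /* Lands in intel_dri_client_wait_sync -> intel_fence_client_wait. */
   return ext->client_wait_sync(dri_ctx, sync->driver_fence,
                                __DRI2_FENCE_FLAG_FLUSH_COMMANDS, timeout_ns);
}

static void
my_destroy_sync(const __DRI2fenceExtension *ext, __DRIscreen *dri_screen,
                struct my_egl_sync *sync)
{
   /* Lands in intel_dri_destroy_fence. */
   ext->destroy_fence(dri_screen, sync->driver_fence);
   free(sync);
}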