gallium/u_queue: isolate util_queue_fence implementation

it's cleaner this way.

Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
This commit is contained in:
Marek Olšák 2017-02-20 18:42:41 +01:00
parent 4aea8fe7e0
commit 24847dd1b5
6 changed files with 30 additions and 26 deletions

View File

@@ -88,7 +88,7 @@ remove_from_atexit_list(struct util_queue *queue)
}
/****************************************************************************
* util_queue implementation
* util_queue_fence
*/
static void
@@ -101,7 +101,7 @@ util_queue_fence_signal(struct util_queue_fence *fence)
}
void
util_queue_job_wait(struct util_queue_fence *fence)
util_queue_fence_wait(struct util_queue_fence *fence)
{
pipe_mutex_lock(fence->mutex);
while (!fence->signalled)
@@ -109,6 +109,27 @@ util_queue_job_wait(struct util_queue_fence *fence)
pipe_mutex_unlock(fence->mutex);
}
void
util_queue_fence_init(struct util_queue_fence *fence)
{
memset(fence, 0, sizeof(*fence));
pipe_mutex_init(fence->mutex);
pipe_condvar_init(fence->cond);
fence->signalled = true;
}
void
util_queue_fence_destroy(struct util_queue_fence *fence)
{
assert(fence->signalled);
pipe_condvar_destroy(fence->cond);
pipe_mutex_destroy(fence->mutex);
}
/****************************************************************************
* util_queue implementation
*/
struct thread_input {
struct util_queue *queue;
int thread_index;
@@ -266,23 +287,6 @@ util_queue_destroy(struct util_queue *queue)
FREE(queue->threads);
}
void
util_queue_fence_init(struct util_queue_fence *fence)
{
memset(fence, 0, sizeof(*fence));
pipe_mutex_init(fence->mutex);
pipe_condvar_init(fence->cond);
fence->signalled = true;
}
void
util_queue_fence_destroy(struct util_queue_fence *fence)
{
assert(fence->signalled);
pipe_condvar_destroy(fence->cond);
pipe_mutex_destroy(fence->mutex);
}
void
util_queue_add_job(struct util_queue *queue,
void *job,

View File

@@ -87,7 +87,7 @@ void util_queue_add_job(struct util_queue *queue,
util_queue_execute_func execute,
util_queue_execute_func cleanup);
void util_queue_job_wait(struct util_queue_fence *fence);
void util_queue_fence_wait(struct util_queue_fence *fence);
int64_t util_queue_get_thread_time_nano(struct util_queue *queue,
unsigned thread_index);

View File

@@ -230,7 +230,7 @@ fd_batch_sync(struct fd_batch *batch)
{
if (!batch->ctx->screen->reorder)
return;
util_queue_job_wait(&batch->flush_fence);
util_queue_fence_wait(&batch->flush_fence);
}
static void

View File

@@ -1200,7 +1200,7 @@ again:
* in a compiler thread.
*/
if (thread_index < 0)
util_queue_job_wait(&sel->ready);
util_queue_fence_wait(&sel->ready);
pipe_mutex_lock(sel->mutex);
@@ -1832,7 +1832,7 @@ static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
if (shader->is_optimized) {
util_queue_job_wait(&shader->optimized_ready);
util_queue_fence_wait(&shader->optimized_ready);
util_queue_fence_destroy(&shader->optimized_ready);
}
@@ -1884,7 +1884,7 @@ static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
};
util_queue_job_wait(&sel->ready);
util_queue_fence_wait(&sel->ready);
if (current_shader[sel->type]->cso == sel) {
current_shader[sel->type]->cso = NULL;

View File

@@ -1118,7 +1118,7 @@ void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
struct amdgpu_cs *cs = amdgpu_cs(rcs);
/* Wait for any pending ioctl of this CS to complete. */
util_queue_job_wait(&cs->flush_completed);
util_queue_fence_wait(&cs->flush_completed);
}
static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,

View File

@@ -486,7 +486,7 @@ void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
/* Wait for any pending ioctl of this CS to complete. */
if (util_queue_is_initialized(&cs->ws->cs_queue))
util_queue_job_wait(&cs->flush_completed);
util_queue_fence_wait(&cs->flush_completed);
}
/* Add the given fence to a slab buffer fence list.