glthread: use glthread->used instead of glthread->next_batch->used

Remove one pointer dereference per command by keeping a copy of "used" directly in glthread as well.

Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7719>
This commit is contained in:
Marek Olšák 2020-10-09 22:16:29 -04:00 committed by Marge Bot
parent 0a4004e5fa
commit 70b778945b
3 changed files with 25 additions and 13 deletions

View File

@@ -121,6 +121,7 @@ _mesa_glthread_init(struct gl_context *ctx)
util_queue_fence_init(&glthread->batches[i].fence);
}
glthread->next_batch = &glthread->batches[glthread->next];
glthread->used = 0;
glthread->enabled = true;
glthread->stats.queue = &glthread->queue;
@@ -207,8 +208,7 @@ _mesa_glthread_flush_batch(struct gl_context *ctx)
if (!glthread->enabled)
return;
struct glthread_batch *next = glthread->next_batch;
if (!next->used)
if (!glthread->used)
return;
/* Pin threads regularly to the same Zen CCX that the main thread is
@@ -230,6 +230,8 @@ _mesa_glthread_flush_batch(struct gl_context *ctx)
}
}
struct glthread_batch *next = glthread->next_batch;
/* Debug: execute the batch immediately from this thread.
*
* Note that glthread_unmarshal_batch() changes the dispatch table so we'll
@@ -241,13 +243,15 @@ _mesa_glthread_flush_batch(struct gl_context *ctx)
return;
}
p_atomic_add(&glthread->stats.num_offloaded_items, next->used);
p_atomic_add(&glthread->stats.num_offloaded_items, glthread->used);
next->used = glthread->used;
util_queue_add_job(&glthread->queue, next, &next->fence,
glthread_unmarshal_batch, NULL, 0);
glthread->last = glthread->next;
glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
glthread->next_batch = &glthread->batches[glthread->next];
glthread->used = 0;
}
/**
@@ -280,8 +284,10 @@ _mesa_glthread_finish(struct gl_context *ctx)
synced = true;
}
if (next->used) {
p_atomic_add(&glthread->stats.num_direct_items, next->used);
if (glthread->used) {
p_atomic_add(&glthread->stats.num_direct_items, glthread->used);
next->used = glthread->used;
glthread->used = 0;
/* Since glthread_unmarshal_batch changes the dispatch to direct,
* restore it after it's done.

View File

@@ -100,7 +100,12 @@ struct glthread_batch
/** The worker thread will access the context with this. */
struct gl_context *ctx;
/** Amount of data used by batch commands, in bytes. */
/**
* Amount of data used by batch commands, in bytes.
* This is 0 when it's being filled because glthread::used holds the real
* value temporarily, and glthread::used is copied to this variable when
* the batch is submitted.
*/
int used;
/** Data contained in the command buffer. */
@@ -153,6 +158,9 @@ struct glthread_state
/** Index of the batch being filled and about to be submitted. */
unsigned next;
/** Amount of data filled in next_batch, in bytes. */
int used;
/** Upload buffer. */
struct gl_buffer_object *upload_buffer;
uint8_t *upload_ptr;

View File

@@ -57,17 +57,15 @@ _mesa_glthread_allocate_command(struct gl_context *ctx,
int size)
{
struct glthread_state *glthread = &ctx->GLThread;
struct glthread_batch *next = glthread->next_batch;
struct marshal_cmd_base *cmd_base;
if (unlikely(next->used + size > MARSHAL_MAX_CMD_SIZE)) {
if (unlikely(glthread->used + size > MARSHAL_MAX_CMD_SIZE))
_mesa_glthread_flush_batch(ctx);
next = glthread->next_batch;
}
struct glthread_batch *next = glthread->next_batch;
const int aligned_size = align(size, 8);
cmd_base = (struct marshal_cmd_base *)&next->buffer[next->used];
next->used += aligned_size;
struct marshal_cmd_base *cmd_base =
(struct marshal_cmd_base *)&next->buffer[glthread->used];
glthread->used += aligned_size;
cmd_base->cmd_id = cmd_id;
cmd_base->cmd_size = aligned_size;
return cmd_base;