panfrost: Get rid of ctx->payloads[]

Now that vertex/tiler payloads are re-initialized at draw/launch_grid
time we can get rid of the ctx->payloads[] field and allocate those
payload templates on the stack.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Tested-by: Marge Bot <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4083>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4083>
This commit is contained in:
Boris Brezillon 2020-03-06 11:46:39 +01:00
parent 093da77ce6
commit b1a6a15aaf
3 changed files with 33 additions and 42 deletions

View File

@@ -102,7 +102,7 @@ panfrost_launch_grid(struct pipe_context *pipe,
ctx->compute_grid = info;
/* TODO: Stub */
struct midgard_payload_vertex_tiler *payload = &ctx->payloads[PIPE_SHADER_COMPUTE];
struct midgard_payload_vertex_tiler payload;
/* We implement OpenCL inputs as uniforms (or a UBO -- same thing), so
* reuse the graphics path for this by lowering to Gallium */
@@ -117,19 +117,23 @@ panfrost_launch_grid(struct pipe_context *pipe,
if (info->input)
pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, &ubuf);
panfrost_vt_init(ctx, PIPE_SHADER_COMPUTE, payload);
panfrost_vt_init(ctx, PIPE_SHADER_COMPUTE, &payload);
panfrost_emit_shader_meta(batch, PIPE_SHADER_COMPUTE, payload);
panfrost_emit_const_buf(batch, PIPE_SHADER_COMPUTE, payload);
panfrost_emit_shared_memory(batch, info, payload);
panfrost_emit_shader_meta(batch, PIPE_SHADER_COMPUTE, &payload);
panfrost_emit_const_buf(batch, PIPE_SHADER_COMPUTE, &payload);
panfrost_emit_shared_memory(batch, info, &payload);
/* Invoke according to the grid info */
panfrost_pack_work_groups_compute(&payload->prefix,
info->grid[0], info->grid[1], info->grid[2],
info->block[0], info->block[1], info->block[2], false);
panfrost_pack_work_groups_compute(&payload.prefix,
info->grid[0], info->grid[1],
info->grid[2],
info->block[0], info->block[1],
info->block[2],
false);
panfrost_new_job(batch, JOB_TYPE_COMPUTE, true, 0, payload, sizeof(*payload), false);
panfrost_new_job(batch, JOB_TYPE_COMPUTE, true, 0, &payload,
sizeof(payload), false);
panfrost_flush_all_batches(ctx, true);
}

View File

@@ -398,14 +398,13 @@ panfrost_draw_vbo(
ctx->instance_count = info->instance_count;
ctx->active_prim = info->mode;
struct midgard_payload_vertex_tiler vt, tp;
unsigned vertex_count;
for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i)
panfrost_vt_init(ctx, i, &ctx->payloads[i]);
panfrost_vt_init(ctx, PIPE_SHADER_VERTEX, &vt);
panfrost_vt_init(ctx, PIPE_SHADER_FRAGMENT, &tp);
panfrost_vt_set_draw_info(ctx, info, g2m_draw_mode(mode),
&ctx->payloads[PIPE_SHADER_VERTEX],
&ctx->payloads[PIPE_SHADER_FRAGMENT],
panfrost_vt_set_draw_info(ctx, info, g2m_draw_mode(mode), &vt, &tp,
&vertex_count, &ctx->padded_count);
panfrost_statistics_record(ctx, info);
@@ -413,38 +412,29 @@ panfrost_draw_vbo(
/* Dispatch "compute jobs" for the vertex/tiler pair as (1,
* vertex_count, 1) */
panfrost_pack_work_groups_fused(
&ctx->payloads[PIPE_SHADER_VERTEX].prefix,
&ctx->payloads[PIPE_SHADER_FRAGMENT].prefix,
1, vertex_count, info->instance_count,
1, 1, 1);
panfrost_pack_work_groups_fused(&vt.prefix, &tp.prefix,
1, vertex_count, info->instance_count,
1, 1, 1);
/* Emit all sort of descriptors. */
panfrost_emit_vertex_data(batch, &ctx->payloads[PIPE_SHADER_VERTEX]);
panfrost_emit_vertex_data(batch, &vt);
panfrost_emit_varying_descriptor(batch,
ctx->padded_count *
ctx->instance_count,
&ctx->payloads[PIPE_SHADER_VERTEX],
&ctx->payloads[PIPE_SHADER_FRAGMENT]);
panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX,
&ctx->payloads[PIPE_SHADER_VERTEX]);
panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT,
&ctx->payloads[PIPE_SHADER_FRAGMENT]);
panfrost_emit_vertex_attr_meta(batch,
&ctx->payloads[PIPE_SHADER_VERTEX]);
for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i) {
panfrost_emit_sampler_descriptors(batch, i, &ctx->payloads[i]);
panfrost_emit_texture_descriptors(batch, i, &ctx->payloads[i]);
panfrost_emit_const_buf(batch, i, &ctx->payloads[i]);
}
panfrost_emit_viewport(batch, &ctx->payloads[PIPE_SHADER_FRAGMENT]);
&vt, &tp);
panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX, &vt);
panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT, &tp);
panfrost_emit_vertex_attr_meta(batch, &vt);
panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_VERTEX, &vt);
panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_FRAGMENT, &tp);
panfrost_emit_texture_descriptors(batch, PIPE_SHADER_VERTEX, &vt);
panfrost_emit_texture_descriptors(batch, PIPE_SHADER_FRAGMENT, &tp);
panfrost_emit_const_buf(batch, PIPE_SHADER_VERTEX, &vt);
panfrost_emit_const_buf(batch, PIPE_SHADER_FRAGMENT, &tp);
panfrost_emit_viewport(batch, &tp);
/* Fire off the draw itself */
panfrost_emit_vertex_tiler_jobs(batch,
&ctx->payloads[PIPE_SHADER_VERTEX],
&ctx->payloads[PIPE_SHADER_FRAGMENT]);
panfrost_emit_vertex_tiler_jobs(batch, &vt, &tp);
/* Adjust the batch stack size based on the new shader stack sizes. */
panfrost_batch_adjust_stack_size(batch);

View File

@@ -116,9 +116,6 @@ struct panfrost_context {
uint64_t tf_prims_generated;
struct panfrost_query *occlusion_query;
/* Each draw has corresponding vertex and tiler payloads */
struct midgard_payload_vertex_tiler payloads[PIPE_SHADER_TYPES];
unsigned vertex_count;
unsigned instance_count;
enum pipe_prim_type active_prim;