pvr: Add graphics pipeline hard coding infrastructure.

Signed-off-by: Karmjit Mahil <Karmjit.Mahil@imgtec.com>
Acked-by: Alyssa Rosenzweig <alyssa@collabora.com>
Reviewed-by: Frank Binns <frank.binns@imgtec.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16999>
This commit is contained in:
Karmjit Mahil 2022-06-07 15:29:46 +01:00 committed by Marge Bot
parent e47350a245
commit 7c25c6f04e
3 changed files with 216 additions and 49 deletions

View File

@ -42,6 +42,11 @@
* This should eventually be deleted as the compiler becomes more capable.
*/
/* Identifies which kind of pipeline a hard coding table entry provides
 * shaders for; selects the active member of the union in
 * struct pvr_hard_coding_data.
 */
enum pvr_hard_code_shader_type {
   PVR_HARD_CODE_SHADER_TYPE_COMPUTE,
   PVR_HARD_CODE_SHADER_TYPE_GRAPHICS,
};
/* Applications for which the compiler is capable of generating valid shaders.
*/
static const char *const compilable_progs[] = {
@ -50,6 +55,7 @@ static const char *const compilable_progs[] = {
static const struct pvr_hard_coding_data {
const char *const name;
enum pvr_hard_code_shader_type type;
union {
struct {
@ -61,11 +67,26 @@ static const struct pvr_hard_coding_data {
const struct pvr_hard_code_compute_build_info build_info;
} compute;
struct {
struct rogue_shader_binary *const *const vert_shaders;
struct rogue_shader_binary *const *const frag_shaders;
const struct pvr_vertex_shader_state *const *const vert_shader_states;
const struct pvr_fragment_shader_state *const *const frag_shader_states;
const struct pvr_hard_code_graphics_build_info *const
*const build_infos;
uint32_t shader_count;
} graphics;
};
} hard_coding_table[] = {
{
.name = "simple-compute",
.type = PVR_HARD_CODE_SHADER_TYPE_COMPUTE,
.compute = {
.shader = pvr_simple_compute_shader,
.shader_size = sizeof(pvr_simple_compute_shader),
@ -132,6 +153,8 @@ VkResult pvr_hard_code_compute_pipeline(
rogue_get_slc_cache_line_size(&device->pdevice->dev_info);
const struct pvr_hard_coding_data *const data = pvr_get_hard_coding_data();
assert(data->type == PVR_HARD_CODE_SHADER_TYPE_COMPUTE);
mesa_logd("Hard coding compute pipeline for %s", data->name);
*build_info_out = data->compute.build_info;
@ -143,3 +166,80 @@ VkResult pvr_hard_code_compute_pipeline(
cache_line_size,
&shader_state_out->bo);
}
/* Returns the hard coded vertex and fragment shader binaries for the
 * pipeline_n'th pipeline of the currently running application.
 *
 * pipeline_n must be below the hard coded shader count for the application.
 */
void pvr_hard_code_graphics_shaders(
   uint32_t pipeline_n,
   struct rogue_shader_binary **const vert_shader_out,
   struct rogue_shader_binary **const frag_shader_out)
{
   const struct pvr_hard_coding_data *const coding = pvr_get_hard_coding_data();

   assert(coding->type == PVR_HARD_CODE_SHADER_TYPE_GRAPHICS);
   assert(pipeline_n < coding->graphics.shader_count);

   mesa_logd("Hard coding graphics pipeline for %s", coding->name);

   *vert_shader_out = coding->graphics.vert_shaders[pipeline_n];
   *frag_shader_out = coding->graphics.frag_shaders[pipeline_n];
}
/* Copies the hard coded vertex shader state for the given pipeline.
 *
 * pipeline_n:
 *    The pipeline number; must be below the hard coded shader count.
 */
void pvr_hard_code_graphics_vertex_state(
   uint32_t pipeline_n,
   struct pvr_vertex_shader_state *const vert_state_out)
{
   const struct pvr_hard_coding_data *const data = pvr_get_hard_coding_data();

   assert(data->type == PVR_HARD_CODE_SHADER_TYPE_GRAPHICS);
   assert(pipeline_n < data->graphics.shader_count);

   /* Index by pipeline_n rather than hard coding entry 0, consistent with
    * pvr_hard_code_graphics_shaders() and the bounds assert above; always
    * using [0] would silently return the wrong state for pipeline_n > 0.
    */
   *vert_state_out = *data->graphics.vert_shader_states[pipeline_n];
}
/* Copies the hard coded fragment shader state for the given pipeline.
 *
 * pipeline_n:
 *    The pipeline number; must be below the hard coded shader count.
 */
void pvr_hard_code_graphics_fragment_state(
   uint32_t pipeline_n,
   struct pvr_fragment_shader_state *const frag_state_out)
{
   const struct pvr_hard_coding_data *const data = pvr_get_hard_coding_data();

   assert(data->type == PVR_HARD_CODE_SHADER_TYPE_GRAPHICS);
   assert(pipeline_n < data->graphics.shader_count);

   /* Index by pipeline_n rather than hard coding entry 0, consistent with
    * pvr_hard_code_graphics_shaders() and the bounds assert above; always
    * using [0] would silently return the wrong state for pipeline_n > 0.
    */
   *frag_state_out = *data->graphics.frag_shader_states[pipeline_n];
}
void pvr_hard_code_graphics_inject_build_info(
uint32_t pipeline_n,
struct rogue_build_ctx *ctx,
struct pvr_explicit_constant_usage *const vert_common_data_out,
struct pvr_explicit_constant_usage *const frag_common_data_out)
{
const struct pvr_hard_coding_data *const data = pvr_get_hard_coding_data();
assert(data->type == PVR_HARD_CODE_SHADER_TYPE_GRAPHICS);
assert(pipeline_n < data->graphics.shader_count);
ctx->stage_data = data->graphics.build_infos[pipeline_n]->stage_data;
ctx->common_data[MESA_SHADER_VERTEX] =
data->graphics.build_infos[pipeline_n]->vert_common_data;
ctx->common_data[MESA_SHADER_FRAGMENT] =
data->graphics.build_infos[pipeline_n]->frag_common_data;
assert(
ctx->common_data[MESA_SHADER_VERTEX].temps ==
data->graphics.vert_shader_states[pipeline_n]->stage_state.temps_count);
assert(
ctx->common_data[MESA_SHADER_FRAGMENT].temps ==
data->graphics.frag_shader_states[pipeline_n]->stage_state.temps_count);
assert(ctx->common_data[MESA_SHADER_VERTEX].coeffs ==
data->graphics.vert_shader_states[pipeline_n]
->stage_state.coefficient_size);
assert(ctx->common_data[MESA_SHADER_FRAGMENT].coeffs ==
data->graphics.frag_shader_states[pipeline_n]
->stage_state.coefficient_size);
*vert_common_data_out =
data->graphics.build_infos[pipeline_n]->vert_explicit_conts_usage;
*frag_common_data_out =
data->graphics.build_infos[pipeline_n]->frag_explicit_conts_usage;
}

View File

@ -39,6 +39,8 @@
struct pvr_compute_pipeline_shader_state;
struct pvr_device;
struct pvr_fragment_shader_state;
struct pvr_vertex_shader_state;
struct pvr_explicit_constant_usage {
/* Hardware register number assigned to the explicit constant with the lower
@ -58,6 +60,16 @@ struct pvr_hard_code_compute_build_info {
struct pvr_explicit_constant_usage explicit_conts_usage;
};
/* Per-pipeline build information for hard coded graphics shaders. The
 * contents are copied into the shared rogue build context when the pipeline
 * is created (see pvr_hard_code_graphics_inject_build_info()).
 */
struct pvr_hard_code_graphics_build_info {
   struct rogue_build_data stage_data;

   struct rogue_common_build_data vert_common_data;
   struct rogue_common_build_data frag_common_data;

   struct pvr_explicit_constant_usage vert_explicit_conts_usage;
   struct pvr_explicit_constant_usage frag_explicit_conts_usage;
};
/* Returns true if the shader for the currently running program requires hard
* coded shaders.
*/
@ -68,4 +80,29 @@ VkResult pvr_hard_code_compute_pipeline(
struct pvr_compute_pipeline_shader_state *const shader_state_out,
struct pvr_hard_code_compute_build_info *const build_info_out);
/* pipeline_n:
 *    The pipeline number. Each pipeline created requires unique hard
 *    coding so a pipeline number is necessary to identify which data to use.
 *    The pipeline number for the first pipeline to be created is 0 and
 *    should be incremented for each subsequent pipeline.
 */
void pvr_hard_code_graphics_shaders(
   uint32_t pipeline_n,
   struct rogue_shader_binary **const vert_shader_out,
   struct rogue_shader_binary **const frag_shader_out);

void pvr_hard_code_graphics_vertex_state(
   uint32_t pipeline_n,
   struct pvr_vertex_shader_state *vert_state);

/* Fixed parameter-name typo: "pipelien_n" -> "pipeline_n" (prototype
 * parameter names are documentation only, so callers are unaffected).
 */
void pvr_hard_code_graphics_fragment_state(
   uint32_t pipeline_n,
   struct pvr_fragment_shader_state *frag_state);

void pvr_hard_code_graphics_inject_build_info(
   uint32_t pipeline_n,
   struct rogue_build_ctx *ctx,
   struct pvr_explicit_constant_usage *const vert_common_data_out,
   struct pvr_explicit_constant_usage *const frag_common_data_out);
#endif /* PVR_HARDCODE_SHADERS_H */

View File

@ -1393,75 +1393,90 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device,
struct pvr_graphics_pipeline *const gfx_pipeline)
{
/* FIXME: Remove this hard coding. */
const struct pvr_explicit_constant_usage explicit_const_usage = {
struct pvr_explicit_constant_usage vert_explicit_const_usage = {
.start_offset = 16,
};
struct pvr_explicit_constant_usage frag_explicit_const_usage = {
.start_offset = 0,
};
const bool requires_hard_coding = pvr_hard_code_shader_required();
static uint32_t hard_code_pipeline_n = 0;
const VkPipelineVertexInputStateCreateInfo *const vertex_input_state =
pCreateInfo->pVertexInputState;
const uint32_t cache_line_size =
rogue_get_slc_cache_line_size(&device->pdevice->dev_info);
struct rogue_compiler *compiler = device->pdevice->compiler;
struct rogue_build_ctx *ctx;
VkResult result;
/* Compile the USC shaders. */
/* Setup shared build context. */
ctx = rogue_create_build_context(compiler);
if (!ctx)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
/* NIR middle-end translation. */
for (gl_shader_stage stage = MESA_SHADER_FRAGMENT; stage > MESA_SHADER_NONE;
stage--) {
const VkPipelineShaderStageCreateInfo *create_info;
size_t stage_index = gfx_pipeline->stage_indices[stage];
if (requires_hard_coding) {
pvr_hard_code_graphics_shaders(hard_code_pipeline_n,
&ctx->binary[MESA_SHADER_VERTEX],
&ctx->binary[MESA_SHADER_FRAGMENT]);
} else {
/* NIR middle-end translation. */
for (gl_shader_stage stage = MESA_SHADER_FRAGMENT;
stage > MESA_SHADER_NONE;
stage--) {
const VkPipelineShaderStageCreateInfo *create_info;
size_t stage_index = gfx_pipeline->stage_indices[stage];
/* Skip unused/inactive stages. */
if (stage_index == ~0)
continue;
/* Skip unused/inactive stages. */
if (stage_index == ~0)
continue;
create_info = &pCreateInfo->pStages[stage_index];
create_info = &pCreateInfo->pStages[stage_index];
/* SPIR-V to NIR. */
ctx->nir[stage] = pvr_spirv_to_nir(ctx, stage, create_info);
if (!ctx->nir[stage]) {
ralloc_free(ctx);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
/* SPIR-V to NIR. */
ctx->nir[stage] = pvr_spirv_to_nir(ctx, stage, create_info);
if (!ctx->nir[stage]) {
ralloc_free(ctx);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
/* Pre-back-end analysis and optimization, driver data extraction. */
/* TODO: Analyze and cull unused I/O between stages. */
/* TODO: Allocate UBOs between stages;
* pipeline->layout->set_{count,layout}.
*/
/* Back-end translation. */
for (gl_shader_stage stage = MESA_SHADER_FRAGMENT;
stage > MESA_SHADER_NONE;
stage--) {
if (!ctx->nir[stage])
continue;
ctx->rogue[stage] = pvr_nir_to_rogue(ctx, ctx->nir[stage]);
if (!ctx->rogue[stage]) {
ralloc_free(ctx);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
ctx->binary[stage] = pvr_rogue_to_binary(ctx, ctx->rogue[stage]);
if (!ctx->binary[stage]) {
ralloc_free(ctx);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
}
/* Pre-back-end analysis and optimization, driver data extraction. */
/* TODO: Analyze and cull unused I/O between stages. */
/* TODO: Allocate UBOs between stages;
* pipeline->layout->set_{count,layout}.
*/
/* Back-end translation. */
for (gl_shader_stage stage = MESA_SHADER_FRAGMENT; stage > MESA_SHADER_NONE;
stage--) {
if (!ctx->nir[stage])
continue;
ctx->rogue[stage] = pvr_nir_to_rogue(ctx, ctx->nir[stage]);
if (!ctx->rogue[stage]) {
ralloc_free(ctx);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
ctx->binary[stage] = pvr_rogue_to_binary(ctx, ctx->rogue[stage]);
if (!ctx->binary[stage]) {
ralloc_free(ctx);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (requires_hard_coding) {
pvr_hard_code_graphics_vertex_state(hard_code_pipeline_n,
&gfx_pipeline->vertex_shader_state);
} else {
pvr_vertex_state_init(gfx_pipeline,
&ctx->common_data[MESA_SHADER_VERTEX],
&ctx->stage_data.vs);
}
pvr_vertex_state_init(gfx_pipeline,
&ctx->common_data[MESA_SHADER_VERTEX],
&ctx->stage_data.vs);
result = pvr_gpu_upload_usc(device,
ctx->binary[MESA_SHADER_VERTEX]->data,
ctx->binary[MESA_SHADER_VERTEX]->size,
@ -1470,8 +1485,14 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device,
if (result != VK_SUCCESS)
goto err_free_build_context;
pvr_fragment_state_init(gfx_pipeline,
&ctx->common_data[MESA_SHADER_FRAGMENT]);
if (requires_hard_coding) {
pvr_hard_code_graphics_fragment_state(
hard_code_pipeline_n,
&gfx_pipeline->fragment_shader_state);
} else {
pvr_fragment_state_init(gfx_pipeline,
&ctx->common_data[MESA_SHADER_FRAGMENT]);
}
result = pvr_gpu_upload_usc(device,
ctx->binary[MESA_SHADER_FRAGMENT]->data,
@ -1486,6 +1507,13 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device,
* case the optimization doesn't happen.
*/
if (requires_hard_coding) {
pvr_hard_code_graphics_inject_build_info(hard_code_pipeline_n,
ctx,
&vert_explicit_const_usage,
&frag_explicit_const_usage);
}
/* TODO: The programs we use are hard coded for now, but these should be
* selected dynamically.
*/
@ -1525,7 +1553,7 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device,
device,
allocator,
&ctx->common_data[MESA_SHADER_VERTEX].ubo_data,
&explicit_const_usage,
&vert_explicit_const_usage,
gfx_pipeline->base.layout,
PVR_STAGE_ALLOCATION_VERTEX_GEOMETRY,
&gfx_pipeline->vertex_shader_state.uniform_state.pds_code,
@ -1548,7 +1576,7 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device,
device,
allocator,
&ctx->common_data[MESA_SHADER_FRAGMENT].ubo_data,
&explicit_const_usage,
&frag_explicit_const_usage,
gfx_pipeline->base.layout,
PVR_STAGE_ALLOCATION_FRAGMENT,
&gfx_pipeline->fragment_shader_state.uniform_state.pds_code,
@ -1558,6 +1586,8 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device,
ralloc_free(ctx);
hard_code_pipeline_n++;
return VK_SUCCESS;
err_free_vertex_uniform_program: