anv: Get rid of graphics_pipeline_create_info_extra

Now that we no longer have meta, all pipelines get created via the normal
Vulkan pipeline creation mechanics.  There is no more need for this bit of
extra magic data that we've been passing around.

Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Topi Pohjolainen <topi.pohjolainen@intel.com>
Jason Ekstrand 2016-10-07 15:29:47 -07:00
parent dedc406ec8
commit ac77528f7d
7 changed files with 35 additions and 101 deletions
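
For orientation, here is a minimal, self-contained sketch of what the removed side channel looked like and what callers are left with. The struct fields are copied from the struct-removal hunk below; everything with a `_toy` suffix, including the simplified create function, is a stand-in invented for illustration and is not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The side channel this commit deletes (fields copied from the hunk below).
 * Only the now-removed meta paths ever filled it in. */
struct anv_graphics_pipeline_create_info {
   int8_t color_attachment_count;   /* if non-negative, overrode the subpass's color count */
   bool   use_repclear;
   bool   disable_vs;
   bool   use_rectlist;
};

/* Toy stand-ins for the Vulkan create info and the driver pipeline object. */
struct create_info_toy { uint32_t subpass; uint32_t stage_count; };
struct pipeline_toy    { uint32_t topology; bool ready; };

/* After the commit, creation consumes only the standard create info; there is
 * no second argument full of overrides to thread through every helper. */
static int graphics_pipeline_create_toy(const struct create_info_toy *info,
                                        struct pipeline_toy *pipeline)
{
   pipeline->topology = 0;                 /* would come from pInputAssemblyState */
   pipeline->ready = info->stage_count > 0;
   return pipeline->ready ? 0 : -1;
}

int main(void)
{
   struct create_info_toy info = { .subpass = 0, .stage_count = 2 };
   struct pipeline_toy pipeline;
   if (graphics_pipeline_create_toy(&info, &pipeline) == 0)
      printf("pipeline built from the create info alone\n");
   return 0;
}

The real entry points in the diff follow the same shape: every graphics_pipeline_create variant and anv_pipeline_init simply drops the extra argument.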

@@ -70,7 +70,6 @@ VkResult
 genX(graphics_pipeline_create)(VkDevice _device,
                                struct anv_pipeline_cache *cache,
                                const VkGraphicsPipelineCreateInfo *pCreateInfo,
-                               const struct anv_graphics_pipeline_create_info *extra,
                                const VkAllocationCallbacks *alloc,
                                VkPipeline *pPipeline);
 

@@ -267,7 +267,6 @@ populate_gs_prog_key(const struct gen_device_info *devinfo,
 static void
 populate_wm_prog_key(const struct gen_device_info *devinfo,
                      const VkGraphicsPipelineCreateInfo *info,
-                     const struct anv_graphics_pipeline_create_info *extra,
                      struct brw_wm_prog_key *key)
 {
    ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);
@@ -284,12 +283,8 @@ populate_wm_prog_key(const struct gen_device_info *devinfo,
    /* XXX Vulkan doesn't appear to specify */
    key->clamp_fragment_color = false;
 
-   if (extra && extra->color_attachment_count >= 0) {
-      key->nr_color_regions = extra->color_attachment_count;
-   } else {
-      key->nr_color_regions =
-         render_pass->subpasses[info->subpass].color_count;
-   }
+   key->nr_color_regions =
+      render_pass->subpasses[info->subpass].color_count;
 
    key->replicate_alpha = key->nr_color_regions > 1 &&
                           info->pMultisampleState &&
@@ -605,7 +600,6 @@ static VkResult
 anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         const VkGraphicsPipelineCreateInfo *info,
-                        const struct anv_graphics_pipeline_create_info *extra,
                         struct anv_shader_module *module,
                         const char *entrypoint,
                         const VkSpecializationInfo *spec_info)
@@ -617,7 +611,7 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
    struct anv_shader_bin *bin = NULL;
    unsigned char sha1[20];
 
-   populate_wm_prog_key(&pipeline->device->info, info, extra, &key);
+   populate_wm_prog_key(&pipeline->device->info, info, &key);
 
    if (cache) {
       anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
@@ -675,11 +669,6 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
          num_rts += array_len;
       }
 
-   if (pipeline->use_repclear) {
-      assert(num_rts == 1);
-      key.nr_color_regions = 1;
-   }
-
    if (num_rts == 0) {
       /* If we have no render targets, we need a null render target */
       rt_bindings[0] = (struct anv_pipeline_binding) {
@@ -707,8 +696,7 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
    unsigned code_size;
    const unsigned *shader_code =
       brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
-                     NULL, -1, -1, true, pipeline->use_repclear,
-                     &code_size, NULL);
+                     NULL, -1, -1, true, false, &code_size, NULL);
    if (shader_code == NULL) {
       ralloc_free(mem_ctx);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -1015,7 +1003,6 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
                   struct anv_device *device,
                   struct anv_pipeline_cache *cache,
                   const VkGraphicsPipelineCreateInfo *pCreateInfo,
-                  const struct anv_graphics_pipeline_create_info *extra,
                   const VkAllocationCallbacks *alloc)
 {
    VkResult result;
@@ -1043,8 +1030,6 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
    pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
                                   pCreateInfo->pRasterizationState->depthClampEnable;
 
-   pipeline->use_repclear = extra && extra->use_repclear;
-
    pipeline->needs_data_cache = false;
 
    /* When we free the pipeline, we detect stages based on the NULL status
@@ -1089,7 +1074,7 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
    }
 
    if (modules[MESA_SHADER_FRAGMENT]) {
-      result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra,
+      result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo,
                                        modules[MESA_SHADER_FRAGMENT],
                                        pStages[MESA_SHADER_FRAGMENT]->pName,
                                        pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
@@ -1097,26 +1082,14 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
         goto compile_fail;
    }
 
-   if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
-      /* Vertex is only optional if disable_vs is set */
-      assert(extra->disable_vs);
-   }
+   assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
 
    anv_pipeline_setup_l3_config(pipeline, false);
 
    const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;
 
-   uint64_t inputs_read;
-   if (extra && extra->disable_vs) {
-      /* If the VS is disabled, just assume the user knows what they're
-       * doing and apply the layout blindly. This can only come from
-       * meta, so this *should* be safe.
-       */
-      inputs_read = ~0ull;
-   } else {
-      inputs_read = get_vs_prog_data(pipeline)->inputs_read;
-   }
+   const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
 
    pipeline->vb_used = 0;
    for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
@@ -1152,9 +1125,6 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
    pipeline->primitive_restart = ia_info->primitiveRestartEnable;
    pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
 
-   if (extra && extra->use_rectlist)
-      pipeline->topology = _3DPRIM_RECTLIST;
-
    return VK_SUCCESS;
 
 compile_fail:
@@ -1173,7 +1143,6 @@ anv_graphics_pipeline_create(
    VkDevice _device,
    VkPipelineCache _cache,
    const VkGraphicsPipelineCreateInfo *pCreateInfo,
-   const struct anv_graphics_pipeline_create_info *extra,
    const VkAllocationCallbacks *pAllocator,
    VkPipeline *pPipeline)
 {
@@ -1183,13 +1152,13 @@
    switch (device->info.gen) {
    case 7:
       if (device->info.is_haswell)
-         return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+         return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
      else
-         return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+         return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
    case 8:
-      return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+      return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
    case 9:
-      return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+      return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
    default:
       unreachable("unsupported gen\n");
    }
@@ -1210,7 +1179,7 @@ VkResult anv_CreateGraphicsPipelines(
      result = anv_graphics_pipeline_create(_device,
                                            pipelineCache,
                                            &pCreateInfos[i],
-                                           NULL, pAllocator, &pPipelines[i]);
+                                           pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++) {
            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
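
As a side note on the fragment-key change in this file: with the color_attachment_count override gone, nr_color_regions always comes from the subpass named in the create info. Below is a minimal standalone sketch of that derivation; the structs are toy stand-ins shaped like the ones the hunks use (the real types are anv_render_pass, anv_subpass and brw_wm_prog_key), so the shapes here are assumptions for illustration only.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins; the real definitions live in the driver and the compiler. */
struct subpass_toy     { uint32_t color_count; };
struct render_pass_toy { struct subpass_toy subpasses[8]; };
struct wm_key_toy      { uint32_t nr_color_regions; bool replicate_alpha; };

/* With extra->color_attachment_count gone, the key is derived from the
 * subpass alone, mirroring the simplified populate_wm_prog_key(). */
static void populate_wm_key_toy(const struct render_pass_toy *pass,
                                uint32_t subpass,
                                struct wm_key_toy *key)
{
   key->nr_color_regions = pass->subpasses[subpass].color_count;
   /* The real code additionally checks pMultisampleState before setting this. */
   key->replicate_alpha = key->nr_color_regions > 1;
}

int main(void)
{
   struct render_pass_toy pass = { .subpasses = { [0] = { .color_count = 2 } } };
   struct wm_key_toy key;

   populate_wm_key_toy(&pass, 0, &key);
   assert(key.nr_color_regions == 2);
   printf("nr_color_regions = %u, replicate_alpha = %d\n",
          (unsigned)key.nr_color_regions, (int)key.replicate_alpha);
   return 0;
}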

@@ -1458,7 +1458,6 @@ struct anv_pipeline {
 
    struct anv_pipeline_layout * layout;
 
-   bool use_repclear;
    bool needs_data_cache;
 
    struct anv_shader_bin * shaders[MESA_SHADER_STAGES];
@@ -1526,23 +1525,10 @@ ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
 ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
 ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
 
-struct anv_graphics_pipeline_create_info {
-   /**
-    * If non-negative, overrides the color attachment count of the pipeline's
-    * subpass.
-    */
-   int8_t color_attachment_count;
-
-   bool use_repclear;
-   bool disable_vs;
-   bool use_rectlist;
-};
-
 VkResult
 anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                   struct anv_pipeline_cache *cache,
                   const VkGraphicsPipelineCreateInfo *pCreateInfo,
-                  const struct anv_graphics_pipeline_create_info *extra,
                   const VkAllocationCallbacks *alloc);
 
 VkResult
@@ -1557,7 +1543,6 @@ VkResult
 anv_graphics_pipeline_create(VkDevice device,
                              VkPipelineCache cache,
                              const VkGraphicsPipelineCreateInfo *pCreateInfo,
-                             const struct anv_graphics_pipeline_create_info *extra,
                              const VkAllocationCallbacks *alloc,
                              VkPipeline *pPipeline);
 

@@ -39,7 +39,6 @@ genX(graphics_pipeline_create)(
    VkDevice _device,
    struct anv_pipeline_cache * cache,
    const VkGraphicsPipelineCreateInfo* pCreateInfo,
-   const struct anv_graphics_pipeline_create_info *extra,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipeline)
 {
@@ -60,18 +59,18 @@ genX(graphics_pipeline_create)(
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    result = anv_pipeline_init(pipeline, device, cache,
-                              pCreateInfo, extra, pAllocator);
+                              pCreateInfo, pAllocator);
    if (result != VK_SUCCESS) {
       anv_free2(&device->alloc, pAllocator, pipeline);
       return result;
    }
 
    assert(pCreateInfo->pVertexInputState);
-   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState, extra);
+   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
 
    assert(pCreateInfo->pRasterizationState);
    emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
-                 pCreateInfo->pMultisampleState, pass, subpass, extra);
+                 pCreateInfo->pMultisampleState, pass, subpass);
 
    emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);
 
@@ -81,7 +80,7 @@ genX(graphics_pipeline_create)(
    emit_urb_setup(pipeline);
 
    emit_3dstate_clip(pipeline, pCreateInfo->pViewportState,
-                     pCreateInfo->pRasterizationState, extra);
+                     pCreateInfo->pRasterizationState);
    emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState);
 
    emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
@@ -107,7 +106,7 @@ genX(graphics_pipeline_create)(
    gen7_emit_vs_workaround_flush(brw);
 #endif
 
-   if (pipeline->vs_vec4 == NO_KERNEL || (extra && extra->disable_vs))
+   if (pipeline->vs_vec4 == NO_KERNEL)
       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs);
    else
       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
@@ -133,7 +132,7 @@ genX(graphics_pipeline_create)(
 
    const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
 
-   if (pipeline->gs_kernel == NO_KERNEL || (extra && extra->disable_vs)) {
+   if (pipeline->gs_kernel == NO_KERNEL) {
       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
    } else {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {

@@ -36,8 +36,7 @@
 
 static void
 emit_ia_state(struct anv_pipeline *pipeline,
-              const VkPipelineInputAssemblyStateCreateInfo *info,
-              const struct anv_graphics_pipeline_create_info *extra)
+              const VkPipelineInputAssemblyStateCreateInfo *info)
 {
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
       vft.PrimitiveTopologyType = pipeline->topology;
@@ -49,7 +48,6 @@ genX(graphics_pipeline_create)(
    VkDevice _device,
    struct anv_pipeline_cache * cache,
    const VkGraphicsPipelineCreateInfo* pCreateInfo,
-   const struct anv_graphics_pipeline_create_info *extra,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipeline)
 {
@@ -71,19 +69,19 @@ genX(graphics_pipeline_create)(
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    result = anv_pipeline_init(pipeline, device, cache,
-                              pCreateInfo, extra, pAllocator);
+                              pCreateInfo, pAllocator);
    if (result != VK_SUCCESS) {
       anv_free2(&device->alloc, pAllocator, pipeline);
       return result;
    }
 
    assert(pCreateInfo->pVertexInputState);
-   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState, extra);
+   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
    assert(pCreateInfo->pInputAssemblyState);
-   emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState, extra);
+   emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState);
    assert(pCreateInfo->pRasterizationState);
    emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
-                 pCreateInfo->pMultisampleState, pass, subpass, extra);
+                 pCreateInfo->pMultisampleState, pass, subpass);
    emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
    emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);
    emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
@@ -92,7 +90,7 @@ genX(graphics_pipeline_create)(
    emit_urb_setup(pipeline);
 
    emit_3dstate_clip(pipeline, pCreateInfo->pViewportState,
-                     pCreateInfo->pRasterizationState, extra);
+                     pCreateInfo->pRasterizationState);
    emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState);
 
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
@@ -179,7 +177,7 @@ genX(graphics_pipeline_create)(
    uint32_t vs_start = pipeline->vs_simd8 != NO_KERNEL ? pipeline->vs_simd8 :
                        pipeline->vs_vec4;
 
-   if (vs_start == NO_KERNEL || (extra && extra->disable_vs)) {
+   if (vs_start == NO_KERNEL) {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
         vs.FunctionEnable = false;
         /* Even if VS is disabled, SBE still gets the amount of

@@ -86,8 +86,6 @@ genX(compute_pipeline_create)(
       return result;
    }
 
-   pipeline->use_repclear = false;
-
    const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
 
    anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);

@@ -56,26 +56,14 @@ vertex_element_comp_control(enum isl_format format, unsigned comp)
 
 static void
 emit_vertex_input(struct anv_pipeline *pipeline,
-                  const VkPipelineVertexInputStateCreateInfo *info,
-                  const struct anv_graphics_pipeline_create_info *extra)
+                  const VkPipelineVertexInputStateCreateInfo *info)
 {
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
 
-   uint32_t elements;
-   if (extra && extra->disable_vs) {
-      /* If the VS is disabled, just assume the user knows what they're
-       * doing and apply the layout blindly. This can only come from
-       * meta, so this *should* be safe.
-       */
-      elements = 0;
-      for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++)
-         elements |= (1 << info->pVertexAttributeDescriptions[i].location);
-   } else {
-      /* Pull inputs_read out of the VS prog data */
-      uint64_t inputs_read = vs_prog_data->inputs_read;
-      assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
-      elements = inputs_read >> VERT_ATTRIB_GENERIC0;
-   }
+   /* Pull inputs_read out of the VS prog data */
+   const uint64_t inputs_read = vs_prog_data->inputs_read;
+   assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
+   const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0;
 
 #if GEN_GEN >= 8
    /* On BDW+, we only need to allocate space for base ids. Setting up
@@ -488,14 +476,13 @@ emit_rs_state(struct anv_pipeline *pipeline,
               const VkPipelineRasterizationStateCreateInfo *rs_info,
               const VkPipelineMultisampleStateCreateInfo *ms_info,
               const struct anv_render_pass *pass,
-              const struct anv_subpass *subpass,
-              const struct anv_graphics_pipeline_create_info *extra)
+              const struct anv_subpass *subpass)
 {
    struct GENX(3DSTATE_SF) sf = {
       GENX(3DSTATE_SF_header),
    };
 
-   sf.ViewportTransformEnable = !(extra && extra->use_rectlist);
+   sf.ViewportTransformEnable = true;
    sf.StatisticsEnable = true;
    sf.TriangleStripListProvokingVertexSelect = 0;
    sf.LineStripListProvokingVertexSelect = 0;
@@ -528,7 +515,7 @@
      raster.CullMode = vk_to_gen_cullmode[rs_info->cullMode];
      raster.FrontFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
      raster.BackFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
-     raster.ScissorRectangleEnable = !(extra && extra->use_rectlist);
+     raster.ScissorRectangleEnable = true;
 
 #if GEN_GEN >= 9
    /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
@@ -932,13 +919,12 @@ emit_cb_state(struct anv_pipeline *pipeline,
 static void
 emit_3dstate_clip(struct anv_pipeline *pipeline,
                   const VkPipelineViewportStateCreateInfo *vp_info,
-                  const VkPipelineRasterizationStateCreateInfo *rs_info,
-                  const struct anv_graphics_pipeline_create_info *extra)
+                  const VkPipelineRasterizationStateCreateInfo *rs_info)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
    (void) wm_prog_data;
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
-      clip.ClipEnable = !(extra && extra->use_rectlist);
+      clip.ClipEnable = true;
      clip.EarlyCullEnable = true;
      clip.APIMode = APIMODE_D3D,
      clip.ViewportXYClipTestEnable = true;
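
Finally, a standalone illustration of the vertex-input simplification from the emit_vertex_input() hunk earlier in this file: with the disable_vs escape hatch gone, the vertex-element bitmask is always derived from the compiled VS's inputs_read. The VERT_ATTRIB_GENERIC0 value below is a placeholder chosen so the example is self-contained, not necessarily the value Mesa's headers define.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder for Mesa's VERT_ATTRIB_GENERIC0; the real value comes from the
 * driver's headers and is assumed here only for illustration. */
#define VERT_ATTRIB_GENERIC0 16

/* Mirrors the simplified logic: the VS is now guaranteed to exist, so the
 * element mask always comes from its inputs_read bitfield and never from the
 * pVertexAttributeDescriptions fallback that the meta path relied on. */
static uint32_t vb_elements_from_inputs_read(uint64_t inputs_read)
{
   /* Only generic vertex attributes may be set for a Vulkan VS. */
   assert((inputs_read & ((1ull << VERT_ATTRIB_GENERIC0) - 1)) == 0);
   return (uint32_t)(inputs_read >> VERT_ATTRIB_GENERIC0);
}

int main(void)
{
   /* A VS that reads generic attributes 0 and 2. */
   const uint64_t inputs_read = (1ull << (VERT_ATTRIB_GENERIC0 + 0)) |
                                (1ull << (VERT_ATTRIB_GENERIC0 + 2));

   printf("elements = 0x%x\n",
          (unsigned)vb_elements_from_inputs_read(inputs_read));
   return 0;   /* prints: elements = 0x5 */
}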