vkd3d: Prototype implementation of shader module identifier.

Signed-off-by: Hans-Kristian Arntzen <post@arntzen-software.no>
Hans-Kristian Arntzen, 2022-03-21 16:24:32 +01:00
commit fc69f469d5 (parent 4d708bd7fe)
4 changed files with 296 additions and 25 deletions
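For context (not part of this patch): VK_EXT_shader_module_identifier lets a driver hand back an opaque identifier for a compiled VkShaderModule, which can later stand in for the SPIR-V itself if the driver still has the module in its own cache. A minimal sketch of that flow, with device, module, layout and pipeline as assumed handles:

/* Sketch only; assumes the extension is enabled, error handling omitted. */
VkShaderModuleIdentifierEXT id = { VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT };
vkGetShaderModuleIdentifierEXT(device, module, &id); /* persist id.identifierSize + id.identifier */

VkPipelineShaderStageModuleIdentifierCreateInfoEXT id_info =
    { VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT };
id_info.identifierSize = id.identifierSize;
id_info.pIdentifier = id.identifier;

VkPipelineShaderStageCreateInfo stage = { VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO };
stage.pNext = &id_info;
stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
stage.module = VK_NULL_HANDLE; /* no SPIR-V provided at all */
stage.pName = "main";

VkComputePipelineCreateInfo info = { VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO };
info.flags = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT;
info.stage = stage;
info.layout = layout;
/* VK_PIPELINE_COMPILE_REQUIRED_EXT signals a cache miss; fall back to real SPIR-V then. */
VkResult vr = vkCreateComputePipelines(device, VK_NULL_HANDLE, 1, &info, NULL, &pipeline);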


@ -198,6 +198,8 @@ enum vkd3d_pipeline_blob_chunk_type
/* VkShaderStage is stored in upper 16 bits. */
VKD3D_PIPELINE_BLOB_CHUNK_TYPE_SHADER_META = 4,
VKD3D_PIPELINE_BLOB_CHUNK_TYPE_PSO_COMPAT = 5,
/* VkShaderStage is stored in upper 16 bits. */
VKD3D_PIPELINE_BLOB_CHUNK_TYPE_SHADER_IDENTIFIER = 6,
VKD3D_PIPELINE_BLOB_CHUNK_TYPE_MASK = 0xffff,
VKD3D_PIPELINE_BLOB_CHUNK_INDEX_SHIFT = 16,
};
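For context (not part of this patch): with VKD3D_PIPELINE_BLOB_CHUNK_INDEX_SHIFT = 16 and the 0xffff type mask, per-stage chunks such as the new SHADER_IDENTIFIER one are keyed as sketched below; the helper names are hypothetical.

/* Hypothetical helpers illustrating the keying scheme used by find_blob_chunk() callers. */
static uint32_t chunk_key(uint32_t type, VkShaderStageFlagBits stage)
{
    return type | ((uint32_t)stage << VKD3D_PIPELINE_BLOB_CHUNK_INDEX_SHIFT);
}

static uint32_t chunk_base_type(uint32_t key)
{
    return key & VKD3D_PIPELINE_BLOB_CHUNK_TYPE_MASK; /* e.g. ..._SHADER_IDENTIFIER */
}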
@ -380,6 +382,11 @@ HRESULT d3d12_cached_pipeline_state_validate(struct d3d12_device *device,
if (memcmp(blob->cache_uuid, device_properties->pipelineCacheUUID, VK_UUID_SIZE) != 0)
return D3D12_ERROR_DRIVER_VERSION_MISMATCH;
if (pipeline_library_flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER)
if (memcmp(blob->cache_uuid, device->device_info.shader_module_identifier_properties.shaderModuleIdentifierAlgorithmUUID,
VK_UUID_SIZE) != 0)
return D3D12_ERROR_DRIVER_VERSION_MISMATCH;
/* In stream archives, we perform checksums ahead of time before accepting a stream blob into internal cache.
* No need to do redundant work. */
if (!(pipeline_library_flags & VKD3D_PIPELINE_LIBRARY_FLAG_STREAM_ARCHIVE))
@ -473,6 +480,11 @@ bool d3d12_cached_pipeline_state_is_dummy(const struct d3d12_cached_pipeline_sta
VKD3D_PIPELINE_BLOB_CHUNK_TYPE_MASK))
return false;
if (find_blob_chunk_masked(chunk, payload_size,
VKD3D_PIPELINE_BLOB_CHUNK_TYPE_SHADER_IDENTIFIER,
VKD3D_PIPELINE_BLOB_CHUNK_TYPE_MASK))
return false;
return true;
}
@ -599,7 +611,8 @@ HRESULT vkd3d_create_pipeline_cache_from_d3d12_desc(struct d3d12_device *device,
HRESULT vkd3d_get_cached_spirv_code_from_d3d12_desc(
const struct d3d12_cached_pipeline_state *state,
VkShaderStageFlagBits stage,
struct vkd3d_shader_code *spirv_code,
VkPipelineShaderStageModuleIdentifierCreateInfoEXT *identifier)
{
const struct vkd3d_pipeline_blob *blob = state->blob.pCachedBlob;
const struct vkd3d_pipeline_blob_chunk_shader_meta *meta;
@ -623,6 +636,22 @@ HRESULT vkd3d_get_cached_spirv_code_from_d3d12_desc(
meta = CONST_CAST_CHUNK_DATA(chunk, shader_meta);
memcpy(&spirv_code->meta, &meta->meta, sizeof(meta->meta));
if (state->library && (state->library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER))
{
/* Only return identifier if we can use it. */
chunk = find_blob_chunk(CONST_CAST_CHUNK_BASE(blob), payload_size,
VKD3D_PIPELINE_BLOB_CHUNK_TYPE_SHADER_IDENTIFIER | (stage << VKD3D_PIPELINE_BLOB_CHUNK_INDEX_SHIFT));
if (chunk && chunk->size <= VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT)
{
identifier->identifierSize = chunk->size;
identifier->pIdentifier = chunk->data;
spirv_code->size = 0;
spirv_code->code = NULL;
return S_OK;
}
}
/* Aim to pull SPIR-V either from inlined chunk, or a link. */
chunk = find_blob_chunk(CONST_CAST_CHUNK_BASE(blob), payload_size,
VKD3D_PIPELINE_BLOB_CHUNK_TYPE_VARINT_SPIRV | (stage << VKD3D_PIPELINE_BLOB_CHUNK_INDEX_SHIFT));
@ -797,6 +826,33 @@ static void vkd3d_shader_code_serialize_inline(const struct vkd3d_shader_code *c
*inout_chunk = chunk;
}
static void vkd3d_shader_code_serialize_identifier(struct d3d12_pipeline_library *pipeline_library,
const struct vkd3d_shader_code *code,
const VkShaderModuleIdentifierEXT *identifier, VkShaderStageFlagBits stage,
struct vkd3d_pipeline_blob_chunk **inout_chunk)
{
struct vkd3d_pipeline_blob_chunk *chunk = *inout_chunk;
struct vkd3d_pipeline_blob_chunk_shader_meta *meta;
if (!identifier->identifierSize)
return;
/* Store identifier. */
chunk->type = VKD3D_PIPELINE_BLOB_CHUNK_TYPE_SHADER_IDENTIFIER | (stage << VKD3D_PIPELINE_BLOB_CHUNK_INDEX_SHIFT);
chunk->size = identifier->identifierSize;
memcpy(chunk->data, identifier->identifier, chunk->size);
chunk = finish_and_iterate_blob_chunk(chunk);
/* Store meta information for SPIR-V. */
chunk->type = VKD3D_PIPELINE_BLOB_CHUNK_TYPE_SHADER_META | (stage << VKD3D_PIPELINE_BLOB_CHUNK_INDEX_SHIFT);
chunk->size = sizeof(*meta);
meta = CAST_CHUNK_DATA(chunk, shader_meta);
meta->meta = code->meta;
chunk = finish_and_iterate_blob_chunk(chunk);
*inout_chunk = chunk;
}
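For context (not part of this patch): the serializer above relies on the generic chunk header used throughout the pipeline library code; its exact definition is not in this diff, but the layout assumed by chunk->type / chunk->size / chunk->data is roughly:

/* Approximate layout; the real definition lives elsewhere in the vkd3d private headers. */
struct vkd3d_pipeline_blob_chunk
{
    uint32_t type;  /* VKD3D_PIPELINE_BLOB_CHUNK_TYPE_* | (stage << VKD3D_PIPELINE_BLOB_CHUNK_INDEX_SHIFT) */
    uint32_t size;  /* payload size in bytes, excluding this header */
    uint8_t data[]; /* raw identifier bytes, or a vkd3d_pipeline_blob_chunk_shader_meta, etc. */
};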
static void vkd3d_shader_code_serialize_referenced(struct d3d12_pipeline_library *pipeline_library,
const struct vkd3d_shader_code *code,
VkShaderStageFlagBits stage, size_t varint_size,
@ -1017,6 +1073,27 @@ static VkResult vkd3d_serialize_pipeline_state_referenced(struct d3d12_pipeline_
}
}
if (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER)
{
if (d3d12_pipeline_state_is_graphics(state))
{
for (i = 0; i < state->graphics.stage_count; i++)
{
vkd3d_shader_code_serialize_identifier(pipeline_library,
&state->graphics.code[i],
&state->graphics.identifiers[i], state->graphics.stages[i].stage,
&chunk);
}
}
else if (d3d12_pipeline_state_is_compute(state))
{
vkd3d_shader_code_serialize_identifier(pipeline_library,
&state->compute.code,
&state->compute.identifier, VK_SHADER_STAGE_COMPUTE_BIT,
&chunk);
}
}
return VK_SUCCESS;
}
@ -1076,6 +1153,29 @@ VkResult vkd3d_serialize_pipeline_state(struct d3d12_pipeline_library *pipeline_
}
}
if (pipeline_library && (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER))
{
if (d3d12_pipeline_state_is_graphics(state))
{
for (i = 0; i < state->graphics.stage_count; i++)
{
if (state->graphics.identifiers[i].identifierSize)
{
vk_blob_size += VKD3D_PIPELINE_BLOB_CHUNK_SIZE_RAW(state->graphics.identifiers[i].identifierSize);
vk_blob_size += VKD3D_PIPELINE_BLOB_CHUNK_SIZE(shader_meta);
}
}
}
else if (d3d12_pipeline_state_is_compute(state))
{
if (state->compute.identifier.identifierSize)
{
vk_blob_size += VKD3D_PIPELINE_BLOB_CHUNK_SIZE_RAW(state->compute.identifier.identifierSize);
vk_blob_size += VKD3D_PIPELINE_BLOB_CHUNK_SIZE(shader_meta);
}
}
}
total_size += vk_blob_size;
if (blob && *size < total_size)
@ -1089,7 +1189,13 @@ VkResult vkd3d_serialize_pipeline_state(struct d3d12_pipeline_library *pipeline_
blob->vkd3d_shader_interface_key = state->device->shader_interface_key;
blob->vkd3d_build = vkd3d_build;
if (pipeline_library && (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER))
{
memcpy(blob->cache_uuid,
pipeline_library->device->device_info.shader_module_identifier_properties.shaderModuleIdentifierAlgorithmUUID,
VK_UUID_SIZE);
}
else if (!pipeline_library || (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_USE_PIPELINE_CACHE_UUID))
memcpy(blob->cache_uuid, device_properties->pipelineCacheUUID, VK_UUID_SIZE);
else
memset(blob->cache_uuid, 0, VK_UUID_SIZE);
@ -1776,7 +1882,14 @@ static void d3d12_pipeline_library_serialize_stream_archive_header(struct d3d12_
header->reserved = 0;
header->vkd3d_build = vkd3d_build;
header->vkd3d_shader_interface_key = pipeline_library->device->shader_interface_key;
if (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER)
{
memcpy(header->cache_uuid,
pipeline_library->device->device_info.shader_module_identifier_properties.shaderModuleIdentifierAlgorithmUUID,
VK_UUID_SIZE);
}
else if (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_USE_PIPELINE_CACHE_UUID)
memcpy(header->cache_uuid, device_properties->pipelineCacheUUID, VK_UUID_SIZE);
else
memset(header->cache_uuid, 0, VK_UUID_SIZE);
@ -1814,7 +1927,13 @@ static HRESULT d3d12_pipeline_library_serialize(struct d3d12_pipeline_library *p
header->vkd3d_build = vkd3d_build;
header->vkd3d_shader_interface_key = pipeline_library->device->shader_interface_key;
if (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER)
{
memcpy(header->cache_uuid,
pipeline_library->device->device_info.shader_module_identifier_properties.shaderModuleIdentifierAlgorithmUUID,
VK_UUID_SIZE);
}
else if (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_USE_PIPELINE_CACHE_UUID)
memcpy(header->cache_uuid, device_properties->pipelineCacheUUID, VK_UUID_SIZE);
else
memset(header->cache_uuid, 0, VK_UUID_SIZE);
@ -2016,6 +2135,11 @@ static HRESULT d3d12_pipeline_library_validate_stream_format_header(struct d3d12
if (memcmp(header->cache_uuid, device_properties->pipelineCacheUUID, VK_UUID_SIZE) != 0)
return D3D12_ERROR_DRIVER_VERSION_MISMATCH;
if (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER)
if (memcmp(header->cache_uuid, device->device_info.shader_module_identifier_properties.shaderModuleIdentifierAlgorithmUUID,
VK_UUID_SIZE) != 0)
return D3D12_ERROR_DRIVER_VERSION_MISMATCH;
return S_OK;
}
@ -2198,6 +2322,17 @@ static HRESULT d3d12_pipeline_library_read_blob_toc_format(struct d3d12_pipeline
}
}
if (pipeline_library->flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER)
{
if (memcmp(header->cache_uuid, device->device_info.shader_module_identifier_properties.shaderModuleIdentifierAlgorithmUUID,
VK_UUID_SIZE) != 0)
{
if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_LOG)
INFO("Rejecting pipeline library due to shaderModuleIdentifierAlgorithmUUID mismatch.\n");
return D3D12_ERROR_DRIVER_VERSION_MISMATCH;
}
}
total_toc_entries = header->pipeline_count + header->spirv_count + header->driver_cache_count;
header_entry_size = offsetof(struct vkd3d_serialized_pipeline_library_toc, entries) +
@ -2277,6 +2412,11 @@ static HRESULT d3d12_pipeline_library_init(struct d3d12_pipeline_library *pipeli
pipeline_library->internal_refcount = 1;
pipeline_library->flags = flags;
/* Mutually exclusive features. */
if ((flags & VKD3D_PIPELINE_LIBRARY_FLAG_USE_PIPELINE_CACHE_UUID) &&
(flags & VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER))
return E_INVALIDARG;
if (!blob_length && blob)
return E_INVALIDARG;
@ -3010,7 +3150,9 @@ HRESULT vkd3d_pipeline_library_init_disk_cache(struct vkd3d_pipeline_library_dis
if (!(vkd3d_config_flags & VKD3D_CONFIG_FLAG_SHADER_CACHE_SYNC))
flags |= VKD3D_PIPELINE_LIBRARY_FLAG_STREAM_ARCHIVE_PARSE_ASYNC;
if (device->device_info.shader_module_identifier_features.shaderModuleIdentifier)
flags |= VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER;
else if (!(vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_NO_SERIALIZE_SPIRV))
flags |= VKD3D_PIPELINE_LIBRARY_FLAG_SAVE_FULL_SPIRV;
/* For internal caches, we're mostly just concerned with caching SPIR-V.


@ -2100,10 +2100,16 @@ CONST_VTBL struct ID3D12PipelineStateVtbl d3d12_pipeline_state_vtbl =
static HRESULT vkd3d_load_spirv_from_cached_state(struct d3d12_device *device,
const struct d3d12_cached_pipeline_state *cached_state,
VkShaderStageFlagBits stage, struct vkd3d_shader_code *spirv_code,
VkPipelineShaderStageModuleIdentifierCreateInfoEXT *identifier)
{
HRESULT hr;
identifier->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT;
identifier->pNext = NULL;
identifier->identifierSize = 0;
identifier->pIdentifier = NULL;
if (!cached_state->blob.CachedBlobSizeInBytes)
{
if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_LOG)
@ -2114,7 +2120,7 @@ static HRESULT vkd3d_load_spirv_from_cached_state(struct d3d12_device *device,
if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_IGNORE_SPIRV)
return E_FAIL;
hr = vkd3d_get_cached_spirv_code_from_d3d12_desc(cached_state, stage, spirv_code, identifier);
if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_LOG)
{
@ -2184,6 +2190,7 @@ static void d3d12_pipeline_state_init_compile_arguments(struct d3d12_pipeline_st
static HRESULT vkd3d_setup_shader_stage(struct d3d12_pipeline_state *state, struct d3d12_device *device,
VkPipelineShaderStageCreateInfo *stage_desc, VkShaderStageFlagBits stage,
VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *required_subgroup_size_info,
const VkPipelineShaderStageModuleIdentifierCreateInfoEXT *identifier_create_info,
const struct vkd3d_shader_code *spirv_code)
{
stage_desc->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
@ -2197,6 +2204,9 @@ static HRESULT vkd3d_setup_shader_stage(struct d3d12_pipeline_state *state, stru
if (!d3d12_device_validate_shader_meta(device, &spirv_code->meta))
return E_INVALIDARG;
if (identifier_create_info && identifier_create_info->identifierSize)
stage_desc->pNext = identifier_create_info;
if (((spirv_code->meta.flags & VKD3D_SHADER_META_FLAG_USES_SUBGROUP_SIZE) &&
device->device_info.subgroup_size_control_features.subgroupSizeControl) ||
spirv_code->meta.cs_required_wave_size)
@ -2206,6 +2216,9 @@ static HRESULT vkd3d_setup_shader_stage(struct d3d12_pipeline_state *state, stru
if (required_subgroup_size_info)
{
required_subgroup_size_info->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
required_subgroup_size_info->pNext = (void*)stage_desc->pNext;
if (spirv_code->meta.cs_required_wave_size)
{
/* [WaveSize(N)] attribute in SM 6.6. */
@ -2225,8 +2238,6 @@ static HRESULT vkd3d_setup_shader_stage(struct d3d12_pipeline_state *state, stru
stage_desc->flags &= ~VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT;
}
required_subgroup_size_info->requiredSubgroupSize = subgroup_size_alignment;
}
@ -2240,7 +2251,10 @@ static HRESULT vkd3d_setup_shader_stage(struct d3d12_pipeline_state *state, stru
}
}
if (!identifier_create_info || identifier_create_info->identifierSize == 0)
return d3d12_pipeline_state_create_shader_module(device, &stage_desc->module, spirv_code);
else
return S_OK;
}
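For context (not part of this patch): when both an identifier and a required subgroup size are present, the assignments above are meant to produce the chain sketched below; the final stage_desc->pNext = required_subgroup_size_info step lives in surrounding code outside this hunk.

/* Sketch of the resulting pNext chain, assumed from the assignments in this hunk. */
stage_desc->pNext = identifier_create_info;                     /* done early when identifierSize != 0 */
required_subgroup_size_info->pNext = (void *)stage_desc->pNext; /* new hook-in from this patch */
stage_desc->pNext = required_subgroup_size_info;                /* existing code, not shown here */
/* Driver sees: stage create info -> required subgroup size -> module identifier. */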
static HRESULT vkd3d_compile_shader_stage(struct d3d12_pipeline_state *state, struct d3d12_device *device,
@ -2318,6 +2332,8 @@ static HRESULT vkd3d_late_compile_shader_stages(struct d3d12_pipeline_state *sta
rwlock_lock_write(&state->lock);
for (i = 0; i < graphics->stage_count; i++)
{
graphics->identifier_create_infos[i].identifierSize = 0;
if (graphics->stages[i].module == VK_NULL_HANDLE && !graphics->code[i].size &&
graphics->cached_desc.bytecode[i].BytecodeLength)
{
@ -2413,15 +2429,29 @@ static HRESULT vkd3d_create_compute_pipeline(struct d3d12_pipeline_state *state,
pipeline_info.pNext = NULL;
pipeline_info.flags = 0;
if (state->compute.identifier_create_info.identifierSize == 0)
{
if (FAILED(hr = vkd3d_compile_shader_stage(state, device,
VK_SHADER_STAGE_COMPUTE_BIT, code, spirv_code)))
return hr;
}
if (FAILED(hr = vkd3d_setup_shader_stage(state, device,
&pipeline_info.stage, VK_SHADER_STAGE_COMPUTE_BIT,
&required_subgroup_size_info,
&state->compute.identifier_create_info,
spirv_code)))
return hr;
if (pipeline_info.stage.module != VK_NULL_HANDLE &&
device->device_info.shader_module_identifier_features.shaderModuleIdentifier)
{
state->compute.identifier.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT;
state->compute.identifier.pNext = NULL;
VK_CALL(vkGetShaderModuleIdentifierEXT(device->vk_device, pipeline_info.stage.module,
&state->compute.identifier));
}
pipeline_info.layout = state->root_signature->compute.vk_pipeline_layout;
pipeline_info.basePipelineHandle = VK_NULL_HANDLE;
pipeline_info.basePipelineIndex = -1;
@ -2447,9 +2477,44 @@ static HRESULT vkd3d_create_compute_pipeline(struct d3d12_pipeline_state *state,
else
feedback_info.pipelineStageCreationFeedbackCount = 0;
if (pipeline_info.stage.module == VK_NULL_HANDLE)
pipeline_info.flags |= VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT;
vr = VK_CALL(vkCreateComputePipelines(device->vk_device,
vk_cache, 1, &pipeline_info, NULL, &state->compute.vk_pipeline));
if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_LOG)
{
if (pipeline_info.flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT)
{
if (vr == VK_SUCCESS)
INFO("[IDENTIFIER] Successfully created compute pipeline from identifier.\n");
else if (vr == VK_PIPELINE_COMPILE_REQUIRED_EXT)
INFO("[IDENTIFIER] Failed to create compute pipeline from identifier, falling back ...\n");
}
else
INFO("[IDENTIFIER] None compute.\n");
}
/* Fallback. */
if (vr == VK_PIPELINE_COMPILE_REQUIRED_EXT)
{
pipeline_info.flags &= ~VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT;
if (FAILED(hr = vkd3d_compile_shader_stage(state, device,
VK_SHADER_STAGE_COMPUTE_BIT, code, spirv_code)))
return hr;
if (FAILED(hr = vkd3d_setup_shader_stage(state, device,
&pipeline_info.stage, VK_SHADER_STAGE_COMPUTE_BIT,
&required_subgroup_size_info, NULL,
spirv_code)))
return hr;
vr = VK_CALL(vkCreateComputePipelines(device->vk_device,
vk_cache, 1, &pipeline_info, NULL, &state->compute.vk_pipeline));
}
TRACE("Called vkCreateComputePipelines.\n");
VK_CALL(vkDestroyShaderModule(device->vk_device, pipeline_info.stage.module, NULL));
if (vr < 0)
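For context (not part of this patch): condensed, the compute path introduced above is the following; logging, feedback and error handling are trimmed.

/* Condensed sketch of the identifier-first strategy; not verbatim from the patch. */
if (pipeline_info.stage.module == VK_NULL_HANDLE)
    pipeline_info.flags |= VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT;

vr = VK_CALL(vkCreateComputePipelines(device->vk_device,
        vk_cache, 1, &pipeline_info, NULL, &state->compute.vk_pipeline));

if (vr == VK_PIPELINE_COMPILE_REQUIRED_EXT)
{
    /* Identifier missed the driver's cache: compile SPIR-V, create a real module, retry. */
    pipeline_info.flags &= ~VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT;
    vr = VK_CALL(vkCreateComputePipelines(device->vk_device,
            vk_cache, 1, &pipeline_info, NULL, &state->compute.vk_pipeline));
}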
@ -2487,7 +2552,8 @@ static HRESULT d3d12_pipeline_state_init_compute(struct d3d12_pipeline_state *st
}
vkd3d_load_spirv_from_cached_state(device, cached_pso,
VK_SHADER_STAGE_COMPUTE_BIT, &state->compute.code,
&state->compute.identifier_create_info);
hr = vkd3d_create_compute_pipeline(state, device, &desc->cs);
@ -3165,7 +3231,8 @@ static void d3d12_pipeline_state_graphics_load_spirv_from_cached_state(
for (i = 0; i < graphics->stage_count; i++)
{
if (FAILED(vkd3d_load_spirv_from_cached_state(device, cached_pso,
graphics->cached_desc.bytecode_stages[i], &graphics->code[i],
&graphics->identifier_create_infos[i])))
{
for (j = 0; j < i; j++)
{
@ -3192,14 +3259,18 @@ static HRESULT d3d12_pipeline_state_graphics_create_shader_stages(
* we fail to create shader module for whatever reason. */
for (i = 0; i < graphics->stage_count; i++)
{
if (graphics->identifier_create_infos[i].identifierSize == 0)
{
if (FAILED(hr = vkd3d_compile_shader_stage(state, device,
graphics->cached_desc.bytecode_stages[i],
&graphics->cached_desc.bytecode[i], &graphics->code[i])))
return hr;
}
if (FAILED(hr = vkd3d_setup_shader_stage(state, device,
&graphics->stages[i],
graphics->cached_desc.bytecode_stages[i],
NULL, &graphics->identifier_create_infos[i],
&graphics->code[i])))
return hr;
}
@ -3211,6 +3282,7 @@ static void d3d12_pipeline_state_graphics_handle_meta(struct d3d12_pipeline_stat
struct d3d12_device *device)
{
struct d3d12_graphics_pipeline_state *graphics = &state->graphics;
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
unsigned int i;
for (i = 0; i < graphics->stage_count; i++)
@ -3226,6 +3298,15 @@ static void d3d12_pipeline_state_graphics_handle_meta(struct d3d12_pipeline_stat
graphics->code[i].meta.hash);
graphics->stages[i].pSpecializationInfo = &graphics->spec_info[i].spec_info;
}
if (graphics->stages[i].module != VK_NULL_HANDLE &&
device->device_info.shader_module_identifier_features.shaderModuleIdentifier)
{
state->graphics.identifiers[i].sType = VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT;
state->graphics.identifiers[i].pNext = NULL;
VK_CALL(vkGetShaderModuleIdentifierEXT(device->vk_device, graphics->stages[i].module,
&state->graphics.identifiers[i]));
}
}
}
@ -4334,6 +4415,11 @@ VkPipeline d3d12_pipeline_state_create_pipeline_variant(struct d3d12_pipeline_st
}
}
/* If we're using identifiers, set the appropriate flag. */
for (i = 0; i < graphics->stage_count; i++)
if (pipeline_desc.pStages[i].module == VK_NULL_HANDLE)
pipeline_desc.flags |= VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT;
TRACE("Calling vkCreateGraphicsPipelines.\n");
if ((vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_LOG) &&
@ -4349,13 +4435,49 @@ VkPipeline d3d12_pipeline_state_create_pipeline_variant(struct d3d12_pipeline_st
else
feedback_info.pipelineStageCreationFeedbackCount = 0;
vr = VK_CALL(vkCreateGraphicsPipelines(device->vk_device, vk_cache, 1, &pipeline_desc, NULL, &vk_pipeline));
if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_LOG)
{
if (pipeline_desc.flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT)
{
if (vr == VK_SUCCESS)
INFO("[IDENTIFIER] Successfully created graphics pipeline from identifier.\n");
else if (vr == VK_PIPELINE_COMPILE_REQUIRED_EXT)
INFO("[IDENTIFIER] Failed to create graphics pipeline from identifier, falling back ...\n");
}
else
INFO("[IDENTIFIER] No graphics identifier\n");
}
if (vr == VK_PIPELINE_COMPILE_REQUIRED_EXT)
{
if (FAILED(hr = vkd3d_late_compile_shader_stages(state)))
{
ERR("Late compilation of SPIR-V failed.\n");
return VK_NULL_HANDLE;
}
pipeline_desc.flags &= ~VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT;
/* Clean up any temporary SPIR-V modules we created. */
if (pipeline_desc.pStages == stages)
for (i = 0; i < graphics->stage_count; i++)
if (stages[i].module != graphics->stages[i].module)
VK_CALL(vkDestroyShaderModule(device->vk_device, stages[i].module, NULL));
/* Internal modules are known to be non-null now. */
pipeline_desc.pStages = state->graphics.stages;
vr = VK_CALL(vkCreateGraphicsPipelines(device->vk_device, vk_cache, 1, &pipeline_desc, NULL, &vk_pipeline));
}
TRACE("Completed vkCreateGraphicsPipelines.\n");
if (vr < 0)
{
WARN("Failed to create Vulkan graphics pipeline, vr %d.\n", vr);
return VK_NULL_HANDLE;
}
TRACE("Completed vkCreateGraphicsPipelines.\n");
/* Clean up any temporary SPIR-V modules we created. */
if (pipeline_desc.pStages == stages)


@ -1492,6 +1492,8 @@ struct d3d12_graphics_pipeline_state
struct vkd3d_shader_debug_ring_spec_info spec_info[VKD3D_MAX_SHADER_STAGES];
VkPipelineShaderStageCreateInfo stages[VKD3D_MAX_SHADER_STAGES];
struct vkd3d_shader_code code[VKD3D_MAX_SHADER_STAGES];
VkShaderModuleIdentifierEXT identifiers[VKD3D_MAX_SHADER_STAGES];
VkPipelineShaderStageModuleIdentifierCreateInfoEXT identifier_create_infos[VKD3D_MAX_SHADER_STAGES];
size_t stage_count;
struct d3d12_graphics_pipeline_state_cached_desc cached_desc;
@ -1546,6 +1548,8 @@ struct d3d12_compute_pipeline_state
{
VkPipeline vk_pipeline;
struct vkd3d_shader_code code;
VkShaderModuleIdentifierEXT identifier;
VkPipelineShaderStageModuleIdentifierCreateInfoEXT identifier_create_info;
};
/* To be able to load a pipeline from cache, this information must match exactly,
@ -1768,6 +1772,7 @@ enum vkd3d_pipeline_library_flags
VKD3D_PIPELINE_LIBRARY_FLAG_STREAM_ARCHIVE = 1 << 4,
/* We expect to parse archive from thread, so consider thread safety and cancellation points. */
VKD3D_PIPELINE_LIBRARY_FLAG_STREAM_ARCHIVE_PARSE_ASYNC = 1 << 5,
VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER = 1 << 6,
};
HRESULT d3d12_pipeline_library_create(struct d3d12_device *device, const void *blob,
@ -1781,7 +1786,8 @@ HRESULT vkd3d_create_pipeline_cache_from_d3d12_desc(struct d3d12_device *device,
HRESULT vkd3d_get_cached_spirv_code_from_d3d12_desc(
const struct d3d12_cached_pipeline_state *state,
VkShaderStageFlagBits stage,
struct vkd3d_shader_code *spirv_code);
struct vkd3d_shader_code *spirv_code,
VkPipelineShaderStageModuleIdentifierCreateInfoEXT *identifier);
VkResult vkd3d_serialize_pipeline_state(struct d3d12_pipeline_library *pipeline_library,
const struct d3d12_pipeline_state *state, size_t *size, void *data);
HRESULT d3d12_cached_pipeline_state_validate(struct d3d12_device *device,


@ -313,6 +313,7 @@ VK_DEVICE_EXT_PFN(vkCmdExecuteGeneratedCommandsNV)
/* VK_EXT_shader_module_identifier */
VK_DEVICE_EXT_PFN(vkGetShaderModuleIdentifierEXT)
VK_DEVICE_EXT_PFN(vkGetShaderModuleCreateInfoIdentifierEXT)
#undef VK_INSTANCE_PFN
#undef VK_INSTANCE_EXT_PFN
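For context (not part of this patch): the shader_module_identifier_features / shader_module_identifier_properties members consulted throughout the patch come from the standard VK_EXT_shader_module_identifier query structs; a minimal sketch of how they are typically populated (the actual device-info gathering is outside this diff):

/* Sketch: filling the feature/property structs via the usual pNext-chained queries. */
VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT id_features =
    { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT };
VkPhysicalDeviceFeatures2 features2 = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, &id_features };
vkGetPhysicalDeviceFeatures2(physical_device, &features2);

VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT id_props =
    { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_PROPERTIES_EXT };
VkPhysicalDeviceProperties2 props2 = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, &id_props };
vkGetPhysicalDeviceProperties2(physical_device, &props2);

/* id_features.shaderModuleIdentifier gates VKD3D_PIPELINE_LIBRARY_FLAG_SHADER_IDENTIFIER;
 * id_props.shaderModuleIdentifierAlgorithmUUID is what blob and archive headers now store. */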