turnip: use updated tokens from vk.xml

Signed-off-by: Eric Engestrom <eric@igalia.com>
Acked-by: Jason Ekstrand <jason.ekstrand@collabora.com>
Reviewed-by: Danylo Piliaiev <dpiliaiev@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17342>
Authored by Eric Engestrom on 2022-07-01 13:04:48 +01:00; committed by Marge Bot
parent 9db1af8757
commit 2c99dc5b22
8 changed files with 77 additions and 77 deletions
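
The renames in this commit are mechanical: structures, enums, and flag bits that turnip previously spelled with their VK_KHR_/VK_EXT_ suffixes (from VK_KHR_copy_commands2, VK_KHR_synchronization2, VK_KHR_create_renderpass2, VK_KHR_driver_properties, VK_EXT_descriptor_indexing, VK_EXT_extended_dynamic_state, and similar) switch to the unsuffixed names that vk.xml provides now that those extensions are core in Vulkan 1.2/1.3. The driver entry points keep their KHR names; only the token spellings change, and the Vulkan headers keep the old names as aliases of the core ones, so the change is purely textual. An abridged illustration of the aliasing pattern (excerpted in spirit from vulkan_core.h, not part of this change):

/* The promoted core names alias the old extension names, so renaming the
 * tokens in the driver does not change any generated code or ABI. */
typedef VkImageBlit2            VkImageBlit2KHR;            /* VK_KHR_copy_commands2, core in 1.3 */
typedef VkDependencyInfo        VkDependencyInfoKHR;        /* VK_KHR_synchronization2, core in 1.3 */
typedef VkRenderPassCreateInfo2 VkRenderPassCreateInfo2KHR; /* VK_KHR_create_renderpass2, core in 1.2 */
typedef VkDescriptorSetLayoutBindingFlagsCreateInfo
        VkDescriptorSetLayoutBindingFlagsCreateInfoEXT;     /* VK_EXT_descriptor_indexing, core in 1.2 */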

View File

@ -1413,7 +1413,7 @@ static void
tu6_blit_image(struct tu_cmd_buffer *cmd,
struct tu_image *src_image,
struct tu_image *dst_image,
const VkImageBlit2KHR *info,
const VkImageBlit2 *info,
VkFilter filter)
{
const struct blit_ops *ops = &r2d_ops;
@ -1547,7 +1547,7 @@ tu6_blit_image(struct tu_cmd_buffer *cmd,
VKAPI_ATTR void VKAPI_CALL
tu_CmdBlitImage2KHR(VkCommandBuffer commandBuffer,
const VkBlitImageInfo2KHR* pBlitImageInfo)
const VkBlitImageInfo2* pBlitImageInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
@ -1560,7 +1560,7 @@ tu_CmdBlitImage2KHR(VkCommandBuffer commandBuffer,
*/
if (src_image->vk_format == VK_FORMAT_D32_SFLOAT_S8_UINT ||
dst_image->vk_format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
VkImageBlit2KHR region = pBlitImageInfo->pRegions[i];
VkImageBlit2 region = pBlitImageInfo->pRegions[i];
u_foreach_bit(b, region.dstSubresource.aspectMask) {
region.srcSubresource.aspectMask = BIT(b);
region.dstSubresource.aspectMask = BIT(b);
@ -1607,7 +1607,7 @@ static void
tu_copy_buffer_to_image(struct tu_cmd_buffer *cmd,
struct tu_buffer *src_buffer,
struct tu_image *dst_image,
const VkBufferImageCopy2KHR *info)
const VkBufferImageCopy2 *info)
{
struct tu_cs *cs = &cmd->cs;
uint32_t layers = MAX2(info->imageExtent.depth, info->imageSubresource.layerCount);
@ -1670,7 +1670,7 @@ tu_copy_buffer_to_image(struct tu_cmd_buffer *cmd,
VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo)
const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, dst_image, pCopyBufferToImageInfo->dstImage);
@ -1689,7 +1689,7 @@ static void
tu_copy_image_to_buffer(struct tu_cmd_buffer *cmd,
struct tu_image *src_image,
struct tu_buffer *dst_buffer,
const VkBufferImageCopy2KHR *info)
const VkBufferImageCopy2 *info)
{
struct tu_cs *cs = &cmd->cs;
uint32_t layers = MAX2(info->imageExtent.depth, info->imageSubresource.layerCount);
@ -1751,7 +1751,7 @@ tu_copy_image_to_buffer(struct tu_cmd_buffer *cmd,
VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo)
const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pCopyImageToBufferInfo->srcImage);
@ -1795,7 +1795,7 @@ static void
tu_copy_image_to_image(struct tu_cmd_buffer *cmd,
struct tu_image *src_image,
struct tu_image *dst_image,
const VkImageCopy2KHR *info)
const VkImageCopy2 *info)
{
const struct blit_ops *ops = &r2d_ops;
struct tu_cs *cs = &cmd->cs;
@ -1977,7 +1977,7 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd,
VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyImage2KHR(VkCommandBuffer commandBuffer,
const VkCopyImageInfo2KHR* pCopyImageInfo)
const VkCopyImageInfo2* pCopyImageInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pCopyImageInfo->srcImage);
@ -1985,7 +1985,7 @@ tu_CmdCopyImage2KHR(VkCommandBuffer commandBuffer,
for (uint32_t i = 0; i < pCopyImageInfo->regionCount; ++i) {
if (src_image->vk_format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
VkImageCopy2KHR info = pCopyImageInfo->pRegions[i];
VkImageCopy2 info = pCopyImageInfo->pRegions[i];
u_foreach_bit(b, info.dstSubresource.aspectMask) {
info.srcSubresource.aspectMask = BIT(b);
info.dstSubresource.aspectMask = BIT(b);
@ -2038,14 +2038,14 @@ copy_buffer(struct tu_cmd_buffer *cmd,
VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferInfo2KHR *pCopyBufferInfo)
const VkCopyBufferInfo2 *pCopyBufferInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
TU_FROM_HANDLE(tu_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);
for (unsigned i = 0; i < pCopyBufferInfo->regionCount; ++i) {
const VkBufferCopy2KHR *region = &pCopyBufferInfo->pRegions[i];
const VkBufferCopy2 *region = &pCopyBufferInfo->pRegions[i];
copy_buffer(cmd,
dst_buffer->iova + region->dstOffset,
src_buffer->iova + region->srcOffset,
@ -2113,7 +2113,7 @@ tu_CmdFillBuffer(VkCommandBuffer commandBuffer,
VKAPI_ATTR void VKAPI_CALL
tu_CmdResolveImage2KHR(VkCommandBuffer commandBuffer,
const VkResolveImageInfo2KHR* pResolveImageInfo)
const VkResolveImageInfo2* pResolveImageInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pResolveImageInfo->srcImage);
@ -2126,7 +2126,7 @@ tu_CmdResolveImage2KHR(VkCommandBuffer commandBuffer,
VK_SAMPLE_COUNT_1_BIT);
for (uint32_t i = 0; i < pResolveImageInfo->regionCount; ++i) {
const VkImageResolve2KHR *info = &pResolveImageInfo->pRegions[i];
const VkImageResolve2 *info = &pResolveImageInfo->pRegions[i];
uint32_t layers = MAX2(info->extent.depth, info->dstSubresource.layerCount);
assert(info->srcSubresource.layerCount == info->dstSubresource.layerCount);
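
All of the copy/blit handlers in this file receive the copy_commands2 "Info2" containers whose types are renamed here. For context, a minimal application-side sketch of the call that reaches tu_CmdBlitImage2KHR, written with the core names this commit adopts (the example_blit helper is hypothetical, assuming <vulkan/vulkan.h>):

#include <vulkan/vulkan.h>

/* Hypothetical helper: a one-region blit using the core copy_commands2 names. */
static void example_blit(VkCommandBuffer cmd, VkImage src, VkImage dst,
                         const VkImageBlit2 *region)
{
   const VkBlitImageInfo2 info = {
      .sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
      .srcImage = src,
      .srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
      .dstImage = dst,
      .dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
      .regionCount = 1,
      .pRegions = region,
      .filter = VK_FILTER_NEAREST,
   };
   vkCmdBlitImage2(cmd, &info);   /* dispatches into tu_CmdBlitImage2KHR */
}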

View File

@ -4090,7 +4090,7 @@ tu_draw_initiator(struct tu_cmd_buffer *cmd, enum pc_di_src_sel src_sel)
if (pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY)) {
if (primtype < DI_PT_PATCHES0) {
/* If tesselation used, only VK_PRIMITIVE_TOPOLOGY_PATCH_LIST can be
* set via vkCmdSetPrimitiveTopologyEXT, but primtype is already
* set via vkCmdSetPrimitiveTopology, but primtype is already
* calculated at the pipeline creation based on control points
* for each patch.
*
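
The comment above explains that a tessellating pipeline fixes its patch primitive type at creation from the per-patch control-point count, so the dynamic topology call can only restate PATCH_LIST. A hedged application-side sketch of the state it refers to (identifiers such as example_tess_state are illustrative, assuming <vulkan/vulkan.h>):

#include <vulkan/vulkan.h>

/* Creation time: the control-point count is baked into the pipeline, which is
 * where turnip derives its DI_PT_PATCHES* primtype. */
static const VkPipelineTessellationStateCreateInfo example_tess_state = {
   .sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
   .patchControlPoints = 3,
};

/* Record time: with tessellation enabled, PATCH_LIST is the only topology the
 * dynamic-state call may set, so the creation-time primtype stays valid. */
static void example_set_topology(VkCommandBuffer cmd)
{
   vkCmdSetPrimitiveTopology(cmd, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST);
}
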
@ -4680,7 +4680,7 @@ tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
VKAPI_ATTR void VKAPI_CALL
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
const VkSubpassEndInfoKHR *pSubpassEndInfo)
const VkSubpassEndInfo *pSubpassEndInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
@ -4733,7 +4733,7 @@ tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
static void
tu_barrier(struct tu_cmd_buffer *cmd,
const VkDependencyInfoKHR *dep_info)
const VkDependencyInfo *dep_info)
{
VkPipelineStageFlags2 srcStage = 0;
VkPipelineStageFlags2 dstStage = 0;
@ -4813,7 +4813,7 @@ tu_barrier(struct tu_cmd_buffer *cmd,
* VK_DEPENDENCY_BY_REGION_BIT.
* [...]
* Each of the synchronization scopes and access scopes of a
* vkCmdPipelineBarrier2KHR or vkCmdPipelineBarrier command inside
* vkCmdPipelineBarrier2 or vkCmdPipelineBarrier command inside
* a render pass instance must be a subset of the scopes of one of
* the self-dependencies for the current subpass.
*
@ -4841,7 +4841,7 @@ tu_barrier(struct tu_cmd_buffer *cmd,
VKAPI_ATTR void VKAPI_CALL
tu_CmdPipelineBarrier2(VkCommandBuffer commandBuffer,
const VkDependencyInfoKHR *pDependencyInfo)
const VkDependencyInfo *pDependencyInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
@ -4882,11 +4882,11 @@ write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
VKAPI_ATTR void VKAPI_CALL
tu_CmdSetEvent2(VkCommandBuffer commandBuffer,
VkEvent _event,
const VkDependencyInfoKHR *pDependencyInfo)
const VkDependencyInfo *pDependencyInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_event, event, _event);
VkPipelineStageFlags2KHR src_stage_mask = 0;
VkPipelineStageFlags2 src_stage_mask = 0;
for (uint32_t i = 0; i < pDependencyInfo->memoryBarrierCount; i++)
src_stage_mask |= pDependencyInfo->pMemoryBarriers[i].srcStageMask;
@ -4913,7 +4913,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdWaitEvents2(VkCommandBuffer commandBuffer,
uint32_t eventCount,
const VkEvent *pEvents,
const VkDependencyInfoKHR* pDependencyInfos)
const VkDependencyInfo* pDependencyInfos)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
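
tu_barrier(), tu_CmdPipelineBarrier2, tu_CmdSetEvent2, and tu_CmdWaitEvents2 above all consume the synchronization2 VkDependencyInfo whose spelling changes here. A hedged application-side sketch of the structure they receive (example_barrier is hypothetical, assuming <vulkan/vulkan.h>):

#include <vulkan/vulkan.h>

/* Hypothetical helper: a global memory barrier expressed with the core
 * synchronization2 names. */
static void example_barrier(VkCommandBuffer cmd)
{
   const VkMemoryBarrier2 barrier = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .srcStageMask = VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT,
      .srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
      .dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
      .dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT,
   };
   const VkDependencyInfo dep = {
      .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
      .memoryBarrierCount = 1,
      .pMemoryBarriers = &barrier,
   };
   vkCmdPipelineBarrier2(cmd, &dep);   /* lands in tu_CmdPipelineBarrier2 above */
}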

View File

@ -116,10 +116,10 @@ tu_CreateDescriptorSetLayout(
assert(pCreateInfo->sType ==
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
vk_find_struct_const(
pCreateInfo->pNext,
DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
vk_find_struct_const(
pCreateInfo->pNext,
@ -205,7 +205,7 @@ tu_CreateDescriptorSetLayout(
if (variable_flags && binding->binding < variable_flags->bindingCount &&
(variable_flags->pBindingFlags[binding->binding] &
VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)) {
assert(!binding->pImmutableSamplers); /* Terribly ill defined how
many samplers are valid */
assert(binding->binding == num_bindings - 1);
@ -305,14 +305,14 @@ tu_GetDescriptorSetLayoutSupport(
return;
}
const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
vk_find_struct_const(
pCreateInfo->pNext,
DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
VkDescriptorSetVariableDescriptorCountLayoutSupportEXT *variable_count =
DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
VkDescriptorSetVariableDescriptorCountLayoutSupport *variable_count =
vk_find_struct(
(void *) pCreateInfo->pNext,
DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT);
DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT);
const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
vk_find_struct_const(
pCreateInfo->pNext,
@ -368,7 +368,7 @@ tu_GetDescriptorSetLayoutSupport(
if (variable_flags && binding->binding < variable_flags->bindingCount &&
variable_count &&
(variable_flags->pBindingFlags[binding->binding] &
VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)) {
variable_count->maxVariableDescriptorCount =
MIN2(UINT32_MAX, max_count);
}
@ -824,8 +824,8 @@ tu_AllocateDescriptorSets(VkDevice _device,
uint32_t i;
struct tu_descriptor_set *set = NULL;
const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT *variable_counts =
vk_find_struct_const(pAllocateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT);
const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_counts =
vk_find_struct_const(pAllocateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
const uint32_t zero = 0;
/* allocate a set of buffers for each shader to contain descriptors */
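
The VkDescriptorSetVariableDescriptorCountAllocateInfo looked up above sizes the variable-count (last) binding of each set being allocated. A hedged application-side sketch of how that struct reaches tu_AllocateDescriptorSets (example_variable_alloc is hypothetical, assuming <vulkan/vulkan.h>):

#include <vulkan/vulkan.h>

/* Hypothetical helper: allocate one set whose last binding was declared with
 * VARIABLE_DESCRIPTOR_COUNT, asking for 64 descriptors in it. */
static VkResult example_variable_alloc(VkDevice dev, VkDescriptorPool pool,
                                       VkDescriptorSetLayout layout,
                                       VkDescriptorSet *set)
{
   const uint32_t count = 64;
   const VkDescriptorSetVariableDescriptorCountAllocateInfo variable = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO,
      .descriptorSetCount = 1,
      .pDescriptorCounts = &count,
   };
   const VkDescriptorSetAllocateInfo info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
      .pNext = &variable,
      .descriptorPool = pool,
      .descriptorSetCount = 1,
      .pSetLayouts = &layout,
   };
   return vkAllocateDescriptorSets(dev, &info, set);
}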

View File

@ -806,8 +806,8 @@ tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
features->shaderInt8 = false;
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: {
VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *features = (void *)ext;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES: {
VkPhysicalDeviceScalarBlockLayoutFeatures *features = (void *)ext;
features->scalarBlockLayout = true;
break;
}
@ -819,8 +819,8 @@ tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES: {
VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *features =
(VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *) ext;
VkPhysicalDeviceTimelineSemaphoreFeatures *features =
(VkPhysicalDeviceTimelineSemaphoreFeatures *) ext;
features->timelineSemaphore = true;
break;
}
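
The sType cases above are filled when an application chains the matching feature structs into vkGetPhysicalDeviceFeatures2. A minimal sketch using the core (promoted) spellings this commit switches to (example_query_features is hypothetical, assuming <vulkan/vulkan.h>):

#include <vulkan/vulkan.h>

/* Hypothetical helper: query the scalar-block-layout and timeline-semaphore
 * features that the switch above reports as supported. */
static void example_query_features(VkPhysicalDevice pdev)
{
   VkPhysicalDeviceScalarBlockLayoutFeatures scalar = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES,
   };
   VkPhysicalDeviceTimelineSemaphoreFeatures timeline = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES,
      .pNext = &scalar,
   };
   VkPhysicalDeviceFeatures2 features = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
      .pNext = &timeline,
   };
   vkGetPhysicalDeviceFeatures2(pdev, &features);
   /* scalar.scalarBlockLayout and timeline.timelineSemaphore are now filled in. */
}
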
@ -969,12 +969,12 @@ tu_get_physical_device_properties_1_2(struct tu_physical_device *pdevice,
p->driverID = VK_DRIVER_ID_MESA_TURNIP;
memset(p->driverName, 0, sizeof(p->driverName));
snprintf(p->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR,
snprintf(p->driverName, VK_MAX_DRIVER_NAME_SIZE,
"turnip Mesa driver");
memset(p->driverInfo, 0, sizeof(p->driverInfo));
snprintf(p->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
snprintf(p->driverInfo, VK_MAX_DRIVER_INFO_SIZE,
"Mesa " PACKAGE_VERSION MESA_GIT_SHA1);
p->conformanceVersion = (VkConformanceVersionKHR) {
p->conformanceVersion = (VkConformanceVersion) {
.major = 1,
.minor = 2,
.subminor = 7,
@ -2888,7 +2888,7 @@ tu_GetPhysicalDeviceMultisamplePropertiesEXT(
VkDeviceAddress
tu_GetBufferDeviceAddress(VkDevice _device,
const VkBufferDeviceAddressInfoKHR* pInfo)
const VkBufferDeviceAddressInfo* pInfo)
{
TU_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer);
@ -2897,7 +2897,7 @@ tu_GetBufferDeviceAddress(VkDevice _device,
uint64_t tu_GetBufferOpaqueCaptureAddress(
VkDevice device,
const VkBufferDeviceAddressInfoKHR* pInfo)
const VkBufferDeviceAddressInfo* pInfo)
{
tu_stub();
return 0;
@ -2905,7 +2905,7 @@ uint64_t tu_GetBufferOpaqueCaptureAddress(
uint64_t tu_GetDeviceMemoryOpaqueCaptureAddress(
VkDevice device,
const VkDeviceMemoryOpaqueCaptureAddressInfoKHR* pInfo)
const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
{
tu_stub();
return 0;

View File

@ -216,11 +216,11 @@ tu_physical_device_get_format_properties(
VK_FORMAT_FEATURE_BLIT_DST_BIT |
VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT |
VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT |
VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR;
VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT;
buffer |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT |
VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT |
VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR;
VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT;
/* TODO: The blob also exposes these for R16G16_UINT/R16G16_SINT, but we
* don't have any tests for those.

View File

@ -278,7 +278,7 @@ tu_render_pass_add_implicit_deps(struct tu_render_pass *pass,
}
const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
vk_find_struct_const(subpass->pNext, SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR);
vk_find_struct_const(subpass->pNext, SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE);
if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) {
@ -294,7 +294,7 @@ tu_render_pass_add_implicit_deps(struct tu_render_pass *pass,
}
if (src_implicit_dep) {
tu_render_pass_add_subpass_dep(pass, &(VkSubpassDependency2KHR) {
tu_render_pass_add_subpass_dep(pass, &(VkSubpassDependency2) {
.srcSubpass = VK_SUBPASS_EXTERNAL,
.dstSubpass = i,
.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
@ -370,7 +370,7 @@ tu_render_pass_add_implicit_deps(struct tu_render_pass *pass,
}
const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
vk_find_struct_const(subpass->pNext, SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR);
vk_find_struct_const(subpass->pNext, SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE);
if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) {
@ -386,7 +386,7 @@ tu_render_pass_add_implicit_deps(struct tu_render_pass *pass,
}
if (dst_implicit_dep) {
tu_render_pass_add_subpass_dep(pass, &(VkSubpassDependency2KHR) {
tu_render_pass_add_subpass_dep(pass, &(VkSubpassDependency2) {
.srcSubpass = i,
.dstSubpass = VK_SUBPASS_EXTERNAL,
.srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
@ -672,7 +672,7 @@ is_depth_stencil_resolve_enabled(const VkSubpassDescriptionDepthStencilResolve *
}
static void
tu_subpass_use_attachment(struct tu_render_pass *pass, int i, uint32_t a, const VkRenderPassCreateInfo2KHR *pCreateInfo)
tu_subpass_use_attachment(struct tu_render_pass *pass, int i, uint32_t a, const VkRenderPassCreateInfo2 *pCreateInfo)
{
struct tu_subpass *subpass = &pass->subpasses[i];
@ -683,7 +683,7 @@ tu_subpass_use_attachment(struct tu_render_pass *pass, int i, uint32_t a, const
VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateRenderPass2(VkDevice _device,
const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkRenderPass *pRenderPass)
{
@ -692,7 +692,7 @@ tu_CreateRenderPass2(VkDevice _device,
size_t size;
size_t attachments_offset;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2);
size = sizeof(*pass);
size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
@ -743,7 +743,7 @@ tu_CreateRenderPass2(VkDevice _device,
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription2 *desc = &pCreateInfo->pSubpasses[i];
const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
vk_find_struct_const(desc->pNext, SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR);
vk_find_struct_const(desc->pNext, SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE);
subpass_attachment_count +=
desc->inputAttachmentCount + desc->colorAttachmentCount +
@ -767,7 +767,7 @@ tu_CreateRenderPass2(VkDevice _device,
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription2 *desc = &pCreateInfo->pSubpasses[i];
const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
vk_find_struct_const(desc->pNext, SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR);
vk_find_struct_const(desc->pNext, SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE);
struct tu_subpass *subpass = &pass->subpasses[i];
subpass->input_count = desc->inputAttachmentCount;

View File

@ -2551,12 +2551,12 @@ tu_shader_key_init(struct tu_shader_key *key,
if (stage_info) {
if (stage_info->flags &
VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) {
VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT) {
api_wavesize = real_wavesize = IR3_SINGLE_OR_DOUBLE;
} else {
const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *size_info =
const VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *size_info =
vk_find_struct_const(stage_info->pNext,
PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO);
if (size_info) {
if (size_info->requiredSubgroupSize == dev->compiler->threadsize_base) {
@ -2571,7 +2571,7 @@ tu_shader_key_init(struct tu_shader_key *key,
}
if (stage_info->flags &
VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT)
VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT)
real_wavesize = api_wavesize;
else if (api_wavesize == IR3_SINGLE_ONLY)
real_wavesize = IR3_SINGLE_ONLY;
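
The flags and pNext struct handled above come from subgroup size control (promoted to core in Vulkan 1.3): an application may pin a stage to a specific subgroup size, which tu_shader_key_init then maps onto the wave size it programs. A hedged sketch of the application-side struct (illustrative size value, assuming <vulkan/vulkan.h>):

#include <vulkan/vulkan.h>

/* Chained into VkPipelineShaderStageCreateInfo::pNext; the value must be one
 * of the subgroup sizes the device advertises. */
static const VkPipelineShaderStageRequiredSubgroupSizeCreateInfo example_required_size = {
   .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO,
   .requiredSubgroupSize = 64,
};
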
@ -2782,10 +2782,10 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {
NULL
};
VkPipelineCreationFeedbackEXT pipeline_feedback = {
VkPipelineCreationFeedback pipeline_feedback = {
.flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT,
};
VkPipelineCreationFeedbackEXT stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
VkPipelineCreationFeedback stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
int64_t pipeline_start = os_time_get_nano();
@ -3095,52 +3095,52 @@ tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_SAMPLE_LOCATIONS);
break;
case VK_DYNAMIC_STATE_CULL_MODE_EXT:
case VK_DYNAMIC_STATE_CULL_MODE:
pipeline->gras_su_cntl_mask &=
~(A6XX_GRAS_SU_CNTL_CULL_BACK | A6XX_GRAS_SU_CNTL_CULL_FRONT);
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_GRAS_SU_CNTL);
break;
case VK_DYNAMIC_STATE_FRONT_FACE_EXT:
case VK_DYNAMIC_STATE_FRONT_FACE:
pipeline->gras_su_cntl_mask &= ~A6XX_GRAS_SU_CNTL_FRONT_CW;
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_GRAS_SU_CNTL);
break;
case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY:
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY);
break;
case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:
case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE:
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_VB_STRIDE);
break;
case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT:
pipeline->dynamic_state_mask |= BIT(VK_DYNAMIC_STATE_VIEWPORT);
break;
case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT:
pipeline->dynamic_state_mask |= BIT(VK_DYNAMIC_STATE_SCISSOR);
break;
case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:
case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE:
pipeline->rb_depth_cntl_mask &=
~(A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE | A6XX_RB_DEPTH_CNTL_Z_READ_ENABLE);
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);
break;
case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:
case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE:
pipeline->rb_depth_cntl_mask &= ~A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);
break;
case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:
case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP:
pipeline->rb_depth_cntl_mask &= ~A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);
break;
case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:
case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE:
pipeline->rb_depth_cntl_mask &=
~(A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE | A6XX_RB_DEPTH_CNTL_Z_READ_ENABLE);
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);
break;
case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:
case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE:
pipeline->rb_stencil_cntl_mask &= ~(A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
A6XX_RB_STENCIL_CONTROL_STENCIL_READ);
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_STENCIL_CNTL);
break;
case VK_DYNAMIC_STATE_STENCIL_OP_EXT:
case VK_DYNAMIC_STATE_STENCIL_OP:
pipeline->rb_stencil_cntl_mask &= ~(A6XX_RB_STENCIL_CONTROL_FUNC__MASK |
A6XX_RB_STENCIL_CONTROL_FAIL__MASK |
A6XX_RB_STENCIL_CONTROL_ZPASS__MASK |
@ -3151,14 +3151,14 @@ tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK);
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_STENCIL_CNTL);
break;
case VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT:
case VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE:
pipeline->gras_su_cntl_mask &= ~A6XX_GRAS_SU_CNTL_POLY_OFFSET;
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_GRAS_SU_CNTL);
break;
case VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT:
case VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE:
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE);
break;
case VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT:
case VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE:
pipeline->pc_raster_cntl_mask &= ~A6XX_PC_RASTER_CNTL_DISCARD;
pipeline->vpc_unknown_9107_mask &= ~A6XX_VPC_UNKNOWN_9107_RASTER_DISCARD;
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RASTERIZER_DISCARD);
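
The cases above accept the core spellings of the extended-dynamic-state enums; an application opts into them per pipeline through VkPipelineDynamicStateCreateInfo. A brief sketch using a few of the states handled above (illustrative selection, assuming <vulkan/vulkan.h>):

#include <vulkan/vulkan.h>

/* A handful of the dynamic states parsed in the switch above, requested with
 * their core (promoted) names. */
static const VkDynamicState example_dynamic_states[] = {
   VK_DYNAMIC_STATE_CULL_MODE,
   VK_DYNAMIC_STATE_FRONT_FACE,
   VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY,
   VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE,
   VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE,
};

static const VkPipelineDynamicStateCreateInfo example_dynamic_info = {
   .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
   .dynamicStateCount = 5,
   .pDynamicStates = example_dynamic_states,
};
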
@ -3861,7 +3861,7 @@ tu_pipeline_builder_init_graphics(
if (create_info->pDynamicState) {
for (uint32_t i = 0; i < create_info->pDynamicState->dynamicStateCount; i++) {
if (create_info->pDynamicState->pDynamicStates[i] ==
VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) {
VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE) {
rasterizer_discard_dynamic = true;
break;
}
@ -4000,7 +4000,7 @@ tu_compute_pipeline_create(VkDevice device,
*pPipeline = VK_NULL_HANDLE;
VkPipelineCreationFeedbackEXT pipeline_feedback = {
VkPipelineCreationFeedback pipeline_feedback = {
.flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT,
};

View File

@ -1679,7 +1679,7 @@ tu_EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
for (int j = 0; j < group[i].num_countables; j++) {
vk_outarray_append_typed(VkPerformanceCounterKHR, &out, counter) {
counter->scope = VK_QUERY_SCOPE_COMMAND_BUFFER_KHR;
counter->scope = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR;
counter->unit =
fd_perfcntr_type_to_vk_unit[group[i].countables[j].query_type];
counter->storage =