tu: Replace TU_FROM_HANDLE with VK_FROM_HANDLE

It was exactly the same thing.

Signed-off-by: Valentine Burley <valentine.burley@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/28571>
This commit is contained in:
Valentine Burley 2024-04-04 12:35:33 +00:00 committed by Marge Bot
parent a5adbae6f6
commit 4850aebcaf
17 changed files with 263 additions and 266 deletions

View File

@ -12,7 +12,7 @@
VKAPI_ATTR VkResult VKAPI_CALL VKAPI_ATTR VkResult VKAPI_CALL
tu_rmv_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo) tu_rmv_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{ {
TU_FROM_HANDLE(tu_queue, queue, _queue); VK_FROM_HANDLE(tu_queue, queue, _queue);
struct tu_device *device = queue->device; struct tu_device *device = queue->device;
VkResult result = wsi_QueuePresentKHR(_queue, pPresentInfo); VkResult result = wsi_QueuePresentKHR(_queue, pPresentInfo);
@ -28,7 +28,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
tu_rmv_FlushMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount, tu_rmv_FlushMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount,
const VkMappedMemoryRange *pMemoryRanges) const VkMappedMemoryRange *pMemoryRanges)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
VkResult result = tu_FlushMappedMemoryRanges(_device, memoryRangeCount, VkResult result = tu_FlushMappedMemoryRanges(_device, memoryRangeCount,
pMemoryRanges); pMemoryRanges);
@ -43,7 +43,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
tu_rmv_InvalidateMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount, tu_rmv_InvalidateMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount,
const VkMappedMemoryRange *pMemoryRanges) const VkMappedMemoryRange *pMemoryRanges)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
VkResult result = tu_InvalidateMappedMemoryRanges(_device, memoryRangeCount, VkResult result = tu_InvalidateMappedMemoryRanges(_device, memoryRangeCount,
pMemoryRanges); pMemoryRanges);
@ -58,7 +58,7 @@ VkResult tu_rmv_SetDebugUtilsObjectNameEXT(VkDevice _device,
const VkDebugUtilsObjectNameInfoEXT* pNameInfo) const VkDebugUtilsObjectNameInfoEXT* pNameInfo)
{ {
assert(pNameInfo->sType == VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT); assert(pNameInfo->sType == VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT);
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
VkResult result = vk_common_SetDebugUtilsObjectNameEXT(_device, pNameInfo); VkResult result = vk_common_SetDebugUtilsObjectNameEXT(_device, pNameInfo);
if (result != VK_SUCCESS || !device->vk.memory_trace_data.is_enabled) if (result != VK_SUCCESS || !device->vk.memory_trace_data.is_enabled)

View File

@ -298,7 +298,7 @@ static VkResult
format_supported_with_usage(VkDevice device_h, VkFormat format, format_supported_with_usage(VkDevice device_h, VkFormat format,
VkImageUsageFlags imageUsage) VkImageUsageFlags imageUsage)
{ {
TU_FROM_HANDLE(tu_device, device, device_h); VK_FROM_HANDLE(tu_device, device, device_h);
struct tu_physical_device *phys_dev = device->physical_device; struct tu_physical_device *phys_dev = device->physical_device;
VkPhysicalDevice phys_dev_h = tu_physical_device_to_handle(phys_dev); VkPhysicalDevice phys_dev_h = tu_physical_device_to_handle(phys_dev);
VkResult result; VkResult result;
@ -389,7 +389,7 @@ tu_GetSwapchainGrallocUsageANDROID(VkDevice device_h,
VkImageUsageFlags imageUsage, VkImageUsageFlags imageUsage,
int *grallocUsage) int *grallocUsage)
{ {
TU_FROM_HANDLE(tu_device, device, device_h); VK_FROM_HANDLE(tu_device, device, device_h);
VkResult result; VkResult result;
result = format_supported_with_usage(device_h, format, imageUsage); result = format_supported_with_usage(device_h, format, imageUsage);
@ -409,7 +409,7 @@ tu_GetSwapchainGrallocUsage2ANDROID(VkDevice device_h,
uint64_t *grallocConsumerUsage, uint64_t *grallocConsumerUsage,
uint64_t *grallocProducerUsage) uint64_t *grallocProducerUsage)
{ {
TU_FROM_HANDLE(tu_device, device, device_h); VK_FROM_HANDLE(tu_device, device, device_h);
VkResult result; VkResult result;
*grallocConsumerUsage = 0; *grallocConsumerUsage = 0;

View File

@ -1996,9 +1996,9 @@ tu_CmdBlitImage2(VkCommandBuffer commandBuffer,
const VkBlitImageInfo2 *pBlitImageInfo) const VkBlitImageInfo2 *pBlitImageInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pBlitImageInfo->srcImage); VK_FROM_HANDLE(tu_image, src_image, pBlitImageInfo->srcImage);
TU_FROM_HANDLE(tu_image, dst_image, pBlitImageInfo->dstImage); VK_FROM_HANDLE(tu_image, dst_image, pBlitImageInfo->dstImage);
for (uint32_t i = 0; i < pBlitImageInfo->regionCount; ++i) { for (uint32_t i = 0; i < pBlitImageInfo->regionCount; ++i) {
/* can't blit both depth and stencil at once with D32_S8 /* can't blit both depth and stencil at once with D32_S8
@ -2128,9 +2128,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyBufferToImage2(VkCommandBuffer commandBuffer, tu_CmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo) const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, dst_image, pCopyBufferToImageInfo->dstImage); VK_FROM_HANDLE(tu_image, dst_image, pCopyBufferToImageInfo->dstImage);
TU_FROM_HANDLE(tu_buffer, src_buffer, pCopyBufferToImageInfo->srcBuffer); VK_FROM_HANDLE(tu_buffer, src_buffer, pCopyBufferToImageInfo->srcBuffer);
for (unsigned i = 0; i < pCopyBufferToImageInfo->regionCount; ++i) for (unsigned i = 0; i < pCopyBufferToImageInfo->regionCount; ++i)
tu_copy_buffer_to_image<CHIP>(cmd, src_buffer, dst_image, tu_copy_buffer_to_image<CHIP>(cmd, src_buffer, dst_image,
@ -2217,9 +2217,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyImageToBuffer2(VkCommandBuffer commandBuffer, tu_CmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo) const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pCopyImageToBufferInfo->srcImage); VK_FROM_HANDLE(tu_image, src_image, pCopyImageToBufferInfo->srcImage);
TU_FROM_HANDLE(tu_buffer, dst_buffer, pCopyImageToBufferInfo->dstBuffer); VK_FROM_HANDLE(tu_buffer, dst_buffer, pCopyImageToBufferInfo->dstBuffer);
for (unsigned i = 0; i < pCopyImageToBufferInfo->regionCount; ++i) for (unsigned i = 0; i < pCopyImageToBufferInfo->regionCount; ++i)
tu_copy_image_to_buffer<CHIP>(cmd, src_image, dst_buffer, tu_copy_image_to_buffer<CHIP>(cmd, src_image, dst_buffer,
@ -2461,9 +2461,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyImage2(VkCommandBuffer commandBuffer, tu_CmdCopyImage2(VkCommandBuffer commandBuffer,
const VkCopyImageInfo2 *pCopyImageInfo) const VkCopyImageInfo2 *pCopyImageInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pCopyImageInfo->srcImage); VK_FROM_HANDLE(tu_image, src_image, pCopyImageInfo->srcImage);
TU_FROM_HANDLE(tu_image, dst_image, pCopyImageInfo->dstImage); VK_FROM_HANDLE(tu_image, dst_image, pCopyImageInfo->dstImage);
for (uint32_t i = 0; i < pCopyImageInfo->regionCount; ++i) { for (uint32_t i = 0; i < pCopyImageInfo->regionCount; ++i) {
if (src_image->vk.format == VK_FORMAT_D32_SFLOAT_S8_UINT) { if (src_image->vk.format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
@ -2525,9 +2525,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyBuffer2(VkCommandBuffer commandBuffer, tu_CmdCopyBuffer2(VkCommandBuffer commandBuffer,
const VkCopyBufferInfo2 *pCopyBufferInfo) const VkCopyBufferInfo2 *pCopyBufferInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, src_buffer, pCopyBufferInfo->srcBuffer); VK_FROM_HANDLE(tu_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
TU_FROM_HANDLE(tu_buffer, dst_buffer, pCopyBufferInfo->dstBuffer); VK_FROM_HANDLE(tu_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);
for (unsigned i = 0; i < pCopyBufferInfo->regionCount; ++i) { for (unsigned i = 0; i < pCopyBufferInfo->regionCount; ++i) {
const VkBufferCopy2 *region = &pCopyBufferInfo->pRegions[i]; const VkBufferCopy2 *region = &pCopyBufferInfo->pRegions[i];
@ -2547,8 +2547,8 @@ tu_CmdUpdateBuffer(VkCommandBuffer commandBuffer,
VkDeviceSize dataSize, VkDeviceSize dataSize,
const void *pData) const void *pData)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer); VK_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
struct tu_cs_memory tmp; struct tu_cs_memory tmp;
VkResult result = tu_cs_alloc(&cmd->sub_cs, DIV_ROUND_UP(dataSize, 64), 64 / 4, &tmp); VkResult result = tu_cs_alloc(&cmd->sub_cs, DIV_ROUND_UP(dataSize, 64), 64 / 4, &tmp);
@ -2570,8 +2570,8 @@ tu_CmdFillBuffer(VkCommandBuffer commandBuffer,
VkDeviceSize fillSize, VkDeviceSize fillSize,
uint32_t data) uint32_t data)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer); VK_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
const struct blit_ops *ops = &r2d_ops<CHIP>; const struct blit_ops *ops = &r2d_ops<CHIP>;
struct tu_cs *cs = &cmd->cs; struct tu_cs *cs = &cmd->cs;
@ -2609,9 +2609,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdResolveImage2(VkCommandBuffer commandBuffer, tu_CmdResolveImage2(VkCommandBuffer commandBuffer,
const VkResolveImageInfo2 *pResolveImageInfo) const VkResolveImageInfo2 *pResolveImageInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pResolveImageInfo->srcImage); VK_FROM_HANDLE(tu_image, src_image, pResolveImageInfo->srcImage);
TU_FROM_HANDLE(tu_image, dst_image, pResolveImageInfo->dstImage); VK_FROM_HANDLE(tu_image, dst_image, pResolveImageInfo->dstImage);
const struct blit_ops *ops = &r2d_ops<CHIP>; const struct blit_ops *ops = &r2d_ops<CHIP>;
struct tu_cs *cs = &cmd->cs; struct tu_cs *cs = &cmd->cs;
@ -2811,8 +2811,8 @@ tu_CmdClearColorImage(VkCommandBuffer commandBuffer,
uint32_t rangeCount, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const VkImageSubresourceRange *pRanges)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, image, image_h); VK_FROM_HANDLE(tu_image, image, image_h);
for (unsigned i = 0; i < rangeCount; i++) for (unsigned i = 0; i < rangeCount; i++)
clear_image<CHIP>(cmd, image, (const VkClearValue*) pColor, pRanges + i, VK_IMAGE_ASPECT_COLOR_BIT); clear_image<CHIP>(cmd, image, (const VkClearValue*) pColor, pRanges + i, VK_IMAGE_ASPECT_COLOR_BIT);
@ -2828,8 +2828,8 @@ tu_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer,
uint32_t rangeCount, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const VkImageSubresourceRange *pRanges)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, image, image_h); VK_FROM_HANDLE(tu_image, image, image_h);
for (unsigned i = 0; i < rangeCount; i++) { for (unsigned i = 0; i < rangeCount; i++) {
const VkImageSubresourceRange *range = &pRanges[i]; const VkImageSubresourceRange *range = &pRanges[i];
@ -3234,7 +3234,7 @@ tu_CmdClearAttachments(VkCommandBuffer commandBuffer,
uint32_t rectCount, uint32_t rectCount,
const VkClearRect *pRects) const VkClearRect *pRects)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
/* sysmem path behaves like a draw, note we don't have a way of using different /* sysmem path behaves like a draw, note we don't have a way of using different

View File

@ -2273,7 +2273,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer, tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo) const VkCommandBufferBeginInfo *pBeginInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
VkResult result = tu_cmd_buffer_begin(cmd_buffer, pBeginInfo); VkResult result = tu_cmd_buffer_begin(cmd_buffer, pBeginInfo);
if (result != VK_SUCCESS) if (result != VK_SUCCESS)
return result; return result;
@ -2406,7 +2406,7 @@ tu_CmdBindVertexBuffers2(VkCommandBuffer commandBuffer,
const VkDeviceSize *pSizes, const VkDeviceSize *pSizes,
const VkDeviceSize *pStrides) const VkDeviceSize *pStrides)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs cs; struct tu_cs cs;
cmd->state.max_vbs_bound = MAX2( cmd->state.max_vbs_bound = MAX2(
@ -2448,8 +2448,8 @@ tu_CmdBindIndexBuffer2KHR(VkCommandBuffer commandBuffer,
VkDeviceSize size, VkDeviceSize size,
VkIndexType indexType) VkIndexType indexType)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, buffer); VK_FROM_HANDLE(tu_buffer, buf, buffer);
size = vk_buffer_range(&buf->vk, offset, size); size = vk_buffer_range(&buf->vk, offset, size);
@ -2580,8 +2580,8 @@ tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
uint32_t dynamicOffsetCount, uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets) const uint32_t *pDynamicOffsets)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout); VK_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
unsigned dyn_idx = 0; unsigned dyn_idx = 0;
struct tu_descriptor_state *descriptors_state = struct tu_descriptor_state *descriptors_state =
@ -2597,7 +2597,7 @@ tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
for (unsigned i = 0; i < descriptorSetCount; ++i) { for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet; unsigned idx = i + firstSet;
TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]); VK_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
descriptors_state->sets[idx] = set; descriptors_state->sets[idx] = set;
descriptors_state->set_iova[idx] = set ? descriptors_state->set_iova[idx] = set ?
@ -2707,7 +2707,7 @@ tu_CmdBindDescriptorBuffersEXT(
uint32_t bufferCount, uint32_t bufferCount,
const VkDescriptorBufferBindingInfoEXT *pBindingInfos) const VkDescriptorBufferBindingInfoEXT *pBindingInfos)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
for (unsigned i = 0; i < bufferCount; i++) for (unsigned i = 0; i < bufferCount; i++)
cmd->state.descriptor_buffer_iova[i] = pBindingInfos[i].address; cmd->state.descriptor_buffer_iova[i] = pBindingInfos[i].address;
@ -2723,8 +2723,8 @@ tu_CmdSetDescriptorBufferOffsetsEXT(
const uint32_t *pBufferIndices, const uint32_t *pBufferIndices,
const VkDeviceSize *pOffsets) const VkDeviceSize *pOffsets)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout); VK_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
struct tu_descriptor_state *descriptors_state = struct tu_descriptor_state *descriptors_state =
tu_get_descriptors_state(cmd, pipelineBindPoint); tu_get_descriptors_state(cmd, pipelineBindPoint);
@ -2754,8 +2754,8 @@ tu_CmdBindDescriptorBufferEmbeddedSamplersEXT(
VkPipelineLayout _layout, VkPipelineLayout _layout,
uint32_t set) uint32_t set)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout); VK_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
struct tu_descriptor_set_layout *set_layout = layout->set[set].layout; struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
@ -2804,8 +2804,8 @@ tu_CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer,
uint32_t descriptorWriteCount, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) const VkWriteDescriptorSet *pDescriptorWrites)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout); VK_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout);
struct tu_descriptor_set_layout *layout = pipe_layout->set[_set].layout; struct tu_descriptor_set_layout *layout = pipe_layout->set[_set].layout;
struct tu_descriptor_set *set = struct tu_descriptor_set *set =
&tu_get_descriptors_state(cmd, pipelineBindPoint)->push_set; &tu_get_descriptors_state(cmd, pipelineBindPoint)->push_set;
@ -2843,9 +2843,9 @@ tu_CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
uint32_t _set, uint32_t _set,
const void* pData) const void* pData)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout); VK_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout);
TU_FROM_HANDLE(tu_descriptor_update_template, templ, descriptorUpdateTemplate); VK_FROM_HANDLE(tu_descriptor_update_template, templ, descriptorUpdateTemplate);
struct tu_descriptor_set_layout *layout = pipe_layout->set[_set].layout; struct tu_descriptor_set_layout *layout = pipe_layout->set[_set].layout;
struct tu_descriptor_set *set = struct tu_descriptor_set *set =
&tu_get_descriptors_state(cmd, templ->bind_point)->push_set; &tu_get_descriptors_state(cmd, templ->bind_point)->push_set;
@ -2883,7 +2883,7 @@ tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
const VkDeviceSize *pOffsets, const VkDeviceSize *pOffsets,
const VkDeviceSize *pSizes) const VkDeviceSize *pSizes)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
/* using COND_REG_EXEC for xfb commands matches the blob behavior /* using COND_REG_EXEC for xfb commands matches the blob behavior
@ -2895,7 +2895,7 @@ tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
CP_COND_REG_EXEC_0_BINNING); CP_COND_REG_EXEC_0_BINNING);
for (uint32_t i = 0; i < bindingCount; i++) { for (uint32_t i = 0; i < bindingCount; i++) {
TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]); VK_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
uint64_t iova = buf->iova + pOffsets[i]; uint64_t iova = buf->iova + pOffsets[i];
uint32_t size = buf->bo->size - (iova - buf->bo->iova); uint32_t size = buf->bo->size - (iova - buf->bo->iova);
uint32_t idx = i + firstBinding; uint32_t idx = i + firstBinding;
@ -2924,7 +2924,7 @@ tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
const VkBuffer *pCounterBuffers, const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets) const VkDeviceSize *pCounterBufferOffsets)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) | tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
@ -2945,7 +2945,7 @@ tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
if (!pCounterBuffers[i]) if (!pCounterBuffers[i])
continue; continue;
TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]); VK_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3); tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) | tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
@ -2973,7 +2973,7 @@ tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
const VkBuffer *pCounterBuffers, const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets) const VkDeviceSize *pCounterBufferOffsets)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) | tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
@ -2998,7 +2998,7 @@ tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
if (!pCounterBuffers[i]) if (!pCounterBuffers[i])
continue; continue;
TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]); VK_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
/* VPC_SO_FLUSH_BASE has dwords counter, but counter should be in bytes */ /* VPC_SO_FLUSH_BASE has dwords counter, but counter should be in bytes */
tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3); tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
@ -3037,7 +3037,7 @@ tu_CmdPushConstants(VkCommandBuffer commandBuffer,
uint32_t size, uint32_t size,
const void *pValues) const void *pValues)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
memcpy((char *) cmd->push_constants + offset, pValues, size); memcpy((char *) cmd->push_constants + offset, pValues, size);
cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS; cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
} }
@ -3056,7 +3056,7 @@ template <chip CHIP>
VKAPI_ATTR VkResult VKAPI_CALL VKAPI_ATTR VkResult VKAPI_CALL
tu_EndCommandBuffer(VkCommandBuffer commandBuffer) tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
/* We currently flush CCU at the end of the command buffer, like /* We currently flush CCU at the end of the command buffer, like
* what the blob does. There's implicit synchronization around every * what the blob does. There's implicit synchronization around every
@ -3190,8 +3190,8 @@ tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint, VkPipelineBindPoint pipelineBindPoint,
VkPipeline _pipeline) VkPipeline _pipeline)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline); VK_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) { if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
cmd->state.shaders[MESA_SHADER_COMPUTE] = cmd->state.shaders[MESA_SHADER_COMPUTE] =
@ -3830,7 +3830,7 @@ tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
uint32_t commandBufferCount, uint32_t commandBufferCount,
const VkCommandBuffer *pCmdBuffers) const VkCommandBuffer *pCmdBuffers)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VkResult result; VkResult result;
assert(commandBufferCount > 0); assert(commandBufferCount > 0);
@ -3845,7 +3845,7 @@ tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
} }
for (uint32_t i = 0; i < commandBufferCount; i++) { for (uint32_t i = 0; i < commandBufferCount; i++) {
TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]); VK_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
if (secondary->usage_flags & if (secondary->usage_flags &
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
@ -4140,7 +4140,7 @@ tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBegin, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) const VkSubpassBeginInfo *pSubpassBeginInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
if (TU_DEBUG(DYNAMIC)) { if (TU_DEBUG(DYNAMIC)) {
vk_common_CmdBeginRenderPass2(commandBuffer, pRenderPassBegin, vk_common_CmdBeginRenderPass2(commandBuffer, pRenderPassBegin,
@ -4148,8 +4148,8 @@ tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
return; return;
} }
TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass); VK_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer); VK_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
const struct VkRenderPassAttachmentBeginInfo *pAttachmentInfo = const struct VkRenderPassAttachmentBeginInfo *pAttachmentInfo =
vk_find_struct_const(pRenderPassBegin->pNext, vk_find_struct_const(pRenderPassBegin->pNext,
@ -4217,7 +4217,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdBeginRendering(VkCommandBuffer commandBuffer, tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
const VkRenderingInfo *pRenderingInfo) const VkRenderingInfo *pRenderingInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
tu_setup_dynamic_render_pass(cmd, pRenderingInfo); tu_setup_dynamic_render_pass(cmd, pRenderingInfo);
tu_setup_dynamic_framebuffer(cmd, pRenderingInfo); tu_setup_dynamic_framebuffer(cmd, pRenderingInfo);
@ -4238,13 +4238,13 @@ tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
cmd->state.clear_values[a] = cmd->state.clear_values[a] =
pRenderingInfo->pColorAttachments[i].clearValue; pRenderingInfo->pColorAttachments[i].clearValue;
TU_FROM_HANDLE(tu_image_view, view, VK_FROM_HANDLE(tu_image_view, view,
pRenderingInfo->pColorAttachments[i].imageView); pRenderingInfo->pColorAttachments[i].imageView);
cmd->state.attachments[a] = view; cmd->state.attachments[a] = view;
a = cmd->dynamic_subpass.resolve_attachments[i].attachment; a = cmd->dynamic_subpass.resolve_attachments[i].attachment;
if (a != VK_ATTACHMENT_UNUSED) { if (a != VK_ATTACHMENT_UNUSED) {
TU_FROM_HANDLE(tu_image_view, resolve_view, VK_FROM_HANDLE(tu_image_view, resolve_view,
pRenderingInfo->pColorAttachments[i].resolveImageView); pRenderingInfo->pColorAttachments[i].resolveImageView);
cmd->state.attachments[a] = resolve_view; cmd->state.attachments[a] = resolve_view;
} }
@ -4258,7 +4258,7 @@ tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
pRenderingInfo->pDepthAttachment : pRenderingInfo->pDepthAttachment :
pRenderingInfo->pStencilAttachment; pRenderingInfo->pStencilAttachment;
if (common_info && common_info->imageView != VK_NULL_HANDLE) { if (common_info && common_info->imageView != VK_NULL_HANDLE) {
TU_FROM_HANDLE(tu_image_view, view, common_info->imageView); VK_FROM_HANDLE(tu_image_view, view, common_info->imageView);
cmd->state.attachments[a] = view; cmd->state.attachments[a] = view;
if (pRenderingInfo->pDepthAttachment) { if (pRenderingInfo->pDepthAttachment) {
cmd->state.clear_values[a].depthStencil.depth = cmd->state.clear_values[a].depthStencil.depth =
@ -4272,7 +4272,7 @@ tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
if (cmd->dynamic_subpass.resolve_count > if (cmd->dynamic_subpass.resolve_count >
cmd->dynamic_subpass.color_count) { cmd->dynamic_subpass.color_count) {
TU_FROM_HANDLE(tu_image_view, resolve_view, VK_FROM_HANDLE(tu_image_view, resolve_view,
common_info->resolveImageView); common_info->resolveImageView);
a = cmd->dynamic_subpass.resolve_attachments[cmd->dynamic_subpass.color_count].attachment; a = cmd->dynamic_subpass.resolve_attachments[cmd->dynamic_subpass.color_count].attachment;
cmd->state.attachments[a] = resolve_view; cmd->state.attachments[a] = resolve_view;
@ -4285,7 +4285,7 @@ tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
const VkRenderingFragmentDensityMapAttachmentInfoEXT *fdm_info = const VkRenderingFragmentDensityMapAttachmentInfoEXT *fdm_info =
vk_find_struct_const(pRenderingInfo->pNext, vk_find_struct_const(pRenderingInfo->pNext,
RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT); RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT);
TU_FROM_HANDLE(tu_image_view, view, fdm_info->imageView); VK_FROM_HANDLE(tu_image_view, view, fdm_info->imageView);
cmd->state.attachments[a] = view; cmd->state.attachments[a] = view;
} }
@ -4371,7 +4371,7 @@ tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) const VkSubpassEndInfo *pSubpassEndInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
if (TU_DEBUG(DYNAMIC)) { if (TU_DEBUG(DYNAMIC)) {
vk_common_CmdNextSubpass2(commandBuffer, pSubpassBeginInfo, vk_common_CmdNextSubpass2(commandBuffer, pSubpassBeginInfo,
@ -5320,7 +5320,7 @@ tu_CmdDraw(VkCommandBuffer commandBuffer,
uint32_t firstVertex, uint32_t firstVertex,
uint32_t firstInstance) uint32_t firstInstance)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_vs_params(cmd, 0, firstVertex, firstInstance); tu6_emit_vs_params(cmd, 0, firstVertex, firstInstance);
@ -5343,7 +5343,7 @@ tu_CmdDrawMultiEXT(VkCommandBuffer commandBuffer,
uint32_t firstInstance, uint32_t firstInstance,
uint32_t stride) uint32_t stride)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
if (!drawCount) if (!drawCount)
@ -5389,7 +5389,7 @@ tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
int32_t vertexOffset, int32_t vertexOffset,
uint32_t firstInstance) uint32_t firstInstance)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_vs_params(cmd, 0, vertexOffset, firstInstance); tu6_emit_vs_params(cmd, 0, vertexOffset, firstInstance);
@ -5416,7 +5416,7 @@ tu_CmdDrawMultiIndexedEXT(VkCommandBuffer commandBuffer,
uint32_t stride, uint32_t stride,
const int32_t *pVertexOffset) const int32_t *pVertexOffset)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
if (!drawCount) if (!drawCount)
@ -5479,8 +5479,8 @@ tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
uint32_t drawCount, uint32_t drawCount,
uint32_t stride) uint32_t stride)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _buffer); VK_FROM_HANDLE(tu_buffer, buf, _buffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_empty_vs_params(cmd); tu6_emit_empty_vs_params(cmd);
@ -5508,8 +5508,8 @@ tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
uint32_t drawCount, uint32_t drawCount,
uint32_t stride) uint32_t stride)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _buffer); VK_FROM_HANDLE(tu_buffer, buf, _buffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_empty_vs_params(cmd); tu6_emit_empty_vs_params(cmd);
@ -5541,9 +5541,9 @@ tu_CmdDrawIndirectCount(VkCommandBuffer commandBuffer,
uint32_t drawCount, uint32_t drawCount,
uint32_t stride) uint32_t stride)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _buffer); VK_FROM_HANDLE(tu_buffer, buf, _buffer);
TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer); VK_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_empty_vs_params(cmd); tu6_emit_empty_vs_params(cmd);
@ -5578,9 +5578,9 @@ tu_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer,
uint32_t drawCount, uint32_t drawCount,
uint32_t stride) uint32_t stride)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _buffer); VK_FROM_HANDLE(tu_buffer, buf, _buffer);
TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer); VK_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_empty_vs_params(cmd); tu6_emit_empty_vs_params(cmd);
@ -5612,8 +5612,8 @@ tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
uint32_t counterOffset, uint32_t counterOffset,
uint32_t vertexStride) uint32_t vertexStride)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _counterBuffer); VK_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
struct tu_cs *cs = &cmd->draw_cs; struct tu_cs *cs = &cmd->draw_cs;
/* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO. /* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
@ -5998,7 +5998,7 @@ tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
uint32_t y, uint32_t y,
uint32_t z) uint32_t z)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
struct tu_dispatch_info info = {}; struct tu_dispatch_info info = {};
info.blocks[0] = x; info.blocks[0] = x;
@ -6018,8 +6018,8 @@ tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
VkBuffer _buffer, VkBuffer _buffer,
VkDeviceSize offset) VkDeviceSize offset)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buffer, _buffer); VK_FROM_HANDLE(tu_buffer, buffer, _buffer);
struct tu_dispatch_info info = {}; struct tu_dispatch_info info = {};
info.indirect = buffer; info.indirect = buffer;
@ -6033,7 +6033,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer, tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
const VkSubpassEndInfo *pSubpassEndInfo) const VkSubpassEndInfo *pSubpassEndInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
if (TU_DEBUG(DYNAMIC)) { if (TU_DEBUG(DYNAMIC)) {
vk_common_CmdEndRenderPass2(commandBuffer, pSubpassEndInfo); vk_common_CmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
@ -6056,7 +6056,7 @@ tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
VKAPI_ATTR void VKAPI_CALL VKAPI_ATTR void VKAPI_CALL
tu_CmdEndRendering(VkCommandBuffer commandBuffer) tu_CmdEndRendering(VkCommandBuffer commandBuffer)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
if (cmd_buffer->state.suspending) if (cmd_buffer->state.suspending)
cmd_buffer->state.suspended_pass.lrz = cmd_buffer->state.lrz; cmd_buffer->state.suspended_pass.lrz = cmd_buffer->state.lrz;
@ -6148,7 +6148,7 @@ tu_barrier(struct tu_cmd_buffer *cmd,
* to the image. We don't want these entries being flushed later and * to the image. We don't want these entries being flushed later and
* overwriting the actual image, so we need to flush the CCU. * overwriting the actual image, so we need to flush the CCU.
*/ */
TU_FROM_HANDLE(tu_image, image, dep_info->pImageMemoryBarriers[i].image); VK_FROM_HANDLE(tu_image, image, dep_info->pImageMemoryBarriers[i].image);
if (vk_format_is_depth_or_stencil(image->vk.format)) { if (vk_format_is_depth_or_stencil(image->vk.format)) {
src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE; src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
@ -6214,7 +6214,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdPipelineBarrier2(VkCommandBuffer commandBuffer, tu_CmdPipelineBarrier2(VkCommandBuffer commandBuffer,
const VkDependencyInfo *pDependencyInfo) const VkDependencyInfo *pDependencyInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
tu_barrier(cmd_buffer, pDependencyInfo); tu_barrier(cmd_buffer, pDependencyInfo);
} }
@ -6266,8 +6266,8 @@ tu_CmdSetEvent2(VkCommandBuffer commandBuffer,
VkEvent _event, VkEvent _event,
const VkDependencyInfo *pDependencyInfo) const VkDependencyInfo *pDependencyInfo)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_event, event, _event); VK_FROM_HANDLE(tu_event, event, _event);
VkPipelineStageFlags2 src_stage_mask = 0; VkPipelineStageFlags2 src_stage_mask = 0;
for (uint32_t i = 0; i < pDependencyInfo->memoryBarrierCount; i++) for (uint32_t i = 0; i < pDependencyInfo->memoryBarrierCount; i++)
@ -6287,8 +6287,8 @@ tu_CmdResetEvent2(VkCommandBuffer commandBuffer,
VkEvent _event, VkEvent _event,
VkPipelineStageFlags2 stageMask) VkPipelineStageFlags2 stageMask)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_event, event, _event); VK_FROM_HANDLE(tu_event, event, _event);
write_event<CHIP>(cmd, event, stageMask, 0); write_event<CHIP>(cmd, event, stageMask, 0);
} }
@ -6300,11 +6300,11 @@ tu_CmdWaitEvents2(VkCommandBuffer commandBuffer,
const VkEvent *pEvents, const VkEvent *pEvents,
const VkDependencyInfo* pDependencyInfos) const VkDependencyInfo* pDependencyInfos)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs; struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
for (uint32_t i = 0; i < eventCount; i++) { for (uint32_t i = 0; i < eventCount; i++) {
TU_FROM_HANDLE(tu_event, event, pEvents[i]); VK_FROM_HANDLE(tu_event, event, pEvents[i]);
tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6); tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) | tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
@ -6323,7 +6323,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer, tu_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin) const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
cmd->state.predication_active = true; cmd->state.predication_active = true;
@ -6338,7 +6338,7 @@ tu_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
else else
tu_emit_cache_flush<CHIP>(cmd); tu_emit_cache_flush<CHIP>(cmd);
TU_FROM_HANDLE(tu_buffer, buf, pConditionalRenderingBegin->buffer); VK_FROM_HANDLE(tu_buffer, buf, pConditionalRenderingBegin->buffer);
uint64_t iova = buf->iova + pConditionalRenderingBegin->offset; uint64_t iova = buf->iova + pConditionalRenderingBegin->offset;
/* qcom doesn't support 32-bit reference values, only 64-bit, but Vulkan /* qcom doesn't support 32-bit reference values, only 64-bit, but Vulkan
@ -6365,7 +6365,7 @@ TU_GENX(tu_CmdBeginConditionalRenderingEXT);
VKAPI_ATTR void VKAPI_CALL VKAPI_ATTR void VKAPI_CALL
tu_CmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer) tu_CmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
cmd->state.predication_active = false; cmd->state.predication_active = false;
@ -6384,8 +6384,8 @@ tu_CmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer,
uint32_t marker) uint32_t marker)
{ {
/* Almost the same as write_event, but also allowed in renderpass */ /* Almost the same as write_event, but also allowed in renderpass */
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer); VK_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
uint64_t va = buffer->iova + dstOffset; uint64_t va = buffer->iova + dstOffset;

View File

@ -131,9 +131,6 @@
#define MAX_FDM_TEXEL_SIZE_LOG2 10 #define MAX_FDM_TEXEL_SIZE_LOG2 10
#define MAX_FDM_TEXEL_SIZE (1u << MAX_FDM_TEXEL_SIZE_LOG2) #define MAX_FDM_TEXEL_SIZE (1u << MAX_FDM_TEXEL_SIZE_LOG2)
#define TU_FROM_HANDLE(__tu_type, __name, __handle) \
VK_FROM_HANDLE(__tu_type, __name, __handle)
#define TU_GPU_GENS A6XX, A7XX #define TU_GPU_GENS A6XX, A7XX
#define TU_GENX(FUNC_NAME) \ #define TU_GENX(FUNC_NAME) \
template <chip... CHIPs> constexpr auto FUNC_NAME##instantiate() \ template <chip... CHIPs> constexpr auto FUNC_NAME##instantiate() \

View File

@ -115,7 +115,7 @@ tu_CreateDescriptorSetLayout(
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) VkDescriptorSetLayout *pSetLayout)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_descriptor_set_layout *set_layout; struct tu_descriptor_set_layout *set_layout;
assert(pCreateInfo->sType == assert(pCreateInfo->sType ==
@ -320,7 +320,7 @@ tu_GetDescriptorSetLayoutSupport(
const VkDescriptorSetLayoutCreateInfo *pCreateInfo, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
VkDescriptorSetLayoutSupport *pSupport) VkDescriptorSetLayoutSupport *pSupport)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
VkDescriptorSetLayoutBinding *bindings = NULL; VkDescriptorSetLayoutBinding *bindings = NULL;
VkResult result = vk_create_sorted_bindings( VkResult result = vk_create_sorted_bindings(
@ -419,7 +419,7 @@ tu_GetDescriptorSetLayoutSizeEXT(
VkDescriptorSetLayout _layout, VkDescriptorSetLayout _layout,
VkDeviceSize *pLayoutSizeInBytes) VkDeviceSize *pLayoutSizeInBytes)
{ {
TU_FROM_HANDLE(tu_descriptor_set_layout, layout, _layout); VK_FROM_HANDLE(tu_descriptor_set_layout, layout, _layout);
*pLayoutSizeInBytes = layout->size; *pLayoutSizeInBytes = layout->size;
} }
@ -431,7 +431,7 @@ tu_GetDescriptorSetLayoutBindingOffsetEXT(
uint32_t binding, uint32_t binding,
VkDeviceSize *pOffset) VkDeviceSize *pOffset)
{ {
TU_FROM_HANDLE(tu_descriptor_set_layout, layout, _layout); VK_FROM_HANDLE(tu_descriptor_set_layout, layout, _layout);
assert(binding < layout->binding_count); assert(binding < layout->binding_count);
*pOffset = layout->binding[binding].offset; *pOffset = layout->binding[binding].offset;
@ -509,7 +509,7 @@ tu_CreatePipelineLayout(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkPipelineLayout *pPipelineLayout) VkPipelineLayout *pPipelineLayout)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_pipeline_layout *layout; struct tu_pipeline_layout *layout;
assert(pCreateInfo->sType == assert(pCreateInfo->sType ==
@ -523,7 +523,7 @@ tu_CreatePipelineLayout(VkDevice _device,
layout->num_sets = pCreateInfo->setLayoutCount; layout->num_sets = pCreateInfo->setLayoutCount;
for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) { for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
TU_FROM_HANDLE(tu_descriptor_set_layout, set_layout, VK_FROM_HANDLE(tu_descriptor_set_layout, set_layout,
pCreateInfo->pSetLayouts[set]); pCreateInfo->pSetLayouts[set]);
assert(set < device->physical_device->usable_sets); assert(set < device->physical_device->usable_sets);
@ -554,8 +554,8 @@ tu_DestroyPipelineLayout(VkDevice _device,
VkPipelineLayout _pipelineLayout, VkPipelineLayout _pipelineLayout,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_pipeline_layout, pipeline_layout, _pipelineLayout); VK_FROM_HANDLE(tu_pipeline_layout, pipeline_layout, _pipelineLayout);
if (!pipeline_layout) if (!pipeline_layout)
return; return;
@ -725,7 +725,7 @@ tu_CreateDescriptorPool(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkDescriptorPool *pDescriptorPool) VkDescriptorPool *pDescriptorPool)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_descriptor_pool *pool; struct tu_descriptor_pool *pool;
uint64_t size = sizeof(struct tu_descriptor_pool); uint64_t size = sizeof(struct tu_descriptor_pool);
uint64_t bo_size = 0, dynamic_size = 0; uint64_t bo_size = 0, dynamic_size = 0;
@ -840,8 +840,8 @@ tu_DestroyDescriptorPool(VkDevice _device,
VkDescriptorPool _pool, VkDescriptorPool _pool,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_pool, pool, _pool); VK_FROM_HANDLE(tu_descriptor_pool, pool, _pool);
if (!pool) if (!pool)
return; return;
@ -874,8 +874,8 @@ tu_ResetDescriptorPool(VkDevice _device,
VkDescriptorPool descriptorPool, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) VkDescriptorPoolResetFlags flags)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool); VK_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool);
list_for_each_entry_safe(struct tu_descriptor_set, set, list_for_each_entry_safe(struct tu_descriptor_set, set,
&pool->desc_sets, pool_link) { &pool->desc_sets, pool_link) {
@ -901,8 +901,8 @@ tu_AllocateDescriptorSets(VkDevice _device,
const VkDescriptorSetAllocateInfo *pAllocateInfo, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets) VkDescriptorSet *pDescriptorSets)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_pool, pool, pAllocateInfo->descriptorPool); VK_FROM_HANDLE(tu_descriptor_pool, pool, pAllocateInfo->descriptorPool);
VkResult result = VK_SUCCESS; VkResult result = VK_SUCCESS;
uint32_t i; uint32_t i;
@ -915,7 +915,7 @@ tu_AllocateDescriptorSets(VkDevice _device,
/* allocate a set of buffers for each shader to contain descriptors */ /* allocate a set of buffers for each shader to contain descriptors */
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) { for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
TU_FROM_HANDLE(tu_descriptor_set_layout, layout, VK_FROM_HANDLE(tu_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]); pAllocateInfo->pSetLayouts[i]);
assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR)); assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
@ -945,11 +945,11 @@ tu_FreeDescriptorSets(VkDevice _device,
uint32_t count, uint32_t count,
const VkDescriptorSet *pDescriptorSets) const VkDescriptorSet *pDescriptorSets)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool); VK_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool);
for (uint32_t i = 0; i < count; i++) { for (uint32_t i = 0; i < count; i++) {
TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]); VK_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
if (set) { if (set) {
vk_descriptor_set_layout_unref(&device->vk, &set->layout->vk); vk_descriptor_set_layout_unref(&device->vk, &set->layout->vk);
@ -983,7 +983,7 @@ write_texel_buffer_descriptor(uint32_t *dst, const VkBufferView buffer_view)
if (buffer_view == VK_NULL_HANDLE) { if (buffer_view == VK_NULL_HANDLE) {
memset(dst, 0, A6XX_TEX_CONST_DWORDS * sizeof(uint32_t)); memset(dst, 0, A6XX_TEX_CONST_DWORDS * sizeof(uint32_t));
} else { } else {
TU_FROM_HANDLE(tu_buffer_view, view, buffer_view); VK_FROM_HANDLE(tu_buffer_view, view, buffer_view);
memcpy(dst, view->descriptor, sizeof(view->descriptor)); memcpy(dst, view->descriptor, sizeof(view->descriptor));
} }
@ -992,7 +992,7 @@ write_texel_buffer_descriptor(uint32_t *dst, const VkBufferView buffer_view)
static VkDescriptorAddressInfoEXT static VkDescriptorAddressInfoEXT
buffer_info_to_address(const VkDescriptorBufferInfo *buffer_info) buffer_info_to_address(const VkDescriptorBufferInfo *buffer_info)
{ {
TU_FROM_HANDLE(tu_buffer, buffer, buffer_info->buffer); VK_FROM_HANDLE(tu_buffer, buffer, buffer_info->buffer);
uint32_t range = buffer ? vk_buffer_range(&buffer->vk, buffer_info->offset, buffer_info->range) : 0; uint32_t range = buffer ? vk_buffer_range(&buffer->vk, buffer_info->offset, buffer_info->range) : 0;
uint64_t va = buffer ? buffer->iova + buffer_info->offset : 0; uint64_t va = buffer ? buffer->iova + buffer_info->offset : 0;
@ -1092,7 +1092,7 @@ write_image_descriptor(uint32_t *dst,
return; return;
} }
TU_FROM_HANDLE(tu_image_view, iview, image_info->imageView); VK_FROM_HANDLE(tu_image_view, iview, image_info->imageView);
if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) { if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
memcpy(dst, iview->view.storage_descriptor, sizeof(iview->view.storage_descriptor)); memcpy(dst, iview->view.storage_descriptor, sizeof(iview->view.storage_descriptor));
@ -1110,7 +1110,7 @@ write_combined_image_sampler_descriptor(uint32_t *dst,
write_image_descriptor(dst, descriptor_type, image_info); write_image_descriptor(dst, descriptor_type, image_info);
/* copy over sampler state */ /* copy over sampler state */
if (has_sampler) { if (has_sampler) {
TU_FROM_HANDLE(tu_sampler, sampler, image_info->sampler); VK_FROM_HANDLE(tu_sampler, sampler, image_info->sampler);
memcpy(dst + A6XX_TEX_CONST_DWORDS, sampler->descriptor, sizeof(sampler->descriptor)); memcpy(dst + A6XX_TEX_CONST_DWORDS, sampler->descriptor, sizeof(sampler->descriptor));
} }
@ -1119,7 +1119,7 @@ write_combined_image_sampler_descriptor(uint32_t *dst,
static void static void
write_sampler_descriptor(uint32_t *dst, VkSampler _sampler) write_sampler_descriptor(uint32_t *dst, VkSampler _sampler)
{ {
TU_FROM_HANDLE(tu_sampler, sampler, _sampler); VK_FROM_HANDLE(tu_sampler, sampler, _sampler);
memcpy(dst, sampler->descriptor, sizeof(sampler->descriptor)); memcpy(dst, sampler->descriptor, sizeof(sampler->descriptor));
} }
@ -1138,7 +1138,7 @@ tu_GetDescriptorEXT(
size_t dataSize, size_t dataSize,
void *pDescriptor) void *pDescriptor)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
uint32_t *dest = (uint32_t *) pDescriptor; uint32_t *dest = (uint32_t *) pDescriptor;
switch (pDescriptorInfo->type) { switch (pDescriptorInfo->type) {
@ -1195,7 +1195,7 @@ tu_update_descriptor_sets(const struct tu_device *device,
uint32_t i, j; uint32_t i, j;
for (i = 0; i < descriptorWriteCount; i++) { for (i = 0; i < descriptorWriteCount; i++) {
const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i]; const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
TU_FROM_HANDLE(tu_descriptor_set, set, dstSetOverride ?: writeset->dstSet); VK_FROM_HANDLE(tu_descriptor_set, set, dstSetOverride ?: writeset->dstSet);
const struct tu_descriptor_set_binding_layout *binding_layout = const struct tu_descriptor_set_binding_layout *binding_layout =
set->layout->binding + writeset->dstBinding; set->layout->binding + writeset->dstBinding;
uint32_t *ptr = set->mapped_ptr; uint32_t *ptr = set->mapped_ptr;
@ -1302,9 +1302,9 @@ tu_update_descriptor_sets(const struct tu_device *device,
for (i = 0; i < descriptorCopyCount; i++) { for (i = 0; i < descriptorCopyCount; i++) {
const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i]; const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
TU_FROM_HANDLE(tu_descriptor_set, src_set, VK_FROM_HANDLE(tu_descriptor_set, src_set,
copyset->srcSet); copyset->srcSet);
TU_FROM_HANDLE(tu_descriptor_set, dst_set, VK_FROM_HANDLE(tu_descriptor_set, dst_set,
copyset->dstSet); copyset->dstSet);
const struct tu_descriptor_set_binding_layout *src_binding_layout = const struct tu_descriptor_set_binding_layout *src_binding_layout =
src_set->layout->binding + copyset->srcBinding; src_set->layout->binding + copyset->srcBinding;
@ -1387,7 +1387,7 @@ tu_UpdateDescriptorSets(VkDevice _device,
uint32_t descriptorCopyCount, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) const VkCopyDescriptorSet *pDescriptorCopies)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
tu_update_descriptor_sets(device, VK_NULL_HANDLE, tu_update_descriptor_sets(device, VK_NULL_HANDLE,
descriptorWriteCount, pDescriptorWrites, descriptorWriteCount, pDescriptorWrites,
descriptorCopyCount, pDescriptorCopies); descriptorCopyCount, pDescriptorCopies);
@ -1400,13 +1400,13 @@ tu_CreateDescriptorUpdateTemplate(
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_descriptor_set_layout *set_layout = NULL; struct tu_descriptor_set_layout *set_layout = NULL;
const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount; const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
uint32_t dst_entry_count = 0; uint32_t dst_entry_count = 0;
if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) { if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
TU_FROM_HANDLE(tu_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout); VK_FROM_HANDLE(tu_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout);
/* descriptorSetLayout should be ignored for push descriptors /* descriptorSetLayout should be ignored for push descriptors
* and instead it refers to pipelineLayout and set. * and instead it refers to pipelineLayout and set.
@ -1414,7 +1414,7 @@ tu_CreateDescriptorUpdateTemplate(
assert(pCreateInfo->set < device->physical_device->usable_sets); assert(pCreateInfo->set < device->physical_device->usable_sets);
set_layout = pipeline_layout->set[pCreateInfo->set].layout; set_layout = pipeline_layout->set[pCreateInfo->set].layout;
} else { } else {
TU_FROM_HANDLE(tu_descriptor_set_layout, _set_layout, VK_FROM_HANDLE(tu_descriptor_set_layout, _set_layout,
pCreateInfo->descriptorSetLayout); pCreateInfo->descriptorSetLayout);
set_layout = _set_layout; set_layout = _set_layout;
} }
@ -1543,8 +1543,8 @@ tu_DestroyDescriptorUpdateTemplate(
VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_update_template, templ, VK_FROM_HANDLE(tu_descriptor_update_template, templ,
descriptorUpdateTemplate); descriptorUpdateTemplate);
if (!templ) if (!templ)
@ -1560,7 +1560,7 @@ tu_update_descriptor_set_with_template(
VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const void *pData)
{ {
TU_FROM_HANDLE(tu_descriptor_update_template, templ, VK_FROM_HANDLE(tu_descriptor_update_template, templ,
descriptorUpdateTemplate); descriptorUpdateTemplate);
for (uint32_t i = 0; i < templ->entry_count; i++) { for (uint32_t i = 0; i < templ->entry_count; i++) {
@ -1646,8 +1646,8 @@ tu_UpdateDescriptorSetWithTemplate(
VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const void *pData)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_set, set, descriptorSet); VK_FROM_HANDLE(tu_descriptor_set, set, descriptorSet);
tu_update_descriptor_set_with_template(device, set, descriptorUpdateTemplate, pData); tu_update_descriptor_set_with_template(device, set, descriptorUpdateTemplate, pData);
} }
@ -1659,7 +1659,7 @@ tu_CreateSamplerYcbcrConversion(
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) VkSamplerYcbcrConversion *pYcbcrConversion)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_sampler_ycbcr_conversion *conversion; struct tu_sampler_ycbcr_conversion *conversion;
conversion = (struct tu_sampler_ycbcr_conversion *) vk_object_alloc( conversion = (struct tu_sampler_ycbcr_conversion *) vk_object_alloc(
@ -1685,8 +1685,8 @@ tu_DestroySamplerYcbcrConversion(VkDevice _device,
VkSamplerYcbcrConversion ycbcrConversion, VkSamplerYcbcrConversion ycbcrConversion,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion); VK_FROM_HANDLE(tu_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);
if (!ycbcr_conversion) if (!ycbcr_conversion)
return; return;

View File

@ -1366,7 +1366,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_DestroyInstance(VkInstance _instance, tu_DestroyInstance(VkInstance _instance,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_instance, instance, _instance); VK_FROM_HANDLE(tu_instance, instance, _instance);
if (!instance) if (!instance)
return; return;
@ -1453,7 +1453,7 @@ tu_GetPhysicalDeviceQueueFamilyProperties2(
uint32_t *pQueueFamilyPropertyCount, uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2 *pQueueFamilyProperties) VkQueueFamilyProperties2 *pQueueFamilyProperties)
{ {
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice); VK_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
VK_OUTARRAY_MAKE_TYPED(VkQueueFamilyProperties2, out, VK_OUTARRAY_MAKE_TYPED(VkQueueFamilyProperties2, out,
pQueueFamilyProperties, pQueueFamilyPropertyCount); pQueueFamilyProperties, pQueueFamilyPropertyCount);
@ -1525,7 +1525,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_GetPhysicalDeviceMemoryProperties2(VkPhysicalDevice pdev, tu_GetPhysicalDeviceMemoryProperties2(VkPhysicalDevice pdev,
VkPhysicalDeviceMemoryProperties2 *props2) VkPhysicalDeviceMemoryProperties2 *props2)
{ {
TU_FROM_HANDLE(tu_physical_device, physical_device, pdev); VK_FROM_HANDLE(tu_physical_device, physical_device, pdev);
VkPhysicalDeviceMemoryProperties *props = &props2->memoryProperties; VkPhysicalDeviceMemoryProperties *props = &props2->memoryProperties;
props->memoryHeapCount = 1; props->memoryHeapCount = 1;
@ -2100,7 +2100,7 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkDevice *pDevice) VkDevice *pDevice)
{ {
TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice); VK_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
VkResult result; VkResult result;
struct tu_device *device; struct tu_device *device;
bool custom_border_colors = false; bool custom_border_colors = false;
@ -2547,7 +2547,7 @@ fail_queues:
VKAPI_ATTR void VKAPI_CALL VKAPI_ATTR void VKAPI_CALL
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator) tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
if (!device) if (!device)
return; return;
@ -2705,7 +2705,7 @@ tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName) tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{ {
TU_FROM_HANDLE(tu_instance, instance, _instance); VK_FROM_HANDLE(tu_instance, instance, _instance);
return vk_instance_get_proc_addr(instance != NULL ? &instance->vk : NULL, return vk_instance_get_proc_addr(instance != NULL ? &instance->vk : NULL,
&tu_instance_entrypoints, &tu_instance_entrypoints,
pName); pName);
@ -2727,7 +2727,7 @@ tu_AllocateMemory(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkDeviceMemory *pMem) VkDeviceMemory *pMem)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_device_memory *mem; struct tu_device_memory *mem;
VkResult result; VkResult result;
@ -2851,8 +2851,8 @@ tu_FreeMemory(VkDevice _device,
VkDeviceMemory _mem, VkDeviceMemory _mem,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_device_memory, mem, _mem); VK_FROM_HANDLE(tu_device_memory, mem, _mem);
if (mem == NULL) if (mem == NULL)
return; return;
@ -2867,8 +2867,8 @@ tu_FreeMemory(VkDevice _device,
VKAPI_ATTR VkResult VKAPI_CALL VKAPI_ATTR VkResult VKAPI_CALL
tu_MapMemory2KHR(VkDevice _device, const VkMemoryMapInfoKHR *pMemoryMapInfo, void **ppData) tu_MapMemory2KHR(VkDevice _device, const VkMemoryMapInfoKHR *pMemoryMapInfo, void **ppData)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_device_memory, mem, pMemoryMapInfo->memory); VK_FROM_HANDLE(tu_device_memory, mem, pMemoryMapInfo->memory);
VkResult result; VkResult result;
if (mem == NULL) { if (mem == NULL) {
@ -2924,8 +2924,8 @@ tu_GetBufferMemoryRequirements2(
const VkBufferMemoryRequirementsInfo2 *pInfo, const VkBufferMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) VkMemoryRequirements2 *pMemoryRequirements)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer); VK_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer);
tu_get_buffer_memory_requirements(device, buffer->vk.size, pMemoryRequirements); tu_get_buffer_memory_requirements(device, buffer->vk.size, pMemoryRequirements);
} }
@ -2936,7 +2936,7 @@ tu_GetDeviceBufferMemoryRequirements(
const VkDeviceBufferMemoryRequirements *pInfo, const VkDeviceBufferMemoryRequirements *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) VkMemoryRequirements2 *pMemoryRequirements)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
tu_get_buffer_memory_requirements(device, pInfo->pCreateInfo->size, pMemoryRequirements); tu_get_buffer_memory_requirements(device, pInfo->pCreateInfo->size, pMemoryRequirements);
} }
@ -2953,11 +2953,11 @@ tu_BindBufferMemory2(VkDevice device,
uint32_t bindInfoCount, uint32_t bindInfoCount,
const VkBindBufferMemoryInfo *pBindInfos) const VkBindBufferMemoryInfo *pBindInfos)
{ {
TU_FROM_HANDLE(tu_device, dev, device); VK_FROM_HANDLE(tu_device, dev, device);
for (uint32_t i = 0; i < bindInfoCount; ++i) { for (uint32_t i = 0; i < bindInfoCount; ++i) {
TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory); VK_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer); VK_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
if (mem) { if (mem) {
buffer->bo = mem->bo; buffer->bo = mem->bo;
@ -2993,7 +2993,7 @@ tu_CreateEvent(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkEvent *pEvent) VkEvent *pEvent)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_event *event = (struct tu_event *) struct tu_event *event = (struct tu_event *)
vk_object_alloc(&device->vk, pAllocator, sizeof(*event), vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
@ -3028,8 +3028,8 @@ tu_DestroyEvent(VkDevice _device,
VkEvent _event, VkEvent _event,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_event, event, _event); VK_FROM_HANDLE(tu_event, event, _event);
if (!event) if (!event)
return; return;
@ -3043,8 +3043,8 @@ tu_DestroyEvent(VkDevice _device,
VKAPI_ATTR VkResult VKAPI_CALL VKAPI_ATTR VkResult VKAPI_CALL
tu_GetEventStatus(VkDevice _device, VkEvent _event) tu_GetEventStatus(VkDevice _device, VkEvent _event)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_event, event, _event); VK_FROM_HANDLE(tu_event, event, _event);
if (vk_device_is_lost(&device->vk)) if (vk_device_is_lost(&device->vk))
return VK_ERROR_DEVICE_LOST; return VK_ERROR_DEVICE_LOST;
@ -3057,7 +3057,7 @@ tu_GetEventStatus(VkDevice _device, VkEvent _event)
VKAPI_ATTR VkResult VKAPI_CALL VKAPI_ATTR VkResult VKAPI_CALL
tu_SetEvent(VkDevice _device, VkEvent _event) tu_SetEvent(VkDevice _device, VkEvent _event)
{ {
TU_FROM_HANDLE(tu_event, event, _event); VK_FROM_HANDLE(tu_event, event, _event);
*(uint64_t*) event->bo->map = 1; *(uint64_t*) event->bo->map = 1;
return VK_SUCCESS; return VK_SUCCESS;
@ -3066,7 +3066,7 @@ tu_SetEvent(VkDevice _device, VkEvent _event)
VKAPI_ATTR VkResult VKAPI_CALL VKAPI_ATTR VkResult VKAPI_CALL
tu_ResetEvent(VkDevice _device, VkEvent _event) tu_ResetEvent(VkDevice _device, VkEvent _event)
{ {
TU_FROM_HANDLE(tu_event, event, _event); VK_FROM_HANDLE(tu_event, event, _event);
*(uint64_t*) event->bo->map = 0; *(uint64_t*) event->bo->map = 0;
return VK_SUCCESS; return VK_SUCCESS;
@ -3078,7 +3078,7 @@ tu_CreateBuffer(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkBuffer *pBuffer) VkBuffer *pBuffer)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_buffer *buffer; struct tu_buffer *buffer;
buffer = (struct tu_buffer *) vk_buffer_create( buffer = (struct tu_buffer *) vk_buffer_create(
@ -3102,8 +3102,8 @@ tu_DestroyBuffer(VkDevice _device,
VkBuffer _buffer, VkBuffer _buffer,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_buffer, buffer, _buffer); VK_FROM_HANDLE(tu_buffer, buffer, _buffer);
if (!buffer) if (!buffer)
return; return;
@ -3123,13 +3123,13 @@ tu_CreateFramebuffer(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkFramebuffer *pFramebuffer) VkFramebuffer *pFramebuffer)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
if (TU_DEBUG(DYNAMIC)) if (TU_DEBUG(DYNAMIC))
return vk_common_CreateFramebuffer(_device, pCreateInfo, pAllocator, return vk_common_CreateFramebuffer(_device, pCreateInfo, pAllocator,
pFramebuffer); pFramebuffer);
TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass); VK_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
struct tu_framebuffer *framebuffer; struct tu_framebuffer *framebuffer;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO); assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
@ -3185,14 +3185,14 @@ tu_DestroyFramebuffer(VkDevice _device,
VkFramebuffer _fb, VkFramebuffer _fb,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
if (TU_DEBUG(DYNAMIC)) { if (TU_DEBUG(DYNAMIC)) {
vk_common_DestroyFramebuffer(_device, _fb, pAllocator); vk_common_DestroyFramebuffer(_device, _fb, pAllocator);
return; return;
} }
TU_FROM_HANDLE(tu_framebuffer, fb, _fb); VK_FROM_HANDLE(tu_framebuffer, fb, _fb);
if (!fb) if (!fb)
return; return;
@ -3293,7 +3293,7 @@ tu_CreateSampler(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkSampler *pSampler) VkSampler *pSampler)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_sampler *sampler; struct tu_sampler *sampler;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO); assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
@ -3314,8 +3314,8 @@ tu_DestroySampler(VkDevice _device,
VkSampler _sampler, VkSampler _sampler,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_sampler, sampler, _sampler); VK_FROM_HANDLE(tu_sampler, sampler, _sampler);
uint32_t border_color; uint32_t border_color;
if (!sampler) if (!sampler)
@ -3339,8 +3339,8 @@ tu_GetMemoryFdKHR(VkDevice _device,
const VkMemoryGetFdInfoKHR *pGetFdInfo, const VkMemoryGetFdInfoKHR *pGetFdInfo,
int *pFd) int *pFd)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory); VK_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR); assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
@ -3384,7 +3384,7 @@ tu_GetMemoryFdPropertiesKHR(VkDevice _device,
int fd, int fd,
VkMemoryFdPropertiesKHR *pMemoryFdProperties) VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT); assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
pMemoryFdProperties->memoryTypeBits = pMemoryFdProperties->memoryTypeBits =
(1 << device->physical_device->memory.type_count) - 1; (1 << device->physical_device->memory.type_count) - 1;
@ -3397,7 +3397,7 @@ tu_GetPhysicalDeviceMultisamplePropertiesEXT(
VkSampleCountFlagBits samples, VkSampleCountFlagBits samples,
VkMultisamplePropertiesEXT* pMultisampleProperties) VkMultisamplePropertiesEXT* pMultisampleProperties)
{ {
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice); VK_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->vk.supported_extensions.EXT_sample_locations) if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->vk.supported_extensions.EXT_sample_locations)
pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 }; pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
@ -3409,7 +3409,7 @@ VkDeviceAddress
tu_GetBufferDeviceAddress(VkDevice _device, tu_GetBufferDeviceAddress(VkDevice _device,
const VkBufferDeviceAddressInfo* pInfo) const VkBufferDeviceAddressInfo* pInfo)
{ {
TU_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer); VK_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer);
return buffer->iova; return buffer->iova;
} }
@ -3426,7 +3426,7 @@ uint64_t tu_GetDeviceMemoryOpaqueCaptureAddress(
VkDevice device, VkDevice device,
const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo) const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
{ {
TU_FROM_HANDLE(tu_device_memory, mem, pInfo->memory); VK_FROM_HANDLE(tu_device_memory, mem, pInfo->memory);
return mem->bo->iova; return mem->bo->iova;
} }

View File

@ -73,7 +73,7 @@ get_cmd_buffer(struct tu_device *dev, struct tu_cmd_buffer **cmd_buffer_out)
if (result != VK_SUCCESS) if (result != VK_SUCCESS)
return result; return result;
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, vk_buf); VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, vk_buf);
struct dynamic_rendering_entry entry = { struct dynamic_rendering_entry entry = {
.cmd_buffer = cmd_buffer, .cmd_buffer = cmd_buffer,

View File

@ -402,7 +402,7 @@ tu_GetPhysicalDeviceFormatProperties2(
VkFormat format, VkFormat format,
VkFormatProperties2 *pFormatProperties) VkFormatProperties2 *pFormatProperties)
{ {
TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice); VK_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
VkFormatProperties3 local_props3; VkFormatProperties3 local_props3;
VkFormatProperties3 *props3 = VkFormatProperties3 *props3 =
@ -713,7 +713,7 @@ tu_GetPhysicalDeviceImageFormatProperties2(
const VkPhysicalDeviceImageFormatInfo2 *base_info, const VkPhysicalDeviceImageFormatInfo2 *base_info,
VkImageFormatProperties2 *base_props) VkImageFormatProperties2 *base_props)
{ {
TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice); VK_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
const VkPhysicalDeviceExternalImageFormatInfo *external_info = NULL; const VkPhysicalDeviceExternalImageFormatInfo *external_info = NULL;
const VkPhysicalDeviceImageViewImageFormatInfoEXT *image_view_info = NULL; const VkPhysicalDeviceImageViewImageFormatInfoEXT *image_view_info = NULL;
VkExternalImageFormatProperties *external_props = NULL; VkExternalImageFormatProperties *external_props = NULL;

View File

@ -167,7 +167,7 @@ tu_image_view_init(struct tu_device *device,
const VkImageViewCreateInfo *pCreateInfo, const VkImageViewCreateInfo *pCreateInfo,
bool has_z24uint_s8uint) bool has_z24uint_s8uint)
{ {
TU_FROM_HANDLE(tu_image, image, pCreateInfo->image); VK_FROM_HANDLE(tu_image, image, pCreateInfo->image);
const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange; const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
VkFormat vk_format = pCreateInfo->format; VkFormat vk_format = pCreateInfo->format;
VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask; VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask;
@ -680,7 +680,7 @@ tu_CreateImage(VkDevice _device,
uint64_t modifier = DRM_FORMAT_MOD_INVALID; uint64_t modifier = DRM_FORMAT_MOD_INVALID;
const VkSubresourceLayout *plane_layouts = NULL; const VkSubresourceLayout *plane_layouts = NULL;
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
#ifdef TU_USE_WSI_PLATFORM #ifdef TU_USE_WSI_PLATFORM
/* Ignore swapchain creation info on Android. Since we don't have an /* Ignore swapchain creation info on Android. Since we don't have an
@ -771,8 +771,8 @@ tu_DestroyImage(VkDevice _device,
VkImage _image, VkImage _image,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_image, image, _image); VK_FROM_HANDLE(tu_image, image, _image);
if (!image) if (!image)
return; return;
@ -796,11 +796,11 @@ tu_BindImageMemory2(VkDevice _device,
uint32_t bindInfoCount, uint32_t bindInfoCount,
const VkBindImageMemoryInfo *pBindInfos) const VkBindImageMemoryInfo *pBindInfos)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
for (uint32_t i = 0; i < bindInfoCount; ++i) { for (uint32_t i = 0; i < bindInfoCount; ++i) {
TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image); VK_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory); VK_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
/* Ignore this struct on Android, we cannot access swapchain structures there. */ /* Ignore this struct on Android, we cannot access swapchain structures there. */
#ifdef TU_USE_WSI_PLATFORM #ifdef TU_USE_WSI_PLATFORM
@ -810,7 +810,7 @@ tu_BindImageMemory2(VkDevice _device,
if (swapchain_info && swapchain_info->swapchain != VK_NULL_HANDLE) { if (swapchain_info && swapchain_info->swapchain != VK_NULL_HANDLE) {
VkImage _wsi_image = wsi_common_get_image(swapchain_info->swapchain, VkImage _wsi_image = wsi_common_get_image(swapchain_info->swapchain,
swapchain_info->imageIndex); swapchain_info->imageIndex);
TU_FROM_HANDLE(tu_image, wsi_img, _wsi_image); VK_FROM_HANDLE(tu_image, wsi_img, _wsi_image);
image->bo = wsi_img->bo; image->bo = wsi_img->bo;
image->map = NULL; image->map = NULL;
@ -880,8 +880,8 @@ tu_GetImageMemoryRequirements2(VkDevice _device,
const VkImageMemoryRequirementsInfo2 *pInfo, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) VkMemoryRequirements2 *pMemoryRequirements)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_image, image, pInfo->image); VK_FROM_HANDLE(tu_image, image, pInfo->image);
tu_get_image_memory_requirements(device, image, pMemoryRequirements); tu_get_image_memory_requirements(device, image, pMemoryRequirements);
} }
@ -902,7 +902,7 @@ tu_GetDeviceImageMemoryRequirements(
const VkDeviceImageMemoryRequirements *pInfo, const VkDeviceImageMemoryRequirements *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) VkMemoryRequirements2 *pMemoryRequirements)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_image image = {0}; struct tu_image image = {0};
@ -957,7 +957,7 @@ tu_GetImageSubresourceLayout2KHR(VkDevice _device,
const VkImageSubresource2KHR *pSubresource, const VkImageSubresource2KHR *pSubresource,
VkSubresourceLayout2KHR *pLayout) VkSubresourceLayout2KHR *pLayout)
{ {
TU_FROM_HANDLE(tu_image, image, _image); VK_FROM_HANDLE(tu_image, image, _image);
tu_get_image_subresource_layout(image, pSubresource, pLayout); tu_get_image_subresource_layout(image, pSubresource, pLayout);
} }
@ -967,7 +967,7 @@ tu_GetDeviceImageSubresourceLayoutKHR(VkDevice _device,
const VkDeviceImageSubresourceInfoKHR *pInfo, const VkDeviceImageSubresourceInfoKHR *pInfo,
VkSubresourceLayout2KHR *pLayout) VkSubresourceLayout2KHR *pLayout)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_image image = {0}; struct tu_image image = {0};
@ -983,7 +983,7 @@ tu_CreateImageView(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkImageView *pView) VkImageView *pView)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_image_view *view; struct tu_image_view *view;
view = (struct tu_image_view *) vk_object_alloc( view = (struct tu_image_view *) vk_object_alloc(
@ -1003,8 +1003,8 @@ tu_DestroyImageView(VkDevice _device,
VkImageView _iview, VkImageView _iview,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_image_view, iview, _iview); VK_FROM_HANDLE(tu_image_view, iview, _iview);
if (!iview) if (!iview)
return; return;
@ -1017,7 +1017,7 @@ tu_buffer_view_init(struct tu_buffer_view *view,
struct tu_device *device, struct tu_device *device,
const VkBufferViewCreateInfo *pCreateInfo) const VkBufferViewCreateInfo *pCreateInfo)
{ {
TU_FROM_HANDLE(tu_buffer, buffer, pCreateInfo->buffer); VK_FROM_HANDLE(tu_buffer, buffer, pCreateInfo->buffer);
view->buffer = buffer; view->buffer = buffer;
@ -1037,7 +1037,7 @@ tu_CreateBufferView(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkBufferView *pView) VkBufferView *pView)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct tu_buffer_view *view; struct tu_buffer_view *view;
view = (struct tu_buffer_view *) vk_object_alloc( view = (struct tu_buffer_view *) vk_object_alloc(
@ -1057,8 +1057,8 @@ tu_DestroyBufferView(VkDevice _device,
VkBufferView bufferView, VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_buffer_view, view, bufferView); VK_FROM_HANDLE(tu_buffer_view, view, bufferView);
if (!view) if (!view)
return; return;

View File

@ -74,7 +74,7 @@ sync_cache(VkDevice _device,
uint32_t count, uint32_t count,
const VkMappedMemoryRange *ranges) const VkMappedMemoryRange *ranges)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
if (!device->physical_device->has_cached_non_coherent_memory) { if (!device->physical_device->has_cached_non_coherent_memory) {
tu_finishme( tu_finishme(
@ -83,7 +83,7 @@ sync_cache(VkDevice _device,
} }
for (uint32_t i = 0; i < count; i++) { for (uint32_t i = 0; i < count; i++) {
TU_FROM_HANDLE(tu_device_memory, mem, ranges[i].memory); VK_FROM_HANDLE(tu_device_memory, mem, ranges[i].memory);
tu_sync_cache_bo(device, mem->bo, ranges[i].offset, ranges[i].size, op); tu_sync_cache_bo(device, mem->bo, ranges[i].offset, ranges[i].size, op);
} }

View File

@ -238,7 +238,7 @@ kgsl_sync_cache(VkDevice _device,
uint32_t count, uint32_t count,
const VkMappedMemoryRange *ranges) const VkMappedMemoryRange *ranges)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
struct kgsl_gpuobj_sync_obj *sync_list = struct kgsl_gpuobj_sync_obj *sync_list =
(struct kgsl_gpuobj_sync_obj *) vk_zalloc( (struct kgsl_gpuobj_sync_obj *) vk_zalloc(
@ -252,7 +252,7 @@ kgsl_sync_cache(VkDevice _device,
}; };
for (uint32_t i = 0; i < count; i++) { for (uint32_t i = 0; i < count; i++) {
TU_FROM_HANDLE(tu_device_memory, mem, ranges[i].memory); VK_FROM_HANDLE(tu_device_memory, mem, ranges[i].memory);
sync_list[i].op = op; sync_list[i].op = op;
sync_list[i].id = mem->bo->gem_handle; sync_list[i].id = mem->bo->gem_handle;

View File

@ -799,7 +799,7 @@ tu_CreateRenderPass2(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkRenderPass *pRenderPass) VkRenderPass *pRenderPass)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
if (TU_DEBUG(DYNAMIC)) if (TU_DEBUG(DYNAMIC))
return vk_common_CreateRenderPass2(_device, pCreateInfo, pAllocator, return vk_common_CreateRenderPass2(_device, pCreateInfo, pAllocator,
@ -1015,14 +1015,14 @@ tu_DestroyRenderPass(VkDevice _device,
VkRenderPass _pass, VkRenderPass _pass,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
if (TU_DEBUG(DYNAMIC)) { if (TU_DEBUG(DYNAMIC)) {
vk_common_DestroyRenderPass(_device, _pass, pAllocator); vk_common_DestroyRenderPass(_device, _pass, pAllocator);
return; return;
} }
TU_FROM_HANDLE(tu_render_pass, pass, _pass); VK_FROM_HANDLE(tu_render_pass, pass, _pass);
if (!_pass) if (!_pass)
return; return;
@ -1078,7 +1078,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
continue; continue;
} }
TU_FROM_HANDLE(tu_image_view, view, att_info->imageView); VK_FROM_HANDLE(tu_image_view, view, att_info->imageView);
tu_setup_dynamic_attachment(att, view); tu_setup_dynamic_attachment(att, view);
att->gmem = true; att->gmem = true;
att->clear_views = info->viewMask; att->clear_views = info->viewMask;
@ -1094,7 +1094,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
if (att_info->resolveMode != VK_RESOLVE_MODE_NONE) { if (att_info->resolveMode != VK_RESOLVE_MODE_NONE) {
struct tu_render_pass_attachment *resolve_att = &pass->attachments[a]; struct tu_render_pass_attachment *resolve_att = &pass->attachments[a];
TU_FROM_HANDLE(tu_image_view, resolve_view, att_info->resolveImageView); VK_FROM_HANDLE(tu_image_view, resolve_view, att_info->resolveImageView);
tu_setup_dynamic_attachment(resolve_att, resolve_view); tu_setup_dynamic_attachment(resolve_att, resolve_view);
resolve_att->gmem = false; resolve_att->gmem = false;
attachment_set_ops( attachment_set_ops(
@ -1117,7 +1117,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
info->pStencilAttachment; info->pStencilAttachment;
if (common_info && common_info->imageView != VK_NULL_HANDLE) { if (common_info && common_info->imageView != VK_NULL_HANDLE) {
TU_FROM_HANDLE(tu_image_view, view, common_info->imageView); VK_FROM_HANDLE(tu_image_view, view, common_info->imageView);
struct tu_render_pass_attachment *att = &pass->attachments[a]; struct tu_render_pass_attachment *att = &pass->attachments[a];
tu_setup_dynamic_attachment(att, view); tu_setup_dynamic_attachment(att, view);
@ -1141,7 +1141,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
if (common_info->resolveMode != VK_RESOLVE_MODE_NONE) { if (common_info->resolveMode != VK_RESOLVE_MODE_NONE) {
unsigned i = subpass->resolve_count++; unsigned i = subpass->resolve_count++;
struct tu_render_pass_attachment *resolve_att = &pass->attachments[a]; struct tu_render_pass_attachment *resolve_att = &pass->attachments[a];
TU_FROM_HANDLE(tu_image_view, resolve_view, VK_FROM_HANDLE(tu_image_view, resolve_view,
common_info->resolveImageView); common_info->resolveImageView);
tu_setup_dynamic_attachment(resolve_att, resolve_view); tu_setup_dynamic_attachment(resolve_att, resolve_view);
resolve_att->gmem = false; resolve_att->gmem = false;
@ -1170,7 +1170,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT); RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT);
if (fdm_info && fdm_info->imageView != VK_NULL_HANDLE && if (fdm_info && fdm_info->imageView != VK_NULL_HANDLE &&
!tu_render_pass_disable_fdm(pass)) { !tu_render_pass_disable_fdm(pass)) {
TU_FROM_HANDLE(tu_image_view, view, fdm_info->imageView); VK_FROM_HANDLE(tu_image_view, view, fdm_info->imageView);
struct tu_render_pass_attachment *att = &pass->attachments[a]; struct tu_render_pass_attachment *att = &pass->attachments[a];
tu_setup_dynamic_attachment(att, view); tu_setup_dynamic_attachment(att, view);
@ -1264,7 +1264,7 @@ tu_GetRenderAreaGranularity(VkDevice _device,
VkRenderPass renderPass, VkRenderPass renderPass,
VkExtent2D *pGranularity) VkExtent2D *pGranularity)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
pGranularity->width = device->physical_device->info->gmem_align_w; pGranularity->width = device->physical_device->info->gmem_align_w;
pGranularity->height = device->physical_device->info->gmem_align_h; pGranularity->height = device->physical_device->info->gmem_align_h;
} }
@ -1274,7 +1274,7 @@ tu_GetRenderingAreaGranularityKHR(VkDevice _device,
const VkRenderingAreaInfoKHR *pRenderingAreaInfo, const VkRenderingAreaInfoKHR *pRenderingAreaInfo,
VkExtent2D *pGranularity) VkExtent2D *pGranularity)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
pGranularity->width = device->physical_device->info->gmem_align_w; pGranularity->width = device->physical_device->info->gmem_align_w;
pGranularity->height = device->physical_device->info->gmem_align_h; pGranularity->height = device->physical_device->info->gmem_align_h;
} }

View File

@ -2014,7 +2014,7 @@ tu_pipeline_builder_parse_libraries(struct tu_pipeline_builder *builder,
assert(library_info->libraryCount <= MAX_LIBRARIES); assert(library_info->libraryCount <= MAX_LIBRARIES);
builder->num_libraries = library_info->libraryCount; builder->num_libraries = library_info->libraryCount;
for (unsigned i = 0; i < library_info->libraryCount; i++) { for (unsigned i = 0; i < library_info->libraryCount; i++) {
TU_FROM_HANDLE(tu_pipeline, library, library_info->pLibraries[i]); VK_FROM_HANDLE(tu_pipeline, library, library_info->pLibraries[i]);
builder->libraries[i] = tu_pipeline_to_graphics_lib(library); builder->libraries[i] = tu_pipeline_to_graphics_lib(library);
} }
} }
@ -2073,7 +2073,7 @@ static void
tu_pipeline_builder_parse_layout(struct tu_pipeline_builder *builder, tu_pipeline_builder_parse_layout(struct tu_pipeline_builder *builder,
struct tu_pipeline *pipeline) struct tu_pipeline *pipeline)
{ {
TU_FROM_HANDLE(tu_pipeline_layout, layout, builder->create_info->layout); VK_FROM_HANDLE(tu_pipeline_layout, layout, builder->create_info->layout);
if (layout) { if (layout) {
/* Note: it's still valid to have a layout even if there are libraries. /* Note: it's still valid to have a layout even if there are libraries.
@ -4039,8 +4039,8 @@ tu_graphics_pipeline_create(VkDevice device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline) VkPipeline *pPipeline)
{ {
TU_FROM_HANDLE(tu_device, dev, device); VK_FROM_HANDLE(tu_device, dev, device);
TU_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache); VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
cache = cache ? cache : dev->mem_cache; cache = cache ? cache : dev->mem_cache;
@ -4109,9 +4109,9 @@ tu_compute_pipeline_create(VkDevice device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline) VkPipeline *pPipeline)
{ {
TU_FROM_HANDLE(tu_device, dev, device); VK_FROM_HANDLE(tu_device, dev, device);
TU_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache); VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout); VK_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage; const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
VkResult result; VkResult result;
const struct ir3_shader_variant *v = NULL; const struct ir3_shader_variant *v = NULL;
@ -4299,8 +4299,8 @@ tu_DestroyPipeline(VkDevice _device,
VkPipeline _pipeline, VkPipeline _pipeline,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, dev, _device); VK_FROM_HANDLE(tu_device, dev, _device);
TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline); VK_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
if (!_pipeline) if (!_pipeline)
return; return;
@ -4333,8 +4333,8 @@ tu_GetPipelineExecutablePropertiesKHR(
uint32_t* pExecutableCount, uint32_t* pExecutableCount,
VkPipelineExecutablePropertiesKHR* pProperties) VkPipelineExecutablePropertiesKHR* pProperties)
{ {
TU_FROM_HANDLE(tu_device, dev, _device); VK_FROM_HANDLE(tu_device, dev, _device);
TU_FROM_HANDLE(tu_pipeline, pipeline, pPipelineInfo->pipeline); VK_FROM_HANDLE(tu_pipeline, pipeline, pPipelineInfo->pipeline);
VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutablePropertiesKHR, out, VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutablePropertiesKHR, out,
pProperties, pExecutableCount); pProperties, pExecutableCount);
@ -4365,7 +4365,7 @@ tu_GetPipelineExecutableStatisticsKHR(
uint32_t* pStatisticCount, uint32_t* pStatisticCount,
VkPipelineExecutableStatisticKHR* pStatistics) VkPipelineExecutableStatisticKHR* pStatistics)
{ {
TU_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline); VK_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);
VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableStatisticKHR, out, VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableStatisticKHR, out,
pStatistics, pStatisticCount); pStatistics, pStatisticCount);
@ -4552,7 +4552,7 @@ tu_GetPipelineExecutableInternalRepresentationsKHR(
uint32_t* pInternalRepresentationCount, uint32_t* pInternalRepresentationCount,
VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations) VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
{ {
TU_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline); VK_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);
VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableInternalRepresentationKHR, out, VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableInternalRepresentationKHR, out,
pInternalRepresentations, pInternalRepresentationCount); pInternalRepresentations, pInternalRepresentationCount);
bool incomplete_text = false; bool incomplete_text = false;

View File

@ -214,7 +214,7 @@ tu_CreateQueryPool(VkDevice _device,
const VkAllocationCallbacks *pAllocator, const VkAllocationCallbacks *pAllocator,
VkQueryPool *pQueryPool) VkQueryPool *pQueryPool)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO); assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
assert(pCreateInfo->queryCount > 0); assert(pCreateInfo->queryCount > 0);
@ -347,8 +347,8 @@ tu_DestroyQueryPool(VkDevice _device,
VkQueryPool _pool, VkQueryPool _pool,
const VkAllocationCallbacks *pAllocator) const VkAllocationCallbacks *pAllocator)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_query_pool, pool, _pool); VK_FROM_HANDLE(tu_query_pool, pool, _pool);
if (!pool) if (!pool)
return; return;
@ -572,8 +572,8 @@ tu_GetQueryPoolResults(VkDevice _device,
VkDeviceSize stride, VkDeviceSize stride,
VkQueryResultFlags flags) VkQueryResultFlags flags)
{ {
TU_FROM_HANDLE(tu_device, device, _device); VK_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool); VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(firstQuery + queryCount <= pool->size); assert(firstQuery + queryCount <= pool->size);
if (vk_device_is_lost(&device->vk)) if (vk_device_is_lost(&device->vk))
@ -723,9 +723,9 @@ tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
VkDeviceSize stride, VkDeviceSize stride,
VkQueryResultFlags flags) VkQueryResultFlags flags)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool); VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer); VK_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
struct tu_cs *cs = &cmdbuf->cs; struct tu_cs *cs = &cmdbuf->cs;
assert(firstQuery + queryCount <= pool->size); assert(firstQuery + queryCount <= pool->size);
@ -792,8 +792,8 @@ tu_CmdResetQueryPool(VkCommandBuffer commandBuffer,
uint32_t firstQuery, uint32_t firstQuery,
uint32_t queryCount) uint32_t queryCount)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool); VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
switch (pool->type) { switch (pool->type) {
case VK_QUERY_TYPE_TIMESTAMP: case VK_QUERY_TYPE_TIMESTAMP:
@ -815,7 +815,7 @@ tu_ResetQueryPool(VkDevice device,
uint32_t firstQuery, uint32_t firstQuery,
uint32_t queryCount) uint32_t queryCount)
{ {
TU_FROM_HANDLE(tu_query_pool, pool, queryPool); VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
for (uint32_t i = 0; i < queryCount; i++) { for (uint32_t i = 0; i < queryCount; i++) {
struct query_slot *slot = slot_address(pool, i + firstQuery); struct query_slot *slot = slot_address(pool, i + firstQuery);
@ -1084,8 +1084,8 @@ tu_CmdBeginQuery(VkCommandBuffer commandBuffer,
uint32_t query, uint32_t query,
VkQueryControlFlags flags) VkQueryControlFlags flags)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool); VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(query < pool->size); assert(query < pool->size);
switch (pool->type) { switch (pool->type) {
@ -1124,8 +1124,8 @@ tu_CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer,
VkQueryControlFlags flags, VkQueryControlFlags flags,
uint32_t index) uint32_t index)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool); VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(query < pool->size); assert(query < pool->size);
switch (pool->type) { switch (pool->type) {
@ -1574,8 +1574,8 @@ tu_CmdEndQuery(VkCommandBuffer commandBuffer,
VkQueryPool queryPool, VkQueryPool queryPool,
uint32_t query) uint32_t query)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool); VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(query < pool->size); assert(query < pool->size);
switch (pool->type) { switch (pool->type) {
@ -1611,8 +1611,8 @@ tu_CmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer,
uint32_t query, uint32_t query,
uint32_t index) uint32_t index)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool); VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(query < pool->size); assert(query < pool->size);
switch (pool->type) { switch (pool->type) {
@ -1635,8 +1635,8 @@ tu_CmdWriteTimestamp2(VkCommandBuffer commandBuffer,
VkQueryPool queryPool, VkQueryPool queryPool,
uint32_t query) uint32_t query)
{ {
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer); VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool); VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
/* Inside a render pass, just write the timestamp multiple times so that /* Inside a render pass, just write the timestamp multiple times so that
* the user gets the last one if we use GMEM. There isn't really much * the user gets the last one if we use GMEM. There isn't really much
@ -1715,7 +1715,7 @@ tu_EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
VkPerformanceCounterKHR* pCounters, VkPerformanceCounterKHR* pCounters,
VkPerformanceCounterDescriptionKHR* pCounterDescriptions) VkPerformanceCounterDescriptionKHR* pCounterDescriptions)
{ {
TU_FROM_HANDLE(tu_physical_device, phydev, physicalDevice); VK_FROM_HANDLE(tu_physical_device, phydev, physicalDevice);
uint32_t desc_count = *pCounterCount; uint32_t desc_count = *pCounterCount;
uint32_t group_count; uint32_t group_count;
@ -1765,7 +1765,7 @@ tu_GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(
const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo,
uint32_t* pNumPasses) uint32_t* pNumPasses)
{ {
TU_FROM_HANDLE(tu_physical_device, phydev, physicalDevice); VK_FROM_HANDLE(tu_physical_device, phydev, physicalDevice);
uint32_t group_count = 0; uint32_t group_count = 0;
uint32_t gid = 0, cid = 0, n_passes; uint32_t gid = 0, cid = 0, n_passes;
const struct fd_perfcntr_group *group = const struct fd_perfcntr_group *group =

View File

@ -16,7 +16,7 @@
static VkResult static VkResult
capture_trace(VkQueue _queue) capture_trace(VkQueue _queue)
{ {
TU_FROM_HANDLE(tu_queue, queue, _queue); VK_FROM_HANDLE(tu_queue, queue, _queue);
struct tu_device *device = queue->device; struct tu_device *device = queue->device;
assert(device->vk.memory_trace_data.is_enabled); assert(device->vk.memory_trace_data.is_enabled);

View File

@ -17,14 +17,14 @@
static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
tu_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName) tu_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{ {
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice); VK_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
return vk_instance_get_proc_addr_unchecked(&pdevice->instance->vk, pName); return vk_instance_get_proc_addr_unchecked(&pdevice->instance->vk, pName);
} }
/* WSI callback: report whether presentation is possible on the DRM
 * device referred to by @fd.
 *
 * True when @fd refers to the same DRM device as the physical device's
 * own file descriptor (compared via wsi_common_drm_devices_equal).
 */
static bool
tu_wsi_can_present_on_device(VkPhysicalDevice physicalDevice, int fd)
{
   VK_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);

   const int local_fd = pdevice->local_fd;
   return wsi_common_drm_devices_equal(fd, local_fd);
}