tu: Replace TU_FROM_HANDLE with VK_FROM_HANDLE

It was exactly the same thing: TU_FROM_HANDLE was defined as a one-line alias for VK_FROM_HANDLE, so every call site can use the common macro directly.

Signed-off-by: Valentine Burley <valentine.burley@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/28571>
Authored by Valentine Burley on 2024-04-04 12:35:33 +00:00, committed by Marge Bot
parent a5adbae6f6
commit 4850aebcaf
17 changed files with 263 additions and 266 deletions
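
The wrapper being removed was a pure alias (its one-line definition is deleted in the header hunk below), so the rename is mechanical and changes no behavior. A minimal sketch of the equivalence, assuming the usual VK_FROM_HANDLE definition from Mesa's common Vulkan runtime (the expansion shown is illustrative, not copied verbatim from the headers):

   /* The driver-local wrapper removed by this commit was a one-line alias: */
   #define TU_FROM_HANDLE(__tu_type, __name, __handle) \
      VK_FROM_HANDLE(__tu_type, __name, __handle)

   /* The common macro declares a driver-struct pointer from a Vulkan
    * handle, roughly:
    *
    *    #define VK_FROM_HANDLE(__vk_type, __name, __handle) \
    *       struct __vk_type *__name = __vk_type##_from_handle(__handle)
    *
    * so a call site such as VK_FROM_HANDLE(tu_buffer, buf, _buffer)
    * expands to
    *
    *    struct tu_buffer *buf = tu_buffer_from_handle(_buffer);
    *
    * where tu_buffer_from_handle() comes from the driver's
    * VK_DEFINE_*_HANDLE_CASTS() declarations. Both spellings therefore
    * compile to identical code.
    */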


@ -12,7 +12,7 @@
VKAPI_ATTR VkResult VKAPI_CALL
tu_rmv_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
TU_FROM_HANDLE(tu_queue, queue, _queue);
VK_FROM_HANDLE(tu_queue, queue, _queue);
struct tu_device *device = queue->device;
VkResult result = wsi_QueuePresentKHR(_queue, pPresentInfo);
@ -28,7 +28,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
tu_rmv_FlushMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount,
const VkMappedMemoryRange *pMemoryRanges)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
VkResult result = tu_FlushMappedMemoryRanges(_device, memoryRangeCount,
pMemoryRanges);
@ -43,7 +43,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
tu_rmv_InvalidateMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount,
const VkMappedMemoryRange *pMemoryRanges)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
VkResult result = tu_InvalidateMappedMemoryRanges(_device, memoryRangeCount,
pMemoryRanges);
@ -58,7 +58,7 @@ VkResult tu_rmv_SetDebugUtilsObjectNameEXT(VkDevice _device,
const VkDebugUtilsObjectNameInfoEXT* pNameInfo)
{
assert(pNameInfo->sType == VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT);
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
VkResult result = vk_common_SetDebugUtilsObjectNameEXT(_device, pNameInfo);
if (result != VK_SUCCESS || !device->vk.memory_trace_data.is_enabled)


@ -298,7 +298,7 @@ static VkResult
format_supported_with_usage(VkDevice device_h, VkFormat format,
VkImageUsageFlags imageUsage)
{
TU_FROM_HANDLE(tu_device, device, device_h);
VK_FROM_HANDLE(tu_device, device, device_h);
struct tu_physical_device *phys_dev = device->physical_device;
VkPhysicalDevice phys_dev_h = tu_physical_device_to_handle(phys_dev);
VkResult result;
@ -389,7 +389,7 @@ tu_GetSwapchainGrallocUsageANDROID(VkDevice device_h,
VkImageUsageFlags imageUsage,
int *grallocUsage)
{
TU_FROM_HANDLE(tu_device, device, device_h);
VK_FROM_HANDLE(tu_device, device, device_h);
VkResult result;
result = format_supported_with_usage(device_h, format, imageUsage);
@ -409,7 +409,7 @@ tu_GetSwapchainGrallocUsage2ANDROID(VkDevice device_h,
uint64_t *grallocConsumerUsage,
uint64_t *grallocProducerUsage)
{
TU_FROM_HANDLE(tu_device, device, device_h);
VK_FROM_HANDLE(tu_device, device, device_h);
VkResult result;
*grallocConsumerUsage = 0;


@ -1996,9 +1996,9 @@ tu_CmdBlitImage2(VkCommandBuffer commandBuffer,
const VkBlitImageInfo2 *pBlitImageInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pBlitImageInfo->srcImage);
TU_FROM_HANDLE(tu_image, dst_image, pBlitImageInfo->dstImage);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_image, src_image, pBlitImageInfo->srcImage);
VK_FROM_HANDLE(tu_image, dst_image, pBlitImageInfo->dstImage);
for (uint32_t i = 0; i < pBlitImageInfo->regionCount; ++i) {
/* can't blit both depth and stencil at once with D32_S8
@ -2128,9 +2128,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, dst_image, pCopyBufferToImageInfo->dstImage);
TU_FROM_HANDLE(tu_buffer, src_buffer, pCopyBufferToImageInfo->srcBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_image, dst_image, pCopyBufferToImageInfo->dstImage);
VK_FROM_HANDLE(tu_buffer, src_buffer, pCopyBufferToImageInfo->srcBuffer);
for (unsigned i = 0; i < pCopyBufferToImageInfo->regionCount; ++i)
tu_copy_buffer_to_image<CHIP>(cmd, src_buffer, dst_image,
@ -2217,9 +2217,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pCopyImageToBufferInfo->srcImage);
TU_FROM_HANDLE(tu_buffer, dst_buffer, pCopyImageToBufferInfo->dstBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_image, src_image, pCopyImageToBufferInfo->srcImage);
VK_FROM_HANDLE(tu_buffer, dst_buffer, pCopyImageToBufferInfo->dstBuffer);
for (unsigned i = 0; i < pCopyImageToBufferInfo->regionCount; ++i)
tu_copy_image_to_buffer<CHIP>(cmd, src_image, dst_buffer,
@ -2461,9 +2461,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyImage2(VkCommandBuffer commandBuffer,
const VkCopyImageInfo2 *pCopyImageInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pCopyImageInfo->srcImage);
TU_FROM_HANDLE(tu_image, dst_image, pCopyImageInfo->dstImage);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_image, src_image, pCopyImageInfo->srcImage);
VK_FROM_HANDLE(tu_image, dst_image, pCopyImageInfo->dstImage);
for (uint32_t i = 0; i < pCopyImageInfo->regionCount; ++i) {
if (src_image->vk.format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
@ -2525,9 +2525,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdCopyBuffer2(VkCommandBuffer commandBuffer,
const VkCopyBufferInfo2 *pCopyBufferInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
TU_FROM_HANDLE(tu_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
VK_FROM_HANDLE(tu_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);
for (unsigned i = 0; i < pCopyBufferInfo->regionCount; ++i) {
const VkBufferCopy2 *region = &pCopyBufferInfo->pRegions[i];
@ -2547,8 +2547,8 @@ tu_CmdUpdateBuffer(VkCommandBuffer commandBuffer,
VkDeviceSize dataSize,
const void *pData)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
struct tu_cs_memory tmp;
VkResult result = tu_cs_alloc(&cmd->sub_cs, DIV_ROUND_UP(dataSize, 64), 64 / 4, &tmp);
@ -2570,8 +2570,8 @@ tu_CmdFillBuffer(VkCommandBuffer commandBuffer,
VkDeviceSize fillSize,
uint32_t data)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
const struct blit_ops *ops = &r2d_ops<CHIP>;
struct tu_cs *cs = &cmd->cs;
@ -2609,9 +2609,9 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdResolveImage2(VkCommandBuffer commandBuffer,
const VkResolveImageInfo2 *pResolveImageInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, src_image, pResolveImageInfo->srcImage);
TU_FROM_HANDLE(tu_image, dst_image, pResolveImageInfo->dstImage);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_image, src_image, pResolveImageInfo->srcImage);
VK_FROM_HANDLE(tu_image, dst_image, pResolveImageInfo->dstImage);
const struct blit_ops *ops = &r2d_ops<CHIP>;
struct tu_cs *cs = &cmd->cs;
@ -2811,8 +2811,8 @@ tu_CmdClearColorImage(VkCommandBuffer commandBuffer,
uint32_t rangeCount,
const VkImageSubresourceRange *pRanges)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, image, image_h);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_image, image, image_h);
for (unsigned i = 0; i < rangeCount; i++)
clear_image<CHIP>(cmd, image, (const VkClearValue*) pColor, pRanges + i, VK_IMAGE_ASPECT_COLOR_BIT);
@ -2828,8 +2828,8 @@ tu_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer,
uint32_t rangeCount,
const VkImageSubresourceRange *pRanges)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_image, image, image_h);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_image, image, image_h);
for (unsigned i = 0; i < rangeCount; i++) {
const VkImageSubresourceRange *range = &pRanges[i];
@ -3234,7 +3234,7 @@ tu_CmdClearAttachments(VkCommandBuffer commandBuffer,
uint32_t rectCount,
const VkClearRect *pRects)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs;
/* sysmem path behaves like a draw, note we don't have a way of using different


@ -2273,7 +2273,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
VkResult result = tu_cmd_buffer_begin(cmd_buffer, pBeginInfo);
if (result != VK_SUCCESS)
return result;
@ -2406,7 +2406,7 @@ tu_CmdBindVertexBuffers2(VkCommandBuffer commandBuffer,
const VkDeviceSize *pSizes,
const VkDeviceSize *pStrides)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs cs;
cmd->state.max_vbs_bound = MAX2(
@ -2448,8 +2448,8 @@ tu_CmdBindIndexBuffer2KHR(VkCommandBuffer commandBuffer,
VkDeviceSize size,
VkIndexType indexType)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, buffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buf, buffer);
size = vk_buffer_range(&buf->vk, offset, size);
@ -2580,8 +2580,8 @@ tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
unsigned dyn_idx = 0;
struct tu_descriptor_state *descriptors_state =
@ -2597,7 +2597,7 @@ tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet;
TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
VK_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
descriptors_state->sets[idx] = set;
descriptors_state->set_iova[idx] = set ?
@ -2707,7 +2707,7 @@ tu_CmdBindDescriptorBuffersEXT(
uint32_t bufferCount,
const VkDescriptorBufferBindingInfoEXT *pBindingInfos)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
for (unsigned i = 0; i < bufferCount; i++)
cmd->state.descriptor_buffer_iova[i] = pBindingInfos[i].address;
@ -2723,8 +2723,8 @@ tu_CmdSetDescriptorBufferOffsetsEXT(
const uint32_t *pBufferIndices,
const VkDeviceSize *pOffsets)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
struct tu_descriptor_state *descriptors_state =
tu_get_descriptors_state(cmd, pipelineBindPoint);
@ -2754,8 +2754,8 @@ tu_CmdBindDescriptorBufferEmbeddedSamplersEXT(
VkPipelineLayout _layout,
uint32_t set)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
@ -2804,8 +2804,8 @@ tu_CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer,
uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout);
struct tu_descriptor_set_layout *layout = pipe_layout->set[_set].layout;
struct tu_descriptor_set *set =
&tu_get_descriptors_state(cmd, pipelineBindPoint)->push_set;
@ -2843,9 +2843,9 @@ tu_CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
uint32_t _set,
const void* pData)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout);
TU_FROM_HANDLE(tu_descriptor_update_template, templ, descriptorUpdateTemplate);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout);
VK_FROM_HANDLE(tu_descriptor_update_template, templ, descriptorUpdateTemplate);
struct tu_descriptor_set_layout *layout = pipe_layout->set[_set].layout;
struct tu_descriptor_set *set =
&tu_get_descriptors_state(cmd, templ->bind_point)->push_set;
@ -2883,7 +2883,7 @@ tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
const VkDeviceSize *pOffsets,
const VkDeviceSize *pSizes)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs;
/* using COND_REG_EXEC for xfb commands matches the blob behavior
@ -2895,7 +2895,7 @@ tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
CP_COND_REG_EXEC_0_BINNING);
for (uint32_t i = 0; i < bindingCount; i++) {
TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
VK_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
uint64_t iova = buf->iova + pOffsets[i];
uint32_t size = buf->bo->size - (iova - buf->bo->iova);
uint32_t idx = i + firstBinding;
@ -2924,7 +2924,7 @@ tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs;
tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
@ -2945,7 +2945,7 @@ tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
if (!pCounterBuffers[i])
continue;
TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
VK_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
@ -2973,7 +2973,7 @@ tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs;
tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
@ -2998,7 +2998,7 @@ tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
if (!pCounterBuffers[i])
continue;
TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
VK_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
/* VPC_SO_FLUSH_BASE has dwords counter, but counter should be in bytes */
tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
@ -3037,7 +3037,7 @@ tu_CmdPushConstants(VkCommandBuffer commandBuffer,
uint32_t size,
const void *pValues)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
memcpy((char *) cmd->push_constants + offset, pValues, size);
cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
}
@ -3056,7 +3056,7 @@ template <chip CHIP>
VKAPI_ATTR VkResult VKAPI_CALL
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
/* We currently flush CCU at the end of the command buffer, like
* what the blob does. There's implicit synchronization around every
@ -3190,8 +3190,8 @@ tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint,
VkPipeline _pipeline)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
cmd->state.shaders[MESA_SHADER_COMPUTE] =
@ -3830,7 +3830,7 @@ tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
const VkCommandBuffer *pCmdBuffers)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VkResult result;
assert(commandBufferCount > 0);
@ -3845,7 +3845,7 @@ tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
}
for (uint32_t i = 0; i < commandBufferCount; i++) {
TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
VK_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
if (secondary->usage_flags &
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
@ -4140,7 +4140,7 @@ tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
if (TU_DEBUG(DYNAMIC)) {
vk_common_CmdBeginRenderPass2(commandBuffer, pRenderPassBegin,
@ -4148,8 +4148,8 @@ tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
return;
}
TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
VK_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
VK_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
const struct VkRenderPassAttachmentBeginInfo *pAttachmentInfo =
vk_find_struct_const(pRenderPassBegin->pNext,
@ -4217,7 +4217,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
const VkRenderingInfo *pRenderingInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
tu_setup_dynamic_render_pass(cmd, pRenderingInfo);
tu_setup_dynamic_framebuffer(cmd, pRenderingInfo);
@ -4238,13 +4238,13 @@ tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
cmd->state.clear_values[a] =
pRenderingInfo->pColorAttachments[i].clearValue;
TU_FROM_HANDLE(tu_image_view, view,
VK_FROM_HANDLE(tu_image_view, view,
pRenderingInfo->pColorAttachments[i].imageView);
cmd->state.attachments[a] = view;
a = cmd->dynamic_subpass.resolve_attachments[i].attachment;
if (a != VK_ATTACHMENT_UNUSED) {
TU_FROM_HANDLE(tu_image_view, resolve_view,
VK_FROM_HANDLE(tu_image_view, resolve_view,
pRenderingInfo->pColorAttachments[i].resolveImageView);
cmd->state.attachments[a] = resolve_view;
}
@ -4258,7 +4258,7 @@ tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
pRenderingInfo->pDepthAttachment :
pRenderingInfo->pStencilAttachment;
if (common_info && common_info->imageView != VK_NULL_HANDLE) {
TU_FROM_HANDLE(tu_image_view, view, common_info->imageView);
VK_FROM_HANDLE(tu_image_view, view, common_info->imageView);
cmd->state.attachments[a] = view;
if (pRenderingInfo->pDepthAttachment) {
cmd->state.clear_values[a].depthStencil.depth =
@ -4272,7 +4272,7 @@ tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
if (cmd->dynamic_subpass.resolve_count >
cmd->dynamic_subpass.color_count) {
TU_FROM_HANDLE(tu_image_view, resolve_view,
VK_FROM_HANDLE(tu_image_view, resolve_view,
common_info->resolveImageView);
a = cmd->dynamic_subpass.resolve_attachments[cmd->dynamic_subpass.color_count].attachment;
cmd->state.attachments[a] = resolve_view;
@ -4285,7 +4285,7 @@ tu_CmdBeginRendering(VkCommandBuffer commandBuffer,
const VkRenderingFragmentDensityMapAttachmentInfoEXT *fdm_info =
vk_find_struct_const(pRenderingInfo->pNext,
RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT);
TU_FROM_HANDLE(tu_image_view, view, fdm_info->imageView);
VK_FROM_HANDLE(tu_image_view, view, fdm_info->imageView);
cmd->state.attachments[a] = view;
}
@ -4371,7 +4371,7 @@ tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
if (TU_DEBUG(DYNAMIC)) {
vk_common_CmdNextSubpass2(commandBuffer, pSubpassBeginInfo,
@ -5320,7 +5320,7 @@ tu_CmdDraw(VkCommandBuffer commandBuffer,
uint32_t firstVertex,
uint32_t firstInstance)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_vs_params(cmd, 0, firstVertex, firstInstance);
@ -5343,7 +5343,7 @@ tu_CmdDrawMultiEXT(VkCommandBuffer commandBuffer,
uint32_t firstInstance,
uint32_t stride)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs;
if (!drawCount)
@ -5389,7 +5389,7 @@ tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
int32_t vertexOffset,
uint32_t firstInstance)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_vs_params(cmd, 0, vertexOffset, firstInstance);
@ -5416,7 +5416,7 @@ tu_CmdDrawMultiIndexedEXT(VkCommandBuffer commandBuffer,
uint32_t stride,
const int32_t *pVertexOffset)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->draw_cs;
if (!drawCount)
@ -5479,8 +5479,8 @@ tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
uint32_t drawCount,
uint32_t stride)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _buffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buf, _buffer);
struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_empty_vs_params(cmd);
@ -5508,8 +5508,8 @@ tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
uint32_t drawCount,
uint32_t stride)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _buffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buf, _buffer);
struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_empty_vs_params(cmd);
@ -5541,9 +5541,9 @@ tu_CmdDrawIndirectCount(VkCommandBuffer commandBuffer,
uint32_t drawCount,
uint32_t stride)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _buffer);
TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buf, _buffer);
VK_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_empty_vs_params(cmd);
@ -5578,9 +5578,9 @@ tu_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer,
uint32_t drawCount,
uint32_t stride)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _buffer);
TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buf, _buffer);
VK_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
struct tu_cs *cs = &cmd->draw_cs;
tu6_emit_empty_vs_params(cmd);
@ -5612,8 +5612,8 @@ tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
uint32_t counterOffset,
uint32_t vertexStride)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
struct tu_cs *cs = &cmd->draw_cs;
/* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
@ -5998,7 +5998,7 @@ tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
uint32_t y,
uint32_t z)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
struct tu_dispatch_info info = {};
info.blocks[0] = x;
@ -6018,8 +6018,8 @@ tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buffer, _buffer);
struct tu_dispatch_info info = {};
info.indirect = buffer;
@ -6033,7 +6033,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
const VkSubpassEndInfo *pSubpassEndInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
if (TU_DEBUG(DYNAMIC)) {
vk_common_CmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
@ -6056,7 +6056,7 @@ tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
VKAPI_ATTR void VKAPI_CALL
tu_CmdEndRendering(VkCommandBuffer commandBuffer)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
if (cmd_buffer->state.suspending)
cmd_buffer->state.suspended_pass.lrz = cmd_buffer->state.lrz;
@ -6148,7 +6148,7 @@ tu_barrier(struct tu_cmd_buffer *cmd,
* to the image. We don't want these entries being flushed later and
* overwriting the actual image, so we need to flush the CCU.
*/
TU_FROM_HANDLE(tu_image, image, dep_info->pImageMemoryBarriers[i].image);
VK_FROM_HANDLE(tu_image, image, dep_info->pImageMemoryBarriers[i].image);
if (vk_format_is_depth_or_stencil(image->vk.format)) {
src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
@ -6214,7 +6214,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdPipelineBarrier2(VkCommandBuffer commandBuffer,
const VkDependencyInfo *pDependencyInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
tu_barrier(cmd_buffer, pDependencyInfo);
}
@ -6266,8 +6266,8 @@ tu_CmdSetEvent2(VkCommandBuffer commandBuffer,
VkEvent _event,
const VkDependencyInfo *pDependencyInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_event, event, _event);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_event, event, _event);
VkPipelineStageFlags2 src_stage_mask = 0;
for (uint32_t i = 0; i < pDependencyInfo->memoryBarrierCount; i++)
@ -6287,8 +6287,8 @@ tu_CmdResetEvent2(VkCommandBuffer commandBuffer,
VkEvent _event,
VkPipelineStageFlags2 stageMask)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_event, event, _event);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_event, event, _event);
write_event<CHIP>(cmd, event, stageMask, 0);
}
@ -6300,11 +6300,11 @@ tu_CmdWaitEvents2(VkCommandBuffer commandBuffer,
const VkEvent *pEvents,
const VkDependencyInfo* pDependencyInfos)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
for (uint32_t i = 0; i < eventCount; i++) {
TU_FROM_HANDLE(tu_event, event, pEvents[i]);
VK_FROM_HANDLE(tu_event, event, pEvents[i]);
tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
@ -6323,7 +6323,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
cmd->state.predication_active = true;
@ -6338,7 +6338,7 @@ tu_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
else
tu_emit_cache_flush<CHIP>(cmd);
TU_FROM_HANDLE(tu_buffer, buf, pConditionalRenderingBegin->buffer);
VK_FROM_HANDLE(tu_buffer, buf, pConditionalRenderingBegin->buffer);
uint64_t iova = buf->iova + pConditionalRenderingBegin->offset;
/* qcom doesn't support 32-bit reference values, only 64-bit, but Vulkan
@ -6365,7 +6365,7 @@ TU_GENX(tu_CmdBeginConditionalRenderingEXT);
VKAPI_ATTR void VKAPI_CALL
tu_CmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
cmd->state.predication_active = false;
@ -6384,8 +6384,8 @@ tu_CmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer,
uint32_t marker)
{
/* Almost the same as write_event, but also allowed in renderpass */
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
uint64_t va = buffer->iova + dstOffset;


@ -131,9 +131,6 @@
#define MAX_FDM_TEXEL_SIZE_LOG2 10
#define MAX_FDM_TEXEL_SIZE (1u << MAX_FDM_TEXEL_SIZE_LOG2)
#define TU_FROM_HANDLE(__tu_type, __name, __handle) \
VK_FROM_HANDLE(__tu_type, __name, __handle)
#define TU_GPU_GENS A6XX, A7XX
#define TU_GENX(FUNC_NAME) \
template <chip... CHIPs> constexpr auto FUNC_NAME##instantiate() \


@ -115,7 +115,7 @@ tu_CreateDescriptorSetLayout(
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_descriptor_set_layout *set_layout;
assert(pCreateInfo->sType ==
@ -320,7 +320,7 @@ tu_GetDescriptorSetLayoutSupport(
const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
VkDescriptorSetLayoutSupport *pSupport)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
VkDescriptorSetLayoutBinding *bindings = NULL;
VkResult result = vk_create_sorted_bindings(
@ -419,7 +419,7 @@ tu_GetDescriptorSetLayoutSizeEXT(
VkDescriptorSetLayout _layout,
VkDeviceSize *pLayoutSizeInBytes)
{
TU_FROM_HANDLE(tu_descriptor_set_layout, layout, _layout);
VK_FROM_HANDLE(tu_descriptor_set_layout, layout, _layout);
*pLayoutSizeInBytes = layout->size;
}
@ -431,7 +431,7 @@ tu_GetDescriptorSetLayoutBindingOffsetEXT(
uint32_t binding,
VkDeviceSize *pOffset)
{
TU_FROM_HANDLE(tu_descriptor_set_layout, layout, _layout);
VK_FROM_HANDLE(tu_descriptor_set_layout, layout, _layout);
assert(binding < layout->binding_count);
*pOffset = layout->binding[binding].offset;
@ -509,7 +509,7 @@ tu_CreatePipelineLayout(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkPipelineLayout *pPipelineLayout)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_pipeline_layout *layout;
assert(pCreateInfo->sType ==
@ -523,7 +523,7 @@ tu_CreatePipelineLayout(VkDevice _device,
layout->num_sets = pCreateInfo->setLayoutCount;
for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
TU_FROM_HANDLE(tu_descriptor_set_layout, set_layout,
VK_FROM_HANDLE(tu_descriptor_set_layout, set_layout,
pCreateInfo->pSetLayouts[set]);
assert(set < device->physical_device->usable_sets);
@ -554,8 +554,8 @@ tu_DestroyPipelineLayout(VkDevice _device,
VkPipelineLayout _pipelineLayout,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_pipeline_layout, pipeline_layout, _pipelineLayout);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_pipeline_layout, pipeline_layout, _pipelineLayout);
if (!pipeline_layout)
return;
@ -725,7 +725,7 @@ tu_CreateDescriptorPool(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkDescriptorPool *pDescriptorPool)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_descriptor_pool *pool;
uint64_t size = sizeof(struct tu_descriptor_pool);
uint64_t bo_size = 0, dynamic_size = 0;
@ -840,8 +840,8 @@ tu_DestroyDescriptorPool(VkDevice _device,
VkDescriptorPool _pool,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_pool, pool, _pool);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_descriptor_pool, pool, _pool);
if (!pool)
return;
@ -874,8 +874,8 @@ tu_ResetDescriptorPool(VkDevice _device,
VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool);
list_for_each_entry_safe(struct tu_descriptor_set, set,
&pool->desc_sets, pool_link) {
@ -901,8 +901,8 @@ tu_AllocateDescriptorSets(VkDevice _device,
const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_pool, pool, pAllocateInfo->descriptorPool);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_descriptor_pool, pool, pAllocateInfo->descriptorPool);
VkResult result = VK_SUCCESS;
uint32_t i;
@ -915,7 +915,7 @@ tu_AllocateDescriptorSets(VkDevice _device,
/* allocate a set of buffers for each shader to contain descriptors */
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
TU_FROM_HANDLE(tu_descriptor_set_layout, layout,
VK_FROM_HANDLE(tu_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]);
assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
@ -945,11 +945,11 @@ tu_FreeDescriptorSets(VkDevice _device,
uint32_t count,
const VkDescriptorSet *pDescriptorSets)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool);
for (uint32_t i = 0; i < count; i++) {
TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
VK_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
if (set) {
vk_descriptor_set_layout_unref(&device->vk, &set->layout->vk);
@ -983,7 +983,7 @@ write_texel_buffer_descriptor(uint32_t *dst, const VkBufferView buffer_view)
if (buffer_view == VK_NULL_HANDLE) {
memset(dst, 0, A6XX_TEX_CONST_DWORDS * sizeof(uint32_t));
} else {
TU_FROM_HANDLE(tu_buffer_view, view, buffer_view);
VK_FROM_HANDLE(tu_buffer_view, view, buffer_view);
memcpy(dst, view->descriptor, sizeof(view->descriptor));
}
@ -992,7 +992,7 @@ write_texel_buffer_descriptor(uint32_t *dst, const VkBufferView buffer_view)
static VkDescriptorAddressInfoEXT
buffer_info_to_address(const VkDescriptorBufferInfo *buffer_info)
{
TU_FROM_HANDLE(tu_buffer, buffer, buffer_info->buffer);
VK_FROM_HANDLE(tu_buffer, buffer, buffer_info->buffer);
uint32_t range = buffer ? vk_buffer_range(&buffer->vk, buffer_info->offset, buffer_info->range) : 0;
uint64_t va = buffer ? buffer->iova + buffer_info->offset : 0;
@ -1092,7 +1092,7 @@ write_image_descriptor(uint32_t *dst,
return;
}
TU_FROM_HANDLE(tu_image_view, iview, image_info->imageView);
VK_FROM_HANDLE(tu_image_view, iview, image_info->imageView);
if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
memcpy(dst, iview->view.storage_descriptor, sizeof(iview->view.storage_descriptor));
@ -1110,7 +1110,7 @@ write_combined_image_sampler_descriptor(uint32_t *dst,
write_image_descriptor(dst, descriptor_type, image_info);
/* copy over sampler state */
if (has_sampler) {
TU_FROM_HANDLE(tu_sampler, sampler, image_info->sampler);
VK_FROM_HANDLE(tu_sampler, sampler, image_info->sampler);
memcpy(dst + A6XX_TEX_CONST_DWORDS, sampler->descriptor, sizeof(sampler->descriptor));
}
@ -1119,7 +1119,7 @@ write_combined_image_sampler_descriptor(uint32_t *dst,
static void
write_sampler_descriptor(uint32_t *dst, VkSampler _sampler)
{
TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
VK_FROM_HANDLE(tu_sampler, sampler, _sampler);
memcpy(dst, sampler->descriptor, sizeof(sampler->descriptor));
}
@ -1138,7 +1138,7 @@ tu_GetDescriptorEXT(
size_t dataSize,
void *pDescriptor)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
uint32_t *dest = (uint32_t *) pDescriptor;
switch (pDescriptorInfo->type) {
@ -1195,7 +1195,7 @@ tu_update_descriptor_sets(const struct tu_device *device,
uint32_t i, j;
for (i = 0; i < descriptorWriteCount; i++) {
const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
TU_FROM_HANDLE(tu_descriptor_set, set, dstSetOverride ?: writeset->dstSet);
VK_FROM_HANDLE(tu_descriptor_set, set, dstSetOverride ?: writeset->dstSet);
const struct tu_descriptor_set_binding_layout *binding_layout =
set->layout->binding + writeset->dstBinding;
uint32_t *ptr = set->mapped_ptr;
@ -1302,9 +1302,9 @@ tu_update_descriptor_sets(const struct tu_device *device,
for (i = 0; i < descriptorCopyCount; i++) {
const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
TU_FROM_HANDLE(tu_descriptor_set, src_set,
VK_FROM_HANDLE(tu_descriptor_set, src_set,
copyset->srcSet);
TU_FROM_HANDLE(tu_descriptor_set, dst_set,
VK_FROM_HANDLE(tu_descriptor_set, dst_set,
copyset->dstSet);
const struct tu_descriptor_set_binding_layout *src_binding_layout =
src_set->layout->binding + copyset->srcBinding;
@ -1387,7 +1387,7 @@ tu_UpdateDescriptorSets(VkDevice _device,
uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
tu_update_descriptor_sets(device, VK_NULL_HANDLE,
descriptorWriteCount, pDescriptorWrites,
descriptorCopyCount, pDescriptorCopies);
@ -1400,13 +1400,13 @@ tu_CreateDescriptorUpdateTemplate(
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_descriptor_set_layout *set_layout = NULL;
const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
uint32_t dst_entry_count = 0;
if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
TU_FROM_HANDLE(tu_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout);
VK_FROM_HANDLE(tu_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout);
/* descriptorSetLayout should be ignored for push descriptors
* and instead it refers to pipelineLayout and set.
@ -1414,7 +1414,7 @@ tu_CreateDescriptorUpdateTemplate(
assert(pCreateInfo->set < device->physical_device->usable_sets);
set_layout = pipeline_layout->set[pCreateInfo->set].layout;
} else {
TU_FROM_HANDLE(tu_descriptor_set_layout, _set_layout,
VK_FROM_HANDLE(tu_descriptor_set_layout, _set_layout,
pCreateInfo->descriptorSetLayout);
set_layout = _set_layout;
}
@ -1543,8 +1543,8 @@ tu_DestroyDescriptorUpdateTemplate(
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_update_template, templ,
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_descriptor_update_template, templ,
descriptorUpdateTemplate);
if (!templ)
@ -1560,7 +1560,7 @@ tu_update_descriptor_set_with_template(
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData)
{
TU_FROM_HANDLE(tu_descriptor_update_template, templ,
VK_FROM_HANDLE(tu_descriptor_update_template, templ,
descriptorUpdateTemplate);
for (uint32_t i = 0; i < templ->entry_count; i++) {
@ -1646,8 +1646,8 @@ tu_UpdateDescriptorSetWithTemplate(
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_descriptor_set, set, descriptorSet);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_descriptor_set, set, descriptorSet);
tu_update_descriptor_set_with_template(device, set, descriptorUpdateTemplate, pData);
}
@ -1659,7 +1659,7 @@ tu_CreateSamplerYcbcrConversion(
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_sampler_ycbcr_conversion *conversion;
conversion = (struct tu_sampler_ycbcr_conversion *) vk_object_alloc(
@ -1685,8 +1685,8 @@ tu_DestroySamplerYcbcrConversion(VkDevice _device,
VkSamplerYcbcrConversion ycbcrConversion,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);
if (!ycbcr_conversion)
return;


@ -1366,7 +1366,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_DestroyInstance(VkInstance _instance,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_instance, instance, _instance);
VK_FROM_HANDLE(tu_instance, instance, _instance);
if (!instance)
return;
@ -1453,7 +1453,7 @@ tu_GetPhysicalDeviceQueueFamilyProperties2(
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
VK_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
VK_OUTARRAY_MAKE_TYPED(VkQueueFamilyProperties2, out,
pQueueFamilyProperties, pQueueFamilyPropertyCount);
@ -1525,7 +1525,7 @@ VKAPI_ATTR void VKAPI_CALL
tu_GetPhysicalDeviceMemoryProperties2(VkPhysicalDevice pdev,
VkPhysicalDeviceMemoryProperties2 *props2)
{
TU_FROM_HANDLE(tu_physical_device, physical_device, pdev);
VK_FROM_HANDLE(tu_physical_device, physical_device, pdev);
VkPhysicalDeviceMemoryProperties *props = &props2->memoryProperties;
props->memoryHeapCount = 1;
@ -2100,7 +2100,7 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice,
const VkAllocationCallbacks *pAllocator,
VkDevice *pDevice)
{
TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
VK_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
VkResult result;
struct tu_device *device;
bool custom_border_colors = false;
@ -2547,7 +2547,7 @@ fail_queues:
VKAPI_ATTR void VKAPI_CALL
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
if (!device)
return;
@ -2705,7 +2705,7 @@ tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
TU_FROM_HANDLE(tu_instance, instance, _instance);
VK_FROM_HANDLE(tu_instance, instance, _instance);
return vk_instance_get_proc_addr(instance != NULL ? &instance->vk : NULL,
&tu_instance_entrypoints,
pName);
@ -2727,7 +2727,7 @@ tu_AllocateMemory(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkDeviceMemory *pMem)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_device_memory *mem;
VkResult result;
@ -2851,8 +2851,8 @@ tu_FreeMemory(VkDevice _device,
VkDeviceMemory _mem,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_device_memory, mem, _mem);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device_memory, mem, _mem);
if (mem == NULL)
return;
@ -2867,8 +2867,8 @@ tu_FreeMemory(VkDevice _device,
VKAPI_ATTR VkResult VKAPI_CALL
tu_MapMemory2KHR(VkDevice _device, const VkMemoryMapInfoKHR *pMemoryMapInfo, void **ppData)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_device_memory, mem, pMemoryMapInfo->memory);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device_memory, mem, pMemoryMapInfo->memory);
VkResult result;
if (mem == NULL) {
@ -2924,8 +2924,8 @@ tu_GetBufferMemoryRequirements2(
const VkBufferMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer);
tu_get_buffer_memory_requirements(device, buffer->vk.size, pMemoryRequirements);
}
@ -2936,7 +2936,7 @@ tu_GetDeviceBufferMemoryRequirements(
const VkDeviceBufferMemoryRequirements *pInfo,
VkMemoryRequirements2 *pMemoryRequirements)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
tu_get_buffer_memory_requirements(device, pInfo->pCreateInfo->size, pMemoryRequirements);
}
@ -2953,11 +2953,11 @@ tu_BindBufferMemory2(VkDevice device,
uint32_t bindInfoCount,
const VkBindBufferMemoryInfo *pBindInfos)
{
TU_FROM_HANDLE(tu_device, dev, device);
VK_FROM_HANDLE(tu_device, dev, device);
for (uint32_t i = 0; i < bindInfoCount; ++i) {
TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
VK_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
VK_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
if (mem) {
buffer->bo = mem->bo;
@ -2993,7 +2993,7 @@ tu_CreateEvent(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkEvent *pEvent)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_event *event = (struct tu_event *)
vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
@ -3028,8 +3028,8 @@ tu_DestroyEvent(VkDevice _device,
VkEvent _event,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_event, event, _event);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_event, event, _event);
if (!event)
return;
@ -3043,8 +3043,8 @@ tu_DestroyEvent(VkDevice _device,
VKAPI_ATTR VkResult VKAPI_CALL
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_event, event, _event);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_event, event, _event);
if (vk_device_is_lost(&device->vk))
return VK_ERROR_DEVICE_LOST;
@ -3057,7 +3057,7 @@ tu_GetEventStatus(VkDevice _device, VkEvent _event)
VKAPI_ATTR VkResult VKAPI_CALL
tu_SetEvent(VkDevice _device, VkEvent _event)
{
TU_FROM_HANDLE(tu_event, event, _event);
VK_FROM_HANDLE(tu_event, event, _event);
*(uint64_t*) event->bo->map = 1;
return VK_SUCCESS;
@ -3066,7 +3066,7 @@ tu_SetEvent(VkDevice _device, VkEvent _event)
VKAPI_ATTR VkResult VKAPI_CALL
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
TU_FROM_HANDLE(tu_event, event, _event);
VK_FROM_HANDLE(tu_event, event, _event);
*(uint64_t*) event->bo->map = 0;
return VK_SUCCESS;
@ -3078,7 +3078,7 @@ tu_CreateBuffer(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkBuffer *pBuffer)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_buffer *buffer;
buffer = (struct tu_buffer *) vk_buffer_create(
@ -3102,8 +3102,8 @@ tu_DestroyBuffer(VkDevice _device,
VkBuffer _buffer,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_buffer, buffer, _buffer);
if (!buffer)
return;
@ -3123,13 +3123,13 @@ tu_CreateFramebuffer(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkFramebuffer *pFramebuffer)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
if (TU_DEBUG(DYNAMIC))
return vk_common_CreateFramebuffer(_device, pCreateInfo, pAllocator,
pFramebuffer);
TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
VK_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
struct tu_framebuffer *framebuffer;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
@ -3185,14 +3185,14 @@ tu_DestroyFramebuffer(VkDevice _device,
VkFramebuffer _fb,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
if (TU_DEBUG(DYNAMIC)) {
vk_common_DestroyFramebuffer(_device, _fb, pAllocator);
return;
}
TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
VK_FROM_HANDLE(tu_framebuffer, fb, _fb);
if (!fb)
return;
@ -3293,7 +3293,7 @@ tu_CreateSampler(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkSampler *pSampler)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_sampler *sampler;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
@ -3314,8 +3314,8 @@ tu_DestroySampler(VkDevice _device,
VkSampler _sampler,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_sampler, sampler, _sampler);
uint32_t border_color;
if (!sampler)
@ -3339,8 +3339,8 @@ tu_GetMemoryFdKHR(VkDevice _device,
const VkMemoryGetFdInfoKHR *pGetFdInfo,
int *pFd)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
@ -3384,7 +3384,7 @@ tu_GetMemoryFdPropertiesKHR(VkDevice _device,
int fd,
VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
pMemoryFdProperties->memoryTypeBits =
(1 << device->physical_device->memory.type_count) - 1;
@ -3397,7 +3397,7 @@ tu_GetPhysicalDeviceMultisamplePropertiesEXT(
VkSampleCountFlagBits samples,
VkMultisamplePropertiesEXT* pMultisampleProperties)
{
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
VK_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->vk.supported_extensions.EXT_sample_locations)
pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
@ -3409,7 +3409,7 @@ VkDeviceAddress
tu_GetBufferDeviceAddress(VkDevice _device,
const VkBufferDeviceAddressInfo* pInfo)
{
TU_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer);
VK_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer);
return buffer->iova;
}
@ -3426,7 +3426,7 @@ uint64_t tu_GetDeviceMemoryOpaqueCaptureAddress(
VkDevice device,
const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
{
TU_FROM_HANDLE(tu_device_memory, mem, pInfo->memory);
VK_FROM_HANDLE(tu_device_memory, mem, pInfo->memory);
return mem->bo->iova;
}


@ -73,7 +73,7 @@ get_cmd_buffer(struct tu_device *dev, struct tu_cmd_buffer **cmd_buffer_out)
if (result != VK_SUCCESS)
return result;
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, vk_buf);
VK_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, vk_buf);
struct dynamic_rendering_entry entry = {
.cmd_buffer = cmd_buffer,


@ -402,7 +402,7 @@ tu_GetPhysicalDeviceFormatProperties2(
VkFormat format,
VkFormatProperties2 *pFormatProperties)
{
TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
VK_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
VkFormatProperties3 local_props3;
VkFormatProperties3 *props3 =
@ -713,7 +713,7 @@ tu_GetPhysicalDeviceImageFormatProperties2(
const VkPhysicalDeviceImageFormatInfo2 *base_info,
VkImageFormatProperties2 *base_props)
{
TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
VK_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
const VkPhysicalDeviceExternalImageFormatInfo *external_info = NULL;
const VkPhysicalDeviceImageViewImageFormatInfoEXT *image_view_info = NULL;
VkExternalImageFormatProperties *external_props = NULL;


@ -167,7 +167,7 @@ tu_image_view_init(struct tu_device *device,
const VkImageViewCreateInfo *pCreateInfo,
bool has_z24uint_s8uint)
{
TU_FROM_HANDLE(tu_image, image, pCreateInfo->image);
VK_FROM_HANDLE(tu_image, image, pCreateInfo->image);
const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
VkFormat vk_format = pCreateInfo->format;
VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask;
@ -680,7 +680,7 @@ tu_CreateImage(VkDevice _device,
uint64_t modifier = DRM_FORMAT_MOD_INVALID;
const VkSubresourceLayout *plane_layouts = NULL;
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
#ifdef TU_USE_WSI_PLATFORM
/* Ignore swapchain creation info on Android. Since we don't have an
@ -771,8 +771,8 @@ tu_DestroyImage(VkDevice _device,
VkImage _image,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_image, image, _image);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_image, image, _image);
if (!image)
return;
@ -796,11 +796,11 @@ tu_BindImageMemory2(VkDevice _device,
uint32_t bindInfoCount,
const VkBindImageMemoryInfo *pBindInfos)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
for (uint32_t i = 0; i < bindInfoCount; ++i) {
TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
VK_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
VK_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
/* Ignore this struct on Android, we cannot access swapchain structures there. */
#ifdef TU_USE_WSI_PLATFORM
@ -810,7 +810,7 @@ tu_BindImageMemory2(VkDevice _device,
if (swapchain_info && swapchain_info->swapchain != VK_NULL_HANDLE) {
VkImage _wsi_image = wsi_common_get_image(swapchain_info->swapchain,
swapchain_info->imageIndex);
TU_FROM_HANDLE(tu_image, wsi_img, _wsi_image);
VK_FROM_HANDLE(tu_image, wsi_img, _wsi_image);
image->bo = wsi_img->bo;
image->map = NULL;
@ -880,8 +880,8 @@ tu_GetImageMemoryRequirements2(VkDevice _device,
const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_image, image, pInfo->image);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_image, image, pInfo->image);
tu_get_image_memory_requirements(device, image, pMemoryRequirements);
}
@ -902,7 +902,7 @@ tu_GetDeviceImageMemoryRequirements(
const VkDeviceImageMemoryRequirements *pInfo,
VkMemoryRequirements2 *pMemoryRequirements)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_image image = {0};
@ -957,7 +957,7 @@ tu_GetImageSubresourceLayout2KHR(VkDevice _device,
const VkImageSubresource2KHR *pSubresource,
VkSubresourceLayout2KHR *pLayout)
{
TU_FROM_HANDLE(tu_image, image, _image);
VK_FROM_HANDLE(tu_image, image, _image);
tu_get_image_subresource_layout(image, pSubresource, pLayout);
}
@ -967,7 +967,7 @@ tu_GetDeviceImageSubresourceLayoutKHR(VkDevice _device,
const VkDeviceImageSubresourceInfoKHR *pInfo,
VkSubresourceLayout2KHR *pLayout)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_image image = {0};
@ -983,7 +983,7 @@ tu_CreateImageView(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkImageView *pView)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_image_view *view;
view = (struct tu_image_view *) vk_object_alloc(
@ -1003,8 +1003,8 @@ tu_DestroyImageView(VkDevice _device,
VkImageView _iview,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_image_view, iview, _iview);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_image_view, iview, _iview);
if (!iview)
return;
@ -1017,7 +1017,7 @@ tu_buffer_view_init(struct tu_buffer_view *view,
struct tu_device *device,
const VkBufferViewCreateInfo *pCreateInfo)
{
TU_FROM_HANDLE(tu_buffer, buffer, pCreateInfo->buffer);
VK_FROM_HANDLE(tu_buffer, buffer, pCreateInfo->buffer);
view->buffer = buffer;
@ -1037,7 +1037,7 @@ tu_CreateBufferView(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkBufferView *pView)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct tu_buffer_view *view;
view = (struct tu_buffer_view *) vk_object_alloc(
@ -1057,8 +1057,8 @@ tu_DestroyBufferView(VkDevice _device,
VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_buffer_view, view, bufferView);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_buffer_view, view, bufferView);
if (!view)
return;


@ -74,7 +74,7 @@ sync_cache(VkDevice _device,
uint32_t count,
const VkMappedMemoryRange *ranges)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
if (!device->physical_device->has_cached_non_coherent_memory) {
tu_finishme(
@ -83,7 +83,7 @@ sync_cache(VkDevice _device,
}
for (uint32_t i = 0; i < count; i++) {
TU_FROM_HANDLE(tu_device_memory, mem, ranges[i].memory);
VK_FROM_HANDLE(tu_device_memory, mem, ranges[i].memory);
tu_sync_cache_bo(device, mem->bo, ranges[i].offset, ranges[i].size, op);
}


@ -238,7 +238,7 @@ kgsl_sync_cache(VkDevice _device,
uint32_t count,
const VkMappedMemoryRange *ranges)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
struct kgsl_gpuobj_sync_obj *sync_list =
(struct kgsl_gpuobj_sync_obj *) vk_zalloc(
@ -252,7 +252,7 @@ kgsl_sync_cache(VkDevice _device,
};
for (uint32_t i = 0; i < count; i++) {
TU_FROM_HANDLE(tu_device_memory, mem, ranges[i].memory);
VK_FROM_HANDLE(tu_device_memory, mem, ranges[i].memory);
sync_list[i].op = op;
sync_list[i].id = mem->bo->gem_handle;


@ -799,7 +799,7 @@ tu_CreateRenderPass2(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkRenderPass *pRenderPass)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
if (TU_DEBUG(DYNAMIC))
return vk_common_CreateRenderPass2(_device, pCreateInfo, pAllocator,
@ -1015,14 +1015,14 @@ tu_DestroyRenderPass(VkDevice _device,
VkRenderPass _pass,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
if (TU_DEBUG(DYNAMIC)) {
vk_common_DestroyRenderPass(_device, _pass, pAllocator);
return;
}
TU_FROM_HANDLE(tu_render_pass, pass, _pass);
VK_FROM_HANDLE(tu_render_pass, pass, _pass);
if (!_pass)
return;
@@ -1078,7 +1078,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
continue;
}
TU_FROM_HANDLE(tu_image_view, view, att_info->imageView);
VK_FROM_HANDLE(tu_image_view, view, att_info->imageView);
tu_setup_dynamic_attachment(att, view);
att->gmem = true;
att->clear_views = info->viewMask;
@@ -1094,7 +1094,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
if (att_info->resolveMode != VK_RESOLVE_MODE_NONE) {
struct tu_render_pass_attachment *resolve_att = &pass->attachments[a];
TU_FROM_HANDLE(tu_image_view, resolve_view, att_info->resolveImageView);
VK_FROM_HANDLE(tu_image_view, resolve_view, att_info->resolveImageView);
tu_setup_dynamic_attachment(resolve_att, resolve_view);
resolve_att->gmem = false;
attachment_set_ops(
@@ -1117,7 +1117,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
info->pStencilAttachment;
if (common_info && common_info->imageView != VK_NULL_HANDLE) {
TU_FROM_HANDLE(tu_image_view, view, common_info->imageView);
VK_FROM_HANDLE(tu_image_view, view, common_info->imageView);
struct tu_render_pass_attachment *att = &pass->attachments[a];
tu_setup_dynamic_attachment(att, view);
@@ -1141,7 +1141,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
if (common_info->resolveMode != VK_RESOLVE_MODE_NONE) {
unsigned i = subpass->resolve_count++;
struct tu_render_pass_attachment *resolve_att = &pass->attachments[a];
TU_FROM_HANDLE(tu_image_view, resolve_view,
VK_FROM_HANDLE(tu_image_view, resolve_view,
common_info->resolveImageView);
tu_setup_dynamic_attachment(resolve_att, resolve_view);
resolve_att->gmem = false;
@@ -1170,7 +1170,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT);
if (fdm_info && fdm_info->imageView != VK_NULL_HANDLE &&
!tu_render_pass_disable_fdm(pass)) {
TU_FROM_HANDLE(tu_image_view, view, fdm_info->imageView);
VK_FROM_HANDLE(tu_image_view, view, fdm_info->imageView);
struct tu_render_pass_attachment *att = &pass->attachments[a];
tu_setup_dynamic_attachment(att, view);
@@ -1264,7 +1264,7 @@ tu_GetRenderAreaGranularity(VkDevice _device,
VkRenderPass renderPass,
VkExtent2D *pGranularity)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
pGranularity->width = device->physical_device->info->gmem_align_w;
pGranularity->height = device->physical_device->info->gmem_align_h;
}
@@ -1274,7 +1274,7 @@ tu_GetRenderingAreaGranularityKHR(VkDevice _device,
const VkRenderingAreaInfoKHR *pRenderingAreaInfo,
VkExtent2D *pGranularity)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
pGranularity->width = device->physical_device->info->gmem_align_w;
pGranularity->height = device->physical_device->info->gmem_align_h;
}


@@ -2014,7 +2014,7 @@ tu_pipeline_builder_parse_libraries(struct tu_pipeline_builder *builder,
assert(library_info->libraryCount <= MAX_LIBRARIES);
builder->num_libraries = library_info->libraryCount;
for (unsigned i = 0; i < library_info->libraryCount; i++) {
TU_FROM_HANDLE(tu_pipeline, library, library_info->pLibraries[i]);
VK_FROM_HANDLE(tu_pipeline, library, library_info->pLibraries[i]);
builder->libraries[i] = tu_pipeline_to_graphics_lib(library);
}
}
@@ -2073,7 +2073,7 @@ static void
tu_pipeline_builder_parse_layout(struct tu_pipeline_builder *builder,
struct tu_pipeline *pipeline)
{
TU_FROM_HANDLE(tu_pipeline_layout, layout, builder->create_info->layout);
VK_FROM_HANDLE(tu_pipeline_layout, layout, builder->create_info->layout);
if (layout) {
/* Note: it's still valid to have a layout even if there are libraries.
@@ -4039,8 +4039,8 @@ tu_graphics_pipeline_create(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
TU_FROM_HANDLE(tu_device, dev, device);
TU_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
VK_FROM_HANDLE(tu_device, dev, device);
VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
cache = cache ? cache : dev->mem_cache;
@@ -4109,9 +4109,9 @@ tu_compute_pipeline_create(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
TU_FROM_HANDLE(tu_device, dev, device);
TU_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
VK_FROM_HANDLE(tu_device, dev, device);
VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
VK_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
VkResult result;
const struct ir3_shader_variant *v = NULL;
@@ -4299,8 +4299,8 @@ tu_DestroyPipeline(VkDevice _device,
VkPipeline _pipeline,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, dev, _device);
TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
VK_FROM_HANDLE(tu_device, dev, _device);
VK_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
if (!_pipeline)
return;
@@ -4333,8 +4333,8 @@ tu_GetPipelineExecutablePropertiesKHR(
uint32_t* pExecutableCount,
VkPipelineExecutablePropertiesKHR* pProperties)
{
TU_FROM_HANDLE(tu_device, dev, _device);
TU_FROM_HANDLE(tu_pipeline, pipeline, pPipelineInfo->pipeline);
VK_FROM_HANDLE(tu_device, dev, _device);
VK_FROM_HANDLE(tu_pipeline, pipeline, pPipelineInfo->pipeline);
VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutablePropertiesKHR, out,
pProperties, pExecutableCount);
@@ -4365,7 +4365,7 @@ tu_GetPipelineExecutableStatisticsKHR(
uint32_t* pStatisticCount,
VkPipelineExecutableStatisticKHR* pStatistics)
{
TU_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);
VK_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);
VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableStatisticKHR, out,
pStatistics, pStatisticCount);
@@ -4552,7 +4552,7 @@ tu_GetPipelineExecutableInternalRepresentationsKHR(
uint32_t* pInternalRepresentationCount,
VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
{
TU_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);
VK_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);
VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableInternalRepresentationKHR, out,
pInternalRepresentations, pInternalRepresentationCount);
bool incomplete_text = false;


@@ -214,7 +214,7 @@ tu_CreateQueryPool(VkDevice _device,
const VkAllocationCallbacks *pAllocator,
VkQueryPool *pQueryPool)
{
TU_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_device, device, _device);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
assert(pCreateInfo->queryCount > 0);
@@ -347,8 +347,8 @@ tu_DestroyQueryPool(VkDevice _device,
VkQueryPool _pool,
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_query_pool, pool, _pool);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_query_pool, pool, _pool);
if (!pool)
return;
@@ -572,8 +572,8 @@ tu_GetQueryPoolResults(VkDevice _device,
VkDeviceSize stride,
VkQueryResultFlags flags)
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
VK_FROM_HANDLE(tu_device, device, _device);
VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(firstQuery + queryCount <= pool->size);
if (vk_device_is_lost(&device->vk))
@@ -723,9 +723,9 @@ tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
VkDeviceSize stride,
VkQueryResultFlags flags)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
VK_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
struct tu_cs *cs = &cmdbuf->cs;
assert(firstQuery + queryCount <= pool->size);
@@ -792,8 +792,8 @@ tu_CmdResetQueryPool(VkCommandBuffer commandBuffer,
uint32_t firstQuery,
uint32_t queryCount)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
switch (pool->type) {
case VK_QUERY_TYPE_TIMESTAMP:
@@ -815,7 +815,7 @@ tu_ResetQueryPool(VkDevice device,
uint32_t firstQuery,
uint32_t queryCount)
{
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
for (uint32_t i = 0; i < queryCount; i++) {
struct query_slot *slot = slot_address(pool, i + firstQuery);
@@ -1084,8 +1084,8 @@ tu_CmdBeginQuery(VkCommandBuffer commandBuffer,
uint32_t query,
VkQueryControlFlags flags)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(query < pool->size);
switch (pool->type) {
@@ -1124,8 +1124,8 @@ tu_CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer,
VkQueryControlFlags flags,
uint32_t index)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(query < pool->size);
switch (pool->type) {
@@ -1574,8 +1574,8 @@ tu_CmdEndQuery(VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t query)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(query < pool->size);
switch (pool->type) {
@@ -1611,8 +1611,8 @@ tu_CmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer,
uint32_t query,
uint32_t index)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
VK_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
assert(query < pool->size);
switch (pool->type) {
@@ -1635,8 +1635,8 @@ tu_CmdWriteTimestamp2(VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t query)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_query_pool, pool, queryPool);
/* Inside a render pass, just write the timestamp multiple times so that
* the user gets the last one if we use GMEM. There isn't really much
@@ -1715,7 +1715,7 @@ tu_EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
VkPerformanceCounterKHR* pCounters,
VkPerformanceCounterDescriptionKHR* pCounterDescriptions)
{
TU_FROM_HANDLE(tu_physical_device, phydev, physicalDevice);
VK_FROM_HANDLE(tu_physical_device, phydev, physicalDevice);
uint32_t desc_count = *pCounterCount;
uint32_t group_count;
@@ -1765,7 +1765,7 @@ tu_GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(
const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo,
uint32_t* pNumPasses)
{
TU_FROM_HANDLE(tu_physical_device, phydev, physicalDevice);
VK_FROM_HANDLE(tu_physical_device, phydev, physicalDevice);
uint32_t group_count = 0;
uint32_t gid = 0, cid = 0, n_passes;
const struct fd_perfcntr_group *group =


@@ -16,7 +16,7 @@
static VkResult
capture_trace(VkQueue _queue)
{
TU_FROM_HANDLE(tu_queue, queue, _queue);
VK_FROM_HANDLE(tu_queue, queue, _queue);
struct tu_device *device = queue->device;
assert(device->vk.memory_trace_data.is_enabled);


@@ -17,14 +17,14 @@
static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
tu_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
VK_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
return vk_instance_get_proc_addr_unchecked(&pdevice->instance->vk, pName);
}
static bool
tu_wsi_can_present_on_device(VkPhysicalDevice physicalDevice, int fd)
{
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
VK_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
return wsi_common_drm_devices_equal(fd, pdevice->local_fd);
}
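
For reference, the two macros expand to the same handle-to-struct cast, which is why every call site above can be switched one-for-one. Below is a minimal sketch of the pattern, assuming the VK_FROM_HANDLE definition from src/vulkan/runtime/vk_object.h; tu_FooEntrypoint is a hypothetical example and not part of this change:

/* vk_object.h: declare __name as a pointer to the driver struct that
 * backs the Vulkan handle __handle.
 */
#define VK_FROM_HANDLE(__vk_type, __name, __handle) \
   struct __vk_type *__name = __vk_type##_from_handle(__handle)

/* Hypothetical entrypoint showing the expansion. */
VKAPI_ATTR void VKAPI_CALL
tu_FooEntrypoint(VkDevice _device)
{
   VK_FROM_HANDLE(tu_device, device, _device);
   /* expands to:
    *    struct tu_device *device = tu_device_from_handle(_device);
    * where tu_device_from_handle() comes from the driver's
    * VK_DEFINE_HANDLE_CASTS() invocation for tu_device.
    */
}

TU_FROM_HANDLE was a turnip-local alias with the same expansion, so dropping it in favor of the common runtime macro changes no behavior.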