venus: add more tracepoints for perf analysis

This change adds tracepoints that help with understanding app behavior
for debugging and performance optimization.

Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Reviewed-by: Ryan Neph <ryanneph@google.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17497>
Authored by Yiwei Zhang on 2022-07-12 06:31:37 +00:00, committed by Marge Bot
parent f96e25ae05
commit 62f79f9ec1
11 changed files with 61 additions and 0 deletions
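For context, VN_TRACE_FUNC() opens a scoped trace event covering the rest of the
Vulkan entrypoint, so each traced call shows up as one slice on the timeline of a
captured trace. The sketch below illustrates how such a function-scoped tracepoint
macro can be built in C with the GCC/Clang cleanup attribute; it is a minimal
illustration only, and trace_begin()/trace_end() plus the TRACE_* names are
hypothetical stand-ins rather than Mesa's actual helpers.

/* Minimal sketch of a function-scoped tracepoint macro (illustrative only;
 * not Mesa's implementation). A begin event is emitted on entry and the
 * matching end event is emitted automatically when the scope exits, even
 * on early returns, via __attribute__((cleanup)).
 */
#include <stdio.h>

static void
trace_begin(const char *name)
{
   printf("TRACE BEGIN %s\n", name);   /* hypothetical event emitter */
}

static void
trace_end(const char *name)
{
   printf("TRACE END   %s\n", name);   /* hypothetical event emitter */
}

static void
trace_scope_end(const char **name)
{
   trace_end(*name);
}

/* Declares a scope guard and emits the begin event; the cleanup handler
 * emits the end event when the enclosing scope is left. (A production
 * version would mangle the variable name with __LINE__ to allow several
 * scopes in one block.)
 */
#define TRACE_SCOPE(name)                                               \
   const char *_trace_scope __attribute__((cleanup(trace_scope_end))) = \
      (name);                                                           \
   trace_begin(_trace_scope)

#define TRACE_FUNC() TRACE_SCOPE(__func__)

static void
do_work(void)
{
   TRACE_FUNC();   /* the whole function becomes one trace slice */
   /* ... real work ... */
}

int
main(void)
{
   do_work();
   return 0;
}

Used this way at the top of every entrypoint, as the hunks below do with
VN_TRACE_FUNC(), the trace directly answers how often an app calls a given
Vulkan function and how long each call takes.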

src/virtio/vulkan/vn_android.c

@ -994,6 +994,7 @@ vn_GetAndroidHardwareBufferPropertiesANDROID(
const struct AHardwareBuffer *buffer,
VkAndroidHardwareBufferPropertiesANDROID *pProperties)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
VkResult result = VK_SUCCESS;
int dma_buf_fd = -1;

src/virtio/vulkan/vn_buffer.c

@ -358,6 +358,7 @@ vn_CreateBuffer(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkBuffer *pBuffer)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -390,6 +391,7 @@ vn_DestroyBuffer(VkDevice device,
VkBuffer buffer,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_buffer *buf = vn_buffer_from_handle(buffer);
const VkAllocationCallbacks *alloc =

src/virtio/vulkan/vn_command_buffer.c

@ -439,6 +439,7 @@ vn_CreateCommandPool(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkCommandPool *pCommandPool)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -469,6 +470,7 @@ vn_DestroyCommandPool(VkDevice device,
VkCommandPool commandPool,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_command_pool *pool = vn_command_pool_from_handle(commandPool);
const VkAllocationCallbacks *alloc;
@ -501,6 +503,7 @@ vn_ResetCommandPool(VkDevice device,
VkCommandPool commandPool,
VkCommandPoolResetFlags flags)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_command_pool *pool = vn_command_pool_from_handle(commandPool);
@ -520,6 +523,7 @@ vn_TrimCommandPool(VkDevice device,
VkCommandPool commandPool,
VkCommandPoolTrimFlags flags)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
vn_async_vkTrimCommandPool(dev->instance, device, commandPool, flags);
@ -532,6 +536,7 @@ vn_AllocateCommandBuffers(VkDevice device,
const VkCommandBufferAllocateInfo *pAllocateInfo,
VkCommandBuffer *pCommandBuffers)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_command_pool *pool =
vn_command_pool_from_handle(pAllocateInfo->commandPool);
@ -583,6 +588,7 @@ vn_FreeCommandBuffers(VkDevice device,
uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_command_pool *pool = vn_command_pool_from_handle(commandPool);
const VkAllocationCallbacks *alloc = &pool->allocator;
@ -612,6 +618,7 @@ VkResult
vn_ResetCommandBuffer(VkCommandBuffer commandBuffer,
VkCommandBufferResetFlags flags)
{
VN_TRACE_FUNC();
struct vn_command_buffer *cmd =
vn_command_buffer_from_handle(commandBuffer);

src/virtio/vulkan/vn_descriptor_set.c

@ -276,6 +276,7 @@ vn_CreateDescriptorPool(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkDescriptorPool *pDescriptorPool)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -338,6 +339,7 @@ vn_DestroyDescriptorPool(VkDevice device,
VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_pool *pool =
vn_descriptor_pool_from_handle(descriptorPool);
@ -454,6 +456,7 @@ vn_ResetDescriptorPool(VkDevice device,
VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_pool *pool =
vn_descriptor_pool_from_handle(descriptorPool);
@ -478,6 +481,7 @@ vn_AllocateDescriptorSets(VkDevice device,
const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_pool *pool =
vn_descriptor_pool_from_handle(pAllocateInfo->descriptorPool);
@ -596,6 +600,7 @@ vn_FreeDescriptorSets(VkDevice device,
uint32_t descriptorSetCount,
const VkDescriptorSet *pDescriptorSets)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_pool *pool =
vn_descriptor_pool_from_handle(descriptorPool);
@ -761,6 +766,7 @@ vn_UpdateDescriptorSets(VkDevice device,
uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
@ -903,6 +909,7 @@ vn_CreateDescriptorUpdateTemplate(
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -941,6 +948,7 @@ vn_DestroyDescriptorUpdateTemplate(
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_update_template *templ =
vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
@ -965,6 +973,7 @@ vn_UpdateDescriptorSetWithTemplate(
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_set *set =
vn_descriptor_set_from_handle(descriptorSet);

src/virtio/vulkan/vn_device.c

@ -412,6 +412,7 @@ vn_CreateDevice(VkPhysicalDevice physicalDevice,
const VkAllocationCallbacks *pAllocator,
VkDevice *pDevice)
{
VN_TRACE_FUNC();
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
struct vn_instance *instance = physical_dev->instance;
@ -452,6 +453,7 @@ vn_CreateDevice(VkPhysicalDevice physicalDevice,
void
vn_DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;

src/virtio/vulkan/vn_device_memory.c

@ -400,6 +400,7 @@ vn_AllocateMemory(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkDeviceMemory *pMemory)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -481,6 +482,7 @@ vn_FreeMemory(VkDevice device,
VkDeviceMemory memory,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
const VkAllocationCallbacks *alloc =
@ -529,6 +531,7 @@ vn_MapMemory(VkDevice device,
VkMemoryMapFlags flags,
void **ppData)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
const bool need_bo = !mem->base_bo;
@ -576,6 +579,7 @@ vn_MapMemory(VkDevice device,
void
vn_UnmapMemory(VkDevice device, VkDeviceMemory memory)
{
VN_TRACE_FUNC();
}
VkResult
@ -583,6 +587,7 @@ vn_FlushMappedMemoryRanges(VkDevice device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange *pMemoryRanges)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
for (uint32_t i = 0; i < memoryRangeCount; i++) {
@ -605,6 +610,7 @@ vn_InvalidateMappedMemoryRanges(VkDevice device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange *pMemoryRanges)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
for (uint32_t i = 0; i < memoryRangeCount; i++) {
@ -641,6 +647,7 @@ vn_GetMemoryFdKHR(VkDevice device,
const VkMemoryGetFdInfoKHR *pGetFdInfo,
int *pFd)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_device_memory *mem =
vn_device_memory_from_handle(pGetFdInfo->memory);
@ -706,6 +713,7 @@ vn_GetMemoryFdPropertiesKHR(VkDevice device,
int fd,
VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
uint64_t alloc_size = 0;
uint32_t mem_type_bits = 0;

src/virtio/vulkan/vn_image.c

@ -288,6 +288,7 @@ vn_CreateImage(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkImage *pImage)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -342,6 +343,7 @@ vn_DestroyImage(VkDevice device,
VkImage image,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_image *img = vn_image_from_handle(image);
const VkAllocationCallbacks *alloc =

src/virtio/vulkan/vn_instance.c

@ -691,6 +691,7 @@ vn_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkInstance *pInstance)
{
VN_TRACE_FUNC();
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : vk_default_allocator();
struct vn_instance *instance;
@ -832,6 +833,7 @@ void
vn_DestroyInstance(VkInstance _instance,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance *instance = vn_instance_from_handle(_instance);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &instance->base.base.alloc;

src/virtio/vulkan/vn_pipeline.c

@ -125,6 +125,7 @@ vn_CreatePipelineCache(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkPipelineCache *pPipelineCache)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -163,6 +164,7 @@ vn_DestroyPipelineCache(VkDevice device,
VkPipelineCache pipelineCache,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_pipeline_cache *cache =
vn_pipeline_cache_from_handle(pipelineCache);
@ -185,6 +187,7 @@ vn_GetPipelineCacheData(VkDevice device,
size_t *pDataSize,
void *pData)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_physical_device *physical_dev = dev->physical_device;
@ -231,6 +234,7 @@ vn_MergePipelineCaches(VkDevice device,
uint32_t srcCacheCount,
const VkPipelineCache *pSrcCaches)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
vn_async_vkMergePipelineCaches(dev->instance, device, dstCache,
@ -261,6 +265,7 @@ vn_fix_graphics_pipeline_create_info(
const VkAllocationCallbacks *alloc,
VkGraphicsPipelineCreateInfo **out)
{
VN_TRACE_FUNC();
VkGraphicsPipelineCreateInfo *infos = NULL;
/* Defer allocation until we find a needed fix. */
@ -353,6 +358,7 @@ vn_CreateGraphicsPipelines(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -403,6 +409,7 @@ vn_CreateComputePipelines(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -437,6 +444,7 @@ vn_DestroyPipeline(VkDevice device,
VkPipeline _pipeline,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
const VkAllocationCallbacks *alloc =

src/virtio/vulkan/vn_query_pool.c

@ -22,6 +22,7 @@ vn_CreateQueryPool(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkQueryPool *pQueryPool)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -69,6 +70,7 @@ vn_DestroyQueryPool(VkDevice device,
VkQueryPool queryPool,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
const VkAllocationCallbacks *alloc;
@ -90,6 +92,7 @@ vn_ResetQueryPool(VkDevice device,
uint32_t firstQuery,
uint32_t queryCount)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
vn_async_vkResetQueryPool(dev->instance, device, queryPool, firstQuery,
@ -106,6 +109,7 @@ vn_GetQueryPoolResults(VkDevice device,
VkDeviceSize stride,
VkQueryResultFlags flags)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
const VkAllocationCallbacks *alloc = &pool->allocator;

src/virtio/vulkan/vn_queue.c

@ -611,6 +611,7 @@ vn_CreateFence(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkFence *pFence)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -663,6 +664,7 @@ vn_DestroyFence(VkDevice device,
VkFence _fence,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_fence *fence = vn_fence_from_handle(_fence);
const VkAllocationCallbacks *alloc =
@ -685,6 +687,7 @@ vn_DestroyFence(VkDevice device,
VkResult
vn_ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
/* TODO if the fence is shared-by-ref, this needs to be synchronous */
@ -874,6 +877,7 @@ VkResult
vn_ImportFenceFdKHR(VkDevice device,
const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_fence *fence = vn_fence_from_handle(pImportFenceFdInfo->fence);
ASSERTED const bool sync_file = pImportFenceFdInfo->handleType ==
@ -901,6 +905,7 @@ vn_GetFenceFdKHR(VkDevice device,
const VkFenceGetFdInfoKHR *pGetFdInfo,
int *pFd)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_fence *fence = vn_fence_from_handle(pGetFdInfo->fence);
const bool sync_file =
@ -968,6 +973,7 @@ vn_CreateSemaphore(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkSemaphore *pSemaphore)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -1010,6 +1016,7 @@ vn_DestroySemaphore(VkDevice device,
VkSemaphore semaphore,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
const VkAllocationCallbacks *alloc =
@ -1032,6 +1039,7 @@ vn_GetSemaphoreCounterValue(VkDevice device,
VkSemaphore semaphore,
uint64_t *pValue)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
ASSERTED struct vn_sync_payload *payload = sem->payload;
@ -1044,6 +1052,7 @@ vn_GetSemaphoreCounterValue(VkDevice device,
VkResult
vn_SignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
/* TODO if the semaphore is shared-by-ref, this needs to be synchronous */
@ -1148,6 +1157,7 @@ VkResult
vn_ImportSemaphoreFdKHR(
VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_semaphore *sem =
vn_semaphore_from_handle(pImportSemaphoreFdInfo->semaphore);
@ -1176,6 +1186,7 @@ vn_GetSemaphoreFdKHR(VkDevice device,
const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
int *pFd)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_semaphore *sem = vn_semaphore_from_handle(pGetFdInfo->semaphore);
const bool sync_file =
@ -1237,6 +1248,7 @@ vn_CreateEvent(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkEvent *pEvent)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
@ -1269,6 +1281,7 @@ vn_DestroyEvent(VkDevice device,
VkEvent event,
const VkAllocationCallbacks *pAllocator)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_event *ev = vn_event_from_handle(event);
const VkAllocationCallbacks *alloc =
@ -1288,6 +1301,7 @@ vn_DestroyEvent(VkDevice device,
VkResult
vn_GetEventStatus(VkDevice device, VkEvent event)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_event *ev = vn_event_from_handle(event);
VkResult result;
@ -1303,6 +1317,7 @@ vn_GetEventStatus(VkDevice device, VkEvent event)
VkResult
vn_SetEvent(VkDevice device, VkEvent event)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_event *ev = vn_event_from_handle(event);
@ -1321,6 +1336,7 @@ vn_SetEvent(VkDevice device, VkEvent event)
VkResult
vn_ResetEvent(VkDevice device, VkEvent event)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_event *ev = vn_event_from_handle(event);