venus: add some trace points

Add trace points for

 - vn_AcquireNextImage2KHR and vn_QueuePresentKHR
 - vn_AcquireImageANDROID and vn_QueueSignalReleaseImageANDROID
 - vn_BeginCommandBuffer and vn_EndCommandBuffer
 - vn_*Wait*
 - vn_Queue*
 - vn_instance_wait_roundtrip
 - shmem allocations and cache miss/skip

v2: fix cache miss/skip trace points (Ryan)

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org> (v1)
Reviewed-by: Ryan Neph <ryanneph@google.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14215>
This commit is contained in:
Chia-I Wu 2021-05-17 10:25:52 -07:00 committed by Marge Bot
parent 631b3fe3e9
commit 108881cbcc
10 changed files with 20 additions and 0 deletions

View File

@@ -537,6 +537,7 @@ vn_AcquireImageANDROID(VkDevice device,
                        VkSemaphore semaphore,
                        VkFence fence)
 {
+   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    VkResult result = VK_SUCCESS;
@@ -626,6 +627,7 @@ vn_QueueSignalReleaseImageANDROID(VkQueue queue,
                                   VkImage image,
                                   int *pNativeFenceFd)
 {
+   VN_TRACE_FUNC();
    struct vn_queue *que = vn_queue_from_handle(queue);
    struct vn_device *dev = que->device;
    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

View File

@@ -623,6 +623,7 @@ VkResult
 vn_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                       const VkCommandBufferBeginInfo *pBeginInfo)
 {
+   VN_TRACE_FUNC();
    struct vn_command_buffer *cmd =
       vn_command_buffer_from_handle(commandBuffer);
    struct vn_instance *instance = cmd->device->instance;
@@ -703,6 +704,7 @@ vn_cmd_submit(struct vn_command_buffer *cmd)
 VkResult
 vn_EndCommandBuffer(VkCommandBuffer commandBuffer)
 {
+   VN_TRACE_FUNC();
    struct vn_command_buffer *cmd =
       vn_command_buffer_from_handle(commandBuffer);
    struct vn_instance *instance = cmd->device->instance;

View File

@@ -184,6 +184,7 @@ vn_cs_encoder_grow_buffer_array(struct vn_cs_encoder *enc)
 bool
 vn_cs_encoder_reserve_internal(struct vn_cs_encoder *enc, size_t size)
 {
+   VN_TRACE_FUNC();
    if (unlikely(enc->storage_type == VN_CS_ENCODER_STORAGE_POINTER))
       return false;

View File

@@ -420,6 +420,7 @@ vn_GetDeviceGroupPeerMemoryFeatures(
 VkResult
 vn_DeviceWaitIdle(VkDevice device)
 {
+   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    for (uint32_t i = 0; i < dev->queue_count; i++) {

View File

@@ -332,6 +332,7 @@ void
 vn_instance_wait_roundtrip(struct vn_instance *instance,
                            uint32_t roundtrip_seqno)
 {
+   VN_TRACE_FUNC();
    const struct vn_ring *ring = &instance->ring.ring;
    const volatile atomic_uint *ptr = ring->shared.extra;
    uint32_t iter = 0;
@@ -478,6 +479,7 @@ static struct vn_cs_encoder *
 vn_instance_ring_cs_upload_locked(struct vn_instance *instance,
                                   const struct vn_cs_encoder *cs)
 {
+   VN_TRACE_FUNC();
    assert(cs->storage_type == VN_CS_ENCODER_STORAGE_POINTER &&
           cs->buffer_count == 1);
    const void *cs_data = cs->buffers[0].base;
@@ -554,6 +556,7 @@ vn_instance_get_reply_shmem_locked(struct vn_instance *instance,
                                    size_t size,
                                    void **out_ptr)
 {
+   VN_TRACE_FUNC();
    struct vn_renderer_shmem_pool *pool = &instance->reply_shmem_pool;
    const struct vn_renderer_shmem *saved_pool_shmem = pool->shmem;

View File

@@ -337,6 +337,7 @@ vn_QueueSubmit(VkQueue _queue,
                const VkSubmitInfo *pSubmits,
                VkFence fence)
 {
+   VN_TRACE_FUNC();
    struct vn_queue *queue = vn_queue_from_handle(_queue);
    struct vn_device *dev = queue->device;
@@ -396,6 +397,7 @@ vn_QueueBindSparse(VkQueue _queue,
                    const VkBindSparseInfo *pBindInfo,
                    VkFence fence)
 {
+   VN_TRACE_FUNC();
    struct vn_queue *queue = vn_queue_from_handle(_queue);
    struct vn_device *dev = queue->device;
@@ -421,6 +423,7 @@ vn_QueueBindSparse(VkQueue _queue,
 VkResult
 vn_QueueWaitIdle(VkQueue _queue)
 {
+   VN_TRACE_FUNC();
    struct vn_queue *queue = vn_queue_from_handle(_queue);
    VkDevice device = vn_device_to_handle(queue->device);
@@ -632,6 +635,7 @@ vn_WaitForFences(VkDevice device,
                  VkBool32 waitAll,
                  uint64_t timeout)
 {
+   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
@@ -923,6 +927,7 @@ vn_WaitSemaphores(VkDevice device,
                   const VkSemaphoreWaitInfo *pWaitInfo,
                   uint64_t timeout)
 {
+   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

View File

@@ -277,6 +277,7 @@ vn_renderer_wait(struct vn_renderer *renderer,
 static inline struct vn_renderer_shmem *
 vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
 {
+   VN_TRACE_FUNC();
    struct vn_renderer_shmem *shmem =
       renderer->shmem_ops.create(renderer, size);
    if (shmem) {

View File

@@ -124,6 +124,7 @@ vn_renderer_shmem_cache_get(struct vn_renderer_shmem_cache *cache,
    int idx;
    struct vn_renderer_shmem_bucket *bucket = choose_bucket(cache, size, &idx);
    if (!bucket) {
+      VN_TRACE_SCOPE("shmem cache skip");
       simple_mtx_lock(&cache->mutex);
       cache->debug.cache_skip_count++;
       simple_mtx_unlock(&cache->mutex);
@@ -144,6 +145,7 @@ vn_renderer_shmem_cache_get(struct vn_renderer_shmem_cache *cache,
       cache->debug.cache_hit_count++;
    } else {
+      VN_TRACE_SCOPE("shmem cache miss");
       cache->debug.cache_miss_count++;
    }
    simple_mtx_unlock(&cache->mutex);

View File

@@ -68,6 +68,7 @@ vn_renderer_shmem_pool_grow(struct vn_renderer *renderer,
                             struct vn_renderer_shmem_pool *pool,
                             size_t size)
 {
+   VN_TRACE_FUNC();
    /* power-of-two to hit shmem cache */
    size_t alloc_size = pool->min_alloc_size;
    while (alloc_size < size) {

View File

@@ -181,6 +181,7 @@ vn_DestroySwapchainKHR(VkDevice device,
 VkResult
 vn_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
 {
+   VN_TRACE_FUNC();
    struct vn_queue *queue = vn_queue_from_handle(_queue);
    VkResult result =
@@ -206,6 +207,7 @@ vn_AcquireNextImage2KHR(VkDevice device,
                         const VkAcquireNextImageInfoKHR *pAcquireInfo,
                         uint32_t *pImageIndex)
 {
+   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    VkResult result = wsi_common_acquire_next_image2(