venus: add some trace points

Add trace points for

 - vn_AcquireNextImage2KHR and vn_QueuePresentKHR
 - vn_AcquireImageANDROID and vn_QueueSignalReleaseImageANDROID
 - vn_BeginCommandBuffer and vn_EndCommandBuffer
 - vn_*Wait*
 - vn_Queue*
 - vn_instance_wait_roundtrip
 - shmem allocations and cache miss/skip

v2: fix cache miss/skip trace points (Ryan)

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org> (v1)
Reviewed-by: Ryan Neph <ryanneph@google.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14215>
This commit is contained in:
Authored by Chia-I Wu on 2021-05-17 10:25:52 -07:00; committed by Marge Bot.
Parent commit: 631b3fe3e9
Commit: 108881cbcc
10 changed files with 20 additions and 0 deletions

View File

@ -537,6 +537,7 @@ vn_AcquireImageANDROID(VkDevice device,
VkSemaphore semaphore,
VkFence fence)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
VkResult result = VK_SUCCESS;
@ -626,6 +627,7 @@ vn_QueueSignalReleaseImageANDROID(VkQueue queue,
VkImage image,
int *pNativeFenceFd)
{
VN_TRACE_FUNC();
struct vn_queue *que = vn_queue_from_handle(queue);
struct vn_device *dev = que->device;
const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

View File

@ -623,6 +623,7 @@ VkResult
vn_BeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo)
{
VN_TRACE_FUNC();
struct vn_command_buffer *cmd =
vn_command_buffer_from_handle(commandBuffer);
struct vn_instance *instance = cmd->device->instance;
@ -703,6 +704,7 @@ vn_cmd_submit(struct vn_command_buffer *cmd)
VkResult
vn_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
VN_TRACE_FUNC();
struct vn_command_buffer *cmd =
vn_command_buffer_from_handle(commandBuffer);
struct vn_instance *instance = cmd->device->instance;

View File

@ -184,6 +184,7 @@ vn_cs_encoder_grow_buffer_array(struct vn_cs_encoder *enc)
bool
vn_cs_encoder_reserve_internal(struct vn_cs_encoder *enc, size_t size)
{
VN_TRACE_FUNC();
if (unlikely(enc->storage_type == VN_CS_ENCODER_STORAGE_POINTER))
return false;

View File

@ -420,6 +420,7 @@ vn_GetDeviceGroupPeerMemoryFeatures(
VkResult
vn_DeviceWaitIdle(VkDevice device)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
for (uint32_t i = 0; i < dev->queue_count; i++) {

View File

@ -332,6 +332,7 @@ void
vn_instance_wait_roundtrip(struct vn_instance *instance,
uint32_t roundtrip_seqno)
{
VN_TRACE_FUNC();
const struct vn_ring *ring = &instance->ring.ring;
const volatile atomic_uint *ptr = ring->shared.extra;
uint32_t iter = 0;
@ -478,6 +479,7 @@ static struct vn_cs_encoder *
vn_instance_ring_cs_upload_locked(struct vn_instance *instance,
const struct vn_cs_encoder *cs)
{
VN_TRACE_FUNC();
assert(cs->storage_type == VN_CS_ENCODER_STORAGE_POINTER &&
cs->buffer_count == 1);
const void *cs_data = cs->buffers[0].base;
@ -554,6 +556,7 @@ vn_instance_get_reply_shmem_locked(struct vn_instance *instance,
size_t size,
void **out_ptr)
{
VN_TRACE_FUNC();
struct vn_renderer_shmem_pool *pool = &instance->reply_shmem_pool;
const struct vn_renderer_shmem *saved_pool_shmem = pool->shmem;

View File

@ -337,6 +337,7 @@ vn_QueueSubmit(VkQueue _queue,
const VkSubmitInfo *pSubmits,
VkFence fence)
{
VN_TRACE_FUNC();
struct vn_queue *queue = vn_queue_from_handle(_queue);
struct vn_device *dev = queue->device;
@ -396,6 +397,7 @@ vn_QueueBindSparse(VkQueue _queue,
const VkBindSparseInfo *pBindInfo,
VkFence fence)
{
VN_TRACE_FUNC();
struct vn_queue *queue = vn_queue_from_handle(_queue);
struct vn_device *dev = queue->device;
@ -421,6 +423,7 @@ vn_QueueBindSparse(VkQueue _queue,
VkResult
vn_QueueWaitIdle(VkQueue _queue)
{
VN_TRACE_FUNC();
struct vn_queue *queue = vn_queue_from_handle(_queue);
VkDevice device = vn_device_to_handle(queue->device);
@ -632,6 +635,7 @@ vn_WaitForFences(VkDevice device,
VkBool32 waitAll,
uint64_t timeout)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
@ -923,6 +927,7 @@ vn_WaitSemaphores(VkDevice device,
const VkSemaphoreWaitInfo *pWaitInfo,
uint64_t timeout)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

View File

@ -277,6 +277,7 @@ vn_renderer_wait(struct vn_renderer *renderer,
static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
VN_TRACE_FUNC();
struct vn_renderer_shmem *shmem =
renderer->shmem_ops.create(renderer, size);
if (shmem) {

View File

@ -124,6 +124,7 @@ vn_renderer_shmem_cache_get(struct vn_renderer_shmem_cache *cache,
int idx;
struct vn_renderer_shmem_bucket *bucket = choose_bucket(cache, size, &idx);
if (!bucket) {
VN_TRACE_SCOPE("shmem cache skip");
simple_mtx_lock(&cache->mutex);
cache->debug.cache_skip_count++;
simple_mtx_unlock(&cache->mutex);
@ -144,6 +145,7 @@ vn_renderer_shmem_cache_get(struct vn_renderer_shmem_cache *cache,
cache->debug.cache_hit_count++;
} else {
VN_TRACE_SCOPE("shmem cache miss");
cache->debug.cache_miss_count++;
}
simple_mtx_unlock(&cache->mutex);

View File

@ -68,6 +68,7 @@ vn_renderer_shmem_pool_grow(struct vn_renderer *renderer,
struct vn_renderer_shmem_pool *pool,
size_t size)
{
VN_TRACE_FUNC();
/* power-of-two to hit shmem cache */
size_t alloc_size = pool->min_alloc_size;
while (alloc_size < size) {

View File

@ -181,6 +181,7 @@ vn_DestroySwapchainKHR(VkDevice device,
VkResult
vn_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
VN_TRACE_FUNC();
struct vn_queue *queue = vn_queue_from_handle(_queue);
VkResult result =
@ -206,6 +207,7 @@ vn_AcquireNextImage2KHR(VkDevice device,
const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex)
{
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
VkResult result = wsi_common_acquire_next_image2(