venus: initial support for events and queries

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Ryan Neph <ryanneph@google.com>
Reviewed-by: Gert Wollny <gert.wollny@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5800>
This commit is contained in:
Chia-I Wu 2019-11-05 12:51:09 -08:00 committed by Marge Bot
parent 8e2844b377
commit 5782506597
3 changed files with 271 additions and 0 deletions

View File

@ -67,6 +67,8 @@ struct vn_descriptor_set;
struct vn_descriptor_update_template;
struct vn_render_pass;
struct vn_framebuffer;
struct vn_event;
struct vn_query_pool;
struct vn_command_buffer;
struct vn_cs_encoder;

View File

@ -6022,3 +6022,253 @@ vn_DestroyFramebuffer(VkDevice device,
vn_object_base_fini(&fb->base);
vk_free(alloc, fb);
}
/* event commands */

/**
 * Create a VkEvent.  A host-side proxy object is allocated locally and the
 * renderer-side object is created asynchronously; the host allocator is not
 * forwarded to the renderer (NULL is passed instead).
 */
VkResult
vn_CreateEvent(VkDevice device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_event *event_obj =
      vk_zalloc(alloc, sizeof(*event_obj), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!event_obj)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&event_obj->base, VK_OBJECT_TYPE_EVENT, &dev->base);

   VkEvent handle = vn_event_to_handle(event_obj);
   vn_async_vkCreateEvent(dev->instance, device, pCreateInfo, NULL, &handle);

   *pEvent = handle;

   return VK_SUCCESS;
}
/**
 * Destroy a VkEvent: tell the renderer asynchronously, then free the local
 * proxy object.  NULL handles are a no-op, per the Vulkan convention.
 */
void
vn_DestroyEvent(VkDevice device,
                VkEvent event,
                const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_event *event_obj = vn_event_from_handle(event);

   if (!event_obj)
      return;

   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   vn_async_vkDestroyEvent(dev->instance, device, event, NULL);

   vn_object_base_fini(&event_obj->base);
   vk_free(alloc, event_obj);
}
/**
 * Query the event status with a synchronous round trip to the renderer.
 */
VkResult
vn_GetEventStatus(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO When the renderer supports it (requires a new vk extension), there
    * should be a coherent memory backing the event.
    */
   const VkResult status =
      vn_call_vkGetEventStatus(dev->instance, device, event);

   return vn_result(dev->instance, status);
}
/** Signal the event on the renderer side (synchronous call). */
VkResult
vn_SetEvent(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   return vn_result(dev->instance,
                    vn_call_vkSetEvent(dev->instance, device, event));
}
/** Unsignal the event on the renderer side (synchronous call). */
VkResult
vn_ResetEvent(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   return vn_result(dev->instance,
                    vn_call_vkResetEvent(dev->instance, device, event));
}
/* query pool commands */

/**
 * Create a VkQueryPool.  Besides the usual proxy-object bookkeeping, record
 * how many 32/64-bit values each query result contains so that
 * vn_GetQueryPoolResults can size its packed buffer.
 */
VkResult
vn_CreateQueryPool(VkDevice device,
                   const VkQueryPoolCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkQueryPool *pQueryPool)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_query_pool *pool =
      vk_zalloc(alloc, sizeof(*pool), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&pool->base, VK_OBJECT_TYPE_QUERY_POOL, &dev->base);

   /* keep a copy so destruction works even when pAllocator is NULL */
   pool->allocator = *alloc;

   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION:
   case VK_QUERY_TYPE_TIMESTAMP:
      /* a single value per query */
      pool->result_array_size = 1;
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      /* one value per enabled statistic bit */
      pool->result_array_size =
         util_bitcount(pCreateInfo->pipelineStatistics);
      break;
   default:
      unreachable("bad query type");
      break;
   }

   VkQueryPool handle = vn_query_pool_to_handle(pool);
   vn_async_vkCreateQueryPool(dev->instance, device, pCreateInfo, NULL,
                              &handle);

   *pQueryPool = handle;

   return VK_SUCCESS;
}
/**
 * Destroy a VkQueryPool: notify the renderer asynchronously and free the
 * local proxy.  NULL handles are a no-op.
 */
void
vn_DestroyQueryPool(VkDevice device,
                    VkQueryPool queryPool,
                    const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);

   if (!pool)
      return;

   /* fall back to the allocator captured at creation time */
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &pool->allocator;

   vn_async_vkDestroyQueryPool(dev->instance, device, queryPool, NULL);

   vn_object_base_fini(&pool->base);
   vk_free(alloc, pool);
}
/**
 * Reset a range of queries.  The command returns nothing to the caller, so
 * it is issued asynchronously.
 */
void
vn_ResetQueryPool(VkDevice device,
                  VkQueryPool queryPool,
                  uint32_t firstQuery,
                  uint32_t queryCount)
{
   struct vn_device *vdev = vn_device_from_handle(device);

   vn_async_vkResetQueryPool(vdev->instance, device, queryPool, firstQuery,
                             queryCount);
}
/**
 * Retrieve query results.  Because there is no coherent memory shared with
 * the renderer yet (see TODO below), results are fetched in a tightly
 * "packed" layout and, when that layout differs from the caller's, unpacked
 * into the caller's strided buffer afterwards.
 */
VkResult
vn_GetQueryPoolResults(VkDevice device,
                       VkQueryPool queryPool,
                       uint32_t firstQuery,
                       uint32_t queryCount,
                       size_t dataSize,
                       void *pData,
                       VkDeviceSize stride,
                       VkQueryResultFlags flags)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;

   /* each result element is 8 or 4 bytes depending on VK_QUERY_RESULT_64_BIT */
   const size_t result_width = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
   const size_t result_size = pool->result_array_size * result_width;
   /* with WAIT or PARTIAL, every query's result gets written */
   const bool result_always_written =
      flags & (VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_PARTIAL_BIT);

   /* request tightly packed results from the renderer; when results may be
    * skipped, also request the availability value so we can tell which
    * queries were actually written
    */
   VkQueryResultFlags packed_flags = flags;
   size_t packed_stride = result_size;
   if (!result_always_written)
      packed_flags |= VK_QUERY_RESULT_WITH_AVAILABILITY_BIT;
   if (packed_flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
      packed_stride += result_width;

   /* NOTE(review): dataSize is not checked against packed_size/stride here —
    * presumably validated by the caller or a layer; confirm
    */
   const size_t packed_size = packed_stride * queryCount;
   void *packed_data;
   if (result_always_written && packed_stride == stride) {
      /* packed layout matches the caller's layout; receive in place */
      packed_data = pData;
   } else {
      packed_data = vk_alloc(alloc, packed_size, VN_DEFAULT_ALIGN,
                             VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!packed_data)
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   /* TODO the renderer should transparently vkCmdCopyQueryPoolResults to a
    * coherent memory such that we can memcpy from the coherent memory to
    * avoid this serialized round trip.
    */
   VkResult result = vn_call_vkGetQueryPoolResults(
      dev->instance, device, queryPool, firstQuery, queryCount, packed_size,
      packed_data, packed_stride, packed_flags);

   if (packed_data == pData)
      return vn_result(dev->instance, result);

   /* unpack: copy each query's result array, plus the availability value
    * only when the caller asked for it
    */
   const size_t copy_size =
      result_size +
      (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT ? result_width : 0);
   const void *src = packed_data;
   void *dst = pData;
   if (result == VK_SUCCESS) {
      for (uint32_t i = 0; i < queryCount; i++) {
         memcpy(dst, src, copy_size);
         src += packed_stride;
         dst += stride;
      }
   } else if (result == VK_NOT_READY) {
      /* only reachable when we added the availability bit ourselves */
      assert(!result_always_written &&
             (packed_flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
      if (flags & VK_QUERY_RESULT_64_BIT) {
         for (uint32_t i = 0; i < queryCount; i++) {
            /* the availability value immediately follows the result array */
            const bool avail = *(const uint64_t *)(src + result_size);
            if (avail)
               memcpy(dst, src, copy_size);
            else if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
               /* unavailable: report availability 0, leave results alone */
               *(uint64_t *)(dst + result_size) = 0;
            src += packed_stride;
            dst += stride;
         }
      } else {
         for (uint32_t i = 0; i < queryCount; i++) {
            const bool avail = *(const uint32_t *)(src + result_size);
            if (avail)
               memcpy(dst, src, copy_size);
            else if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
               *(uint32_t *)(dst + result_size) = 0;
            src += packed_stride;
            dst += stride;
         }
      }
   }

   vk_free(alloc, packed_data);

   return vn_result(dev->instance, result);
}

View File

@ -330,6 +330,25 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(vn_framebuffer,
VkFramebuffer,
VK_OBJECT_TYPE_FRAMEBUFFER)
/* host-side proxy for a renderer-owned VkEvent; carries no extra state */
struct vn_event {
   struct vn_object_base base;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_event,
                               base.base,
                               VkEvent,
                               VK_OBJECT_TYPE_EVENT)
/* host-side proxy for a renderer-owned VkQueryPool */
struct vn_query_pool {
   struct vn_object_base base;

   /* allocator captured at creation; used when vn_DestroyQueryPool gets a
    * NULL pAllocator
    */
   VkAllocationCallbacks allocator;

   /* number of 32/64-bit values each query result contains */
   uint32_t result_array_size;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_query_pool,
                               base.base,
                               VkQueryPool,
                               VK_OBJECT_TYPE_QUERY_POOL)
struct vn_command_buffer {
struct vn_object_base base;