venus: initial support for module and pipelines

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Ryan Neph <ryanneph@google.com>
Reviewed-by: Gert Wollny <gert.wollny@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5800>
This commit is contained in:
Chia-I Wu 2019-11-05 12:52:14 -08:00 committed by Marge Bot
parent 5782506597
commit 723f0bf74a
3 changed files with 355 additions and 1 deletions

View File

@@ -69,6 +69,10 @@ struct vn_render_pass;
struct vn_framebuffer;
struct vn_event;
struct vn_query_pool;
struct vn_shader_module;
struct vn_pipeline_layout;
struct vn_pipeline_cache;
struct vn_pipeline;
struct vn_command_buffer;
struct vn_cs_encoder;

View File

@@ -895,7 +895,12 @@ vn_physical_device_init_uuids(struct vn_physical_device *physical_dev)
static_assert(VK_UUID_SIZE <= SHA1_DIGEST_LENGTH, "");
/* keep props->pipelineCacheUUID? */
_mesa_sha1_init(&sha1_ctx);
_mesa_sha1_update(&sha1_ctx, &props->pipelineCacheUUID,
sizeof(props->pipelineCacheUUID));
_mesa_sha1_final(&sha1_ctx, sha1);
memcpy(props->pipelineCacheUUID, sha1, VK_UUID_SIZE);
_mesa_sha1_init(&sha1_ctx);
_mesa_sha1_update(&sha1_ctx, &props->vendorID, sizeof(props->vendorID));
@@ -6272,3 +6277,316 @@ vn_GetQueryPoolResults(VkDevice device,
vk_free(alloc, packed_data);
return vn_result(dev->instance, result);
}
/* shader module commands */
VkResult
vn_CreateShaderModule(VkDevice device,
const VkShaderModuleCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkShaderModule *pShaderModule)
{
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
struct vn_shader_module *mod =
vk_zalloc(alloc, sizeof(*mod), VN_DEFAULT_ALIGN,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!mod)
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vn_object_base_init(&mod->base, VK_OBJECT_TYPE_SHADER_MODULE, &dev->base);
VkShaderModule mod_handle = vn_shader_module_to_handle(mod);
vn_async_vkCreateShaderModule(dev->instance, device, pCreateInfo, NULL,
&mod_handle);
*pShaderModule = mod_handle;
return VK_SUCCESS;
}
void
vn_DestroyShaderModule(VkDevice device,
VkShaderModule shaderModule,
const VkAllocationCallbacks *pAllocator)
{
struct vn_device *dev = vn_device_from_handle(device);
struct vn_shader_module *mod = vn_shader_module_from_handle(shaderModule);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
if (!mod)
return;
vn_async_vkDestroyShaderModule(dev->instance, device, shaderModule, NULL);
vn_object_base_fini(&mod->base);
vk_free(alloc, mod);
}
/* pipeline layout commands */
VkResult
vn_CreatePipelineLayout(VkDevice device,
const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineLayout *pPipelineLayout)
{
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
struct vn_pipeline_layout *layout =
vk_zalloc(alloc, sizeof(*layout), VN_DEFAULT_ALIGN,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!layout)
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vn_object_base_init(&layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT,
&dev->base);
VkPipelineLayout layout_handle = vn_pipeline_layout_to_handle(layout);
vn_async_vkCreatePipelineLayout(dev->instance, device, pCreateInfo, NULL,
&layout_handle);
*pPipelineLayout = layout_handle;
return VK_SUCCESS;
}
void
vn_DestroyPipelineLayout(VkDevice device,
VkPipelineLayout pipelineLayout,
const VkAllocationCallbacks *pAllocator)
{
struct vn_device *dev = vn_device_from_handle(device);
struct vn_pipeline_layout *layout =
vn_pipeline_layout_from_handle(pipelineLayout);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
if (!layout)
return;
vn_async_vkDestroyPipelineLayout(dev->instance, device, pipelineLayout,
NULL);
vn_object_base_fini(&layout->base);
vk_free(alloc, layout);
}
/* pipeline cache commands */
VkResult
vn_CreatePipelineCache(VkDevice device,
const VkPipelineCacheCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineCache *pPipelineCache)
{
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
struct vn_pipeline_cache *cache =
vk_zalloc(alloc, sizeof(*cache), VN_DEFAULT_ALIGN,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!cache)
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vn_object_base_init(&cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE,
&dev->base);
VkPipelineCacheCreateInfo local_create_info;
if (pCreateInfo->initialDataSize) {
local_create_info = *pCreateInfo;
local_create_info.pInitialData +=
sizeof(struct vk_pipeline_cache_header);
pCreateInfo = &local_create_info;
}
VkPipelineCache cache_handle = vn_pipeline_cache_to_handle(cache);
vn_async_vkCreatePipelineCache(dev->instance, device, pCreateInfo, NULL,
&cache_handle);
*pPipelineCache = cache_handle;
return VK_SUCCESS;
}
void
vn_DestroyPipelineCache(VkDevice device,
VkPipelineCache pipelineCache,
const VkAllocationCallbacks *pAllocator)
{
struct vn_device *dev = vn_device_from_handle(device);
struct vn_pipeline_cache *cache =
vn_pipeline_cache_from_handle(pipelineCache);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
if (!cache)
return;
vn_async_vkDestroyPipelineCache(dev->instance, device, pipelineCache,
NULL);
vn_object_base_fini(&cache->base);
vk_free(alloc, cache);
}
VkResult
vn_GetPipelineCacheData(VkDevice device,
VkPipelineCache pipelineCache,
size_t *pDataSize,
void *pData)
{
struct vn_device *dev = vn_device_from_handle(device);
struct vn_physical_device *physical_dev = dev->physical_device;
struct vk_pipeline_cache_header *header = pData;
VkResult result;
if (!pData) {
result = vn_call_vkGetPipelineCacheData(dev->instance, device,
pipelineCache, pDataSize, NULL);
if (result != VK_SUCCESS)
return vn_error(dev->instance, result);
*pDataSize += sizeof(*header);
return VK_SUCCESS;
}
if (*pDataSize <= sizeof(*header)) {
*pDataSize = 0;
return VK_INCOMPLETE;
}
const VkPhysicalDeviceProperties *props =
&physical_dev->properties.properties;
header->header_size = sizeof(*header);
header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
header->vendor_id = props->vendorID;
header->device_id = props->deviceID;
memcpy(header->uuid, props->pipelineCacheUUID, VK_UUID_SIZE);
*pDataSize -= header->header_size;
result =
vn_call_vkGetPipelineCacheData(dev->instance, device, pipelineCache,
pDataSize, pData + header->header_size);
if (result < VK_SUCCESS)
return vn_error(dev->instance, result);
*pDataSize += header->header_size;
return result;
}
VkResult
vn_MergePipelineCaches(VkDevice device,
VkPipelineCache dstCache,
uint32_t srcCacheCount,
const VkPipelineCache *pSrcCaches)
{
struct vn_device *dev = vn_device_from_handle(device);
vn_async_vkMergePipelineCaches(dev->instance, device, dstCache,
srcCacheCount, pSrcCaches);
return VK_SUCCESS;
}
/* pipeline commands */
VkResult
vn_CreateGraphicsPipelines(VkDevice device,
VkPipelineCache pipelineCache,
uint32_t createInfoCount,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines)
{
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
for (uint32_t i = 0; i < createInfoCount; i++) {
struct vn_pipeline *pipeline =
vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!pipeline) {
for (uint32_t j = 0; j < i; j++)
vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
&dev->base);
VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
pPipelines[i] = pipeline_handle;
}
vn_async_vkCreateGraphicsPipelines(dev->instance, device, pipelineCache,
createInfoCount, pCreateInfos, NULL,
pPipelines);
return VK_SUCCESS;
}
VkResult
vn_CreateComputePipelines(VkDevice device,
VkPipelineCache pipelineCache,
uint32_t createInfoCount,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines)
{
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
for (uint32_t i = 0; i < createInfoCount; i++) {
struct vn_pipeline *pipeline =
vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!pipeline) {
for (uint32_t j = 0; j < i; j++)
vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
&dev->base);
VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
pPipelines[i] = pipeline_handle;
}
vn_async_vkCreateComputePipelines(dev->instance, device, pipelineCache,
createInfoCount, pCreateInfos, NULL,
pPipelines);
return VK_SUCCESS;
}
void
vn_DestroyPipeline(VkDevice device,
VkPipeline _pipeline,
const VkAllocationCallbacks *pAllocator)
{
struct vn_device *dev = vn_device_from_handle(device);
struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
if (!pipeline)
return;
vn_async_vkDestroyPipeline(dev->instance, device, _pipeline, NULL);
vn_object_base_fini(&pipeline->base);
vk_free(alloc, pipeline);
}

View File

@@ -349,6 +349,38 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(vn_query_pool,
VkQueryPool,
VK_OBJECT_TYPE_QUERY_POOL)
struct vn_shader_module {
struct vn_object_base base;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_shader_module,
base.base,
VkShaderModule,
VK_OBJECT_TYPE_SHADER_MODULE)
struct vn_pipeline_layout {
struct vn_object_base base;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_pipeline_layout,
base.base,
VkPipelineLayout,
VK_OBJECT_TYPE_PIPELINE_LAYOUT)
struct vn_pipeline_cache {
struct vn_object_base base;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_pipeline_cache,
base.base,
VkPipelineCache,
VK_OBJECT_TYPE_PIPELINE_CACHE)
struct vn_pipeline {
struct vn_object_base base;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_pipeline,
base.base,
VkPipeline,
VK_OBJECT_TYPE_PIPELINE)
struct vn_command_buffer {
struct vn_object_base base;