mesa/src/virtio/vulkan/vn_android.c

1009 lines
33 KiB
C
Raw Normal View History

/*
* Copyright 2021 Google LLC
* SPDX-License-Identifier: MIT
*
* based in part on anv and radv which are:
* Copyright © 2015 Intel Corporation
* Copyright © 2016 Red Hat
* Copyright © 2016 Bas Nieuwenhuizen
*/
#include "vn_android.h"
#include <dlfcn.h>
#include <drm/drm_fourcc.h>
#include <hardware/gralloc.h>
#include <hardware/hwvulkan.h>
#include <vndk/hardware_buffer.h>
#include <vulkan/vk_icd.h>
#include "util/libsync.h"
#include "util/os_file.h"
#include "vn_device.h"
#include "vn_device_memory.h"
#include "vn_image.h"
#include "vn_queue.h"
static int
vn_hal_open(const struct hw_module_t *mod,
const char *id,
struct hw_device_t **dev);
/* Compile-time sanity checks for the Android HAL glue. */
static void UNUSED
static_asserts(void)
{
   /* hwvulkan dispatch magic must alias the ICD loader magic so the Android
    * loader recognizes our dispatchable handles
    */
   STATIC_ASSERT(HWVULKAN_DISPATCH_MAGIC == ICD_LOADER_MAGIC);
}
/* hwvulkan module descriptor; the Android HAL loader discovers this symbol
 * (HAL_MODULE_INFO_SYM) via dlsym and calls .methods->open to get the
 * hwvulkan device.
 */
PUBLIC struct hwvulkan_module_t HAL_MODULE_INFO_SYM = {
   .common = {
      .tag = HARDWARE_MODULE_TAG,
      .module_api_version = HWVULKAN_MODULE_API_VERSION_0_1,
      .hal_api_version = HARDWARE_HAL_API_VERSION,
      .id = HWVULKAN_HARDWARE_MODULE_ID,
      .name = "Venus Vulkan HAL",
      .author = "Google LLC",
      .methods = &(hw_module_methods_t) {
         .open = vn_hal_open,
      },
   },
};
static const gralloc_module_t *gralloc = NULL;
/* hw_device_t close hook: drops the gralloc module reference acquired in
 * vn_hal_open.
 */
static int
vn_hal_close(UNUSED struct hw_device_t *dev)
{
   dlclose(gralloc->common.dso);
   return 0;
}
/* hwvulkan device handed back to the Android loader from vn_hal_open; it
 * exposes only the three global entry points, everything else goes through
 * GetInstanceProcAddr.
 */
static hwvulkan_device_t vn_hal_dev = {
   .common = {
      .tag = HARDWARE_DEVICE_TAG,
      .version = HWVULKAN_DEVICE_API_VERSION_0_1,
      .module = &HAL_MODULE_INFO_SYM.common,
      .close = vn_hal_close,
   },
   .EnumerateInstanceExtensionProperties = vn_EnumerateInstanceExtensionProperties,
   .CreateInstance = vn_CreateInstance,
   .GetInstanceProcAddr = vn_GetInstanceProcAddr,
};
/* HAL open hook: resolves the gralloc module needed for buffer info queries
 * and returns the hwvulkan device.
 *
 * Only CrOS gralloc with a usable perform() hook is supported; any other
 * gralloc causes the open to fail with -1.
 */
static int
vn_hal_open(const struct hw_module_t *mod,
            const char *id,
            struct hw_device_t **dev)
{
   static const char CROS_GRALLOC_MODULE_NAME[] = "CrOS Gralloc";

   assert(mod == &HAL_MODULE_INFO_SYM.common);
   assert(strcmp(id, HWVULKAN_DEVICE_0) == 0);

   /* get gralloc module for gralloc buffer info query */
   const int err = hw_get_module(GRALLOC_HARDWARE_MODULE_ID,
                                 (const hw_module_t **)&gralloc);
   if (err) {
      if (VN_DEBUG(WSI))
         vn_log(NULL, "failed to open gralloc module(ret=%d)", err);
      return err;
   }

   if (VN_DEBUG(WSI))
      vn_log(NULL, "opened gralloc module name: %s", gralloc->common.name);

   /* reject anything that is not CrOS gralloc with the perform hook */
   if (!gralloc->perform ||
       strcmp(gralloc->common.name, CROS_GRALLOC_MODULE_NAME) != 0) {
      dlclose(gralloc->common.dso);
      return -1;
   }

   *dev = &vn_hal_dev.common;

   return 0;
}
/* Maps a VkFormat to the AHardwareBuffer format used for allocation.
 *
 * Returns 0 when there is no corresponding AHB format.
 */
static uint32_t
vn_android_ahb_format_from_vk_format(VkFormat format)
{
   static const struct {
      VkFormat vk_format;
      uint32_t ahb_format;
   } format_map[] = {
      { VK_FORMAT_R8G8B8A8_UNORM, AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM },
      { VK_FORMAT_R8G8B8_UNORM, AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM },
      { VK_FORMAT_R5G6B5_UNORM_PACK16, AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM },
      { VK_FORMAT_R16G16B16A16_SFLOAT,
        AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT },
      { VK_FORMAT_A2B10G10R10_UNORM_PACK32,
        AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM },
      { VK_FORMAT_D16_UNORM, AHARDWAREBUFFER_FORMAT_D16_UNORM },
      { VK_FORMAT_X8_D24_UNORM_PACK32, AHARDWAREBUFFER_FORMAT_D24_UNORM },
      { VK_FORMAT_D24_UNORM_S8_UINT,
        AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT },
      { VK_FORMAT_D32_SFLOAT, AHARDWAREBUFFER_FORMAT_D32_FLOAT },
      { VK_FORMAT_D32_SFLOAT_S8_UINT,
        AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT },
      { VK_FORMAT_S8_UINT, AHARDWAREBUFFER_FORMAT_S8_UINT },
      { VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,
        AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420 },
   };
   const size_t count = sizeof(format_map) / sizeof(format_map[0]);

   for (size_t i = 0; i < count; i++) {
      if (format_map[i].vk_format == format)
         return format_map[i].ahb_format;
   }

   /* no AHB equivalent */
   return 0;
}
/* Maps an AHardwareBuffer format to a compatible VkFormat.
 *
 * Returns VK_FORMAT_UNDEFINED for formats with no VkFormat equivalent
 * (e.g. AHARDWAREBUFFER_FORMAT_BLOB).
 */
VkFormat
vn_android_ahb_format_to_vk_format(uint32_t format)
{
   static const struct {
      uint32_t ahb_format;
      VkFormat vk_format;
   } format_map[] = {
      { AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
      /* the X channel is exposed as A to Vulkan */
      { AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
      { AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8_UNORM },
      { AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16 },
      { AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT,
        VK_FORMAT_R16G16B16A16_SFLOAT },
      { AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,
        VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
      { AHARDWAREBUFFER_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM },
      { AHARDWAREBUFFER_FORMAT_D24_UNORM, VK_FORMAT_X8_D24_UNORM_PACK32 },
      { AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT,
        VK_FORMAT_D24_UNORM_S8_UINT },
      { AHARDWAREBUFFER_FORMAT_D32_FLOAT, VK_FORMAT_D32_SFLOAT },
      { AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT,
        VK_FORMAT_D32_SFLOAT_S8_UINT },
      { AHARDWAREBUFFER_FORMAT_S8_UINT, VK_FORMAT_S8_UINT },
      /* XXX Add a gralloc query for the resolved drm format and then map to a
       * compatiable VkFormat.
       */
      { AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420,
        VK_FORMAT_G8_B8R8_2PLANE_420_UNORM },
      { AHARDWAREBUFFER_FORMAT_IMPLEMENTATION_DEFINED,
        VK_FORMAT_G8_B8R8_2PLANE_420_UNORM },
   };
   const size_t count = sizeof(format_map) / sizeof(format_map[0]);

   for (size_t i = 0; i < count; i++) {
      if (format_map[i].ahb_format == format)
         return format_map[i].vk_format;
   }

   return VK_FORMAT_UNDEFINED;
}
/* Translates VkImage usage and create flags into AHardwareBuffer usage bits.
 *
 * The result always contains at least one GPU usage bit, defaulting to
 * GPU_SAMPLED_IMAGE when nothing else applies.
 */
uint64_t
vn_android_get_ahb_usage(const VkImageUsageFlags usage,
                         const VkImageCreateFlags flags)
{
   static const VkImageUsageFlags sampled_bits =
      VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
   static const VkImageUsageFlags attachment_bits =
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
      VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;

   uint64_t ahb_usage = 0;
   if (usage & sampled_bits)
      ahb_usage |= AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
   if (usage & attachment_bits)
      ahb_usage |= AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER;

   if (flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
      ahb_usage |= AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP;
   if (flags & VK_IMAGE_CREATE_PROTECTED_BIT)
      ahb_usage |= AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;

   /* must include at least one GPU usage flag */
   return ahb_usage ? ahb_usage : AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
}
/* Reports the gralloc consumer/producer usage bits needed for swapchain
 * images with the given format and usage.
 *
 * Shared-present swapchain images are not supported and fail with
 * VK_ERROR_INITIALIZATION_FAILED.
 */
VkResult
vn_GetSwapchainGrallocUsage2ANDROID(
   VkDevice device,
   VkFormat format,
   VkImageUsageFlags imageUsage,
   VkSwapchainImageUsageFlagsANDROID swapchainImageUsage,
   uint64_t *grallocConsumerUsage,
   uint64_t *grallocProducerUsage)
{
   struct vn_device *dev = vn_device_from_handle(device);
   static const VkImageUsageFlags producer_bits =
      VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
   static const VkImageUsageFlags consumer_bits =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;

   *grallocConsumerUsage = 0;
   *grallocProducerUsage = 0;

   if (swapchainImageUsage & VK_SWAPCHAIN_IMAGE_USAGE_SHARED_BIT_ANDROID)
      return vn_error(dev->instance, VK_ERROR_INITIALIZATION_FAILED);

   if (VN_DEBUG(WSI))
      vn_log(dev->instance, "format=%d, imageUsage=0x%x", format, imageUsage);

   if (imageUsage & producer_bits)
      *grallocProducerUsage |= AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER;
   if (imageUsage & consumer_bits)
      *grallocConsumerUsage |= AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;

   return VK_SUCCESS;
}
/* Buffer info filled by the CrOS gralloc0 perform hook
 * (CROS_GRALLOC_DRM_GET_BUFFER_INFO); the layout must match the definition
 * on the gralloc side.
 */
struct cros_gralloc0_buffer_info {
   uint32_t drm_fourcc; /* ignored */
   int num_fds; /* ignored */
   int fds[4]; /* ignored */
   uint64_t modifier;
   uint32_t offset[4];
   uint32_t stride[4];
};
/* Subset of the gralloc buffer info consumed by venus: the DRM format
 * modifier plus per-plane offsets and strides (up to 4 planes).
 */
struct vn_android_gralloc_buffer_properties {
   uint64_t modifier;
   uint32_t offset[4];
   uint32_t stride[4];
};
/* Extracts the dma_buf fd from a native handle into |out_dma_buf|.
 *
 * Returns VK_ERROR_INVALID_EXTERNAL_HANDLE when the handle carries no fd or
 * the first fd is invalid. Ownership of the fd stays with the handle.
 */
static VkResult
vn_android_get_dma_buf_from_native_handle(const native_handle_t *handle,
                                          int *out_dma_buf)
{
   /* There can be multiple fds wrapped inside a native_handle_t, but we
    * expect only the 1st one points to the dma_buf. For multi-planar format,
    * there should only exist one dma_buf as well. The other fd(s) may point
    * to shared memory used to store buffer metadata or other vendor specific
    * bits.
    */
   if (handle->numFds < 1) {
      vn_log(NULL, "handle->numFds is %d, expected >= 1", handle->numFds);
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;
   }

   const int fd = handle->data[0];
   if (fd < 0)
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;

   *out_dma_buf = fd;

   return VK_SUCCESS;
}
/* Queries CrOS gralloc for the modifier and plane layout of |handle|.
 *
 * Returns false when the perform hook fails or reports an invalid modifier.
 */
static bool
vn_android_get_gralloc_buffer_properties(
   buffer_handle_t handle,
   struct vn_android_gralloc_buffer_properties *out_props)
{
   /* perform op implemented by CrOS gralloc for buffer info queries */
   static const int32_t CROS_GRALLOC_DRM_GET_BUFFER_INFO = 4;
   struct cros_gralloc0_buffer_info info;

   const int ret = gralloc->perform(gralloc, CROS_GRALLOC_DRM_GET_BUFFER_INFO,
                                    handle, &info);
   if (ret != 0)
      return false;

   if (info.modifier == DRM_FORMAT_MOD_INVALID)
      return false;

   out_props->modifier = info.modifier;
   for (uint32_t plane = 0; plane < 4; plane++) {
      out_props->offset[plane] = info.offset[plane];
      out_props->stride[plane] = info.stride[plane];
   }

   return true;
}
/* Finds the VkDrmFormatModifierPropertiesEXT of |format| that matches
 * |modifier|, using the standard two-call query against the physical device.
 *
 * Fails with VK_ERROR_INVALID_EXTERNAL_HANDLE when the format advertises no
 * modifiers (except for the G8_B8R8_2PLANE_420 fallback below) or when none
 * of them matches |modifier|.
 */
static VkResult
vn_android_get_modifier_properties(struct vn_device *dev,
                                   VkFormat format,
                                   uint64_t modifier,
                                   const VkAllocationCallbacks *alloc,
                                   VkDrmFormatModifierPropertiesEXT *out_props)
{
   VkPhysicalDevice physical_device =
      vn_physical_device_to_handle(dev->physical_device);
   VkDrmFormatModifierPropertiesListEXT mod_prop_list = {
      .sType = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
      .pNext = NULL,
      .drmFormatModifierCount = 0,
      .pDrmFormatModifierProperties = NULL,
   };
   VkFormatProperties2 format_prop = {
      .sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2,
      .pNext = &mod_prop_list,
   };
   VkDrmFormatModifierPropertiesEXT *mod_props = NULL;
   bool modifier_found = false;

   /* 1st call: query the modifier count only */
   vn_GetPhysicalDeviceFormatProperties2(physical_device, format,
                                         &format_prop);

   if (!mod_prop_list.drmFormatModifierCount) {
      /* XXX Remove this fallback after host VK_EXT_image_drm_format_modifier
       * can properly support VK_FORMAT_G8_B8R8_2PLANE_420_UNORM.
       */
      if (format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) {
         vn_log(dev->instance, "No compatible modifier for VkFormat(%u)",
                format);
         return VK_ERROR_INVALID_EXTERNAL_HANDLE;
      }
      /* synthesize minimal properties for the 2-plane YUV fallback */
      out_props->drmFormatModifier = modifier;
      out_props->drmFormatModifierPlaneCount = 2;
      out_props->drmFormatModifierTilingFeatures =
         VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
      return VK_SUCCESS;
   }

   mod_props = vk_zalloc(
      alloc, sizeof(*mod_props) * mod_prop_list.drmFormatModifierCount,
      VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!mod_props)
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   mod_prop_list.pDrmFormatModifierProperties = mod_props;

   /* 2nd call: retrieve the modifier properties into the array */
   vn_GetPhysicalDeviceFormatProperties2(physical_device, format,
                                         &format_prop);

   for (uint32_t i = 0; i < mod_prop_list.drmFormatModifierCount; i++) {
      if (mod_props[i].drmFormatModifier == modifier) {
         *out_props = mod_props[i];
         modifier_found = true;
         break;
      }
   }

   vk_free(alloc, mod_props);

   if (!modifier_found) {
      vn_log(dev->instance,
             "No matching modifier(%" PRIu64 ") properties for VkFormat(%u)",
             modifier, format);
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;
   }

   return VK_SUCCESS;
}
/* Holds the VkImageCreateInfo pNext chain for importing a gralloc buffer as
 * a dma_buf backed image: create -> external -> modifier -> (app chain),
 * with layouts backing modifier.pPlaneLayouts.
 */
struct vn_android_image_builder {
   VkImageCreateInfo create;
   VkSubresourceLayout layouts[4];
   VkImageDrmFormatModifierExplicitCreateInfoEXT modifier;
   VkExternalMemoryImageCreateInfo external;
};
/* Builds the image create info chain for importing |handle| as a dma_buf
 * backed VkImage: queries gralloc for the modifier and per-plane layout,
 * then chains VkExternalMemoryImageCreateInfo and
 * VkImageDrmFormatModifierExplicitCreateInfoEXT ahead of the app-provided
 * pNext chain. |out_builder| provides the storage for the chained structs.
 */
static VkResult
vn_android_get_image_builder(struct vn_device *dev,
                             const VkImageCreateInfo *create_info,
                             const native_handle_t *handle,
                             const VkAllocationCallbacks *alloc,
                             struct vn_android_image_builder *out_builder)
{
   VkResult result = VK_SUCCESS;
   struct vn_android_gralloc_buffer_properties buf_props;
   VkDrmFormatModifierPropertiesEXT mod_props;

   if (!vn_android_get_gralloc_buffer_properties(handle, &buf_props))
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;

   result = vn_android_get_modifier_properties(
      dev, create_info->format, buf_props.modifier, alloc, &mod_props);
   if (result != VK_SUCCESS)
      return result;

   /* fill plane layouts from gralloc; entries beyond the plane count stay
    * zeroed
    */
   memset(out_builder->layouts, 0, sizeof(out_builder->layouts));
   for (uint32_t i = 0; i < mod_props.drmFormatModifierPlaneCount; i++) {
      out_builder->layouts[i].offset = buf_props.offset[i];
      out_builder->layouts[i].rowPitch = buf_props.stride[i];
   }

   /* chain: create -> external -> modifier -> original app pNext */
   out_builder->modifier = (VkImageDrmFormatModifierExplicitCreateInfoEXT){
      .sType =
         VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT,
      .pNext = create_info->pNext,
      .drmFormatModifier = buf_props.modifier,
      .drmFormatModifierPlaneCount = mod_props.drmFormatModifierPlaneCount,
      .pPlaneLayouts = out_builder->layouts,
   };
   out_builder->external = (VkExternalMemoryImageCreateInfo){
      .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
      .pNext = &out_builder->modifier,
      .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
   };
   out_builder->create = *create_info;
   out_builder->create.pNext = &out_builder->external;
   out_builder->create.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;

   return VK_SUCCESS;
}
/* Creates a vn_image backed by an ANativeWindow buffer and binds it to
 * memory imported from the buffer's dma_buf.
 *
 * On success the image owns the imported memory (img->private_memory) and is
 * returned in |out_img|. On failure any created image/memory is destroyed
 * and a vn_error is returned. The caller keeps ownership of
 * anb_info->handle; only a dup'ed fd is handed to vn_AllocateMemory.
 */
VkResult
vn_android_image_from_anb(struct vn_device *dev,
                          const VkImageCreateInfo *create_info,
                          const VkNativeBufferANDROID *anb_info,
                          const VkAllocationCallbacks *alloc,
                          struct vn_image **out_img)
{
   /* If anb_info->handle points to a classic resource created from
    * virtio_gpu_cmd_resource_create_3d, anb_info->stride is the stride of the
    * guest shadow storage other than the host gpu storage.
    *
    * We also need to pass the correct stride to vn_CreateImage, which will be
    * done via VkImageDrmFormatModifierExplicitCreateInfoEXT and will require
    * VK_EXT_image_drm_format_modifier support in the host driver. The struct
    * needs host storage info which can be queried from cros gralloc.
    */
   VkResult result = VK_SUCCESS;
   VkDevice device = vn_device_to_handle(dev);
   VkDeviceMemory memory = VK_NULL_HANDLE;
   VkImage image = VK_NULL_HANDLE;
   struct vn_image *img = NULL;
   uint64_t alloc_size = 0;
   uint32_t mem_type_bits = 0;
   int dma_buf_fd = -1;
   int dup_fd = -1;
   struct vn_android_image_builder builder;

   result = vn_android_get_dma_buf_from_native_handle(anb_info->handle,
                                                      &dma_buf_fd);
   if (result != VK_SUCCESS)
      goto fail;

   result = vn_android_get_image_builder(dev, create_info, anb_info->handle,
                                         alloc, &builder);
   if (result != VK_SUCCESS)
      goto fail;

   /* encoder will strip the Android specific pNext structs */
   result = vn_image_create(dev, &builder.create, alloc, &img);
   if (result != VK_SUCCESS)
      goto fail;

   image = vn_image_to_handle(img);

   VkMemoryRequirements mem_req;
   vn_GetImageMemoryRequirements(device, image, &mem_req);
   if (!mem_req.memoryTypeBits) {
      result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
      goto fail;
   }

   result = vn_get_memory_dma_buf_properties(dev, dma_buf_fd, &alloc_size,
                                             &mem_type_bits);
   if (result != VK_SUCCESS)
      goto fail;

   if (VN_DEBUG(WSI)) {
      vn_log(dev->instance,
             "size = img(%" PRIu64 ") fd(%" PRIu64 "), "
             "memoryTypeBits = img(0x%X) & fd(0x%X)",
             mem_req.size, alloc_size, mem_req.memoryTypeBits, mem_type_bits);
   }

   /* the dma_buf must be large enough to back the image */
   if (alloc_size < mem_req.size) {
      result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
      goto fail;
   }

   /* the import must satisfy both the image and the fd requirements */
   mem_type_bits &= mem_req.memoryTypeBits;
   if (!mem_type_bits) {
      result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
      goto fail;
   }

   /* dup since the import transfers fd ownership to the driver */
   dup_fd = os_dupfd_cloexec(dma_buf_fd);
   if (dup_fd < 0) {
      result = (errno == EMFILE) ? VK_ERROR_TOO_MANY_OBJECTS
                                 : VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   const VkImportMemoryFdInfoKHR import_fd_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
      .pNext = NULL,
      .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
      .fd = dup_fd,
   };
   const VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_fd_info,
      .allocationSize = mem_req.size,
      /* any set bit works; pick the lowest */
      .memoryTypeIndex = ffs(mem_type_bits) - 1,
   };
   result = vn_AllocateMemory(device, &memory_info, alloc, &memory);
   if (result != VK_SUCCESS) {
      /* only need to close the dup_fd on import failure */
      close(dup_fd);
      goto fail;
   }

   result = vn_BindImageMemory(device, image, memory, 0);
   if (result != VK_SUCCESS)
      goto fail;

   img->is_wsi = true;
   /* Android WSI image owns the memory */
   img->private_memory = memory;
   *out_img = img;

   return VK_SUCCESS;

fail:
   if (image != VK_NULL_HANDLE)
      vn_DestroyImage(device, image, alloc);
   if (memory != VK_NULL_HANDLE)
      vn_FreeMemory(device, memory, alloc);
   return vn_error(dev->instance, result);
}
/* Acquires a swapchain image: synchronously waits on the native fence fd,
 * then signals the out semaphore and fence with pre-signaled payloads.
 */
VkResult
vn_AcquireImageANDROID(VkDevice device,
                       UNUSED VkImage image,
                       int nativeFenceFd,
                       VkSemaphore semaphore,
                       VkFence fence)
{
   /* At this moment, out semaphore and fence are filled with already signaled
    * payloads, and the native fence fd is waited inside until signaled.
    */
   struct vn_device *dev = vn_device_from_handle(device);

   if (nativeFenceFd >= 0) {
      const int wait_ret = sync_wait(nativeFenceFd, INT32_MAX);
      /* Android loader expects the ICD to always close the fd */
      close(nativeFenceFd);
      if (wait_ret)
         return vn_error(dev->instance, VK_ERROR_SURFACE_LOST_KHR);
   }

   struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
   if (sem)
      vn_semaphore_signal_wsi(dev, sem);

   struct vn_fence *fen = vn_fence_from_handle(fence);
   if (fen)
      vn_fence_signal_wsi(dev, fen);

   return VK_SUCCESS;
}
/* Converts the wait semaphores into an empty submit gated by que->wait_fence,
 * waits for that fence, and reports no out native fence (-1).
 *
 * Fix: the heap-allocated stage mask array (used when waitSemaphoreCount
 * exceeds the stack storage) was previously never freed, leaking one
 * allocation per call with more than 8 wait semaphores.
 */
VkResult
vn_QueueSignalReleaseImageANDROID(VkQueue queue,
                                  uint32_t waitSemaphoreCount,
                                  const VkSemaphore *pWaitSemaphores,
                                  VkImage image,
                                  int *pNativeFenceFd)
{
   /* At this moment, the wait semaphores are converted to a VkFence via an
    * empty submit. The VkFence is then waited inside until signaled, and the
    * out native fence fd is set to -1.
    */
   VkResult result = VK_SUCCESS;
   struct vn_queue *que = vn_queue_from_handle(queue);
   const VkAllocationCallbacks *alloc = &que->device->base.base.alloc;
   VkDevice device = vn_device_to_handle(que->device);
   VkPipelineStageFlags local_stage_masks[8];
   VkPipelineStageFlags *stage_masks = local_stage_masks;

   if (waitSemaphoreCount == 0)
      goto out;

   /* fall back to the heap when the stack array is too small */
   if (waitSemaphoreCount > ARRAY_SIZE(local_stage_masks)) {
      stage_masks =
         vk_alloc(alloc, sizeof(VkPipelineStageFlags) * waitSemaphoreCount,
                  VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!stage_masks) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto out;
      }
   }

   for (uint32_t i = 0; i < waitSemaphoreCount; i++)
      stage_masks[i] = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;

   const VkSubmitInfo submit_info = {
      .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
      .pNext = NULL,
      .waitSemaphoreCount = waitSemaphoreCount,
      .pWaitSemaphores = pWaitSemaphores,
      .pWaitDstStageMask = stage_masks,
      .commandBufferCount = 0,
      .pCommandBuffers = NULL,
      .signalSemaphoreCount = 0,
      .pSignalSemaphores = NULL,
   };
   result = vn_QueueSubmit(queue, 1, &submit_info, que->wait_fence);

   /* the stage masks are consumed by the submit; free the heap copy now so
    * no path below can leak it
    */
   if (stage_masks != local_stage_masks)
      vk_free(alloc, stage_masks);

   if (result != VK_SUCCESS)
      goto out;

   result =
      vn_WaitForFences(device, 1, &que->wait_fence, VK_TRUE, UINT64_MAX);
   vn_ResetFences(device, 1, &que->wait_fence);

out:
   *pNativeFenceFd = -1;
   return result;
}
/* Fills |out_props| with the format properties of |ahb| for AHB import.
 *
 * The AHB must carry at least one GPU usage bit. BLOB-format AHBs report
 * VK_FORMAT_UNDEFINED with the raw external format; otherwise the format
 * features are derived from the matching DRM format modifier properties.
 */
static VkResult
vn_android_get_ahb_format_properties(
   struct vn_device *dev,
   const struct AHardwareBuffer *ahb,
   VkAndroidHardwareBufferFormatPropertiesANDROID *out_props)
{
   AHardwareBuffer_Desc desc;
   VkFormat format;
   struct vn_android_gralloc_buffer_properties buf_props;
   VkDrmFormatModifierPropertiesEXT mod_props;

   AHardwareBuffer_describe(ahb, &desc);
   /* the AHB must be GPU-usable to be importable into Vulkan */
   if (!(desc.usage & (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
                       AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
                       AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
      vn_log(dev->instance,
             "AHB usage(%" PRIu64 ") must include at least one GPU bit",
             desc.usage);
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;
   }

   /* We implement AHB extension support with EXT_image_drm_format_modifier.
    * It requires us to have a compatible VkFormat but not DRM formats. So if
    * the ahb is not intended for backing a VkBuffer, error out early if the
    * format is VK_FORMAT_UNDEFINED.
    */
   format = vn_android_ahb_format_to_vk_format(desc.format);
   if (format == VK_FORMAT_UNDEFINED) {
      if (desc.format != AHARDWAREBUFFER_FORMAT_BLOB) {
         vn_log(dev->instance, "Unknown AHB format(0x%X)", desc.format);
         return VK_ERROR_INVALID_EXTERNAL_HANDLE;
      }
      /* BLOB has no VkFormat; expose the raw format as externalFormat */
      out_props->format = format;
      out_props->externalFormat = desc.format;
      return VK_SUCCESS;
   }

   if (!vn_android_get_gralloc_buffer_properties(
          AHardwareBuffer_getNativeHandle(ahb), &buf_props))
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;

   VkResult result = vn_android_get_modifier_properties(
      dev, format, buf_props.modifier, &dev->base.base.alloc, &mod_props);
   if (result != VK_SUCCESS)
      return result;

   /* The spec requires that formatFeatures must include at least one of
    * VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or
    * VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT.
    */
   const VkFormatFeatureFlags format_features =
      mod_props.drmFormatModifierTilingFeatures |
      VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT;

   /* overwrite everything but the caller-provided sType/pNext; the suggested
    * ycbcr values are fixed defaults — NOTE(review): not queried from
    * gralloc, confirm they match the producer's expectations
    */
   *out_props = (VkAndroidHardwareBufferFormatPropertiesANDROID) {
      .sType = out_props->sType,
      .pNext = out_props->pNext,
      .format = format,
      .externalFormat = desc.format,
      .formatFeatures = format_features,
      .samplerYcbcrConversionComponents = {
         .r = VK_COMPONENT_SWIZZLE_IDENTITY,
         .g = VK_COMPONENT_SWIZZLE_IDENTITY,
         .b = VK_COMPONENT_SWIZZLE_IDENTITY,
         .a = VK_COMPONENT_SWIZZLE_IDENTITY,
      },
      .suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601,
      .suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL,
      .suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT,
      .suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT,
   };

   return VK_SUCCESS;
}
/* Reports the allocation size and memory type bits for importing |buffer|,
 * and fills a chained VkAndroidHardwareBufferFormatPropertiesANDROID when
 * the app requested one.
 */
VkResult
vn_GetAndroidHardwareBufferPropertiesANDROID(
   VkDevice device,
   const struct AHardwareBuffer *buffer,
   VkAndroidHardwareBufferPropertiesANDROID *pProperties)
{
   struct vn_device *dev = vn_device_from_handle(device);
   VkResult result;

   /* fill the chained format properties first, if requested */
   VkAndroidHardwareBufferFormatPropertiesANDROID *fmt_props =
      vk_find_struct(pProperties->pNext,
                     ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID);
   if (fmt_props) {
      result = vn_android_get_ahb_format_properties(dev, buffer, fmt_props);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   int dma_buf_fd = -1;
   result = vn_android_get_dma_buf_from_native_handle(
      AHardwareBuffer_getNativeHandle(buffer), &dma_buf_fd);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   uint64_t alloc_size = 0;
   uint32_t mem_type_bits = 0;
   result = vn_get_memory_dma_buf_properties(dev, dma_buf_fd, &alloc_size,
                                             &mem_type_bits);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   pProperties->allocationSize = alloc_size;
   pProperties->memoryTypeBits = mem_type_bits;

   return VK_SUCCESS;
}
static AHardwareBuffer *
vn_android_ahb_allocate(uint32_t width,
uint32_t height,
uint32_t layers,
uint32_t format,
uint64_t usage)
{
AHardwareBuffer *ahb = NULL;
AHardwareBuffer_Desc desc;
int ret = 0;
memset(&desc, 0, sizeof(desc));
desc.width = width;
desc.height = height;
desc.layers = layers;
desc.format = format;
desc.usage = usage;
ret = AHardwareBuffer_allocate(&desc, &ahb);
if (ret) {
/* We just log the error code here for now since the platform falsely
* maps all gralloc allocation failures to oom.
*/
vn_log(NULL, "AHB alloc(w=%u,h=%u,l=%u,f=%u,u=%" PRIu64 ") failed(%d)",
width, height, layers, format, usage, ret);
return NULL;
}
return ahb;
}
/* Derives VkPhysicalDeviceImageDrmFormatModifierInfoEXT for the given image
 * format info by allocating a small probe AHB and asking gralloc for its
 * modifier. Returns false when the format has no AHB equivalent or any
 * allocation/query step fails.
 */
bool
vn_android_get_drm_format_modifier_info(
   const VkPhysicalDeviceImageFormatInfo2 *format_info,
   VkPhysicalDeviceImageDrmFormatModifierInfoEXT *out_info)
{
   /* To properly fill VkPhysicalDeviceImageDrmFormatModifierInfoEXT, we have
    * to allocate an ahb to retrieve the drm format modifier. For the image
    * sharing mode, we assume VK_SHARING_MODE_EXCLUSIVE for now.
    */
   struct vn_android_gralloc_buffer_properties buf_props;

   assert(format_info->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT);

   const uint32_t format =
      vn_android_ahb_format_from_vk_format(format_info->format);
   if (!format)
      return false;

   const uint64_t usage =
      vn_android_get_ahb_usage(format_info->usage, format_info->flags);
   AHardwareBuffer *ahb = vn_android_ahb_allocate(16, 16, 1, format, usage);
   if (!ahb)
      return false;

   /* the probe AHB is only needed for the gralloc query */
   const bool queried = vn_android_get_gralloc_buffer_properties(
      AHardwareBuffer_getNativeHandle(ahb), &buf_props);
   AHardwareBuffer_release(ahb);
   if (!queried)
      return false;

   *out_info = (VkPhysicalDeviceImageDrmFormatModifierInfoEXT){
      .sType =
         VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT,
      .pNext = NULL,
      .drmFormatModifier = buf_props.modifier,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
      .queueFamilyIndexCount = 0,
      .pQueueFamilyIndices = NULL,
   };

   return true;
}
/* Creates a deferred vn_image for AHB-backed allocation.
 *
 * When the app supplies a non-zero VkExternalFormatANDROID, the undefined
 * format is resolved to the VkFormat matching the external (AHB) format
 * before deferring creation.
 */
VkResult
vn_android_image_from_ahb(struct vn_device *dev,
                          const VkImageCreateInfo *create_info,
                          const VkAllocationCallbacks *alloc,
                          struct vn_image **out_img)
{
   VkImageCreateInfo local_info;
   const VkExternalFormatANDROID *ext_info =
      vk_find_struct_const(create_info->pNext, EXTERNAL_FORMAT_ANDROID);

   const bool has_external_format = ext_info && ext_info->externalFormat;
   if (has_external_format) {
      /* spec constraints on images created with an external format */
      assert(create_info->format == VK_FORMAT_UNDEFINED);
      assert(create_info->imageType == VK_IMAGE_TYPE_2D);
      assert(create_info->usage == VK_IMAGE_USAGE_SAMPLED_BIT);
      assert(create_info->tiling == VK_IMAGE_TILING_OPTIMAL);

      local_info = *create_info;
      local_info.format =
         vn_android_ahb_format_to_vk_format(ext_info->externalFormat);
      create_info = &local_info;
   }

   return vn_image_create_deferred(dev, create_info, alloc, out_img);
}
VkResult
vn_android_device_import_ahb(struct vn_device *dev,
struct vn_device_memory *mem,
const VkMemoryAllocateInfo *alloc_info,
const VkAllocationCallbacks *alloc,
struct AHardwareBuffer *ahb)
{
venus: fix mismatched bo mmap_size for export and multiple imports Spec requires apps to use the size returned from vkGetAndroidHardwareBufferPropertiesANDROID for AHB import, which includes dedicated image/buffer import and non-dedicated buffer import. Spec requires venus to use the size from image and buffer memory requirement for dma_buf fd import if it's dedicated. If not dedicated, the actual payload size should be used. For AHB export allocation of VkImage, the app provided size will be 0, and internally we must use the size from image memory requirement. For AHB export allocation of VkBuffer, the app provided size comes from buffer memory requirements and it can be smaller than the actual AHB size due to page alignment. Internally that's the size we must use. For AHB import, app must use the size from AHB prop query. Internally, we have to override that with the size from memory requirement if the import operation is dedicated to a VkImage or a VkBuffer. If not dedicated, the actual payload size should be used. The not working scenario is: 1. App creates an AHB backed VkBuffer, and the exported AHB size is larger than the buffer memory requirement (very common). 2. App imports the AHB without a dedicated VkBuffer. Then the entire AHB payload will be imported and the mmap_size might increase. Test: dEQP-VK.api.external.memory.android_hardware_buffer.* Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org> Reviewed-by: Chia-I Wu <olvaffe@gmail.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11055>
2021-05-28 06:00:24 +01:00
VkDevice device = vn_device_to_handle(dev);
const VkMemoryDedicatedAllocateInfo *dedicated_info =
vk_find_struct_const(alloc_info->pNext, MEMORY_DEDICATED_ALLOCATE_INFO);
const native_handle_t *handle = NULL;
int dma_buf_fd = -1;
int dup_fd = -1;
venus: fix mismatched bo mmap_size for export and multiple imports Spec requires apps to use the size returned from vkGetAndroidHardwareBufferPropertiesANDROID for AHB import, which includes dedicated image/buffer import and non-dedicated buffer import. Spec requires venus to use the size from image and buffer memory requirement for dma_buf fd import if it's dedicated. If not dedicated, the actual payload size should be used. For AHB export allocation of VkImage, the app provided size will be 0, and internally we must use the size from image memory requirement. For AHB export allocation of VkBuffer, the app provided size comes from buffer memory requirements and it can be smaller than the actual AHB size due to page alignment. Internally that's the size we must use. For AHB import, app must use the size from AHB prop query. Internally, we have to override that with the size from memory requirement if the import operation is dedicated to a VkImage or a VkBuffer. If not dedicated, the actual payload size should be used. The not working scenario is: 1. App creates an AHB backed VkBuffer, and the exported AHB size is larger than the buffer memory requirement (very common). 2. App imports the AHB without a dedicated VkBuffer. Then the entire AHB payload will be imported and the mmap_size might increase. Test: dEQP-VK.api.external.memory.android_hardware_buffer.* Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org> Reviewed-by: Chia-I Wu <olvaffe@gmail.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11055>
2021-05-28 06:00:24 +01:00
uint64_t alloc_size = 0;
uint32_t mem_type_bits = 0;
VkResult result = VK_SUCCESS;
handle = AHardwareBuffer_getNativeHandle(ahb);
result = vn_android_get_dma_buf_from_native_handle(handle, &dma_buf_fd);
if (result != VK_SUCCESS)
return result;
venus: fix mismatched bo mmap_size for export and multiple imports Spec requires apps to use the size returned from vkGetAndroidHardwareBufferPropertiesANDROID for AHB import, which includes dedicated image/buffer import and non-dedicated buffer import. Spec requires venus to use the size from image and buffer memory requirement for dma_buf fd import if it's dedicated. If not dedicated, the actual payload size should be used. For AHB export allocation of VkImage, the app provided size will be 0, and internally we must use the size from image memory requirement. For AHB export allocation of VkBuffer, the app provided size comes from buffer memory requirements and it can be smaller than the actual AHB size due to page alignment. Internally that's the size we must use. For AHB import, app must use the size from AHB prop query. Internally, we have to override that with the size from memory requirement if the import operation is dedicated to a VkImage or a VkBuffer. If not dedicated, the actual payload size should be used. The not working scenario is: 1. App creates an AHB backed VkBuffer, and the exported AHB size is larger than the buffer memory requirement (very common). 2. App imports the AHB without a dedicated VkBuffer. Then the entire AHB payload will be imported and the mmap_size might increase. Test: dEQP-VK.api.external.memory.android_hardware_buffer.* Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org> Reviewed-by: Chia-I Wu <olvaffe@gmail.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11055>
2021-05-28 06:00:24 +01:00
result = vn_get_memory_dma_buf_properties(dev, dma_buf_fd, &alloc_size,
&mem_type_bits);
if (result != VK_SUCCESS)
return result;
if (((1 << alloc_info->memoryTypeIndex) & mem_type_bits) == 0)
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
/* If ahb is for an image, finish the deferred image creation first */
if (dedicated_info && dedicated_info->image != VK_NULL_HANDLE) {
struct vn_image *img = vn_image_from_handle(dedicated_info->image);
struct vn_android_image_builder builder;
result = vn_android_get_image_builder(dev, &img->deferred_info->create,
handle, alloc, &builder);
if (result != VK_SUCCESS)
return result;
result = vn_image_init_deferred(dev, &builder.create, img);
if (result != VK_SUCCESS)
return result;
VkMemoryRequirements mem_req;
vn_GetImageMemoryRequirements(device, dedicated_info->image, &mem_req);
if (alloc_size < mem_req.size)
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
alloc_size = mem_req.size;
}
if (dedicated_info && dedicated_info->buffer != VK_NULL_HANDLE) {
VkMemoryRequirements mem_req;
vn_GetBufferMemoryRequirements(device, dedicated_info->buffer,
&mem_req);
if (alloc_size < mem_req.size)
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
alloc_size = mem_req.size;
}
errno = 0;
dup_fd = os_dupfd_cloexec(dma_buf_fd);
if (dup_fd < 0)
return (errno == EMFILE) ? VK_ERROR_TOO_MANY_OBJECTS
: VK_ERROR_OUT_OF_HOST_MEMORY;
/* Spec requires AHB export info to be present, so we must strip it. In
* practice, the AHB import path here only needs the main allocation info
* and the dedicated_info.
*/
VkMemoryDedicatedAllocateInfo local_dedicated_info;
/* Override when dedicated_info exists and is not the tail struct. */
if (dedicated_info && dedicated_info->pNext) {
local_dedicated_info = *dedicated_info;
local_dedicated_info.pNext = NULL;
dedicated_info = &local_dedicated_info;
}
const VkMemoryAllocateInfo local_alloc_info = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.pNext = dedicated_info,
.allocationSize = alloc_size,
.memoryTypeIndex = alloc_info->memoryTypeIndex,
};
result =
vn_device_memory_import_dma_buf(dev, mem, &local_alloc_info, dup_fd);
if (result != VK_SUCCESS) {
close(dup_fd);
return result;
}
AHardwareBuffer_acquire(ahb);
mem->ahb = ahb;
return VK_SUCCESS;
}
VkResult
vn_android_device_allocate_ahb(struct vn_device *dev,
struct vn_device_memory *mem,
const VkMemoryAllocateInfo *alloc_info,
const VkAllocationCallbacks *alloc)
{
const VkMemoryDedicatedAllocateInfo *dedicated_info =
vk_find_struct_const(alloc_info->pNext, MEMORY_DEDICATED_ALLOCATE_INFO);
uint32_t width = 0;
uint32_t height = 1;
uint32_t layers = 1;
uint32_t format = 0;
uint64_t usage = 0;
struct AHardwareBuffer *ahb = NULL;
if (dedicated_info && dedicated_info->image != VK_NULL_HANDLE) {
const VkImageCreateInfo *image_info =
&vn_image_from_handle(dedicated_info->image)->deferred_info->create;
assert(image_info);
width = image_info->extent.width;
height = image_info->extent.height;
layers = image_info->arrayLayers;
format = vn_android_ahb_format_from_vk_format(image_info->format);
/* TODO Need to further resolve the gralloc usage bits for image format
* list info, which might involve disabling compression if there exists
* no universally applied compression strategy across the formats.
*/
usage = vn_android_get_ahb_usage(image_info->usage, image_info->flags);
} else {
width = alloc_info->allocationSize;
format = AHARDWAREBUFFER_FORMAT_BLOB;
/* TODO AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER is not supported by cros
* gralloc. So here we work around with CPU usage bits for VkBuffer.
*/
usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
}
ahb = vn_android_ahb_allocate(width, height, layers, format, usage);
if (!ahb)
return VK_ERROR_OUT_OF_HOST_MEMORY;
VkResult result =
vn_android_device_import_ahb(dev, mem, alloc_info, alloc, ahb);
/* ahb alloc has already acquired a ref and import will acquire another,
* must release one here to avoid leak.
*/
AHardwareBuffer_release(ahb);
return result;
}
void
vn_android_release_ahb(struct AHardwareBuffer *ahb)
{
   /* Drop one reference on the AHB.  Thin wrapper over
    * AHardwareBuffer_release(); presumably kept so callers outside this
    * file avoid a direct dependency on the AHB headers — confirm.
    */
   AHardwareBuffer_release(ahb);
}
VkResult
vn_GetMemoryAndroidHardwareBufferANDROID(
   VkDevice device,
   const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
   struct AHardwareBuffer **pBuffer)
{
   /* Hand out the AHB backing this memory.  A reference is acquired on
    * behalf of the caller, who owns releasing it.
    */
   struct vn_device_memory *mem = vn_device_memory_from_handle(pInfo->memory);
   struct AHardwareBuffer *ahb = mem->ahb;

   AHardwareBuffer_acquire(ahb);
   *pBuffer = ahb;

   return VK_SUCCESS;
}