venus: initial support for WSI

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Ryan Neph <ryanneph@google.com>
Reviewed-by: Gert Wollny <gert.wollny@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5800>
Author: Chia-I Wu, 2019-10-21 13:52:12 -07:00, committed by Marge Bot
parent 91f914338d
commit 31a3f2e4f4

9 changed files with 682 additions and 10 deletions

src/virtio/vulkan/meson.build

@@ -38,6 +38,7 @@ libvn_files = files(
'vn_ring.c',
'vn_renderer_virtgpu.c',
'vn_renderer_vtest.c',
'vn_wsi.c',
)
vn_deps = [
@@ -52,6 +53,21 @@ vn_flags = [
no_override_init_args,
]
if with_platform_wayland
libvn_files += files('vn_wsi_wayland.c')
vn_deps += dep_wayland_client
vn_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
endif
if with_platform_x11
libvn_files += files('vn_wsi_x11.c')
vn_deps += dep_xcb_dri3
vn_flags += [
'-DVK_USE_PLATFORM_XCB_KHR',
'-DVK_USE_PLATFORM_XLIB_KHR',
]
endif
libvulkan_virtio = shared_library(
'vulkan_virtio',
[libvn_files, vn_entrypoints, sha1_h],

src/virtio/vulkan/vn_common.c

@@ -26,6 +26,7 @@ static const struct debug_control vn_debug_options[] = {
{ "init", VN_DEBUG_INIT },
{ "result", VN_DEBUG_RESULT },
{ "vtest", VN_DEBUG_VTEST },
{ "wsi", VN_DEBUG_WSI },
{ NULL, 0 },
};

src/virtio/vulkan/vn_common.h

@@ -86,6 +86,7 @@ enum vn_debug {
VN_DEBUG_INIT = 1ull << 0,
VN_DEBUG_RESULT = 1ull << 1,
VN_DEBUG_VTEST = 1ull << 2,
VN_DEBUG_WSI = 1ull << 3,
};
typedef uint64_t vn_object_id;
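
As context for the new VN_DEBUG_WSI bit, here is a minimal sketch of how the vn_debug_options table and the enum above typically fit together in this driver (vn_debug and vn_debug_init are assumptions modeled on the hunks, not verbatim; parse_debug_string and struct debug_control are Mesa's src/util debug helpers):

   #include <stdlib.h>        /* getenv() */
   #include "util/debug.h"    /* parse_debug_string(), struct debug_control */

   uint64_t vn_debug;         /* bitmask of VN_DEBUG_* bits */

   /* how a bit is tested, e.g. VN_DEBUG(WSI) in the hunks below */
   #define VN_DEBUG(category) (vn_debug & VN_DEBUG_##category)

   static void
   vn_debug_init(void)
   {
      /* e.g. VN_DEBUG=wsi,init in the environment enables those bits */
      vn_debug = parse_debug_string(getenv("VN_DEBUG"), vn_debug_options);
   }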

src/virtio/vulkan/vn_device.c

@@ -37,6 +37,20 @@ static const struct vk_instance_extension_table
.KHR_external_memory_capabilities = true,
.KHR_external_semaphore_capabilities = true,
.KHR_get_physical_device_properties2 = true,
/* WSI */
.KHR_get_surface_capabilities2 = true,
.KHR_surface = true,
.KHR_surface_protected_capabilities = true,
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
.KHR_wayland_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
.KHR_xcb_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
.KHR_xlib_surface = true,
#endif
};
static const driOptionDescription vn_dri_options[] = {
@@ -1388,7 +1402,12 @@ vn_physical_device_get_supported_extensions(
struct vk_device_extension_table *supported,
struct vk_device_extension_table *recognized)
{
- memset(supported, 0, sizeof(*supported));
+ *supported = (struct vk_device_extension_table){
+    /* WSI */
+    .KHR_incremental_present = true,
+    .KHR_swapchain = true,
+    .KHR_swapchain_mutable_format = true,
+ };
*recognized = (struct vk_device_extension_table){
/* promoted to VK_VERSION_1_1 */
@@ -1438,6 +1457,7 @@ vn_physical_device_get_supported_extensions(
.EXT_shader_viewport_index_layer = true,
/* EXT */
.EXT_image_drm_format_modifier = true,
.EXT_transform_feedback = true,
};
}
@@ -1588,6 +1608,10 @@ vn_physical_device_init(struct vn_physical_device *physical_dev)
vn_physical_device_init_external_fence_handles(physical_dev);
vn_physical_device_init_external_semaphore_handles(physical_dev);
result = vn_wsi_init(physical_dev);
if (result != VK_SUCCESS)
goto fail;
return VK_SUCCESS;
fail:
@@ -1602,6 +1626,7 @@ vn_physical_device_fini(struct vn_physical_device *physical_dev)
struct vn_instance *instance = physical_dev->instance;
const VkAllocationCallbacks *alloc = &instance->base.base.alloc;
vn_wsi_fini(physical_dev);
vk_free(alloc, physical_dev->extension_spec_versions);
vk_free(alloc, physical_dev->queue_family_properties);
@@ -2878,6 +2903,69 @@ vn_device_init_queues(struct vn_device *dev,
return VK_SUCCESS;
}
static bool
find_extension_names(const char *const *exts,
uint32_t ext_count,
const char *name)
{
for (uint32_t i = 0; i < ext_count; i++) {
if (!strcmp(exts[i], name))
return true;
}
return false;
}
static const char **
merge_extension_names(const char *const *exts,
uint32_t ext_count,
const char *const *extra_exts,
uint32_t extra_count,
const VkAllocationCallbacks *alloc,
uint32_t *merged_count)
{
const char **merged =
vk_alloc(alloc, sizeof(*merged) * (ext_count + extra_count),
VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!merged)
return NULL;
memcpy(merged, exts, sizeof(*exts) * ext_count);
uint32_t count = ext_count;
for (uint32_t i = 0; i < extra_count; i++) {
if (!find_extension_names(exts, ext_count, extra_exts[i]))
merged[count++] = extra_exts[i];
}
*merged_count = count;
return merged;
}
static const VkDeviceCreateInfo *
vn_device_fix_create_info(const struct vn_physical_device *physical_dev,
const VkDeviceCreateInfo *dev_info,
const VkAllocationCallbacks *alloc,
VkDeviceCreateInfo *local_info)
{
const char *extra_exts[8];
uint32_t extra_count = 0;
if (physical_dev->wsi_device.supports_modifiers)
extra_exts[extra_count++] = "VK_EXT_image_drm_format_modifier";
if (!extra_count)
return dev_info;
*local_info = *dev_info;
local_info->ppEnabledExtensionNames = merge_extension_names(
dev_info->ppEnabledExtensionNames, dev_info->enabledExtensionCount,
extra_exts, extra_count, alloc, &local_info->enabledExtensionCount);
if (!local_info->ppEnabledExtensionNames)
return NULL;
return local_info;
}
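/* To illustrate the fix-up above with hypothetical inputs: if the app enables
 * only VK_KHR_swapchain and the renderer supports modifiers, the host-side
 * vkCreateDevice sees { VK_KHR_swapchain, VK_EXT_image_drm_format_modifier };
 * if the app had already enabled the modifier extension itself,
 * find_extension_names() catches the duplicate and the list is passed through
 * effectively unchanged.
 */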
VkResult
vn_CreateDevice(VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo *pCreateInfo,
@@ -2910,6 +2998,14 @@ vn_CreateDevice(VkPhysicalDevice physicalDevice,
dev->instance = instance;
dev->physical_device = physical_dev;
VkDeviceCreateInfo local_create_info;
pCreateInfo = vn_device_fix_create_info(physical_dev, pCreateInfo, alloc,
&local_create_info);
if (!pCreateInfo) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto fail;
}
VkDevice dev_handle = vn_device_to_handle(dev);
result = vn_call_vkCreateDevice(instance, physicalDevice, pCreateInfo,
NULL, &dev_handle);
@@ -2929,9 +3025,14 @@ vn_CreateDevice(VkPhysicalDevice physicalDevice,
*pDevice = dev_handle;
if (pCreateInfo == &local_create_info)
vk_free(alloc, (void *)pCreateInfo->ppEnabledExtensionNames);
return VK_SUCCESS;
fail:
if (pCreateInfo == &local_create_info)
vk_free(alloc, (void *)pCreateInfo->ppEnabledExtensionNames);
vn_device_base_fini(&dev->base);
vk_free(alloc, dev);
return vn_error(instance, result);
@@ -3468,7 +3569,8 @@ static void
vn_queue_submit_syncs(struct vn_queue *queue,
struct vn_renderer_sync *const *syncs,
const uint64_t *sync_values,
- uint32_t sync_count)
+ uint32_t sync_count,
+ struct vn_renderer_bo *wsi_bo)
{
struct vn_instance *instance = queue->device->instance;
const struct vn_renderer_submit_batch batch = {
@@ -3479,6 +3581,8 @@ vn_queue_submit_syncs(struct vn_queue *queue,
.sync_count = sync_count,
};
const struct vn_renderer_submit submit = {
.bos = &wsi_bo,
.bo_count = wsi_bo ? 1 : 0,
.batches = &batch,
.batch_count = 1,
};
@@ -3502,6 +3606,16 @@ vn_QueueSubmit(VkQueue _queue,
if (result != VK_SUCCESS)
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
const struct vn_device_memory *wsi_mem = NULL;
if (submit.batch_count == 1) {
const struct wsi_memory_signal_submit_info *info = vk_find_struct_const(
submit.submit_batches[0].pNext, WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
if (info) {
wsi_mem = vn_device_memory_from_handle(info->memory);
assert(!wsi_mem->base_memory && wsi_mem->base_bo);
}
}
/* TODO this should be one trip to the renderer */
if (submit.signal_timeline_count) {
uint32_t sync_base = 0;
@@ -3512,7 +3626,7 @@ vn_QueueSubmit(VkQueue _queue,
vn_queue_submit_syncs(queue, &submit.temp.syncs[sync_base],
&submit.temp.sync_values[sync_base],
- submit.temp.batch_sync_counts[i]);
+ submit.temp.batch_sync_counts[i], NULL);
sync_base += submit.temp.batch_sync_counts[i];
}
@@ -3524,10 +3638,11 @@ vn_QueueSubmit(VkQueue _queue,
return vn_error(dev->instance, result);
}
- if (sync_base < submit.sync_count) {
+ if (sync_base < submit.sync_count || wsi_mem) {
vn_queue_submit_syncs(queue, &submit.temp.syncs[sync_base],
&submit.temp.sync_values[sync_base],
- submit.sync_count - sync_base);
+ submit.sync_count - sync_base,
+ wsi_mem ? wsi_mem->base_bo : NULL);
}
} else {
result = vn_call_vkQueueSubmit(dev->instance, submit.queue,
@@ -3538,12 +3653,28 @@ vn_QueueSubmit(VkQueue _queue,
return vn_error(dev->instance, result);
}
- if (submit.sync_count) {
+ if (submit.sync_count || wsi_mem) {
vn_queue_submit_syncs(queue, submit.temp.syncs,
- submit.temp.sync_values, submit.sync_count);
+ submit.temp.sync_values, submit.sync_count,
+ wsi_mem ? wsi_mem->base_bo : NULL);
}
}
/* XXX The implicit fence won't work because the host is not aware of it.
* It is guest-only and the guest kernel does not wait. We need kernel
* support, or better yet, an explicit fence that the host is aware of.
*
* vn_AcquireNextImage2KHR is also broken.
*/
if (wsi_mem && VN_DEBUG(WSI)) {
static uint32_t ratelimit;
if (ratelimit < 10) {
vn_log(dev->instance, "forcing vkQueueWaitIdle before presenting");
ratelimit++;
}
vn_QueueWaitIdle(submit.queue);
}
vn_queue_submission_cleanup(&submit);
return VK_SUCCESS;
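
For reference, the wsi_memory_signal_submit_info consumed by vn_QueueSubmit above is Mesa's WSI-internal chain entry; its shape is roughly (a sketch, not verbatim):

   struct wsi_memory_signal_submit_info {
      VkStructureType sType;  /* WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA */
      const void *pNext;
      VkDeviceMemory memory;  /* memory of the image being presented */
   };

wsi_common_queue_present() chains it to the last VkSubmitInfo of a present, which is how the swapchain image's BO ends up attached to the renderer submission via vn_queue_submit_syncs().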
@@ -3575,7 +3706,7 @@ vn_QueueBindSparse(VkQueue _queue,
vn_queue_submit_syncs(queue, &submit.temp.syncs[sync_base],
&submit.temp.sync_values[sync_base],
- submit.temp.batch_sync_counts[i]);
+ submit.temp.batch_sync_counts[i], NULL);
sync_base += submit.temp.batch_sync_counts[i];
}
@@ -3590,7 +3721,7 @@
if (sync_base < submit.sync_count) {
vn_queue_submit_syncs(queue, &submit.temp.syncs[sync_base],
&submit.temp.sync_values[sync_base],
- submit.sync_count - sync_base);
+ submit.sync_count - sync_base, NULL);
}
} else {
result = vn_call_vkQueueBindSparse(
@@ -3603,7 +3734,8 @@
if (submit.sync_count) {
vn_queue_submit_syncs(queue, submit.temp.syncs,
- submit.temp.sync_values, submit.sync_count);
+ submit.temp.sync_values, submit.sync_count,
+ NULL);
}
}
@@ -4862,6 +4994,21 @@ vn_CreateImage(VkDevice device,
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
/* TODO wsi_create_native_image uses modifiers or sets wsi_info->scanout to
* true. Instead of forcing VK_IMAGE_TILING_LINEAR, we should ask wsi to
* use wsi_create_prime_image.
*/
const struct wsi_image_create_info *wsi_info =
vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA);
VkImageCreateInfo local_create_info;
if (wsi_info && wsi_info->scanout) {
if (VN_DEBUG(WSI))
vn_log(dev->instance, "forcing scanout image linear");
local_create_info = *pCreateInfo;
local_create_info.tiling = VK_IMAGE_TILING_LINEAR;
pCreateInfo = &local_create_info;
}
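/* For reference, the wsi_image_create_info checked above is Mesa's
 * WSI-internal chain entry (a sketch; only the field used here is shown):
 *
 *    struct wsi_image_create_info {
 *       VkStructureType sType;   (WSI_IMAGE_CREATE_INFO_MESA)
 *       const void *pNext;
 *       bool scanout;            (the image may be scanned out directly)
 *    };
 */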
struct vn_image *img = vk_zalloc(alloc, sizeof(*img), VN_DEFAULT_ALIGN,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!img)
@@ -4880,6 +5027,9 @@ vn_CreateImage(VkDevice device,
uint32_t plane_count = 1;
if (pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT) {
/* TODO VkDrmFormatModifierPropertiesEXT::drmFormatModifierPlaneCount */
assert(pCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT);
switch (pCreateInfo->format) {
case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
@@ -5122,6 +5272,19 @@ vn_BindImageMemory2(VkDevice device,
return VK_SUCCESS;
}
VkResult
vn_GetImageDrmFormatModifierPropertiesEXT(
VkDevice device,
VkImage image,
VkImageDrmFormatModifierPropertiesEXT *pProperties)
{
struct vn_device *dev = vn_device_from_handle(device);
/* TODO local cache */
return vn_call_vkGetImageDrmFormatModifierPropertiesEXT(
dev->instance, device, image, pProperties);
}
void
vn_GetImageSubresourceLayout(VkDevice device,
VkImage image,
@@ -5998,6 +6161,8 @@ vn_CreateRenderPass(VkDevice device,
vn_object_base_init(&pass->base, VK_OBJECT_TYPE_RENDER_PASS, &dev->base);
/* XXX VK_IMAGE_LAYOUT_PRESENT_SRC_KHR */
VkRenderPass pass_handle = vn_render_pass_to_handle(pass);
vn_async_vkCreateRenderPass(dev->instance, device, pCreateInfo, NULL,
&pass_handle);
@@ -6025,6 +6190,8 @@ vn_CreateRenderPass2(VkDevice device,
vn_object_base_init(&pass->base, VK_OBJECT_TYPE_RENDER_PASS, &dev->base);
/* XXX VK_IMAGE_LAYOUT_PRESENT_SRC_KHR */
VkRenderPass pass_handle = vn_render_pass_to_handle(pass);
vn_async_vkCreateRenderPass2(dev->instance, device, pCreateInfo, NULL,
&pass_handle);
@@ -7633,6 +7800,8 @@ vn_CmdWaitEvents(VkCommandBuffer commandBuffer,
if (!vn_cs_encoder_reserve(&cmd->cs, cmd_size))
return;
/* XXX VK_IMAGE_LAYOUT_PRESENT_SRC_KHR */
vn_encode_vkCmdWaitEvents(&cmd->cs, 0, commandBuffer, eventCount, pEvents,
srcStageMask, dstStageMask, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount,
@@ -7663,6 +7832,8 @@ vn_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
if (!vn_cs_encoder_reserve(&cmd->cs, cmd_size))
return;
/* XXX VK_IMAGE_LAYOUT_PRESENT_SRC_KHR */
vn_encode_vkCmdPipelineBarrier(
&cmd->cs, 0, commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,

src/virtio/vulkan/vn_device.h

@@ -16,6 +16,7 @@
#include "vn_cs.h"
#include "vn_renderer.h"
#include "vn_ring.h"
#include "vn_wsi.h"
struct vn_instance {
struct vn_instance_base base;
@@ -88,6 +89,8 @@ struct vn_physical_device {
VkExternalFenceHandleTypeFlags external_fence_handles;
VkExternalSemaphoreHandleTypeFlags external_binary_semaphore_handles;
VkExternalSemaphoreHandleTypeFlags external_timeline_semaphore_handles;
struct wsi_device wsi_device;
};
VK_DEFINE_HANDLE_CASTS(vn_physical_device,
base.base.base,

src/virtio/vulkan/vn_wsi.c (new file, 338 lines)

@@ -0,0 +1,338 @@
/*
* Copyright 2019 Google LLC
* SPDX-License-Identifier: MIT
*
* based in part on anv and radv which are:
* Copyright © 2015 Intel Corporation
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
*/
#include "vn_wsi.h"
#include "vn_device.h"
/* The common WSI support makes some assumptions about the driver.
*
* In wsi_device_init, it assumes VK_EXT_pci_bus_info is available. In
* wsi_create_native_image and wsi_create_prime_image, it assumes
* VK_KHR_external_memory_fd and VK_EXT_external_memory_dma_buf are enabled.
*
* In wsi_create_native_image, if wsi_device::supports_modifiers is set and
* the window system supports modifiers, it assumes
* VK_EXT_image_drm_format_modifier is enabled. Otherwise, it assumes that
* wsi_image_create_info can be chained to VkImageCreateInfo and
* vkGetImageSubresourceLayout can be called even when the tiling is
* VK_IMAGE_TILING_OPTIMAL.
*
* Together, the common code knows how to export dma-bufs, with explicit or
* implicit modifiers, to the window system.
*
* For venus, we use explicit modifiers when the renderer and the window
* system support them. Otherwise, we have to fall back to
* VK_IMAGE_TILING_LINEAR (or trigger the prime blit path). But the fallback
* can be problematic when the memory is scanned out directly and special
* requirements (e.g., alignments) must be met.
*
* The common WSI support makes other assumptions about the driver to support
* implicit fencing. In wsi_create_native_image and wsi_create_prime_image,
* it assumes wsi_memory_allocate_info can be chained to VkMemoryAllocateInfo.
* In wsi_common_queue_present, it assumes wsi_memory_signal_submit_info can
* be chained to VkSubmitInfo. Finally, in wsi_common_acquire_next_image2, it
* calls wsi_device::signal_semaphore_for_memory, and
* wsi_device::signal_fence_for_memory if the driver provides them.
*
* Some drivers use wsi_memory_allocate_info to set up implicit fencing.
* Others use wsi_memory_signal_submit_info to set up implicit IN-fences and
* use wsi_device::signal_*_for_memory to set up implicit OUT-fences.
*
* For venus, implicit fencing is broken (and there is no explicit fencing
* support yet). The kernel driver assumes everything is in the same fence
* context and that no synchronization is needed. Implicit fencing should be
* fixed for correctness, but even then it is not ideal; venus requires
* explicit fencing (and renderer-side synchronization) to work well.
*/
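/* For orientation, the implicit-fencing chain entry mentioned above looks
 * roughly like this (a sketch of Mesa's wsi_memory_allocate_info, not
 * verbatim):
 *
 *    struct wsi_memory_allocate_info {
 *       VkStructureType sType;   (WSI_MEMORY_ALLOCATE_INFO_MESA)
 *       const void *pNext;
 *       bool implicit_sync;      (request implicit fencing on the allocation)
 *    };
 */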
static PFN_vkVoidFunction
vn_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
return vk_instance_get_proc_addr_unchecked(
&physical_dev->instance->base.base, pName);
}
VkResult
vn_wsi_init(struct vn_physical_device *physical_dev)
{
const VkAllocationCallbacks *alloc =
&physical_dev->instance->base.base.alloc;
VkResult result = wsi_device_init(
&physical_dev->wsi_device, vn_physical_device_to_handle(physical_dev),
vn_wsi_proc_addr, alloc, -1, &physical_dev->instance->dri_options,
false);
if (result != VK_SUCCESS)
return result;
if (physical_dev->base.base.supported_extensions
.EXT_image_drm_format_modifier)
physical_dev->wsi_device.supports_modifiers = true;
return VK_SUCCESS;
}
void
vn_wsi_fini(struct vn_physical_device *physical_dev)
{
const VkAllocationCallbacks *alloc =
&physical_dev->instance->base.base.alloc;
wsi_device_finish(&physical_dev->wsi_device, alloc);
}
/* surface commands */
void
vn_DestroySurfaceKHR(VkInstance _instance,
VkSurfaceKHR surface,
const VkAllocationCallbacks *pAllocator)
{
struct vn_instance *instance = vn_instance_from_handle(_instance);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surf, surface);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &instance->base.base.alloc;
vk_free(alloc, surf);
}
VkResult
vn_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
VkSurfaceKHR surface,
VkBool32 *pSupported)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
VkResult result = wsi_common_get_surface_support(
&physical_dev->wsi_device, queueFamilyIndex, surface, pSupported);
return vn_result(physical_dev->instance, result);
}
VkResult
vn_GetPhysicalDeviceSurfaceCapabilitiesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
VkResult result = wsi_common_get_surface_capabilities(
&physical_dev->wsi_device, surface, pSurfaceCapabilities);
return vn_result(physical_dev->instance, result);
}
VkResult
vn_GetPhysicalDeviceSurfaceCapabilities2KHR(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
VkResult result = wsi_common_get_surface_capabilities2(
&physical_dev->wsi_device, pSurfaceInfo, pSurfaceCapabilities);
return vn_result(physical_dev->instance, result);
}
VkResult
vn_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
VkResult result =
wsi_common_get_surface_formats(&physical_dev->wsi_device, surface,
pSurfaceFormatCount, pSurfaceFormats);
return vn_result(physical_dev->instance, result);
}
VkResult
vn_GetPhysicalDeviceSurfaceFormats2KHR(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormat2KHR *pSurfaceFormats)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
VkResult result =
wsi_common_get_surface_formats2(&physical_dev->wsi_device, pSurfaceInfo,
pSurfaceFormatCount, pSurfaceFormats);
return vn_result(physical_dev->instance, result);
}
VkResult
vn_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
VkResult result = wsi_common_get_surface_present_modes(
&physical_dev->wsi_device, surface, pPresentModeCount, pPresentModes);
return vn_result(physical_dev->instance, result);
}
VkResult
vn_GetDeviceGroupPresentCapabilitiesKHR(
VkDevice device, VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
{
memset(pCapabilities->presentMask, 0, sizeof(pCapabilities->presentMask));
pCapabilities->presentMask[0] = 0x1;
pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
return VK_SUCCESS;
}
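/* Note: the hard-coded presentMask of 0x1 and the LOCAL present mode reflect
 * that venus exposes a single-physical-device group: device index 0 is the
 * only member, and it can only present locally.
 */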
VkResult
vn_GetDeviceGroupSurfacePresentModesKHR(
VkDevice device,
VkSurfaceKHR surface,
VkDeviceGroupPresentModeFlagsKHR *pModes)
{
*pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
return VK_SUCCESS;
}
VkResult
vn_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t *pRectCount,
VkRect2D *pRects)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
VkResult result = wsi_common_get_present_rectangles(
&physical_dev->wsi_device, surface, pRectCount, pRects);
return vn_result(physical_dev->instance, result);
}
/* swapchain commands */
VkResult
vn_CreateSwapchainKHR(VkDevice device,
const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSwapchainKHR *pSwapchain)
{
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
VkResult result =
wsi_common_create_swapchain(&dev->physical_device->wsi_device, device,
pCreateInfo, alloc, pSwapchain);
return vn_result(dev->instance, result);
}
void
vn_DestroySwapchainKHR(VkDevice device,
VkSwapchainKHR swapchain,
const VkAllocationCallbacks *pAllocator)
{
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
wsi_common_destroy_swapchain(device, swapchain, alloc);
}
VkResult
vn_GetSwapchainImagesKHR(VkDevice device,
VkSwapchainKHR swapchain,
uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages)
{
struct vn_device *dev = vn_device_from_handle(device);
VkResult result = wsi_common_get_images(swapchain, pSwapchainImageCount,
pSwapchainImages);
return vn_result(dev->instance, result);
}
VkResult
vn_AcquireNextImageKHR(VkDevice device,
VkSwapchainKHR swapchain,
uint64_t timeout,
VkSemaphore semaphore,
VkFence fence,
uint32_t *pImageIndex)
{
const VkAcquireNextImageInfoKHR acquire_info = {
.sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
.swapchain = swapchain,
.timeout = timeout,
.semaphore = semaphore,
.fence = fence,
.deviceMask = 0x1,
};
return vn_AcquireNextImage2KHR(device, &acquire_info, pImageIndex);
}
VkResult
vn_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
struct vn_queue *queue = vn_queue_from_handle(_queue);
VkResult result =
wsi_common_queue_present(&queue->device->physical_device->wsi_device,
vn_device_to_handle(queue->device), _queue,
queue->family, pPresentInfo);
return vn_result(queue->device->instance, result);
}
VkResult
vn_AcquireNextImage2KHR(VkDevice device,
const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex)
{
struct vn_device *dev = vn_device_from_handle(device);
VkResult result = wsi_common_acquire_next_image2(
&dev->physical_device->wsi_device, device, pAcquireInfo, pImageIndex);
/* XXX this relies on implicit sync */
if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
struct vn_semaphore *sem =
vn_semaphore_from_handle(pAcquireInfo->semaphore);
if (sem)
vn_semaphore_signal_wsi(dev, sem);
struct vn_fence *fence = vn_fence_from_handle(pAcquireInfo->fence);
if (fence)
vn_fence_signal_wsi(dev, fence);
}
return vn_result(dev->instance, result);
}

src/virtio/vulkan/vn_wsi.h (new file, 24 lines)

@@ -0,0 +1,24 @@
/*
* Copyright 2019 Google LLC
* SPDX-License-Identifier: MIT
*
* based in part on anv and radv which are:
* Copyright © 2015 Intel Corporation
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
*/
#ifndef VN_WSI_H
#define VN_WSI_H
#include "vn_common.h"
#include "wsi_common.h"
VkResult
vn_wsi_init(struct vn_physical_device *physical_dev);
void
vn_wsi_fini(struct vn_physical_device *physical_dev);
#endif /* VN_WSI_H */

src/virtio/vulkan/vn_wsi_wayland.c (new file, 41 lines)

@@ -0,0 +1,41 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*
* based in part on anv and radv which are:
* Copyright © 2015 Intel Corporation
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
*/
#include "wsi_common_wayland.h"
#include "vn_device.h"
#include "vn_wsi.h"
VkResult
vn_CreateWaylandSurfaceKHR(VkInstance _instance,
const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface)
{
struct vn_instance *instance = vn_instance_from_handle(_instance);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &instance->base.base.alloc;
VkResult result = wsi_create_wl_surface(alloc, pCreateInfo, pSurface);
return vn_result(instance, result);
}
VkBool32
vn_GetPhysicalDeviceWaylandPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display *display)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
return wsi_wl_get_presentation_support(&physical_dev->wsi_device, display);
}

src/virtio/vulkan/vn_wsi_x11.c (new file, 77 lines)

@@ -0,0 +1,77 @@
/*
* Copyright 2019 Google LLC
* SPDX-License-Identifier: MIT
*
* based in part on anv and radv which are:
* Copyright © 2015 Intel Corporation
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
*/
#include <X11/Xlib-xcb.h>
#include "wsi_common_x11.h"
#include "vn_device.h"
#include "vn_wsi.h"
/* XCB surface commands */
VkResult
vn_CreateXcbSurfaceKHR(VkInstance _instance,
const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface)
{
struct vn_instance *instance = vn_instance_from_handle(_instance);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &instance->base.base.alloc;
VkResult result = wsi_create_xcb_surface(alloc, pCreateInfo, pSurface);
return vn_result(instance, result);
}
VkBool32
vn_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
xcb_connection_t *connection,
xcb_visualid_t visual_id)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
return wsi_get_physical_device_xcb_presentation_support(
&physical_dev->wsi_device, queueFamilyIndex, connection, visual_id);
}
/* Xlib surface commands */
VkResult
vn_CreateXlibSurfaceKHR(VkInstance _instance,
const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface)
{
struct vn_instance *instance = vn_instance_from_handle(_instance);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &instance->base.base.alloc;
VkResult result = wsi_create_xlib_surface(alloc, pCreateInfo, pSurface);
return vn_result(instance, result);
}
VkBool32
vn_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
Display *dpy,
VisualID visualID)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
return wsi_get_physical_device_xcb_presentation_support(
&physical_dev->wsi_device, queueFamilyIndex, XGetXCBConnection(dpy),
visualID);
}