vallium: initial import of the vulkan frontend

This is the initial import of the vallium frontend for gallium.
It is only good enough to run the triangle and gears demos
(incorrectly) from the Sascha Willems Vulkan demos.

Improvements are mostly on the llvmpipe side after this.

It contains an implementation of the Vulkan API which is mapped
onto the gallium API, and is suitable only for SOFTWARE drivers.

Command buffers are recorded into malloc'd memory and later
played back against the gallium API. The command buffers are
mostly just Vulkan API marshalling, but in some places the information
is processed before being put into the command buffer (renderpass state).
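
As an illustration of that record/replay split, here is a minimal sketch in C
with hypothetical names (these are not the actual val_cmd_buffer structures):
recording appends malloc'd entries to a list, and playback walks the list and
issues the corresponding gallium calls.

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical record types -- the real entries cover the whole Vulkan
 * command set and also carry pre-processed renderpass state. */
enum cmd_type { CMD_BIND_PIPELINE, CMD_DRAW };

struct cmd_entry {
   enum cmd_type type;
   struct cmd_entry *next;
   union {
      struct { void *pipeline; } bind;
      struct { uint32_t vertex_count, first_vertex; } draw;
   } u;
};

struct cmd_buffer {
   struct cmd_entry *head;
   struct cmd_entry **tail;
};

static void cmd_buffer_init(struct cmd_buffer *cmd)
{
   cmd->head = NULL;
   cmd->tail = &cmd->head;
}

/* Recording: append a malloc'd entry; no gallium calls happen here. */
static struct cmd_entry *cmd_buffer_record(struct cmd_buffer *cmd, enum cmd_type type)
{
   struct cmd_entry *e = calloc(1, sizeof(*e));
   e->type = type;
   *cmd->tail = e;
   cmd->tail = &e->next;
   return e;
}

/* Playback: walk the list and issue the matching gallium calls
 * (e.g. pctx->draw_vbo()); this is what runs on the "graphics" thread. */
static void cmd_buffer_execute(struct cmd_buffer *cmd /*, struct pipe_context *pctx */)
{
   for (struct cmd_entry *e = cmd->head; e; e = e->next) {
      switch (e->type) {
      case CMD_BIND_PIPELINE: /* pctx->bind_*_state(...) */ break;
      case CMD_DRAW:          /* pctx->draw_vbo(pctx, &info) */ break;
      }
   }
}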

Execution happens on a separate "graphics" thread, against the gallium API.

There is only a single queue which wraps a single gallium context.
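
Building on the sketch above, the single-queue model looks roughly like this
(hypothetical names again; where exactly the real queue thread lives in the
vallium sources is not spelled out here): vkQueueSubmit hands the recorded
command buffers to one worker thread, which is the only place the gallium
context is touched.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical single queue wrapping the one gallium context; "pending"
 * reuses struct cmd_buffer from the sketch above. */
struct queue {
   pthread_mutex_t lock;
   pthread_cond_t cond;
   struct cmd_buffer *pending;   /* the real driver keeps a list of submits */
   bool shutdown;
   /* struct pipe_context *ctx;     the single gallium context */
};

/* vkQueueSubmit() side: hand the recorded work to the graphics thread. */
static void queue_submit(struct queue *q, struct cmd_buffer *cmd)
{
   pthread_mutex_lock(&q->lock);
   q->pending = cmd;
   pthread_cond_signal(&q->cond);
   pthread_mutex_unlock(&q->lock);
}

/* The "graphics" thread: the only place gallium calls are issued. */
static void *queue_thread(void *arg)
{
   struct queue *q = arg;
   pthread_mutex_lock(&q->lock);
   while (!q->shutdown) {
      while (!q->pending && !q->shutdown)
         pthread_cond_wait(&q->cond, &q->lock);
      struct cmd_buffer *cmd = q->pending;
      q->pending = NULL;
      pthread_mutex_unlock(&q->lock);
      if (cmd)
         cmd_buffer_execute(cmd /*, q->ctx */);
      pthread_mutex_lock(&q->lock);
   }
   pthread_mutex_unlock(&q->lock);
   return NULL;
}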

Resources are allocated via the new resource/memory APIs.
Shaders are created via the context and bound/unbound in the
second thread.

(Not aimed at HW drivers, for several reasons: memory management, software
fallbacks on lots of paths, and a CPU-side queue would be pointless there.)

v2: drop mesa_icd, drop cpp_args, drop extra flags, change meson config (Eric)
v2.1: use meson-gallium job

meson pieces:
Reviewed-by: Eric Engestrom <eric@engestrom.ch>

overall:

Acked-by: Roland Scheidegger <sroland@vmware.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6082>
Author: Dave Airlie
Date:   2020-06-19 16:40:27 +10:00
Parent: 8004fa9c95
Commit: b38879f8c5
30 changed files with 11497 additions and 3 deletions


@@ -550,6 +550,7 @@ meson-gallium:
-D gallium-nine=true
-D gallium-opencl=disabled
GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swr,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink"
+VULKAN_DRIVERS: swrast
EXTRA_OPTION: >
-D osmesa=gallium
-D tools=all


@@ -243,9 +243,9 @@ _vulkan_drivers = get_option('vulkan-drivers')
if _vulkan_drivers.contains('auto')
if system_has_kms_drm
if host_machine.cpu_family().startswith('x86')
-_vulkan_drivers = ['amd', 'intel']
+_vulkan_drivers = ['amd', 'intel', 'swrast']
elif ['arm', 'aarch64'].contains(host_machine.cpu_family())
-_vulkan_drivers = []
+_vulkan_drivers = ['swrast']
else
error('Unknown architecture @0@. Please pass -Dvulkan-drivers to set driver options. Patches gladly accepted to fix this.'.format(
host_machine.cpu_family()))
@@ -262,8 +262,12 @@ endif
with_intel_vk = _vulkan_drivers.contains('intel')
with_amd_vk = _vulkan_drivers.contains('amd')
with_freedreno_vk = _vulkan_drivers.contains('freedreno')
+with_swrast_vk = _vulkan_drivers.contains('swrast')
with_any_vk = _vulkan_drivers.length() != 0
+if with_swrast_vk and not with_gallium_softpipe
+  error('swrast vulkan requires gallium swrast')
+endif
if with_dri_swrast and (with_gallium_softpipe or with_gallium_swr)
error('Only one swrast provider can be built')
endif


@@ -166,7 +166,7 @@ option(
'vulkan-drivers',
type : 'array',
value : ['auto'],
-choices : ['auto', 'amd', 'freedreno', 'intel'],
+choices : ['auto', 'amd', 'freedreno', 'intel', 'swrast'],
description : 'List of vulkan drivers to build. If this is set to auto all drivers applicable to the target OS/architecture will be built'
)
option(


@@ -0,0 +1,66 @@
val_entrypoints = custom_target(
'val_entrypoints.[ch]',
input : ['val_entrypoints_gen.py', vk_api_xml],
output : ['val_entrypoints.h', 'val_entrypoints.c'],
command : [
prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--outdir',
meson.current_build_dir()
],
depend_files : files('val_extensions.py'),
)
val_extensions_c = custom_target(
'val_extensions.c',
input : ['val_extensions.py', vk_api_xml],
output : ['val_extensions.c', 'val_extensions.h'],
command : [
prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--out-c', '@OUTPUT0@',
'--out-h', '@OUTPUT1@'
],
)
libval_files = files(
'val_device.c',
'val_cmd_buffer.c',
'val_descriptor_set.c',
'val_execute.c',
'val_util.c',
'val_image.c',
'val_formats.c',
'val_lower_vulkan_resource.c',
'val_lower_vulkan_resource.h',
'val_lower_input_attachments.c',
'val_pass.c',
'val_pipeline.c',
'val_pipeline_cache.c',
'val_query.c',
'val_wsi.c')
val_deps = []
val_flags = []
if with_platform_x11
val_deps += dep_xcb_dri3
val_flags += [
'-DVK_USE_PLATFORM_XCB_KHR',
'-DVK_USE_PLATFORM_XLIB_KHR',
]
libval_files += files('val_wsi_x11.c')
endif
if with_platform_wayland
val_deps += dep_wayland_client
val_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
libval_files += files('val_wsi_wayland.c')
endif
libvallium_st = static_library(
'vallium_st',
[libval_files, val_entrypoints, val_extensions_c ],
link_with : [ libvulkan_wsi ],
c_args : [ val_flags ],
gnu_symbol_visibility : 'hidden',
include_directories : [ inc_include, inc_src, inc_util, inc_gallium, inc_compiler, inc_gallium_aux, inc_vulkan_wsi ],
dependencies : [ idep_nir, idep_mesautil, idep_vulkan_util ]
)

File diff suppressed because it is too large.


@@ -0,0 +1,198 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#pragma once
static inline unsigned vk_cull_to_pipe(uint32_t vk_cull)
{
/* the VkCullModeFlags values match the gallium PIPE_FACE_* values */
return vk_cull;
}
static inline unsigned vk_polygon_mode_to_pipe(uint32_t vk_poly_mode)
{
/* the VkPolygonMode values match the gallium PIPE_POLYGON_MODE_* values */
return vk_poly_mode;
}
static inline unsigned vk_conv_stencil_op(uint32_t vk_stencil_op)
{
switch (vk_stencil_op) {
case VK_STENCIL_OP_KEEP:
return PIPE_STENCIL_OP_KEEP;
case VK_STENCIL_OP_ZERO:
return PIPE_STENCIL_OP_ZERO;
case VK_STENCIL_OP_REPLACE:
return PIPE_STENCIL_OP_REPLACE;
case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
return PIPE_STENCIL_OP_INCR;
case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
return PIPE_STENCIL_OP_DECR;
case VK_STENCIL_OP_INVERT:
return PIPE_STENCIL_OP_INVERT;
case VK_STENCIL_OP_INCREMENT_AND_WRAP:
return PIPE_STENCIL_OP_INCR_WRAP;
case VK_STENCIL_OP_DECREMENT_AND_WRAP:
return PIPE_STENCIL_OP_DECR_WRAP;
default:
assert(0);
return 0;
}
}
static inline unsigned vk_conv_topology(VkPrimitiveTopology topology)
{
switch (topology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
return PIPE_PRIM_POINTS;
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
return PIPE_PRIM_LINES;
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
return PIPE_PRIM_LINE_STRIP;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
return PIPE_PRIM_TRIANGLES;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
return PIPE_PRIM_TRIANGLE_STRIP;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
return PIPE_PRIM_TRIANGLE_FAN;
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
return PIPE_PRIM_LINES_ADJACENCY;
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
return PIPE_PRIM_LINE_STRIP_ADJACENCY;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
return PIPE_PRIM_TRIANGLES_ADJACENCY;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
return PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
return PIPE_PRIM_PATCHES;
default:
assert(0);
return 0;
}
}
static inline unsigned vk_conv_wrap_mode(enum VkSamplerAddressMode addr_mode)
{
switch (addr_mode) {
case VK_SAMPLER_ADDRESS_MODE_REPEAT:
return PIPE_TEX_WRAP_REPEAT;
case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
return PIPE_TEX_WRAP_MIRROR_REPEAT;
case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
return PIPE_TEX_WRAP_CLAMP_TO_EDGE;
case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
return PIPE_TEX_WRAP_CLAMP_TO_BORDER;
case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
return PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
default:
assert(0);
return 0;
}
}
static inline unsigned vk_conv_blend_factor(enum VkBlendFactor vk_factor)
{
switch (vk_factor) {
case VK_BLEND_FACTOR_ZERO:
return PIPE_BLENDFACTOR_ZERO;
case VK_BLEND_FACTOR_ONE:
return PIPE_BLENDFACTOR_ONE;
case VK_BLEND_FACTOR_SRC_COLOR:
return PIPE_BLENDFACTOR_SRC_COLOR;
case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
return PIPE_BLENDFACTOR_INV_SRC_COLOR;
case VK_BLEND_FACTOR_DST_COLOR:
return PIPE_BLENDFACTOR_DST_COLOR;
case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
return PIPE_BLENDFACTOR_INV_DST_COLOR;
case VK_BLEND_FACTOR_SRC_ALPHA:
return PIPE_BLENDFACTOR_SRC_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
return PIPE_BLENDFACTOR_INV_SRC_ALPHA;
case VK_BLEND_FACTOR_DST_ALPHA:
return PIPE_BLENDFACTOR_DST_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
return PIPE_BLENDFACTOR_INV_DST_ALPHA;
case VK_BLEND_FACTOR_CONSTANT_COLOR:
return PIPE_BLENDFACTOR_CONST_COLOR;
case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
return PIPE_BLENDFACTOR_INV_CONST_COLOR;
case VK_BLEND_FACTOR_CONSTANT_ALPHA:
return PIPE_BLENDFACTOR_CONST_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
return PIPE_BLENDFACTOR_INV_CONST_ALPHA;
case VK_BLEND_FACTOR_SRC1_COLOR:
return PIPE_BLENDFACTOR_SRC1_COLOR;
case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
return PIPE_BLENDFACTOR_INV_SRC1_COLOR;
case VK_BLEND_FACTOR_SRC1_ALPHA:
return PIPE_BLENDFACTOR_SRC1_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
return PIPE_BLENDFACTOR_INV_SRC1_ALPHA;
case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
return PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE;
default:
assert(0);
return 0;
}
}
static inline unsigned vk_conv_blend_func(enum VkBlendOp op)
{
switch (op) {
case VK_BLEND_OP_ADD:
return PIPE_BLEND_ADD;
case VK_BLEND_OP_SUBTRACT:
return PIPE_BLEND_SUBTRACT;
case VK_BLEND_OP_REVERSE_SUBTRACT:
return PIPE_BLEND_REVERSE_SUBTRACT;
case VK_BLEND_OP_MIN:
return PIPE_BLEND_MIN;
case VK_BLEND_OP_MAX:
return PIPE_BLEND_MAX;
default:
assert(0);
return 0;
}
}
static inline enum pipe_swizzle vk_conv_swizzle(VkComponentSwizzle swiz)
{
switch (swiz) {
case VK_COMPONENT_SWIZZLE_ZERO:
return PIPE_SWIZZLE_0;
case VK_COMPONENT_SWIZZLE_ONE:
return PIPE_SWIZZLE_1;
case VK_COMPONENT_SWIZZLE_R:
return PIPE_SWIZZLE_X;
case VK_COMPONENT_SWIZZLE_G:
return PIPE_SWIZZLE_Y;
case VK_COMPONENT_SWIZZLE_B:
return PIPE_SWIZZLE_Z;
case VK_COMPONENT_SWIZZLE_A:
return PIPE_SWIZZLE_W;
case VK_COMPONENT_SWIZZLE_IDENTITY:
default:
return PIPE_SWIZZLE_NONE;
}
}
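
As a usage illustration (not part of the header above): a hedged sketch of how
these helpers might translate one VkPipelineColorBlendAttachmentState into
gallium blend state; the function name is hypothetical, and it assumes the
standard Vulkan header and gallium's pipe/p_state.h.

#include <vulkan/vulkan.h>
#include "pipe/p_state.h"

/* Illustrative only: translate one color-blend attachment using the helpers
 * defined above; the real translation lives in the vallium pipeline code. */
static void sketch_translate_blend(const VkPipelineColorBlendAttachmentState *att,
                                   struct pipe_rt_blend_state *rt)
{
   rt->blend_enable     = att->blendEnable;
   rt->rgb_func         = vk_conv_blend_func(att->colorBlendOp);
   rt->rgb_src_factor   = vk_conv_blend_factor(att->srcColorBlendFactor);
   rt->rgb_dst_factor   = vk_conv_blend_factor(att->dstColorBlendFactor);
   rt->alpha_func       = vk_conv_blend_func(att->alphaBlendOp);
   rt->alpha_src_factor = vk_conv_blend_factor(att->srcAlphaBlendFactor);
   rt->alpha_dst_factor = vk_conv_blend_factor(att->dstAlphaBlendFactor);
   rt->colormask        = att->colorWriteMask; /* VK and PIPE_MASK_* bits line up */
}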


@@ -0,0 +1,501 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
#include "vk_util.h"
#include "u_math.h"
VkResult val_CreateDescriptorSetLayout(
VkDevice _device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDescriptorSetLayout* pSetLayout)
{
VAL_FROM_HANDLE(val_device, device, _device);
struct val_descriptor_set_layout *set_layout;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
uint32_t max_binding = 0;
uint32_t immutable_sampler_count = 0;
for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
if (pCreateInfo->pBindings[j].pImmutableSamplers)
immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
}
size_t size = sizeof(struct val_descriptor_set_layout) +
(max_binding + 1) * sizeof(set_layout->binding[0]) +
immutable_sampler_count * sizeof(struct val_sampler *);
set_layout = vk_zalloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set_layout)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &set_layout->base,
VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
/* We just allocate all the samplers at the end of the struct */
struct val_sampler **samplers =
(struct val_sampler **)&set_layout->binding[max_binding + 1];
set_layout->binding_count = max_binding + 1;
set_layout->shader_stages = 0;
set_layout->size = 0;
uint32_t dynamic_offset_count = 0;
for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
uint32_t b = binding->binding;
set_layout->binding[b].array_size = binding->descriptorCount;
set_layout->binding[b].descriptor_index = set_layout->size;
set_layout->binding[b].type = binding->descriptorType;
set_layout->binding[b].valid = true;
set_layout->size += binding->descriptorCount;
for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
set_layout->binding[b].stage[stage].const_buffer_index = -1;
set_layout->binding[b].stage[stage].shader_buffer_index = -1;
set_layout->binding[b].stage[stage].sampler_index = -1;
set_layout->binding[b].stage[stage].sampler_view_index = -1;
set_layout->binding[b].stage[stage].image_index = -1;
}
if (binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
set_layout->binding[b].dynamic_index = dynamic_offset_count;
dynamic_offset_count += binding->descriptorCount;
}
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
val_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].sampler_index = set_layout->stage[s].sampler_count;
set_layout->stage[s].sampler_count += binding->descriptorCount;
}
break;
default:
break;
}
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
val_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].const_buffer_index = set_layout->stage[s].const_buffer_count;
set_layout->stage[s].const_buffer_count += binding->descriptorCount;
}
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
val_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].shader_buffer_index = set_layout->stage[s].shader_buffer_count;
set_layout->stage[s].shader_buffer_count += binding->descriptorCount;
}
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
val_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].image_index = set_layout->stage[s].image_count;
set_layout->stage[s].image_count += binding->descriptorCount;
}
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
val_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].sampler_view_index = set_layout->stage[s].sampler_view_count;
set_layout->stage[s].sampler_view_count += binding->descriptorCount;
}
break;
default:
break;
}
if (binding->pImmutableSamplers) {
set_layout->binding[b].immutable_samplers = samplers;
samplers += binding->descriptorCount;
for (uint32_t i = 0; i < binding->descriptorCount; i++)
set_layout->binding[b].immutable_samplers[i] =
val_sampler_from_handle(binding->pImmutableSamplers[i]);
} else {
set_layout->binding[b].immutable_samplers = NULL;
}
set_layout->shader_stages |= binding->stageFlags;
}
set_layout->dynamic_offset_count = dynamic_offset_count;
*pSetLayout = val_descriptor_set_layout_to_handle(set_layout);
return VK_SUCCESS;
}
void val_DestroyDescriptorSetLayout(
VkDevice _device,
VkDescriptorSetLayout _set_layout,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout, _set_layout);
if (!_set_layout)
return;
vk_object_base_finish(&set_layout->base);
vk_free2(&device->alloc, pAllocator, set_layout);
}
VkResult val_CreatePipelineLayout(
VkDevice _device,
const VkPipelineLayoutCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkPipelineLayout* pPipelineLayout)
{
VAL_FROM_HANDLE(val_device, device, _device);
struct val_pipeline_layout *layout;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (layout == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &layout->base,
VK_OBJECT_TYPE_PIPELINE_LAYOUT);
layout->num_sets = pCreateInfo->setLayoutCount;
for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout,
pCreateInfo->pSetLayouts[set]);
layout->set[set].layout = set_layout;
}
layout->push_constant_size = 0;
for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
layout->push_constant_size = MAX2(layout->push_constant_size,
range->offset + range->size);
}
layout->push_constant_size = align(layout->push_constant_size, 16);
*pPipelineLayout = val_pipeline_layout_to_handle(layout);
return VK_SUCCESS;
}
void val_DestroyPipelineLayout(
VkDevice _device,
VkPipelineLayout _pipelineLayout,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_pipeline_layout, pipeline_layout, _pipelineLayout);
if (!_pipelineLayout)
return;
vk_object_base_finish(&pipeline_layout->base);
vk_free2(&device->alloc, pAllocator, pipeline_layout);
}
VkResult
val_descriptor_set_create(struct val_device *device,
const struct val_descriptor_set_layout *layout,
struct val_descriptor_set **out_set)
{
struct val_descriptor_set *set;
size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);
set = vk_alloc(&device->alloc /* XXX: Use the pool */, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
/* A descriptor set may not be 100% filled. Clear the set so we can
* later detect holes in it.
*/
memset(set, 0, size);
vk_object_base_init(&device->vk, &set->base,
VK_OBJECT_TYPE_DESCRIPTOR_SET);
set->layout = layout;
/* Go through and fill out immutable samplers if we have any */
struct val_descriptor *desc = set->descriptors;
for (uint32_t b = 0; b < layout->binding_count; b++) {
if (layout->binding[b].immutable_samplers) {
for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
desc[i].sampler = layout->binding[b].immutable_samplers[i];
}
desc += layout->binding[b].array_size;
}
*out_set = set;
return VK_SUCCESS;
}
void
val_descriptor_set_destroy(struct val_device *device,
struct val_descriptor_set *set)
{
vk_object_base_finish(&set->base);
vk_free(&device->alloc, set);
}
VkResult val_AllocateDescriptorSets(
VkDevice _device,
const VkDescriptorSetAllocateInfo* pAllocateInfo,
VkDescriptorSet* pDescriptorSets)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_descriptor_pool, pool, pAllocateInfo->descriptorPool);
VkResult result = VK_SUCCESS;
struct val_descriptor_set *set;
uint32_t i;
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
VAL_FROM_HANDLE(val_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]);
result = val_descriptor_set_create(device, layout, &set);
if (result != VK_SUCCESS)
break;
list_addtail(&set->link, &pool->sets);
pDescriptorSets[i] = val_descriptor_set_to_handle(set);
}
if (result != VK_SUCCESS)
val_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
i, pDescriptorSets);
return result;
}
VkResult val_FreeDescriptorSets(
VkDevice _device,
VkDescriptorPool descriptorPool,
uint32_t count,
const VkDescriptorSet* pDescriptorSets)
{
VAL_FROM_HANDLE(val_device, device, _device);
for (uint32_t i = 0; i < count; i++) {
VAL_FROM_HANDLE(val_descriptor_set, set, pDescriptorSets[i]);
if (!set)
continue;
list_del(&set->link);
val_descriptor_set_destroy(device, set);
}
return VK_SUCCESS;
}
void val_UpdateDescriptorSets(
VkDevice _device,
uint32_t descriptorWriteCount,
const VkWriteDescriptorSet* pDescriptorWrites,
uint32_t descriptorCopyCount,
const VkCopyDescriptorSet* pDescriptorCopies)
{
for (uint32_t i = 0; i < descriptorWriteCount; i++) {
const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
VAL_FROM_HANDLE(val_descriptor_set, set, write->dstSet);
const struct val_descriptor_set_binding_layout *bind_layout =
&set->layout->binding[write->dstBinding];
struct val_descriptor *desc =
&set->descriptors[bind_layout->descriptor_index];
desc += write->dstArrayElement;
switch (write->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
VAL_FROM_HANDLE(val_sampler, sampler,
write->pImageInfo[j].sampler);
desc[j] = (struct val_descriptor) {
.type = VK_DESCRIPTOR_TYPE_SAMPLER,
.sampler = sampler,
};
}
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
VAL_FROM_HANDLE(val_image_view, iview,
write->pImageInfo[j].imageView);
VAL_FROM_HANDLE(val_sampler, sampler,
write->pImageInfo[j].sampler);
desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
desc[j].image_view = iview;
/* If this descriptor has an immutable sampler, we don't want
* to stomp on it.
*/
if (sampler)
desc[j].sampler = sampler;
}
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
VAL_FROM_HANDLE(val_image_view, iview,
write->pImageInfo[j].imageView);
desc[j] = (struct val_descriptor) {
.type = write->descriptorType,
.image_view = iview,
};
}
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
VAL_FROM_HANDLE(val_buffer_view, bview,
write->pTexelBufferView[j]);
desc[j] = (struct val_descriptor) {
.type = write->descriptorType,
.buffer_view = bview,
};
}
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
assert(write->pBufferInfo[j].buffer);
VAL_FROM_HANDLE(val_buffer, buffer, write->pBufferInfo[j].buffer);
assert(buffer);
desc[j] = (struct val_descriptor) {
.type = write->descriptorType,
.buf.offset = write->pBufferInfo[j].offset,
.buf.buffer = buffer,
.buf.range = write->pBufferInfo[j].range,
};
}
break;
default:
break;
}
}
for (uint32_t i = 0; i < descriptorCopyCount; i++) {
const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
VAL_FROM_HANDLE(val_descriptor_set, src, copy->srcSet);
VAL_FROM_HANDLE(val_descriptor_set, dst, copy->dstSet);
const struct val_descriptor_set_binding_layout *src_layout =
&src->layout->binding[copy->srcBinding];
struct val_descriptor *src_desc =
&src->descriptors[src_layout->descriptor_index];
src_desc += copy->srcArrayElement;
const struct val_descriptor_set_binding_layout *dst_layout =
&dst->layout->binding[copy->dstBinding];
struct val_descriptor *dst_desc =
&dst->descriptors[dst_layout->descriptor_index];
dst_desc += copy->dstArrayElement;
for (uint32_t j = 0; j < copy->descriptorCount; j++)
dst_desc[j] = src_desc[j];
}
}
VkResult val_CreateDescriptorPool(
VkDevice _device,
const VkDescriptorPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDescriptorPool* pDescriptorPool)
{
VAL_FROM_HANDLE(val_device, device, _device);
struct val_descriptor_pool *pool;
size_t size = sizeof(struct val_descriptor_pool);
pool = vk_zalloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!pool)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &pool->base,
VK_OBJECT_TYPE_DESCRIPTOR_POOL);
pool->flags = pCreateInfo->flags;
list_inithead(&pool->sets);
*pDescriptorPool = val_descriptor_pool_to_handle(pool);
return VK_SUCCESS;
}
static void val_reset_descriptor_pool(struct val_device *device,
struct val_descriptor_pool *pool)
{
struct val_descriptor_set *set, *tmp;
LIST_FOR_EACH_ENTRY_SAFE(set, tmp, &pool->sets, link) {
list_del(&set->link);
vk_free(&device->alloc, set);
}
}
void val_DestroyDescriptorPool(
VkDevice _device,
VkDescriptorPool _pool,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_descriptor_pool, pool, _pool);
if (!_pool)
return;
val_reset_descriptor_pool(device, pool);
vk_object_base_finish(&pool->base);
vk_free2(&device->alloc, pAllocator, pool);
}
VkResult val_ResetDescriptorPool(
VkDevice _device,
VkDescriptorPool _pool,
VkDescriptorPoolResetFlags flags)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_descriptor_pool, pool, _pool);
val_reset_descriptor_pool(device, pool);
return VK_SUCCESS;
}
void val_GetDescriptorSetLayoutSupport(VkDevice device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
VkDescriptorSetLayoutSupport* pSupport)
{
}

File diff suppressed because it is too large.


@@ -0,0 +1,816 @@
# coding=utf-8
#
# Copyright © 2015, 2017 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import argparse
import copy
import functools
import math
import os
import xml.etree.ElementTree as et
from collections import OrderedDict, namedtuple
from mako.template import Template
from val_extensions import *
# We generate a static hash table for entry point lookup
# (vkGetProcAddress). We use a linear congruential generator for our hash
# function and a power-of-two size table. The prime numbers are determined
# experimentally.
# We currently don't use layers in val, but we keep the machinery (inherited
# from anv) so it can be used for device groups later.
LAYERS = [
'val'
]
TEMPLATE_H = Template("""\
/* This file is generated from ${filename}; don't edit directly. */
struct val_instance_dispatch_table {
union {
void *entrypoints[${len(instance_entrypoints)}];
struct {
% for e in instance_entrypoints:
% if e.guard is not None:
#ifdef ${e.guard}
PFN_${e.name} ${e.name};
#else
void *${e.name};
# endif
% else:
PFN_${e.name} ${e.name};
% endif
% endfor
};
};
};
struct val_physical_device_dispatch_table {
union {
void *entrypoints[${len(physical_device_entrypoints)}];
struct {
% for e in physical_device_entrypoints:
% if e.guard is not None:
#ifdef ${e.guard}
PFN_${e.name} ${e.name};
#else
void *${e.name};
# endif
% else:
PFN_${e.name} ${e.name};
% endif
% endfor
};
};
};
struct val_device_dispatch_table {
union {
void *entrypoints[${len(device_entrypoints)}];
struct {
% for e in device_entrypoints:
% if e.guard is not None:
#ifdef ${e.guard}
PFN_${e.name} ${e.name};
#else
void *${e.name};
# endif
% else:
PFN_${e.name} ${e.name};
% endif
% endfor
};
};
};
extern const struct val_instance_dispatch_table val_instance_dispatch_table;
%for layer in LAYERS:
extern const struct val_physical_device_dispatch_table ${layer}_physical_device_dispatch_table;
%endfor
%for layer in LAYERS:
extern const struct val_device_dispatch_table ${layer}_device_dispatch_table;
%endfor
% for e in instance_entrypoints:
% if e.alias and e.alias.enabled:
<% continue %>
% endif
% if e.guard is not None:
#ifdef ${e.guard}
% endif
${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()});
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
% for e in physical_device_entrypoints:
% if e.alias:
<% continue %>
% endif
% if e.guard is not None:
#ifdef ${e.guard}
% endif
% for layer in LAYERS:
${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()});
% endfor
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
% for e in device_entrypoints:
% if e.alias and e.alias.enabled:
<% continue %>
% endif
% if e.guard is not None:
#ifdef ${e.guard}
% endif
% for layer in LAYERS:
${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()});
% endfor
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
""", output_encoding='utf-8')
TEMPLATE_C = Template(u"""\
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* This file is generated from ${filename}; don't edit directly. */
#include "val_private.h"
#include "util/macros.h"
struct string_map_entry {
uint32_t name;
uint32_t hash;
uint32_t num;
};
/* We use a big string constant to avoid lots of relocations from the entry
* point table to lots of little strings. The entries in the entry point table
* store the index into this big string.
*/
<%def name="strmap(strmap, prefix)">
static const char ${prefix}_strings[] =
% for s in strmap.sorted_strings:
"${s.string}\\0"
% endfor
;
static const struct string_map_entry ${prefix}_string_map_entries[] = {
% for s in strmap.sorted_strings:
{ ${s.offset}, ${'{:0=#8x}'.format(s.hash)}, ${s.num} }, /* ${s.string} */
% endfor
};
/* Hash table stats:
* size ${len(strmap.sorted_strings)} entries
* collisions entries:
% for i in range(10):
* ${i}${'+' if i == 9 else ' '} ${strmap.collisions[i]}
% endfor
*/
#define none 0xffff
static const uint16_t ${prefix}_string_map[${strmap.hash_size}] = {
% for e in strmap.mapping:
${ '{:0=#6x}'.format(e) if e >= 0 else 'none' },
% endfor
};
static int
${prefix}_string_map_lookup(const char *str)
{
static const uint32_t prime_factor = ${strmap.prime_factor};
static const uint32_t prime_step = ${strmap.prime_step};
const struct string_map_entry *e;
uint32_t hash, h;
uint16_t i;
const char *p;
hash = 0;
for (p = str; *p; p++)
hash = hash * prime_factor + *p;
h = hash;
while (1) {
i = ${prefix}_string_map[h & ${strmap.hash_mask}];
if (i == none)
return -1;
e = &${prefix}_string_map_entries[i];
if (e->hash == hash && strcmp(str, ${prefix}_strings + e->name) == 0)
return e->num;
h += prime_step;
}
return -1;
}
static const char *
${prefix}_entry_name(int num)
{
for (int i = 0; i < ARRAY_SIZE(${prefix}_string_map_entries); i++) {
if (${prefix}_string_map_entries[i].num == num)
return &${prefix}_strings[${prefix}_string_map_entries[i].name];
}
return NULL;
}
</%def>
${strmap(instance_strmap, 'instance')}
${strmap(physical_device_strmap, 'physical_device')}
${strmap(device_strmap, 'device')}
/* Weak aliases for all potential implementations. These will resolve to
* NULL if they're not defined, which lets the resolve_entrypoint() function
* pick the correct entry point.
*/
% for e in instance_entrypoints:
% if e.alias and e.alias.enabled:
<% continue %>
% endif
% if e.guard is not None:
#ifdef ${e.guard}
% endif
${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()}) __attribute__ ((weak));
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
const struct val_instance_dispatch_table val_instance_dispatch_table = {
% for e in instance_entrypoints:
% if e.guard is not None:
#ifdef ${e.guard}
% endif
.${e.name} = ${e.prefixed_name('val')},
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
};
% for e in physical_device_entrypoints:
% if e.alias and e.alias.enabled:
<% continue %>
% endif
% if e.guard is not None:
#ifdef ${e.guard}
% endif
${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()}) __attribute__ ((weak));
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
const struct val_physical_device_dispatch_table val_physical_device_dispatch_table = {
% for e in physical_device_entrypoints:
% if e.guard is not None:
#ifdef ${e.guard}
% endif
.${e.name} = ${e.prefixed_name('val')},
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
};
% for layer in LAYERS:
% for e in device_entrypoints:
% if e.alias and e.alias.enabled:
<% continue %>
% endif
% if e.guard is not None:
#ifdef ${e.guard}
% endif
% if layer == 'val':
${e.return_type} __attribute__ ((weak))
${e.prefixed_name('val')}(${e.decl_params()})
{
% if e.params[0].type == 'VkDevice':
VAL_FROM_HANDLE(val_device, val_device, ${e.params[0].name});
return val_device->dispatch.${e.name}(${e.call_params()});
% elif e.params[0].type == 'VkCommandBuffer':
VAL_FROM_HANDLE(val_cmd_buffer, val_cmd_buffer, ${e.params[0].name});
return val_cmd_buffer->device->dispatch.${e.name}(${e.call_params()});
% elif e.params[0].type == 'VkQueue':
VAL_FROM_HANDLE(val_queue, val_queue, ${e.params[0].name});
return val_queue->device->dispatch.${e.name}(${e.call_params()});
% else:
assert(!"Unhandled device child trampoline case: ${e.params[0].type}");
% endif
}
% else:
${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()}) __attribute__ ((weak));
% endif
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
const struct val_device_dispatch_table ${layer}_device_dispatch_table = {
% for e in device_entrypoints:
% if e.guard is not None:
#ifdef ${e.guard}
% endif
.${e.name} = ${e.prefixed_name(layer)},
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
};
% endfor
/** Return true if the core version or extension in which the given entrypoint
* is defined is enabled.
*
* If device is NULL, all device extensions are considered enabled.
*/
bool
val_instance_entrypoint_is_enabled(int index, uint32_t core_version,
const struct val_instance_extension_table *instance)
{
switch (index) {
% for e in instance_entrypoints:
case ${e.num}:
/* ${e.name} */
% if e.core_version:
return ${e.core_version.c_vk_version()} <= core_version;
% elif e.extensions:
% for ext in e.extensions:
% if ext.type == 'instance':
if (instance->${ext.name[3:]}) return true;
% else:
/* All device extensions are considered enabled at the instance level */
return true;
% endif
% endfor
return false;
% else:
return true;
% endif
% endfor
default:
return false;
}
}
/** Return true if the core version or extension in which the given entrypoint
* is defined is enabled.
*
* If device is NULL, all device extensions are considered enabled.
*/
bool
val_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
const struct val_instance_extension_table *instance)
{
switch (index) {
% for e in physical_device_entrypoints:
case ${e.num}:
/* ${e.name} */
% if e.core_version:
return ${e.core_version.c_vk_version()} <= core_version;
% elif e.extensions:
% for ext in e.extensions:
% if ext.type == 'instance':
if (instance->${ext.name[3:]}) return true;
% else:
/* All device extensions are considered enabled at the instance level */
return true;
% endif
% endfor
return false;
% else:
return true;
% endif
% endfor
default:
return false;
}
}
/** Return true if the core version or extension in which the given entrypoint
* is defined is enabled.
*
* If device is NULL, all device extensions are considered enabled.
*/
bool
val_device_entrypoint_is_enabled(int index, uint32_t core_version,
const struct val_instance_extension_table *instance,
const struct val_device_extension_table *device)
{
switch (index) {
% for e in device_entrypoints:
case ${e.num}:
/* ${e.name} */
% if e.core_version:
return ${e.core_version.c_vk_version()} <= core_version;
% elif e.extensions:
% for ext in e.extensions:
% if ext.type == 'instance':
<% assert False %>
% else:
if (!device || device->${ext.name[3:]}) return true;
% endif
% endfor
return false;
% else:
return true;
% endif
% endfor
default:
return false;
}
}
int
val_get_instance_entrypoint_index(const char *name)
{
return instance_string_map_lookup(name);
}
int
val_get_physical_device_entrypoint_index(const char *name)
{
return physical_device_string_map_lookup(name);
}
int
val_get_device_entrypoint_index(const char *name)
{
return device_string_map_lookup(name);
}
const char *
val_get_instance_entry_name(int index)
{
return instance_entry_name(index);
}
const char *
val_get_physical_device_entry_name(int index)
{
return physical_device_entry_name(index);
}
const char *
val_get_device_entry_name(int index)
{
return device_entry_name(index);
}
static void * __attribute__ ((noinline))
val_resolve_device_entrypoint(uint32_t index)
{
return val_device_dispatch_table.entrypoints[index];
}
void *
val_lookup_entrypoint(const char *name)
{
int idx = val_get_instance_entrypoint_index(name);
if (idx >= 0)
return val_instance_dispatch_table.entrypoints[idx];
idx = val_get_physical_device_entrypoint_index(name);
if (idx >= 0)
return val_physical_device_dispatch_table.entrypoints[idx];
idx = val_get_device_entrypoint_index(name);
if (idx >= 0)
return val_resolve_device_entrypoint(idx);
return NULL;
}""", output_encoding='utf-8')
U32_MASK = 2**32 - 1
PRIME_FACTOR = 5024183
PRIME_STEP = 19
def round_to_pow2(x):
return 2**int(math.ceil(math.log(x, 2)))
class StringIntMapEntry(object):
def __init__(self, string, num):
self.string = string
self.num = num
# Calculate the same hash value that we will calculate in C.
h = 0
for c in string:
h = ((h * PRIME_FACTOR) + ord(c)) & U32_MASK
self.hash = h
self.offset = None
class StringIntMap(object):
def __init__(self):
self.baked = False
self.strings = dict()
def add_string(self, string, num):
assert not self.baked
assert string not in self.strings
assert num >= 0 and num < 2**31
self.strings[string] = StringIntMapEntry(string, num)
def bake(self):
self.sorted_strings = \
sorted(self.strings.values(), key=lambda x: x.string)
offset = 0
for entry in self.sorted_strings:
entry.offset = offset
offset += len(entry.string) + 1
# Save off some values that we'll need in C
self.hash_size = round_to_pow2(len(self.strings) * 1.25)
self.hash_mask = self.hash_size - 1
self.prime_factor = PRIME_FACTOR
self.prime_step = PRIME_STEP
self.mapping = [-1] * self.hash_size
self.collisions = [0] * 10
for idx, s in enumerate(self.sorted_strings):
level = 0
h = s.hash
while self.mapping[h & self.hash_mask] >= 0:
h = h + PRIME_STEP
level = level + 1
self.collisions[min(level, 9)] += 1
self.mapping[h & self.hash_mask] = idx
EntrypointParam = namedtuple('EntrypointParam', 'type name decl')
class EntrypointBase(object):
def __init__(self, name):
self.name = name
self.alias = None
self.guard = None
self.enabled = False
self.num = None
# Extensions which require this entrypoint
self.core_version = None
self.extensions = []
def prefixed_name(self, prefix):
assert self.name.startswith('vk')
return prefix + '_' + self.name[2:]
class Entrypoint(EntrypointBase):
def __init__(self, name, return_type, params, guard = None):
super(Entrypoint, self).__init__(name)
self.return_type = return_type
self.params = params
self.guard = guard
def is_physical_device_entrypoint(self):
return self.params[0].type in ('VkPhysicalDevice', )
def is_device_entrypoint(self):
return self.params[0].type in ('VkDevice', 'VkCommandBuffer', 'VkQueue')
def decl_params(self):
return ', '.join(p.decl for p in self.params)
def call_params(self):
return ', '.join(p.name for p in self.params)
class EntrypointAlias(EntrypointBase):
def __init__(self, name, entrypoint):
super(EntrypointAlias, self).__init__(name)
self.alias = entrypoint
def is_physical_device_entrypoint(self):
return self.alias.is_physical_device_entrypoint()
def is_device_entrypoint(self):
return self.alias.is_device_entrypoint()
def prefixed_name(self, prefix):
if self.alias.enabled:
return self.alias.prefixed_name(prefix)
return super(EntrypointAlias, self).prefixed_name(prefix)
@property
def params(self):
return self.alias.params
@property
def return_type(self):
return self.alias.return_type
def decl_params(self):
return self.alias.decl_params()
def call_params(self):
return self.alias.call_params()
def get_entrypoints(doc, entrypoints_to_defines):
"""Extract the entry points from the registry."""
entrypoints = OrderedDict()
for command in doc.findall('./commands/command'):
if 'alias' in command.attrib:
alias = command.attrib['name']
target = command.attrib['alias']
entrypoints[alias] = EntrypointAlias(alias, entrypoints[target])
else:
name = command.find('./proto/name').text
ret_type = command.find('./proto/type').text
params = [EntrypointParam(
type = p.find('./type').text,
name = p.find('./name').text,
decl = ''.join(p.itertext())
) for p in command.findall('./param')]
guard = entrypoints_to_defines.get(name)
# They really need to be unique
assert name not in entrypoints
entrypoints[name] = Entrypoint(name, ret_type, params, guard)
for feature in doc.findall('./feature'):
assert feature.attrib['api'] == 'vulkan'
version = VkVersion(feature.attrib['number'])
if version > MAX_API_VERSION:
continue
for command in feature.findall('./require/command'):
e = entrypoints[command.attrib['name']]
e.enabled = True
assert e.core_version is None
e.core_version = version
supported_exts = dict((ext.name, ext) for ext in EXTENSIONS)
for extension in doc.findall('./extensions/extension'):
ext_name = extension.attrib['name']
if ext_name not in supported_exts:
continue
ext = supported_exts[ext_name]
ext.type = extension.attrib['type']
for command in extension.findall('./require/command'):
e = entrypoints[command.attrib['name']]
e.enabled = True
assert e.core_version is None
e.extensions.append(ext)
# if the base command is not supported by the driver yet, don't alias aliases
for e in entrypoints.values():
if e.alias and not e.alias.enabled:
e_clone = copy.deepcopy(e.alias)
e_clone.enabled = True
e_clone.name = e.name
entrypoints[e.name] = e_clone
return [e for e in entrypoints.values() if e.enabled]
def get_entrypoints_defines(doc):
"""Maps entry points to extension defines."""
entrypoints_to_defines = {}
platform_define = {}
for platform in doc.findall('./platforms/platform'):
name = platform.attrib['name']
define = platform.attrib['protect']
platform_define[name] = define
for extension in doc.findall('./extensions/extension[@platform]'):
platform = extension.attrib['platform']
define = platform_define[platform]
for entrypoint in extension.findall('./require/command'):
fullname = entrypoint.attrib['name']
entrypoints_to_defines[fullname] = define
return entrypoints_to_defines
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--outdir', help='Where to write the files.',
required=True)
parser.add_argument('--xml',
help='Vulkan API XML file.',
required=True,
action='append',
dest='xml_files')
args = parser.parse_args()
entrypoints = []
for filename in args.xml_files:
doc = et.parse(filename)
entrypoints += get_entrypoints(doc, get_entrypoints_defines(doc))
device_entrypoints = []
physical_device_entrypoints = []
instance_entrypoints = []
for e in entrypoints:
if e.is_device_entrypoint():
device_entrypoints.append(e)
elif e.is_physical_device_entrypoint():
physical_device_entrypoints.append(e)
else:
instance_entrypoints.append(e)
device_strmap = StringIntMap()
for num, e in enumerate(device_entrypoints):
device_strmap.add_string(e.name, num)
e.num = num
device_strmap.bake()
physical_device_strmap = StringIntMap()
for num, e in enumerate(physical_device_entrypoints):
physical_device_strmap.add_string(e.name, num)
e.num = num
physical_device_strmap.bake()
instance_strmap = StringIntMap()
for num, e in enumerate(instance_entrypoints):
instance_strmap.add_string(e.name, num)
e.num = num
instance_strmap.bake()
# For outputting entrypoints.h we generate a val_EntryPoint() prototype
# per entry point.
try:
with open(os.path.join(args.outdir, 'val_entrypoints.h'), 'wb') as f:
f.write(TEMPLATE_H.render(instance_entrypoints=instance_entrypoints,
physical_device_entrypoints=physical_device_entrypoints,
device_entrypoints=device_entrypoints,
LAYERS=LAYERS,
filename=os.path.basename(__file__)))
with open(os.path.join(args.outdir, 'val_entrypoints.c'), 'wb') as f:
f.write(TEMPLATE_C.render(instance_entrypoints=instance_entrypoints,
physical_device_entrypoints=physical_device_entrypoints,
device_entrypoints=device_entrypoints,
LAYERS=LAYERS,
instance_strmap=instance_strmap,
physical_device_strmap=physical_device_strmap,
device_strmap=device_strmap,
filename=os.path.basename(__file__)))
except Exception:
# In the event there's an error, this imports some helpers from mako
# to print a useful stack trace and prints it, then exits with
# status 1, if python is run with debug; otherwise it just raises
# the exception
if __debug__:
import sys
from mako import exceptions
sys.stderr.write(exceptions.text_error_template().render() + '\n')
sys.exit(1)
raise
if __name__ == '__main__':
main()

File diff suppressed because it is too large.


@@ -0,0 +1,166 @@
COPYRIGHT = """\
/*
* Copyright 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
"""
import argparse
import os.path
import re
import sys
VULKAN_UTIL = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../vulkan/util'))
sys.path.append(VULKAN_UTIL)
from vk_extensions import *
from vk_extensions_gen import *
MAX_API_VERSION = '1.1.107'
# Supported API versions. Each one is the maximum patch version for the given
# version. Versions come in increasing order, and each version is available if
# its provided "enable" condition is true and all previous versions are
# available.
# TODO: The patch version should be unified!
API_VERSIONS = [
ApiVersion('1.0.68', True),
ApiVersion('1.1.107', False),
ApiVersion('1.2.131', False),
]
MAX_API_VERSION = None # Computed later
# On Android, we disable all surface and swapchain extensions. Android's Vulkan
# loader implements VK_KHR_surface and VK_KHR_swapchain, and applications
# cannot access the driver's implementation. Moreover, if the driver exposes
# those extension strings, then tests dEQP-VK.api.info.instance.extensions
# and dEQP-VK.api.info.device fail due to the duplicated strings.
EXTENSIONS = [
Extension('VK_ANDROID_native_buffer', 5, False),
Extension('VK_KHR_16bit_storage', 1, False),
Extension('VK_KHR_bind_memory2', 1, True),
Extension('VK_KHR_create_renderpass2', 1, False),
Extension('VK_KHR_dedicated_allocation', 1, True),
Extension('VK_KHR_depth_stencil_resolve', 1, False),
Extension('VK_KHR_descriptor_update_template', 1, False),
Extension('VK_KHR_device_group', 1, False),
Extension('VK_KHR_device_group_creation', 1, False),
Extension('VK_KHR_draw_indirect_count', 1, False),
Extension('VK_KHR_driver_properties', 1, True),
Extension('VK_KHR_external_fence', 1, False),
Extension('VK_KHR_external_fence_capabilities', 1, True),
Extension('VK_KHR_external_fence_fd', 1, False),
Extension('VK_KHR_external_memory', 1, False),
Extension('VK_KHR_external_memory_capabilities', 1, True),
Extension('VK_KHR_external_memory_fd', 1, False),
Extension('VK_KHR_external_semaphore', 1, False),
Extension('VK_KHR_external_semaphore_capabilities', 1, True),
Extension('VK_KHR_external_semaphore_fd', 1, False),
Extension('VK_KHR_get_display_properties2', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
Extension('VK_KHR_get_memory_requirements2', 1, True),
Extension('VK_KHR_get_physical_device_properties2', 1, True),
Extension('VK_KHR_get_surface_capabilities2', 1, 'VAL_HAS_SURFACE'),
Extension('VK_KHR_image_format_list', 1, False),
Extension('VK_KHR_imageless_framebuffer', 1, False),
Extension('VK_KHR_incremental_present', 1, 'VAL_HAS_SURFACE'),
Extension('VK_KHR_maintenance1', 1, True),
Extension('VK_KHR_maintenance2', 1, False),
Extension('VK_KHR_maintenance3', 1, False),
Extension('VK_KHR_pipeline_executable_properties', 1, False),
Extension('VK_KHR_push_descriptor', 1, False),
Extension('VK_KHR_relaxed_block_layout', 1, True),
Extension('VK_KHR_sampler_mirror_clamp_to_edge', 1, True),
Extension('VK_KHR_sampler_ycbcr_conversion', 1, False),
Extension('VK_KHR_shader_atomic_int64', 1, False),
Extension('VK_KHR_shader_draw_parameters', 1, False),
Extension('VK_KHR_shader_float16_int8', 1, True),
Extension('VK_KHR_storage_buffer_storage_class', 1, True),
Extension('VK_KHR_surface', 25, 'VAL_HAS_SURFACE'),
Extension('VK_KHR_surface_protected_capabilities', 1, 'VAL_HAS_SURFACE'),
Extension('VK_KHR_swapchain', 68, 'VAL_HAS_SURFACE'),
Extension('VK_KHR_uniform_buffer_standard_layout', 1, False),
Extension('VK_KHR_variable_pointers', 1, False),
Extension('VK_KHR_wayland_surface', 6, 'VK_USE_PLATFORM_WAYLAND_KHR'),
Extension('VK_KHR_xcb_surface', 6, 'VK_USE_PLATFORM_XCB_KHR'),
Extension('VK_KHR_xlib_surface', 6, 'VK_USE_PLATFORM_XLIB_KHR'),
Extension('VK_KHR_multiview', 1, False),
Extension('VK_KHR_display', 23, 'VK_USE_PLATFORM_DISPLAY_KHR'),
Extension('VK_KHR_8bit_storage', 1, False),
Extension('VK_EXT_direct_mode_display', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
Extension('VK_EXT_acquire_xlib_display', 1, 'VK_USE_PLATFORM_XLIB_XRANDR_EXT'),
Extension('VK_EXT_buffer_device_address', 1, False),
Extension('VK_EXT_calibrated_timestamps', 1, False),
Extension('VK_EXT_conditional_rendering', 1, False),
Extension('VK_EXT_conservative_rasterization', 1, False),
Extension('VK_EXT_display_surface_counter', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
Extension('VK_EXT_display_control', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
Extension('VK_EXT_debug_report', 9, True),
Extension('VK_EXT_depth_clip_enable', 1, False),
Extension('VK_EXT_depth_range_unrestricted', 1, False),
Extension('VK_EXT_descriptor_indexing', 2, False),
Extension('VK_EXT_discard_rectangles', 1, False),
Extension('VK_EXT_external_memory_dma_buf', 1, True),
Extension('VK_EXT_external_memory_host', 1, False),
Extension('VK_EXT_global_priority', 1, False),
Extension('VK_EXT_host_query_reset', 1, False),
Extension('VK_EXT_index_type_uint8', 1, False),
Extension('VK_EXT_inline_uniform_block', 1, False),
Extension('VK_EXT_memory_budget', 1, False),
Extension('VK_EXT_memory_priority', 1, False),
Extension('VK_EXT_pci_bus_info', 2, False),
Extension('VK_EXT_pipeline_creation_feedback', 1, False),
Extension('VK_EXT_post_depth_coverage', 1, False),
Extension('VK_EXT_private_data', 1, True),
Extension('VK_EXT_queue_family_foreign', 1, False),
Extension('VK_EXT_sample_locations', 1, False),
Extension('VK_EXT_sampler_filter_minmax', 1, False),
Extension('VK_EXT_scalar_block_layout', 1, False),
Extension('VK_EXT_shader_viewport_index_layer', 1, False),
Extension('VK_EXT_shader_stencil_export', 1, False),
Extension('VK_EXT_shader_subgroup_ballot', 1, False),
Extension('VK_EXT_shader_subgroup_vote', 1, False),
Extension('VK_EXT_transform_feedback', 1, False),
Extension('VK_EXT_vertex_attribute_divisor', 3, False),
Extension('VK_EXT_ycbcr_image_arrays', 1, False),
Extension('VK_GOOGLE_decorate_string', 1, True),
Extension('VK_GOOGLE_hlsl_functionality1', 1, True),
]
MAX_API_VERSION = VkVersion('0.0.0')
for version in API_VERSIONS:
version.version = VkVersion(version.version)
assert version.version > MAX_API_VERSION
MAX_API_VERSION = version.version
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--out-c', help='Output C file.', required=True)
parser.add_argument('--out-h', help='Output H file.', required=True)
parser.add_argument('--xml',
help='Vulkan API XML file.',
required=True,
action='append',
dest='xml_files')
args = parser.parse_args()
gen_extensions('val', args.xml_files, API_VERSIONS, MAX_API_VERSION, EXTENSIONS, args.out_c, args.out_h)


@@ -0,0 +1,442 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
#include "util/format/u_format.h"
#include "util/u_math.h"
#define COMMON_NAME(x) [VK_FORMAT_##x] = PIPE_FORMAT_##x
#define FLOAT_NAME(x) [VK_FORMAT_##x##_SFLOAT] = PIPE_FORMAT_##x##_FLOAT
static enum pipe_format format_to_vk_table[VK_FORMAT_ASTC_12x12_SRGB_BLOCK + 1] = {
COMMON_NAME(R8_UNORM),
COMMON_NAME(R8G8_UNORM),
COMMON_NAME(R8G8B8_UNORM),
COMMON_NAME(R8G8B8A8_UNORM),
COMMON_NAME(R8_SNORM),
COMMON_NAME(R8G8_SNORM),
COMMON_NAME(R8G8B8_SNORM),
COMMON_NAME(R8G8B8A8_SNORM),
// COMMON_NAME(R8_SRGB),
COMMON_NAME(R8G8B8_SRGB),
COMMON_NAME(R8G8B8A8_SRGB),
COMMON_NAME(B8G8R8A8_UNORM),
COMMON_NAME(R8G8B8A8_SRGB),
COMMON_NAME(B8G8R8A8_SRGB),
COMMON_NAME(R8_UINT),
COMMON_NAME(R8G8_UINT),
COMMON_NAME(R8G8B8_UINT),
COMMON_NAME(R8G8B8A8_UINT),
COMMON_NAME(R16_UINT),
COMMON_NAME(R16G16_UINT),
COMMON_NAME(R16G16B16_UINT),
COMMON_NAME(R16G16B16A16_UINT),
COMMON_NAME(R32_UINT),
COMMON_NAME(R32G32_UINT),
COMMON_NAME(R32G32B32_UINT),
COMMON_NAME(R32G32B32A32_UINT),
COMMON_NAME(R8_SINT),
COMMON_NAME(R8G8_SINT),
COMMON_NAME(R8G8B8_SINT),
COMMON_NAME(R8G8B8A8_SINT),
COMMON_NAME(R16_SINT),
COMMON_NAME(R16G16_SINT),
COMMON_NAME(R16G16B16_SINT),
COMMON_NAME(R16G16B16A16_SINT),
COMMON_NAME(R32_SINT),
COMMON_NAME(R32G32_SINT),
COMMON_NAME(R32G32B32_SINT),
COMMON_NAME(R32G32B32A32_SINT),
COMMON_NAME(R16_UNORM),
COMMON_NAME(R16G16_UNORM),
COMMON_NAME(R16G16B16A16_UNORM),
COMMON_NAME(R16_SNORM),
COMMON_NAME(R16G16_SNORM),
COMMON_NAME(R16G16B16A16_SNORM),
FLOAT_NAME(R16),
FLOAT_NAME(R16G16),
FLOAT_NAME(R16G16B16),
FLOAT_NAME(R16G16B16A16),
FLOAT_NAME(R32),
FLOAT_NAME(R32G32),
FLOAT_NAME(R32G32B32),
FLOAT_NAME(R32G32B32A32),
COMMON_NAME(S8_UINT),
[VK_FORMAT_UNDEFINED] = PIPE_FORMAT_NONE,
[VK_FORMAT_R5G6B5_UNORM_PACK16] = PIPE_FORMAT_B5G6R5_UNORM,
[VK_FORMAT_A1R5G5B5_UNORM_PACK16] = PIPE_FORMAT_B5G5R5A1_UNORM,
[VK_FORMAT_B4G4R4A4_UNORM_PACK16] = PIPE_FORMAT_A4R4G4B4_UNORM,
[VK_FORMAT_D16_UNORM] = PIPE_FORMAT_Z16_UNORM,
[VK_FORMAT_A8B8G8R8_UNORM_PACK32] = PIPE_FORMAT_R8G8B8A8_UNORM,
[VK_FORMAT_A8B8G8R8_SNORM_PACK32] = PIPE_FORMAT_R8G8B8A8_SNORM,
[VK_FORMAT_A8B8G8R8_UINT_PACK32] = PIPE_FORMAT_R8G8B8A8_UINT,
[VK_FORMAT_A8B8G8R8_SINT_PACK32] = PIPE_FORMAT_R8G8B8A8_SINT,
[VK_FORMAT_A8B8G8R8_SRGB_PACK32] = PIPE_FORMAT_R8G8B8A8_SRGB,
[VK_FORMAT_A2B10G10R10_UNORM_PACK32] = PIPE_FORMAT_R10G10B10A2_UNORM,
[VK_FORMAT_A2B10G10R10_UINT_PACK32] = PIPE_FORMAT_R10G10B10A2_UINT,
[VK_FORMAT_B10G11R11_UFLOAT_PACK32] = PIPE_FORMAT_R11G11B10_FLOAT,
[VK_FORMAT_E5B9G9R9_UFLOAT_PACK32] = PIPE_FORMAT_R9G9B9E5_FLOAT,
[VK_FORMAT_X8_D24_UNORM_PACK32] = PIPE_FORMAT_Z24X8_UNORM,
[VK_FORMAT_D32_SFLOAT] = PIPE_FORMAT_Z32_FLOAT,
[VK_FORMAT_D24_UNORM_S8_UINT] = PIPE_FORMAT_Z24_UNORM_S8_UINT,
[VK_FORMAT_D32_SFLOAT_S8_UINT] = PIPE_FORMAT_Z32_FLOAT_S8X24_UINT,
[VK_FORMAT_BC1_RGB_UNORM_BLOCK] = PIPE_FORMAT_DXT1_RGB,
[VK_FORMAT_BC1_RGBA_UNORM_BLOCK] = PIPE_FORMAT_DXT1_RGBA,
[VK_FORMAT_BC2_UNORM_BLOCK] = PIPE_FORMAT_DXT3_RGBA,
[VK_FORMAT_BC3_UNORM_BLOCK] = PIPE_FORMAT_DXT5_RGBA,
[VK_FORMAT_BC4_UNORM_BLOCK] = PIPE_FORMAT_RGTC1_UNORM,
[VK_FORMAT_BC5_UNORM_BLOCK] = PIPE_FORMAT_RGTC2_UNORM,
[VK_FORMAT_BC1_RGB_SRGB_BLOCK] = PIPE_FORMAT_DXT1_SRGB,
[VK_FORMAT_BC1_RGBA_SRGB_BLOCK] = PIPE_FORMAT_DXT1_SRGBA,
[VK_FORMAT_BC2_SRGB_BLOCK] = PIPE_FORMAT_DXT3_SRGBA,
[VK_FORMAT_BC3_SRGB_BLOCK] = PIPE_FORMAT_DXT5_SRGBA,
[VK_FORMAT_BC4_SNORM_BLOCK] = PIPE_FORMAT_RGTC1_SNORM,
[VK_FORMAT_BC5_SNORM_BLOCK] = PIPE_FORMAT_RGTC2_SNORM,
[VK_FORMAT_BC6H_UFLOAT_BLOCK] = PIPE_FORMAT_BPTC_RGB_UFLOAT,
[VK_FORMAT_BC6H_SFLOAT_BLOCK] = PIPE_FORMAT_BPTC_RGB_FLOAT,
[VK_FORMAT_BC7_UNORM_BLOCK] = PIPE_FORMAT_BPTC_RGBA_UNORM,
[VK_FORMAT_BC7_SRGB_BLOCK] = PIPE_FORMAT_BPTC_SRGBA,
};
enum pipe_format vk_format_to_pipe(VkFormat format)
{
if (format > VK_FORMAT_ASTC_12x12_SRGB_BLOCK)
return PIPE_FORMAT_NONE;
return format_to_vk_table[format];
}
static void
val_physical_device_get_format_properties(struct val_physical_device *physical_device,
VkFormat format,
VkFormatProperties *out_properties)
{
enum pipe_format pformat = vk_format_to_pipe(format);
unsigned features = 0, buffer_features = 0;
if (pformat == PIPE_FORMAT_NONE) {
out_properties->linearTilingFeatures = 0;
out_properties->optimalTilingFeatures = 0;
out_properties->bufferFeatures = 0;
return;
}
if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_DEPTH_STENCIL)) {
out_properties->linearTilingFeatures = 0;
out_properties->optimalTilingFeatures = VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
out_properties->bufferFeatures = 0;
return;
}
if (util_format_is_compressed(pformat)) {
if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
features |= VK_FORMAT_FEATURE_BLIT_SRC_BIT;
features |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
}
out_properties->linearTilingFeatures = features;
out_properties->optimalTilingFeatures = features;
out_properties->bufferFeatures = buffer_features;
return;
}
buffer_features = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
if (!util_format_is_srgb(pformat) &&
physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
PIPE_BUFFER, 0, 0, PIPE_BIND_VERTEX_BUFFER)) {
buffer_features |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
}
if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
PIPE_BUFFER, 0, 0, PIPE_BIND_CONSTANT_BUFFER)) {
buffer_features |= VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
}
if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
}
if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_RENDER_TARGET)) {
features |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
features |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
features |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
}
if (pformat == PIPE_FORMAT_R32_UINT || pformat == PIPE_FORMAT_R32_SINT) {
features |= VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
buffer_features |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
}
if (pformat == PIPE_FORMAT_R11G11B10_FLOAT || pformat == PIPE_FORMAT_R9G9B9E5_FLOAT)
features |= VK_FORMAT_FEATURE_BLIT_SRC_BIT;
features |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
if (pformat == PIPE_FORMAT_B5G6R5_UNORM)
features |= VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
if ((pformat != PIPE_FORMAT_R9G9B9E5_FLOAT) && util_format_get_nr_components(pformat) != 3) {
features |= VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
}
out_properties->linearTilingFeatures = features;
out_properties->optimalTilingFeatures = features;
out_properties->bufferFeatures = buffer_features;
return;
}
void val_GetPhysicalDeviceFormatProperties(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkFormatProperties* pFormatProperties)
{
VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
val_physical_device_get_format_properties(physical_device,
format,
pFormatProperties);
}
void val_GetPhysicalDeviceFormatProperties2(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkFormatProperties2* pFormatProperties)
{
VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
val_physical_device_get_format_properties(physical_device,
format,
&pFormatProperties->formatProperties);
}
static VkResult val_get_image_format_properties(struct val_physical_device *physical_device,
const VkPhysicalDeviceImageFormatInfo2 *info,
VkImageFormatProperties *pImageFormatProperties)
{
VkFormatProperties format_props;
VkFormatFeatureFlags format_feature_flags;
VkExtent3D maxExtent;
uint32_t maxMipLevels;
uint32_t maxArraySize;
VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
enum pipe_format pformat = vk_format_to_pipe(info->format);
val_physical_device_get_format_properties(physical_device, info->format,
&format_props);
if (info->tiling == VK_IMAGE_TILING_LINEAR) {
format_feature_flags = format_props.linearTilingFeatures;
} else if (info->tiling == VK_IMAGE_TILING_OPTIMAL) {
format_feature_flags = format_props.optimalTilingFeatures;
} else {
unreachable("bad VkImageTiling");
}
if (format_feature_flags == 0)
goto unsupported;
uint32_t max_2d_ext = physical_device->pscreen->get_param(physical_device->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE);
uint32_t max_layers = physical_device->pscreen->get_param(physical_device->pscreen, PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS);
switch (info->type) {
default:
unreachable("bad vkimage type\n");
case VK_IMAGE_TYPE_1D:
if (util_format_is_compressed(pformat))
goto unsupported;
maxExtent.width = max_2d_ext;
maxExtent.height = 1;
maxExtent.depth = 1;
maxMipLevels = util_logbase2(max_2d_ext);
maxArraySize = max_layers;
break;
case VK_IMAGE_TYPE_2D:
maxExtent.width = max_2d_ext;
maxExtent.height = max_2d_ext;
maxExtent.depth = 1;
maxMipLevels = util_logbase2(max_2d_ext);
maxArraySize = max_layers;
sampleCounts |= VK_SAMPLE_COUNT_4_BIT;
break;
case VK_IMAGE_TYPE_3D:
maxExtent.width = max_2d_ext;
maxExtent.height = max_2d_ext;
maxExtent.depth = (1 << physical_device->pscreen->get_param(physical_device->pscreen, PIPE_CAP_MAX_TEXTURE_3D_LEVELS));
maxMipLevels = util_logbase2(max_2d_ext);
maxArraySize = 1;
break;
}
if (info->usage & VK_IMAGE_USAGE_SAMPLED_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
goto unsupported;
}
}
if (info->usage & VK_IMAGE_USAGE_STORAGE_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
goto unsupported;
}
}
if (info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
goto unsupported;
}
}
if (info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
goto unsupported;
}
}
if (info->usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)) {
goto unsupported;
}
}
if (info->usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_TRANSFER_DST_BIT)) {
goto unsupported;
}
}
if (info->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) {
if (!(format_feature_flags & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
goto unsupported;
}
}
*pImageFormatProperties = (VkImageFormatProperties) {
.maxExtent = maxExtent,
.maxMipLevels = maxMipLevels,
.maxArrayLayers = maxArraySize,
.sampleCounts = sampleCounts,
/* FINISHME: Accurately calculate
* VkImageFormatProperties::maxResourceSize.
*/
.maxResourceSize = UINT32_MAX,
};
return VK_SUCCESS;
unsupported:
*pImageFormatProperties = (VkImageFormatProperties) {
.maxExtent = { 0, 0, 0 },
.maxMipLevels = 0,
.maxArrayLayers = 0,
.sampleCounts = 0,
.maxResourceSize = 0,
};
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
VkResult val_GetPhysicalDeviceImageFormatProperties(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkImageType type,
VkImageTiling tiling,
VkImageUsageFlags usage,
VkImageCreateFlags createFlags,
VkImageFormatProperties* pImageFormatProperties)
{
VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
const VkPhysicalDeviceImageFormatInfo2 info = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
.pNext = NULL,
.format = format,
.type = type,
.tiling = tiling,
.usage = usage,
.flags = createFlags,
};
return val_get_image_format_properties(physical_device, &info,
pImageFormatProperties);
}
VkResult val_GetPhysicalDeviceImageFormatProperties2(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *base_info,
VkImageFormatProperties2 *base_props)
{
VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
VkResult result;
result = val_get_image_format_properties(physical_device, base_info,
&base_props->imageFormatProperties);
if (result != VK_SUCCESS)
return result;
return VK_SUCCESS;
}
void val_GetPhysicalDeviceSparseImageFormatProperties(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkImageType type,
uint32_t samples,
VkImageUsageFlags usage,
VkImageTiling tiling,
uint32_t* pNumProperties,
VkSparseImageFormatProperties* pProperties)
{
/* Sparse images are not yet supported. */
*pNumProperties = 0;
}
void val_GetPhysicalDeviceSparseImageFormatProperties2(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSparseImageFormatInfo2 *pFormatInfo,
uint32_t *pPropertyCount,
VkSparseImageFormatProperties2 *pProperties)
{
/* Sparse images are not yet supported. */
*pPropertyCount = 0;
}



@ -0,0 +1,288 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "pipe/p_state.h"
VkResult
val_image_create(VkDevice _device,
const struct val_image_create_info *create_info,
const VkAllocationCallbacks* alloc,
VkImage *pImage)
{
VAL_FROM_HANDLE(val_device, device, _device);
const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
struct val_image *image;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
image = vk_zalloc2(&device->alloc, alloc, sizeof(*image), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (image == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &image->base, VK_OBJECT_TYPE_IMAGE);
image->alignment = 16;
image->type = pCreateInfo->imageType;
{
struct pipe_resource template;
memset(&template, 0, sizeof(template));
template.screen = device->pscreen;
switch (pCreateInfo->imageType) {
case VK_IMAGE_TYPE_1D:
template.target = pCreateInfo->arrayLayers > 1 ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
break;
default:
case VK_IMAGE_TYPE_2D:
template.target = pCreateInfo->arrayLayers > 1 ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
template.target = pCreateInfo->arrayLayers == 6 ? PIPE_TEXTURE_CUBE : PIPE_TEXTURE_CUBE_ARRAY;
break;
case VK_IMAGE_TYPE_3D:
template.target = PIPE_TEXTURE_3D;
break;
}
template.format = vk_format_to_pipe(pCreateInfo->format);
template.width0 = pCreateInfo->extent.width;
template.height0 = pCreateInfo->extent.height;
template.depth0 = pCreateInfo->extent.depth;
template.array_size = pCreateInfo->arrayLayers;
template.last_level = pCreateInfo->mipLevels - 1;
template.nr_samples = pCreateInfo->samples;
template.nr_storage_samples = pCreateInfo->samples;
if (create_info->bind_flags)
template.bind = create_info->bind_flags;
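/* Create the gallium resource without backing storage; the screen reports
 * the required allocation size in image->size, and memory is attached later
 * when the image is bound to device memory.
 */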
image->bo = device->pscreen->resource_create_unbacked(device->pscreen,
&template,
&image->size);
}
*pImage = val_image_to_handle(image);
return VK_SUCCESS;
}
VkResult
val_CreateImage(VkDevice device,
const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkImage *pImage)
{
return val_image_create(device,
&(struct val_image_create_info) {
.vk_info = pCreateInfo,
.bind_flags = 0,
},
pAllocator,
pImage);
}
void
val_DestroyImage(VkDevice _device, VkImage _image,
const VkAllocationCallbacks *pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_image, image, _image);
if (!_image)
return;
pipe_resource_reference(&image->bo, NULL);
vk_object_base_finish(&image->base);
vk_free2(&device->alloc, pAllocator, image);
}
VkResult
val_CreateImageView(VkDevice _device,
const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkImageView *pView)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_image, image, pCreateInfo->image);
struct val_image_view *view;
view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (view == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &view->base,
VK_OBJECT_TYPE_IMAGE_VIEW);
view->view_type = pCreateInfo->viewType;
view->format = pCreateInfo->format;
view->pformat = vk_format_to_pipe(pCreateInfo->format);
view->components = pCreateInfo->components;
view->subresourceRange = pCreateInfo->subresourceRange;
view->image = image;
view->surface = NULL;
*pView = val_image_view_to_handle(view);
return VK_SUCCESS;
}
void
val_DestroyImageView(VkDevice _device, VkImageView _iview,
const VkAllocationCallbacks *pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_image_view, iview, _iview);
if (!_iview)
return;
pipe_surface_reference(&iview->surface, NULL);
vk_object_base_finish(&iview->base);
vk_free2(&device->alloc, pAllocator, iview);
}
void val_GetImageSubresourceLayout(
VkDevice _device,
VkImage _image,
const VkImageSubresource* pSubresource,
VkSubresourceLayout* pLayout)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_image, image, _image);
uint32_t stride, offset;
device->pscreen->resource_get_info(device->pscreen,
image->bo,
&stride, &offset);
pLayout->offset = offset;
pLayout->rowPitch = stride;
pLayout->arrayPitch = 0;
pLayout->size = image->size;
switch (pSubresource->aspectMask) {
case VK_IMAGE_ASPECT_COLOR_BIT:
break;
case VK_IMAGE_ASPECT_DEPTH_BIT:
break;
case VK_IMAGE_ASPECT_STENCIL_BIT:
break;
default:
assert(!"Invalid image aspect");
}
}
VkResult val_CreateBuffer(
VkDevice _device,
const VkBufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkBuffer* pBuffer)
{
VAL_FROM_HANDLE(val_device, device, _device);
struct val_buffer *buffer;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &buffer->base, VK_OBJECT_TYPE_BUFFER);
buffer->size = pCreateInfo->size;
buffer->usage = pCreateInfo->usage;
buffer->offset = 0;
{
struct pipe_resource template;
memset(&template, 0, sizeof(struct pipe_resource));
template.screen = device->pscreen;
template.target = PIPE_BUFFER;
template.format = PIPE_FORMAT_R8_UNORM;
template.width0 = buffer->size;
template.height0 = 1;
template.depth0 = 1;
template.array_size = 1;
template.flags = PIPE_RESOURCE_FLAG_DONT_OVER_ALLOCATE;
buffer->bo = device->pscreen->resource_create_unbacked(device->pscreen,
&template,
&buffer->total_size);
if (!buffer->bo) {
vk_free2(&device->alloc, pAllocator, buffer);
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
}
*pBuffer = val_buffer_to_handle(buffer);
return VK_SUCCESS;
}
void val_DestroyBuffer(
VkDevice _device,
VkBuffer _buffer,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
if (!_buffer)
return;
pipe_resource_reference(&buffer->bo, NULL);
vk_object_base_finish(&buffer->base);
vk_free2(&device->alloc, pAllocator, buffer);
}
VkResult
val_CreateBufferView(VkDevice _device,
const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkBufferView *pView)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_buffer, buffer, pCreateInfo->buffer);
struct val_buffer_view *view;
view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!view)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &view->base,
VK_OBJECT_TYPE_BUFFER_VIEW);
view->buffer = buffer;
view->format = pCreateInfo->format;
view->pformat = vk_format_to_pipe(pCreateInfo->format);
view->offset = pCreateInfo->offset;
view->range = pCreateInfo->range;
*pView = val_buffer_view_to_handle(view);
return VK_SUCCESS;
}
void
val_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_buffer_view, view, bufferView);
if (!bufferView)
return;
vk_object_base_finish(&view->base);
vk_free2(&device->alloc, pAllocator, view);
}


@ -0,0 +1,109 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "nir.h"
#include "nir_builder.h"
#include "val_lower_vulkan_resource.h"
static nir_ssa_def *
load_frag_coord(nir_builder *b)
{
nir_variable *pos =
nir_find_variable_with_location(b->shader, nir_var_shader_in,
VARYING_SLOT_POS);
if (pos == NULL) {
/* No gl_FragCoord input variable yet; create one at VARYING_SLOT_POS and
 * assign it to the outer 'pos' so it is the variable loaded below. */
pos = nir_variable_create(b->shader, nir_var_shader_in,
glsl_vec4_type(), NULL);
pos->data.location = VARYING_SLOT_POS;
}
/**
* From Vulkan spec:
* "The OriginLowerLeft execution mode must not be used; fragment entry
* points must declare OriginUpperLeft."
*
* So at this point origin_upper_left should be true
*/
assert(b->shader->info.fs.origin_upper_left == true);
return nir_load_var(b, pos);
}
static bool
try_lower_input_load(nir_function_impl *impl, nir_intrinsic_instr *load,
bool use_fragcoord_sysval)
{
nir_deref_instr *deref = nir_src_as_deref(load->src[0]);
assert(glsl_type_is_image(deref->type));
enum glsl_sampler_dim image_dim = glsl_get_sampler_dim(deref->type);
if (image_dim != GLSL_SAMPLER_DIM_SUBPASS &&
image_dim != GLSL_SAMPLER_DIM_SUBPASS_MS)
return false;
nir_builder b;
nir_builder_init(&b, impl);
b.cursor = nir_before_instr(&load->instr);
nir_ssa_def *frag_coord = use_fragcoord_sysval ? nir_load_frag_coord(&b)
: load_frag_coord(&b);
frag_coord = nir_f2i32(&b, frag_coord);
nir_ssa_def *offset = nir_ssa_for_src(&b, load->src[1], 2);
nir_ssa_def *pos = nir_iadd(&b, frag_coord, offset);
nir_ssa_def *layer = nir_imm_int(&b, 0);
nir_ssa_def *coord =
nir_vec4(&b, nir_channel(&b, pos, 0), nir_channel(&b, pos, 1), layer, layer);
nir_instr_rewrite_src(&load->instr, &load->src[1], nir_src_for_ssa(coord));
return true;
}
bool
val_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval)
{
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
bool progress = false;
nir_foreach_function(function, shader) {
if (!function->impl)
continue;
nir_foreach_block(block, function->impl) {
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
if (load->intrinsic != nir_intrinsic_image_deref_load)
continue;
progress |= try_lower_input_load(function->impl, load,
use_fragcoord_sysval);
}
}
}
return progress;
}


@ -0,0 +1,176 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
#include "nir.h"
#include "nir_builder.h"
#include "val_lower_vulkan_resource.h"
static bool
lower_vulkan_resource_index(const nir_instr *instr, const void *data_cb)
{
if (instr->type == nir_instr_type_intrinsic) {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index)
return true;
}
if (instr->type == nir_instr_type_tex) {
return true;
}
return false;
}
static nir_ssa_def *lower_vri_intrin_vri(struct nir_builder *b,
nir_instr *instr, void *data_cb)
{
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
unsigned desc_set_idx = nir_intrinsic_desc_set(intrin);
unsigned binding_idx = nir_intrinsic_binding(intrin);
struct val_pipeline_layout *layout = data_cb;
struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
int value = 0;
bool is_ubo = (binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
for (unsigned s = 0; s < desc_set_idx; s++) {
if (is_ubo)
value += layout->set[s].layout->stage[b->shader->info.stage].const_buffer_count;
else
value += layout->set[s].layout->stage[b->shader->info.stage].shader_buffer_count;
}
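/* Note (assumption): the +1 for UBOs appears to skip constant buffer slot 0,
 * which the driver reserves for its own use (e.g. push constants); SSBO
 * indices start at 0.
 */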
if (is_ubo)
value += binding->stage[b->shader->info.stage].const_buffer_index + 1;
else
value += binding->stage[b->shader->info.stage].shader_buffer_index;
if (nir_src_is_const(intrin->src[0])) {
value += nir_src_comp_as_int(intrin->src[0], 0);
return nir_imm_int(b, value);
} else
return nir_iadd_imm(b, intrin->src[0].ssa, value);
}
static int lower_vri_instr_tex_deref(nir_tex_instr *tex,
nir_tex_src_type deref_src_type,
gl_shader_stage stage,
struct val_pipeline_layout *layout)
{
int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
if (deref_src_idx < 0)
return -1;
nir_deref_instr *deref_instr = nir_src_as_deref(tex->src[deref_src_idx].src);
nir_variable *var = nir_deref_instr_get_variable(deref_instr);
unsigned desc_set_idx = var->data.descriptor_set;
unsigned binding_idx = var->data.binding;
int value = 0;
struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
nir_tex_instr_remove_src(tex, deref_src_idx);
for (unsigned s = 0; s < desc_set_idx; s++) {
if (deref_src_type == nir_tex_src_sampler_deref)
value += layout->set[s].layout->stage[stage].sampler_count;
else
value += layout->set[s].layout->stage[stage].sampler_view_count;
}
if (deref_src_type == nir_tex_src_sampler_deref)
value += binding->stage[stage].sampler_index;
else
value += binding->stage[stage].sampler_view_index;
if (deref_instr->deref_type == nir_deref_type_array) {
if (nir_src_is_const(deref_instr->arr.index))
value += nir_src_as_uint(deref_instr->arr.index);
else {
if (deref_src_type == nir_tex_src_sampler_deref)
nir_tex_instr_add_src(tex, nir_tex_src_sampler_offset, deref_instr->arr.index);
else
nir_tex_instr_add_src(tex, nir_tex_src_texture_offset, deref_instr->arr.index);
}
}
if (deref_src_type == nir_tex_src_sampler_deref)
tex->sampler_index = value;
else
tex->texture_index = value;
return value;
}
static void lower_vri_instr_tex(struct nir_builder *b,
nir_tex_instr *tex, void *data_cb)
{
struct val_pipeline_layout *layout = data_cb;
int tex_value = 0;
lower_vri_instr_tex_deref(tex, nir_tex_src_sampler_deref, b->shader->info.stage, layout);
tex_value = lower_vri_instr_tex_deref(tex, nir_tex_src_texture_deref, b->shader->info.stage, layout);
if (tex_value >= 0)
b->shader->info.textures_used |= (1 << tex_value);
}
static nir_ssa_def *lower_vri_instr(struct nir_builder *b,
nir_instr *instr, void *data_cb)
{
if (instr->type == nir_instr_type_intrinsic) {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index)
return lower_vri_intrin_vri(b, instr, data_cb);
}
if (instr->type == nir_instr_type_tex)
lower_vri_instr_tex(b, nir_instr_as_tex(instr), data_cb);
return NULL;
}
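/* Rewrite per-stage descriptor set/binding pairs into the flat, per-stage
 * gallium binding indices computed from the pipeline layout.
 */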
void val_lower_pipeline_layout(const struct val_device *device,
struct val_pipeline_layout *layout,
nir_shader *shader)
{
nir_shader_lower_instructions(shader, lower_vulkan_resource_index, lower_vri_instr, layout);
nir_foreach_uniform_variable(var, shader) {
const struct glsl_type *type = var->type;
enum glsl_base_type base_type =
glsl_get_base_type(glsl_without_array(type));
unsigned desc_set_idx = var->data.descriptor_set;
unsigned binding_idx = var->data.binding;
struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
int value = 0;
var->data.descriptor_set = 0;
if (base_type == GLSL_TYPE_SAMPLER) {
if (binding->type == VK_DESCRIPTOR_TYPE_SAMPLER) {
for (unsigned s = 0; s < desc_set_idx; s++)
value += layout->set[s].layout->stage[shader->info.stage].sampler_count;
value += binding->stage[shader->info.stage].sampler_index;
} else {
for (unsigned s = 0; s < desc_set_idx; s++)
value += layout->set[s].layout->stage[shader->info.stage].sampler_view_count;
value += binding->stage[shader->info.stage].sampler_view_index;
}
var->data.binding = value;
}
if (base_type == GLSL_TYPE_IMAGE) {
var->data.descriptor_set = 0;
for (unsigned s = 0; s < desc_set_idx; s++)
value += layout->set[s].layout->stage[shader->info.stage].image_count;
value += binding->stage[shader->info.stage].image_index;
var->data.binding = value;
}
}
}


@ -0,0 +1,36 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef VAL_LOWER_VULKAN_RESOURCE_H
#define VAL_LOWER_VULKAN_RESOURCE_H
struct val_pipeline_layout;
struct val_device;
void val_lower_pipeline_layout(const struct val_device *device,
struct val_pipeline_layout *layout,
nir_shader *shader);
bool
val_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
#endif


@ -0,0 +1,290 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
static void
val_render_pass_compile(struct val_render_pass *pass)
{
for (uint32_t i = 0; i < pass->subpass_count; i++) {
struct val_subpass *subpass = &pass->subpasses[i];
for (uint32_t j = 0; j < subpass->attachment_count; j++) {
struct val_subpass_attachment *subpass_att =
&subpass->attachments[j];
if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
struct val_render_pass_attachment *pass_att =
&pass->attachments[subpass_att->attachment];
pass_att->first_subpass_idx = UINT32_MAX;
}
}
for (uint32_t i = 0; i < pass->subpass_count; i++) {
struct val_subpass *subpass = &pass->subpasses[i];
uint32_t color_sample_count = 1, depth_sample_count = 1;
/* We don't allow depth_stencil_attachment to be non-NULL and
* be VK_ATTACHMENT_UNUSED. This way something can just check
* for NULL and be guaranteed that they have a valid
* attachment.
*/
if (subpass->depth_stencil_attachment &&
subpass->depth_stencil_attachment->attachment == VK_ATTACHMENT_UNUSED)
subpass->depth_stencil_attachment = NULL;
if (subpass->ds_resolve_attachment &&
subpass->ds_resolve_attachment->attachment == VK_ATTACHMENT_UNUSED)
subpass->ds_resolve_attachment = NULL;
for (uint32_t j = 0; j < subpass->attachment_count; j++) {
struct val_subpass_attachment *subpass_att =
&subpass->attachments[j];
if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
struct val_render_pass_attachment *pass_att =
&pass->attachments[subpass_att->attachment];
if (i < pass_att->first_subpass_idx)
pass_att->first_subpass_idx = i;
pass_att->last_subpass_idx = i;
}
subpass->has_color_att = false;
for (uint32_t j = 0; j < subpass->color_count; j++) {
struct val_subpass_attachment *subpass_att =
&subpass->color_attachments[j];
if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
subpass->has_color_att = true;
struct val_render_pass_attachment *pass_att =
&pass->attachments[subpass_att->attachment];
color_sample_count = pass_att->samples;
}
if (subpass->depth_stencil_attachment) {
const uint32_t a =
subpass->depth_stencil_attachment->attachment;
struct val_render_pass_attachment *pass_att =
&pass->attachments[a];
depth_sample_count = pass_att->samples;
}
subpass->max_sample_count = MAX2(color_sample_count,
depth_sample_count);
/* We have to handle resolve attachments specially */
subpass->has_color_resolve = false;
if (subpass->resolve_attachments) {
for (uint32_t j = 0; j < subpass->color_count; j++) {
struct val_subpass_attachment *resolve_att =
&subpass->resolve_attachments[j];
if (resolve_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
subpass->has_color_resolve = true;
}
}
for (uint32_t j = 0; j < subpass->input_count; ++j) {
if (subpass->input_attachments[j].attachment == VK_ATTACHMENT_UNUSED)
continue;
for (uint32_t k = 0; k < subpass->color_count; ++k) {
if (subpass->color_attachments[k].attachment == subpass->input_attachments[j].attachment) {
subpass->input_attachments[j].in_render_loop = true;
subpass->color_attachments[k].in_render_loop = true;
}
}
if (subpass->depth_stencil_attachment &&
subpass->depth_stencil_attachment->attachment == subpass->input_attachments[j].attachment) {
subpass->input_attachments[j].in_render_loop = true;
subpass->depth_stencil_attachment->in_render_loop = true;
}
}
}
}
static unsigned
val_num_subpass_attachments(const VkSubpassDescription *desc)
{
return desc->inputAttachmentCount +
desc->colorAttachmentCount +
(desc->pResolveAttachments ? desc->colorAttachmentCount : 0) +
(desc->pDepthStencilAttachment != NULL);
}
VkResult val_CreateRenderPass(
VkDevice _device,
const VkRenderPassCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkRenderPass* pRenderPass)
{
VAL_FROM_HANDLE(val_device, device, _device);
struct val_render_pass *pass;
size_t size;
size_t attachments_offset;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
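/* A single allocation holds the pass, the subpasses array and, starting at
 * attachments_offset, the attachments array.
 */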
size = sizeof(*pass);
size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
attachments_offset = size;
size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
pass = vk_alloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
/* Clear the subpasses along with the parent pass. This required because
* each array member of val_subpass must be a valid pointer if not NULL.
*/
memset(pass, 0, size);
vk_object_base_init(&device->vk, &pass->base,
VK_OBJECT_TYPE_RENDER_PASS);
pass->attachment_count = pCreateInfo->attachmentCount;
pass->subpass_count = pCreateInfo->subpassCount;
pass->attachments = (void *) pass + attachments_offset;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
struct val_render_pass_attachment *att = &pass->attachments[i];
att->format = pCreateInfo->pAttachments[i].format;
att->samples = pCreateInfo->pAttachments[i].samples;
att->load_op = pCreateInfo->pAttachments[i].loadOp;
att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
att->final_layout = pCreateInfo->pAttachments[i].finalLayout;
att->first_subpass_idx = UINT32_MAX;
}
uint32_t subpass_attachment_count = 0;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
subpass_attachment_count += val_num_subpass_attachments(&pCreateInfo->pSubpasses[i]);
}
if (subpass_attachment_count) {
pass->subpass_attachments =
vk_alloc2(&device->alloc, pAllocator,
subpass_attachment_count * sizeof(struct val_subpass_attachment), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass->subpass_attachments == NULL) {
vk_free2(&device->alloc, pAllocator, pass);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
} else
pass->subpass_attachments = NULL;
struct val_subpass_attachment *p = pass->subpass_attachments;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
struct val_subpass *subpass = &pass->subpasses[i];
subpass->input_count = desc->inputAttachmentCount;
subpass->color_count = desc->colorAttachmentCount;
subpass->attachment_count = val_num_subpass_attachments(desc);
subpass->attachments = p;
if (desc->inputAttachmentCount > 0) {
subpass->input_attachments = p;
p += desc->inputAttachmentCount;
for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) {
subpass->input_attachments[j] = (struct val_subpass_attachment) {
.attachment = desc->pInputAttachments[j].attachment,
.layout = desc->pInputAttachments[j].layout,
};
}
}
if (desc->colorAttachmentCount > 0) {
subpass->color_attachments = p;
p += desc->colorAttachmentCount;
for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
subpass->color_attachments[j] = (struct val_subpass_attachment) {
.attachment = desc->pColorAttachments[j].attachment,
.layout = desc->pColorAttachments[j].layout,
};
}
}
if (desc->pResolveAttachments) {
subpass->resolve_attachments = p;
p += desc->colorAttachmentCount;
for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
subpass->resolve_attachments[j] = (struct val_subpass_attachment) {
.attachment = desc->pResolveAttachments[j].attachment,
.layout = desc->pResolveAttachments[j].layout,
};
}
}
if (desc->pDepthStencilAttachment) {
subpass->depth_stencil_attachment = p++;
*subpass->depth_stencil_attachment = (struct val_subpass_attachment) {
.attachment = desc->pDepthStencilAttachment->attachment,
.layout = desc->pDepthStencilAttachment->layout,
};
}
}
val_render_pass_compile(pass);
*pRenderPass = val_render_pass_to_handle(pass);
return VK_SUCCESS;
}
void val_DestroyRenderPass(
VkDevice _device,
VkRenderPass _pass,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_render_pass, pass, _pass);
if (!_pass)
return;
vk_object_base_finish(&pass->base);
vk_free2(&device->alloc, pAllocator, pass->subpass_attachments);
vk_free2(&device->alloc, pAllocator, pass);
}
void val_GetRenderAreaGranularity(
VkDevice device,
VkRenderPass renderPass,
VkExtent2D* pGranularity)
{
*pGranularity = (VkExtent2D) { 1, 1 };
}


@ -0,0 +1,943 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
#include "glsl_types.h"
#include "spirv/nir_spirv.h"
#include "nir/nir_builder.h"
#include "val_lower_vulkan_resource.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#define SPIR_V_MAGIC_NUMBER 0x07230203
VkResult val_CreateShaderModule(
VkDevice _device,
const VkShaderModuleCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkShaderModule* pShaderModule)
{
VAL_FROM_HANDLE(val_device, device, _device);
struct val_shader_module *module;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
module = vk_alloc2(&device->alloc, pAllocator,
sizeof(*module) + pCreateInfo->codeSize, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (module == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &module->base,
VK_OBJECT_TYPE_SHADER_MODULE);
module->size = pCreateInfo->codeSize;
memcpy(module->data, pCreateInfo->pCode, module->size);
*pShaderModule = val_shader_module_to_handle(module);
return VK_SUCCESS;
}
void val_DestroyShaderModule(
VkDevice _device,
VkShaderModule _module,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_shader_module, module, _module);
if (!_module)
return;
vk_object_base_finish(&module->base);
vk_free2(&device->alloc, pAllocator, module);
}
void val_DestroyPipeline(
VkDevice _device,
VkPipeline _pipeline,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_pipeline, pipeline, _pipeline);
if (!_pipeline)
return;
if (pipeline->shader_cso[PIPE_SHADER_VERTEX])
device->queue.ctx->delete_vs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
if (pipeline->shader_cso[PIPE_SHADER_FRAGMENT])
device->queue.ctx->delete_fs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
if (pipeline->shader_cso[PIPE_SHADER_GEOMETRY])
device->queue.ctx->delete_gs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
if (pipeline->shader_cso[PIPE_SHADER_TESS_CTRL])
device->queue.ctx->delete_tcs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
if (pipeline->shader_cso[PIPE_SHADER_TESS_EVAL])
device->queue.ctx->delete_tes_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
if (pipeline->shader_cso[PIPE_SHADER_COMPUTE])
device->queue.ctx->delete_compute_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);
if (!pipeline->is_compute_pipeline) {
for (unsigned i = 0; i < pipeline->graphics_create_info.stageCount; i++)
if (pipeline->graphics_create_info.pStages[i].pSpecializationInfo)
free((void *)pipeline->graphics_create_info.pStages[i].pSpecializationInfo);
free((void *)pipeline->graphics_create_info.pStages);
free((void *)pipeline->graphics_create_info.pVertexInputState->pVertexBindingDescriptions);
free((void *)pipeline->graphics_create_info.pVertexInputState->pVertexAttributeDescriptions);
free((void *)pipeline->graphics_create_info.pVertexInputState);
free((void *)pipeline->graphics_create_info.pInputAssemblyState);
if (pipeline->graphics_create_info.pViewportState) {
free((void *)pipeline->graphics_create_info.pViewportState->pViewports);
free((void *)pipeline->graphics_create_info.pViewportState->pScissors);
}
free((void *)pipeline->graphics_create_info.pViewportState);
if (pipeline->graphics_create_info.pTessellationState)
free((void *)pipeline->graphics_create_info.pTessellationState);
free((void *)pipeline->graphics_create_info.pRasterizationState);
free((void *)pipeline->graphics_create_info.pMultisampleState);
free((void *)pipeline->graphics_create_info.pDepthStencilState);
if (pipeline->graphics_create_info.pColorBlendState)
free((void *)pipeline->graphics_create_info.pColorBlendState->pAttachments);
free((void *)pipeline->graphics_create_info.pColorBlendState);
if (pipeline->graphics_create_info.pDynamicState)
free((void *)pipeline->graphics_create_info.pDynamicState->pDynamicStates);
free((void *)pipeline->graphics_create_info.pDynamicState);
} else
if (pipeline->compute_create_info.stage.pSpecializationInfo)
free((void *)pipeline->compute_create_info.stage.pSpecializationInfo);
vk_object_base_finish(&pipeline->base);
vk_free2(&device->alloc, pAllocator, pipeline);
}
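/* The helpers below deep-copy the pipeline create-info chains so the
 * pipeline owns its own copies after pipeline creation returns; the copies
 * are released in val_DestroyPipeline() above.
 */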
static VkResult
deep_copy_shader_stage(struct VkPipelineShaderStageCreateInfo *dst,
const struct VkPipelineShaderStageCreateInfo *src)
{
dst->sType = src->sType;
dst->pNext = NULL;
dst->flags = src->flags;
dst->stage = src->stage;
dst->module = src->module;
dst->pName = src->pName;
dst->pSpecializationInfo = NULL;
if (src->pSpecializationInfo) {
const VkSpecializationInfo *src_spec = src->pSpecializationInfo;
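/* One allocation holds the VkSpecializationInfo, its map entries and the
 * data blob, laid out back to back.
 */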
VkSpecializationInfo *dst_spec = malloc(sizeof(VkSpecializationInfo) +
src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry) +
src_spec->dataSize);
VkSpecializationMapEntry *maps = (VkSpecializationMapEntry *)(dst_spec + 1);
dst_spec->pMapEntries = maps;
void *pdata = (void *)(dst_spec->pMapEntries + src_spec->mapEntryCount);
dst_spec->pData = pdata;
dst_spec->mapEntryCount = src_spec->mapEntryCount;
dst_spec->dataSize = src_spec->dataSize;
memcpy(pdata, src_spec->pData, src->pSpecializationInfo->dataSize);
memcpy(maps, src_spec->pMapEntries, src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry));
dst->pSpecializationInfo = dst_spec;
}
return VK_SUCCESS;
}
static VkResult
deep_copy_vertex_input_state(struct VkPipelineVertexInputStateCreateInfo *dst,
const struct VkPipelineVertexInputStateCreateInfo *src)
{
int i;
VkVertexInputBindingDescription *dst_binding_descriptions;
VkVertexInputAttributeDescription *dst_attrib_descriptions;
dst->sType = src->sType;
dst->pNext = NULL;
dst->flags = src->flags;
dst->vertexBindingDescriptionCount = src->vertexBindingDescriptionCount;
dst_binding_descriptions = malloc(src->vertexBindingDescriptionCount * sizeof(VkVertexInputBindingDescription));
if (!dst_binding_descriptions)
return VK_ERROR_OUT_OF_HOST_MEMORY;
for (i = 0; i < dst->vertexBindingDescriptionCount; i++) {
memcpy(&dst_binding_descriptions[i], &src->pVertexBindingDescriptions[i], sizeof(VkVertexInputBindingDescription));
}
dst->pVertexBindingDescriptions = dst_binding_descriptions;
dst->vertexAttributeDescriptionCount = src->vertexAttributeDescriptionCount;
dst_attrib_descriptions = malloc(src->vertexAttributeDescriptionCount * sizeof(VkVertexInputAttributeDescription));
if (!dst_attrib_descriptions)
return VK_ERROR_OUT_OF_HOST_MEMORY;
for (i = 0; i < dst->vertexAttributeDescriptionCount; i++) {
memcpy(&dst_attrib_descriptions[i], &src->pVertexAttributeDescriptions[i], sizeof(VkVertexInputAttributeDescription));
}
dst->pVertexAttributeDescriptions = dst_attrib_descriptions;
return VK_SUCCESS;
}
static VkResult
deep_copy_viewport_state(VkPipelineViewportStateCreateInfo *dst,
const VkPipelineViewportStateCreateInfo *src)
{
int i;
VkViewport *viewports;
VkRect2D *scissors;
dst->sType = src->sType;
dst->pNext = src->pNext;
dst->flags = src->flags;
if (src->pViewports) {
viewports = malloc(src->viewportCount * sizeof(VkViewport));
for (i = 0; i < src->viewportCount; i++)
memcpy(&viewports[i], &src->pViewports[i], sizeof(VkViewport));
dst->pViewports = viewports;
} else
dst->pViewports = NULL;
dst->viewportCount = src->viewportCount;
if (src->pScissors) {
scissors = malloc(src->scissorCount * sizeof(VkRect2D));
for (i = 0; i < src->scissorCount; i++)
memcpy(&scissors[i], &src->pScissors[i], sizeof(VkRect2D));
dst->pScissors = scissors;
} else
dst->pScissors = NULL;
dst->scissorCount = src->scissorCount;
return VK_SUCCESS;
}
static VkResult
deep_copy_color_blend_state(VkPipelineColorBlendStateCreateInfo *dst,
const VkPipelineColorBlendStateCreateInfo *src)
{
VkPipelineColorBlendAttachmentState *attachments;
dst->sType = src->sType;
dst->pNext = src->pNext;
dst->flags = src->flags;
dst->logicOpEnable = src->logicOpEnable;
dst->logicOp = src->logicOp;
attachments = malloc(src->attachmentCount * sizeof(VkPipelineColorBlendAttachmentState));
memcpy(attachments, src->pAttachments, src->attachmentCount * sizeof(VkPipelineColorBlendAttachmentState));
dst->attachmentCount = src->attachmentCount;
dst->pAttachments = attachments;
memcpy(&dst->blendConstants, &src->blendConstants, sizeof(float) * 4);
return VK_SUCCESS;
}
static VkResult
deep_copy_dynamic_state(VkPipelineDynamicStateCreateInfo *dst,
const VkPipelineDynamicStateCreateInfo *src)
{
VkDynamicState *dynamic_states;
dst->sType = src->sType;
dst->pNext = src->pNext;
dst->flags = src->flags;
dynamic_states = malloc(src->dynamicStateCount * sizeof(VkDynamicState));
if (!dynamic_states)
return VK_ERROR_OUT_OF_HOST_MEMORY;
memcpy(dynamic_states, src->pDynamicStates, src->dynamicStateCount * sizeof(VkDynamicState));
dst->dynamicStateCount = src->dynamicStateCount;
dst->pDynamicStates = dynamic_states;
return VK_SUCCESS;
}
static VkResult
deep_copy_graphics_create_info(VkGraphicsPipelineCreateInfo *dst,
const VkGraphicsPipelineCreateInfo *src)
{
int i;
VkResult result;
VkPipelineShaderStageCreateInfo *stages;
VkPipelineVertexInputStateCreateInfo *vertex_input;
VkPipelineInputAssemblyStateCreateInfo *input_assembly;
VkPipelineRasterizationStateCreateInfo* raster_state;
dst->sType = src->sType;
dst->pNext = NULL;
dst->flags = src->flags;
dst->layout = src->layout;
dst->renderPass = src->renderPass;
dst->subpass = src->subpass;
dst->basePipelineHandle = src->basePipelineHandle;
dst->basePipelineIndex = src->basePipelineIndex;
/* pStages */
dst->stageCount = src->stageCount;
stages = malloc(dst->stageCount * sizeof(VkPipelineShaderStageCreateInfo));
for (i = 0 ; i < dst->stageCount; i++) {
result = deep_copy_shader_stage(&stages[i], &src->pStages[i]);
if (result != VK_SUCCESS)
return result;
}
dst->pStages = stages;
/* pVertexInputState */
vertex_input = malloc(sizeof(VkPipelineVertexInputStateCreateInfo));
result = deep_copy_vertex_input_state(vertex_input,
src->pVertexInputState);
if (result != VK_SUCCESS)
return result;
dst->pVertexInputState = vertex_input;
/* pInputAssemblyState */
input_assembly = malloc(sizeof(VkPipelineInputAssemblyStateCreateInfo));
if (!input_assembly)
return VK_ERROR_OUT_OF_HOST_MEMORY;
memcpy(input_assembly, src->pInputAssemblyState, sizeof(VkPipelineInputAssemblyStateCreateInfo));
dst->pInputAssemblyState = input_assembly;
/* pTessellationState */
if (src->pTessellationState) {
VkPipelineTessellationStateCreateInfo *tess_state;
tess_state = malloc(sizeof(VkPipelineTessellationStateCreateInfo));
if (!tess_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
memcpy(tess_state, src->pTessellationState, sizeof(VkPipelineTessellationStateCreateInfo));
dst->pTessellationState = tess_state;
}
/* pViewportState */
if (src->pViewportState) {
VkPipelineViewportStateCreateInfo *viewport_state;
viewport_state = malloc(sizeof(VkPipelineViewportStateCreateInfo));
if (!viewport_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
deep_copy_viewport_state(viewport_state, src->pViewportState);
dst->pViewportState = viewport_state;
} else
dst->pViewportState = NULL;
/* pRasterizationState */
raster_state = malloc(sizeof(VkPipelineRasterizationStateCreateInfo));
if (!raster_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
memcpy(raster_state, src->pRasterizationState, sizeof(VkPipelineRasterizationStateCreateInfo));
dst->pRasterizationState = raster_state;
/* pMultisampleState */
if (src->pMultisampleState) {
VkPipelineMultisampleStateCreateInfo* ms_state;
ms_state = malloc(sizeof(VkPipelineMultisampleStateCreateInfo) + sizeof(VkSampleMask));
if (!ms_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
/* does samplemask need deep copy? */
memcpy(ms_state, src->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
if (src->pMultisampleState->pSampleMask) {
VkSampleMask *sample_mask = (VkSampleMask *)(ms_state + 1);
sample_mask[0] = src->pMultisampleState->pSampleMask[0];
ms_state->pSampleMask = sample_mask;
}
dst->pMultisampleState = ms_state;
} else
dst->pMultisampleState = NULL;
/* pDepthStencilState */
if (src->pDepthStencilState) {
VkPipelineDepthStencilStateCreateInfo* ds_state;
ds_state = malloc(sizeof(VkPipelineDepthStencilStateCreateInfo));
if (!ds_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
memcpy(ds_state, src->pDepthStencilState, sizeof(VkPipelineDepthStencilStateCreateInfo));
dst->pDepthStencilState = ds_state;
} else
dst->pDepthStencilState = NULL;
/* pColorBlendState */
if (src->pColorBlendState) {
VkPipelineColorBlendStateCreateInfo* cb_state;
cb_state = malloc(sizeof(VkPipelineColorBlendStateCreateInfo));
if (!cb_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
deep_copy_color_blend_state(cb_state, src->pColorBlendState);
dst->pColorBlendState = cb_state;
} else
dst->pColorBlendState = NULL;
if (src->pDynamicState) {
VkPipelineDynamicStateCreateInfo* dyn_state;
/* pDynamicState */
dyn_state = malloc(sizeof(VkPipelineDynamicStateCreateInfo));
if (!dyn_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
deep_copy_dynamic_state(dyn_state, src->pDynamicState);
dst->pDynamicState = dyn_state;
} else
dst->pDynamicState = NULL;
return VK_SUCCESS;
}
static VkResult
deep_copy_compute_create_info(VkComputePipelineCreateInfo *dst,
const VkComputePipelineCreateInfo *src)
{
VkResult result;
dst->sType = src->sType;
dst->pNext = NULL;
dst->flags = src->flags;
dst->layout = src->layout;
dst->basePipelineHandle = src->basePipelineHandle;
dst->basePipelineIndex = src->basePipelineIndex;
result = deep_copy_shader_stage(&dst->stage, &src->stage);
if (result != VK_SUCCESS)
return result;
return VK_SUCCESS;
}
static inline unsigned
st_shader_stage_to_ptarget(gl_shader_stage stage)
{
switch (stage) {
case MESA_SHADER_VERTEX:
return PIPE_SHADER_VERTEX;
case MESA_SHADER_FRAGMENT:
return PIPE_SHADER_FRAGMENT;
case MESA_SHADER_GEOMETRY:
return PIPE_SHADER_GEOMETRY;
case MESA_SHADER_TESS_CTRL:
return PIPE_SHADER_TESS_CTRL;
case MESA_SHADER_TESS_EVAL:
return PIPE_SHADER_TESS_EVAL;
case MESA_SHADER_COMPUTE:
return PIPE_SHADER_COMPUTE;
default:
break;
}
assert(!"should not be reached");
return PIPE_SHADER_VERTEX;
}
static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
assert(glsl_type_is_vector_or_scalar(type));
uint32_t comp_size = glsl_type_is_boolean(type)
? 4 : glsl_get_bit_size(type) / 8;
unsigned length = glsl_get_vector_elements(type);
*size = comp_size * length;
*align = comp_size;
}
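/* Run a NIR pass and accumulate whether any pass in the loop made progress. */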
#define OPT(pass, ...) ({ \
bool this_progress = false; \
NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
if (this_progress) \
progress = true; \
this_progress; \
})
static void
val_shader_compile_to_ir(struct val_pipeline *pipeline,
struct val_shader_module *module,
const char *entrypoint_name,
gl_shader_stage stage,
const VkSpecializationInfo *spec_info)
{
nir_shader *nir;
const nir_shader_compiler_options *drv_options = pipeline->device->pscreen->get_compiler_options(pipeline->device->pscreen, PIPE_SHADER_IR_NIR, st_shader_stage_to_ptarget(stage));
bool progress;
uint32_t *spirv = (uint32_t *) module->data;
assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
assert(module->size % 4 == 0);
uint32_t num_spec_entries = 0;
struct nir_spirv_specialization *spec_entries = NULL;
if (spec_info && spec_info->mapEntryCount > 0) {
num_spec_entries = spec_info->mapEntryCount;
spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
for (uint32_t i = 0; i < num_spec_entries; i++) {
VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
const void *data =
spec_info->pData + entry.offset;
assert((const void *)(data + entry.size) <=
spec_info->pData + spec_info->dataSize);
spec_entries[i].id = entry.constantID;
switch (entry.size) {
case 8:
spec_entries[i].value.u64 = *(const uint64_t *)data;
break;
case 4:
spec_entries[i].value.u32 = *(const uint32_t *)data;
break;
case 2:
spec_entries[i].value.u16 = *(const uint16_t *)data;
break;
case 1:
spec_entries[i].value.u8 = *(const uint8_t *)data;
break;
default:
assert(!"Invalid spec constant size");
break;
}
}
}
struct val_device *pdevice = pipeline->device;
const struct spirv_to_nir_options spirv_options = {
.environment = NIR_SPIRV_VULKAN,
.lower_ubo_ssbo_access_to_offsets = true,
.caps = {
.float64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DOUBLES) == 1),
.int16 = true,
.int64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_INT64) == 1),
.tessellation = true,
.image_ms_array = true,
.storage_image_ms = true,
.geometry_streams = true,
.storage_16bit = true,
.variable_pointers = true,
},
.ubo_addr_format = nir_address_format_32bit_index_offset,
.ssbo_addr_format = nir_address_format_32bit_index_offset,
.phys_ssbo_addr_format = nir_address_format_64bit_global,
.push_const_addr_format = nir_address_format_logical,
.shared_addr_format = nir_address_format_32bit_offset,
.frag_coord_is_sysval = false,
};
nir = spirv_to_nir(spirv, module->size / 4,
spec_entries, num_spec_entries,
stage, entrypoint_name, &spirv_options, drv_options);
nir_validate_shader(nir, NULL);
free(spec_entries);
NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
NIR_PASS_V(nir, nir_copy_prop);
NIR_PASS_V(nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */
foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
if (!func->is_entrypoint)
exec_node_remove(&func->node);
}
assert(exec_list_length(&nir->functions) == 1);
NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_split_per_member_structs);
NIR_PASS_V(nir, nir_remove_dead_variables,
nir_var_shader_in | nir_var_shader_out | nir_var_system_value, NULL);
if (stage == MESA_SHADER_FRAGMENT)
val_lower_input_attachments(nir, false);
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
nir_remove_dead_variables(nir, nir_var_uniform, NULL);
val_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);
NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
if (nir->info.stage == MESA_SHADER_COMPUTE) {
NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared, shared_var_info);
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
}
NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_shader_temp, NULL);
if (nir->info.stage == MESA_SHADER_VERTEX ||
nir->info.stage == MESA_SHADER_GEOMETRY) {
NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
}
do {
progress = false;
progress |= OPT(nir_lower_flrp, 32|64, true, false);
progress |= OPT(nir_split_array_vars, nir_var_function_temp);
progress |= OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
progress |= OPT(nir_opt_deref);
progress |= OPT(nir_lower_vars_to_ssa);
progress |= nir_copy_prop(nir);
progress |= nir_opt_dce(nir);
progress |= nir_opt_dead_cf(nir);
progress |= nir_opt_cse(nir);
progress |= nir_opt_algebraic(nir);
progress |= nir_opt_constant_folding(nir);
progress |= nir_opt_undef(nir);
progress |= nir_opt_deref(nir);
progress |= nir_lower_alu_to_scalar(nir, NULL, NULL);
} while (progress);
nir_lower_var_copies(nir);
nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
nir_validate_shader(nir, NULL);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
if (nir->info.stage != MESA_SHADER_VERTEX)
nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, nir->info.stage);
else {
nir->num_inputs = util_last_bit64(nir->info.inputs_read);
nir_foreach_shader_in_variable(var, nir) {
var->data.driver_location = var->data.location - VERT_ATTRIB_GENERIC0;
}
}
nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs,
nir->info.stage);
pipeline->pipeline_nir[stage] = nir;
}
static void fill_shader_prog(struct pipe_shader_state *state, gl_shader_stage stage, struct val_pipeline *pipeline)
{
state->type = PIPE_SHADER_IR_NIR;
state->ir.nir = pipeline->pipeline_nir[stage];
}
static void
merge_tess_info(struct shader_info *tes_info,
const struct shader_info *tcs_info)
{
/* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
*
* "PointMode. Controls generation of points rather than triangles
* or lines. This functionality defaults to disabled, and is
 * enabled if either shader stage includes the execution mode."
*
* and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
* PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
* and OutputVertices, it says:
*
* "One mode must be set in at least one of the tessellation
* shader stages."
*
* So, the fields can be set in either the TCS or TES, but they must
* agree if set in both. Our backend looks at TES, so bitwise-or in
* the values from the TCS.
*/
assert(tcs_info->tess.tcs_vertices_out == 0 ||
tes_info->tess.tcs_vertices_out == 0 ||
tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
tcs_info->tess.spacing == tes_info->tess.spacing);
tes_info->tess.spacing |= tcs_info->tess.spacing;
assert(tcs_info->tess.primitive_mode == 0 ||
tes_info->tess.primitive_mode == 0 ||
tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
tes_info->tess.ccw |= tcs_info->tess.ccw;
tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}
static gl_shader_stage
val_shader_stage(VkShaderStageFlagBits stage)
{
switch (stage) {
case VK_SHADER_STAGE_VERTEX_BIT:
return MESA_SHADER_VERTEX;
case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
return MESA_SHADER_TESS_CTRL;
case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
return MESA_SHADER_TESS_EVAL;
case VK_SHADER_STAGE_GEOMETRY_BIT:
return MESA_SHADER_GEOMETRY;
case VK_SHADER_STAGE_FRAGMENT_BIT:
return MESA_SHADER_FRAGMENT;
case VK_SHADER_STAGE_COMPUTE_BIT:
return MESA_SHADER_COMPUTE;
default:
unreachable("invalid VkShaderStageFlagBits");
return MESA_SHADER_NONE;
}
}
static VkResult
val_pipeline_compile(struct val_pipeline *pipeline,
gl_shader_stage stage)
{
struct val_device *device = pipeline->device;
device->physical_device->pscreen->finalize_nir(device->physical_device->pscreen, pipeline->pipeline_nir[stage], true);
if (stage == MESA_SHADER_COMPUTE) {
struct pipe_compute_state shstate = {};
shstate.prog = (void *)pipeline->pipeline_nir[MESA_SHADER_COMPUTE];
shstate.ir_type = PIPE_SHADER_IR_NIR;
shstate.req_local_mem = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.shared_size;
pipeline->shader_cso[PIPE_SHADER_COMPUTE] = device->queue.ctx->create_compute_state(device->queue.ctx, &shstate);
} else {
struct pipe_shader_state shstate = {};
fill_shader_prog(&shstate, stage, pipeline);
switch (stage) {
case MESA_SHADER_FRAGMENT:
pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
break;
case MESA_SHADER_VERTEX:
pipeline->shader_cso[PIPE_SHADER_VERTEX] = device->queue.ctx->create_vs_state(device->queue.ctx, &shstate);
break;
case MESA_SHADER_GEOMETRY:
pipeline->shader_cso[PIPE_SHADER_GEOMETRY] = device->queue.ctx->create_gs_state(device->queue.ctx, &shstate);
break;
case MESA_SHADER_TESS_CTRL:
pipeline->shader_cso[PIPE_SHADER_TESS_CTRL] = device->queue.ctx->create_tcs_state(device->queue.ctx, &shstate);
break;
case MESA_SHADER_TESS_EVAL:
pipeline->shader_cso[PIPE_SHADER_TESS_EVAL] = device->queue.ctx->create_tes_state(device->queue.ctx, &shstate);
break;
default:
unreachable("illegal shader");
break;
}
}
return VK_SUCCESS;
}
static VkResult
val_graphics_pipeline_init(struct val_pipeline *pipeline,
struct val_device *device,
struct val_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc)
{
if (alloc == NULL)
alloc = &device->alloc;
pipeline->device = device;
pipeline->layout = val_pipeline_layout_from_handle(pCreateInfo->layout);
pipeline->force_min_sample = false;
   /* Deep copy the create info so the pipeline keeps a usable copy after the
    * caller's pointers go away. */
deep_copy_graphics_create_info(&pipeline->graphics_create_info, pCreateInfo);
pipeline->is_compute_pipeline = false;
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
VAL_FROM_HANDLE(val_shader_module, module,
pCreateInfo->pStages[i].module);
gl_shader_stage stage = val_shader_stage(pCreateInfo->pStages[i].stage);
val_shader_compile_to_ir(pipeline, module,
pCreateInfo->pStages[i].pName,
stage,
pCreateInfo->pStages[i].pSpecializationInfo);
}
if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]) {
if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.fs.uses_sample_qualifier ||
pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
SYSTEM_BIT_SAMPLE_POS))
pipeline->force_min_sample = true;
}
if (pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]) {
nir_lower_patch_vertices(pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL], pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
merge_tess_info(&pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info, &pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info);
pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw = !pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw;
}
bool has_fragment_shader = false;
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
gl_shader_stage stage = val_shader_stage(pCreateInfo->pStages[i].stage);
val_pipeline_compile(pipeline, stage);
if (stage == MESA_SHADER_FRAGMENT)
has_fragment_shader = true;
}
if (has_fragment_shader == false) {
/* create a dummy fragment shader for this pipeline. */
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
b.shader->info.name = ralloc_strdup(b.shader, "dummy_frag");
pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = b.shader;
struct pipe_shader_state shstate = {};
shstate.type = PIPE_SHADER_IR_NIR;
shstate.ir.nir = pipeline->pipeline_nir[MESA_SHADER_FRAGMENT];
pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
}
return VK_SUCCESS;
}
static VkResult
val_graphics_pipeline_create(
VkDevice _device,
VkPipelineCache _cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
struct val_pipeline *pipeline;
VkResult result;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &pipeline->base,
VK_OBJECT_TYPE_PIPELINE);
result = val_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
pAllocator);
if (result != VK_SUCCESS) {
vk_free2(&device->alloc, pAllocator, pipeline);
return result;
}
*pPipeline = val_pipeline_to_handle(pipeline);
return VK_SUCCESS;
}
VkResult val_CreateGraphicsPipelines(
VkDevice _device,
VkPipelineCache pipelineCache,
uint32_t count,
const VkGraphicsPipelineCreateInfo* pCreateInfos,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipelines)
{
VkResult result = VK_SUCCESS;
unsigned i = 0;
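   /* Per-element behaviour: the loop below does not stop at the first
    * failure; entries that fail are set to VK_NULL_HANDLE and an error code
    * from a failed creation is what ends up being returned. */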
for (; i < count; i++) {
VkResult r;
r = val_graphics_pipeline_create(_device,
pipelineCache,
&pCreateInfos[i],
pAllocator, &pPipelines[i]);
if (r != VK_SUCCESS) {
result = r;
pPipelines[i] = VK_NULL_HANDLE;
}
}
return result;
}
static VkResult
val_compute_pipeline_init(struct val_pipeline *pipeline,
struct val_device *device,
struct val_pipeline_cache *cache,
const VkComputePipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc)
{
VAL_FROM_HANDLE(val_shader_module, module,
pCreateInfo->stage.module);
if (alloc == NULL)
alloc = &device->alloc;
pipeline->device = device;
pipeline->layout = val_pipeline_layout_from_handle(pCreateInfo->layout);
pipeline->force_min_sample = false;
deep_copy_compute_create_info(&pipeline->compute_create_info, pCreateInfo);
pipeline->is_compute_pipeline = true;
val_shader_compile_to_ir(pipeline, module,
pCreateInfo->stage.pName,
MESA_SHADER_COMPUTE,
pCreateInfo->stage.pSpecializationInfo);
val_pipeline_compile(pipeline, MESA_SHADER_COMPUTE);
return VK_SUCCESS;
}
static VkResult
val_compute_pipeline_create(
VkDevice _device,
VkPipelineCache _cache,
const VkComputePipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
struct val_pipeline *pipeline;
VkResult result;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &pipeline->base,
VK_OBJECT_TYPE_PIPELINE);
result = val_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
pAllocator);
if (result != VK_SUCCESS) {
vk_free2(&device->alloc, pAllocator, pipeline);
return result;
}
*pPipeline = val_pipeline_to_handle(pipeline);
return VK_SUCCESS;
}
VkResult val_CreateComputePipelines(
VkDevice _device,
VkPipelineCache pipelineCache,
uint32_t count,
const VkComputePipelineCreateInfo* pCreateInfos,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipelines)
{
VkResult result = VK_SUCCESS;
unsigned i = 0;
for (; i < count; i++) {
VkResult r;
r = val_compute_pipeline_create(_device,
pipelineCache,
&pCreateInfos[i],
pAllocator, &pPipelines[i]);
if (r != VK_SUCCESS) {
result = r;
pPipelines[i] = VK_NULL_HANDLE;
}
}
return result;
}

View File

@ -0,0 +1,103 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
VkResult val_CreatePipelineCache(
VkDevice _device,
const VkPipelineCacheCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkPipelineCache* pPipelineCache)
{
VAL_FROM_HANDLE(val_device, device, _device);
struct val_pipeline_cache *cache;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
cache = vk_alloc2(&device->alloc, pAllocator,
sizeof(*cache), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cache == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &cache->base,
VK_OBJECT_TYPE_PIPELINE_CACHE);
if (pAllocator)
cache->alloc = *pAllocator;
else
cache->alloc = device->alloc;
cache->device = device;
*pPipelineCache = val_pipeline_cache_to_handle(cache);
return VK_SUCCESS;
}
void val_DestroyPipelineCache(
VkDevice _device,
VkPipelineCache _cache,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
if (!_cache)
return;
// val_pipeline_cache_finish(cache);
vk_object_base_finish(&cache->base);
vk_free2(&device->alloc, pAllocator, cache);
}
VkResult val_GetPipelineCacheData(
VkDevice _device,
VkPipelineCache _cache,
size_t* pDataSize,
void* pData)
{
VkResult result = VK_SUCCESS;
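   /* The 32-byte blob written below is just the pipeline cache header the
    * Vulkan spec mandates: header length, header version
    * (VK_PIPELINE_CACHE_HEADER_VERSION_ONE == 1), vendor ID, device ID
    * (0 for this software device) and the 16-byte cache UUID. No pipeline
    * data is stored beyond the header. */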
if (pData) {
if (*pDataSize < 32) {
*pDataSize = 0;
result = VK_INCOMPLETE;
} else {
uint32_t *hdr = (uint32_t *)pData;
hdr[0] = 32;
hdr[1] = 1;
hdr[2] = VK_VENDOR_ID_MESA;
hdr[3] = 0;
val_device_get_cache_uuid(&hdr[4]);
}
} else
*pDataSize = 32;
return result;
}
VkResult val_MergePipelineCaches(
VkDevice _device,
VkPipelineCache destCache,
uint32_t srcCacheCount,
const VkPipelineCache* pSrcCaches)
{
return VK_SUCCESS;
}

View File

@ -0,0 +1,989 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#pragma once
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <strings.h>
#include <pthread.h>
#include <assert.h>
#include <stdint.h>
#include "util/macros.h"
#include "util/list.h"
#include "compiler/shader_enums.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "nir.h"
/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;
#define VK_PROTOTYPES
#include <vulkan/vulkan.h>
#include <vulkan/vk_icd.h>
#include "val_extensions.h"
#include "val_entrypoints.h"
#include "vk_object.h"
#include "wsi_common.h"
#include <assert.h>
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_SETS 8
#define MAX_PUSH_CONSTANTS_SIZE 128
#define val_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
#define typed_memcpy(dest, src, count) ({ \
memcpy((dest), (src), (count) * sizeof(*(src))); \
})
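/* Usage sketch (hypothetical arrays): typed_memcpy(dst_rects, src_rects, n)
 * copies n elements sized from *src_rects, which avoids the usual
 * sizeof-the-pointer mistakes of a bare memcpy. */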
int val_get_instance_entrypoint_index(const char *name);
int val_get_device_entrypoint_index(const char *name);
int val_get_physical_device_entrypoint_index(const char *name);
const char *val_get_instance_entry_name(int index);
const char *val_get_physical_device_entry_name(int index);
const char *val_get_device_entry_name(int index);
bool val_instance_entrypoint_is_enabled(int index, uint32_t core_version,
const struct val_instance_extension_table *instance);
bool val_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
const struct val_instance_extension_table *instance);
bool val_device_entrypoint_is_enabled(int index, uint32_t core_version,
const struct val_instance_extension_table *instance,
const struct val_device_extension_table *device);
void *val_lookup_entrypoint(const char *name);
#define VAL_DEFINE_HANDLE_CASTS(__val_type, __VkType) \
\
static inline struct __val_type * \
__val_type ## _from_handle(__VkType _handle) \
{ \
return (struct __val_type *) _handle; \
} \
\
static inline __VkType \
__val_type ## _to_handle(struct __val_type *_obj) \
{ \
return (__VkType) _obj; \
}
#define VAL_DEFINE_NONDISP_HANDLE_CASTS(__val_type, __VkType) \
\
static inline struct __val_type * \
__val_type ## _from_handle(__VkType _handle) \
{ \
return (struct __val_type *)(uintptr_t) _handle; \
} \
\
static inline __VkType \
__val_type ## _to_handle(struct __val_type *_obj) \
{ \
return (__VkType)(uintptr_t) _obj; \
}
#define VAL_FROM_HANDLE(__val_type, __name, __handle) \
struct __val_type *__name = __val_type ## _from_handle(__handle)
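/* The non-dispatchable variant goes through uintptr_t because those handles
 * are 64-bit values that can be wider than a pointer on 32-bit builds,
 * whereas dispatchable handles are always pointers. Typical use in the
 * entrypoints below:
 *
 *    VAL_FROM_HANDLE(val_device, device, _device);
 *
 * declares 'struct val_device *device = val_device_from_handle(_device);'. */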
VAL_DEFINE_HANDLE_CASTS(val_cmd_buffer, VkCommandBuffer)
VAL_DEFINE_HANDLE_CASTS(val_device, VkDevice)
VAL_DEFINE_HANDLE_CASTS(val_instance, VkInstance)
VAL_DEFINE_HANDLE_CASTS(val_physical_device, VkPhysicalDevice)
VAL_DEFINE_HANDLE_CASTS(val_queue, VkQueue)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_cmd_pool, VkCommandPool)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_buffer, VkBuffer)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_buffer_view, VkBufferView)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_descriptor_pool, VkDescriptorPool)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_descriptor_set, VkDescriptorSet)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_descriptor_set_layout, VkDescriptorSetLayout)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_device_memory, VkDeviceMemory)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_event, VkEvent)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_framebuffer, VkFramebuffer)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_image, VkImage)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_image_view, VkImageView)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_pipeline_cache, VkPipelineCache)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_pipeline, VkPipeline)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_pipeline_layout, VkPipelineLayout)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_query_pool, VkQueryPool)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_render_pass, VkRenderPass)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_sampler, VkSampler)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_shader_module, VkShaderModule)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_fence, VkFence)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_semaphore, VkSemaphore)
/* Whenever we generate an error, pass it through this function. Useful for
* debugging, where we can break on it. Only call at error site, not when
* propagating errors. Might be useful to plug in a stack trace here.
*/
VkResult __vk_errorf(struct val_instance *instance, VkResult error, const char *file, int line, const char *format, ...);
#define VAL_DEBUG_ALL_ENTRYPOINTS (1 << 0)
#define vk_error(instance, error) __vk_errorf(instance, error, __FILE__, __LINE__, NULL)
#define vk_errorf(instance, error, format, ...) __vk_errorf(instance, error, __FILE__, __LINE__, format, ## __VA_ARGS__)
void __val_finishme(const char *file, int line, const char *format, ...)
val_printflike(3, 4);
#define val_finishme(format, ...) \
   __val_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)
#define stub_return(v) \
do { \
val_finishme("stub %s", __func__); \
return (v); \
} while (0)
#define stub() \
do { \
val_finishme("stub %s", __func__); \
return; \
} while (0)
struct val_shader_module {
struct vk_object_base base;
uint32_t size;
char data[0];
};
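/* The SPIR-V words live inline after the struct via the flexible 'data'
 * array (presumably allocated as one block when the shader module is
 * created); val_shader_compile_to_ir() reads them back as
 * (uint32_t *)module->data with module->size / 4 words. */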
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
assert(__builtin_popcount(vk_stage) == 1);
return ffs(vk_stage) - 1;
}
static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
return (1 << mesa_stage);
}
#define VAL_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
#define val_foreach_stage(stage, stage_bits) \
for (gl_shader_stage stage, \
__tmp = (gl_shader_stage)((stage_bits) & VAL_STAGE_MASK); \
stage = __builtin_ffs(__tmp) - 1, __tmp; \
__tmp &= ~(1 << (stage)))
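/* Usage sketch (hypothetical mask):
 *    val_foreach_stage(s, (1 << MESA_SHADER_VERTEX) | (1 << MESA_SHADER_FRAGMENT))
 * visits MESA_SHADER_VERTEX first and MESA_SHADER_FRAGMENT second, since
 * __builtin_ffs always picks the lowest set bit that remains. */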
struct val_physical_device {
VK_LOADER_DATA _loader_data;
struct val_instance * instance;
struct pipe_loader_device *pld;
struct pipe_screen *pscreen;
uint32_t max_images;
struct wsi_device wsi_device;
struct val_device_extension_table supported_extensions;
};
struct val_instance {
struct vk_object_base base;
VkAllocationCallbacks alloc;
uint32_t apiVersion;
int physicalDeviceCount;
struct val_physical_device physicalDevice;
uint64_t debug_flags;
struct pipe_loader_device *devs;
int num_devices;
struct val_instance_extension_table enabled_extensions;
struct val_instance_dispatch_table dispatch;
struct val_physical_device_dispatch_table physical_device_dispatch;
struct val_device_dispatch_table device_dispatch;
};
VkResult val_init_wsi(struct val_physical_device *physical_device);
void val_finish_wsi(struct val_physical_device *physical_device);
bool val_instance_extension_supported(const char *name);
uint32_t val_physical_device_api_version(struct val_physical_device *dev);
bool val_physical_device_extension_supported(struct val_physical_device *dev,
const char *name);
struct val_queue {
VK_LOADER_DATA _loader_data;
VkDeviceQueueCreateFlags flags;
struct val_device * device;
struct pipe_context *ctx;
bool shutdown;
thrd_t exec_thread;
mtx_t m;
cnd_t new_work;
struct list_head workqueue;
uint32_t count;
};
struct val_queue_work {
struct list_head list;
uint32_t cmd_buffer_count;
struct val_cmd_buffer **cmd_buffers;
struct val_fence *fence;
};
struct val_pipeline_cache {
struct vk_object_base base;
struct val_device * device;
VkAllocationCallbacks alloc;
};
struct val_device {
struct vk_device vk;
VkAllocationCallbacks alloc;
struct val_queue queue;
struct val_instance * instance;
struct val_physical_device *physical_device;
struct pipe_screen *pscreen;
mtx_t fence_lock;
struct val_device_extension_table enabled_extensions;
struct val_device_dispatch_table dispatch;
};
void val_device_get_cache_uuid(void *uuid);
struct val_device_memory {
struct vk_object_base base;
struct pipe_memory_allocation *pmem;
uint32_t type_index;
VkDeviceSize map_size;
void * map;
};
struct val_image {
struct vk_object_base base;
VkImageType type;
VkFormat vk_format;
VkDeviceSize size;
uint32_t alignment;
struct pipe_resource *bo;
};
static inline uint32_t
val_get_layerCount(const struct val_image *image,
const VkImageSubresourceRange *range)
{
return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
image->bo->array_size - range->baseArrayLayer : range->layerCount;
}
static inline uint32_t
val_get_levelCount(const struct val_image *image,
const VkImageSubresourceRange *range)
{
return range->levelCount == VK_REMAINING_MIP_LEVELS ?
(image->bo->last_level + 1) - range->baseMipLevel : range->levelCount;
}
struct val_image_create_info {
const VkImageCreateInfo *vk_info;
uint32_t bind_flags;
uint32_t stride;
};
VkResult
val_image_create(VkDevice _device,
const struct val_image_create_info *create_info,
const VkAllocationCallbacks* alloc,
VkImage *pImage);
struct val_image_view {
struct vk_object_base base;
const struct val_image *image; /**< VkImageViewCreateInfo::image */
VkImageViewType view_type;
VkFormat format;
enum pipe_format pformat;
VkComponentMapping components;
VkImageSubresourceRange subresourceRange;
struct pipe_surface *surface; /* have we created a pipe surface for this? */
};
struct val_subpass_attachment {
uint32_t attachment;
VkImageLayout layout;
bool in_render_loop;
};
struct val_subpass {
uint32_t attachment_count;
struct val_subpass_attachment * attachments;
uint32_t input_count;
uint32_t color_count;
struct val_subpass_attachment * input_attachments;
struct val_subpass_attachment * color_attachments;
struct val_subpass_attachment * resolve_attachments;
struct val_subpass_attachment * depth_stencil_attachment;
struct val_subpass_attachment * ds_resolve_attachment;
/** Subpass has at least one color resolve attachment */
bool has_color_resolve;
/** Subpass has at least one color attachment */
bool has_color_att;
VkSampleCountFlagBits max_sample_count;
};
struct val_render_pass_attachment {
VkFormat format;
uint32_t samples;
VkAttachmentLoadOp load_op;
VkAttachmentLoadOp stencil_load_op;
VkImageLayout initial_layout;
VkImageLayout final_layout;
/* The subpass id in which the attachment will be used first/last. */
uint32_t first_subpass_idx;
uint32_t last_subpass_idx;
};
struct val_render_pass {
struct vk_object_base base;
uint32_t attachment_count;
uint32_t subpass_count;
struct val_subpass_attachment * subpass_attachments;
struct val_render_pass_attachment * attachments;
struct val_subpass subpasses[0];
};
struct val_sampler {
struct vk_object_base base;
VkSamplerCreateInfo create_info;
uint32_t state[4];
};
struct val_framebuffer {
struct vk_object_base base;
uint32_t width;
uint32_t height;
uint32_t layers;
uint32_t attachment_count;
struct val_image_view * attachments[0];
};
struct val_descriptor_set_binding_layout {
uint16_t descriptor_index;
   VkDescriptorType type;
   /* Number of array elements in this binding */
   uint16_t array_size;
bool valid;
int16_t dynamic_index;
struct {
int16_t const_buffer_index;
int16_t shader_buffer_index;
int16_t sampler_index;
int16_t sampler_view_index;
int16_t image_index;
} stage[MESA_SHADER_STAGES];
/* Immutable samplers (or NULL if no immutable samplers) */
struct val_sampler **immutable_samplers;
};
struct val_descriptor_set_layout {
struct vk_object_base base;
/* Number of bindings in this descriptor set */
uint16_t binding_count;
/* Total size of the descriptor set with room for all array entries */
uint16_t size;
/* Shader stages affected by this descriptor set */
uint16_t shader_stages;
struct {
uint16_t const_buffer_count;
uint16_t shader_buffer_count;
uint16_t sampler_count;
uint16_t sampler_view_count;
uint16_t image_count;
} stage[MESA_SHADER_STAGES];
/* Number of dynamic offsets used by this descriptor set */
uint16_t dynamic_offset_count;
/* Bindings in this descriptor set */
struct val_descriptor_set_binding_layout binding[0];
};
struct val_descriptor {
VkDescriptorType type;
union {
struct {
struct val_image_view *image_view;
struct val_sampler *sampler;
};
struct {
uint64_t offset;
uint64_t range;
struct val_buffer *buffer;
} buf;
struct val_buffer_view *buffer_view;
};
};
struct val_descriptor_set {
struct vk_object_base base;
const struct val_descriptor_set_layout *layout;
struct list_head link;
struct val_descriptor descriptors[0];
};
struct val_descriptor_pool {
struct vk_object_base base;
VkDescriptorPoolCreateFlags flags;
uint32_t max_sets;
struct list_head sets;
};
VkResult
val_descriptor_set_create(struct val_device *device,
const struct val_descriptor_set_layout *layout,
struct val_descriptor_set **out_set);
void
val_descriptor_set_destroy(struct val_device *device,
struct val_descriptor_set *set);
struct val_pipeline_layout {
struct vk_object_base base;
struct {
struct val_descriptor_set_layout *layout;
uint32_t dynamic_offset_start;
} set[MAX_SETS];
uint32_t num_sets;
uint32_t push_constant_size;
struct {
bool has_dynamic_offsets;
} stage[MESA_SHADER_STAGES];
};
struct val_pipeline {
struct vk_object_base base;
struct val_device * device;
struct val_pipeline_layout * layout;
bool is_compute_pipeline;
bool force_min_sample;
nir_shader *pipeline_nir[MESA_SHADER_STAGES];
void *shader_cso[PIPE_SHADER_TYPES];
VkGraphicsPipelineCreateInfo graphics_create_info;
VkComputePipelineCreateInfo compute_create_info;
};
struct val_event {
struct vk_object_base base;
uint64_t event_storage;
};
struct val_fence {
struct vk_object_base base;
bool signaled;
struct pipe_fence_handle *handle;
};
struct val_semaphore {
struct vk_object_base base;
bool dummy;
};
struct val_buffer {
struct vk_object_base base;
struct val_device * device;
VkDeviceSize size;
VkBufferUsageFlags usage;
VkDeviceSize offset;
struct pipe_resource *bo;
uint64_t total_size;
};
struct val_buffer_view {
struct vk_object_base base;
VkFormat format;
enum pipe_format pformat;
struct val_buffer *buffer;
uint32_t offset;
uint64_t range;
};
struct val_query_pool {
struct vk_object_base base;
VkQueryType type;
uint32_t count;
enum pipe_query_type base_type;
struct pipe_query *queries[0];
};
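/* queries[] is a flexible array with one pipe_query pointer per slot; slots
 * start out NULL (the pool is zero-allocated) until a query is first used,
 * and both val_DestroyQueryPool() and val_GetQueryPoolResults() tolerate
 * NULL entries. */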
struct val_cmd_pool {
struct vk_object_base base;
VkAllocationCallbacks alloc;
struct list_head cmd_buffers;
struct list_head free_cmd_buffers;
};
enum val_cmd_buffer_status {
VAL_CMD_BUFFER_STATUS_INVALID,
VAL_CMD_BUFFER_STATUS_INITIAL,
VAL_CMD_BUFFER_STATUS_RECORDING,
VAL_CMD_BUFFER_STATUS_EXECUTABLE,
VAL_CMD_BUFFER_STATUS_PENDING,
};
struct val_cmd_buffer {
struct vk_object_base base;
struct val_device * device;
VkCommandBufferLevel level;
enum val_cmd_buffer_status status;
struct val_cmd_pool * pool;
struct list_head pool_link;
struct list_head cmds;
uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
};
/* In the same order as the command buffer building commands in the spec. */
enum val_cmds {
VAL_CMD_BIND_PIPELINE,
VAL_CMD_SET_VIEWPORT,
VAL_CMD_SET_SCISSOR,
VAL_CMD_SET_LINE_WIDTH,
VAL_CMD_SET_DEPTH_BIAS,
VAL_CMD_SET_BLEND_CONSTANTS,
VAL_CMD_SET_DEPTH_BOUNDS,
VAL_CMD_SET_STENCIL_COMPARE_MASK,
VAL_CMD_SET_STENCIL_WRITE_MASK,
VAL_CMD_SET_STENCIL_REFERENCE,
VAL_CMD_BIND_DESCRIPTOR_SETS,
VAL_CMD_BIND_INDEX_BUFFER,
VAL_CMD_BIND_VERTEX_BUFFERS,
VAL_CMD_DRAW,
VAL_CMD_DRAW_INDEXED,
VAL_CMD_DRAW_INDIRECT,
VAL_CMD_DRAW_INDEXED_INDIRECT,
VAL_CMD_DISPATCH,
VAL_CMD_DISPATCH_INDIRECT,
VAL_CMD_COPY_BUFFER,
VAL_CMD_COPY_IMAGE,
VAL_CMD_BLIT_IMAGE,
VAL_CMD_COPY_BUFFER_TO_IMAGE,
VAL_CMD_COPY_IMAGE_TO_BUFFER,
VAL_CMD_UPDATE_BUFFER,
VAL_CMD_FILL_BUFFER,
VAL_CMD_CLEAR_COLOR_IMAGE,
VAL_CMD_CLEAR_DEPTH_STENCIL_IMAGE,
VAL_CMD_CLEAR_ATTACHMENTS,
VAL_CMD_RESOLVE_IMAGE,
VAL_CMD_SET_EVENT,
VAL_CMD_RESET_EVENT,
VAL_CMD_WAIT_EVENTS,
VAL_CMD_PIPELINE_BARRIER,
VAL_CMD_BEGIN_QUERY,
VAL_CMD_END_QUERY,
VAL_CMD_RESET_QUERY_POOL,
VAL_CMD_WRITE_TIMESTAMP,
VAL_CMD_COPY_QUERY_POOL_RESULTS,
VAL_CMD_PUSH_CONSTANTS,
VAL_CMD_BEGIN_RENDER_PASS,
VAL_CMD_NEXT_SUBPASS,
VAL_CMD_END_RENDER_PASS,
VAL_CMD_EXECUTE_COMMANDS,
};
struct val_cmd_bind_pipeline {
VkPipelineBindPoint bind_point;
struct val_pipeline *pipeline;
};
struct val_cmd_set_viewport {
uint32_t first_viewport;
uint32_t viewport_count;
VkViewport viewports[16];
};
struct val_cmd_set_scissor {
uint32_t first_scissor;
uint32_t scissor_count;
VkRect2D scissors[16];
};
struct val_cmd_set_line_width {
float line_width;
};
struct val_cmd_set_depth_bias {
float constant_factor;
float clamp;
float slope_factor;
};
struct val_cmd_set_blend_constants {
float blend_constants[4];
};
struct val_cmd_set_depth_bounds {
float min_depth;
float max_depth;
};
struct val_cmd_set_stencil_vals {
VkStencilFaceFlags face_mask;
uint32_t value;
};
struct val_cmd_bind_descriptor_sets {
VkPipelineBindPoint bind_point;
struct val_pipeline_layout *layout;
uint32_t first;
uint32_t count;
struct val_descriptor_set **sets;
uint32_t dynamic_offset_count;
const uint32_t *dynamic_offsets;
};
struct val_cmd_bind_index_buffer {
const struct val_buffer *buffer;
VkDeviceSize offset;
VkIndexType index_type;
};
struct val_cmd_bind_vertex_buffers {
uint32_t first;
uint32_t binding_count;
struct val_buffer **buffers;
const VkDeviceSize *offsets;
};
struct val_cmd_draw {
uint32_t vertex_count;
uint32_t instance_count;
uint32_t first_vertex;
uint32_t first_instance;
};
struct val_cmd_draw_indexed {
uint32_t index_count;
uint32_t instance_count;
uint32_t first_index;
uint32_t vertex_offset;
uint32_t first_instance;
};
struct val_cmd_draw_indirect {
VkDeviceSize offset;
struct val_buffer *buffer;
uint32_t draw_count;
uint32_t stride;
};
struct val_cmd_dispatch {
uint32_t x;
uint32_t y;
uint32_t z;
};
struct val_cmd_dispatch_indirect {
const struct val_buffer *buffer;
VkDeviceSize offset;
};
struct val_cmd_copy_buffer {
struct val_buffer *src;
struct val_buffer *dst;
uint32_t region_count;
const VkBufferCopy *regions;
};
struct val_cmd_copy_image {
struct val_image *src;
struct val_image *dst;
VkImageLayout src_layout;
VkImageLayout dst_layout;
uint32_t region_count;
const VkImageCopy *regions;
};
struct val_cmd_blit_image {
struct val_image *src;
struct val_image *dst;
VkImageLayout src_layout;
VkImageLayout dst_layout;
uint32_t region_count;
const VkImageBlit *regions;
VkFilter filter;
};
struct val_cmd_copy_buffer_to_image {
struct val_buffer *src;
struct val_image *dst;
VkImageLayout dst_layout;
uint32_t region_count;
const VkBufferImageCopy *regions;
};
struct val_cmd_copy_image_to_buffer {
struct val_image *src;
struct val_buffer *dst;
VkImageLayout src_layout;
uint32_t region_count;
const VkBufferImageCopy *regions;
};
struct val_cmd_update_buffer {
struct val_buffer *buffer;
VkDeviceSize offset;
VkDeviceSize data_size;
char data[0];
};
struct val_cmd_fill_buffer {
struct val_buffer *buffer;
VkDeviceSize offset;
VkDeviceSize fill_size;
uint32_t data;
};
struct val_cmd_clear_color_image {
struct val_image *image;
VkImageLayout layout;
VkClearColorValue clear_val;
uint32_t range_count;
VkImageSubresourceRange *ranges;
};
struct val_cmd_clear_ds_image {
struct val_image *image;
VkImageLayout layout;
VkClearDepthStencilValue clear_val;
uint32_t range_count;
VkImageSubresourceRange *ranges;
};
struct val_cmd_clear_attachments {
uint32_t attachment_count;
VkClearAttachment *attachments;
uint32_t rect_count;
VkClearRect *rects;
};
struct val_cmd_resolve_image {
struct val_image *src;
struct val_image *dst;
VkImageLayout src_layout;
VkImageLayout dst_layout;
uint32_t region_count;
VkImageResolve *regions;
};
struct val_cmd_event_set {
struct val_event *event;
bool value;
bool flush;
};
struct val_cmd_wait_events {
uint32_t event_count;
struct val_event **events;
VkPipelineStageFlags src_stage_mask;
VkPipelineStageFlags dst_stage_mask;
uint32_t memory_barrier_count;
VkMemoryBarrier *memory_barriers;
uint32_t buffer_memory_barrier_count;
VkBufferMemoryBarrier *buffer_memory_barriers;
uint32_t image_memory_barrier_count;
VkImageMemoryBarrier *image_memory_barriers;
};
struct val_cmd_pipeline_barrier {
VkPipelineStageFlags src_stage_mask;
VkPipelineStageFlags dst_stage_mask;
bool by_region;
uint32_t memory_barrier_count;
VkMemoryBarrier *memory_barriers;
uint32_t buffer_memory_barrier_count;
VkBufferMemoryBarrier *buffer_memory_barriers;
uint32_t image_memory_barrier_count;
VkImageMemoryBarrier *image_memory_barriers;
};
struct val_cmd_query_cmd {
struct val_query_pool *pool;
uint32_t query;
uint32_t index;
bool precise;
bool flush;
};
struct val_cmd_copy_query_pool_results {
struct val_query_pool *pool;
uint32_t first_query;
uint32_t query_count;
struct val_buffer *dst;
VkDeviceSize dst_offset;
VkDeviceSize stride;
VkQueryResultFlags flags;
};
struct val_cmd_push_constants {
VkShaderStageFlags stage;
uint32_t offset;
uint32_t size;
uint32_t val[1];
};
struct val_attachment_state {
VkImageAspectFlags pending_clear_aspects;
VkClearValue clear_value;
};
struct val_cmd_begin_render_pass {
struct val_framebuffer *framebuffer;
struct val_render_pass *render_pass;
VkRect2D render_area;
struct val_attachment_state *attachments;
};
struct val_cmd_next_subpass {
VkSubpassContents contents;
};
struct val_cmd_execute_commands {
uint32_t command_buffer_count;
struct val_cmd_buffer *cmd_buffers[0];
};
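/* Every recorded command becomes one entry on the command buffer's 'cmds'
 * list: cmd_type holds a value from enum val_cmds and selects which member
 * of the union below is meaningful when val_execute_cmds() walks the list. */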
struct val_cmd_buffer_entry {
struct list_head cmd_link;
uint32_t cmd_type;
union {
struct val_cmd_bind_pipeline pipeline;
struct val_cmd_set_viewport set_viewport;
struct val_cmd_set_scissor set_scissor;
struct val_cmd_set_line_width set_line_width;
struct val_cmd_set_depth_bias set_depth_bias;
struct val_cmd_set_blend_constants set_blend_constants;
struct val_cmd_set_depth_bounds set_depth_bounds;
struct val_cmd_set_stencil_vals stencil_vals;
struct val_cmd_bind_descriptor_sets descriptor_sets;
struct val_cmd_bind_vertex_buffers vertex_buffers;
struct val_cmd_bind_index_buffer index_buffer;
struct val_cmd_draw draw;
struct val_cmd_draw_indexed draw_indexed;
struct val_cmd_draw_indirect draw_indirect;
struct val_cmd_dispatch dispatch;
struct val_cmd_dispatch_indirect dispatch_indirect;
struct val_cmd_copy_buffer copy_buffer;
struct val_cmd_copy_image copy_image;
struct val_cmd_blit_image blit_image;
struct val_cmd_copy_buffer_to_image buffer_to_img;
struct val_cmd_copy_image_to_buffer img_to_buffer;
struct val_cmd_update_buffer update_buffer;
struct val_cmd_fill_buffer fill_buffer;
struct val_cmd_clear_color_image clear_color_image;
struct val_cmd_clear_ds_image clear_ds_image;
struct val_cmd_clear_attachments clear_attachments;
struct val_cmd_resolve_image resolve_image;
struct val_cmd_event_set event_set;
struct val_cmd_wait_events wait_events;
struct val_cmd_pipeline_barrier pipeline_barrier;
struct val_cmd_query_cmd query;
struct val_cmd_copy_query_pool_results copy_query_pool_results;
struct val_cmd_push_constants push_constants;
struct val_cmd_begin_render_pass begin_render_pass;
struct val_cmd_next_subpass next_subpass;
struct val_cmd_execute_commands execute_commands;
} u;
};
VkResult val_execute_cmds(struct val_device *device,
struct val_queue *queue,
struct val_fence *fence,
struct val_cmd_buffer *cmd_buffer);
enum pipe_format vk_format_to_pipe(VkFormat format);
static inline VkImageAspectFlags
vk_format_aspects(VkFormat format)
{
switch (format) {
case VK_FORMAT_UNDEFINED:
return 0;
case VK_FORMAT_S8_UINT:
return VK_IMAGE_ASPECT_STENCIL_BIT;
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_X8_D24_UNORM_PACK32:
case VK_FORMAT_D32_SFLOAT:
return VK_IMAGE_ASPECT_DEPTH_BIT;
default:
return VK_IMAGE_ASPECT_COLOR_BIT;
}
}
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,136 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
#include "pipe/p_context.h"
VkResult val_CreateQueryPool(
VkDevice _device,
const VkQueryPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkQueryPool* pQueryPool)
{
VAL_FROM_HANDLE(val_device, device, _device);
enum pipe_query_type pipeq;
switch (pCreateInfo->queryType) {
case VK_QUERY_TYPE_OCCLUSION:
pipeq = PIPE_QUERY_OCCLUSION_COUNTER;
break;
case VK_QUERY_TYPE_TIMESTAMP:
pipeq = PIPE_QUERY_TIMESTAMP;
break;
default:
return VK_ERROR_FEATURE_NOT_PRESENT;
}
struct val_query_pool *pool;
uint32_t pool_size = sizeof(*pool) + pCreateInfo->queryCount * sizeof(struct pipe_query *);
pool = vk_zalloc2(&device->alloc, pAllocator,
pool_size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!pool)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &pool->base,
VK_OBJECT_TYPE_QUERY_POOL);
pool->type = pCreateInfo->queryType;
pool->count = pCreateInfo->queryCount;
pool->base_type = pipeq;
*pQueryPool = val_query_pool_to_handle(pool);
return VK_SUCCESS;
}
void val_DestroyQueryPool(
VkDevice _device,
VkQueryPool _pool,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_query_pool, pool, _pool);
if (!pool)
return;
for (unsigned i = 0; i < pool->count; i++)
if (pool->queries[i])
device->queue.ctx->destroy_query(device->queue.ctx, pool->queries[i]);
vk_object_base_finish(&pool->base);
vk_free2(&device->alloc, pAllocator, pool);
}
VkResult val_GetQueryPoolResults(
VkDevice _device,
VkQueryPool queryPool,
uint32_t firstQuery,
uint32_t queryCount,
size_t dataSize,
void* pData,
VkDeviceSize stride,
VkQueryResultFlags flags)
{
VAL_FROM_HANDLE(val_device, device, _device);
VAL_FROM_HANDLE(val_query_pool, pool, queryPool);
VkResult vk_result = VK_SUCCESS;
val_DeviceWaitIdle(_device);
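   /* Result packing below: each query writes 8 bytes (64-bit) or 4 bytes
    * (32-bit), plus one more word of the same width when
    * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set; consecutive queries are
    * 'stride' bytes apart in pData, so e.g. 64-bit results with availability
    * need a stride of at least 16 bytes. */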
for (unsigned i = firstQuery; i < firstQuery + queryCount; i++) {
uint8_t *dptr = (uint8_t *)((char *)pData + (stride * (i - firstQuery)));
union pipe_query_result result;
bool ready = false;
if (pool->queries[i]) {
ready = device->queue.ctx->get_query_result(device->queue.ctx,
pool->queries[i],
(flags & VK_QUERY_RESULT_WAIT_BIT),
&result);
} else {
result.u64 = 0;
}
if (!ready && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
vk_result = VK_NOT_READY;
if (flags & VK_QUERY_RESULT_64_BIT) {
if (ready || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
*(uint64_t *)dptr = result.u64;
dptr += 8;
} else {
if (ready || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
if (result.u64 > UINT32_MAX)
*(uint32_t *)dptr = UINT32_MAX;
else
*(uint32_t *)dptr = result.u32;
}
dptr += 4;
}
if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
if (flags & VK_QUERY_RESULT_64_BIT)
*(uint64_t *)dptr = ready;
else
*(uint32_t *)dptr = ready;
}
}
return vk_result;
}

View File

@ -0,0 +1,58 @@
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
#include "vk_enum_to_str.h"
void val_printflike(3, 4)
__val_finishme(const char *file, int line, const char *format, ...)
{
va_list ap;
char buffer[256];
va_start(ap, format);
vsnprintf(buffer, sizeof(buffer), format, ap);
va_end(ap);
fprintf(stderr, "%s:%d: FINISHME: %s\n", file, line, buffer);
}
VkResult
__vk_errorf(struct val_instance *instance, VkResult error, const char *file, int line, const char *format, ...)
{
va_list ap;
char buffer[256];
const char *error_str = vk_Result_to_str(error);
if (format) {
va_start(ap, format);
vsnprintf(buffer, sizeof(buffer), format, ap);
va_end(ap);
fprintf(stderr, "%s:%d: %s (%s)\n", file, line, buffer, error_str);
} else {
fprintf(stderr, "%s:%d: %s\n", file, line, error_str);
}
return error;
}

View File

@ -0,0 +1,277 @@
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_wsi.h"
static PFN_vkVoidFunction
val_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{
return val_lookup_entrypoint(pName);
}
VkResult
val_init_wsi(struct val_physical_device *physical_device)
{
return wsi_device_init(&physical_device->wsi_device,
val_physical_device_to_handle(physical_device),
val_wsi_proc_addr,
&physical_device->instance->alloc,
-1, NULL, true);
}
void
val_finish_wsi(struct val_physical_device *physical_device)
{
wsi_device_finish(&physical_device->wsi_device,
&physical_device->instance->alloc);
}
void val_DestroySurfaceKHR(
VkInstance _instance,
VkSurfaceKHR _surface,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_instance, instance, _instance);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
vk_free2(&instance->alloc, pAllocator, surface);
}
VkResult val_GetPhysicalDeviceSurfaceSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
VkSurfaceKHR surface,
VkBool32* pSupported)
{
VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
return wsi_common_get_surface_support(&device->wsi_device,
queueFamilyIndex,
surface,
pSupported);
}
VkResult val_GetPhysicalDeviceSurfaceCapabilitiesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
{
VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
return wsi_common_get_surface_capabilities(&device->wsi_device,
surface,
pSurfaceCapabilities);
}
VkResult val_GetPhysicalDeviceSurfaceCapabilities2KHR(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
VkSurfaceCapabilities2KHR* pSurfaceCapabilities)
{
VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
return wsi_common_get_surface_capabilities2(&device->wsi_device,
pSurfaceInfo,
pSurfaceCapabilities);
}
VkResult val_GetPhysicalDeviceSurfaceCapabilities2EXT(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilities2EXT* pSurfaceCapabilities)
{
VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
return wsi_common_get_surface_capabilities2ext(&device->wsi_device,
surface,
pSurfaceCapabilities);
}
VkResult val_GetPhysicalDeviceSurfaceFormatsKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pSurfaceFormatCount,
VkSurfaceFormatKHR* pSurfaceFormats)
{
VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
return wsi_common_get_surface_formats(&device->wsi_device,
surface,
pSurfaceFormatCount,
pSurfaceFormats);
}
VkResult val_GetPhysicalDeviceSurfacePresentModesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes)
{
VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
return wsi_common_get_surface_present_modes(&device->wsi_device,
surface,
pPresentModeCount,
pPresentModes);
}
VkResult val_CreateSwapchainKHR(
VkDevice _device,
const VkSwapchainCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSwapchainKHR* pSwapchain)
{
VAL_FROM_HANDLE(val_device, device, _device);
const VkAllocationCallbacks *alloc;
if (pAllocator)
alloc = pAllocator;
else
alloc = &device->alloc;
return wsi_common_create_swapchain(&device->physical_device->wsi_device,
val_device_to_handle(device),
pCreateInfo,
alloc,
pSwapchain);
}
void val_DestroySwapchainKHR(
VkDevice _device,
VkSwapchainKHR swapchain,
const VkAllocationCallbacks* pAllocator)
{
VAL_FROM_HANDLE(val_device, device, _device);
const VkAllocationCallbacks *alloc;
if (pAllocator)
alloc = pAllocator;
else
alloc = &device->alloc;
wsi_common_destroy_swapchain(_device, swapchain, alloc);
}
VkResult val_GetSwapchainImagesKHR(
VkDevice device,
VkSwapchainKHR swapchain,
uint32_t* pSwapchainImageCount,
VkImage* pSwapchainImages)
{
return wsi_common_get_images(swapchain,
pSwapchainImageCount,
pSwapchainImages);
}
VkResult val_AcquireNextImageKHR(
VkDevice device,
VkSwapchainKHR swapchain,
uint64_t timeout,
VkSemaphore semaphore,
VkFence fence,
uint32_t* pImageIndex)
{
VkAcquireNextImageInfoKHR acquire_info = {
.sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
.swapchain = swapchain,
.timeout = timeout,
.semaphore = semaphore,
.fence = fence,
.deviceMask = 0,
};
return val_AcquireNextImage2KHR(device, &acquire_info, pImageIndex);
}
VkResult val_AcquireNextImage2KHR(
VkDevice _device,
const VkAcquireNextImageInfoKHR* pAcquireInfo,
uint32_t* pImageIndex)
{
VAL_FROM_HANDLE(val_device, device, _device);
struct val_physical_device *pdevice = device->physical_device;
VkResult result = wsi_common_acquire_next_image2(&pdevice->wsi_device,
_device,
pAcquireInfo,
pImageIndex);
#if 0
VAL_FROM_HANDLE(val_fence, fence, pAcquireInfo->fence);
if (fence && (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR)) {
if (fence->fence)
device->ws->signal_fence(fence->fence);
if (fence->temp_syncobj) {
device->ws->signal_syncobj(device->ws, fence->temp_syncobj);
} else if (fence->syncobj) {
device->ws->signal_syncobj(device->ws, fence->syncobj);
}
}
#endif
return result;
}
VkResult val_QueuePresentKHR(
VkQueue _queue,
const VkPresentInfoKHR* pPresentInfo)
{
VAL_FROM_HANDLE(val_queue, queue, _queue);
return wsi_common_queue_present(&queue->device->physical_device->wsi_device,
val_device_to_handle(queue->device),
_queue, 0,
pPresentInfo);
}
VkResult val_GetDeviceGroupPresentCapabilitiesKHR(
VkDevice device,
VkDeviceGroupPresentCapabilitiesKHR* pCapabilities)
{
memset(pCapabilities->presentMask, 0,
sizeof(pCapabilities->presentMask));
pCapabilities->presentMask[0] = 0x1;
pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
return VK_SUCCESS;
}
VkResult val_GetDeviceGroupSurfacePresentModesKHR(
VkDevice device,
VkSurfaceKHR surface,
VkDeviceGroupPresentModeFlagsKHR* pModes)
{
*pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
return VK_SUCCESS;
}
VkResult val_GetPhysicalDevicePresentRectanglesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pRectCount,
VkRect2D* pRects)
{
VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
return wsi_common_get_present_rectangles(&device->wsi_device,
surface,
pRectCount, pRects);
}

View File

@ -0,0 +1,74 @@
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#pragma once
#include "val_private.h"
struct val_swapchain;
struct val_wsi_interface {
VkResult (*get_support)(VkIcdSurfaceBase *surface,
struct val_physical_device *device,
uint32_t queueFamilyIndex,
VkBool32* pSupported);
VkResult (*get_capabilities)(VkIcdSurfaceBase *surface,
struct val_physical_device *device,
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
VkResult (*get_formats)(VkIcdSurfaceBase *surface,
struct val_physical_device *device,
uint32_t* pSurfaceFormatCount,
VkSurfaceFormatKHR* pSurfaceFormats);
VkResult (*get_present_modes)(VkIcdSurfaceBase *surface,
struct val_physical_device *device,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes);
VkResult (*create_swapchain)(VkIcdSurfaceBase *surface,
struct val_device *device,
const VkSwapchainCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
struct val_swapchain **swapchain);
};
struct val_swapchain {
struct val_device *device;
VkResult (*destroy)(struct val_swapchain *swapchain,
const VkAllocationCallbacks *pAllocator);
VkResult (*get_images)(struct val_swapchain *swapchain,
uint32_t *pCount, VkImage *pSwapchainImages);
VkResult (*acquire_next_image)(struct val_swapchain *swap_chain,
uint64_t timeout, VkSemaphore semaphore,
uint32_t *image_index);
VkResult (*queue_present)(struct val_swapchain *swap_chain,
struct val_queue *queue,
uint32_t image_index);
};
VAL_DEFINE_NONDISP_HANDLE_CASTS(_VkIcdSurfaceBase, VkSurfaceKHR)
VAL_DEFINE_NONDISP_HANDLE_CASTS(val_swapchain, VkSwapchainKHR)
VkResult val_x11_init_wsi(struct val_instance *instance);
void val_x11_finish_wsi(struct val_instance *instance);
VkResult val_wl_init_wsi(struct val_instance *instance);
void val_wl_finish_wsi(struct val_instance *instance);
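
val_wsi_interface and val_swapchain are function-pointer tables a platform backend fills in. Purely as an illustration (the example_* names below are invented; the X11/Wayland paths in this import actually route through the shared wsi_common code), a backend hook-up looks roughly like:

/* Illustrative only; a real backend also implements the capability,
 * format, present-mode and swapchain-creation hooks. */
static VkResult example_get_support(VkIcdSurfaceBase *surface,
                                    struct val_physical_device *device,
                                    uint32_t queueFamilyIndex,
                                    VkBool32 *pSupported)
{
   /* Trivial backend that claims support on every queue family. */
   *pSupported = VK_TRUE;
   return VK_SUCCESS;
}

static const struct val_wsi_interface example_wsi = {
   .get_support = example_get_support,
   /* .get_capabilities, .get_formats, .get_present_modes and
    * .create_swapchain would be wired up the same way. */
};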

@ -0,0 +1,55 @@
/*
* Copyright © 2016 Red Hat
* based on intel anv code:
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "wsi_common_wayland.h"
#include "val_private.h"
VkBool32 val_GetPhysicalDeviceWaylandPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display* display)
{
VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
return wsi_wl_get_presentation_support(&physical_device->wsi_device, display);
}
VkResult val_CreateWaylandSurfaceKHR(
VkInstance _instance,
const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
VAL_FROM_HANDLE(val_instance, instance, _instance);
const VkAllocationCallbacks *alloc;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
if (pAllocator)
alloc = pAllocator;
else
alloc = &instance->alloc;
return wsi_create_wl_surface(alloc, pCreateInfo, pSurface);
}

@ -0,0 +1,93 @@
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <X11/Xlib-xcb.h>
#include <xcb/xcb.h>
#include "wsi_common_x11.h"
#include "val_private.h"
VkBool32 val_GetPhysicalDeviceXcbPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
xcb_connection_t* connection,
xcb_visualid_t visual_id)
{
VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
return wsi_get_physical_device_xcb_presentation_support(
&device->wsi_device,
queueFamilyIndex,
connection, visual_id);
}
VkBool32 val_GetPhysicalDeviceXlibPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
Display* dpy,
VisualID visualID)
{
VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
return wsi_get_physical_device_xcb_presentation_support(
&device->wsi_device,
queueFamilyIndex,
XGetXCBConnection(dpy), visualID);
}
VkResult val_CreateXcbSurfaceKHR(
VkInstance _instance,
const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
VAL_FROM_HANDLE(val_instance, instance, _instance);
const VkAllocationCallbacks *alloc;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
if (pAllocator)
alloc = pAllocator;
else
alloc = &instance->alloc;
return wsi_create_xcb_surface(alloc, pCreateInfo, pSurface);
}
VkResult val_CreateXlibSurfaceKHR(
VkInstance _instance,
const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
VAL_FROM_HANDLE(val_instance, instance, _instance);
const VkAllocationCallbacks *alloc;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
if (pAllocator)
alloc = pAllocator;
else
alloc = &instance->alloc;
return wsi_create_xlib_surface(alloc, pCreateInfo, pSurface);
}

@ -225,3 +225,7 @@ if with_tests
endif
subdir('tests')
endif
if with_swrast_vk
subdir('frontends/vallium')
subdir('targets/vallium')
endif

@ -0,0 +1,27 @@
libvulkan_val = shared_library(
'vulkan_val',
[ 'target.c' ],
include_directories : [ inc_src, inc_util, inc_include, inc_gallium, inc_gallium_aux, inc_gallium_winsys, inc_gallium_drivers ],
link_whole : [ libvallium_st ],
link_with : [ libpipe_loader_static, libmegadriver_stub, libdri, libdricommon, libgallium, libwsw, libswdri, libws_null, libswkmsdri ],
gnu_symbol_visibility : 'hidden',
link_args : [ld_args_bsymbolic, ld_args_gc_sections],
dependencies : driver_swrast,
install : true,
name_suffix : 'so',
)
val_icd = custom_target(
'val_icd',
input : 'val_icd.py',
output : 'val_icd.@0@.json'.format(host_machine.cpu()),
command : [
prog_python, '@INPUT@',
'--lib-path', join_paths(get_option('prefix'), get_option('libdir')),
'--out', '@OUTPUT@',
],
depend_files : files('../../frontends/vallium/val_extensions.py'),
build_by_default : true,
install_dir : with_vulkan_icd_dir,
install : true,
)

@ -0,0 +1,3 @@
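/* Gallium target helpers that provide the screen-creation entry points the
 * pipe loader resolves in this megadriver-style target. */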
#include "target-helpers/drm_helper.h"
#include "target-helpers/sw_helper.h"

@ -0,0 +1,47 @@
# Copyright 2017 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sub license, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import os.path
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--out', help='Output json file.', required=True)
    parser.add_argument('--lib-path', help='Path to libvulkan_val.so')
    args = parser.parse_args()

    path = 'libvulkan_val.so'
    if args.lib_path:
        path = os.path.join(args.lib_path, path)

    json_data = {
        'file_format_version': '1.0.0',
        'ICD': {
            'library_path': path,
            'api_version': '1.1.107',
        },
    }

    with open(args.out, 'w') as f:
        json.dump(json_data, f, indent=4, sort_keys=True, separators=(',', ': '))