panfrost: Add a Vulkan driver for Midgard/Bifrost GPUs

Based on turnip.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Kristian H. Kristensen <hoegsberg@google.com>
Acked-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11139>
Author: Boris Brezillon <boris.brezillon@collabora.com>
Date: 2019-07-16 19:20:49 +02:00, committed by Marge Bot
Parent: 20b22efdcb
Commit: d970fe2e9d
26 changed files with 10703 additions and 2 deletions

meson.build

@@ -278,6 +278,7 @@ endif
with_intel_vk = _vulkan_drivers.contains('intel')
with_amd_vk = _vulkan_drivers.contains('amd')
with_freedreno_vk = _vulkan_drivers.contains('freedreno')
with_panfrost_vk = _vulkan_drivers.contains('panfrost')
with_swrast_vk = _vulkan_drivers.contains('swrast')
with_virtio_vk = _vulkan_drivers.contains('virtio-experimental')
with_freedreno_kgsl = get_option('freedreno-kgsl')

meson_options.txt

@@ -184,7 +184,7 @@ option(
'vulkan-drivers',
type : 'array',
value : ['auto'],
choices : ['auto', 'amd', 'broadcom', 'freedreno', 'intel', 'swrast', 'virtio-experimental'],
choices : ['auto', 'amd', 'broadcom', 'freedreno', 'intel', 'panfrost', 'swrast', 'virtio-experimental'],
description : 'List of vulkan drivers to build. If this is set to auto all drivers applicable to the target OS/architecture will be built'
)
option(

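With the option in place, the new driver can be selected at configure time; a minimal example (other drivers can be appended to the list as needed):

meson setup build -Dvulkan-drivers=panfrost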
src/meson.build

@@ -86,7 +86,7 @@ endif
if with_gallium_freedreno or with_freedreno_vk
subdir('freedreno')
endif
if with_gallium_panfrost or with_gallium_lima
if with_gallium_panfrost or with_gallium_lima or with_panfrost_vk
subdir('panfrost')
endif
if with_gallium_virgl or with_virtio_vk

src/panfrost/meson.build

@@ -66,3 +66,7 @@ bifrost_compiler = executable(
],
build_by_default : with_tools.contains('panfrost')
)
if with_panfrost_vk
subdir('vulkan')
endif

src/panfrost/vulkan/meson.build (new file)

@@ -0,0 +1,122 @@
# Copyright © 2021 Collabora Ltd.
#
# Derived from the freedreno driver which is:
# Copyright © 2017 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
panvk_entrypoints = custom_target(
'panvk_entrypoints.[ch]',
input : [vk_entrypoints_gen, vk_api_xml],
output : ['panvk_entrypoints.h', 'panvk_entrypoints.c'],
command : [
prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--proto', '--weak',
'--out-h', '@OUTPUT0@', '--out-c', '@OUTPUT1@', '--prefix', 'panvk',
],
)
libpanvk_files = files(
'panvk_cmd_buffer.c',
'panvk_cs.c',
'panvk_device.c',
'panvk_descriptor_set.c',
'panvk_formats.c',
'panvk_image.c',
'panvk_meta.c',
'panvk_pass.c',
'panvk_pipeline.c',
'panvk_pipeline_cache.c',
'panvk_private.h',
'panvk_query.c',
'panvk_shader.c',
'panvk_sync.c',
'panvk_util.c',
'panvk_varyings.c',
'panvk_wsi.c',
)
panvk_deps = []
panvk_flags = []
if system_has_kms_drm
panvk_flags += '-DVK_USE_PLATFORM_DISPLAY_KHR'
libpanvk_files += files('panvk_wsi_display.c')
endif
if with_platform_wayland
panvk_deps += [dep_wayland_client, dep_wl_protocols]
panvk_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
libpanvk_files += files('panvk_wsi_wayland.c')
libpanvk_files += [wayland_drm_client_protocol_h, wayland_drm_protocol_c]
endif
libvulkan_panfrost = shared_library(
'vulkan_panfrost',
[libpanvk_files, panvk_entrypoints],
include_directories : [
inc_include,
inc_src,
inc_compiler,
inc_gallium, # XXX: pipe/p_format.h
inc_gallium_aux, # XXX: renderonly
inc_vulkan_wsi,
inc_panfrost,
],
link_with : [
libvulkan_wsi,
libpanfrost_shared,
libpanfrost_midgard,
libpanfrost_bifrost,
libpanfrost_decode,
libpanfrost_lib,
libpanfrost_util,
],
dependencies : [
dep_dl,
dep_elf,
dep_libdrm,
dep_m,
dep_thread,
dep_valgrind,
idep_nir,
panvk_deps,
idep_vulkan_util,
idep_mesautil,
],
c_args : [no_override_init_args, panvk_flags],
link_args : [ld_args_bsymbolic, ld_args_gc_sections],
install : true,
)
panfrost_icd = custom_target(
'panfrost_icd',
input : [vk_icd_gen, vk_api_xml],
output : 'panfrost_icd.@0@.json'.format(host_machine.cpu()),
command : [
prog_python, '@INPUT0@',
'--api-version', '1.1', '--xml', '@INPUT1@',
'--lib-path', join_paths(get_option('prefix'), get_option('libdir'),
'libvulkan_panfrost.so'),
'--out', '@OUTPUT@',
],
build_by_default : true,
install_dir : with_vulkan_icd_dir,
install : true,
)
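# For reference, the generated manifest uses the standard Vulkan loader
# format; with the arguments above it should look roughly like this
# (illustration only, the library path varies with the configured
# prefix/libdir):
#
#   {
#       "file_format_version": "1.0.0",
#       "ICD": {
#           "library_path": "/usr/local/lib/libvulkan_panfrost.so",
#           "api_version": "1.1"
#       }
#   }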

(File diff suppressed because it is too large.)

src/panfrost/vulkan/panvk_cs.c (new file)

@@ -0,0 +1,850 @@
/*
* Copyright (C) 2021 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "util/macros.h"
#include "compiler/shader_enums.h"
#include "panfrost-quirks.h"
#include "pan_cs.h"
#include "pan_encoder.h"
#include "pan_pool.h"
#include "panvk_cs.h"
#include "panvk_private.h"
#include "panvk_varyings.h"
static mali_pixel_format
panvk_varying_hw_format(const struct panvk_device *dev,
const struct panvk_varyings_info *varyings,
gl_shader_stage stage, unsigned idx)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
gl_varying_slot loc = varyings->stage[stage].loc[idx];
bool fs = stage == MESA_SHADER_FRAGMENT;
switch (loc) {
case VARYING_SLOT_PNTC:
case VARYING_SLOT_PSIZ:
return (MALI_R16F << 12) |
(pdev->quirks & HAS_SWIZZLES ?
panfrost_get_default_swizzle(1) :
panfrost_bifrost_swizzle(1));
case VARYING_SLOT_POS:
return ((fs ? MALI_RGBA32F : MALI_SNAP_4) << 12) |
(pdev->quirks & HAS_SWIZZLES ?
panfrost_get_default_swizzle(4) :
panfrost_bifrost_swizzle(4));
default:
assert(!panvk_varying_is_builtin(stage, loc));
return pdev->formats[varyings->varying[loc].format].hw;
}
}
static void
panvk_emit_varying(const struct panvk_device *dev,
const struct panvk_varyings_info *varyings,
gl_shader_stage stage, unsigned idx,
void *attrib)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
gl_varying_slot loc = varyings->stage[stage].loc[idx];
bool fs = stage == MESA_SHADER_FRAGMENT;
pan_pack(attrib, ATTRIBUTE, cfg) {
if (!panvk_varying_is_builtin(stage, loc)) {
cfg.buffer_index = varyings->varying[loc].buf;
cfg.offset = varyings->varying[loc].offset;
} else {
cfg.buffer_index =
panvk_varying_buf_index(varyings,
panvk_varying_buf_id(fs, loc));
}
cfg.offset_enable = !pan_is_bifrost(pdev);
cfg.format = panvk_varying_hw_format(dev, varyings, stage, idx);
}
}
void
panvk_emit_varyings(const struct panvk_device *dev,
const struct panvk_varyings_info *varyings,
gl_shader_stage stage,
void *descs)
{
struct mali_attribute_packed *attrib = descs;
for (unsigned i = 0; i < varyings->stage[stage].count; i++)
panvk_emit_varying(dev, varyings, stage, i, attrib++);
}
static void
panvk_emit_varying_buf(const struct panvk_device *dev,
const struct panvk_varyings_info *varyings,
enum panvk_varying_buf_id id, void *buf)
{
unsigned buf_idx = panvk_varying_buf_index(varyings, id);
enum mali_attribute_special special_id = panvk_varying_special_buf_id(id);
pan_pack(buf, ATTRIBUTE_BUFFER, cfg) {
if (special_id) {
cfg.type = 0;
cfg.special = special_id;
} else {
unsigned offset = varyings->buf[buf_idx].address & 63;
cfg.stride = varyings->buf[buf_idx].stride;
cfg.size = varyings->buf[buf_idx].size + offset;
cfg.pointer = varyings->buf[buf_idx].address & ~63ULL;
}
}
}
void
panvk_emit_varying_bufs(const struct panvk_device *dev,
const struct panvk_varyings_info *varyings,
void *descs)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
struct mali_attribute_buffer_packed *buf = descs;
for (unsigned i = 0; i < PANVK_VARY_BUF_MAX; i++) {
if (varyings->buf_mask & (1 << i))
panvk_emit_varying_buf(dev, varyings, i, buf++);
}
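/* As on the attribute buffer path, a NULL entry stops prefetching on Bifrost. */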
if (pan_is_bifrost(pdev))
memset(buf, 0, sizeof(*buf));
}
static void
panvk_emit_attrib_buf(const struct panvk_device *dev,
const struct panvk_attribs_info *info,
const struct panvk_draw_info *draw,
const struct panvk_attrib_buf *bufs,
unsigned buf_count,
unsigned idx, void *desc)
{
ASSERTED const struct panfrost_device *pdev = &dev->physical_device->pdev;
const struct panvk_attrib_buf_info *buf_info = &info->buf[idx];
if (buf_info->special) {
assert(!pan_is_bifrost(pdev));
switch (buf_info->special_id) {
case PAN_VERTEX_ID:
panfrost_vertex_id(draw->padded_vertex_count, desc,
draw->instance_count > 1);
return;
case PAN_INSTANCE_ID:
panfrost_instance_id(draw->padded_vertex_count, desc,
draw->instance_count > 1);
return;
default:
unreachable("Invalid attribute ID");
}
}
assert(idx < buf_count);
const struct panvk_attrib_buf *buf = &bufs[idx];
unsigned divisor = buf_info->per_instance ?
draw->padded_vertex_count : 0;
unsigned stride = divisor && draw->instance_count == 1 ?
0 : buf_info->stride;
mali_ptr addr = buf->address & ~63ULL;
unsigned size = buf->size + (buf->address & 63);
/* TODO: support instanced arrays */
pan_pack(desc, ATTRIBUTE_BUFFER, cfg) {
if (draw->instance_count > 1 && divisor) {
cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
cfg.divisor = divisor;
}
cfg.pointer = addr;
cfg.stride = stride;
cfg.size = size;
}
}
void
panvk_emit_attrib_bufs(const struct panvk_device *dev,
const struct panvk_attribs_info *info,
const struct panvk_attrib_buf *bufs,
unsigned buf_count,
const struct panvk_draw_info *draw,
void *descs)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
struct mali_attribute_buffer_packed *buf = descs;
for (unsigned i = 0; i < info->buf_count; i++)
panvk_emit_attrib_buf(dev, info, draw, bufs, buf_count, i, buf++);
/* A NULL entry is needed to stop prefetching on Bifrost */
if (pan_is_bifrost(pdev))
memset(buf, 0, sizeof(*buf));
}
static void
panvk_emit_attrib(const struct panvk_device *dev,
const struct panvk_attribs_info *attribs,
const struct panvk_attrib_buf *bufs,
unsigned buf_count,
unsigned idx, void *attrib)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
pan_pack(attrib, ATTRIBUTE, cfg) {
cfg.buffer_index = attribs->attrib[idx].buf;
cfg.offset = attribs->attrib[idx].offset +
(bufs[cfg.buffer_index].address & 63);
cfg.format = pdev->formats[attribs->attrib[idx].format].hw;
}
}
void
panvk_emit_attribs(const struct panvk_device *dev,
const struct panvk_attribs_info *attribs,
const struct panvk_attrib_buf *bufs,
unsigned buf_count,
void *descs)
{
struct mali_attribute_packed *attrib = descs;
for (unsigned i = 0; i < attribs->attrib_count; i++)
panvk_emit_attrib(dev, attribs, bufs, buf_count, i, attrib++);
}
void
panvk_emit_ubos(const struct panvk_pipeline *pipeline,
const struct panvk_descriptor_state *state,
void *descs)
{
struct mali_uniform_buffer_packed *ubos = descs;
for (unsigned i = 0; i < ARRAY_SIZE(state->sets); i++) {
const struct panvk_descriptor_set_layout *set_layout =
pipeline->layout->sets[i].layout;
const struct panvk_descriptor_set *set = state->sets[i].set;
unsigned offset = pipeline->layout->sets[i].ubo_offset;
if (!set_layout)
continue;
if (!set) {
unsigned num_ubos = (set_layout->num_dynoffsets != 0) + set_layout->num_ubos;
memset(&ubos[offset], 0, num_ubos * sizeof(*ubos));
} else {
memcpy(&ubos[offset], set->ubos, set_layout->num_ubos * sizeof(*ubos));
if (set_layout->num_dynoffsets) {
pan_pack(&ubos[offset + set_layout->num_ubos], UNIFORM_BUFFER, cfg) {
cfg.pointer = state->sets[i].dynoffsets.gpu;
cfg.entries = DIV_ROUND_UP(set->layout->num_dynoffsets, 16);
}
}
}
}
for (unsigned i = 0; i < ARRAY_SIZE(pipeline->sysvals); i++) {
if (!pipeline->sysvals[i].ids.sysval_count)
continue;
pan_pack(&ubos[pipeline->sysvals[i].ubo_idx], UNIFORM_BUFFER, cfg) {
cfg.pointer = pipeline->sysvals[i].ubo ? :
state->sysvals[i];
cfg.entries = pipeline->sysvals[i].ids.sysval_count;
}
}
}
void
panvk_emit_vertex_job(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
const struct panvk_draw_info *draw,
void *job)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
void *section = pan_section_ptr(job, COMPUTE_JOB, INVOCATION);
memcpy(section, &draw->invocation, MALI_INVOCATION_LENGTH);
pan_section_pack(job, COMPUTE_JOB, PARAMETERS, cfg) {
cfg.job_task_split = 5;
}
pan_section_pack(job, COMPUTE_JOB, DRAW, cfg) {
cfg.draw_descriptor_is_64b = true;
if (!pan_is_bifrost(pdev))
cfg.texture_descriptor_is_64b = true;
cfg.state = pipeline->rsds[MESA_SHADER_VERTEX];
cfg.attributes = draw->stages[MESA_SHADER_VERTEX].attributes;
cfg.attribute_buffers = draw->attribute_bufs;
cfg.varyings = draw->stages[MESA_SHADER_VERTEX].varyings;
cfg.varying_buffers = draw->varying_bufs;
cfg.thread_storage = draw->tls;
cfg.offset_start = draw->offset_start;
cfg.instance_size = draw->instance_count > 1 ?
draw->padded_vertex_count : 1;
cfg.uniform_buffers = draw->ubos;
cfg.push_uniforms = draw->stages[MESA_SHADER_VERTEX].push_constants;
cfg.textures = draw->textures;
cfg.samplers = draw->samplers;
}
pan_section_pack(job, COMPUTE_JOB, DRAW_PADDING, cfg);
}
void
panvk_emit_tiler_job(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
const struct panvk_draw_info *draw,
void *job)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
void *section = pan_is_bifrost(pdev) ?
pan_section_ptr(job, BIFROST_TILER_JOB, INVOCATION) :
pan_section_ptr(job, MIDGARD_TILER_JOB, INVOCATION);
memcpy(section, &draw->invocation, MALI_INVOCATION_LENGTH);
section = pan_is_bifrost(pdev) ?
pan_section_ptr(job, BIFROST_TILER_JOB, PRIMITIVE) :
pan_section_ptr(job, MIDGARD_TILER_JOB, PRIMITIVE);
pan_pack(section, PRIMITIVE, cfg) {
cfg.draw_mode = pipeline->ia.topology;
if (pipeline->ia.writes_point_size)
cfg.point_size_array_format = MALI_POINT_SIZE_ARRAY_FORMAT_FP16;
cfg.first_provoking_vertex = true;
if (pipeline->ia.primitive_restart)
cfg.primitive_restart = MALI_PRIMITIVE_RESTART_IMPLICIT;
cfg.job_task_split = 6;
/* TODO: indexed draws */
cfg.index_count = draw->vertex_count;
}
section = pan_is_bifrost(pdev) ?
pan_section_ptr(job, BIFROST_TILER_JOB, PRIMITIVE_SIZE) :
pan_section_ptr(job, MIDGARD_TILER_JOB, PRIMITIVE_SIZE);
pan_pack(section, PRIMITIVE_SIZE, cfg) {
if (pipeline->ia.writes_point_size) {
cfg.size_array = draw->psiz;
} else {
cfg.constant = draw->line_width;
}
}
section = pan_is_bifrost(pdev) ?
pan_section_ptr(job, BIFROST_TILER_JOB, DRAW) :
pan_section_ptr(job, MIDGARD_TILER_JOB, DRAW);
pan_pack(section, DRAW, cfg) {
cfg.four_components_per_vertex = true;
cfg.draw_descriptor_is_64b = true;
if (!pan_is_bifrost(pdev))
cfg.texture_descriptor_is_64b = true;
cfg.front_face_ccw = pipeline->rast.front_ccw;
cfg.cull_front_face = pipeline->rast.cull_front_face;
cfg.cull_back_face = pipeline->rast.cull_back_face;
cfg.position = draw->position;
cfg.state = draw->fs_rsd;
cfg.attributes = draw->stages[MESA_SHADER_FRAGMENT].attributes;
cfg.attribute_buffers = draw->attribute_bufs;
cfg.viewport = draw->viewport;
cfg.varyings = draw->stages[MESA_SHADER_FRAGMENT].varyings;
cfg.varying_buffers = cfg.varyings ? draw->varying_bufs : 0;
if (pan_is_bifrost(pdev))
cfg.thread_storage = draw->tls;
else
cfg.fbd = draw->fb;
/* For all primitives but lines DRAW.flat_shading_vertex must
* be set to 0 and the provoking vertex is selected with the
* PRIMITIVE.first_provoking_vertex field.
*/
if (pipeline->ia.topology == MALI_DRAW_MODE_LINES ||
pipeline->ia.topology == MALI_DRAW_MODE_LINE_STRIP ||
pipeline->ia.topology == MALI_DRAW_MODE_LINE_LOOP) {
/* The logic is inverted on bifrost. */
cfg.flat_shading_vertex = pan_is_bifrost(pdev);
}
cfg.offset_start = draw->offset_start;
cfg.instance_size = draw->instance_count > 1 ?
draw->padded_vertex_count : 1;
cfg.uniform_buffers = draw->ubos;
cfg.push_uniforms = draw->stages[MESA_SHADER_FRAGMENT].push_constants;
cfg.textures = draw->textures;
cfg.samplers = draw->samplers;
/* TODO: occlusion queries */
}
if (pan_is_bifrost(pdev)) {
pan_section_pack(job, BIFROST_TILER_JOB, TILER, cfg) {
cfg.address = draw->tiler_ctx->bifrost;
}
pan_section_pack(job, BIFROST_TILER_JOB, DRAW_PADDING, padding);
pan_section_pack(job, BIFROST_TILER_JOB, PADDING, padding);
}
}
void
panvk_emit_fragment_job(const struct panvk_device *dev,
const struct panvk_framebuffer *fb,
mali_ptr fbdesc,
void *job)
{
pan_section_pack(job, FRAGMENT_JOB, HEADER, header) {
header.type = MALI_JOB_TYPE_FRAGMENT;
header.index = 1;
}
pan_section_pack(job, FRAGMENT_JOB, PAYLOAD, payload) {
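/* Bounds are expressed in 16x16-pixel tile coordinates, hence the
* MALI_TILE_SHIFT conversions below. */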
payload.bound_min_x = 0;
payload.bound_min_y = 0;
payload.bound_max_x = (fb->width - 1) >> MALI_TILE_SHIFT;
payload.bound_max_y = (fb->height - 1) >> MALI_TILE_SHIFT;
payload.framebuffer = fbdesc;
}
}
void
panvk_emit_viewport(const VkViewport *viewport, const VkRect2D *scissor,
void *vpd)
{
/* The spec says "width must be greater than 0.0". We also assume a
* non-negative viewport x offset. */
assert(viewport->x >= 0);
int minx = (int)viewport->x;
int maxx = (int)(viewport->x + viewport->width);
/* Viewport height can be negative */
int miny = MIN2((int)viewport->y, (int)(viewport->y + viewport->height));
int maxy = MAX2((int)viewport->y, (int)(viewport->y + viewport->height));
assert(scissor->offset.x >= 0 && scissor->offset.y >= 0);
minx = MAX2(scissor->offset.x, minx);
miny = MAX2(scissor->offset.y, miny);
maxx = MIN2(scissor->offset.x + scissor->extent.width, maxx);
maxy = MIN2(scissor->offset.y + scissor->extent.height, maxy);
/* Make sure we don't end up with a max < min when width/height is 0 */
maxx = maxx > minx ? maxx - 1 : maxx;
maxy = maxy > miny ? maxy - 1 : maxy;
assert(viewport->minDepth >= 0.0f && viewport->minDepth <= 1.0f);
assert(viewport->maxDepth >= 0.0f && viewport->maxDepth <= 1.0f);
pan_pack(vpd, VIEWPORT, cfg) {
cfg.scissor_minimum_x = minx;
cfg.scissor_minimum_y = miny;
cfg.scissor_maximum_x = maxx;
cfg.scissor_maximum_y = maxy;
cfg.minimum_z = MIN2(viewport->minDepth, viewport->maxDepth);
cfg.maximum_z = MAX2(viewport->minDepth, viewport->maxDepth);
}
}
void
panvk_sysval_upload_viewport_scale(const VkViewport *viewport,
union panvk_sysval_data *data)
{
data->f32[0] = 0.5f * viewport->width;
data->f32[1] = 0.5f * viewport->height;
data->f32[2] = 0.5f * (viewport->maxDepth - viewport->minDepth);
}
void
panvk_sysval_upload_viewport_offset(const VkViewport *viewport,
union panvk_sysval_data *data)
{
data->f32[0] = (0.5f * viewport->width) + viewport->x;
data->f32[1] = (0.5f * viewport->height) + viewport->y;
data->f32[2] = (0.5f * (viewport->maxDepth - viewport->minDepth)) + viewport->minDepth;
}
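/* Together, the two sysvals above encode the affine viewport transform
* fb = ndc * scale + offset. A hypothetical illustration, not part of
* this patch (and assuming a [-1, 1] NDC range on all three axes):
*/
static inline void
panvk_apply_viewport_transform(const VkViewport *vp, const float ndc[3],
float fb[3])
{
float scale[3] = {
0.5f * vp->width,
0.5f * vp->height,
0.5f * (vp->maxDepth - vp->minDepth),
};
float offset[3] = {
(0.5f * vp->width) + vp->x,
(0.5f * vp->height) + vp->y,
(0.5f * (vp->maxDepth - vp->minDepth)) + vp->minDepth,
};
for (unsigned i = 0; i < 3; i++)
fb[i] = ndc[i] * scale[i] + offset[i];
}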
static enum mali_bifrost_register_file_format
bifrost_blend_type_from_nir(nir_alu_type nir_type)
{
switch(nir_type) {
case 0: /* Render target not in use */
return 0;
case nir_type_float16:
return MALI_BIFROST_REGISTER_FILE_FORMAT_F16;
case nir_type_float32:
return MALI_BIFROST_REGISTER_FILE_FORMAT_F32;
case nir_type_int32:
return MALI_BIFROST_REGISTER_FILE_FORMAT_I32;
case nir_type_uint32:
return MALI_BIFROST_REGISTER_FILE_FORMAT_U32;
case nir_type_int16:
return MALI_BIFROST_REGISTER_FILE_FORMAT_I16;
case nir_type_uint16:
return MALI_BIFROST_REGISTER_FILE_FORMAT_U16;
default:
unreachable("Unsupported blend shader type for NIR alu type");
}
}
static void
panvk_emit_bifrost_blend(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
unsigned rt, void *bd)
{
const struct pan_blend_state *blend = &pipeline->blend.state;
const struct panfrost_device *pdev = &dev->physical_device->pdev;
const struct pan_blend_rt_state *rts = &blend->rts[rt];
pan_pack(bd, BLEND, cfg) {
if (!blend->rt_count || !rts->equation.color_mask) {
cfg.enable = false;
cfg.bifrost.internal.mode = MALI_BIFROST_BLEND_MODE_OFF;
continue;
}
cfg.srgb = util_format_is_srgb(rts->format);
cfg.load_destination = pan_blend_reads_dest(blend->rts[rt].equation);
cfg.round_to_fb_precision = true;
const struct util_format_description *format_desc =
util_format_description(rts->format);
unsigned chan_size = 0;
for (unsigned i = 0; i < format_desc->nr_channels; i++)
chan_size = MAX2(format_desc->channel[i].size, chan_size);
pan_blend_to_fixed_function_equation(blend->rts[rt].equation,
&cfg.bifrost.equation);
/* Fixed point constant */
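/* e.g. an 8-bit channel with blend constant 0.5: 0.5 * 255 = 127,
* left-shifted by 8 to align in the 16-bit field. */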
float fconst =
pan_blend_get_constant(pan_blend_constant_mask(blend->rts[rt].equation),
blend->constants);
uint16_t constant = fconst * ((1 << chan_size) - 1);
constant <<= 16 - chan_size;
cfg.bifrost.constant = constant;
if (pan_blend_is_opaque(blend->rts[rt].equation))
cfg.bifrost.internal.mode = MALI_BIFROST_BLEND_MODE_OPAQUE;
else
cfg.bifrost.internal.mode = MALI_BIFROST_BLEND_MODE_FIXED_FUNCTION;
/* If we want the conversion to work properly,
* num_comps must be set to 4
*/
cfg.bifrost.internal.fixed_function.num_comps = 4;
cfg.bifrost.internal.fixed_function.conversion.memory_format =
panfrost_format_to_bifrost_blend(pdev, rts->format);
cfg.bifrost.internal.fixed_function.conversion.register_format =
bifrost_blend_type_from_nir(pipeline->fs.info.bifrost.blend[rt].type);
cfg.bifrost.internal.fixed_function.rt = rt;
}
}
static void
panvk_emit_midgard_blend(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
unsigned rt, void *bd)
{
const struct pan_blend_state *blend = &pipeline->blend.state;
const struct pan_blend_rt_state *rts = &blend->rts[rt];
pan_pack(bd, BLEND, cfg) {
if (!blend->rt_count || !rts->equation.color_mask) {
cfg.enable = false;
continue;
}
cfg.srgb = util_format_is_srgb(rts->format);
cfg.load_destination = pan_blend_reads_dest(blend->rts[rt].equation);
cfg.round_to_fb_precision = true;
cfg.midgard.blend_shader = false;
pan_blend_to_fixed_function_equation(blend->rts[rt].equation,
&cfg.midgard.equation);
cfg.midgard.constant =
pan_blend_get_constant(pan_blend_constant_mask(blend->rts[rt].equation),
blend->constants);
}
}
void
panvk_emit_blend(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
unsigned rt, void *bd)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
if (pan_is_bifrost(pdev))
panvk_emit_bifrost_blend(dev, pipeline, rt, bd);
else
panvk_emit_midgard_blend(dev, pipeline, rt, bd);
}
void
panvk_emit_blend_constant(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
unsigned rt, const float *constants, void *bd)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
float constant = constants[pipeline->blend.constant[rt].index];
pan_pack(bd, BLEND, cfg) {
cfg.enable = false;
if (pan_is_bifrost(pdev)) {
cfg.bifrost.constant = constant * pipeline->blend.constant[rt].bifrost_factor;
} else {
cfg.midgard.constant = constant;
}
}
}
void
panvk_emit_dyn_fs_rsd(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
const struct panvk_cmd_state *state,
void *rsd)
{
pan_pack(rsd, RENDERER_STATE, cfg) {
if (pipeline->dynamic_state_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
cfg.depth_units = state->rast.depth_bias.constant_factor * 2.0f;
cfg.depth_factor = state->rast.depth_bias.slope_factor;
cfg.depth_bias_clamp = state->rast.depth_bias.clamp;
}
if (pipeline->dynamic_state_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
cfg.stencil_front.mask = state->zs.s_front.compare_mask;
cfg.stencil_back.mask = state->zs.s_back.compare_mask;
}
if (pipeline->dynamic_state_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
cfg.stencil_mask_misc.stencil_mask_front = state->zs.s_front.write_mask;
cfg.stencil_mask_misc.stencil_mask_back = state->zs.s_back.write_mask;
}
if (pipeline->dynamic_state_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
cfg.stencil_front.reference_value = state->zs.s_front.ref;
cfg.stencil_back.reference_value = state->zs.s_back.ref;
}
}
}
void
panvk_emit_base_fs_rsd(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
void *rsd)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
const struct pan_shader_info *info = &pipeline->fs.info;
pan_pack(rsd, RENDERER_STATE, cfg) {
if (pipeline->fs.required) {
pan_shader_prepare_rsd(pdev, info, pipeline->fs.address, &cfg);
if (pan_is_bifrost(pdev)) {
cfg.properties.bifrost.allow_forward_pixel_to_kill = info->fs.can_fpk;
} else {
/* If either depth or stencil is enabled, discard matters */
bool zs_enabled =
(pipeline->zs.z_test && pipeline->zs.z_compare_func != MALI_FUNC_ALWAYS) ||
pipeline->zs.s_test;
cfg.properties.midgard.work_register_count = info->work_reg_count;
cfg.properties.midgard.force_early_z =
info->fs.can_early_z && !pipeline->ms.alpha_to_coverage &&
pipeline->zs.z_compare_func == MALI_FUNC_ALWAYS;
/* Workaround a hardware errata where early-z cannot be enabled
* when discarding even when the depth buffer is read-only, by
* lying to the hardware about the discard and setting the
* "shader reads tilebuffer" flag to compensate */
cfg.properties.midgard.shader_reads_tilebuffer =
info->fs.outputs_read ||
(!zs_enabled && info->fs.can_discard);
cfg.properties.midgard.shader_contains_discard =
zs_enabled && info->fs.can_discard;
}
} else {
if (pan_is_bifrost(pdev)) {
cfg.properties.bifrost.shader_modifies_coverage = true;
cfg.properties.bifrost.allow_forward_pixel_to_kill = true;
cfg.properties.bifrost.allow_forward_pixel_to_be_killed = true;
cfg.properties.bifrost.zs_update_operation = MALI_PIXEL_KILL_STRONG_EARLY;
} else {
cfg.shader.shader = 0x1;
cfg.properties.midgard.work_register_count = 1;
cfg.properties.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
cfg.properties.midgard.force_early_z = true;
}
}
bool msaa = pipeline->ms.rast_samples > 1;
cfg.multisample_misc.multisample_enable = msaa;
cfg.multisample_misc.sample_mask =
msaa ? pipeline->ms.sample_mask : UINT16_MAX;
cfg.multisample_misc.depth_function =
pipeline->zs.z_test ? pipeline->zs.z_compare_func : MALI_FUNC_ALWAYS;
cfg.multisample_misc.depth_write_mask = pipeline->zs.z_write;
cfg.multisample_misc.fixed_function_near_discard = !pipeline->rast.clamp_depth;
cfg.multisample_misc.fixed_function_far_discard = !pipeline->rast.clamp_depth;
cfg.multisample_misc.shader_depth_range_fixed = true;
cfg.stencil_mask_misc.stencil_enable = pipeline->zs.s_test;
cfg.stencil_mask_misc.alpha_to_coverage = pipeline->ms.alpha_to_coverage;
cfg.stencil_mask_misc.alpha_test_compare_function = MALI_FUNC_ALWAYS;
cfg.stencil_mask_misc.depth_range_1 = pipeline->rast.depth_bias.enable;
cfg.stencil_mask_misc.depth_range_2 = pipeline->rast.depth_bias.enable;
cfg.stencil_mask_misc.single_sampled_lines = pipeline->ms.rast_samples <= 1;
if (!(pipeline->dynamic_state_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))) {
cfg.depth_units = pipeline->rast.depth_bias.constant_factor * 2.0f;
cfg.depth_factor = pipeline->rast.depth_bias.slope_factor;
cfg.depth_bias_clamp = pipeline->rast.depth_bias.clamp;
}
if (!(pipeline->dynamic_state_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))) {
cfg.stencil_front.mask = pipeline->zs.s_front.compare_mask;
cfg.stencil_back.mask = pipeline->zs.s_back.compare_mask;
}
if (!(pipeline->dynamic_state_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))) {
cfg.stencil_mask_misc.stencil_mask_front = pipeline->zs.s_front.write_mask;
cfg.stencil_mask_misc.stencil_mask_back = pipeline->zs.s_back.write_mask;
}
if (!(pipeline->dynamic_state_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))) {
cfg.stencil_front.reference_value = pipeline->zs.s_front.ref;
cfg.stencil_back.reference_value = pipeline->zs.s_back.ref;
}
cfg.stencil_front.compare_function = pipeline->zs.s_front.compare_func;
cfg.stencil_front.stencil_fail = pipeline->zs.s_front.fail_op;
cfg.stencil_front.depth_fail = pipeline->zs.s_front.z_fail_op;
cfg.stencil_front.depth_pass = pipeline->zs.s_front.pass_op;
cfg.stencil_back.compare_function = pipeline->zs.s_back.compare_func;
cfg.stencil_back.stencil_fail = pipeline->zs.s_back.fail_op;
cfg.stencil_back.depth_fail = pipeline->zs.s_back.z_fail_op;
cfg.stencil_back.depth_pass = pipeline->zs.s_back.pass_op;
}
}
void
panvk_emit_non_fs_rsd(const struct panvk_device *dev,
const struct pan_shader_info *shader_info,
mali_ptr shader_ptr,
void *rsd)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
assert(shader_info->stage != MESA_SHADER_FRAGMENT);
pan_pack(rsd, RENDERER_STATE, cfg) {
pan_shader_prepare_rsd(pdev, shader_info, shader_ptr, &cfg);
}
}
void
panvk_emit_bifrost_tiler_context(const struct panvk_device *dev,
unsigned width, unsigned height,
const struct panfrost_ptr *descs)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
pan_pack(descs->cpu + MALI_BIFROST_TILER_LENGTH, BIFROST_TILER_HEAP, cfg) {
cfg.size = pdev->tiler_heap->size;
cfg.base = pdev->tiler_heap->ptr.gpu;
cfg.bottom = pdev->tiler_heap->ptr.gpu;
cfg.top = pdev->tiler_heap->ptr.gpu + pdev->tiler_heap->size;
}
pan_pack(descs->cpu, BIFROST_TILER, cfg) {
cfg.hierarchy_mask = 0x28;
cfg.fb_width = width;
cfg.fb_height = height;
cfg.heap = descs->gpu + MALI_BIFROST_TILER_LENGTH;
}
}
unsigned
panvk_emit_fb(const struct panvk_device *dev,
const struct panvk_batch *batch,
const struct panvk_subpass *subpass,
const struct panvk_pipeline *pipeline,
const struct panvk_framebuffer *fb,
const struct panvk_clear_value *clears,
const struct pan_tls_info *tlsinfo,
const struct pan_tiler_context *tilerctx,
void *desc)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
struct panvk_image_view *view;
bool crc_valid[8] = { false };
struct pan_fb_info fbinfo = {
.width = fb->width,
.height = fb->height,
.extent.maxx = fb->width - 1,
.extent.maxy = fb->height - 1,
.nr_samples = 1,
};
for (unsigned cb = 0; cb < subpass->color_count; cb++) {
int idx = subpass->color_attachments[cb].idx;
view = idx != VK_ATTACHMENT_UNUSED ?
fb->attachments[idx].iview : NULL;
if (!view)
continue;
fbinfo.rts[cb].view = &view->pview;
fbinfo.rts[cb].clear = subpass->color_attachments[cb].clear;
fbinfo.rts[cb].crc_valid = &crc_valid[cb];
memcpy(fbinfo.rts[cb].clear_value, clears[idx].color,
sizeof(fbinfo.rts[cb].clear_value));
fbinfo.nr_samples =
MAX2(fbinfo.nr_samples, view->pview.image->layout.nr_samples);
}
if (subpass->zs_attachment.idx != VK_ATTACHMENT_UNUSED) {
view = fb->attachments[subpass->zs_attachment.idx].iview;
const struct util_format_description *fdesc =
util_format_description(view->pview.format);
fbinfo.nr_samples =
MAX2(fbinfo.nr_samples, view->pview.image->layout.nr_samples);
if (util_format_has_depth(fdesc)) {
fbinfo.zs.clear.z = subpass->zs_attachment.clear;
fbinfo.zs.clear_value.depth = clears[subpass->zs_attachment.idx].depth;
fbinfo.zs.view.zs = &view->pview;
}
if (util_format_has_stencil(fdesc)) {
fbinfo.zs.clear.s = subpass->zs_attachment.clear;
fbinfo.zs.clear_value.stencil = clears[subpass->zs_attachment.idx].stencil;
if (!fbinfo.zs.view.zs)
fbinfo.zs.view.s = &view->pview;
}
}
return pan_emit_fbd(pdev, &fbinfo, tlsinfo, tilerctx, desc);
}

src/panfrost/vulkan/panvk_cs.h (new file)

@@ -0,0 +1,164 @@
/*
* Copyright (C) 2021 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef PANVK_CS_H
#define PANVK_CS_H
#include <vulkan/vulkan.h>
#include "compiler/shader_enums.h"
#include "panfrost-job.h"
#include "pan_cs.h"
struct pan_blend_state;
struct pan_shader_info;
struct panfrost_ptr;
struct pan_pool;
union panvk_sysval_data;
struct panvk_framebuffer;
struct panvk_cmd_state;
struct panvk_compute_dim;
struct panvk_device;
struct panvk_batch;
struct panvk_varyings_info;
struct panvk_attrib_buf;
struct panvk_attribs_info;
struct panvk_pipeline;
struct panvk_draw_info;
struct panvk_descriptor_state;
struct panvk_subpass;
struct panvk_clear_value;
void
panvk_emit_varyings(const struct panvk_device *dev,
const struct panvk_varyings_info *varyings,
gl_shader_stage stage,
void *descs);
void
panvk_emit_varying_bufs(const struct panvk_device *dev,
const struct panvk_varyings_info *varyings,
void *descs);
void
panvk_emit_attrib_bufs(const struct panvk_device *dev,
const struct panvk_attribs_info *info,
const struct panvk_attrib_buf *bufs,
unsigned buf_count,
const struct panvk_draw_info *draw,
void *descs);
void
panvk_emit_attribs(const struct panvk_device *dev,
const struct panvk_attribs_info *attribs,
const struct panvk_attrib_buf *bufs,
unsigned buf_count,
void *descs);
void
panvk_emit_ubos(const struct panvk_pipeline *pipeline,
const struct panvk_descriptor_state *state,
void *descs);
void
panvk_emit_vertex_job(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
const struct panvk_draw_info *draw,
void *job);
void
panvk_emit_tiler_job(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
const struct panvk_draw_info *draw,
void *job);
void
panvk_emit_fragment_job(const struct panvk_device *dev,
const struct panvk_framebuffer *fb,
mali_ptr fbdesc,
void *job);
void
panvk_emit_viewport(const VkViewport *viewport, const VkRect2D *scissor,
void *vpd);
void
panvk_emit_blend(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
unsigned rt, void *bd);
void
panvk_emit_blend_constant(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
unsigned rt, const float *constants, void *bd);
void
panvk_emit_dyn_fs_rsd(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
const struct panvk_cmd_state *state,
void *rsd);
void
panvk_emit_base_fs_rsd(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
void *rsd);
void
panvk_emit_non_fs_rsd(const struct panvk_device *dev,
const struct pan_shader_info *shader_info,
mali_ptr shader_ptr,
void *rsd);
void
panvk_emit_bifrost_tiler_context(const struct panvk_device *dev,
unsigned width, unsigned height,
const struct panfrost_ptr *descs);
unsigned
panvk_emit_fb(const struct panvk_device *dev,
const struct panvk_batch *batch,
const struct panvk_subpass *subpass,
const struct panvk_pipeline *pipeline,
const struct panvk_framebuffer *fb,
const struct panvk_clear_value *clears,
const struct pan_tls_info *tlsinfo,
const struct pan_tiler_context *tilerctx,
void *desc);
void
panvk_emit_tls(const struct panvk_device *dev,
const struct panvk_pipeline *pipeline,
const struct pan_compute_dim *wg_count,
struct pan_pool *tls_pool,
void *desc);
void
panvk_sysval_upload_viewport_scale(const VkViewport *viewport,
union panvk_sysval_data *data);
void
panvk_sysval_upload_viewport_offset(const VkViewport *viewport,
union panvk_sysval_data *data);
#endif

src/panfrost/vulkan/panvk_descriptor_set.c (new file)

@@ -0,0 +1,838 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from:
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include <assert.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include "util/mesa-sha1.h"
#include "vk_descriptors.h"
#include "vk_util.h"
#include "pan_bo.h"
#include "midgard_pack.h"
VkResult
panvk_CreateDescriptorSetLayout(VkDevice _device,
const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout)
{
VK_FROM_HANDLE(panvk_device, device, _device);
struct panvk_descriptor_set_layout *set_layout;
VkDescriptorSetLayoutBinding *bindings;
assert(pCreateInfo->bindingCount);
VkResult result =
vk_create_sorted_bindings(pCreateInfo->pBindings,
pCreateInfo->bindingCount,
&bindings);
if (result != VK_SUCCESS)
return vk_error(device->instance, result);
unsigned num_immutable_samplers = 0;
for (unsigned i = 0; i < pCreateInfo->bindingCount; i++) {
if (bindings[i].pImmutableSamplers)
num_immutable_samplers += bindings[i].descriptorCount;
}
unsigned max_binding = bindings[pCreateInfo->bindingCount - 1].binding;
size_t size = sizeof(*set_layout) +
(sizeof(struct panvk_descriptor_set_binding_layout) *
(max_binding + 1)) +
(sizeof(struct panvk_sampler *) * num_immutable_samplers);
set_layout = vk_object_zalloc(&device->vk, pAllocator, size,
VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
if (!set_layout) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto err_free_bindings;
}
struct panvk_sampler **immutable_samplers =
(struct panvk_sampler **)((uint8_t *)set_layout + sizeof(*set_layout) +
(sizeof(struct panvk_descriptor_set_binding_layout) *
(max_binding + 1)));
set_layout->flags = pCreateInfo->flags;
set_layout->binding_count = max_binding + 1;
unsigned sampler_idx = 0, tex_idx = 0, ubo_idx = 0, ssbo_idx = 0;
unsigned dynoffset_idx = 0, desc_idx = 0;
for (unsigned i = 0; i < pCreateInfo->bindingCount; i++) {
const VkDescriptorSetLayoutBinding *binding = &bindings[i];
struct panvk_descriptor_set_binding_layout *binding_layout =
&set_layout->bindings[binding->binding];
binding_layout->type = binding->descriptorType;
binding_layout->array_size = binding->descriptorCount;
binding_layout->shader_stages = binding->stageFlags;
if (binding->pImmutableSamplers) {
binding_layout->immutable_samplers = immutable_samplers;
immutable_samplers += binding_layout->array_size;
for (unsigned j = 0; j < binding_layout->array_size; j++) {
VK_FROM_HANDLE(panvk_sampler, sampler, binding->pImmutableSamplers[j]);
binding_layout->immutable_samplers[j] = sampler;
}
}
binding_layout->desc_idx = desc_idx;
desc_idx += binding->descriptorCount;
switch (binding_layout->type) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
binding_layout->sampler_idx = sampler_idx;
sampler_idx += binding_layout->array_size;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
binding_layout->sampler_idx = sampler_idx;
binding_layout->tex_idx = tex_idx;
sampler_idx += binding_layout->array_size;
tex_idx += binding_layout->array_size;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
binding_layout->tex_idx = tex_idx;
tex_idx += binding_layout->array_size;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
binding_layout->dynoffset_idx = dynoffset_idx;
dynoffset_idx += binding_layout->array_size;
FALLTHROUGH;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
binding_layout->ubo_idx = ubo_idx;
ubo_idx += binding_layout->array_size;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
binding_layout->dynoffset_idx = dynoffset_idx;
dynoffset_idx += binding_layout->array_size;
FALLTHROUGH;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
binding_layout->ssbo_idx = ssbo_idx;
ssbo_idx += binding_layout->array_size;
break;
default:
unreachable("Invalid descriptor type");
}
}
set_layout->num_descs = desc_idx;
set_layout->num_samplers = sampler_idx;
set_layout->num_textures = tex_idx;
set_layout->num_ubos = ubo_idx;
set_layout->num_ssbos = ssbo_idx;
set_layout->num_dynoffsets = dynoffset_idx;
free(bindings);
*pSetLayout = panvk_descriptor_set_layout_to_handle(set_layout);
return VK_SUCCESS;
err_free_bindings:
free(bindings);
return vk_error(device->instance, result);
}
void
panvk_DestroyDescriptorSetLayout(VkDevice _device,
VkDescriptorSetLayout _set_layout,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_descriptor_set_layout, set_layout, _set_layout);
if (!set_layout)
return;
vk_object_free(&device->vk, pAllocator, set_layout);
}
/* FIXME: make sure those values are correct */
#define PANVK_MAX_TEXTURES (1 << 16)
#define PANVK_MAX_SAMPLERS (1 << 16)
#define PANVK_MAX_UBOS 255
void
panvk_GetDescriptorSetLayoutSupport(VkDevice _device,
const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
VkDescriptorSetLayoutSupport *pSupport)
{
VK_FROM_HANDLE(panvk_device, device, _device);
pSupport->supported = false;
VkDescriptorSetLayoutBinding *bindings;
VkResult result =
vk_create_sorted_bindings(pCreateInfo->pBindings,
pCreateInfo->bindingCount,
&bindings);
if (result != VK_SUCCESS) {
vk_error(device->instance, result);
return;
}
unsigned sampler_idx = 0, tex_idx = 0, ubo_idx = 0, ssbo_idx = 0, dynoffset_idx = 0;
for (unsigned i = 0; i < pCreateInfo->bindingCount; i++) {
const VkDescriptorSetLayoutBinding *binding = &bindings[i];
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
sampler_idx += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
sampler_idx += binding->descriptorCount;
tex_idx += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
tex_idx += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
dynoffset_idx += binding->descriptorCount;
FALLTHROUGH;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
ubo_idx += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
dynoffset_idx += binding->descriptorCount;
FALLTHROUGH;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
ssbo_idx += binding->descriptorCount;
break;
default:
unreachable("Invalid descriptor type");
}
}
free(bindings);
/* The maximum values apply to all sets attached to a pipeline since all
* set descriptors have to be merged in a single array.
*/
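/* e.g. if MAX_SETS is 4, a single layout may use at most
* PANVK_MAX_TEXTURES / 4 textures. */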
if (tex_idx > PANVK_MAX_TEXTURES / MAX_SETS ||
sampler_idx > PANVK_MAX_SAMPLERS / MAX_SETS ||
ubo_idx > PANVK_MAX_UBOS / MAX_SETS)
return;
pSupport->supported = true;
}
/*
* Pipeline layouts. These have nothing to do with the pipeline. They are
* just multiple descriptor set layouts pasted together.
*/
VkResult
panvk_CreatePipelineLayout(VkDevice _device,
const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineLayout *pPipelineLayout)
{
VK_FROM_HANDLE(panvk_device, device, _device);
struct panvk_pipeline_layout *layout;
struct mesa_sha1 ctx;
layout = vk_object_zalloc(&device->vk, pAllocator, sizeof(*layout),
VK_OBJECT_TYPE_PIPELINE_LAYOUT);
if (layout == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
layout->num_sets = pCreateInfo->setLayoutCount;
_mesa_sha1_init(&ctx);
unsigned sampler_idx = 0, tex_idx = 0, ssbo_idx = 0, ubo_idx = 0, dynoffset_idx = 0;
for (unsigned set = 0; set < pCreateInfo->setLayoutCount; set++) {
VK_FROM_HANDLE(panvk_descriptor_set_layout, set_layout,
pCreateInfo->pSetLayouts[set]);
layout->sets[set].layout = set_layout;
layout->sets[set].sampler_offset = sampler_idx;
layout->sets[set].tex_offset = tex_idx;
layout->sets[set].ubo_offset = ubo_idx;
layout->sets[set].ssbo_offset = ssbo_idx;
layout->sets[set].dynoffset_offset = dynoffset_idx;
sampler_idx += set_layout->num_samplers;
tex_idx += set_layout->num_textures;
ubo_idx += set_layout->num_ubos + (set_layout->num_dynoffsets != 0);
ssbo_idx += set_layout->num_ssbos;
dynoffset_idx += set_layout->num_dynoffsets;
for (unsigned b = 0; b < set_layout->binding_count; b++) {
struct panvk_descriptor_set_binding_layout *binding_layout =
&set_layout->bindings[b];
if (binding_layout->immutable_samplers) {
for (unsigned s = 0; s < binding_layout->array_size; s++) {
struct panvk_sampler *sampler = binding_layout->immutable_samplers[s];
_mesa_sha1_update(&ctx, &sampler->desc, sizeof(sampler->desc));
}
}
_mesa_sha1_update(&ctx, &binding_layout->type, sizeof(binding_layout->type));
_mesa_sha1_update(&ctx, &binding_layout->array_size, sizeof(binding_layout->array_size));
_mesa_sha1_update(&ctx, &binding_layout->desc_idx, sizeof(binding_layout->desc_idx));
_mesa_sha1_update(&ctx, &binding_layout->shader_stages, sizeof(binding_layout->shader_stages));
}
}
layout->num_samplers = sampler_idx;
layout->num_textures = tex_idx;
layout->num_ubos = ubo_idx;
layout->num_ssbos = ssbo_idx;
layout->num_dynoffsets = dynoffset_idx;
_mesa_sha1_final(&ctx, layout->sha1);
*pPipelineLayout = panvk_pipeline_layout_to_handle(layout);
return VK_SUCCESS;
}
void
panvk_DestroyPipelineLayout(VkDevice _device,
VkPipelineLayout _pipelineLayout,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_pipeline_layout, pipeline_layout, _pipelineLayout);
if (!pipeline_layout)
return;
vk_object_free(&device->vk, pAllocator, pipeline_layout);
}
VkResult
panvk_CreateDescriptorPool(VkDevice _device,
const VkDescriptorPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorPool *pDescriptorPool)
{
VK_FROM_HANDLE(panvk_device, device, _device);
struct panvk_descriptor_pool *pool;
pool = vk_object_zalloc(&device->vk, pAllocator,
sizeof(struct panvk_descriptor_pool),
VK_OBJECT_TYPE_DESCRIPTOR_POOL);
if (!pool)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
pool->max.sets = pCreateInfo->maxSets;
for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
unsigned desc_count = pCreateInfo->pPoolSizes[i].descriptorCount;
switch(pCreateInfo->pPoolSizes[i].type) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
pool->max.samplers += desc_count;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
pool->max.combined_image_samplers += desc_count;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
pool->max.sampled_images += desc_count;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
pool->max.storage_images += desc_count;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
pool->max.uniform_texel_bufs += desc_count;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
pool->max.storage_texel_bufs += desc_count;
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
pool->max.input_attachments += desc_count;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
pool->max.uniform_bufs += desc_count;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
pool->max.storage_bufs += desc_count;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
pool->max.uniform_dyn_bufs += desc_count;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
pool->max.storage_dyn_bufs += desc_count;
break;
default:
unreachable("Invalid descriptor type");
}
}
*pDescriptorPool = panvk_descriptor_pool_to_handle(pool);
return VK_SUCCESS;
}
void
panvk_DestroyDescriptorPool(VkDevice _device,
VkDescriptorPool _pool,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_descriptor_pool, pool, _pool);
if (pool)
vk_object_free(&device->vk, pAllocator, pool);
}
VkResult
panvk_ResetDescriptorPool(VkDevice _device,
VkDescriptorPool _pool,
VkDescriptorPoolResetFlags flags)
{
VK_FROM_HANDLE(panvk_descriptor_pool, pool, _pool);
memset(&pool->cur, 0, sizeof(pool->cur));
return VK_SUCCESS;
}
static VkResult
panvk_descriptor_set_create(struct panvk_device *device,
struct panvk_descriptor_pool *pool,
const struct panvk_descriptor_set_layout *layout,
struct panvk_descriptor_set **out_set)
{
const struct panfrost_device *pdev = &device->physical_device->pdev;
struct panvk_descriptor_set *set;
/* TODO: Allocate from the pool! */
set = vk_object_zalloc(&device->vk, NULL,
sizeof(struct panvk_descriptor_set),
VK_OBJECT_TYPE_DESCRIPTOR_SET);
if (!set)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
set->layout = layout;
set->descs = vk_alloc(&device->vk.alloc,
sizeof(*set->descs) * layout->num_descs, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set->descs)
goto err_free_set;
if (layout->num_ubos) {
set->ubos = vk_zalloc(&device->vk.alloc,
sizeof(*set->ubos) * layout->num_ubos, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set->ubos)
goto err_free_set;
}
if (layout->num_samplers) {
set->samplers = vk_zalloc(&device->vk.alloc,
sizeof(*set->samplers) * layout->num_samplers, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set->samplers)
goto err_free_set;
}
if (layout->num_textures) {
if (pan_is_bifrost(pdev)) {
set->textures.bifrost = vk_zalloc(&device->vk.alloc,
sizeof(*set->textures.bifrost) *
layout->num_textures,
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
} else {
set->textures.midgard = vk_zalloc(&device->vk.alloc,
sizeof(*set->textures.midgard) *
layout->num_textures,
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
}
if (!set->textures.midgard)
goto err_free_set;
}
for (unsigned i = 0; i < layout->binding_count; i++) {
if (!layout->bindings[i].immutable_samplers)
continue;
for (unsigned j = 0; j < layout->bindings[i].array_size; j++) {
set->descs[layout->bindings[i].desc_idx].image.sampler =
layout->bindings[i].immutable_samplers[j];
}
}
*out_set = set;
return VK_SUCCESS;
err_free_set:
vk_free(&device->vk.alloc, set->textures.midgard);
vk_free(&device->vk.alloc, set->samplers);
vk_free(&device->vk.alloc, set->ubos);
vk_free(&device->vk.alloc, set->descs);
vk_object_free(&device->vk, NULL, set);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
static void
panvk_descriptor_set_destroy(struct panvk_device *device,
struct panvk_descriptor_pool *pool,
struct panvk_descriptor_set *set)
{
vk_free(&device->vk.alloc, set->textures.midgard);
vk_free(&device->vk.alloc, set->samplers);
vk_free(&device->vk.alloc, set->ubos);
vk_free(&device->vk.alloc, set->descs);
vk_object_free(&device->vk, NULL, set);
}
VkResult
panvk_AllocateDescriptorSets(VkDevice _device,
const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_descriptor_pool, pool, pAllocateInfo->descriptorPool);
VkResult result;
unsigned i;
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
VK_FROM_HANDLE(panvk_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]);
struct panvk_descriptor_set *set = NULL;
result = panvk_descriptor_set_create(device, pool, layout, &set);
if (result != VK_SUCCESS)
goto err_free_sets;
pDescriptorSets[i] = panvk_descriptor_set_to_handle(set);
}
return VK_SUCCESS;
err_free_sets:
panvk_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool, i, pDescriptorSets);
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++)
pDescriptorSets[i] = VK_NULL_HANDLE;
return result;
}
VkResult
panvk_FreeDescriptorSets(VkDevice _device,
VkDescriptorPool descriptorPool,
uint32_t count,
const VkDescriptorSet *pDescriptorSets)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_descriptor_pool, pool, descriptorPool);
for (unsigned i = 0; i < count; i++) {
VK_FROM_HANDLE(panvk_descriptor_set, set, pDescriptorSets[i]);
if (set)
panvk_descriptor_set_destroy(device, pool, set);
}
return VK_SUCCESS;
}
static void
panvk_set_image_desc(struct panvk_descriptor *desc,
const VkDescriptorImageInfo *pImageInfo)
{
VK_FROM_HANDLE(panvk_sampler, sampler, pImageInfo->sampler);
VK_FROM_HANDLE(panvk_image_view, image_view, pImageInfo->imageView);
desc->image.sampler = sampler;
desc->image.view = image_view;
desc->image.layout = pImageInfo->imageLayout;
}
static void
panvk_set_texel_buffer_view_desc(struct panvk_descriptor *desc,
const VkBufferView *pTexelBufferView)
{
VK_FROM_HANDLE(panvk_buffer_view, buffer_view, *pTexelBufferView);
desc->buffer_view = buffer_view;
}
static void
panvk_set_buffer_info_desc(struct panvk_descriptor *desc,
const VkDescriptorBufferInfo *pBufferInfo)
{
VK_FROM_HANDLE(panvk_buffer, buffer, pBufferInfo->buffer);
desc->buffer_info.buffer = buffer;
desc->buffer_info.offset = pBufferInfo->offset;
desc->buffer_info.range = pBufferInfo->range;
}
static void
panvk_set_ubo_desc(void *ubo,
const VkDescriptorBufferInfo *pBufferInfo)
{
VK_FROM_HANDLE(panvk_buffer, buffer, pBufferInfo->buffer);
size_t size = pBufferInfo->range == VK_WHOLE_SIZE ?
(buffer->bo->size - pBufferInfo->offset) :
pBufferInfo->range;
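/* UNIFORM_BUFFER.entries is counted in 16-byte words, hence the
* DIV_ROUND_UP below. */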
pan_pack(ubo, UNIFORM_BUFFER, cfg) {
cfg.pointer = buffer->bo->ptr.gpu + pBufferInfo->offset;
cfg.entries = DIV_ROUND_UP(size, 16);
}
}
static void
panvk_set_sampler_desc(void *desc,
const VkDescriptorImageInfo *pImageInfo)
{
VK_FROM_HANDLE(panvk_sampler, sampler, pImageInfo->sampler);
memcpy(desc, &sampler->desc, sizeof(sampler->desc));
}
static void
panvk_set_bifrost_texture_desc(struct mali_bifrost_texture_packed *desc,
const VkDescriptorImageInfo *pImageInfo)
{
VK_FROM_HANDLE(panvk_image_view, view, pImageInfo->imageView);
*desc = view->bifrost.tex_desc;
}
static void
panvk_set_midgard_texture_desc(mali_ptr *desc,
const VkDescriptorImageInfo *pImageInfo)
{
VK_FROM_HANDLE(panvk_image_view, view, pImageInfo->imageView);
*desc = view->bo->ptr.gpu;
}
static void
panvk_write_descriptor_set(struct panvk_device *dev,
const VkWriteDescriptorSet *pDescriptorWrite)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
VK_FROM_HANDLE(panvk_descriptor_set, set, pDescriptorWrite->dstSet);
const struct panvk_descriptor_set_layout *layout = set->layout;
unsigned dest_offset = pDescriptorWrite->dstArrayElement;
unsigned binding = pDescriptorWrite->dstBinding;
unsigned src_offset = 0;
while (src_offset < pDescriptorWrite->descriptorCount &&
binding < layout->binding_count) {
const struct panvk_descriptor_set_binding_layout *binding_layout =
&layout->bindings[binding];
if (!binding_layout->array_size) {
binding++;
dest_offset = 0;
continue;
}
assert(pDescriptorWrite->descriptorType == binding_layout->type);
unsigned ndescs = MIN2(pDescriptorWrite->descriptorCount - src_offset,
binding_layout->array_size - dest_offset);
struct panvk_descriptor *descs = &set->descs[binding_layout->desc_idx + dest_offset];
assert(binding_layout->desc_idx + dest_offset + ndescs <= set->layout->num_descs);
switch (pDescriptorWrite->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
for (unsigned i = 0; i < ndescs; i++) {
const VkDescriptorImageInfo *info = &pDescriptorWrite->pImageInfo[src_offset + i];
if (pDescriptorWrite->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
pDescriptorWrite->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
unsigned sampler = binding_layout->sampler_idx + dest_offset + i;
panvk_set_sampler_desc(&set->samplers[sampler], info);
}
if (pDescriptorWrite->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
pDescriptorWrite->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
unsigned tex = binding_layout->tex_idx + dest_offset + i;
if (pan_is_bifrost(pdev))
panvk_set_bifrost_texture_desc(&set->textures.bifrost[tex], info);
else
panvk_set_midgard_texture_desc(&set->textures.midgard[tex], info);
}
}
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
for (unsigned i = 0; i < ndescs; i++)
panvk_set_image_desc(&descs[i], &pDescriptorWrite->pImageInfo[src_offset + i]);
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
for (unsigned i = 0; i < ndescs; i++)
panvk_set_texel_buffer_view_desc(&descs[i], &pDescriptorWrite->pTexelBufferView[src_offset + i]);
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
for (unsigned i = 0; i < ndescs; i++) {
unsigned ubo = binding_layout->ubo_idx + dest_offset + i;
panvk_set_ubo_desc(&set->ubos[ubo],
&pDescriptorWrite->pBufferInfo[src_offset + i]);
}
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
for (unsigned i = 0; i < ndescs; i++)
panvk_set_buffer_info_desc(&descs[i], &pDescriptorWrite->pBufferInfo[src_offset + i]);
break;
default:
unreachable("Invalid type");
}
src_offset += ndescs;
binding++;
dest_offset = 0;
}
}
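/* Example (a hedged sketch of the client-side call this services, with
* hypothetical handles): a single-element uniform-buffer write reaches
* panvk_set_ubo_desc() exactly once:
*
*    VkDescriptorBufferInfo info = { buf, 0, VK_WHOLE_SIZE };
*    VkWriteDescriptorSet write = {
*       .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
*       .dstSet = set,
*       .dstBinding = 0,
*       .dstArrayElement = 0,
*       .descriptorCount = 1,
*       .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
*       .pBufferInfo = &info,
*    };
*    vkUpdateDescriptorSets(device, 1, &write, 0, NULL);
*/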
static void
panvk_copy_descriptor_set(struct panvk_device *dev,
const VkCopyDescriptorSet *pDescriptorCopy)
{
VK_FROM_HANDLE(panvk_descriptor_set, dest_set, pDescriptorCopy->dstSet);
VK_FROM_HANDLE(panvk_descriptor_set, src_set, pDescriptorCopy->srcSet);
const struct panvk_descriptor_set_layout *dest_layout = dest_set->layout;
const struct panvk_descriptor_set_layout *src_layout = src_set->layout;
unsigned dest_offset = pDescriptorCopy->dstArrayElement;
unsigned src_offset = pDescriptorCopy->srcArrayElement;
unsigned dest_binding = pDescriptorCopy->dstBinding;
unsigned src_binding = pDescriptorCopy->srcBinding;
unsigned desc_count = pDescriptorCopy->descriptorCount;
while (desc_count && src_binding < src_layout->binding_count &&
dest_binding < dest_layout->binding_count) {
const struct panvk_descriptor_set_binding_layout *dest_binding_layout =
&dest_layout->bindings[dest_binding];
if (!dest_binding_layout->array_size) {
dest_binding++;
dest_offset = 0;
continue;
}
const struct panvk_descriptor_set_binding_layout *src_binding_layout =
&src_layout->bindings[src_binding];
if (!src_binding_layout->array_size) {
src_binding++;
src_offset = 0;
continue;
}
assert(dest_binding_layout->type == src_binding_layout->type);
unsigned ndescs = MIN3(desc_count,
dest_binding_layout->array_size - dest_offset,
src_binding_layout->array_size - src_offset);
struct panvk_descriptor *dest_descs = dest_set->descs + dest_binding_layout->desc_idx + dest_offset;
struct panvk_descriptor *src_descs = src_set->descs + src_binding_layout->desc_idx + src_offset;
memcpy(dest_descs, src_descs, ndescs * sizeof(*dest_descs));
desc_count -= ndescs;
dest_offset += ndescs;
if (dest_offset == dest_binding_layout->array_size) {
dest_binding++;
dest_offset = 0;
}
src_offset += ndescs;
if (src_offset == src_binding_layout->array_size) {
src_binding++;
src_offset = 0;
}
}
assert(!desc_count);
}
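/* Worked example (illustrative): copying 6 descriptors from srcBinding 0
* (array_size 4) into dstBinding 2 (array_size 8) clamps the first chunk
* to MIN3(6, 8, 4) = 4 descriptors, rolls src_binding over to binding 1,
* then copies the remaining 2 (assuming binding 1 holds at least 2),
* matching the spec rule that updates spill into consecutive bindings.
*/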
void
panvk_UpdateDescriptorSets(VkDevice _device,
uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites,
uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies)
{
VK_FROM_HANDLE(panvk_device, dev, _device);
for (unsigned i = 0; i < descriptorWriteCount; i++)
panvk_write_descriptor_set(dev, &pDescriptorWrites[i]);
for (unsigned i = 0; i < descriptorCopyCount; i++)
panvk_copy_descriptor_set(dev, &pDescriptorCopies[i]);
}
VkResult
panvk_CreateDescriptorUpdateTemplate(VkDevice _device,
const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
panvk_stub();
return VK_SUCCESS;
}
void
panvk_DestroyDescriptorUpdateTemplate(VkDevice _device,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator)
{
panvk_stub();
}
void
panvk_UpdateDescriptorSetWithTemplate(VkDevice _device,
VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData)
{
panvk_stub();
}
VkResult
panvk_CreateSamplerYcbcrConversion(VkDevice device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion)
{
panvk_stub();
return VK_SUCCESS;
}
void
panvk_DestroySamplerYcbcrConversion(VkDevice device,
VkSamplerYcbcrConversion ycbcrConversion,
const VkAllocationCallbacks *pAllocator)
{
panvk_stub();
}

File diff suppressed because it is too large

View File

@ -0,0 +1,484 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from tu_formats.c which is:
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include "util/format_r11g11b10f.h"
#include "util/format_srgb.h"
#include "util/half_float.h"
#include "vulkan/util/vk_format.h"
#include "vk_format.h"
#include "vk_util.h"
#include "panfrost/lib/pan_texture.h"
static void
get_format_properties(struct panvk_physical_device *physical_device,
VkFormat format,
VkFormatProperties *out_properties)
{
struct panfrost_device *pdev = &physical_device->pdev;
VkFormatFeatureFlags tex = 0, buffer = 0;
enum pipe_format pfmt = vk_format_to_pipe_format(format);
const struct panfrost_format fmt = pdev->formats[pfmt];
if (!pfmt || !fmt.hw)
goto end;
/* 3-byte formats are not supported by the buffer <-> image copy helpers. */
if (util_format_get_blocksize(pfmt) == 3)
goto end;
/* We don't support compressed formats yet: they cause trouble when doing
* a vkCmdCopyImage() between a compressed and a non-compressed format on
* a tiled/AFBC resource.
*/
if (util_format_is_compressed(pfmt))
goto end;
buffer |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
if (fmt.bind & PIPE_BIND_VERTEX_BUFFER)
buffer |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
if (fmt.bind & PIPE_BIND_SAMPLER_VIEW) {
tex |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT |
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT |
VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT |
VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT;
buffer |= VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
tex |= VK_FORMAT_FEATURE_BLIT_SRC_BIT;
}
if (fmt.bind & PIPE_BIND_RENDER_TARGET) {
tex |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
VK_FORMAT_FEATURE_BLIT_DST_BIT;
tex |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
buffer |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
/* Can always blend via blend shaders */
tex |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
}
if (fmt.bind & PIPE_BIND_DEPTH_STENCIL)
tex |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
end:
out_properties->linearTilingFeatures = tex;
out_properties->optimalTilingFeatures = tex;
out_properties->bufferFeatures = buffer;
}
void
panvk_GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice,
VkFormat format,
VkFormatProperties *pFormatProperties)
{
VK_FROM_HANDLE(panvk_physical_device, physical_device, physicalDevice);
get_format_properties(physical_device, format, pFormatProperties);
}
void
panvk_GetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice,
VkFormat format,
VkFormatProperties2 *pFormatProperties)
{
VK_FROM_HANDLE(panvk_physical_device, physical_device, physicalDevice);
get_format_properties(physical_device, format,
&pFormatProperties->formatProperties);
VkDrmFormatModifierPropertiesListEXT *list =
vk_find_struct(pFormatProperties->pNext, DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
if (list) {
VK_OUTARRAY_MAKE(out, list->pDrmFormatModifierProperties,
&list->drmFormatModifierCount);
vk_outarray_append(&out, mod_props) {
mod_props->drmFormatModifier = DRM_FORMAT_MOD_LINEAR;
mod_props->drmFormatModifierPlaneCount = 1;
}
}
}
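/* Example (hedged): a client enumerates the single modifier advertised
* above with the standard two-call idiom that VK_OUTARRAY_MAKE supports:
*
*    VkDrmFormatModifierPropertiesListEXT list = {
*       .sType = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
*    };
*    VkFormatProperties2 props = {
*       .sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2,
*       .pNext = &list,
*    };
*    vkGetPhysicalDeviceFormatProperties2(pdev, format, &props);
*
* With pDrmFormatModifierProperties == NULL only the count (1, for
* DRM_FORMAT_MOD_LINEAR) is returned; a second call fills the array.
*/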
static VkResult
get_image_format_properties(struct panvk_physical_device *physical_device,
const VkPhysicalDeviceImageFormatInfo2 *info,
VkImageFormatProperties *pImageFormatProperties,
VkFormatFeatureFlags *p_feature_flags)
{
VkFormatProperties format_props;
VkFormatFeatureFlags format_feature_flags;
VkExtent3D maxExtent;
uint32_t maxMipLevels;
uint32_t maxArraySize;
VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
enum pipe_format format = vk_format_to_pipe_format(info->format);
get_format_properties(physical_device, info->format, &format_props);
switch (info->tiling) {
case VK_IMAGE_TILING_LINEAR:
format_feature_flags = format_props.linearTilingFeatures;
break;
case VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT:
/* The only difference between optimal and linear is currently whether
* depth/stencil attachments are allowed on depth/stencil formats.
* There's no reason to allow importing depth/stencil textures, so just
* disallow it and then this annoying edge case goes away.
*
* TODO: If anyone cares, we could enable this by looking at the
* modifier and checking if it's LINEAR or not.
*/
if (util_format_is_depth_or_stencil(format))
goto unsupported;
assert(format_props.optimalTilingFeatures == format_props.linearTilingFeatures);
/* fallthrough */
case VK_IMAGE_TILING_OPTIMAL:
format_feature_flags = format_props.optimalTilingFeatures;
break;
default:
unreachable("bad VkPhysicalDeviceImageFormatInfo2");
}
if (format_feature_flags == 0)
goto unsupported;
if (info->type != VK_IMAGE_TYPE_2D &&
util_format_is_depth_or_stencil(format))
goto unsupported;
switch (info->type) {
default:
unreachable("bad vkimage type");
case VK_IMAGE_TYPE_1D:
maxExtent.width = 16384;
maxExtent.height = 1;
maxExtent.depth = 1;
maxMipLevels = 15; /* log2(maxWidth) + 1 */
maxArraySize = 2048;
break;
case VK_IMAGE_TYPE_2D:
maxExtent.width = 16384;
maxExtent.height = 16384;
maxExtent.depth = 1;
maxMipLevels = 15; /* log2(maxWidth) + 1 */
maxArraySize = 2048;
break;
case VK_IMAGE_TYPE_3D:
maxExtent.width = 2048;
maxExtent.height = 2048;
maxExtent.depth = 2048;
maxMipLevels = 12; /* log2(maxWidth) + 1 */
maxArraySize = 1;
break;
}
if (info->tiling == VK_IMAGE_TILING_OPTIMAL &&
info->type == VK_IMAGE_TYPE_2D &&
(format_feature_flags &
(VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
!(info->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) &&
!(info->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
sampleCounts |= VK_SAMPLE_COUNT_4_BIT;
}
if (info->usage & VK_IMAGE_USAGE_SAMPLED_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
goto unsupported;
}
}
if (info->usage & VK_IMAGE_USAGE_STORAGE_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
goto unsupported;
}
}
if (info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
goto unsupported;
}
}
if (info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
if (!(format_feature_flags &
VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
goto unsupported;
}
}
*pImageFormatProperties = (VkImageFormatProperties) {
.maxExtent = maxExtent,
.maxMipLevels = maxMipLevels,
.maxArrayLayers = maxArraySize,
.sampleCounts = sampleCounts,
/* FINISHME: Accurately calculate
* VkImageFormatProperties::maxResourceSize.
*/
.maxResourceSize = UINT32_MAX,
};
if (p_feature_flags)
*p_feature_flags = format_feature_flags;
return VK_SUCCESS;
unsupported:
*pImageFormatProperties = (VkImageFormatProperties) {
.maxExtent = { 0, 0, 0 },
.maxMipLevels = 0,
.maxArrayLayers = 0,
.sampleCounts = 0,
.maxResourceSize = 0,
};
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
VkResult
panvk_GetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice,
VkFormat format,
VkImageType type,
VkImageTiling tiling,
VkImageUsageFlags usage,
VkImageCreateFlags createFlags,
VkImageFormatProperties *pImageFormatProperties)
{
VK_FROM_HANDLE(panvk_physical_device, physical_device, physicalDevice);
const VkPhysicalDeviceImageFormatInfo2 info = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
.pNext = NULL,
.format = format,
.type = type,
.tiling = tiling,
.usage = usage,
.flags = createFlags,
};
return get_image_format_properties(physical_device, &info,
pImageFormatProperties, NULL);
}
static VkResult
panvk_get_external_image_format_properties(const struct panvk_physical_device *physical_device,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkExternalMemoryHandleTypeFlagBits handleType,
VkExternalMemoryProperties *external_properties)
{
VkExternalMemoryFeatureFlagBits flags = 0;
VkExternalMemoryHandleTypeFlags export_flags = 0;
VkExternalMemoryHandleTypeFlags compat_flags = 0;
/* From the Vulkan 1.1.98 spec:
*
* If handleType is not compatible with the format, type, tiling,
* usage, and flags specified in VkPhysicalDeviceImageFormatInfo2,
* then vkGetPhysicalDeviceImageFormatProperties2 returns
* VK_ERROR_FORMAT_NOT_SUPPORTED.
*/
switch (handleType) {
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
switch (pImageFormatInfo->type) {
case VK_IMAGE_TYPE_2D:
flags = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT |
VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
compat_flags = export_flags =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
break;
default:
return vk_errorf(physical_device->instance, VK_ERROR_FORMAT_NOT_SUPPORTED,
"VkExternalMemoryHandleTypeFlagBits(0x%x) unsupported for VkImageType(%d)",
handleType, pImageFormatInfo->type);
}
break;
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:
flags = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
compat_flags = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
break;
default:
return vk_errorf(physical_device->instance, VK_ERROR_FORMAT_NOT_SUPPORTED,
"VkExternalMemoryHandleTypeFlagBits(0x%x) unsupported",
handleType);
}
*external_properties = (VkExternalMemoryProperties) {
.externalMemoryFeatures = flags,
.exportFromImportedHandleTypes = export_flags,
.compatibleHandleTypes = compat_flags,
};
return VK_SUCCESS;
}
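/* Example (hedged): a dma-buf capability query reaches this helper by
* chaining a VkPhysicalDeviceExternalImageFormatInfo with
* .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT into
* VkPhysicalDeviceImageFormatInfo2::pNext, and a
* VkExternalImageFormatProperties into VkImageFormatProperties2::pNext,
* before calling vkGetPhysicalDeviceImageFormatProperties2().
*/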
VkResult
panvk_GetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *base_info,
VkImageFormatProperties2 *base_props)
{
VK_FROM_HANDLE(panvk_physical_device, physical_device, physicalDevice);
const VkPhysicalDeviceExternalImageFormatInfo *external_info = NULL;
const VkPhysicalDeviceImageViewImageFormatInfoEXT *image_view_info = NULL;
VkExternalImageFormatProperties *external_props = NULL;
VkFilterCubicImageViewImageFormatPropertiesEXT *cubic_props = NULL;
VkFormatFeatureFlags format_feature_flags;
VkSamplerYcbcrConversionImageFormatProperties *ycbcr_props = NULL;
VkResult result;
result = get_image_format_properties(physical_device, base_info,
&base_props->imageFormatProperties,
&format_feature_flags);
if (result != VK_SUCCESS)
return result;
/* Extract input structs */
vk_foreach_struct_const(s, base_info->pNext)
{
switch (s->sType) {
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
external_info = (const void *) s;
break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
image_view_info = (const void *) s;
break;
default:
break;
}
}
/* Extract output structs */
vk_foreach_struct(s, base_props->pNext)
{
switch (s->sType) {
case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
external_props = (void *) s;
break;
case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
cubic_props = (void *) s;
break;
case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
ycbcr_props = (void *) s;
break;
default:
break;
}
}
/* From the Vulkan 1.0.42 spec:
*
* If handleType is 0, vkGetPhysicalDeviceImageFormatProperties2 will
* behave as if VkPhysicalDeviceExternalImageFormatInfo was not
* present and VkExternalImageFormatProperties will be ignored.
*/
if (external_info && external_info->handleType != 0) {
result = panvk_get_external_image_format_properties(physical_device,
base_info,
external_info->handleType,
&external_props->externalMemoryProperties);
if (result != VK_SUCCESS)
goto fail;
}
if (cubic_props) {
/* Note: the blob only allows cubic filtering for 2D and 2D-array views.
* It's likely we could enable it for 1D and CUBE too, but that needs
* testing.
*/
if (image_view_info &&
(image_view_info->imageViewType == VK_IMAGE_VIEW_TYPE_2D ||
image_view_info->imageViewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) &&
(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT)) {
cubic_props->filterCubic = true;
cubic_props->filterCubicMinmax = true;
} else {
cubic_props->filterCubic = false;
cubic_props->filterCubicMinmax = false;
}
}
if (ycbcr_props)
ycbcr_props->combinedImageSamplerDescriptorCount = 1;
return VK_SUCCESS;
fail:
if (result == VK_ERROR_FORMAT_NOT_SUPPORTED) {
/* From the Vulkan 1.0.42 spec:
*
* If the combination of parameters to
* vkGetPhysicalDeviceImageFormatProperties2 is not supported by
* the implementation for use in vkCreateImage, then all members of
* imageFormatProperties will be filled with zero.
*/
base_props->imageFormatProperties = (VkImageFormatProperties) {};
}
return result;
}
void
panvk_GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice,
VkFormat format,
VkImageType type,
uint32_t samples,
VkImageUsageFlags usage,
VkImageTiling tiling,
uint32_t *pNumProperties,
VkSparseImageFormatProperties *pProperties)
{
panvk_stub();
}
void
panvk_GetPhysicalDeviceSparseImageFormatProperties2(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSparseImageFormatInfo2 *pFormatInfo,
uint32_t *pPropertyCount,
VkSparseImageFormatProperties2 *pProperties)
{
panvk_stub();
}
void
panvk_GetPhysicalDeviceExternalBufferProperties(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo,
VkExternalBufferProperties *pExternalBufferProperties)
{
panvk_stub();
}

View File

@ -0,0 +1,438 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from tu_image.c which is:
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include "panfrost-quirks.h"
#include "util/debug.h"
#include "util/u_atomic.h"
#include "vk_format.h"
#include "vk_object.h"
#include "vk_util.h"
#include "drm-uapi/drm_fourcc.h"
unsigned
panvk_image_get_plane_size(const struct panvk_image *image, unsigned plane)
{
assert(!plane);
return image->pimage.layout.data_size;
}
unsigned
panvk_image_get_total_size(const struct panvk_image *image)
{
assert(util_format_get_num_planes(image->pimage.layout.format) == 1);
return image->pimage.layout.data_size;
}
static enum mali_texture_dimension
panvk_image_type_to_mali_tex_dim(VkImageType type)
{
switch (type) {
case VK_IMAGE_TYPE_1D: return MALI_TEXTURE_DIMENSION_1D;
case VK_IMAGE_TYPE_2D: return MALI_TEXTURE_DIMENSION_2D;
case VK_IMAGE_TYPE_3D: return MALI_TEXTURE_DIMENSION_3D;
default: unreachable("Invalid image type");
}
}
static VkResult
panvk_image_create(VkDevice _device,
const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc,
VkImage *pImage,
uint64_t modifier,
const VkSubresourceLayout *plane_layouts)
{
VK_FROM_HANDLE(panvk_device, device, _device);
const struct panfrost_device *pdev = &device->physical_device->pdev;
struct panvk_image *image = NULL;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
assert(pCreateInfo->mipLevels > 0);
assert(pCreateInfo->arrayLayers > 0);
assert(pCreateInfo->samples > 0);
assert(pCreateInfo->extent.width > 0);
assert(pCreateInfo->extent.height > 0);
assert(pCreateInfo->extent.depth > 0);
image = vk_object_zalloc(&device->vk, alloc, sizeof(*image),
VK_OBJECT_TYPE_IMAGE);
if (!image)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
image->type = pCreateInfo->imageType;
image->vk_format = pCreateInfo->format;
image->tiling = pCreateInfo->tiling;
image->usage = pCreateInfo->usage;
image->flags = pCreateInfo->flags;
image->extent = pCreateInfo->extent;
pan_image_layout_init(pdev, &image->pimage.layout, modifier,
vk_format_to_pipe_format(pCreateInfo->format),
panvk_image_type_to_mali_tex_dim(pCreateInfo->imageType),
pCreateInfo->extent.width, pCreateInfo->extent.height,
pCreateInfo->extent.depth, pCreateInfo->arrayLayers,
pCreateInfo->samples, pCreateInfo->mipLevels,
PAN_IMAGE_CRC_NONE, NULL);
image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i) {
if (pCreateInfo->pQueueFamilyIndices[i] == VK_QUEUE_FAMILY_EXTERNAL)
image->queue_family_mask |= (1u << PANVK_MAX_QUEUE_FAMILIES) - 1u;
else
image->queue_family_mask |= 1u << pCreateInfo->pQueueFamilyIndices[i];
}
}
if (vk_find_struct_const(pCreateInfo->pNext, EXTERNAL_MEMORY_IMAGE_CREATE_INFO))
image->shareable = true;
*pImage = panvk_image_to_handle(image);
return VK_SUCCESS;
}
static uint64_t
panvk_image_select_mod(VkDevice _device,
const VkImageCreateInfo *pCreateInfo,
const VkSubresourceLayout **plane_layouts)
{
VK_FROM_HANDLE(panvk_device, device, _device);
const struct panfrost_device *pdev = &device->physical_device->pdev;
enum pipe_format fmt = vk_format_to_pipe_format(pCreateInfo->format);
bool noafbc = !(device->physical_device->instance->debug_flags & PANVK_DEBUG_AFBC);
bool linear = device->physical_device->instance->debug_flags & PANVK_DEBUG_LINEAR;
*plane_layouts = NULL;
if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR)
return DRM_FORMAT_MOD_LINEAR;
if (pCreateInfo->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
const VkImageDrmFormatModifierListCreateInfoEXT *mod_info =
vk_find_struct_const(pCreateInfo->pNext,
IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
const VkImageDrmFormatModifierExplicitCreateInfoEXT *drm_explicit_info =
vk_find_struct_const(pCreateInfo->pNext,
IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
assert(mod_info || drm_explicit_info);
uint64_t modifier;
if (mod_info) {
modifier = DRM_FORMAT_MOD_LINEAR;
for (unsigned i = 0; i < mod_info->drmFormatModifierCount; i++) {
if (drm_is_afbc(mod_info->pDrmFormatModifiers[i]) && !noafbc) {
modifier = mod_info->pDrmFormatModifiers[i];
break;
}
}
} else {
modifier = drm_explicit_info->drmFormatModifier;
assert(modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED ||
(drm_is_afbc(modifier) && !noafbc));
*plane_layouts = drm_explicit_info->pPlaneLayouts;
}
return modifier;
}
const struct wsi_image_create_info *wsi_info =
vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA);
if (wsi_info && wsi_info->scanout)
return DRM_FORMAT_MOD_LINEAR;
assert(pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL);
if (linear)
return DRM_FORMAT_MOD_LINEAR;
/* Image stores don't work on AFBC images */
if (pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT)
return DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED;
/* AFBC does not support layered multisampling */
if (pCreateInfo->samples > 1)
return DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED;
if (pdev->quirks & MIDGARD_NO_AFBC)
return DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED;
/* Only a small selection of formats are AFBC'able */
if (!panfrost_format_supports_afbc(pdev, fmt))
return DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED;
/* 3D AFBC is only supported on Bifrost v7+. It's supposed to
* be supported on Midgard but it doesn't seem to work.
*/
if (pCreateInfo->imageType == VK_IMAGE_TYPE_3D && pdev->arch < 7)
return DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED;
/* For one tile, AFBC is a loss compared to u-interleaved */
if (pCreateInfo->extent.width <= 16 && pCreateInfo->extent.height <= 16)
return DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED;
if (noafbc)
return DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED;
uint64_t afbc_type = AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
AFBC_FORMAT_MOD_SPARSE;
if (panfrost_afbc_can_ytr(fmt))
afbc_type |= AFBC_FORMAT_MOD_YTR;
return DRM_FORMAT_MOD_ARM_AFBC(afbc_type);
}
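/* Worked example (illustrative): a 1920x1080 2D VK_FORMAT_R8G8B8A8_UNORM
* image with OPTIMAL tiling and color-attachment/sampled usage, on a GPU
* without the MIDGARD_NO_AFBC quirk and with PANVK_DEBUG_AFBC set, falls
* through every early return and gets
* DRM_FORMAT_MOD_ARM_AFBC(BLOCK_SIZE_16x16 | SPARSE), plus YTR if
* panfrost_afbc_can_ytr() allows it for that format. Adding
* VK_IMAGE_USAGE_STORAGE_BIT would instead force
* DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED.
*/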
VkResult
panvk_CreateImage(VkDevice device,
const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkImage *pImage)
{
const VkSubresourceLayout *plane_layouts;
uint64_t modifier = panvk_image_select_mod(device, pCreateInfo, &plane_layouts);
return panvk_image_create(device, pCreateInfo, pAllocator, pImage, modifier, plane_layouts);
}
void
panvk_DestroyImage(VkDevice _device,
VkImage _image,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_image, image, _image);
if (!image)
return;
vk_object_free(&device->vk, pAllocator, image);
}
static unsigned
panvk_plane_index(VkFormat format, VkImageAspectFlags aspect_mask)
{
switch (aspect_mask) {
default:
return 0;
case VK_IMAGE_ASPECT_PLANE_1_BIT:
return 1;
case VK_IMAGE_ASPECT_PLANE_2_BIT:
return 2;
case VK_IMAGE_ASPECT_STENCIL_BIT:
/* Stencil is a separate plane (plane 1) only for the combined
* VK_FORMAT_D32_SFLOAT_S8_UINT layout; everything else keeps it in
* plane 0.
*/
return format == VK_FORMAT_D32_SFLOAT_S8_UINT;
}
}
void
panvk_GetImageSubresourceLayout(VkDevice _device,
VkImage _image,
const VkImageSubresource *pSubresource,
VkSubresourceLayout *pLayout)
{
VK_FROM_HANDLE(panvk_image, image, _image);
unsigned plane = panvk_plane_index(image->vk_format, pSubresource->aspectMask);
assert(plane < PANVK_MAX_PLANES);
const struct pan_image_slice_layout *slice_layout =
&image->pimage.layout.slices[pSubresource->mipLevel];
pLayout->offset = slice_layout->offset +
(pSubresource->arrayLayer *
image->pimage.layout.array_stride);
pLayout->size = slice_layout->size;
pLayout->rowPitch = slice_layout->line_stride;
pLayout->arrayPitch = image->pimage.layout.array_stride;
pLayout->depthPitch = slice_layout->surface_stride;
}
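/* Worked example (illustrative numbers): for mipLevel 2, arrayLayer 3 on
* a layout with slices[2].offset == 0x9000 and array_stride == 0x40000,
* the reported offset is 0x9000 + 3 * 0x40000 == 0xc9000, i.e. layers are
* the outer dimension and mip slices the inner one.
*/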
static enum mali_texture_dimension
panvk_view_type_to_mali_tex_dim(VkImageViewType type)
{
switch (type) {
case VK_IMAGE_VIEW_TYPE_1D:
case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
return MALI_TEXTURE_DIMENSION_1D;
case VK_IMAGE_VIEW_TYPE_2D:
case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
return MALI_TEXTURE_DIMENSION_2D;
case VK_IMAGE_VIEW_TYPE_3D:
return MALI_TEXTURE_DIMENSION_3D;
case VK_IMAGE_VIEW_TYPE_CUBE:
case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
return MALI_TEXTURE_DIMENSION_CUBE;
default:
unreachable("Invalid view type");
}
}
static void
panvk_convert_swizzle(const VkComponentMapping *in,
unsigned char *out)
{
const VkComponentSwizzle *comp = &in->r;
for (unsigned i = 0; i < 4; i++) {
switch (comp[i]) {
case VK_COMPONENT_SWIZZLE_IDENTITY:
out[i] = PIPE_SWIZZLE_X + i;
break;
case VK_COMPONENT_SWIZZLE_ZERO:
out[i] = PIPE_SWIZZLE_0;
break;
case VK_COMPONENT_SWIZZLE_ONE:
out[i] = PIPE_SWIZZLE_1;
break;
case VK_COMPONENT_SWIZZLE_R:
out[i] = PIPE_SWIZZLE_X;
break;
case VK_COMPONENT_SWIZZLE_G:
out[i] = PIPE_SWIZZLE_Y;
break;
case VK_COMPONENT_SWIZZLE_B:
out[i] = PIPE_SWIZZLE_Z;
break;
case VK_COMPONENT_SWIZZLE_A:
out[i] = PIPE_SWIZZLE_W;
break;
default:
unreachable("Invalid swizzle");
}
}
}
VkResult
panvk_CreateImageView(VkDevice _device,
const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkImageView *pView)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_image, image, pCreateInfo->image);
struct panvk_image_view *view;
view = vk_object_zalloc(&device->vk, pAllocator, sizeof(*view),
VK_OBJECT_TYPE_IMAGE_VIEW);
if (view == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
view->pview.format = vk_format_to_pipe_format(pCreateInfo->format);
if (pCreateInfo->subresourceRange.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
view->pview.format = util_format_get_depth_only(view->pview.format);
else if (pCreateInfo->subresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
view->pview.format = util_format_stencil_only(view->pview.format);
view->pview.dim = panvk_view_type_to_mali_tex_dim(pCreateInfo->viewType);
view->pview.first_level = pCreateInfo->subresourceRange.baseMipLevel;
view->pview.last_level = pCreateInfo->subresourceRange.baseMipLevel +
pCreateInfo->subresourceRange.levelCount - 1;
view->pview.first_layer = pCreateInfo->subresourceRange.baseArrayLayer;
view->pview.last_layer = pCreateInfo->subresourceRange.baseArrayLayer +
pCreateInfo->subresourceRange.layerCount - 1;
panvk_convert_swizzle(&pCreateInfo->components, view->pview.swizzle);
view->pview.image = &image->pimage;
view->pview.nr_samples = image->pimage.layout.nr_samples;
view->vk_format = pCreateInfo->format;
struct panfrost_device *pdev = &device->physical_device->pdev;
unsigned bo_size =
panfrost_estimate_texture_payload_size(pdev, &view->pview);
unsigned surf_descs_offset = 0;
if (!pan_is_bifrost(pdev)) {
bo_size += MALI_MIDGARD_TEXTURE_LENGTH;
surf_descs_offset = MALI_MIDGARD_TEXTURE_LENGTH;
}
view->bo = panfrost_bo_create(pdev, bo_size, 0, "Texture descriptor");
struct panfrost_ptr surf_descs = {
.cpu = view->bo->ptr.cpu + surf_descs_offset,
.gpu = view->bo->ptr.gpu + surf_descs_offset,
};
void *tex_desc = pan_is_bifrost(pdev) ?
&view->bifrost.tex_desc : view->bo->ptr.cpu;
panfrost_new_texture(pdev, &view->pview, tex_desc, &surf_descs);
*pView = panvk_image_view_to_handle(view);
return VK_SUCCESS;
}
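/* Layout note (a sketch of what the allocation above produces): on
* Midgard the texture descriptor lives at the start of the BO, followed
* by the per-surface pointers, while on Bifrost the fixed-size texture
* descriptor stays in view->bifrost.tex_desc and the BO only holds the
* surface descriptors:
*
*    Midgard BO: [ MALI_MIDGARD_TEXTURE | surface descriptors ... ]
*    Bifrost BO: [ surface descriptors ... ]
*/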
void
panvk_DestroyImageView(VkDevice _device,
VkImageView _view,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_image_view, view, _view);
if (!view)
return;
panfrost_bo_unreference(view->bo);
vk_object_free(&device->vk, pAllocator, view);
}
VkResult
panvk_CreateBufferView(VkDevice _device,
const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkBufferView *pView)
{
panvk_stub();
return VK_SUCCESS;
}
void
panvk_DestroyBufferView(VkDevice _device,
VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator)
{
panvk_stub();
}
VkResult
panvk_GetImageDrmFormatModifierPropertiesEXT(VkDevice device,
VkImage _image,
VkImageDrmFormatModifierPropertiesEXT *pProperties)
{
VK_FROM_HANDLE(panvk_image, image, _image);
assert(pProperties->sType == VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT);
pProperties->drmFormatModifier = image->pimage.layout.modifier;
return VK_SUCCESS;
}

View File

@ -0,0 +1,167 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nir/nir_builder.h"
#include "pan_encoder.h"
#include "panvk_private.h"
#include "vk_format.h"
void
panvk_CmdBlitImage(VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
VkImage destImage,
VkImageLayout destImageLayout,
uint32_t regionCount,
const VkImageBlit *pRegions,
VkFilter filter)
{
panvk_stub();
}
void
panvk_CmdCopyImage(VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
VkImage destImage,
VkImageLayout destImageLayout,
uint32_t regionCount,
const VkImageCopy *pRegions)
{
panvk_stub();
}
void
panvk_CmdCopyBufferToImage(VkCommandBuffer commandBuffer,
VkBuffer srcBuffer,
VkImage destImage,
VkImageLayout destImageLayout,
uint32_t regionCount,
const VkBufferImageCopy *pRegions)
{
panvk_stub();
}
void
panvk_CmdCopyImageToBuffer(VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
VkBuffer destBuffer,
uint32_t regionCount,
const VkBufferImageCopy *pRegions)
{
panvk_stub();
}
void
panvk_CmdCopyBuffer(VkCommandBuffer commandBuffer,
VkBuffer srcBuffer,
VkBuffer destBuffer,
uint32_t regionCount,
const VkBufferCopy *pRegions)
{
panvk_stub();
}
void
panvk_CmdResolveImage(VkCommandBuffer cmd_buffer_h,
VkImage src_image_h,
VkImageLayout src_image_layout,
VkImage dest_image_h,
VkImageLayout dest_image_layout,
uint32_t region_count,
const VkImageResolve *regions)
{
panvk_stub();
}
void
panvk_CmdFillBuffer(VkCommandBuffer commandBuffer,
VkBuffer dstBuffer,
VkDeviceSize dstOffset,
VkDeviceSize fillSize,
uint32_t data)
{
panvk_stub();
}
void
panvk_CmdUpdateBuffer(VkCommandBuffer commandBuffer,
VkBuffer dstBuffer,
VkDeviceSize dstOffset,
VkDeviceSize dataSize,
const void *pData)
{
panvk_stub();
}
void
panvk_CmdClearColorImage(VkCommandBuffer commandBuffer,
VkImage image,
VkImageLayout imageLayout,
const VkClearColorValue *pColor,
uint32_t rangeCount,
const VkImageSubresourceRange *pRanges)
{
panvk_stub();
}
void
panvk_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer,
VkImage image_h,
VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil,
uint32_t rangeCount,
const VkImageSubresourceRange *pRanges)
{
panvk_stub();
}
void
panvk_CmdClearAttachments(VkCommandBuffer commandBuffer,
uint32_t attachmentCount,
const VkClearAttachment *pAttachments,
uint32_t rectCount,
const VkClearRect *pRects)
{
panvk_stub();
}
void
panvk_meta_init(struct panvk_physical_device *dev)
{
panfrost_pool_init(&dev->meta.bin_pool, NULL, &dev->pdev, PAN_BO_EXECUTE,
16 * 1024, "panvk_meta binary pool", false, true);
panfrost_pool_init(&dev->meta.desc_pool, NULL, &dev->pdev, 0,
16 * 1024, "panvk_meta descriptor pool", false, true);
}
void
panvk_meta_cleanup(struct panvk_physical_device *dev)
{
panfrost_pool_cleanup(&dev->meta.desc_pool);
panfrost_pool_cleanup(&dev->meta.bin_pool);
}

View File

@ -0,0 +1,211 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from tu_pass.c which is:
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include "vk_format.h"
#include "vk_util.h"
VkResult
panvk_CreateRenderPass2(VkDevice _device,
const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkRenderPass *pRenderPass)
{
VK_FROM_HANDLE(panvk_device, device, _device);
struct panvk_render_pass *pass;
size_t size;
size_t attachments_offset;
const VkRenderPassMultiviewCreateInfo *multiview_info = NULL;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2);
size = sizeof(*pass);
size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
attachments_offset = size;
size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
pass = vk_object_zalloc(&device->vk, pAllocator, size,
VK_OBJECT_TYPE_RENDER_PASS);
if (pass == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
pass->attachment_count = pCreateInfo->attachmentCount;
pass->subpass_count = pCreateInfo->subpassCount;
pass->attachments = (void *) pass + attachments_offset;
vk_foreach_struct_const(ext, pCreateInfo->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
multiview_info = (const VkRenderPassMultiviewCreateInfo *) ext;
break;
default:
break;
}
}
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
struct panvk_render_pass_attachment *att = &pass->attachments[i];
att->format = vk_format_to_pipe_format(pCreateInfo->pAttachments[i].format);
att->samples = pCreateInfo->pAttachments[i].samples;
att->load_op = pCreateInfo->pAttachments[i].loadOp;
att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
att->initial_layout = pCreateInfo->pAttachments[i].initialLayout;
att->final_layout = pCreateInfo->pAttachments[i].finalLayout;
att->store_op = pCreateInfo->pAttachments[i].storeOp;
att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
att->clear_subpass = ~0;
}
uint32_t subpass_attachment_count = 0;
struct panvk_subpass_attachment *p;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription2 *desc = &pCreateInfo->pSubpasses[i];
subpass_attachment_count +=
desc->inputAttachmentCount + desc->colorAttachmentCount +
(desc->pResolveAttachments ? desc->colorAttachmentCount : 0) +
(desc->pDepthStencilAttachment != NULL);
}
if (subpass_attachment_count) {
pass->subpass_attachments =
vk_alloc2(&device->vk.alloc, pAllocator,
subpass_attachment_count *
sizeof(struct panvk_subpass_attachment),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass->subpass_attachments == NULL) {
vk_object_free(&device->vk, pAllocator, pass);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
p = pass->subpass_attachments;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription2 *desc = &pCreateInfo->pSubpasses[i];
struct panvk_subpass *subpass = &pass->subpasses[i];
subpass->input_count = desc->inputAttachmentCount;
subpass->color_count = desc->colorAttachmentCount;
if (multiview_info)
subpass->view_mask = multiview_info->pViewMasks[i];
if (desc->inputAttachmentCount > 0) {
subpass->input_attachments = p;
p += desc->inputAttachmentCount;
for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) {
subpass->input_attachments[j] = (struct panvk_subpass_attachment) {
.idx = desc->pInputAttachments[j].attachment,
.layout = desc->pInputAttachments[j].layout,
};
if (desc->pInputAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
pass->attachments[desc->pInputAttachments[j].attachment]
.view_mask |= subpass->view_mask;
}
}
if (desc->colorAttachmentCount > 0) {
subpass->color_attachments = p;
p += desc->colorAttachmentCount;
for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
uint32_t idx = desc->pColorAttachments[j].attachment;
subpass->color_attachments[j] = (struct panvk_subpass_attachment) {
.idx = idx,
.layout = desc->pColorAttachments[j].layout,
};
if (idx != VK_ATTACHMENT_UNUSED) {
pass->attachments[idx].view_mask |= subpass->view_mask;
if (pass->attachments[idx].clear_subpass == ~0) {
pass->attachments[idx].clear_subpass = i;
subpass->color_attachments[j].clear = true;
}
}
}
}
if (desc->pResolveAttachments) {
subpass->resolve_attachments = p;
p += desc->colorAttachmentCount;
for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
uint32_t idx = desc->pResolveAttachments[j].attachment;
subpass->resolve_attachments[j] = (struct panvk_subpass_attachment) {
.idx = idx,
.layout = desc->pResolveAttachments[j].layout,
};
if (idx != VK_ATTACHMENT_UNUSED)
pass->attachments[idx].view_mask |= subpass->view_mask;
}
}
unsigned idx = desc->pDepthStencilAttachment ?
desc->pDepthStencilAttachment->attachment :
VK_ATTACHMENT_UNUSED;
subpass->zs_attachment.idx = idx;
if (idx != VK_ATTACHMENT_UNUSED) {
subpass->zs_attachment.layout = desc->pDepthStencilAttachment->layout;
pass->attachments[idx].view_mask |= subpass->view_mask;
if (pass->attachments[idx].clear_subpass == ~0) {
pass->attachments[idx].clear_subpass = i;
subpass->zs_attachment.clear = true;
}
}
}
*pRenderPass = panvk_render_pass_to_handle(pass);
return VK_SUCCESS;
}
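/* Example (illustrative): with two subpasses both writing color
* attachment 0, the loop above records clear_subpass = 0 on the
* attachment and sets .clear = true only on subpass 0's reference, so the
* load-op clear happens exactly once, in the first subpass that uses the
* attachment.
*/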
void
panvk_DestroyRenderPass(VkDevice _device,
VkRenderPass _pass,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_render_pass, pass, _pass);
if (!pass)
return;
vk_free2(&device->vk.alloc, pAllocator, pass->subpass_attachments);
vk_object_free(&device->vk, pAllocator, pass);
}
void
panvk_GetRenderAreaGranularity(VkDevice _device,
VkRenderPass renderPass,
VkExtent2D *pGranularity)
{
panvk_stub();
}

File diff suppressed because it is too large

View File

@ -0,0 +1,86 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from tu_pipeline_cache.c which is:
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
VkResult
panvk_CreatePipelineCache(VkDevice _device,
const VkPipelineCacheCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineCache *pPipelineCache)
{
VK_FROM_HANDLE(panvk_device, device, _device);
struct panvk_pipeline_cache *cache;
cache = vk_object_alloc(&device->vk, pAllocator, sizeof(*cache),
VK_OBJECT_TYPE_PIPELINE_CACHE);
if (cache == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pAllocator)
cache->alloc = *pAllocator;
else
cache->alloc = device->vk.alloc;
*pPipelineCache = panvk_pipeline_cache_to_handle(cache);
return VK_SUCCESS;
}
void
panvk_DestroyPipelineCache(VkDevice _device,
VkPipelineCache _cache,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_pipeline_cache, cache, _cache);
vk_object_free(&device->vk, pAllocator, cache);
}
VkResult
panvk_GetPipelineCacheData(VkDevice _device,
VkPipelineCache _cache,
size_t *pDataSize,
void *pData)
{
panvk_stub();
return VK_SUCCESS;
}
VkResult
panvk_MergePipelineCaches(VkDevice _device,
VkPipelineCache destCache,
uint32_t srcCacheCount,
const VkPipelineCache *pSrcCaches)
{
panvk_stub();
return VK_SUCCESS;
}

View File

@ -0,0 +1,994 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* derived from tu_private.h driver which is:
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef PANVK_PRIVATE_H
#define PANVK_PRIVATE_H
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_VALGRIND
#include <memcheck.h>
#include <valgrind.h>
#define VG(x) x
#else
#define VG(x)
#endif
#include "c11/threads.h"
#include "compiler/shader_enums.h"
#include "util/list.h"
#include "util/macros.h"
#include "vk_alloc.h"
#include "vk_device.h"
#include "vk_instance.h"
#include "vk_object.h"
#include "vk_physical_device.h"
#include "wsi_common.h"
#include "drm-uapi/panfrost_drm.h"
#include "midgard/midgard_compile.h"
#include "pan_blend.h"
#include "pan_blitter.h"
#include "pan_cs.h"
#include "pan_device.h"
#include "pan_pool.h"
#include "pan_texture.h"
#include "pan_scoreboard.h"
#include "pan_shader.h"
#include "panvk_varyings.h"
/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;
#include <vulkan/vk_android_native_buffer.h>
#include <vulkan/vk_icd.h>
#include <vulkan/vulkan.h>
#include "panvk_entrypoints.h"
#define MAX_BIND_POINTS 2 /* compute + graphics */
#define MAX_VBS 16
#define MAX_VERTEX_ATTRIBS 16
#define MAX_RTS 8
#define MAX_VSC_PIPES 32
#define MAX_VIEWPORTS 1
#define MAX_SCISSORS 16
#define MAX_DISCARD_RECTANGLES 4
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DYNAMIC_UNIFORM_BUFFERS 16
#define MAX_DYNAMIC_STORAGE_BUFFERS 8
#define MAX_DYNAMIC_BUFFERS \
(MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
#define MAX_SAMPLES_LOG2 4
#define NUM_META_FS_KEYS 13
#define PANVK_MAX_DRM_DEVICES 1
#define MAX_VIEWS 8
#define NUM_DEPTH_CLEAR_PIPELINES 3
#define panvk_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
/* Whenever we generate an error, pass it through this function. Useful for
* debugging, where we can break on it. Only call at error site, not when
* propagating errors. Might be useful to plug in a stack trace here.
*/
struct panvk_instance;
VkResult
__vk_errorf(struct panvk_instance *instance,
VkResult error,
const char *file,
int line,
const char *format,
...);
#define vk_error(instance, error) \
__vk_errorf(instance, error, __FILE__, __LINE__, NULL)
#define vk_errorf(instance, error, format, ...) \
__vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__)
void
panvk_logi(const char *format, ...) panvk_printflike(1, 2);
void
panvk_logi_v(const char *format, va_list va);
#define panvk_stub() assert(!"stub")
struct panvk_meta {
struct pan_pool bin_pool;
struct pan_pool desc_pool;
};
struct panvk_physical_device {
struct vk_physical_device vk;
/* The API-agnostic device object. */
struct panfrost_device pdev;
struct panvk_instance *instance;
char path[20];
char name[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
uint8_t driver_uuid[VK_UUID_SIZE];
uint8_t device_uuid[VK_UUID_SIZE];
uint8_t cache_uuid[VK_UUID_SIZE];
struct wsi_device wsi_device;
struct panvk_meta meta;
int local_fd;
int master_fd;
};
void
panvk_meta_init(struct panvk_physical_device *dev);
void
panvk_meta_cleanup(struct panvk_physical_device *dev);
enum panvk_debug_flags {
PANVK_DEBUG_STARTUP = 1 << 0,
PANVK_DEBUG_NIR = 1 << 1,
PANVK_DEBUG_TRACE = 1 << 2,
PANVK_DEBUG_SYNC = 1 << 3,
PANVK_DEBUG_AFBC = 1 << 4,
PANVK_DEBUG_LINEAR = 1 << 5,
};
struct panvk_instance {
struct vk_instance vk;
uint32_t api_version;
int physical_device_count;
struct panvk_physical_device physical_devices[PANVK_MAX_DRM_DEVICES];
enum panvk_debug_flags debug_flags;
};
VkResult
panvk_wsi_init(struct panvk_physical_device *physical_device);
void
panvk_wsi_finish(struct panvk_physical_device *physical_device);
bool
panvk_instance_extension_supported(const char *name);
uint32_t
panvk_physical_device_api_version(struct panvk_physical_device *dev);
bool
panvk_physical_device_extension_supported(struct panvk_physical_device *dev,
const char *name);
struct panvk_pipeline_cache {
struct vk_object_base base;
VkAllocationCallbacks alloc;
};
/* queue types */
#define PANVK_QUEUE_GENERAL 0
#define PANVK_MAX_QUEUE_FAMILIES 1
struct panvk_queue {
struct vk_object_base base;
struct panvk_device *device;
uint32_t queue_family_index;
VkDeviceQueueCreateFlags flags;
uint32_t sync;
};
struct panvk_device {
struct vk_device vk;
struct panvk_instance *instance;
struct panvk_queue *queues[PANVK_MAX_QUEUE_FAMILIES];
int queue_count[PANVK_MAX_QUEUE_FAMILIES];
struct panvk_physical_device *physical_device;
int _lost;
};
VkResult _panvk_device_set_lost(struct panvk_device *device,
const char *file, int line,
const char *msg, ...) PRINTFLIKE(4, 5);
#define panvk_device_set_lost(dev, ...) \
_panvk_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
static inline bool
panvk_device_is_lost(struct panvk_device *device)
{
return unlikely(p_atomic_read(&device->_lost));
}
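/* Usage sketch (hedged, hypothetical call site): entrypoints are expected
* to fail fast once the device is lost, e.g.:
*
*    if (panvk_device_is_lost(device))
*       return VK_ERROR_DEVICE_LOST;
*/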
struct panvk_batch {
struct list_head node;
struct util_dynarray jobs;
struct pan_scoreboard scoreboard;
struct {
const struct panvk_framebuffer *info;
struct panfrost_ptr desc;
} fb;
struct {
struct panfrost_bo *src, *dst;
} blit;
struct panfrost_ptr tls;
mali_ptr fragment_job;
struct {
struct pan_tiler_context ctx;
struct panfrost_ptr bifrost_descs;
union {
struct {
struct mali_bifrost_tiler_heap_packed heap;
struct mali_bifrost_tiler_packed tiler;
} bifrost;
struct mali_midgard_tiler_packed midgard;
} templ;
} tiler;
bool issued;
};
struct panvk_syncobj {
uint32_t permanent, temporary;
};
struct panvk_fence {
struct vk_object_base base;
struct panvk_syncobj syncobj;
};
struct panvk_semaphore {
struct vk_object_base base;
struct panvk_syncobj syncobj;
};
int
panvk_signal_syncobjs(struct panvk_device *device,
struct panvk_syncobj *syncobj1,
struct panvk_syncobj *syncobj2);
int
panvk_syncobj_to_fd(struct panvk_device *device,
struct panvk_syncobj *sync);
struct panvk_device_memory {
struct vk_object_base base;
struct panfrost_bo *bo;
};
struct panvk_descriptor {
union {
struct {
VkImageLayout layout;
struct panvk_image_view *view;
struct panvk_sampler *sampler;
} image;
struct {
struct panvk_buffer *buffer;
uint64_t offset;
uint64_t range;
} buffer_info;
struct panvk_buffer_view *buffer_view;
};
};
struct panvk_descriptor_set {
struct vk_object_base base;
struct panvk_descriptor_pool *pool;
const struct panvk_descriptor_set_layout *layout;
struct panvk_descriptor *descs;
struct mali_uniform_buffer_packed *ubos;
struct mali_midgard_sampler_packed *samplers;
union {
struct mali_bifrost_texture_packed *bifrost;
mali_ptr *midgard;
} textures;
};
#define MAX_SETS 4
struct panvk_descriptor_set_binding_layout {
VkDescriptorType type;
/* Number of array elements in this binding */
unsigned array_size;
/* Indices in the desc arrays */
unsigned desc_idx;
union {
struct {
unsigned sampler_idx;
unsigned tex_idx;
};
struct {
union {
unsigned ssbo_idx;
unsigned ubo_idx;
};
unsigned dynoffset_idx;
};
};
/* Shader stages affected by this set+binding */
uint16_t shader_stages;
struct panvk_sampler **immutable_samplers;
};
struct panvk_descriptor_set_layout {
struct vk_object_base base;
/* The create flags for this descriptor set layout */
VkDescriptorSetLayoutCreateFlags flags;
/* Shader stages affected by this descriptor set */
uint16_t shader_stages;
unsigned num_descs;
unsigned num_samplers;
unsigned num_textures;
unsigned num_ubos;
unsigned num_ssbos;
unsigned num_dynoffsets;
/* Number of bindings in this descriptor set */
uint32_t binding_count;
/* Bindings in this descriptor set */
struct panvk_descriptor_set_binding_layout bindings[0];
};
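/* Indexing sketch (illustrative, assumed layout-creation behavior):
* desc_idx and friends are flat per-set offsets. E.g. binding 0 with 2
* samplers and binding 1 with 3 UBOs would give bindings[0].desc_idx = 0,
* bindings[1].desc_idx = 2 and num_descs = 5, with sampler_idx/ubo_idx
* numbering each descriptor class independently.
*/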
struct panvk_pipeline_layout {
struct vk_object_base base;
unsigned char sha1[20];
unsigned num_samplers;
unsigned num_textures;
unsigned num_ubos;
unsigned num_ssbos;
unsigned num_dynoffsets;
uint32_t num_sets;
struct {
struct panvk_descriptor_set_layout *layout;
unsigned sampler_offset;
unsigned tex_offset;
unsigned ubo_offset;
unsigned ssbo_offset;
unsigned dynoffset_offset;
} sets[MAX_SETS];
};
struct panvk_desc_pool_counters {
unsigned samplers;
unsigned combined_image_samplers;
unsigned sampled_images;
unsigned storage_images;
unsigned uniform_texel_bufs;
unsigned storage_texel_bufs;
unsigned input_attachments;
unsigned uniform_bufs;
unsigned storage_bufs;
unsigned uniform_dyn_bufs;
unsigned storage_dyn_bufs;
unsigned sets;
};
struct panvk_descriptor_pool {
struct vk_object_base base;
struct panvk_desc_pool_counters max;
struct panvk_desc_pool_counters cur;
struct panvk_descriptor_set *sets;
};
struct panvk_buffer {
struct vk_object_base base;
VkDeviceSize size;
VkBufferUsageFlags usage;
VkBufferCreateFlags flags;
struct panfrost_bo *bo;
VkDeviceSize bo_offset;
};
enum panvk_dynamic_state_bits {
PANVK_DYNAMIC_VIEWPORT = 1 << 0,
PANVK_DYNAMIC_SCISSOR = 1 << 1,
PANVK_DYNAMIC_LINE_WIDTH = 1 << 2,
PANVK_DYNAMIC_DEPTH_BIAS = 1 << 3,
PANVK_DYNAMIC_BLEND_CONSTANTS = 1 << 4,
PANVK_DYNAMIC_DEPTH_BOUNDS = 1 << 5,
PANVK_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6,
PANVK_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7,
PANVK_DYNAMIC_STENCIL_REFERENCE = 1 << 8,
PANVK_DYNAMIC_DISCARD_RECTANGLE = 1 << 9,
PANVK_DYNAMIC_ALL = (1 << 10) - 1,
};
struct panvk_descriptor_state {
struct {
const struct panvk_descriptor_set *set;
struct panfrost_ptr dynoffsets;
} sets[MAX_SETS];
mali_ptr sysvals[MESA_SHADER_STAGES];
mali_ptr ubos;
mali_ptr textures;
mali_ptr samplers;
};
struct panvk_draw_info {
unsigned first_index;
unsigned index_count;
unsigned first_vertex;
unsigned vertex_count;
unsigned padded_vertex_count;
unsigned first_instance;
unsigned instance_count;
int vertex_offset;
unsigned offset_start;
struct mali_invocation_packed invocation;
struct {
mali_ptr varyings;
mali_ptr attributes;
mali_ptr push_constants;
} stages[MESA_SHADER_STAGES];
mali_ptr varying_bufs;
mali_ptr attribute_bufs;
mali_ptr textures;
mali_ptr samplers;
mali_ptr ubos;
mali_ptr position;
union {
mali_ptr psiz;
float line_width;
};
mali_ptr tls;
mali_ptr fb;
const struct pan_tiler_context *tiler_ctx;
mali_ptr fs_rsd;
mali_ptr viewport;
struct {
struct panfrost_ptr vertex;
struct panfrost_ptr tiler;
} jobs;
};
struct panvk_attrib_info {
unsigned buf;
unsigned offset;
enum pipe_format format;
};
struct panvk_attrib_buf_info {
bool special;
union {
struct {
unsigned stride;
bool per_instance;
};
unsigned special_id;
};
};
struct panvk_attribs_info {
struct panvk_attrib_info attrib[PAN_MAX_ATTRIBUTE];
unsigned attrib_count;
struct panvk_attrib_buf_info buf[PAN_MAX_ATTRIBUTE];
unsigned buf_count;
};
struct panvk_attrib_buf {
mali_ptr address;
unsigned size;
};
struct panvk_cmd_state {
VkPipelineBindPoint bind_point;
struct panvk_pipeline *pipeline;
uint32_t dirty;
struct panvk_varyings_info varyings;
mali_ptr fs_rsd;
struct {
float constants[8][4];
} blend;
struct {
struct pan_compute_dim wg_count;
} compute;
struct {
struct {
float constant_factor;
float clamp;
float slope_factor;
} depth_bias;
float line_width;
} rast;
struct {
struct panvk_attrib_buf bufs[MAX_VBS];
unsigned count;
mali_ptr attribs;
mali_ptr attrib_bufs;
} vb;
/* Index buffer */
struct {
struct panvk_buffer *buffer;
uint64_t offset;
uint32_t type;
uint32_t max_index_count;
uint8_t index_size;
uint64_t index_va;
} ib;
struct {
struct {
uint8_t compare_mask;
uint8_t write_mask;
uint8_t ref;
} s_front, s_back;
} zs;
const struct panvk_render_pass *pass;
const struct panvk_subpass *subpass;
const struct panvk_framebuffer *framebuffer;
VkRect2D render_area;
struct panvk_clear_value *clear;
mali_ptr vpd;
VkViewport viewport;
VkRect2D scissor;
struct panvk_batch *batch;
};
struct panvk_cmd_pool {
struct vk_object_base base;
VkAllocationCallbacks alloc;
uint32_t queue_family_index;
};
enum panvk_cmd_buffer_status {
PANVK_CMD_BUFFER_STATUS_INVALID,
PANVK_CMD_BUFFER_STATUS_INITIAL,
PANVK_CMD_BUFFER_STATUS_RECORDING,
PANVK_CMD_BUFFER_STATUS_EXECUTABLE,
PANVK_CMD_BUFFER_STATUS_PENDING,
};
struct panvk_cmd_buffer {
struct vk_object_base base;
struct panvk_device *device;
struct panvk_cmd_pool *pool;
struct pan_pool desc_pool;
struct pan_pool varying_pool;
struct pan_pool tls_pool;
struct list_head batches;
VkCommandBufferUsageFlags usage_flags;
VkCommandBufferLevel level;
enum panvk_cmd_buffer_status status;
struct panvk_cmd_state state;
uint32_t queue_family_index;
uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
VkShaderStageFlags push_constant_stages;
struct panvk_descriptor_set meta_push_descriptors;
struct panvk_descriptor_state descriptors[MAX_BIND_POINTS];
VkResult record_result;
};
void
panvk_cmd_open_batch(struct panvk_cmd_buffer *cmdbuf);
void
panvk_cmd_close_batch(struct panvk_cmd_buffer *cmdbuf);
void
panvk_cmd_get_midgard_polygon_list(struct panvk_cmd_buffer *cmdbuf,
unsigned width, unsigned height,
bool has_draws);
void
panvk_cmd_get_bifrost_tiler_context(struct panvk_cmd_buffer *cmdbuf,
unsigned width, unsigned height);
void
panvk_pack_color(struct panvk_clear_value *out,
const VkClearColorValue *in,
enum pipe_format format);
struct panvk_event {
struct vk_object_base base;
};
struct panvk_shader_module {
struct vk_object_base base;
unsigned char sha1[20];
uint32_t code_size;
const uint32_t code[0];
};
struct panvk_shader {
struct pan_shader_info info;
struct util_dynarray binary;
unsigned sysval_ubo;
};
struct panvk_shader *
panvk_shader_create(struct panvk_device *dev,
gl_shader_stage stage,
const VkPipelineShaderStageCreateInfo *stage_info,
const struct panvk_pipeline_layout *layout,
unsigned sysval_ubo,
struct pan_blend_state *blend_state,
bool static_blend_constants,
const VkAllocationCallbacks *alloc);
void
panvk_shader_destroy(struct panvk_device *dev,
struct panvk_shader *shader,
const VkAllocationCallbacks *alloc);
union panvk_sysval_data {
float f32[4];
double f64[2];
uint32_t u32[4];
uint64_t u64[2];
};
struct panvk_pipeline {
struct vk_object_base base;
struct panvk_varyings_info varyings;
struct panvk_attribs_info attribs;
const struct panvk_pipeline_layout *layout;
unsigned active_stages;
uint32_t dynamic_state_mask;
struct panfrost_bo *binary_bo;
struct panfrost_bo *state_bo;
mali_ptr vpd;
mali_ptr rsds[MESA_SHADER_STAGES];
unsigned num_ubos;
unsigned num_sysvals;
struct {
unsigned ubo_idx;
mali_ptr ubo;
struct panfrost_sysvals ids;
uint32_t dirty_mask;
} sysvals[MESA_SHADER_STAGES];
unsigned tls_size;
unsigned wls_size;
struct {
mali_ptr address;
struct pan_shader_info info;
struct mali_renderer_state_packed rsd_template;
bool required;
bool dynamic_rsd;
} fs;
struct {
enum mali_draw_mode topology;
bool writes_point_size;
bool primitive_restart;
} ia;
struct {
bool clamp_depth;
float line_width;
struct {
bool enable;
float constant_factor;
float clamp;
float slope_factor;
} depth_bias;
bool front_ccw;
bool cull_front_face;
bool cull_back_face;
} rast;
struct {
bool z_test;
bool z_write;
enum mali_func z_compare_func;
bool s_test;
struct {
enum mali_stencil_op fail_op;
enum mali_stencil_op pass_op;
enum mali_stencil_op z_fail_op;
enum mali_func compare_func;
uint8_t compare_mask;
uint8_t write_mask;
uint8_t ref;
} s_front, s_back;
} zs;
struct {
uint8_t rast_samples;
uint8_t min_samples;
uint16_t sample_mask;
bool alpha_to_coverage;
bool alpha_to_one;
} ms;
struct {
struct pan_blend_state state;
struct mali_blend_packed bd_template[8];
struct {
uint8_t index;
uint16_t bifrost_factor;
} constant[8];
} blend;
VkViewport viewport;
VkRect2D scissor;
};
bool
panvk_blend_needs_lowering(const struct panfrost_device *dev,
const struct pan_blend_state *state,
unsigned rt);
struct panvk_image_level {
VkDeviceSize offset;
VkDeviceSize size;
uint32_t pitch;
};
struct panvk_slice_layout {
unsigned width;
unsigned height;
unsigned depth;
unsigned offset;
unsigned line_stride;
unsigned size;
/* If there is a header preceding each slice, how big is
* that header? Used for AFBC.
*/
unsigned afbc_header_size;
/* If checksumming is enabled following the slice, what
* is its offset/stride?
*/
struct {
unsigned offset;
unsigned stride;
unsigned size;
} checksum;
};
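/* Enough for a full mip chain of a 4096x4096 image: log2(4096) + 1 = 13. */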
#define PANVK_MAX_MIP_LEVELS 13
struct panvk_plane_layout {
struct panvk_slice_layout slices[PANVK_MAX_MIP_LEVELS];
unsigned offset;
unsigned array_stride;
unsigned size;
};
struct panvk_plane_memory {
const struct panfrost_bo *bo;
unsigned offset;
};
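/* Only single-plane images are supported so far; multi-planar (e.g.
 * YCbCr) formats would bump this. */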
#define PANVK_MAX_PLANES 1
struct panvk_image {
struct vk_object_base base;
struct pan_image pimage;
VkImageType type;
/* The original VkFormat provided by the client. This may not match any
* of the actual surface formats.
*/
VkFormat vk_format;
VkImageAspectFlags aspects;
VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
VkImageCreateFlags flags; /**< VkImageCreateInfo::flags */
VkExtent3D extent;
unsigned queue_family_mask;
bool exclusive;
bool shareable;
};
unsigned
panvk_image_get_plane_size(const struct panvk_image *image, unsigned plane);
unsigned
panvk_image_get_total_size(const struct panvk_image *image);
struct panvk_image_view {
struct vk_object_base base;
struct pan_image_view pview;
VkFormat vk_format;
struct panfrost_bo *bo;
struct {
struct mali_bifrost_texture_packed tex_desc;
} bifrost;
};
struct panvk_sampler {
struct vk_object_base base;
struct mali_midgard_sampler_packed desc;
};
struct panvk_buffer_view {
struct vk_object_base base;
};
struct panvk_attachment_info {
struct panvk_image_view *iview;
};
struct panvk_framebuffer {
struct vk_object_base base;
uint32_t width;
uint32_t height;
uint32_t layers;
uint32_t attachment_count;
struct panvk_attachment_info attachments[0];
};
struct panvk_clear_value {
union {
uint32_t color[4];
struct {
float depth;
uint8_t stencil;
};
};
};
struct panvk_subpass_attachment {
uint32_t idx;
VkImageLayout layout;
bool clear;
};
struct panvk_subpass {
uint32_t input_count;
uint32_t color_count;
struct panvk_subpass_attachment *input_attachments;
uint8_t active_color_attachments;
struct panvk_subpass_attachment *color_attachments;
struct panvk_subpass_attachment *resolve_attachments;
struct panvk_subpass_attachment zs_attachment;
uint32_t view_mask;
};
struct panvk_render_pass_attachment {
VkAttachmentDescriptionFlags flags;
enum pipe_format format;
unsigned samples;
VkAttachmentLoadOp load_op;
VkAttachmentStoreOp store_op;
VkAttachmentLoadOp stencil_load_op;
VkAttachmentStoreOp stencil_store_op;
VkImageLayout initial_layout;
VkImageLayout final_layout;
unsigned view_mask;
unsigned clear_subpass;
};
struct panvk_render_pass {
struct vk_object_base base;
uint32_t attachment_count;
uint32_t subpass_count;
struct panvk_subpass_attachment *subpass_attachments;
struct panvk_render_pass_attachment *attachments;
struct panvk_subpass subpasses[0];
};
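/* VkCompareOp and mali_func share the same numerical encoding, so the
 * translation is just a checked cast: the STATIC_ASSERTs below verify the
 * one-to-one mapping at compile time. */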
static inline enum mali_func
panvk_translate_compare_func(VkCompareOp comp)
{
STATIC_ASSERT(VK_COMPARE_OP_NEVER == (VkCompareOp)MALI_FUNC_NEVER);
STATIC_ASSERT(VK_COMPARE_OP_LESS == (VkCompareOp)MALI_FUNC_LESS);
STATIC_ASSERT(VK_COMPARE_OP_EQUAL == (VkCompareOp)MALI_FUNC_EQUAL);
STATIC_ASSERT(VK_COMPARE_OP_LESS_OR_EQUAL == (VkCompareOp)MALI_FUNC_LEQUAL);
STATIC_ASSERT(VK_COMPARE_OP_GREATER == (VkCompareOp)MALI_FUNC_GREATER);
STATIC_ASSERT(VK_COMPARE_OP_NOT_EQUAL == (VkCompareOp)MALI_FUNC_NOT_EQUAL);
STATIC_ASSERT(VK_COMPARE_OP_GREATER_OR_EQUAL == (VkCompareOp)MALI_FUNC_GEQUAL);
STATIC_ASSERT(VK_COMPARE_OP_ALWAYS == (VkCompareOp)MALI_FUNC_ALWAYS);
return (enum mali_func)comp;
}
VK_DEFINE_HANDLE_CASTS(panvk_cmd_buffer, base, VkCommandBuffer, VK_OBJECT_TYPE_COMMAND_BUFFER)
VK_DEFINE_HANDLE_CASTS(panvk_device, vk.base, VkDevice, VK_OBJECT_TYPE_DEVICE)
VK_DEFINE_HANDLE_CASTS(panvk_instance, vk.base, VkInstance, VK_OBJECT_TYPE_INSTANCE)
VK_DEFINE_HANDLE_CASTS(panvk_physical_device, vk.base, VkPhysicalDevice, VK_OBJECT_TYPE_PHYSICAL_DEVICE)
VK_DEFINE_HANDLE_CASTS(panvk_queue, base, VkQueue, VK_OBJECT_TYPE_QUEUE)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_cmd_pool, base, VkCommandPool, VK_OBJECT_TYPE_COMMAND_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_buffer, base, VkBuffer, VK_OBJECT_TYPE_BUFFER)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_buffer_view, base, VkBufferView, VK_OBJECT_TYPE_BUFFER_VIEW)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_descriptor_pool, base, VkDescriptorPool, VK_OBJECT_TYPE_DESCRIPTOR_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_descriptor_set, base, VkDescriptorSet, VK_OBJECT_TYPE_DESCRIPTOR_SET)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_descriptor_set_layout, base,
VkDescriptorSetLayout, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_device_memory, base, VkDeviceMemory, VK_OBJECT_TYPE_DEVICE_MEMORY)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_fence, base, VkFence, VK_OBJECT_TYPE_FENCE)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_event, base, VkEvent, VK_OBJECT_TYPE_EVENT)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_framebuffer, base, VkFramebuffer, VK_OBJECT_TYPE_FRAMEBUFFER)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_image, base, VkImage, VK_OBJECT_TYPE_IMAGE)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_image_view, base, VkImageView, VK_OBJECT_TYPE_IMAGE_VIEW)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_pipeline_cache, base, VkPipelineCache, VK_OBJECT_TYPE_PIPELINE_CACHE)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_pipeline, base, VkPipeline, VK_OBJECT_TYPE_PIPELINE)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_pipeline_layout, base, VkPipelineLayout, VK_OBJECT_TYPE_PIPELINE_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_render_pass, base, VkRenderPass, VK_OBJECT_TYPE_RENDER_PASS)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_sampler, base, VkSampler, VK_OBJECT_TYPE_SAMPLER)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_shader_module, base, VkShaderModule, VK_OBJECT_TYPE_SHADER_MODULE)
VK_DEFINE_NONDISP_HANDLE_CASTS(panvk_semaphore, base, VkSemaphore, VK_OBJECT_TYPE_SEMAPHORE)
#endif /* PANVK_PRIVATE_H */

View File

@ -0,0 +1,104 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
VkResult
panvk_CreateQueryPool(VkDevice _device,
const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkQueryPool *pQueryPool)
{
panvk_stub();
return VK_SUCCESS;
}
void
panvk_DestroyQueryPool(VkDevice _device,
VkQueryPool _pool,
const VkAllocationCallbacks *pAllocator)
{
panvk_stub();
}
VkResult
panvk_GetQueryPoolResults(VkDevice _device,
VkQueryPool queryPool,
uint32_t firstQuery,
uint32_t queryCount,
size_t dataSize,
void *pData,
VkDeviceSize stride,
VkQueryResultFlags flags)
{
panvk_stub();
return VK_SUCCESS;
}
void
panvk_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t firstQuery,
uint32_t queryCount,
VkBuffer dstBuffer,
VkDeviceSize dstOffset,
VkDeviceSize stride,
VkQueryResultFlags flags)
{
panvk_stub();
}
void
panvk_CmdResetQueryPool(VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t firstQuery,
uint32_t queryCount)
{
panvk_stub();
}
void
panvk_CmdBeginQuery(VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t query,
VkQueryControlFlags flags)
{
panvk_stub();
}
void
panvk_CmdEndQuery(VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t query)
{
panvk_stub();
}
void
panvk_CmdWriteTimestamp(VkCommandBuffer commandBuffer,
VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool,
uint32_t query)
{
panvk_stub();
}

View File

@ -0,0 +1,466 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from tu_shader.c which is:
* Copyright © 2019 Google LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include "nir_builder.h"
#include "nir_lower_blend.h"
#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"
#include "panfrost-quirks.h"
#include "pan_shader.h"
static nir_shader *
panvk_spirv_to_nir(const void *code,
size_t codesize,
gl_shader_stage stage,
const char *entry_point_name,
const VkSpecializationInfo *spec_info,
const nir_shader_compiler_options *nir_options)
{
/* TODO these are made-up */
const struct spirv_to_nir_options spirv_options = {
.caps = { false },
.ubo_addr_format = nir_address_format_32bit_index_offset,
.ssbo_addr_format = nir_address_format_32bit_index_offset,
};
/* convert VkSpecializationInfo */
struct nir_spirv_specialization *spec = NULL;
uint32_t num_spec = 0;
if (spec_info && spec_info->mapEntryCount) {
spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
if (!spec)
return NULL;
for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
const void *data = spec_info->pData + entry->offset;
assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
spec[i].id = entry->constantID;
switch (entry->size) {
case 8:
spec[i].value.u64 = *(const uint64_t *)data;
break;
case 4:
spec[i].value.u32 = *(const uint32_t *)data;
break;
case 2:
spec[i].value.u16 = *(const uint16_t *)data;
break;
case 1:
spec[i].value.u8 = *(const uint8_t *)data;
break;
default:
assert(!"Invalid spec constant size");
break;
}
spec[i].defined_on_module = false;
}
num_spec = spec_info->mapEntryCount;
}
nir_shader *nir = spirv_to_nir(code, codesize / sizeof(uint32_t), spec,
num_spec, stage, entry_point_name,
&spirv_options, nir_options);
free(spec);
assert(nir->info.stage == stage);
nir_validate_shader(nir, "after spirv_to_nir");
return nir;
}
struct panvk_lower_misc_ctx {
struct panvk_shader *shader;
const struct panvk_pipeline_layout *layout;
};
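/* Mali uses flat sampler/texture tables rather than per-set bindings, so
 * the lowering below remaps each (set, binding) pair to an absolute index:
 * the binding's index within its set plus the set's start offset recorded
 * in the pipeline layout. */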
static unsigned
get_fixed_sampler_index(nir_deref_instr *deref,
const struct panvk_lower_misc_ctx *ctx)
{
nir_variable *var = nir_deref_instr_get_variable(deref);
unsigned set = var->data.descriptor_set;
unsigned binding = var->data.binding;
const struct panvk_descriptor_set_binding_layout *bind_layout =
&ctx->layout->sets[set].layout->bindings[binding];
return bind_layout->sampler_idx + ctx->layout->sets[set].sampler_offset;
}
static unsigned
get_fixed_texture_index(nir_deref_instr *deref,
const struct panvk_lower_misc_ctx *ctx)
{
nir_variable *var = nir_deref_instr_get_variable(deref);
unsigned set = var->data.descriptor_set;
unsigned binding = var->data.binding;
const struct panvk_descriptor_set_binding_layout *bind_layout =
&ctx->layout->sets[set].layout->bindings[binding];
return bind_layout->tex_idx + ctx->layout->sets[set].tex_offset;
}
static bool
lower_tex(nir_builder *b, nir_tex_instr *tex,
const struct panvk_lower_misc_ctx *ctx)
{
bool progress = false;
int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
b->cursor = nir_before_instr(&tex->instr);
if (sampler_src_idx >= 0) {
nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
tex->sampler_index = get_fixed_sampler_index(deref, ctx);
nir_tex_instr_remove_src(tex, sampler_src_idx);
progress = true;
}
int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
if (tex_src_idx >= 0) {
nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
tex->texture_index = get_fixed_texture_index(deref, ctx);
nir_tex_instr_remove_src(tex, tex_src_idx);
progress = true;
}
return progress;
}
static void
lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *intr,
const struct panvk_lower_misc_ctx *ctx)
{
nir_ssa_def *vulkan_idx = intr->src[0].ssa;
unsigned set = nir_intrinsic_desc_set(intr);
unsigned binding = nir_intrinsic_binding(intr);
struct panvk_descriptor_set_layout *set_layout = ctx->layout->sets[set].layout;
struct panvk_descriptor_set_binding_layout *binding_layout =
&set_layout->bindings[binding];
unsigned base;
switch (binding_layout->type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
base = binding_layout->ubo_idx + ctx->layout->sets[set].ubo_offset;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
base = binding_layout->ssbo_idx + ctx->layout->sets[set].ssbo_offset;
break;
default:
unreachable("Invalid descriptor type");
break;
}
b->cursor = nir_before_instr(&intr->instr);
nir_ssa_def *idx = nir_iadd(b, nir_imm_int(b, base), vulkan_idx);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, idx);
nir_instr_remove(&intr->instr);
}
static void
lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin)
{
/* Loading the descriptor happens as part of the load/store instruction so
* this is a no-op.
*/
b->cursor = nir_before_instr(&intrin->instr);
nir_ssa_def *val = nir_vec2(b, intrin->src[0].ssa, nir_imm_int(b, 0));
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, val);
nir_instr_remove(&intrin->instr);
}
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
const struct panvk_lower_misc_ctx *ctx)
{
switch (intr->intrinsic) {
case nir_intrinsic_vulkan_resource_index:
lower_vulkan_resource_index(b, intr, ctx);
return true;
case nir_intrinsic_load_vulkan_descriptor:
lower_load_vulkan_descriptor(b, intr);
return true;
default:
return false;
}
}
static bool
panvk_lower_misc_instr(nir_builder *b,
nir_instr *instr,
void *data)
{
const struct panvk_lower_misc_ctx *ctx = data;
switch (instr->type) {
case nir_instr_type_tex:
return lower_tex(b, nir_instr_as_tex(instr), ctx);
case nir_instr_type_intrinsic:
return lower_intrinsic(b, nir_instr_as_intrinsic(instr), ctx);
default:
return false;
}
}
static bool
panvk_lower_misc(nir_shader *nir, const struct panvk_lower_misc_ctx *ctx)
{
return nir_shader_instructions_pass(nir, panvk_lower_misc_instr,
nir_metadata_block_index |
nir_metadata_dominance,
(void *)ctx);
}
static void
panvk_lower_blend(struct panfrost_device *pdev,
nir_shader *nir,
struct pan_blend_state *blend_state,
bool static_blend_constants)
{
nir_lower_blend_options options = {
.logicop_enable = blend_state->logicop_enable,
.logicop_func = blend_state->logicop_func,
};
bool lower_blend = false;
for (unsigned rt = 0; rt < blend_state->rt_count; rt++) {
if (!panvk_blend_needs_lowering(pdev, blend_state, rt))
continue;
const struct pan_blend_rt_state *rt_state = &blend_state->rts[rt];
options.rt[rt].colormask = rt_state->equation.color_mask;
options.format[rt] = rt_state->format;
if (!rt_state->equation.blend_enable) {
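/* Blending disabled: emit src * ONE + dst * ZERO (ZERO with
 * invert_src_factor set means ONE), i.e. a plain replace. */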
static const nir_lower_blend_channel replace = {
.func = BLEND_FUNC_ADD,
.src_factor = BLEND_FACTOR_ZERO,
.invert_src_factor = true,
.dst_factor = BLEND_FACTOR_ZERO,
.invert_dst_factor = false,
};
options.rt[rt].rgb = replace;
options.rt[rt].alpha = replace;
} else {
options.rt[rt].rgb.func = rt_state->equation.rgb_func;
options.rt[rt].rgb.src_factor = rt_state->equation.rgb_src_factor;
options.rt[rt].rgb.invert_src_factor = rt_state->equation.rgb_invert_src_factor;
options.rt[rt].rgb.dst_factor = rt_state->equation.rgb_dst_factor;
options.rt[rt].rgb.invert_dst_factor = rt_state->equation.rgb_invert_dst_factor;
options.rt[rt].alpha.func = rt_state->equation.alpha_func;
options.rt[rt].alpha.src_factor = rt_state->equation.alpha_src_factor;
options.rt[rt].alpha.invert_src_factor = rt_state->equation.alpha_invert_src_factor;
options.rt[rt].alpha.dst_factor = rt_state->equation.alpha_dst_factor;
options.rt[rt].alpha.invert_dst_factor = rt_state->equation.alpha_invert_dst_factor;
}
lower_blend = true;
}
/* FIXME: currently untested */
assert(!lower_blend);
if (lower_blend)
NIR_PASS_V(nir, nir_lower_blend, options);
}
struct panvk_shader *
panvk_shader_create(struct panvk_device *dev,
gl_shader_stage stage,
const VkPipelineShaderStageCreateInfo *stage_info,
const struct panvk_pipeline_layout *layout,
unsigned sysval_ubo,
struct pan_blend_state *blend_state,
bool static_blend_constants,
const VkAllocationCallbacks *alloc)
{
const struct panvk_shader_module *module = panvk_shader_module_from_handle(stage_info->module);
struct panfrost_device *pdev = &dev->physical_device->pdev;
struct panvk_shader *shader;
shader = vk_zalloc2(&dev->vk.alloc, alloc, sizeof(*shader), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!shader)
return NULL;
util_dynarray_init(&shader->binary, NULL);
/* translate SPIR-V to NIR */
assert(module->code_size % 4 == 0);
nir_shader *nir = panvk_spirv_to_nir(module->code,
module->code_size,
stage, stage_info->pName,
stage_info->pSpecializationInfo,
pan_shader_get_compiler_options(pdev));
if (!nir) {
vk_free2(&dev->vk.alloc, alloc, shader);
return NULL;
}
if (stage == MESA_SHADER_FRAGMENT)
panvk_lower_blend(pdev, nir, blend_state, static_blend_constants);
/* multi step inlining procedure */
NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
NIR_PASS_V(nir, nir_copy_prop);
NIR_PASS_V(nir, nir_opt_deref);
foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
if (!func->is_entrypoint)
exec_node_remove(&func->node);
}
assert(exec_list_length(&nir->functions) == 1);
NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);
/* Split member structs. We do this before lower_io_to_temporaries so that
* it doesn't lower system values to temporaries by accident.
*/
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_split_per_member_structs);
NIR_PASS_V(nir, nir_remove_dead_variables,
nir_var_shader_in | nir_var_shader_out |
nir_var_system_value | nir_var_mem_shared,
NULL);
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
nir_shader_get_entrypoint(nir), true, true);
NIR_PASS_V(nir, nir_lower_indirect_derefs,
nir_var_shader_in | nir_var_shader_out,
UINT32_MAX);
NIR_PASS_V(nir, nir_opt_copy_prop_vars);
NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);
NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, true, false);
NIR_PASS_V(nir, nir_lower_explicit_io,
nir_var_mem_ubo | nir_var_mem_ssbo,
nir_address_format_32bit_index_offset);
nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, stage);
nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs, stage);
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);
NIR_PASS_V(nir, nir_lower_var_copies);
struct panvk_lower_misc_ctx ctx = {
.shader = shader,
.layout = layout,
};
NIR_PASS_V(nir, panvk_lower_misc, &ctx);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
if (unlikely(dev->physical_device->instance->debug_flags & PANVK_DEBUG_NIR)) {
fprintf(stderr, "translated nir:\n");
nir_print_shader(nir, stderr);
}
struct panfrost_compile_inputs inputs = {
.gpu_id = pdev->gpu_id,
.no_ubo_to_push = true,
.sysval_ubo = sysval_ubo,
};
pan_shader_compile(pdev, nir, &inputs, &shader->binary, &shader->info);
/* Patch the descriptor counts: when the shader uses sysvals, the sysval
 * UBO comes right after the layout's UBOs and is the last one, hence
 * sysval_ubo + 1; otherwise the layout's UBO count stands. */
shader->info.ubo_count =
shader->info.sysvals.sysval_count ? sysval_ubo + 1 : layout->num_ubos;
shader->info.sampler_count = layout->num_samplers;
shader->info.texture_count = layout->num_textures;
shader->sysval_ubo = sysval_ubo;
ralloc_free(nir);
return shader;
}
void
panvk_shader_destroy(struct panvk_device *dev,
struct panvk_shader *shader,
const VkAllocationCallbacks *alloc)
{
util_dynarray_fini(&shader->binary);
vk_free2(&dev->vk.alloc, alloc, shader);
}
VkResult
panvk_CreateShaderModule(VkDevice _device,
const VkShaderModuleCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkShaderModule *pShaderModule)
{
VK_FROM_HANDLE(panvk_device, device, _device);
struct panvk_shader_module *module;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
assert(pCreateInfo->codeSize % 4 == 0);
module = vk_object_zalloc(&device->vk, pAllocator,
sizeof(*module) + pCreateInfo->codeSize,
VK_OBJECT_TYPE_SHADER_MODULE);
if (module == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
module->code_size = pCreateInfo->codeSize;
memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);
_mesa_sha1_compute(module->code, module->code_size, module->sha1);
*pShaderModule = panvk_shader_module_to_handle(module);
return VK_SUCCESS;
}
void
panvk_DestroyShaderModule(VkDevice _device,
VkShaderModule _module,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_shader_module, module, _module);
if (!module)
return;
vk_object_free(&device->vk, pAllocator, module);
}

View File

@ -0,0 +1,417 @@
/*
* Copyright (C) 2021 Collabora Ltd.
*
* Derived from tu_drm.c which is:
* Copyright © 2018 Google, Inc.
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <errno.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <xf86drm.h>
#include "panvk_private.h"
static VkResult
sync_create(struct panvk_device *device,
struct panvk_syncobj *sync,
bool signaled)
{
const struct panfrost_device *pdev = &device->physical_device->pdev;
struct drm_syncobj_create create = {
.flags = signaled ? DRM_SYNCOBJ_CREATE_SIGNALED : 0,
};
int ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
if (ret)
return VK_ERROR_OUT_OF_HOST_MEMORY;
sync->permanent = create.handle;
return VK_SUCCESS;
}
static void
sync_set_temporary(struct panvk_device *device, struct panvk_syncobj *sync,
uint32_t syncobj)
{
const struct panfrost_device *pdev = &device->physical_device->pdev;
if (sync->temporary) {
struct drm_syncobj_destroy destroy = { .handle = sync->temporary };
drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
}
sync->temporary = syncobj;
}
static void
sync_destroy(struct panvk_device *device, struct panvk_syncobj *sync)
{
const struct panfrost_device *pdev = &device->physical_device->pdev;
if (!sync)
return;
sync_set_temporary(device, sync, 0);
struct drm_syncobj_destroy destroy = { .handle = sync->permanent };
drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
}
static VkResult
sync_import(struct panvk_device *device, struct panvk_syncobj *sync,
bool temporary, bool sync_fd, int fd)
{
const struct panfrost_device *pdev = &device->physical_device->pdev;
int ret;
if (!sync_fd) {
uint32_t *dst = temporary ? &sync->temporary : &sync->permanent;
struct drm_syncobj_handle handle = { .fd = fd };
ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &handle);
if (ret)
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
if (*dst) {
struct drm_syncobj_destroy destroy = { .handle = *dst };
drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
}
*dst = handle.handle;
close(fd);
} else {
assert(temporary);
struct drm_syncobj_create create = {};
if (fd == -1)
create.flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
if (ret)
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
if (fd != -1) {
struct drm_syncobj_handle handle = {
.fd = fd,
.handle = create.handle,
.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
};
ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &handle);
if (ret) {
struct drm_syncobj_destroy destroy = { .handle = create.handle };
drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
}
close(fd);
}
sync_set_temporary(device, sync, create.handle);
}
return VK_SUCCESS;
}
static VkResult
sync_export(struct panvk_device *device, struct panvk_syncobj *sync,
bool sync_fd, int *p_fd)
{
const struct panfrost_device *pdev = &device->physical_device->pdev;
struct drm_syncobj_handle handle = {
.handle = sync->temporary ?: sync->permanent,
.flags = sync_fd ? DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE : 0,
.fd = -1,
};
int ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle);
if (ret)
return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
/* restore permanent payload on export */
sync_set_temporary(device, sync, 0);
*p_fd = handle.fd;
return VK_SUCCESS;
}
VkResult
panvk_CreateSemaphore(VkDevice _device,
const VkSemaphoreCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSemaphore *pSemaphore)
{
VK_FROM_HANDLE(panvk_device, device, _device);
struct panvk_semaphore *sem =
vk_object_zalloc(&device->vk, pAllocator, sizeof(*sem),
VK_OBJECT_TYPE_SEMAPHORE);
if (!sem)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult ret = sync_create(device, &sem->syncobj, false);
if (ret != VK_SUCCESS) {
vk_object_free(&device->vk, pAllocator, sem);
return ret;
}
*pSemaphore = panvk_semaphore_to_handle(sem);
return VK_SUCCESS;
}
void
panvk_DestroySemaphore(VkDevice _device, VkSemaphore _sem, const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_semaphore, sem, _sem);
sync_destroy(device, &sem->syncobj);
vk_object_free(&device->vk, pAllocator, sem);
}
VkResult
panvk_ImportSemaphoreFdKHR(VkDevice _device, const VkImportSemaphoreFdInfoKHR *info)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_semaphore, sem, info->semaphore);
bool temp = info->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
bool sync_fd = info->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
return sync_import(device, &sem->syncobj, temp, sync_fd, info->fd);
}
VkResult
panvk_GetSemaphoreFdKHR(VkDevice _device, const VkSemaphoreGetFdInfoKHR *info, int *pFd)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_semaphore, sem, info->semaphore);
bool sync_fd = info->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
return sync_export(device, &sem->syncobj, sync_fd, pFd);
}
VkResult
panvk_CreateFence(VkDevice _device,
const VkFenceCreateInfo *info,
const VkAllocationCallbacks *pAllocator,
VkFence *pFence)
{
VK_FROM_HANDLE(panvk_device, device, _device);
struct panvk_fence *fence =
vk_object_zalloc(&device->vk, pAllocator, sizeof(*fence),
VK_OBJECT_TYPE_FENCE);
if (!fence)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult ret = sync_create(device, &fence->syncobj,
info->flags & VK_FENCE_CREATE_SIGNALED_BIT);
if (ret != VK_SUCCESS) {
vk_object_free(&device->vk, pAllocator, fence);
return ret;
}
*pFence = panvk_fence_to_handle(fence);
return VK_SUCCESS;
}
void
panvk_DestroyFence(VkDevice _device, VkFence _fence,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_fence, fence, _fence);
sync_destroy(device, &fence->syncobj);
vk_object_free(&device->vk, pAllocator, fence);
}
VkResult
panvk_ImportFenceFdKHR(VkDevice _device, const VkImportFenceFdInfoKHR *info)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_fence, fence, info->fence);
bool sync_fd = info->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
bool temp = info->flags & VK_FENCE_IMPORT_TEMPORARY_BIT;
return sync_import(device, &fence->syncobj, temp, sync_fd, info->fd);
}
VkResult
panvk_GetFenceFdKHR(VkDevice _device, const VkFenceGetFdInfoKHR *info, int *pFd)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_fence, fence, info->fence);
bool sync_fd = info->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
return sync_export(device, &fence->syncobj, sync_fd, pFd);
}
static VkResult
drm_syncobj_wait(struct panvk_device *device,
const uint32_t *handles, uint32_t count_handles,
int64_t timeout_nsec, bool wait_all)
{
const struct panfrost_device *pdev = &device->physical_device->pdev;
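/* WAIT_FOR_SUBMIT asks the kernel to also wait for a fence to be attached
 * to the syncobj, which vkWaitForFences() needs when the corresponding
 * submission happens in another thread. */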
struct drm_syncobj_wait wait = {
.handles = (uint64_t) (uintptr_t) handles,
.count_handles = count_handles,
.timeout_nsec = timeout_nsec,
.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
(wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0)
};
int ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
if (ret) {
if (errno == ETIME)
return VK_TIMEOUT;
assert(0);
return VK_ERROR_DEVICE_LOST; /* TODO */
}
return VK_SUCCESS;
}
static uint64_t
gettime_ns(void)
{
struct timespec current;
clock_gettime(CLOCK_MONOTONIC, &current);
return (uint64_t)current.tv_sec * 1000000000 + current.tv_nsec;
}
/* and the kernel converts it right back to relative timeout - very smart UAPI */
static uint64_t
absolute_timeout(uint64_t timeout)
{
if (timeout == 0)
return 0;
uint64_t current_time = gettime_ns();
uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;
timeout = MIN2(max_timeout, timeout);
return (current_time + timeout);
}
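/* Example with hypothetical values: timeout = 5000000000 (5s) and
 * gettime_ns() = T yields T + 5s, clamped to INT64_MAX so the kernel's
 * conversion back to a relative timeout cannot overflow. */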
VkResult
panvk_WaitForFences(VkDevice _device,
uint32_t fenceCount,
const VkFence *pFences,
VkBool32 waitAll,
uint64_t timeout)
{
VK_FROM_HANDLE(panvk_device, device, _device);
if (panvk_device_is_lost(device))
return VK_ERROR_DEVICE_LOST;
uint32_t handles[fenceCount];
for (unsigned i = 0; i < fenceCount; ++i) {
VK_FROM_HANDLE(panvk_fence, fence, pFences[i]);
if (fence->syncobj.temporary) {
handles[i] = fence->syncobj.temporary;
} else {
handles[i] = fence->syncobj.permanent;
}
}
return drm_syncobj_wait(device, handles, fenceCount, absolute_timeout(timeout), waitAll);
}
VkResult
panvk_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
VK_FROM_HANDLE(panvk_device, device, _device);
const struct panfrost_device *pdev = &device->physical_device->pdev;
int ret;
uint32_t handles[fenceCount];
for (unsigned i = 0; i < fenceCount; ++i) {
VK_FROM_HANDLE(panvk_fence, fence, pFences[i]);
sync_set_temporary(device, &fence->syncobj, 0);
handles[i] = fence->syncobj.permanent;
}
struct drm_syncobj_array objs = {
.handles = (uint64_t) (uintptr_t) handles,
.count_handles = fenceCount,
};
ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_RESET, &objs);
if (ret) {
panvk_device_set_lost(device, "DRM_IOCTL_SYNCOBJ_RESET failure: %s",
strerror(errno));
}
return VK_SUCCESS;
}
VkResult
panvk_GetFenceStatus(VkDevice _device, VkFence _fence)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_fence, fence, _fence);
uint32_t handle = fence->syncobj.temporary ? : fence->syncobj.permanent;
VkResult result;
result = drm_syncobj_wait(device, &handle, 1, 0, false);
if (result == VK_TIMEOUT)
result = VK_NOT_READY;
return result;
}
int
panvk_signal_syncobjs(struct panvk_device *device,
struct panvk_syncobj *syncobj1,
struct panvk_syncobj *syncobj2)
{
const struct panfrost_device *pdev = &device->physical_device->pdev;
uint32_t handles[2], count = 0;
if (syncobj1)
handles[count++] = syncobj1->temporary ?: syncobj1->permanent;
if (syncobj2)
handles[count++] = syncobj2->temporary ?: syncobj2->permanent;
if (!count)
return 0;
struct drm_syncobj_array objs = {
.handles = (uint64_t) (uintptr_t) handles,
.count_handles = count
};
return drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &objs);
}
int
panvk_syncobj_to_fd(struct panvk_device *device, struct panvk_syncobj *sync)
{
const struct panfrost_device *pdev = &device->physical_device->pdev;
struct drm_syncobj_handle handle = { .handle = sync->permanent };
int ret;
ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle);
return ret ? -1 : handle.fd;
}

View File

@ -0,0 +1,87 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from tu_util.c which is:
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "util/u_math.h"
#include "vk_enum_to_str.h"
/** Log an error message. */
void panvk_printflike(1, 2)
panvk_logi(const char *format, ...)
{
va_list va;
va_start(va, format);
panvk_logi_v(format, va);
va_end(va);
}
/** \see panvk_logi() */
void
panvk_logi_v(const char *format, va_list va)
{
fprintf(stderr, "tu: info: ");
vfprintf(stderr, format, va);
fprintf(stderr, "\n");
}
VkResult
__vk_errorf(struct panvk_instance *instance,
VkResult error,
const char *file,
int line,
const char *format,
...)
{
va_list ap;
char buffer[256];
const char *error_str = vk_Result_to_str(error);
#ifndef DEBUG
return error;
#endif
if (format) {
va_start(ap, format);
vsnprintf(buffer, sizeof(buffer), format, ap);
va_end(ap);
fprintf(stderr, "%s:%d: %s (%s)\n", file, line, buffer, error_str);
} else {
fprintf(stderr, "%s:%d: %s\n", file, line, error_str);
}
return error;
}

View File

@ -0,0 +1,58 @@
/*
* Copyright (C) 2021 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include "panvk_varyings.h"
#include "pan_pool.h"
unsigned
panvk_varyings_buf_count(const struct panvk_device *dev,
struct panvk_varyings_info *varyings)
{
const struct panfrost_device *pdev = &dev->physical_device->pdev;
return util_bitcount(varyings->buf_mask) + (pan_is_bifrost(pdev) ? 1 : 0);
}
void
panvk_varyings_alloc(struct panvk_varyings_info *varyings,
struct pan_pool *varying_mem_pool,
unsigned vertex_count)
{
for (unsigned i = 0; i < PANVK_VARY_BUF_MAX; i++) {
if (!(varyings->buf_mask & (1 << i)))
continue;
unsigned buf_idx = panvk_varying_buf_index(varyings, i);
unsigned size = varyings->buf[buf_idx].stride * vertex_count;
if (!size)
continue;
struct panfrost_ptr ptr =
panfrost_pool_alloc_aligned(varying_mem_pool, size, 64);
varyings->buf[buf_idx].size = size;
varyings->buf[buf_idx].address = ptr.gpu;
varyings->buf[buf_idx].cpu = ptr.cpu;
}
}

View File

@ -0,0 +1,144 @@
/*
* Copyright (C) 2021 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef PANVK_VARYINGS_H
#define PANVK_VARYINGS_H
#include "util/bitset.h"
#include "util/format/u_format.h"
#include "compiler/shader_enums.h"
#include "midgard_pack.h"
#include "panfrost-job.h"
struct pan_pool;
struct panvk_device;
enum panvk_varying_buf_id {
PANVK_VARY_BUF_GENERAL,
PANVK_VARY_BUF_POSITION,
PANVK_VARY_BUF_PSIZ,
PANVK_VARY_BUF_PNTCOORD,
PANVK_VARY_BUF_FRAGCOORD,
/* Keep last */
PANVK_VARY_BUF_MAX,
};
struct panvk_varying {
unsigned buf;
unsigned offset;
enum pipe_format format;
};
struct panvk_varying_buf {
mali_ptr address;
void *cpu;
unsigned stride;
unsigned size;
};
struct panvk_varyings_info {
struct panvk_varying varying[VARYING_SLOT_MAX];
BITSET_DECLARE(active, VARYING_SLOT_MAX);
struct panvk_varying_buf buf[VARYING_SLOT_MAX];
struct {
unsigned count;
gl_varying_slot loc[VARYING_SLOT_MAX];
} stage[MESA_SHADER_STAGES];
unsigned buf_mask;
};
void
panvk_varyings_alloc(struct panvk_varyings_info *varyings,
struct pan_pool *varying_mem_pool,
unsigned vertex_count);
unsigned
panvk_varyings_buf_count(const struct panvk_device *dev,
struct panvk_varyings_info *varyings);
static inline unsigned
panvk_varying_buf_index(const struct panvk_varyings_info *varyings,
enum panvk_varying_buf_id b)
{
return util_bitcount(varyings->buf_mask & BITFIELD_MASK(b));
}
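/* Worked example with a hypothetical mask: buf_mask = 0b00101 means
 * PANVK_VARY_BUF_GENERAL (bit 0) and PANVK_VARY_BUF_PSIZ (bit 2) are
 * active; panvk_varying_buf_index(v, PANVK_VARY_BUF_PSIZ) counts the set
 * bits below bit 2 and returns 1, so active buffers pack densely in enum
 * order. */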
static inline enum panvk_varying_buf_id
panvk_varying_buf_id(bool fs, gl_varying_slot loc)
{
switch (loc) {
case VARYING_SLOT_POS:
return fs ? PANVK_VARY_BUF_FRAGCOORD : PANVK_VARY_BUF_POSITION;
case VARYING_SLOT_PSIZ:
return PANVK_VARY_BUF_PSIZ;
case VARYING_SLOT_PNTC:
return PANVK_VARY_BUF_PNTCOORD;
default:
return PANVK_VARY_BUF_GENERAL;
}
}
static inline bool
panvk_varying_is_builtin(gl_shader_stage stage, gl_varying_slot loc)
{
bool fs = stage == MESA_SHADER_FRAGMENT;
switch (loc) {
case VARYING_SLOT_POS:
case VARYING_SLOT_PNTC:
return fs;
default:
return false;
}
}
static inline enum mali_attribute_special
panvk_varying_special_buf_id(enum panvk_varying_buf_id buf_id)
{
switch (buf_id) {
case PANVK_VARY_BUF_PNTCOORD:
return MALI_ATTRIBUTE_SPECIAL_POINT_COORD;
case PANVK_VARY_BUF_FRAGCOORD:
return MALI_ATTRIBUTE_SPECIAL_FRAG_COORD;
default:
return 0;
}
}
static inline unsigned
panvk_varying_size(const struct panvk_varyings_info *varyings,
gl_varying_slot loc)
{
switch (loc) {
case VARYING_SLOT_POS:
return sizeof(float) * 4;
case VARYING_SLOT_PSIZ:
return sizeof(uint16_t);
default:
return util_format_get_blocksize(varyings->varying[loc].format);
}
}
#endif

View File

@ -0,0 +1,293 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from tu_wsi.c:
* Copyright © 2016 Red Hat
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include "vk_util.h"
#include "wsi_common.h"
static PFN_vkVoidFunction VKAPI_PTR
panvk_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{
VK_FROM_HANDLE(panvk_physical_device, pdevice, physicalDevice);
return vk_instance_get_proc_addr_unchecked(&pdevice->instance->vk, pName);
}
VkResult
panvk_wsi_init(struct panvk_physical_device *physical_device)
{
VkResult result;
result = wsi_device_init(&physical_device->wsi_device,
panvk_physical_device_to_handle(physical_device),
panvk_wsi_proc_addr,
&physical_device->instance->vk.alloc,
physical_device->master_fd, NULL,
false);
if (result != VK_SUCCESS)
return result;
physical_device->wsi_device.supports_modifiers = false;
return VK_SUCCESS;
}
void
panvk_wsi_finish(struct panvk_physical_device *physical_device)
{
wsi_device_finish(&physical_device->wsi_device,
&physical_device->instance->vk.alloc);
}
void
panvk_DestroySurfaceKHR(VkInstance _instance,
VkSurfaceKHR _surface,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_instance, instance, _instance);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
vk_free2(&instance->vk.alloc, pAllocator, surface);
}
VkResult
panvk_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
VkSurfaceKHR surface,
VkBool32 *pSupported)
{
VK_FROM_HANDLE(panvk_physical_device, device, physicalDevice);
return wsi_common_get_surface_support(
&device->wsi_device, queueFamilyIndex, surface, pSupported);
}
VkResult
panvk_GetPhysicalDeviceSurfaceCapabilitiesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
{
VK_FROM_HANDLE(panvk_physical_device, device, physicalDevice);
return wsi_common_get_surface_capabilities(&device->wsi_device, surface,
pSurfaceCapabilities);
}
VkResult
panvk_GetPhysicalDeviceSurfaceCapabilities2KHR(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
{
VK_FROM_HANDLE(panvk_physical_device, device, physicalDevice);
return wsi_common_get_surface_capabilities2(
&device->wsi_device, pSurfaceInfo, pSurfaceCapabilities);
}
VkResult
panvk_GetPhysicalDeviceSurfaceCapabilities2EXT(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
{
VK_FROM_HANDLE(panvk_physical_device, device, physicalDevice);
return wsi_common_get_surface_capabilities2ext(
&device->wsi_device, surface, pSurfaceCapabilities);
}
VkResult
panvk_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats)
{
VK_FROM_HANDLE(panvk_physical_device, device, physicalDevice);
return wsi_common_get_surface_formats(
&device->wsi_device, surface, pSurfaceFormatCount, pSurfaceFormats);
}
VkResult
panvk_GetPhysicalDeviceSurfaceFormats2KHR(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormat2KHR *pSurfaceFormats)
{
VK_FROM_HANDLE(panvk_physical_device, device, physicalDevice);
return wsi_common_get_surface_formats2(&device->wsi_device, pSurfaceInfo,
pSurfaceFormatCount,
pSurfaceFormats);
}
VkResult
panvk_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes)
{
VK_FROM_HANDLE(panvk_physical_device, device, physicalDevice);
return wsi_common_get_surface_present_modes(
&device->wsi_device, surface, pPresentModeCount, pPresentModes);
}
VkResult
panvk_CreateSwapchainKHR(VkDevice _device,
const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSwapchainKHR *pSwapchain)
{
VK_FROM_HANDLE(panvk_device, device, _device);
const VkAllocationCallbacks *alloc;
if (pAllocator)
alloc = pAllocator;
else
alloc = &device->vk.alloc;
return wsi_common_create_swapchain(&device->physical_device->wsi_device,
panvk_device_to_handle(device),
pCreateInfo, alloc, pSwapchain);
}
void
panvk_DestroySwapchainKHR(VkDevice _device,
VkSwapchainKHR swapchain,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(panvk_device, device, _device);
const VkAllocationCallbacks *alloc;
if (pAllocator)
alloc = pAllocator;
else
alloc = &device->vk.alloc;
wsi_common_destroy_swapchain(_device, swapchain, alloc);
}
VkResult
panvk_GetSwapchainImagesKHR(VkDevice device,
VkSwapchainKHR swapchain,
uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages)
{
return wsi_common_get_images(swapchain, pSwapchainImageCount,
pSwapchainImages);
}
VkResult
panvk_AcquireNextImageKHR(VkDevice device,
VkSwapchainKHR swapchain,
uint64_t timeout,
VkSemaphore semaphore,
VkFence fence,
uint32_t *pImageIndex)
{
VkAcquireNextImageInfoKHR acquire_info = {
.sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
.swapchain = swapchain,
.timeout = timeout,
.semaphore = semaphore,
.fence = fence,
.deviceMask = 0,
};
return panvk_AcquireNextImage2KHR(device, &acquire_info, pImageIndex);
}
VkResult
panvk_AcquireNextImage2KHR(VkDevice _device,
const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_fence, fence, pAcquireInfo->fence);
VK_FROM_HANDLE(panvk_semaphore, sem, pAcquireInfo->semaphore);
struct panvk_physical_device *pdevice = device->physical_device;
VkResult result =
wsi_common_acquire_next_image2(&pdevice->wsi_device, _device,
pAcquireInfo, pImageIndex);
/* signal fence/semaphore - image is available immediately */
if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
panvk_signal_syncobjs(device, fence ? &fence->syncobj : NULL,
sem ? &sem->syncobj : NULL);
}
return result;
}
VkResult
panvk_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
VK_FROM_HANDLE(panvk_queue, queue, _queue);
return wsi_common_queue_present(
&queue->device->physical_device->wsi_device,
panvk_device_to_handle(queue->device), _queue, queue->queue_family_index,
pPresentInfo);
}
VkResult
panvk_GetDeviceGroupPresentCapabilitiesKHR(
VkDevice device, VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
{
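/* Single-GPU device group: only physical device 0 can present, and only
 * in LOCAL mode. */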
memset(pCapabilities->presentMask, 0, sizeof(pCapabilities->presentMask));
pCapabilities->presentMask[0] = 0x1;
pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
return VK_SUCCESS;
}
VkResult
panvk_GetDeviceGroupSurfacePresentModesKHR(
VkDevice device,
VkSurfaceKHR surface,
VkDeviceGroupPresentModeFlagsKHR *pModes)
{
*pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
return VK_SUCCESS;
}
VkResult
panvk_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t *pRectCount,
VkRect2D *pRects)
{
VK_FROM_HANDLE(panvk_physical_device, device, physicalDevice);
return wsi_common_get_present_rectangles(&device->wsi_device, surface,
pRectCount, pRects);
}

View File

@ -0,0 +1,136 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from v3dv driver:
* Copyright © 2020 Raspberry Pi
* Copyright © 2017 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include "panvk_private.h"
#include "wsi_common_display.h"
VkResult
panvk_GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physical_device,
uint32_t *property_count,
VkDisplayPropertiesKHR *properties)
{
VK_FROM_HANDLE(panvk_physical_device, pdevice, physical_device);
return wsi_display_get_physical_device_display_properties(
physical_device,
&pdevice->wsi_device,
property_count,
properties);
}
VkResult
panvk_GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physical_device,
uint32_t *property_count,
VkDisplayPlanePropertiesKHR *properties)
{
VK_FROM_HANDLE(panvk_physical_device, pdevice, physical_device);
return wsi_display_get_physical_device_display_plane_properties(
physical_device,
&pdevice->wsi_device,
property_count,
properties);
}

VkResult
panvk_GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physical_device,
uint32_t plane_index,
uint32_t *display_count,
VkDisplayKHR *displays)
{
VK_FROM_HANDLE(panvk_physical_device, pdevice, physical_device);
return wsi_display_get_display_plane_supported_displays(
physical_device,
&pdevice->wsi_device,
plane_index,
display_count,
displays);
}

VkResult
panvk_GetDisplayModePropertiesKHR(VkPhysicalDevice physical_device,
VkDisplayKHR display,
uint32_t *property_count,
VkDisplayModePropertiesKHR *properties)
{
VK_FROM_HANDLE(panvk_physical_device, pdevice, physical_device);
return wsi_display_get_display_mode_properties(physical_device,
&pdevice->wsi_device,
display,
property_count,
properties);
}

VkResult
panvk_CreateDisplayModeKHR(VkPhysicalDevice physical_device,
VkDisplayKHR display,
const VkDisplayModeCreateInfoKHR *create_info,
const VkAllocationCallbacks *allocator,
VkDisplayModeKHR *mode)
{
VK_FROM_HANDLE(panvk_physical_device, pdevice, physical_device);
return wsi_display_create_display_mode(physical_device,
&pdevice->wsi_device,
display,
create_info,
allocator,
mode);
}

VkResult
panvk_GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physical_device,
VkDisplayModeKHR mode_khr,
uint32_t plane_index,
VkDisplayPlaneCapabilitiesKHR *capabilities)
{
VK_FROM_HANDLE(panvk_physical_device, pdevice, physical_device);
return wsi_get_display_plane_capabilities(physical_device,
&pdevice->wsi_device,
mode_khr,
plane_index,
capabilities);
}
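
/*
 * Surface objects are instance-level, so fall back to the instance
 * allocator when the caller doesn't provide one.
 */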
VkResult
panvk_CreateDisplayPlaneSurfaceKHR(VkInstance _instance,
const VkDisplaySurfaceCreateInfoKHR *create_info,
const VkAllocationCallbacks *allocator,
VkSurfaceKHR *surface)
{
VK_FROM_HANDLE(panvk_instance, instance, _instance);
const VkAllocationCallbacks *alloc;
if (allocator)
alloc = allocator;
else
alloc = &instance->vk.alloc;
return wsi_create_display_surface(_instance, alloc,
create_info, surface);
}

View File

@ -0,0 +1,61 @@
/*
* Copyright © 2021 Collabora Ltd.
*
* Derived from turnip driver:
* Copyright © 2016 Red Hat
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "panvk_private.h"
#include "wsi_common_wayland.h"
VkBool32
panvk_GetPhysicalDeviceWaylandPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display *display)
{
VK_FROM_HANDLE(panvk_physical_device, physical_device, physicalDevice);
return wsi_wl_get_presentation_support(&physical_device->wsi_device,
display);
}

VkResult
panvk_CreateWaylandSurfaceKHR(VkInstance _instance,
const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface)
{
VK_FROM_HANDLE(panvk_instance, instance, _instance);
const VkAllocationCallbacks *alloc;
assert(pCreateInfo->sType ==
VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
if (pAllocator)
alloc = pAllocator;
else
alloc = &instance->vk.alloc;
return wsi_create_wl_surface(alloc, pCreateInfo, pSurface);
}