#
# Copyright (C) 2018 Red Hat
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# This file defines all the available intrinsics in one place.
#
# The Intrinsic class corresponds one-to-one with nir_intrinsic_info
# structure.

class Intrinsic(object):
    """Class that represents all the information about an intrinsic opcode.
    NOTE: this must be kept in sync with nir_intrinsic_info.
    """
    def __init__(self, name, src_components, dest_components,
                 indices, flags, sysval, bit_sizes):
        """Parameters:

        - name: the intrinsic name
        - src_components: list of the number of components per src, 0 means
          vectorized instruction with number of components given in the
          num_components field in nir_intrinsic_instr.
        - dest_components: number of destination components, -1 means no
          dest, 0 means number of components given in num_components field
          in nir_intrinsic_instr.
        - indices: list of constant indices
        - flags: list of semantic flags
        - sysval: is this a system-value intrinsic
        - bit_sizes: allowed dest bit_sizes
        """
        assert isinstance(name, str)
        assert isinstance(src_components, list)
        if src_components:
            assert isinstance(src_components[0], int)
        assert isinstance(dest_components, int)
        assert isinstance(indices, list)
        if indices:
            assert isinstance(indices[0], str)
        assert isinstance(flags, list)
        if flags:
            assert isinstance(flags[0], str)
        assert isinstance(sysval, bool)
        if bit_sizes:
            assert isinstance(bit_sizes[0], int)

        self.name = name
        self.num_srcs = len(src_components)
        self.src_components = src_components
        self.has_dest = (dest_components >= 0)
        self.dest_components = dest_components
        self.num_indices = len(indices)
        self.indices = indices
        self.flags = flags
        self.sysval = sysval
        self.bit_sizes = bit_sizes

#
# Possible indices:
#

# A constant 'base' value that is added to an offset src:
BASE = "NIR_INTRINSIC_BASE"
# For store instructions, a writemask:
WRMASK = "NIR_INTRINSIC_WRMASK"
# The stream-id for GS emit_vertex/end_primitive intrinsics:
STREAM_ID = "NIR_INTRINSIC_STREAM_ID"
# The clip-plane id for load_user_clip_plane intrinsics:
UCP_ID = "NIR_INTRINSIC_UCP_ID"
# The amount of data, starting from BASE, that this instruction
# may access. This is used to provide bounds if the offset is
# not constant.
RANGE = "NIR_INTRINSIC_RANGE"
# The offset to the start of the NIR_INTRINSIC_RANGE. This is an alternative
# to NIR_INTRINSIC_BASE for describing the valid range in intrinsics that don't
# have the implicit addition of a base to the offset.
RANGE_BASE = "NIR_INTRINSIC_RANGE_BASE"
# The vulkan descriptor set for vulkan_resource_index
# intrinsic
DESC_SET = "NIR_INTRINSIC_DESC_SET"
# The vulkan descriptor set binding for vulkan_resource_index
# intrinsic
BINDING = "NIR_INTRINSIC_BINDING"
# Component offset
COMPONENT = "NIR_INTRINSIC_COMPONENT"
# Interpolation mode (only meaningful for FS inputs)
INTERP_MODE = "NIR_INTRINSIC_INTERP_MODE"
# A binary nir_op to use when performing a reduction or scan operation
REDUCTION_OP = "NIR_INTRINSIC_REDUCTION_OP"
# Cluster size for reduction operations
CLUSTER_SIZE = "NIR_INTRINSIC_CLUSTER_SIZE"
# Parameter index for a load_param intrinsic
PARAM_IDX = "NIR_INTRINSIC_PARAM_IDX"
# Image dimensionality for image intrinsics
IMAGE_DIM = "NIR_INTRINSIC_IMAGE_DIM"
# Non-zero if we are accessing an array image
IMAGE_ARRAY = "NIR_INTRINSIC_IMAGE_ARRAY"
# Access qualifiers for image and memory access intrinsics
ACCESS = "NIR_INTRINSIC_ACCESS"
DST_ACCESS = "NIR_INTRINSIC_DST_ACCESS"
SRC_ACCESS = "NIR_INTRINSIC_SRC_ACCESS"
# Image format for image intrinsics
FORMAT = "NIR_INTRINSIC_FORMAT"
# Offset or address alignment
ALIGN_MUL = "NIR_INTRINSIC_ALIGN_MUL"
ALIGN_OFFSET = "NIR_INTRINSIC_ALIGN_OFFSET"
# The vulkan descriptor type for vulkan_resource_index
DESC_TYPE = "NIR_INTRINSIC_DESC_TYPE"
# The nir_alu_type of input data to a store or conversion
SRC_TYPE = "NIR_INTRINSIC_SRC_TYPE"
# The nir_alu_type of the data output from a load or conversion
DEST_TYPE = "NIR_INTRINSIC_DEST_TYPE"
# The swizzle mask for quad_swizzle_amd & masked_swizzle_amd
SWIZZLE_MASK = "NIR_INTRINSIC_SWIZZLE_MASK"
# Driver location of attribute
DRIVER_LOCATION = "NIR_INTRINSIC_DRIVER_LOCATION"
# Ordering and visibility of a memory operation
MEMORY_SEMANTICS = "NIR_INTRINSIC_MEMORY_SEMANTICS"
# Modes affected by a memory operation
MEMORY_MODES = "NIR_INTRINSIC_MEMORY_MODES"
# Scope of a memory operation
MEMORY_SCOPE = "NIR_INTRINSIC_MEMORY_SCOPE"
# Scope of a control barrier
EXECUTION_SCOPE = "NIR_INTRINSIC_EXECUTION_SCOPE"
IO_SEMANTICS = "NIR_INTRINSIC_IO_SEMANTICS"
# Rounding mode for conversions
ROUNDING_MODE = "NIR_INTRINSIC_ROUNDING_MODE"
# Whether or not to saturate in conversions
SATURATE = "NIR_INTRINSIC_SATURATE"

#
# Possible flags:
#

CAN_ELIMINATE = "NIR_INTRINSIC_CAN_ELIMINATE"
CAN_REORDER = "NIR_INTRINSIC_CAN_REORDER"

INTR_OPCODES = {}

# Defines a new NIR intrinsic. By default, the intrinsic will have no sources
# and no destination.
#
# You can set dest_comp=n to enable a destination for the intrinsic, in which
# case it will have that many components, or =0 for "as many components as the
# NIR destination value."
#
# Set src_comp=n to enable sources for the instruction. It can be an array of
# component counts, or (for convenience) a scalar component count if there's
# only one source. If a component count is 0, it will be as many components as
# the intrinsic has based on the dest_comp.
def intrinsic(name, src_comp=[], dest_comp=-1, indices=[],
              flags=[], sysval=False, bit_sizes=[]):
    assert name not in INTR_OPCODES
    INTR_OPCODES[name] = Intrinsic(name, src_comp, dest_comp,
                                   indices, flags, sysval, bit_sizes)
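
# As an illustrative sketch only (not a real opcode): a hypothetical
# intrinsic with one 2-component source, a scalar destination, and a BASE
# index would be declared as
#
#    intrinsic("example_op", src_comp=[2], dest_comp=1, indices=[BASE],
#              flags=[CAN_ELIMINATE])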

intrinsic("nop", flags=[CAN_ELIMINATE])

intrinsic("convert_alu_types", dest_comp=0, src_comp=[0],
          indices=[SRC_TYPE, DEST_TYPE, ROUNDING_MODE, SATURATE],
          flags=[CAN_ELIMINATE, CAN_REORDER])

intrinsic("load_param", dest_comp=0, indices=[PARAM_IDX], flags=[CAN_ELIMINATE])

intrinsic("load_deref", dest_comp=0, src_comp=[-1],
          indices=[ACCESS], flags=[CAN_ELIMINATE])
intrinsic("store_deref", src_comp=[-1, 0], indices=[WRMASK, ACCESS])
intrinsic("copy_deref", src_comp=[-1, -1], indices=[DST_ACCESS, SRC_ACCESS])
intrinsic("memcpy_deref", src_comp=[-1, -1, 1], indices=[DST_ACCESS, SRC_ACCESS])

# Interpolation of input. The interp_deref_at* intrinsics are similar to the
# load_var intrinsic acting on a shader input except that they interpolate the
# input differently. The at_sample, at_offset and at_vertex intrinsics take an
# additional source that is an integer sample id, a vec2 position offset, or a
# vertex ID respectively.
intrinsic("interp_deref_at_centroid", dest_comp=0, src_comp=[1],
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_deref_at_sample", src_comp=[1, 1], dest_comp=0,
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_deref_at_offset", src_comp=[1, 2], dest_comp=0,
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_deref_at_vertex", src_comp=[1, 1], dest_comp=0,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# Gets the length of an unsized array at the end of a buffer
intrinsic("deref_buffer_array_length", src_comp=[-1], dest_comp=1,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# Ask the driver for the size of a given SSBO. It takes the buffer index
# as source.
intrinsic("get_ssbo_size", src_comp=[-1], dest_comp=1,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# a barrier is an intrinsic with no inputs/outputs but which can't be moved
# around/optimized in general
def barrier(name):
    intrinsic(name)

barrier("discard")

# Demote fragment shader invocation to a helper invocation. Any stores to
# memory after this instruction are suppressed and the fragment does not write
# outputs to the framebuffer. Unlike discard, demote needs to ensure that
# derivatives will still work for invocations that were not demoted.
#
# As specified by SPV_EXT_demote_to_helper_invocation.
barrier("demote")
intrinsic("is_helper_invocation", dest_comp=1, flags=[CAN_ELIMINATE])

# A workgroup-level control barrier. Any thread which hits this barrier will
# pause until all threads within the current workgroup have also hit the
# barrier. For compute shaders, the workgroup is defined as the local group.
# For tessellation control shaders, the workgroup is defined as the current
# patch. This intrinsic does not imply any sort of memory barrier.
barrier("control_barrier")

# Memory barrier with semantics analogous to the memoryBarrier() GLSL
# intrinsic.
barrier("memory_barrier")

# Control/Memory barrier with explicit scope. Follows the semantics of SPIR-V
# OpMemoryBarrier and OpControlBarrier, used to implement Vulkan Memory Model.
# Storage that the barrier applies is represented using NIR variable modes.
# For an OpMemoryBarrier, set EXECUTION_SCOPE to NIR_SCOPE_NONE.
intrinsic("scoped_barrier",
          indices=[EXECUTION_SCOPE, MEMORY_SEMANTICS, MEMORY_MODES, MEMORY_SCOPE])

# Shader clock intrinsic with semantics analogous to the clock2x32ARB()
# GLSL intrinsic.
# The latter can be used as a code motion barrier, which is currently not
# feasible with NIR.
intrinsic("shader_clock", dest_comp=2, flags=[CAN_ELIMINATE],
          indices=[MEMORY_SCOPE])

# Shader ballot intrinsics with semantics analogous to the
#
#    ballotARB()
#    readInvocationARB()
#    readFirstInvocationARB()
#
# GLSL functions from ARB_shader_ballot.
intrinsic("ballot", src_comp=[1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("read_invocation", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("read_first_invocation", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])

# Additional SPIR-V ballot intrinsics
#
# These correspond to the SPIR-V opcodes
#
#    OpGroupUniformElect
#    OpSubgroupFirstInvocationKHR
intrinsic("elect", dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("first_invocation", dest_comp=1, flags=[CAN_ELIMINATE])

# Memory barrier with semantics analogous to the compute shader
# groupMemoryBarrier(), memoryBarrierAtomicCounter(), memoryBarrierBuffer(),
# memoryBarrierImage() and memoryBarrierShared() GLSL intrinsics.
barrier("group_memory_barrier")
barrier("memory_barrier_atomic_counter")
barrier("memory_barrier_buffer")
barrier("memory_barrier_image")
barrier("memory_barrier_shared")
barrier("begin_invocation_interlock")
barrier("end_invocation_interlock")

# Memory barrier for synchronizing TCS patch outputs
barrier("memory_barrier_tcs_patch")

# A conditional discard/demote, with a single boolean source.
intrinsic("discard_if", src_comp=[1])
intrinsic("demote_if", src_comp=[1])

# ARB_shader_group_vote intrinsics
intrinsic("vote_any", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("vote_all", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("vote_feq", src_comp=[0], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("vote_ieq", src_comp=[0], dest_comp=1, flags=[CAN_ELIMINATE])

# Ballot ALU operations from SPIR-V.
#
# These operations work like their ALU counterparts except that they operate
# on a uvec4 which is treated as a 128-bit integer. Also, they are, in
# general, free to ignore any bits which are above the subgroup size.
intrinsic("ballot_bitfield_extract", src_comp=[4, 1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_bit_count_reduce", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_bit_count_inclusive", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_bit_count_exclusive", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_find_lsb", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_find_msb", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])

# Shuffle operations from SPIR-V.
intrinsic("shuffle", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("shuffle_xor", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("shuffle_up", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("shuffle_down", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])

# Quad operations from SPIR-V.
intrinsic("quad_broadcast", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("quad_swap_horizontal", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("quad_swap_vertical", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("quad_swap_diagonal", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])

intrinsic("reduce", src_comp=[0], dest_comp=0, indices=[REDUCTION_OP, CLUSTER_SIZE],
          flags=[CAN_ELIMINATE])
intrinsic("inclusive_scan", src_comp=[0], dest_comp=0, indices=[REDUCTION_OP],
          flags=[CAN_ELIMINATE])
intrinsic("exclusive_scan", src_comp=[0], dest_comp=0, indices=[REDUCTION_OP],
          flags=[CAN_ELIMINATE])

# AMD shader ballot operations
intrinsic("quad_swizzle_amd", src_comp=[0], dest_comp=0, indices=[SWIZZLE_MASK],
          flags=[CAN_ELIMINATE])
intrinsic("masked_swizzle_amd", src_comp=[0], dest_comp=0, indices=[SWIZZLE_MASK],
          flags=[CAN_ELIMINATE])
intrinsic("write_invocation_amd", src_comp=[0, 0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("mbcnt_amd", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])

# Basic Geometry Shader intrinsics.
#
# emit_vertex implements GLSL's EmitStreamVertex() built-in. It takes a single
# index, which is the stream ID to write to.
#
# end_primitive implements GLSL's EndPrimitive() built-in.
intrinsic("emit_vertex", indices=[STREAM_ID])
intrinsic("end_primitive", indices=[STREAM_ID])

# Geometry Shader intrinsics with a vertex count.
#
# Alternatively, drivers may implement these intrinsics, and use
# nir_lower_gs_intrinsics() to convert from the basic intrinsics.
#
# These maintain a count of the number of vertices emitted, as an additional
# unsigned integer source.
intrinsic("emit_vertex_with_counter", src_comp=[1], indices=[STREAM_ID])
intrinsic("end_primitive_with_counter", src_comp=[1], indices=[STREAM_ID])
intrinsic("set_vertex_count", src_comp=[1])

# Atomic counters
#
# The *_deref variants take a deref to an atomic_uint nir_variable, while the
# other, lowered, variants take a constant buffer index and register offset.

def atomic(name, flags=[]):
    intrinsic(name + "_deref", src_comp=[-1], dest_comp=1, flags=flags)
    intrinsic(name, src_comp=[1], dest_comp=1, indices=[BASE], flags=flags)

def atomic2(name):
    intrinsic(name + "_deref", src_comp=[-1, 1], dest_comp=1)
    intrinsic(name, src_comp=[1, 1], dest_comp=1, indices=[BASE])

def atomic3(name):
    intrinsic(name + "_deref", src_comp=[-1, 1, 1], dest_comp=1)
    intrinsic(name, src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
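
# For instance, atomic2("atomic_counter_add") below registers two opcodes;
# a sketch of the expansion, following the helpers above:
#
#    atomic_counter_add_deref: src_comp=[-1, 1], dest_comp=1
#    atomic_counter_add:       src_comp=[1, 1], dest_comp=1, indices=[BASE]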

atomic("atomic_counter_inc")
atomic("atomic_counter_pre_dec")
atomic("atomic_counter_post_dec")
atomic("atomic_counter_read", flags=[CAN_ELIMINATE])
atomic2("atomic_counter_add")
atomic2("atomic_counter_min")
atomic2("atomic_counter_max")
atomic2("atomic_counter_and")
atomic2("atomic_counter_or")
atomic2("atomic_counter_xor")
atomic2("atomic_counter_exchange")
atomic3("atomic_counter_comp_swap")

# Image load, store and atomic intrinsics.
#
# All image intrinsics come in three versions. One which takes an image target
# passed as a deref chain as the first source, one which takes an index as the
# first source, and one which takes a bindless handle as the first source.
# In the first version, the image variable contains the memory and layout
# qualifiers that influence the semantics of the intrinsic. In the second and
# third, the image format and access qualifiers are provided as constant
# indices.
#
# All image intrinsics take a four-coordinate vector and a sample index as
# 2nd and 3rd sources, determining the location within the image that will be
# accessed by the intrinsic. Components not applicable to the image target
# in use are undefined. Image store takes an additional four-component
# argument with the value to be written, and image atomic operations take
# either one or two additional scalar arguments with the same meaning as in
# the ARB_shader_image_load_store specification.
def image(name, src_comp=[], extra_indices=[], **kwargs):
    intrinsic("image_deref_" + name, src_comp=[1] + src_comp,
              indices=[ACCESS] + extra_indices, **kwargs)
    intrinsic("image_" + name, src_comp=[1] + src_comp,
              indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS] + extra_indices, **kwargs)
    intrinsic("bindless_image_" + name, src_comp=[1] + src_comp,
              indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS] + extra_indices, **kwargs)
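
# For example, the image("load", ...) call below registers three opcodes; a
# sketch of the expansion, following the helper above:
#
#    image_deref_load    (src[0] is an image deref)
#    image_load          (src[0] is an image index)
#    bindless_image_load (src[0] is a bindless handle)
#
# with indices [ACCESS, DEST_TYPE] for the deref form and
# [IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS, DEST_TYPE] for the other two.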

image("load", src_comp=[4, 1, 1], extra_indices=[DEST_TYPE], dest_comp=0, flags=[CAN_ELIMINATE])
image("store", src_comp=[4, 1, 0, 1], extra_indices=[SRC_TYPE])
image("atomic_add", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_imin", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_umin", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_imax", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_umax", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_and", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_or", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_xor", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_exchange", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_comp_swap", src_comp=[4, 1, 1, 1], dest_comp=1)
image("atomic_fadd", src_comp=[4, 1, 1], dest_comp=1)
image("size", dest_comp=0, src_comp=[1], flags=[CAN_ELIMINATE, CAN_REORDER])
image("samples", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])
image("atomic_inc_wrap", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_dec_wrap", src_comp=[4, 1, 1], dest_comp=1)
# CL-specific format queries
image("format", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])
image("order", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])

# Vulkan descriptor set intrinsics
#
# The Vulkan API uses a different binding model from GL. In the Vulkan
# API, all external resources are represented by a tuple:
#
# (descriptor set, binding, array index)
#
# where the array index is the only thing allowed to be indirect. The
# vulkan_surface_index intrinsic takes the descriptor set and binding as
# its first two indices and the array index as its source. The third
# index is a nir_variable_mode in case that's useful to the backend.
#
# The intended usage is that the shader will call vulkan_surface_index to
# get an index and then pass that as the buffer index to ubo/ssbo calls.
#
# The vulkan_resource_reindex intrinsic takes a resource index in src0
# (the result of a vulkan_resource_index or vulkan_resource_reindex) which
# corresponds to the tuple (set, binding, index) and computes an index
# corresponding to tuple (set, binding, idx + src1).
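#
# As a sketch of the intended chain (hypothetical SSA names, not actual NIR
# syntax):
#
#    %index  = vulkan_resource_index(%array_index)   (desc_set=N, binding=M)
#    %index2 = vulkan_resource_reindex(%index, %offset)
#    %desc   = load_vulkan_descriptor(%index2)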
intrinsic("vulkan_resource_index", src_comp=[1], dest_comp=0,
          indices=[DESC_SET, BINDING, DESC_TYPE],
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("vulkan_resource_reindex", src_comp=[0, 1], dest_comp=0,
          indices=[DESC_TYPE], flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("load_vulkan_descriptor", src_comp=[-1], dest_comp=0,
          indices=[DESC_TYPE], flags=[CAN_ELIMINATE, CAN_REORDER])

# variable atomic intrinsics
#
# All of these variable atomic memory operations read a value from memory,
# compute a new value using one of the operations below, write the new value
# to memory, and return the original value read.
#
# All operations take 2 sources except CompSwap, which takes 3. These sources
# represent:
#
# 0: A deref to the memory on which to perform the atomic
# 1: The data parameter to the atomic function (i.e. the value to add
#    in shared_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
intrinsic("deref_atomic_add", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_imin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_umin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_imax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_umax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_and", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_or", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_xor", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_exchange", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_comp_swap", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fadd", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fmin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fmax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fcomp_swap", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])

# SSBO atomic intrinsics
#
# All of the SSBO atomic memory operations read a value from memory,
# compute a new value using one of the operations below, write the new
# value to memory, and return the original value read.
#
# All operations take 3 sources except CompSwap, which takes 4. These
# sources represent:
#
# 0: The SSBO buffer index.
# 1: The offset into the SSBO buffer of the variable that the atomic
#    operation will operate on.
# 2: The data parameter to the atomic function (i.e. the value to add
#    in ssbo_atomic_add, etc).
# 3: For CompSwap only: the second data parameter.
intrinsic("ssbo_atomic_add", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_imin", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_umin", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_imax", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_umax", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_and", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_or", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_xor", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_exchange", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_comp_swap", src_comp=[-1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fadd", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fmin", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fmax", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fcomp_swap", src_comp=[-1, 1, 1, 1], dest_comp=1, indices=[ACCESS])

# CS shared variable atomic intrinsics
#
# All of the shared variable atomic memory operations read a value from
# memory, compute a new value using one of the operations below, write the
# new value to memory, and return the original value read.
#
# All operations take 2 sources except CompSwap, which takes 3. These
# sources represent:
#
# 0: The offset into the shared variable storage region that the atomic
#    operation will operate on.
# 1: The data parameter to the atomic function (i.e. the value to add
#    in shared_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
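#
# As an illustrative sketch: a GLSL atomicAdd(s, x) on a shared variable s
# would typically lower to a shared_atomic_add whose src[0] is the offset of
# s within shared memory, whose src[1] is x, and whose destination receives
# the value that s held before the addition.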
intrinsic("shared_atomic_add", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_imin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_umin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_imax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_umax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_and", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_or", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_xor", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_exchange", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_comp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fadd", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fmin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fmax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fcomp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])

# Global atomic intrinsics
#
# All of the global atomic memory operations read a value from
# memory, compute a new value using one of the operations below, write the
# new value to memory, and return the original value read.
#
# All operations take 2 sources except CompSwap, which takes 3. These
# sources represent:
#
# 0: The memory address that the atomic operation will operate on.
# 1: The data parameter to the atomic function (i.e. the value to add
#    in global_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
intrinsic("global_atomic_add", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_imin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_umin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_imax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_umax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_and", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_or", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_xor", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_exchange", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_comp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fadd", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fmin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fmax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fcomp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])

def system_value(name, dest_comp, indices=[], bit_sizes=[32]):
    intrinsic("load_" + name, [], dest_comp, indices,
              flags=[CAN_ELIMINATE, CAN_REORDER], sysval=True,
              bit_sizes=bit_sizes)
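
# For example, system_value("frag_coord", 4) below registers a
# "load_frag_coord" intrinsic: no sources, a 4-component 32-bit destination,
# sysval=True, and the CAN_ELIMINATE/CAN_REORDER flags.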

system_value("frag_coord", 4)
system_value("point_coord", 2)
system_value("line_coord", 1)
system_value("front_face", 1, bit_sizes=[1, 32])
system_value("vertex_id", 1)
system_value("vertex_id_zero_base", 1)
system_value("first_vertex", 1)
system_value("is_indexed_draw", 1)
system_value("base_vertex", 1)
system_value("instance_id", 1)
system_value("base_instance", 1)
system_value("draw_id", 1)
system_value("sample_id", 1)
# sample_id_no_per_sample is like sample_id but does not imply per-
# sample shading. See the lower_helper_invocation option.
system_value("sample_id_no_per_sample", 1)
system_value("sample_pos", 2)
system_value("sample_mask_in", 1)
system_value("primitive_id", 1)
system_value("invocation_id", 1)
system_value("tess_coord", 3)
system_value("tess_level_outer", 4)
system_value("tess_level_inner", 2)
system_value("tess_level_outer_default", 4)
system_value("tess_level_inner_default", 2)
system_value("patch_vertices_in", 1)
system_value("local_invocation_id", 3)
system_value("local_invocation_index", 1)
# zero_base indicates it starts from 0 for the current dispatch
# non-zero_base indicates the base is included
system_value("work_group_id", 3, bit_sizes=[32, 64])
system_value("work_group_id_zero_base", 3)
system_value("base_work_group_id", 3, bit_sizes=[32, 64])
system_value("user_clip_plane", 4, indices=[UCP_ID])
system_value("num_work_groups", 3, bit_sizes=[32, 64])
system_value("helper_invocation", 1, bit_sizes=[1, 32])
system_value("alpha_ref_float", 1)
system_value("layer_id", 1)
system_value("view_index", 1)
system_value("subgroup_size", 1)
system_value("subgroup_invocation", 1)
system_value("subgroup_eq_mask", 0, bit_sizes=[32, 64])
system_value("subgroup_ge_mask", 0, bit_sizes=[32, 64])
system_value("subgroup_gt_mask", 0, bit_sizes=[32, 64])
system_value("subgroup_le_mask", 0, bit_sizes=[32, 64])
system_value("subgroup_lt_mask", 0, bit_sizes=[32, 64])
system_value("num_subgroups", 1)
system_value("subgroup_id", 1)
system_value("local_group_size", 3)
# note: the definition of global_invocation_id_zero_base is based on
# (work_group_id * local_group_size) + local_invocation_id.
# it is *not* based on work_group_id_zero_base, meaning the work group
# base is already accounted for, and the global base is additive on top of that
system_value("global_invocation_id", 3, bit_sizes=[32, 64])
system_value("global_invocation_id_zero_base", 3, bit_sizes=[32, 64])
system_value("base_global_invocation_id", 3, bit_sizes=[32, 64])
system_value("global_invocation_index", 1, bit_sizes=[32, 64])
system_value("work_dim", 1)
system_value("line_width", 1)
system_value("aa_line_width", 1)
# BASE=0 for global/shader, BASE=1 for local/function
system_value("scratch_base_ptr", 0, bit_sizes=[32,64], indices=[BASE])
system_value("constant_base_ptr", 0, bit_sizes=[32,64])

# Driver-specific viewport scale/offset parameters.
#
# VC4 and V3D need to emit a scaled version of the position in the vertex
# shaders for binning, and having system values lets us move the math for that
# into NIR.
#
# Panfrost needs to implement all coordinate transformation in the
# vertex shader; system values allow us to share this routine in NIR.
system_value("viewport_x_scale", 1)
system_value("viewport_y_scale", 1)
system_value("viewport_z_scale", 1)
system_value("viewport_z_offset", 1)
system_value("viewport_scale", 3)
system_value("viewport_offset", 3)

# Blend constant color values. Float values are clamped. Vectored versions
# are provided as well for driver convenience.
system_value("blend_const_color_r_float", 1)
system_value("blend_const_color_g_float", 1)
system_value("blend_const_color_b_float", 1)
system_value("blend_const_color_a_float", 1)
system_value("blend_const_color_rgba", 4)
system_value("blend_const_color_rgba8888_unorm", 1)
system_value("blend_const_color_aaaa8888_unorm", 1)

# System values for gl_Color, for radeonsi which interpolates these in the
# shader prolog to handle two-sided color without recompiles and therefore
# doesn't handle these in the main shader part like normal varyings.
system_value("color0", 4)
system_value("color1", 4)

# System value for internal compute shaders in radeonsi.
system_value("user_data_amd", 4)

# Barycentric coordinate intrinsics.
#
# These set up the barycentric coordinates for a particular interpolation.
# The first four are for the simple cases: pixel, centroid, per-sample
# (at gl_SampleID), or pull model (1/W, 1/I, 1/J) at the pixel center. The
# next two handle interpolating at a specified sample location, or
# interpolating with a vec2 offset.
#
# The interp_mode index should be either the INTERP_MODE_SMOOTH or
# INTERP_MODE_NOPERSPECTIVE enum values.
#
# The vec2 value produced by these intrinsics is intended for use as the
# barycoord source of a load_interpolated_input intrinsic.

def barycentric(name, dst_comp, src_comp=[]):
    intrinsic("load_barycentric_" + name, src_comp=src_comp, dest_comp=dst_comp,
              indices=[INTERP_MODE], flags=[CAN_ELIMINATE, CAN_REORDER])

# no sources.
barycentric("pixel", 2)
barycentric("centroid", 2)
barycentric("sample", 2)
barycentric("model", 3)
# src[] = { sample_id }.
barycentric("at_sample", 2, [1])
# src[] = { offset.xy }.
barycentric("at_offset", 2, [2])

# Load sample position:
#
# Takes a sample # and returns a sample position. Used for lowering
# interpolateAtSample() to interpolateAtOffset()
intrinsic("load_sample_pos_from_id", src_comp=[1], dest_comp=2,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# Loads what I believe is the primitive size, for scaling ij to pixel size:
intrinsic("load_size_ir3", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])

# Fragment shader input interpolation delta intrinsic.
#
# For hw where fragment shader input interpolation is handled in shader, the
# load_fs_input_interp_deltas intrinsic can be used to load the input deltas
# used for interpolation as follows:
#
#    vec3 iid = load_fs_input_interp_deltas(varying_slot)
#    vec2 bary = load_barycentric_*(...)
#    float result = iid.x + iid.y * bary.y + iid.z * bary.x
intrinsic("load_fs_input_interp_deltas", src_comp=[1], dest_comp=3,
          indices=[BASE, COMPONENT, IO_SEMANTICS], flags=[CAN_ELIMINATE, CAN_REORDER])

# Load operations pull data from some piece of GPU memory. All load
# operations operate in terms of offsets into some piece of theoretical
# memory. Loads from externally visible memory (UBO and SSBO) simply take a
# byte offset as a source. Loads from opaque memory (uniforms, inputs, etc.)
# take a base+offset pair where the nir_intrinsic_base() gives the location
# of the start of the variable being loaded and the offset source is an
# offset into that variable.
#
# Uniform load operations have a nir_intrinsic_range() index that specifies the
# range (starting at base) of the data from which we are loading. If
# range == 0, then the range is unknown.
#
# UBO load operations have a nir_intrinsic_range_base() and
# nir_intrinsic_range() that specify the byte range [range_base,
# range_base+range] of the UBO that the src offset access must lie within.
#
# Some load operations such as UBO/SSBO load and per_vertex loads take an
# additional source to specify which UBO/SSBO/vertex to load from.
#
# The exact address type depends on the lowering pass that generates the
# load/store intrinsics. Typically, this is vec4 units for things such as
# varying slots and float units for fragment shader inputs. UBO and SSBO
# offsets are always in bytes.

def load(name, src_comp, indices=[], flags=[]):
    intrinsic("load_" + name, src_comp, dest_comp=0, indices=indices,
              flags=flags)
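
# E.g. load("uniform", ...) below registers a "load_uniform" opcode; every
# load built through this helper has a variable-width destination
# (dest_comp=0), with the component count taken from num_components.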

# src[] = { offset }.
load("uniform", [1], [BASE, RANGE, DEST_TYPE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { buffer_index, offset }.
load("ubo", [-1, 1], [ACCESS, ALIGN_MUL, ALIGN_OFFSET, RANGE_BASE, RANGE], flags=[CAN_ELIMINATE, CAN_REORDER])
# src[] = { buffer_index, offset in vec4 units }
load("ubo_vec4", [-1, 1], [ACCESS, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("input", [1], [BASE, COMPONENT, DEST_TYPE, IO_SEMANTICS], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { vertex_id, offset }.
load("input_vertex", [1, 1], [BASE, COMPONENT, DEST_TYPE, IO_SEMANTICS], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { vertex, offset }.
load("per_vertex_input", [1, 1], [BASE, COMPONENT, IO_SEMANTICS], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { barycoord, offset }.
load("interpolated_input", [2, 1], [BASE, COMPONENT, IO_SEMANTICS], [CAN_ELIMINATE, CAN_REORDER])

# src[] = { buffer_index, offset }.
load("ssbo", [-1, 1], [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { buffer_index }
load("ssbo_address", [1], [], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("output", [1], [BASE, COMPONENT, IO_SEMANTICS], flags=[CAN_ELIMINATE])
# src[] = { vertex, offset }.
load("per_vertex_output", [1, 1], [BASE, COMPONENT, IO_SEMANTICS], [CAN_ELIMINATE])
# src[] = { offset }.
load("shared", [1], [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { offset }.
load("push_constant", [1], [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("constant", [1], [BASE, RANGE, ALIGN_MUL, ALIGN_OFFSET],
     [CAN_ELIMINATE, CAN_REORDER])
# src[] = { address }.
load("global", [1], [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { address }.
load("global_constant", [1], [ACCESS, ALIGN_MUL, ALIGN_OFFSET],
     [CAN_ELIMINATE, CAN_REORDER])
# src[] = { address }.
load("kernel_input", [1], [BASE, RANGE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("scratch", [1], [ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])

# Stores work the same way as loads, except now the first source is the value
# to store and the second (and possibly third) source specifies where to store
# the value. SSBO and shared memory stores also have a
# nir_intrinsic_write_mask().

def store(name, srcs, indices=[], flags=[]):
    intrinsic("store_" + name, [0] + srcs, indices=indices, flags=flags)
|
2018-03-15 22:42:44 +00:00
|
|
|
|
2019-04-11 17:29:17 +01:00
|
|
|
# src[] = { value, offset }.
|
2020-10-01 03:20:53 +01:00
|
|
|
store("output", [1], [BASE, WRMASK, COMPONENT, SRC_TYPE, IO_SEMANTICS])
# src[] = { value, vertex, offset }.
store("per_vertex_output", [1, 1], [BASE, WRMASK, COMPONENT, IO_SEMANTICS])
# src[] = { value, block_index, offset }
store("ssbo", [-1, 1], [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, offset }.
store("shared", [1], [BASE, WRMASK, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, address }.
store("global", [1], [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, offset }.
store("scratch", [1], [ALIGN_MUL, ALIGN_OFFSET, WRMASK])
# IR3-specific versions of most SSBO intrinsics. The only difference
# compared to the originals is that they add an extra source to hold
# the dword-offset, which is needed by the backend code in addition to
# the byte-offset already provided by NIR in one of the sources.
#
# The NIR lowering pass 'ir3_nir_lower_io_offset' replaces the original
# SSBO intrinsics with these, always placing the computed dword-offset
# in the last source.
#
# The float versions are not handled because those are not supported
# by the backend.
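#
# As a sketch (assuming 4-byte dwords; the exact shift is up to the
# lowering pass), the lowering conceptually turns
#
#   store_ssbo(value, block_index, byte_offset)
#
# into
#
#   store_ssbo_ir3(value, block_index, byte_offset, byte_offset >> 2)
#
# with the dword-offset appended as the last source.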
store("ssbo_ir3", [1, 1, 1],
|
|
|
|
indices=[WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
|
|
|
|
load("ssbo_ir3", [1, 1, 1],
|
|
|
|
indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE])
intrinsic("ssbo_atomic_add_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
|
|
|
|
intrinsic("ssbo_atomic_imin_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
|
|
|
|
intrinsic("ssbo_atomic_umin_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
|
|
|
|
intrinsic("ssbo_atomic_imax_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
|
|
|
|
intrinsic("ssbo_atomic_umax_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
|
|
|
|
intrinsic("ssbo_atomic_and_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
|
|
|
|
intrinsic("ssbo_atomic_or_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
|
|
|
|
intrinsic("ssbo_atomic_xor_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
|
|
|
|
intrinsic("ssbo_atomic_exchange_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
|
|
|
|
intrinsic("ssbo_atomic_comp_swap_ir3", src_comp=[1, 1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
# System values for freedreno geometry shaders.
system_value("vs_primitive_stride_ir3", 1)
system_value("vs_vertex_stride_ir3", 1)
system_value("gs_header_ir3", 1)
system_value("primitive_location_ir3", 1, indices=[DRIVER_LOCATION])
# System values for freedreno tessellation shaders.
system_value("hs_patch_stride_ir3", 1)
system_value("tess_factor_base_ir3", 2)
system_value("tess_param_base_ir3", 2)
system_value("tcs_header_ir3", 1)
# IR3-specific intrinsics for tessellation control shaders. cond_end_ir3 ends
# the shader when src0 is false and is used to narrow down the TCS shader to
# just thread 0 before writing out tessellation levels.
intrinsic("cond_end_ir3", src_comp=[1])
# end_patch_ir3 is used just before thread 0 exits the TCS and presumably
# signals the TE that the patch is complete and can be tessellated.
intrinsic("end_patch_ir3")
# IR3-specific load/store intrinsics. These access a buffer used to pass data
# between geometry stages - perhaps it's explicit access to the vertex cache.
# src[] = { value, offset }.
store("shared_ir3", [1], [BASE, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { offset }.
load("shared_ir3", [1], [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# IR3-specific load/store global intrinsics. They take a 64-bit base address
# and a 32-bit offset. The hardware will add the base and the offset, which
# saves us from doing 64-bit math on the base address.
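#
# As a sketch (treating the address src as (hi, lo) per the comments
# below), the hardware effectively computes
#
#   addr64 = ((uint64_t)hi << 32 | lo) + offset
#
# so the shader itself never performs a 64-bit add on the base.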
# src[] = { value, address(vec2 of hi+lo uint32_t), offset }.
# const_index[] = { access, align_mul, align_offset }
store("global_ir3", [2, 1], indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { address(vec2 of hi+lo uint32_t), offset }.
# const_index[] = { access, align_mul, align_offset }
load("global_ir3", [2, 1], indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE])
# IR3-specific bindless handle specifier. Similar to vulkan_resource_index, but
# without the binding because the hardware expects a single flattened index
# rather than a (binding, index) pair. We may also want to use this with GL.
# Note that this doesn't actually turn into a HW instruction.
intrinsic("bindless_resource_ir3", [1], dest_comp=1, indices=[DESC_SET], flags=[CAN_ELIMINATE, CAN_REORDER])
# Intrinsics used by the Midgard/Bifrost blend pipeline. These are defined
# within a blend shader to read/write the raw value from the tile buffer,
# without applying any format conversion in the process. If the shader needs
# usable pixel values, it must apply format conversions itself.
#
# These definitions are generic, but they are explicitly vendored to prevent
# other drivers from using them, as their semantics are defined in terms of the
# Midgard/Bifrost hardware tile buffer and may not line up with anything sane.
# One notable divergence is sRGB, which is asymmetric: raw_input_pan requires
# an sRGB->linear conversion, but linear values should be written to
# raw_output_pan and the hardware handles linear->sRGB.
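#
# As a reference sketch (this is the standard sRGB transfer function, not a
# Panfrost-specific API), the conversion a blend shader would apply to a
# channel read via raw_input_pan is:
#
#   def srgb_to_linear(s):
#       return s / 12.92 if s <= 0.04045 else ((s + 0.055) / 1.055) ** 2.4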
# src[] = { value }
store("raw_output_pan", [], [])
store("combined_output_pan", [1, 1, 1], [BASE, COMPONENT])
load("raw_output_pan", [1], [BASE], [CAN_ELIMINATE, CAN_REORDER])
# Loads the sampler parameters <min_lod, max_lod, lod_bias>
# src[] = { sampler_index }
load("sampler_lod_parameters_pan", [1], flags=[CAN_ELIMINATE, CAN_REORDER])
# R600-specific intrinsics
#
# Location where the tessellation data is stored in LDS
system_value("tcs_in_param_base_r600", 4)
system_value("tcs_out_param_base_r600", 4)
system_value("tcs_rel_patch_id_r600", 1)
system_value("tcs_tess_factor_base_r600", 1)
# Load as many components as needed, giving per-component addresses (one
# address component per loaded component)
intrinsic("load_local_shared_r600", src_comp=[0], dest_comp=0, indices = [COMPONENT], flags = [CAN_ELIMINATE, CAN_REORDER])
store("local_shared_r600", [1], [WRMASK])
store("tf_r600", [])
# V3D-specific intrinsic for tile buffer color reads.
#
# The hardware requires that we read the samples and components of a pixel
# in order, so we cannot eliminate or reorder any loads in a sequence.
#
# src[] = { render_target }
# BASE = sample index
load("tlb_color_v3d", [1], [BASE, COMPONENT], [])
# V3D-specific intrinsic for per-sample tile buffer color writes.
#
# The driver backend needs to identify per-sample color writes and emit
# specific code for them.
#
# src[] = { value, render_target }
# BASE = sample index
store("tlb_sample_color_v3d", [1], [BASE, COMPONENT, SRC_TYPE], [])
# V3D-specific intrinsic to load the number of layers attached to
# the target framebuffer
intrinsic("load_fb_layers_v3d", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])
# Intel-specific query for loading from the brw_image_param struct passed
# into the shader as a uniform. The source is a deref to the image
# variable. The const index specifies which of the six parameters to load.
intrinsic("image_deref_load_param_intel", src_comp=[1], dest_comp=0,
          indices=[BASE], flags=[CAN_ELIMINATE, CAN_REORDER])
image("load_raw_intel", src_comp=[1], dest_comp=0,
      flags=[CAN_ELIMINATE])
image("store_raw_intel", src_comp=[1, 0])
# Number of data items being operated on for a SIMD program.
system_value("simd_width_intel", 1)
# Load a relocatable 32-bit value
intrinsic("load_reloc_const_intel", dest_comp=1, bit_sizes=[32],
          indices=[PARAM_IDX], flags=[CAN_ELIMINATE, CAN_REORDER])