2016-01-08 19:02:17 +00:00
|
|
|
|
/*
|
|
|
|
|
* Copyright © 2016 Intel Corporation
|
|
|
|
|
*
|
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
|
*
|
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
|
* Software.
|
|
|
|
|
*
|
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
|
*/
|
|
|
|
|
|
2017-07-26 21:32:01 +01:00
|
|
|
|
#include <math.h>
|
2016-01-08 19:02:17 +00:00
|
|
|
|
#include "vtn_private.h"
|
2019-04-10 19:13:40 +01:00
|
|
|
|
#include "spirv_info.h"
|
2016-01-08 19:02:17 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Normally, column vectors in SPIR-V correspond to a single NIR SSA
|
|
|
|
|
* definition. But for matrix multiplies, we want to do one routine for
|
|
|
|
|
* multiplying a matrix by a matrix and then pretend that vectors are matrices
|
|
|
|
|
* with one column. So we "wrap" these things, and unwrap the result before we
|
|
|
|
|
* send it off.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static struct vtn_ssa_value *
|
|
|
|
|
wrap_matrix(struct vtn_builder *b, struct vtn_ssa_value *val)
|
|
|
|
|
{
|
|
|
|
|
if (val == NULL)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
if (glsl_type_is_matrix(val->type))
|
|
|
|
|
return val;
|
|
|
|
|
|
|
|
|
|
struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
|
2020-05-29 22:10:28 +01:00
|
|
|
|
dest->type = glsl_get_bare_type(val->type);
|
2016-01-08 19:02:17 +00:00
|
|
|
|
dest->elems = ralloc_array(b, struct vtn_ssa_value *, 1);
|
|
|
|
|
dest->elems[0] = val;
|
|
|
|
|
|
|
|
|
|
return dest;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct vtn_ssa_value *
|
|
|
|
|
unwrap_matrix(struct vtn_ssa_value *val)
|
|
|
|
|
{
|
|
|
|
|
if (glsl_type_is_matrix(val->type))
|
|
|
|
|
return val;
|
|
|
|
|
|
|
|
|
|
return val->elems[0];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Emit NIR for a SPIR-V matrix multiply.
 *
 * Inputs may be matrices or vectors; wrap_matrix() turns vectors into
 * one-column matrices so a single algorithm handles every combination.
 * If both operands carry a cached transpose, the multiply is rewritten as
 * transpose(B * A) to reuse those cached values; if only src0 has one and
 * the base type is float, a dot-product formulation is used instead of the
 * general multiply-accumulate loop.
 */
static struct vtn_ssa_value *
matrix_multiply(struct vtn_builder *b,
                struct vtn_ssa_value *_src0, struct vtn_ssa_value *_src1)
{

   struct vtn_ssa_value *src0 = wrap_matrix(b, _src0);
   struct vtn_ssa_value *src1 = wrap_matrix(b, _src1);
   /* Cached transposes (if any) of the original operands, also wrapped so
    * they can be indexed uniformly below.
    */
   struct vtn_ssa_value *src0_transpose = wrap_matrix(b, _src0->transposed);
   struct vtn_ssa_value *src1_transpose = wrap_matrix(b, _src1->transposed);

   unsigned src0_rows = glsl_get_vector_elements(src0->type);
   unsigned src0_columns = glsl_get_matrix_columns(src0->type);
   unsigned src1_columns = glsl_get_matrix_columns(src1->type);

   /* The result has src0's row count and src1's column count.  A single
    * result column means the destination is really a vector.
    */
   const struct glsl_type *dest_type;
   if (src1_columns > 1) {
      dest_type = glsl_matrix_type(glsl_get_base_type(src0->type),
                                   src0_rows, src1_columns);
   } else {
      dest_type = glsl_vector_type(glsl_get_base_type(src0->type), src0_rows);
   }
   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);

   dest = wrap_matrix(b, dest);

   bool transpose_result = false;
   if (src0_transpose && src1_transpose) {
      /* transpose(A) * transpose(B) = transpose(B * A) */
      src1 = src0_transpose;
      src0 = src1_transpose;
      src0_transpose = NULL;
      src1_transpose = NULL;
      transpose_result = true;
   }

   if (src0_transpose && !src1_transpose &&
       glsl_get_base_type(src0->type) == GLSL_TYPE_FLOAT) {
      /* We already have the rows of src0 and the columns of src1 available,
       * so we can just take the dot product of each row with each column to
       * get the result.
       */

      for (unsigned i = 0; i < src1_columns; i++) {
         nir_ssa_def *vec_src[4];
         for (unsigned j = 0; j < src0_rows; j++) {
            vec_src[j] = nir_fdot(&b->nb, src0_transpose->elems[j]->def,
                                          src1->elems[i]->def);
         }
         dest->elems[i]->def = nir_vec(&b->nb, vec_src, src0_rows);
      }
   } else {
      /* We don't handle the case where src1 is transposed but not src0, since
       * the general case only uses individual components of src1 so the
       * optimizer should chew through the transpose we emitted for src1.
       */

      for (unsigned i = 0; i < src1_columns; i++) {
         /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
         dest->elems[i]->def =
            nir_fmul(&b->nb, src0->elems[0]->def,
                     nir_channel(&b->nb, src1->elems[i]->def, 0));
         for (unsigned j = 1; j < src0_columns; j++) {
            dest->elems[i]->def =
               nir_fadd(&b->nb, dest->elems[i]->def,
                        nir_fmul(&b->nb, src0->elems[j]->def,
                                 nir_channel(&b->nb, src1->elems[i]->def, j)));
         }
      }
   }

   dest = unwrap_matrix(dest);

   /* If we swapped the operands above, undo it by transposing the result. */
   if (transpose_result)
      dest = vtn_ssa_transpose(b, dest);

   return dest;
}
|
|
|
|
|
|
|
|
|
|
static struct vtn_ssa_value *
|
|
|
|
|
mat_times_scalar(struct vtn_builder *b,
|
|
|
|
|
struct vtn_ssa_value *mat,
|
|
|
|
|
nir_ssa_def *scalar)
|
|
|
|
|
{
|
|
|
|
|
struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
|
|
|
|
|
for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
|
2018-03-13 12:26:19 +00:00
|
|
|
|
if (glsl_base_type_is_integer(glsl_get_base_type(mat->type)))
|
2016-01-08 19:02:17 +00:00
|
|
|
|
dest->elems[i]->def = nir_imul(&b->nb, mat->elems[i]->def, scalar);
|
2018-03-13 12:26:19 +00:00
|
|
|
|
else
|
|
|
|
|
dest->elems[i]->def = nir_fmul(&b->nb, mat->elems[i]->def, scalar);
|
2016-01-08 19:02:17 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return dest;
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-29 21:57:42 +01:00
|
|
|
|
static struct vtn_ssa_value *
|
2016-01-08 19:02:17 +00:00
|
|
|
|
vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
|
|
|
|
|
struct vtn_ssa_value *src0, struct vtn_ssa_value *src1)
|
|
|
|
|
{
|
|
|
|
|
switch (opcode) {
|
2016-01-08 19:18:47 +00:00
|
|
|
|
case SpvOpFNegate: {
|
2020-05-29 21:57:42 +01:00
|
|
|
|
struct vtn_ssa_value *dest = vtn_create_ssa_value(b, src0->type);
|
2016-01-08 19:18:47 +00:00
|
|
|
|
unsigned cols = glsl_get_matrix_columns(src0->type);
|
|
|
|
|
for (unsigned i = 0; i < cols; i++)
|
2020-05-29 21:57:42 +01:00
|
|
|
|
dest->elems[i]->def = nir_fneg(&b->nb, src0->elems[i]->def);
|
|
|
|
|
return dest;
|
2016-01-08 19:18:47 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case SpvOpFAdd: {
|
2020-05-29 21:57:42 +01:00
|
|
|
|
struct vtn_ssa_value *dest = vtn_create_ssa_value(b, src0->type);
|
2016-01-08 19:18:47 +00:00
|
|
|
|
unsigned cols = glsl_get_matrix_columns(src0->type);
|
|
|
|
|
for (unsigned i = 0; i < cols; i++)
|
2020-05-29 21:57:42 +01:00
|
|
|
|
dest->elems[i]->def =
|
2016-01-08 19:18:47 +00:00
|
|
|
|
nir_fadd(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
|
2020-05-29 21:57:42 +01:00
|
|
|
|
return dest;
|
2016-01-08 19:18:47 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case SpvOpFSub: {
|
2020-05-29 21:57:42 +01:00
|
|
|
|
struct vtn_ssa_value *dest = vtn_create_ssa_value(b, src0->type);
|
2016-01-08 19:18:47 +00:00
|
|
|
|
unsigned cols = glsl_get_matrix_columns(src0->type);
|
|
|
|
|
for (unsigned i = 0; i < cols; i++)
|
2020-05-29 21:57:42 +01:00
|
|
|
|
dest->elems[i]->def =
|
2016-01-08 19:18:47 +00:00
|
|
|
|
nir_fsub(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
|
2020-05-29 21:57:42 +01:00
|
|
|
|
return dest;
|
2016-01-08 19:18:47 +00:00
|
|
|
|
}
|
|
|
|
|
|
2016-01-08 19:02:17 +00:00
|
|
|
|
case SpvOpTranspose:
|
2020-05-29 21:57:42 +01:00
|
|
|
|
return vtn_ssa_transpose(b, src0);
|
2016-01-08 19:02:17 +00:00
|
|
|
|
|
|
|
|
|
case SpvOpMatrixTimesScalar:
|
|
|
|
|
if (src0->transposed) {
|
2020-05-29 21:57:42 +01:00
|
|
|
|
return vtn_ssa_transpose(b, mat_times_scalar(b, src0->transposed,
|
|
|
|
|
src1->def));
|
2016-01-08 19:02:17 +00:00
|
|
|
|
} else {
|
2020-05-29 21:57:42 +01:00
|
|
|
|
return mat_times_scalar(b, src0, src1->def);
|
2016-01-08 19:02:17 +00:00
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case SpvOpVectorTimesMatrix:
|
|
|
|
|
case SpvOpMatrixTimesVector:
|
|
|
|
|
case SpvOpMatrixTimesMatrix:
|
|
|
|
|
if (opcode == SpvOpVectorTimesMatrix) {
|
2020-05-29 21:57:42 +01:00
|
|
|
|
return matrix_multiply(b, vtn_ssa_transpose(b, src1), src0);
|
2016-01-08 19:02:17 +00:00
|
|
|
|
} else {
|
2020-05-29 21:57:42 +01:00
|
|
|
|
return matrix_multiply(b, src0, src1);
|
2016-01-08 19:02:17 +00:00
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
2019-04-10 19:13:40 +01:00
|
|
|
|
default: vtn_fail_with_opcode("unknown matrix opcode", opcode);
|
2016-01-08 19:02:17 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-13 01:16:48 +00:00
|
|
|
|
/* Map a SPIR-V ALU opcode to the corresponding NIR opcode.
 *
 * *swap is set when the SPIR-V semantics are obtained by swapping the first
 * two operands of the returned NIR op (greater-than and
 * less-than-or-equal are expressed via their mirrored NIR comparisons).
 * src_bit_size/dst_bit_size are only used for the conversion opcodes, where
 * they select the exact nir_type_conversion_op.  Opcodes with no NIR
 * equivalent report failure via vtn_fail.
 */
nir_op
vtn_nir_alu_op_for_spirv_opcode(struct vtn_builder *b,
                                SpvOp opcode, bool *swap,
                                unsigned src_bit_size, unsigned dst_bit_size)
{
   /* Indicates that the first two arguments should be swapped. This is
    * used for implementing greater-than and less-than-or-equal.
    */
   *swap = false;

   switch (opcode) {
   case SpvOpSNegate: return nir_op_ineg;
   case SpvOpFNegate: return nir_op_fneg;
   case SpvOpNot: return nir_op_inot;
   case SpvOpIAdd: return nir_op_iadd;
   case SpvOpFAdd: return nir_op_fadd;
   case SpvOpISub: return nir_op_isub;
   case SpvOpFSub: return nir_op_fsub;
   case SpvOpIMul: return nir_op_imul;
   case SpvOpFMul: return nir_op_fmul;
   case SpvOpUDiv: return nir_op_udiv;
   case SpvOpSDiv: return nir_op_idiv;
   case SpvOpFDiv: return nir_op_fdiv;
   case SpvOpUMod: return nir_op_umod;
   case SpvOpSMod: return nir_op_imod;
   case SpvOpFMod: return nir_op_fmod;
   case SpvOpSRem: return nir_op_irem;
   case SpvOpFRem: return nir_op_frem;

   case SpvOpShiftRightLogical: return nir_op_ushr;
   case SpvOpShiftRightArithmetic: return nir_op_ishr;
   case SpvOpShiftLeftLogical: return nir_op_ishl;
   case SpvOpLogicalOr: return nir_op_ior;
   case SpvOpLogicalEqual: return nir_op_ieq;
   case SpvOpLogicalNotEqual: return nir_op_ine;
   case SpvOpLogicalAnd: return nir_op_iand;
   case SpvOpLogicalNot: return nir_op_inot;
   case SpvOpBitwiseOr: return nir_op_ior;
   case SpvOpBitwiseXor: return nir_op_ixor;
   case SpvOpBitwiseAnd: return nir_op_iand;
   case SpvOpSelect: return nir_op_bcsel;
   case SpvOpIEqual: return nir_op_ieq;

   case SpvOpBitFieldInsert: return nir_op_bitfield_insert;
   case SpvOpBitFieldSExtract: return nir_op_ibitfield_extract;
   case SpvOpBitFieldUExtract: return nir_op_ubitfield_extract;
   case SpvOpBitReverse: return nir_op_bitfield_reverse;
   case SpvOpBitCount: return nir_op_bit_count;

   case SpvOpUCountLeadingZerosINTEL: return nir_op_uclz;
   /* SpvOpUCountTrailingZerosINTEL is handled elsewhere. */
   case SpvOpAbsISubINTEL: return nir_op_uabs_isub;
   case SpvOpAbsUSubINTEL: return nir_op_uabs_usub;
   case SpvOpIAddSatINTEL: return nir_op_iadd_sat;
   case SpvOpUAddSatINTEL: return nir_op_uadd_sat;
   case SpvOpIAverageINTEL: return nir_op_ihadd;
   case SpvOpUAverageINTEL: return nir_op_uhadd;
   case SpvOpIAverageRoundedINTEL: return nir_op_irhadd;
   case SpvOpUAverageRoundedINTEL: return nir_op_urhadd;
   case SpvOpISubSatINTEL: return nir_op_isub_sat;
   case SpvOpUSubSatINTEL: return nir_op_usub_sat;
   case SpvOpIMul32x16INTEL: return nir_op_imul_32x16;
   case SpvOpUMul32x16INTEL: return nir_op_umul_32x16;

   /* The ordered / unordered operators need special implementation besides
    * the logical operator to use since they also need to check if operands are
    * ordered.
    */
   case SpvOpFOrdEqual: return nir_op_feq;
   case SpvOpFUnordEqual: return nir_op_feq;
   case SpvOpINotEqual: return nir_op_ine;
   case SpvOpLessOrGreater: /* Deprecated, use OrdNotEqual */
   case SpvOpFOrdNotEqual: return nir_op_fne;
   case SpvOpFUnordNotEqual: return nir_op_fne;
   case SpvOpULessThan: return nir_op_ult;
   case SpvOpSLessThan: return nir_op_ilt;
   case SpvOpFOrdLessThan: return nir_op_flt;
   case SpvOpFUnordLessThan: return nir_op_flt;
   case SpvOpUGreaterThan: *swap = true; return nir_op_ult;
   case SpvOpSGreaterThan: *swap = true; return nir_op_ilt;
   case SpvOpFOrdGreaterThan: *swap = true; return nir_op_flt;
   case SpvOpFUnordGreaterThan: *swap = true; return nir_op_flt;
   case SpvOpULessThanEqual: *swap = true; return nir_op_uge;
   case SpvOpSLessThanEqual: *swap = true; return nir_op_ige;
   case SpvOpFOrdLessThanEqual: *swap = true; return nir_op_fge;
   case SpvOpFUnordLessThanEqual: *swap = true; return nir_op_fge;
   case SpvOpUGreaterThanEqual: return nir_op_uge;
   case SpvOpSGreaterThanEqual: return nir_op_ige;
   case SpvOpFOrdGreaterThanEqual: return nir_op_fge;
   case SpvOpFUnordGreaterThanEqual: return nir_op_fge;

   /* Conversions: */
   case SpvOpQuantizeToF16: return nir_op_fquantize2f16;
   case SpvOpUConvert:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpSConvert:
   case SpvOpFConvert: {
      /* Determine the base (typeless) source and destination NIR types,
       * then combine them with the caller-supplied bit sizes to pick the
       * exact conversion op.
       */
      nir_alu_type src_type;
      nir_alu_type dst_type;

      switch (opcode) {
      case SpvOpConvertFToS:
         src_type = nir_type_float;
         dst_type = nir_type_int;
         break;
      case SpvOpConvertFToU:
         src_type = nir_type_float;
         dst_type = nir_type_uint;
         break;
      case SpvOpFConvert:
         src_type = dst_type = nir_type_float;
         break;
      case SpvOpConvertSToF:
         src_type = nir_type_int;
         dst_type = nir_type_float;
         break;
      case SpvOpSConvert:
         src_type = dst_type = nir_type_int;
         break;
      case SpvOpConvertUToF:
         src_type = nir_type_uint;
         dst_type = nir_type_float;
         break;
      case SpvOpUConvert:
         src_type = dst_type = nir_type_uint;
         break;
      default:
         unreachable("Invalid opcode");
      }
      /* nir_alu_type encodes the bit size in the low bits; OR it in. */
      src_type |= src_bit_size;
      dst_type |= dst_bit_size;
      return nir_type_conversion_op(src_type, dst_type, nir_rounding_mode_undef);
   }
   /* Derivatives: */
   case SpvOpDPdx: return nir_op_fddx;
   case SpvOpDPdy: return nir_op_fddy;
   case SpvOpDPdxFine: return nir_op_fddx_fine;
   case SpvOpDPdyFine: return nir_op_fddy_fine;
   case SpvOpDPdxCoarse: return nir_op_fddx_coarse;
   case SpvOpDPdyCoarse: return nir_op_fddy_coarse;

   case SpvOpIsNormal: return nir_op_fisnormal;
   case SpvOpIsFinite: return nir_op_fisfinite;

   default:
      vtn_fail("No NIR equivalent: %u", opcode);
   }
}
|
|
|
|
|
|
2016-03-25 22:30:46 +00:00
|
|
|
|
static void
|
|
|
|
|
handle_no_contraction(struct vtn_builder *b, struct vtn_value *val, int member,
|
|
|
|
|
const struct vtn_decoration *dec, void *_void)
|
|
|
|
|
{
|
2017-08-17 01:38:13 +01:00
|
|
|
|
vtn_assert(dec->scope == VTN_DEC_DECORATION);
|
2016-03-25 22:30:46 +00:00
|
|
|
|
if (dec->decoration != SpvDecorationNoContraction)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
b->nb.exact = true;
|
|
|
|
|
}
|
|
|
|
|
|
2017-07-01 07:05:55 +01:00
|
|
|
|
static void
|
|
|
|
|
handle_rounding_mode(struct vtn_builder *b, struct vtn_value *val, int member,
|
|
|
|
|
const struct vtn_decoration *dec, void *_out_rounding_mode)
|
|
|
|
|
{
|
|
|
|
|
nir_rounding_mode *out_rounding_mode = _out_rounding_mode;
|
|
|
|
|
assert(dec->scope == VTN_DEC_DECORATION);
|
|
|
|
|
if (dec->decoration != SpvDecorationFPRoundingMode)
|
|
|
|
|
return;
|
2019-04-23 00:17:58 +01:00
|
|
|
|
switch (dec->operands[0]) {
|
2017-07-01 07:05:55 +01:00
|
|
|
|
case SpvFPRoundingModeRTE:
|
|
|
|
|
*out_rounding_mode = nir_rounding_mode_rtne;
|
|
|
|
|
break;
|
|
|
|
|
case SpvFPRoundingModeRTZ:
|
|
|
|
|
*out_rounding_mode = nir_rounding_mode_rtz;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Not supported rounding mode");
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-18 06:52:42 +01:00
|
|
|
|
static void
|
|
|
|
|
handle_no_wrap(struct vtn_builder *b, struct vtn_value *val, int member,
|
|
|
|
|
const struct vtn_decoration *dec, void *_alu)
|
|
|
|
|
{
|
|
|
|
|
nir_alu_instr *alu = _alu;
|
|
|
|
|
switch (dec->decoration) {
|
|
|
|
|
case SpvDecorationNoSignedWrap:
|
|
|
|
|
alu->no_signed_wrap = true;
|
|
|
|
|
break;
|
|
|
|
|
case SpvDecorationNoUnsignedWrap:
|
|
|
|
|
alu->no_unsigned_wrap = true;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
/* Do nothing. */
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-08 19:02:17 +00:00
|
|
|
|
/* Translate a SPIR-V ALU instruction into NIR.
 *
 * w[1] is the result type id, w[2] the result id, and w[3..] the operand
 * ids.  Matrix operands are routed to vtn_handle_matrix_alu(); everything
 * else is either one of the special cases below or falls through to the
 * generic vtn_nir_alu_op_for_spirv_opcode() mapping.  NoContraction,
 * FPRoundingMode and No*Wrap decorations on the result are applied along
 * the way, and b->nb.exact is restored before returning.
 */
void
vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_value *dest_val = vtn_untyped_value(b, w[2]);
   const struct glsl_type *dest_type = vtn_get_type(b, w[1])->type;

   /* NoContraction may set b->nb.exact for the instructions built below. */
   vtn_foreach_decoration(b, dest_val, handle_no_contraction, NULL);

   /* Collect the various SSA sources */
   const unsigned num_inputs = count - 3;
   struct vtn_ssa_value *vtn_src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++)
      vtn_src[i] = vtn_ssa_value(b, w[i + 3]);

   /* Matrix operations take a separate path and return early. */
   if (glsl_type_is_matrix(vtn_src[0]->type) ||
       (num_inputs >= 2 && glsl_type_is_matrix(vtn_src[1]->type))) {
      vtn_push_ssa_value(b, w[2],
                         vtn_handle_matrix_alu(b, opcode, vtn_src[0], vtn_src[1]));
      b->nb.exact = b->exact;
      return;
   }

   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);
   nir_ssa_def *src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++) {
      vtn_assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type));
      src[i] = vtn_src[i]->def;
   }

   switch (opcode) {
   case SpvOpAny:
      dest->def = nir_bany(&b->nb, src[0]);
      break;

   case SpvOpAll:
      dest->def = nir_ball(&b->nb, src[0]);
      break;

   case SpvOpOuterProduct: {
      /* Column i of the result is src[0] scaled by component i of src[1]. */
      for (unsigned i = 0; i < src[1]->num_components; i++) {
         dest->elems[i]->def =
            nir_fmul(&b->nb, src[0], nir_channel(&b->nb, src[1], i));
      }
      break;
   }

   case SpvOpDot:
      dest->def = nir_fdot(&b->nb, src[0], src[1]);
      break;

   case SpvOpIAddCarry:
      /* Result is a struct: { sum, carry }. */
      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
      dest->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]);
      dest->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]);
      break;

   case SpvOpISubBorrow:
      /* Result is a struct: { difference, borrow }. */
      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
      dest->elems[0]->def = nir_isub(&b->nb, src[0], src[1]);
      dest->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]);
      break;

   case SpvOpUMulExtended: {
      /* 32x32->64 multiply, split into { low, high } struct members. */
      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
      nir_ssa_def *umul = nir_umul_2x32_64(&b->nb, src[0], src[1]);
      dest->elems[0]->def = nir_unpack_64_2x32_split_x(&b->nb, umul);
      dest->elems[1]->def = nir_unpack_64_2x32_split_y(&b->nb, umul);
      break;
   }

   case SpvOpSMulExtended: {
      /* Signed 32x32->64 multiply, split into { low, high }. */
      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
      nir_ssa_def *smul = nir_imul_2x32_64(&b->nb, src[0], src[1]);
      dest->elems[0]->def = nir_unpack_64_2x32_split_x(&b->nb, smul);
      dest->elems[1]->def = nir_unpack_64_2x32_split_y(&b->nb, smul);
      break;
   }

   /* fwidth = |ddx| + |ddy|, at the matching derivative precision. */
   case SpvOpFwidth:
      dest->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
      break;
   case SpvOpFwidthFine:
      dest->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
      break;
   case SpvOpFwidthCoarse:
      dest->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
      break;

   case SpvOpVectorTimesScalar:
      /* The builder will take care of splatting for us. */
      dest->def = nir_fmul(&b->nb, src[0], src[1]);
      break;

   case SpvOpIsNan:
      /* NaN is the only value that compares unequal to itself. */
      dest->def = nir_fne(&b->nb, src[0], src[0]);
      break;

   case SpvOpOrdered:
      /* Both operands ordered <=> neither is NaN. */
      dest->def = nir_iand(&b->nb, nir_feq(&b->nb, src[0], src[0]),
                                   nir_feq(&b->nb, src[1], src[1]));
      break;

   case SpvOpUnordered:
      /* Unordered <=> at least one operand is NaN. */
      dest->def = nir_ior(&b->nb, nir_fne(&b->nb, src[0], src[0]),
                                  nir_fne(&b->nb, src[1], src[1]));
      break;

   case SpvOpIsInf: {
      nir_ssa_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
      dest->def = nir_ieq(&b->nb, nir_fabs(&b->nb, src[0]), inf);
      break;
   }

   /* Unordered comparisons: true if the plain comparison is true OR either
    * operand is NaN.
    */
   case SpvOpFUnordEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpFUnordLessThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpFUnordLessThanEqual:
   case SpvOpFUnordGreaterThanEqual: {
      bool swap;
      unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
      unsigned dst_bit_size = glsl_get_bit_size(dest_type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                  src_bit_size, dst_bit_size);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      dest->def =
         nir_ior(&b->nb,
                 nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                 nir_ior(&b->nb,
                         nir_fne(&b->nb, src[0], src[0]),
                         nir_fne(&b->nb, src[1], src[1])));
      break;
   }

   case SpvOpLessOrGreater:
   case SpvOpFOrdNotEqual: {
      /* For all the SpvOpFOrd* comparisons apart from NotEqual, the value
       * from the ALU will probably already be false if the operands are not
       * ordered so we don’t need to handle it specially.
       */
      bool swap;
      unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
      unsigned dst_bit_size = glsl_get_bit_size(dest_type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                  src_bit_size, dst_bit_size);

      assert(!swap);

      /* Ordered-not-equal additionally requires both operands non-NaN. */
      dest->def =
         nir_iand(&b->nb,
                  nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                  nir_iand(&b->nb,
                          nir_feq(&b->nb, src[0], src[0]),
                          nir_feq(&b->nb, src[1], src[1])));
      break;
   }

   case SpvOpFConvert: {
      /* Handled separately from the generic path so that an
       * FPRoundingMode decoration can pick the conversion op.
       */
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(dest_type);
      nir_rounding_mode rounding_mode = nir_rounding_mode_undef;

      vtn_foreach_decoration(b, dest_val, handle_rounding_mode, &rounding_mode);
      nir_op op = nir_type_conversion_op(src_alu_type, dst_alu_type, rounding_mode);

      dest->def = nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL);
      break;
   }

   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpShiftLeftLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftRightLogical: {
      bool swap;
      unsigned src0_bit_size = glsl_get_bit_size(vtn_src[0]->type);
      unsigned dst_bit_size = glsl_get_bit_size(dest_type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                  src0_bit_size, dst_bit_size);

      assert (op == nir_op_ushr || op == nir_op_ishr || op == nir_op_ishl ||
              op == nir_op_bitfield_insert || op == nir_op_ubitfield_extract ||
              op == nir_op_ibitfield_extract);

      for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
         unsigned src_bit_size =
            nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]);
         if (src_bit_size == 0)
            continue;
         if (src_bit_size != src[i]->bit_size) {
            assert(src_bit_size == 32);
            /* Convert the Shift, Offset and Count operands to 32 bits, which is the bitsize
             * supported by the NIR instructions. See discussion here:
             *
             * https://lists.freedesktop.org/archives/mesa-dev/2018-April/193026.html
             */
            src[i] = nir_u2u32(&b->nb, src[i]);
         }
      }
      dest->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
      break;
   }

   case SpvOpSignBitSet:
      /* Shift the sign bit down to bit 0 and convert to bool. */
      dest->def = nir_i2b(&b->nb,
         nir_ushr(&b->nb, src[0], nir_imm_int(&b->nb, src[0]->bit_size - 1)));
      break;

   case SpvOpUCountTrailingZerosINTEL:
      /* find_lsb returns -1 for zero input; clamp to 32. */
      dest->def = nir_umin(&b->nb,
                               nir_find_lsb(&b->nb, src[0]),
                               nir_imm_int(&b->nb, 32u));
      break;

   default: {
      /* Generic path: map the SPIR-V opcode to a NIR op and build it. */
      bool swap;
      unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
      unsigned dst_bit_size = glsl_get_bit_size(dest_type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                  src_bit_size, dst_bit_size);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      /* NIR shifts take a 32-bit shift count regardless of value size. */
      switch (op) {
      case nir_op_ishl:
      case nir_op_ishr:
      case nir_op_ushr:
         if (src[1]->bit_size != 32)
            src[1] = nir_u2u32(&b->nb, src[1]);
         break;
      default:
         break;
      }

      dest->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
      break;
   } /* default */
   }

   /* Apply No*Wrap decorations to the opcodes they can appear on. */
   switch (opcode) {
   case SpvOpIAdd:
   case SpvOpIMul:
   case SpvOpISub:
   case SpvOpShiftLeftLogical:
   case SpvOpSNegate: {
      nir_alu_instr *alu = nir_instr_as_alu(dest->def->parent_instr);
      vtn_foreach_decoration(b, dest_val, handle_no_wrap, alu);
      break;
   }
   default:
      /* Do nothing. */
      break;
   }

   vtn_push_ssa_value(b, w[2], dest);

   /* Restore exact-math state possibly set by handle_no_contraction. */
   b->nb.exact = b->exact;
}
|
2019-03-09 19:32:52 +00:00
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
vtn_handle_bitcast(struct vtn_builder *b, const uint32_t *w, unsigned count)
|
|
|
|
|
{
|
|
|
|
|
vtn_assert(count == 4);
|
|
|
|
|
/* From the definition of OpBitcast in the SPIR-V 1.2 spec:
|
|
|
|
|
*
|
|
|
|
|
* "If Result Type has the same number of components as Operand, they
|
|
|
|
|
* must also have the same component width, and results are computed per
|
|
|
|
|
* component.
|
|
|
|
|
*
|
|
|
|
|
* If Result Type has a different number of components than Operand, the
|
|
|
|
|
* total number of bits in Result Type must equal the total number of
|
|
|
|
|
* bits in Operand. Let L be the type, either Result Type or Operand’s
|
|
|
|
|
* type, that has the larger number of components. Let S be the other
|
|
|
|
|
* type, with the smaller number of components. The number of components
|
|
|
|
|
* in L must be an integer multiple of the number of components in S.
|
|
|
|
|
* The first component (that is, the only or lowest-numbered component)
|
|
|
|
|
* of S maps to the first components of L, and so on, up to the last
|
|
|
|
|
* component of S mapping to the last components of L. Within this
|
|
|
|
|
* mapping, any single component of S (mapping to multiple components of
|
|
|
|
|
* L) maps its lower-ordered bits to the lower-numbered components of L."
|
|
|
|
|
*/
|
|
|
|
|
|
2020-05-28 00:33:47 +01:00
|
|
|
|
struct vtn_type *type = vtn_get_type(b, w[1]);
|
2020-05-28 00:28:18 +01:00
|
|
|
|
struct nir_ssa_def *src = vtn_get_nir_ssa(b, w[3]);
|
2019-03-09 19:32:52 +00:00
|
|
|
|
|
|
|
|
|
vtn_fail_if(src->num_components * src->bit_size !=
|
|
|
|
|
glsl_get_vector_elements(type->type) * glsl_get_bit_size(type->type),
|
|
|
|
|
"Source and destination of OpBitcast must have the same "
|
|
|
|
|
"total number of bits");
|
2020-05-27 23:49:47 +01:00
|
|
|
|
nir_ssa_def *val =
|
|
|
|
|
nir_bitcast_vector(&b->nb, src, glsl_get_bit_size(type->type));
|
|
|
|
|
vtn_push_nir_ssa(b, w[2], val);
|
2019-03-09 19:32:52 +00:00
|
|
|
|
}
|