2015-04-15 15:16:31 +01:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
* SOFTWARE.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Rob Clark <robclark@freedesktop.org>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef IR3_NIR_H_
|
|
|
|
#define IR3_NIR_H_
|
|
|
|
|
2016-01-18 10:54:03 +00:00
|
|
|
#include "compiler/nir/nir.h"
|
2019-03-26 17:31:54 +00:00
|
|
|
#include "compiler/nir/nir_builder.h"
|
2016-01-18 08:47:13 +00:00
|
|
|
#include "compiler/shader_enums.h"
|
2015-04-15 15:16:31 +01:00
|
|
|
|
2015-10-24 19:30:31 +01:00
|
|
|
#include "ir3_shader.h"
|
|
|
|
|
2016-04-23 14:03:28 +01:00
|
|
|
bool ir3_nir_apply_trig_workarounds(nir_shader *shader);
|
/*
 * ir3/nir: NIR AlgebraicPass for lowering imul.
 *
 * The ir3 backend lowers integer multiplication from:
 *    dst = a * b
 * to:
 *    dst = (al * bl) + (ah * bl << 16) + (al * bh << 16)
 * by emitting:
 *    mull.u    tmp0, a, b        ; mul low, i.e. al * bl
 *    madsh.m16 tmp1, a, b, tmp0  ; mul-add shift high mix, i.e. ah * bl << 16
 *    madsh.m16 dst,  b, a, tmp1  ; i.e. al * bh << 16
 * Performing this lowering as a nir_algebraic.AlgebraicPass during the NIR
 * algebraic optimization passes gives the resulting code a better chance of
 * being optimized than doing it at instruction-emit time.
 */
|
|
|
bool ir3_nir_lower_imul(nir_shader *shader);
|
2021-07-31 20:35:57 +01:00
|
|
|
bool ir3_nir_lower_io_offsets(nir_shader *shader);
|
2019-04-19 19:12:34 +01:00
|
|
|
bool ir3_nir_lower_load_barycentric_at_sample(nir_shader *shader);
|
2019-04-19 19:15:40 +01:00
|
|
|
bool ir3_nir_lower_load_barycentric_at_offset(nir_shader *shader);
|
2019-03-26 15:28:34 +00:00
|
|
|
bool ir3_nir_move_varying_inputs(nir_shader *shader);
|
2019-08-05 07:09:23 +01:00
|
|
|
int ir3_nir_coord_offset(nir_ssa_def *ssa);
|
|
|
|
bool ir3_nir_lower_tex_prefetch(nir_shader *shader);
|
2021-10-16 18:30:44 +01:00
|
|
|
bool ir3_nir_lower_wide_load_store(nir_shader *shader);
|
2019-08-05 07:09:23 +01:00
|
|
|
|
2020-04-28 20:29:46 +01:00
|
|
|
void ir3_nir_lower_to_explicit_output(nir_shader *shader,
|
2021-07-09 13:50:05 +01:00
|
|
|
struct ir3_shader_variant *v,
|
|
|
|
unsigned topology);
|
|
|
|
void ir3_nir_lower_to_explicit_input(nir_shader *shader,
|
|
|
|
struct ir3_shader_variant *v);
|
|
|
|
void ir3_nir_lower_tess_ctrl(nir_shader *shader, struct ir3_shader_variant *v,
|
|
|
|
unsigned topology);
|
|
|
|
void ir3_nir_lower_tess_eval(nir_shader *shader, struct ir3_shader_variant *v,
|
|
|
|
unsigned topology);
|
2020-04-28 20:52:42 +01:00
|
|
|
void ir3_nir_lower_gs(nir_shader *shader);
|
2019-10-11 01:17:10 +01:00
|
|
|
|
2021-08-22 17:21:03 +01:00
|
|
|
/*
|
|
|
|
* 64b related lowering:
|
|
|
|
*/
|
|
|
|
bool ir3_nir_lower_64b_intrinsics(nir_shader *shader);
|
2021-10-11 11:32:29 +01:00
|
|
|
bool ir3_nir_lower_64b_undef(nir_shader *shader);
|
2021-08-22 17:53:56 +01:00
|
|
|
bool ir3_nir_lower_64b_global(nir_shader *shader);
|
2021-08-22 17:21:03 +01:00
|
|
|
|
2020-11-10 16:59:03 +00:00
|
|
|
void ir3_optimize_loop(struct ir3_compiler *compiler, nir_shader *s);
|
2020-10-22 13:58:01 +01:00
|
|
|
void ir3_nir_lower_io_to_temporaries(nir_shader *s);
|
2020-06-15 22:24:00 +01:00
|
|
|
void ir3_finalize_nir(struct ir3_compiler *compiler, nir_shader *s);
|
2021-11-25 14:16:36 +00:00
|
|
|
void ir3_nir_post_finalize(struct ir3_shader *shader);
|
2020-06-10 10:11:27 +01:00
|
|
|
void ir3_nir_lower_variant(struct ir3_shader_variant *so, nir_shader *s);
|
2015-10-24 19:30:31 +01:00
|
|
|
|
2020-06-15 20:14:04 +01:00
|
|
|
void ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
|
2021-07-09 13:50:05 +01:00
|
|
|
struct ir3_const_state *const_state);
|
2020-07-07 19:56:35 +01:00
|
|
|
bool ir3_nir_lower_load_constant(nir_shader *nir, struct ir3_shader_variant *v);
|
2020-06-17 18:07:09 +01:00
|
|
|
void ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v);
|
|
|
|
bool ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v);
|
2020-11-13 19:48:57 +00:00
|
|
|
bool ir3_nir_fixup_load_uniform(nir_shader *nir);
|
2021-09-24 18:04:04 +01:00
|
|
|
bool ir3_nir_opt_preamble(nir_shader *nir, struct ir3_shader_variant *v);
|
|
|
|
bool ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v);
|
2019-03-26 17:31:54 +00:00
|
|
|
|
2021-07-09 13:50:05 +01:00
|
|
|
nir_ssa_def *ir3_nir_try_propagate_bit_shift(nir_builder *b,
|
|
|
|
nir_ssa_def *offset,
|
|
|
|
int32_t shift);
|
2019-03-26 17:31:54 +00:00
|
|
|
|
2020-03-19 13:15:26 +00:00
|
|
|
static inline nir_intrinsic_instr *
|
|
|
|
ir3_bindless_resource(nir_src src)
|
|
|
|
{
|
2021-07-09 13:50:05 +01:00
|
|
|
if (!src.is_ssa)
|
|
|
|
return NULL;
|
2020-03-19 13:15:26 +00:00
|
|
|
|
2021-07-09 13:50:05 +01:00
|
|
|
if (src.ssa->parent_instr->type != nir_instr_type_intrinsic)
|
|
|
|
return NULL;
|
2020-03-19 13:15:26 +00:00
|
|
|
|
2021-07-09 13:50:05 +01:00
|
|
|
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src.ssa->parent_instr);
|
|
|
|
if (intrin->intrinsic != nir_intrinsic_bindless_resource_ir3)
|
|
|
|
return NULL;
|
2020-03-19 13:15:26 +00:00
|
|
|
|
2021-07-09 13:50:05 +01:00
|
|
|
return intrin;
|
2020-03-19 13:15:26 +00:00
|
|
|
}
|
|
|
|
|
2021-08-22 17:21:03 +01:00
|
|
|
static inline bool
|
|
|
|
is_intrinsic_store(nir_intrinsic_op op)
|
|
|
|
{
|
|
|
|
switch (op) {
|
|
|
|
case nir_intrinsic_store_output:
|
|
|
|
case nir_intrinsic_store_scratch:
|
|
|
|
case nir_intrinsic_store_ssbo:
|
|
|
|
case nir_intrinsic_store_shared:
|
|
|
|
case nir_intrinsic_store_global:
|
|
|
|
case nir_intrinsic_store_global_ir3:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool
|
|
|
|
is_intrinsic_load(nir_intrinsic_op op)
|
|
|
|
{
|
|
|
|
switch (op) {
|
|
|
|
case nir_intrinsic_load_input:
|
|
|
|
case nir_intrinsic_load_scratch:
|
|
|
|
case nir_intrinsic_load_uniform:
|
|
|
|
case nir_intrinsic_load_ssbo:
|
|
|
|
case nir_intrinsic_load_ubo:
|
|
|
|
case nir_intrinsic_load_shared:
|
|
|
|
case nir_intrinsic_load_global:
|
|
|
|
case nir_intrinsic_load_global_ir3:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-15 15:16:31 +01:00
|
|
|
#endif /* IR3_NIR_H_ */
|