agx: Stub NIR backend compiler

A fork of the Bifrost compiler, tailored to AGX. nir_register support is
removed, as I want to use an SSA-based allocator for AGX. (There are no
VLIW-like requirements and only extremely limited vector semantics, so we can
use an ACO approach with ease.)

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
Acked-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10582>
Alyssa Rosenzweig, 2021-04-10 22:03:19 -04:00 (committed by Alyssa Rosenzweig)
parent 719bf5152f
commit 2470a080d2
5 changed files with 865 additions and 2 deletions

agx_compile.c (new file)
@@ -0,0 +1,213 @@
/*
* Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
* Copyright (C) 2020 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "main/mtypes.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_debug.h"
#include "agx_compile.h"
#include "agx_compiler.h"
#include "agx_builder.h"
static const struct debug_named_value agx_debug_options[] = {
{"msgs", AGX_DBG_MSGS, "Print debug messages"},
{"shaders", AGX_DBG_SHADERS, "Dump shaders in NIR and AIR"},
{"shaderdb", AGX_DBG_SHADERDB, "Print statistics"},
{"verbose", AGX_DBG_VERBOSE, "Disassemble verbosely"},
{"internal", AGX_DBG_INTERNAL, "Dump even internal shaders"},
DEBUG_NAMED_VALUE_END
};
DEBUG_GET_ONCE_FLAGS_OPTION(agx_debug, "AGX_MESA_DEBUG", agx_debug_options, 0)
int agx_debug = 0;
#define DBG(fmt, ...) \
do { if (agx_debug & AGX_DBG_MSGS) \
fprintf(stderr, "%s:%d: "fmt, \
__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
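/* Illustrative usage, not part of this commit: with AGX_MESA_DEBUG=msgs set
 * in the environment, a call such as
 *
 *    DBG("emitting block %u\n", block->name);
 *
 * prints to stderr prefixed with the function name and line number; when the
 * msgs flag is clear, the branch is skipped at run time.
 */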
static agx_block *
emit_cf_list(agx_context *ctx, struct exec_list *list)
{
/* stub */
return NULL;
}
static void
agx_print_stats(agx_context *ctx, unsigned size, FILE *fp)
{
unsigned nr_ins = 0, nr_bytes = 0, nr_threads = 1;
/* TODO */
fprintf(stderr, "%s shader: %u inst, %u bytes, %u threads, %u loops, "
"%u:%u spills:fills\n",
ctx->nir->info.label ?: "",
nr_ins, nr_bytes, nr_threads, ctx->loop_count,
ctx->spills, ctx->fills);
}
static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
return glsl_count_attribute_slots(type, false);
}
static void
agx_optimize_nir(nir_shader *nir)
{
bool progress;
nir_lower_idiv_options idiv_options = {
.imprecise_32bit_lowering = true,
.allow_fp16 = true,
};
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, nir_lower_int64);
NIR_PASS_V(nir, nir_lower_idiv, &idiv_options);
NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
NIR_PASS_V(nir, nir_lower_flrp, 16 | 32 | 64, false);
do {
progress = false;
NIR_PASS(progress, nir, nir_lower_var_copies);
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_dead_cf);
NIR_PASS(progress, nir, nir_opt_cse);
NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_lower_undef_to_zero);
NIR_PASS(progress, nir, nir_opt_loop_unroll,
nir_var_shader_in |
nir_var_shader_out |
nir_var_function_temp);
} while (progress);
NIR_PASS_V(nir, nir_opt_algebraic_late);
NIR_PASS_V(nir, nir_opt_constant_folding);
NIR_PASS_V(nir, nir_copy_prop);
NIR_PASS_V(nir, nir_opt_dce);
NIR_PASS_V(nir, nir_opt_cse);
NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
/* Cleanup optimizations */
nir_move_options move_all =
nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
nir_move_comparisons | nir_move_copies | nir_move_load_ssbo;
NIR_PASS_V(nir, nir_opt_sink, move_all);
NIR_PASS_V(nir, nir_opt_move, move_all);
}
void
agx_compile_shader_nir(nir_shader *nir,
struct agx_shader_key *key,
struct util_dynarray *binary,
struct agx_shader_info *out)
{
agx_debug = debug_get_option_agx_debug();
agx_context *ctx = rzalloc(NULL, agx_context);
ctx->nir = nir;
ctx->out = out;
ctx->key = key;
ctx->stage = nir->info.stage;
list_inithead(&ctx->blocks);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
/* Lower large arrays to scratch and small arrays to csel */
NIR_PASS_V(nir, nir_lower_vars_to_scratch, nir_var_function_temp, 16,
glsl_get_natural_size_align_bytes);
NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_function_temp, ~0);
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
glsl_type_size, 0);
if (ctx->stage == MESA_SHADER_FRAGMENT) {
NIR_PASS_V(nir, nir_lower_mediump_io,
nir_var_shader_in | nir_var_shader_out, ~0, false);
}
NIR_PASS_V(nir, nir_lower_ssbo);
/* Varying output is scalar, other I/O is vector */
if (ctx->stage == MESA_SHADER_VERTEX) {
NIR_PASS_V(nir, nir_lower_io_to_scalar, nir_var_shader_out);
}
nir_lower_tex_options lower_tex_options = {
.lower_txs_lod = true,
.lower_txp = ~0,
};
NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);
agx_optimize_nir(nir);
bool skip_internal = nir->info.internal;
skip_internal &= !(agx_debug & AGX_DBG_INTERNAL);
if ((agx_debug & AGX_DBG_SHADERS) && !skip_internal) {
nir_print_shader(nir, stdout);
}
nir_foreach_function(func, nir) {
if (!func->impl)
continue;
ctx->alloc += func->impl->ssa_alloc;
emit_cf_list(ctx, &func->impl->body);
break; /* TODO: Multi-function shaders */
}
unsigned block_source_count = 0;
/* Name blocks now that we're done emitting so the order is consistent */
agx_foreach_block(ctx, block)
block->name = block_source_count++;
if ((agx_debug & AGX_DBG_SHADERS) && !skip_internal)
agx_print_shader(ctx, stdout);
if ((agx_debug & AGX_DBG_SHADERDB) && !skip_internal)
agx_print_stats(ctx, binary->size, stderr);
ralloc_free(ctx);
}
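For reference, a driver-side caller of this entry point would look roughly
like the sketch below (hypothetical code, not part of the commit; only
agx_compile_shader_nir and its argument types come from agx_compile.h):

struct util_dynarray binary;
util_dynarray_init(&binary, NULL);
struct agx_shader_info info = { 0 };
struct agx_shader_key key = { 0 }; /* stage-specific fields left zeroed */
agx_compile_shader_nir(nir, &key, &binary, &info);
/* binary.data/binary.size hold the packed code (empty while this is a stub) */
util_dynarray_fini(&binary);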

agx_compiler.h (new file)
@@ -0,0 +1,551 @@
/*
* Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
* Copyright (C) 2020 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __AGX_COMPILER_H
#define __AGX_COMPILER_H
#include "compiler/nir/nir.h"
#include "util/u_math.h"
#include "util/half_float.h"
#include "util/u_dynarray.h"
#include "agx_compile.h"
#include "agx_opcodes.h"
enum agx_dbg {
AGX_DBG_MSGS = BITFIELD_BIT(0),
AGX_DBG_SHADERS = BITFIELD_BIT(1),
AGX_DBG_SHADERDB = BITFIELD_BIT(2),
AGX_DBG_VERBOSE = BITFIELD_BIT(3),
AGX_DBG_INTERNAL = BITFIELD_BIT(4),
};
extern int agx_debug;
enum agx_index_type {
AGX_INDEX_NULL = 0,
AGX_INDEX_NORMAL = 1,
AGX_INDEX_IMMEDIATE = 2,
AGX_INDEX_UNIFORM = 3,
AGX_INDEX_REGISTER = 4,
};
enum agx_size {
AGX_SIZE_16 = 0,
AGX_SIZE_32 = 1,
AGX_SIZE_64 = 2
};
typedef struct {
/* Sufficient for as many SSA values as we need. Immediates and uniforms fit in 16 bits */
unsigned value : 23;
/* Cache hints */
bool cache : 1;
bool discard : 1;
/* src - float modifiers */
bool abs : 1;
bool neg : 1;
enum agx_size size : 2;
enum agx_index_type type : 3;
} agx_index;
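/* A sketch, not in this commit: the bitfields above total 23 + 1 + 1 + 1 + 1
 * + 2 + 3 = 32 bits, so agx_index is intended to stay a single word, which
 * can be checked at compile time:
 *
 *    static_assert(sizeof(agx_index) == 4, "agx_index must remain packed");
 */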
static inline agx_index
agx_get_index(unsigned value, enum agx_size size)
{
return (agx_index) {
.type = AGX_INDEX_NORMAL,
.value = value,
.size = size
};
}
static inline agx_index
agx_immediate(uint16_t imm)
{
return (agx_index) {
.type = AGX_INDEX_IMMEDIATE,
.value = imm,
.size = AGX_SIZE_32
};
}
/* in half-words, specify r0h as 1, r1 as 2... */
static inline agx_index
agx_register(uint8_t imm, enum agx_size size)
{
return (agx_index) {
.type = AGX_INDEX_REGISTER,
.value = imm,
.size = size
};
}
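/* For example (illustrative): agx_register(1, AGX_SIZE_16) names the 16-bit
 * half r0h, while agx_register(2, AGX_SIZE_32) names the 32-bit r1, which
 * occupies half-words 2 and 3. */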
/* Also in half-words */
static inline agx_index
agx_uniform(uint8_t imm, enum agx_size size)
{
return (agx_index) {
.type = AGX_INDEX_UNIFORM,
.value = imm,
.size = size
};
}
static inline agx_index
agx_null(void)
{
return (agx_index) { .type = AGX_INDEX_NULL };
}
static inline agx_index
agx_zero(void)
{
return agx_immediate(0);
}
/* IEEE 754 additive identity -0.0, stored as an 8-bit AGX minifloat: mantissa
* = exponent = 0, sign bit set */
static inline agx_index
agx_negzero(void)
{
return agx_immediate(0x80);
}
static inline agx_index
agx_abs(agx_index idx)
{
idx.abs = true;
idx.neg = false;
return idx;
}
static inline agx_index
agx_neg(agx_index idx)
{
idx.neg ^= true;
return idx;
}
/* Replaces an index, preserving any modifiers */
static inline agx_index
agx_replace_index(agx_index old, agx_index replacement)
{
replacement.abs = old.abs;
replacement.neg = old.neg;
return replacement;
}
static inline bool
agx_is_null(agx_index idx)
{
return idx.type == AGX_INDEX_NULL;
}
/* Compares equivalence as references */
static inline bool
agx_is_equiv(agx_index left, agx_index right)
{
return (left.type == right.type) && (left.value == right.value);
}
#define AGX_MAX_DESTS 1
#define AGX_MAX_SRCS 5
enum agx_sr {
AGX_SR_INVOCATION,
/* stub */
};
enum agx_icond {
AGX_ICOND_UEQ = 0,
AGX_ICOND_ULT = 1,
AGX_ICOND_UGT = 2,
/* unknown */
AGX_ICOND_SEQ = 4,
AGX_ICOND_SLT = 5,
AGX_ICOND_SGT = 6,
/* unknown */
};
enum agx_fcond {
AGX_FCOND_EQ = 0,
AGX_FCOND_LT = 1,
AGX_FCOND_GT = 2,
AGX_FCOND_LTN = 3,
/* unknown */
AGX_FCOND_GE = 5,
AGX_FCOND_LE = 6,
AGX_FCOND_GTN = 7,
};
enum agx_round {
AGX_ROUND_RTZ = 0,
AGX_ROUND_RTE = 1,
};
enum agx_convert {
AGX_CONVERT_U8_TO_F = 0,
AGX_CONVERT_S8_TO_F = 1,
AGX_CONVERT_F_TO_U16 = 4,
AGX_CONVERT_F_TO_S16 = 5,
AGX_CONVERT_U16_TO_F = 6,
AGX_CONVERT_S16_TO_F = 7,
AGX_CONVERT_F_TO_U32 = 8,
AGX_CONVERT_F_TO_S32 = 9,
AGX_CONVERT_U32_TO_F = 10,
AGX_CONVERT_S32_TO_F = 11
};
enum agx_lod_mode {
AGX_LOD_MODE_AUTO_LOD = 0,
AGX_LOD_MODE_LOD_MIN = 3,
AGX_LOD_GRAD = 8,
AGX_LOD_GRAD_MIN = 12
};
enum agx_dim {
AGX_DIM_TEX_1D = 0,
AGX_DIM_TEX_1D_ARRAY = 1,
AGX_DIM_TEX_2D = 2,
AGX_DIM_TEX_2D_ARRAY = 3,
AGX_DIM_TEX_2D_MS = 4,
AGX_DIM_TEX_3D = 5,
AGX_DIM_TEX_CUBE = 6,
AGX_DIM_TEX_CUBE_ARRAY = 7
};
typedef struct {
/* Must be first */
struct list_head link;
enum agx_opcode op;
/* Data flow */
agx_index dest[AGX_MAX_DESTS];
agx_index src[AGX_MAX_SRCS];
union {
uint32_t imm;
uint32_t writeout;
uint32_t truth_table;
uint32_t component;
uint32_t channels;
uint32_t bfi_mask;
enum agx_sr sr;
enum agx_icond icond;
enum agx_fcond fcond;
enum agx_format format;
enum agx_round round;
enum agx_lod_mode lod_mode;
};
/* TODO: Handle tex ops more efficiently */
enum agx_dim dim : 3;
/* Final st_vary op */
bool last : 1;
/* Shift for a bitwise or memory op (conflicts with format for memory ops) */
unsigned shift : 4;
/* Scoreboard index, 0 or 1. Leave as 0 for instructions that do not require
* scoreboarding (everything but memory load/store and texturing). */
unsigned scoreboard : 1;
/* Output modifiers */
bool saturate : 1;
unsigned mask : 4;
} agx_instr;
struct agx_block;
typedef struct agx_block {
/* Link to next block. Must be first */
struct list_head link;
/* List of instructions emitted for the current block */
struct list_head instructions;
/* Index of the block in source order */
unsigned name;
/* Control flow graph */
struct agx_block *successors[2];
struct set *predecessors;
bool unconditional_jumps;
/* Liveness analysis results */
BITSET_WORD *live_in;
BITSET_WORD *live_out;
} agx_block;
typedef struct {
nir_shader *nir;
gl_shader_stage stage;
struct list_head blocks; /* list of agx_block */
struct agx_shader_info *out;
struct agx_shader_key *key;
/* Place to start pushing new values */
unsigned push_base;
/* For creating temporaries */
unsigned alloc;
/* Stats for shader-db */
unsigned loop_count;
unsigned spills;
unsigned fills;
} agx_context;
static inline void
agx_remove_instruction(agx_instr *ins)
{
list_del(&ins->link);
}
static inline agx_index
agx_temp(agx_context *ctx, enum agx_size size)
{
return agx_get_index(ctx->alloc++, size);
}
static inline enum agx_size
agx_size_for_bits(unsigned bits)
{
switch (bits) {
case 1:
case 16: return AGX_SIZE_16;
case 32: return AGX_SIZE_32;
case 64: return AGX_SIZE_64;
default: unreachable("Invalid bitsize");
}
}
static inline agx_index
agx_src_index(nir_src *src)
{
assert(src->is_ssa);
return agx_get_index(src->ssa->index,
agx_size_for_bits(nir_src_bit_size(*src)));
}
static inline agx_index
agx_dest_index(nir_dest *dst)
{
assert(dst->is_ssa);
return agx_get_index(dst->ssa.index,
agx_size_for_bits(nir_dest_bit_size(*dst)));
}
/* Iterators for AGX IR */
#define agx_foreach_block(ctx, v) \
list_for_each_entry(agx_block, v, &ctx->blocks, link)
#define agx_foreach_block_rev(ctx, v) \
list_for_each_entry_rev(agx_block, v, &ctx->blocks, link)
#define agx_foreach_block_from(ctx, from, v) \
list_for_each_entry_from(agx_block, v, from, &ctx->blocks, link)
#define agx_foreach_block_from_rev(ctx, from, v) \
list_for_each_entry_from_rev(agx_block, v, from, &ctx->blocks, link)
#define agx_foreach_instr_in_block(block, v) \
list_for_each_entry(agx_instr, v, &(block)->instructions, link)
#define agx_foreach_instr_in_block_rev(block, v) \
list_for_each_entry_rev(agx_instr, v, &(block)->instructions, link)
#define agx_foreach_instr_in_block_safe(block, v) \
list_for_each_entry_safe(agx_instr, v, &(block)->instructions, link)
#define agx_foreach_instr_in_block_safe_rev(block, v) \
list_for_each_entry_safe_rev(agx_instr, v, &(block)->instructions, link)
#define agx_foreach_instr_in_block_from(block, v, from) \
list_for_each_entry_from(agx_instr, v, from, &(block)->instructions, link)
#define agx_foreach_instr_in_block_from_rev(block, v, from) \
list_for_each_entry_from_rev(agx_instr, v, from, &(block)->instructions, link)
#define agx_foreach_instr_global(ctx, v) \
agx_foreach_block(ctx, v_block) \
agx_foreach_instr_in_block(v_block, v)
#define agx_foreach_instr_global_rev(ctx, v) \
agx_foreach_block_rev(ctx, v_block) \
agx_foreach_instr_in_block_rev(v_block, v)
#define agx_foreach_instr_global_safe(ctx, v) \
agx_foreach_block(ctx, v_block) \
agx_foreach_instr_in_block_safe(v_block, v)
#define agx_foreach_instr_global_safe_rev(ctx, v) \
agx_foreach_block_rev(ctx, v_block) \
agx_foreach_instr_in_block_safe_rev(v_block, v)
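/* Illustrative use of the global iterators above (not part of this commit):
 * the TODO in agx_print_stats could count emitted instructions as
 *
 *    unsigned nr_ins = 0;
 *    agx_foreach_instr_global(ctx, I)
 *       nr_ins++;
 */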
/* Based on set_foreach, expanded with automatic type casts */
#define agx_foreach_successor(blk, v) \
agx_block *v; \
agx_block **_v; \
for (_v = (agx_block **) &blk->successors[0], \
v = *_v; \
v != NULL && _v < (agx_block **) &blk->successors[2]; \
_v++, v = *_v)
#define agx_foreach_predecessor(blk, v) \
struct set_entry *_entry_##v; \
agx_block *v; \
for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
v = (agx_block *) (_entry_##v ? _entry_##v->key : NULL); \
_entry_##v != NULL; \
_entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
v = (agx_block *) (_entry_##v ? _entry_##v->key : NULL))
#define agx_foreach_src(ins, v) \
for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
#define agx_foreach_dest(ins, v) \
for (unsigned v = 0; v < ARRAY_SIZE(ins->dest); ++v)
static inline agx_instr *
agx_prev_op(agx_instr *ins)
{
return list_last_entry(&(ins->link), agx_instr, link);
}
static inline agx_instr *
agx_next_op(agx_instr *ins)
{
return list_first_entry(&(ins->link), agx_instr, link);
}
static inline agx_block *
agx_next_block(agx_block *block)
{
return list_first_entry(&(block->link), agx_block, link);
}
/* Like in NIR, for use with the builder */
enum agx_cursor_option {
agx_cursor_after_block,
agx_cursor_before_instr,
agx_cursor_after_instr
};
typedef struct {
enum agx_cursor_option option;
union {
agx_block *block;
agx_instr *instr;
};
} agx_cursor;
static inline agx_cursor
agx_after_block(agx_block *block)
{
return (agx_cursor) {
.option = agx_cursor_after_block,
.block = block
};
}
static inline agx_cursor
agx_before_instr(agx_instr *instr)
{
return (agx_cursor) {
.option = agx_cursor_before_instr,
.instr = instr
};
}
static inline agx_cursor
agx_after_instr(agx_instr *instr)
{
return (agx_cursor) {
.option = agx_cursor_after_instr,
.instr = instr
};
}
/* IR builder in terms of cursor infrastructure */
typedef struct {
agx_context *shader;
agx_cursor cursor;
} agx_builder;
static inline agx_builder
agx_init_builder(agx_context *ctx, agx_cursor cursor)
{
return (agx_builder) {
.shader = ctx,
.cursor = cursor
};
}
/* Insert an instruction at the cursor and move the cursor */
static inline void
agx_builder_insert(agx_cursor *cursor, agx_instr *I)
{
switch (cursor->option) {
case agx_cursor_after_instr:
list_add(&I->link, &cursor->instr->link);
cursor->instr = I;
return;
case agx_cursor_after_block:
list_addtail(&I->link, &cursor->block->instructions);
cursor->option = agx_cursor_after_instr;
cursor->instr = I;
return;
case agx_cursor_before_instr:
list_addtail(&I->link, &cursor->instr->link);
cursor->option = agx_cursor_after_instr;
cursor->instr = I;
return;
}
unreachable("Invalid cursor option");
}
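/* Illustrative usage (a sketch; the generated agx_builder.h included from
 * agx_compile.c is assumed to provide the real instruction constructors):
 *
 *    agx_builder b = agx_init_builder(ctx, agx_after_block(block));
 *    agx_instr *I = rzalloc(ctx, agx_instr);
 *    I->op = ...; (some enum agx_opcode value)
 *    agx_builder_insert(&b.cursor, I);
 *
 * Each insertion leaves the cursor just after the new instruction, so
 * consecutive inserts emit in program order.
 */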
/* Routines defined for AIR */
void agx_print_instr(agx_instr *I, FILE *fp);
void agx_print_block(agx_block *block, FILE *fp);
void agx_print_shader(agx_context *ctx, FILE *fp);
#endif

agx_print.c (new file)
@@ -0,0 +1,65 @@
/*
* Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
* Copyright (C) 2019-2020 Collabora, Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "agx_compiler.h"
void
agx_print_instr(agx_instr *I, FILE *fp)
{
/* Stub */
}
void
agx_print_block(agx_block *block, FILE *fp)
{
fprintf(fp, "block%u {\n", block->name);
agx_foreach_instr_in_block(block, ins)
agx_print_instr(ins, fp);
fprintf(fp, "}");
if (block->successors[0]) {
fprintf(fp, " -> ");
agx_foreach_successor(block, succ)
fprintf(fp, "block%u ", succ->name);
}
if (block->predecessors->entries) {
fprintf(fp, " from");
agx_foreach_predecessor(block, pred)
fprintf(fp, " block%u", pred->name);
}
fprintf(fp, "\n\n");
}
void
agx_print_shader(agx_context *ctx, FILE *fp)
{
agx_foreach_block(ctx, block)
agx_print_block(block, fp);
}
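Given the fprintf calls above, a shader with two blocks would print roughly
as follows (illustrative output; agx_print_instr is still a stub, so block
bodies are empty):

block0 {
} -> block1

block1 {
} from block0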

@@ -145,6 +145,39 @@ compile_shader(char **argv)
NIR_PASS_V(nir[i], gl_nir_lower_buffers, prog);
NIR_PASS_V(nir[i], nir_opt_constant_folding);
struct agx_shader_info out = { 0 };
struct agx_shader_key keys[2] = {
   {
      .vs = {
         .num_vbufs = 1,
         .vbuf_strides = { 16 },
         .attributes = {
            {
               .buf = 0,
               .src_offset = 0,
               .format = AGX_FORMAT_I32,
               .nr_comps_minus_1 = 4 - 1
            }
         },
      }
   },
   {
      .fs = {
         .tib_formats = { AGX_FORMAT_U8NORM }
      }
   }
};
agx_compile_shader_nir(nir[i], &keys[i], &binary, &out);
char *fn = NULL;
asprintf(&fn, "shader_%u.bin", i);
assert(fn != NULL);
FILE *fp = fopen(fn, "wb");
fwrite(binary.data, 1, binary.size, fp);
fclose(fp);
free(fn);
util_dynarray_clear(&binary);
}

meson.build
@@ -21,6 +21,7 @@
 libasahi_agx_files = files(
   'agx_compile.c',
+  'agx_print.c',
 )
 agx_opcodes_h = custom_target(
@@ -62,9 +63,9 @@ idep_agx_builder_h = declare_dependency(
 libasahi_compiler = static_library(
   'asahi_compiler',
-  [libasahi_agx_files],
+  [libasahi_agx_files, agx_opcodes_c],
   include_directories : [inc_include, inc_src, inc_mesa, inc_gallium, inc_gallium_aux, inc_mapi],
-  dependencies: [idep_nir],
+  dependencies: [idep_nir, idep_agx_opcodes_h, idep_agx_builder_h],
   c_args : [no_override_init_args],
   gnu_symbol_visibility : 'hidden',
   build_by_default : false,