2010-10-10 23:42:37 +01:00
|
|
|
/*
|
|
|
|
* Copyright © 2010 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Eric Anholt <eric@anholt.net>
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2012-04-10 20:01:50 +01:00
|
|
|
#pragma once
|
|
|
|
|
2011-05-03 18:55:50 +01:00
|
|
|
#include "brw_shader.h"
|
|
|
|
|
2010-10-10 23:42:37 +01:00
|
|
|
extern "C" {
|
|
|
|
|
|
|
|
#include <sys/types.h>
|
|
|
|
|
|
|
|
#include "main/macros.h"
|
|
|
|
#include "main/shaderobj.h"
|
|
|
|
#include "main/uniforms.h"
|
|
|
|
#include "program/prog_parameter.h"
|
|
|
|
#include "program/prog_print.h"
|
|
|
|
#include "program/prog_optimize.h"
|
2014-09-22 20:24:21 +01:00
|
|
|
#include "util/register_allocate.h"
|
2010-10-10 23:42:37 +01:00
|
|
|
#include "program/hash_table.h"
|
|
|
|
#include "brw_context.h"
|
|
|
|
#include "brw_eu.h"
|
|
|
|
#include "brw_wm.h"
|
2012-10-03 21:01:23 +01:00
|
|
|
#include "brw_shader.h"
|
2014-06-30 02:09:35 +01:00
|
|
|
#include "intel_asm_annotation.h"
|
2010-10-10 23:42:37 +01:00
|
|
|
}
|
2011-08-26 21:58:41 +01:00
|
|
|
#include "glsl/glsl_types.h"
|
|
|
|
#include "glsl/ir.h"
|
2014-12-16 22:29:28 +00:00
|
|
|
#include "program/sampler.h"
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2013-12-11 00:22:56 +00:00
|
|
|
#define MAX_SAMPLER_MESSAGE_SIZE 11
|
2014-10-01 18:54:59 +01:00
|
|
|
#define MAX_VGRF_SIZE 16
|
2013-12-11 00:22:56 +00:00
|
|
|
|
2014-06-15 06:53:40 +01:00
|
|
|
/* Types referenced below by pointer/reference only; defined elsewhere. */
struct bblock_t;

namespace {
   /* Copy-propagation table entry — opaque here, used via pointer only. */
   struct acp_entry;
}

namespace brw {
   class fs_live_variables;
}

class fs_inst;
class fs_visitor;
|
|
|
|
|
2014-06-29 23:27:07 +01:00
|
|
|
/**
 * A register operand of the scalar (FS) backend IR.
 *
 * Extends backend_reg with FS-specific addressing: a byte-granular
 * sub-register offset, an optional indirect-address register, a register
 * width, and a horizontal stride.
 */
class fs_reg : public backend_reg {
public:
   DECLARE_RALLOC_CXX_OPERATORS(fs_reg)

   /* Shared constructor bookkeeping. */
   void init();

   fs_reg();
   /* Immediate-value constructors (explicit to avoid accidental conversions). */
   explicit fs_reg(float f);
   explicit fs_reg(int32_t i);
   explicit fs_reg(uint32_t u);
   /* Packed vector-float immediates, as four bytes. */
   explicit fs_reg(uint8_t vf[4]);
   explicit fs_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3);
   /* Wrap a fixed hardware register. */
   fs_reg(struct brw_reg fixed_hw_reg);
   fs_reg(enum register_file file, int reg);
   fs_reg(enum register_file file, int reg, enum brw_reg_type type);
   fs_reg(enum register_file file, int reg, enum brw_reg_type type, uint8_t width);
   /* Allocate a virtual GRF sized for the given GLSL type. */
   fs_reg(fs_visitor *v, const struct glsl_type *type);

   bool equals(const fs_reg &r) const;
   bool is_contiguous() const;

   /** Smear a channel of the reg to all channels. */
   fs_reg &set_smear(unsigned subreg);

   /**
    * Offset in bytes from the start of the register.  Values up to a
    * backend_reg::reg_offset unit are valid.
    */
   int subreg_offset;

   /* Indirect addressing: if non-NULL, address register for reladdr access. */
   fs_reg *reladdr;

   /**
    * The register width.  This indicates how many hardware values are
    * represented by each virtual value.  Valid values are 1, 8, or 16.
    * For immediate values, this is 1.  Most of the rest of the time, it
    * will be equal to the dispatch width.
    */
   uint8_t width;

   /**
    * Returns the effective register width when used as a source in the
    * given instruction.  Registers such as uniforms and immediates
    * effectively take on the width of the instruction in which they are
    * used.
    */
   uint8_t effective_width;

   /** Register region horizontal stride */
   uint8_t stride;
};
|
|
|
|
|
2014-12-04 21:35:25 +00:00
|
|
|
/**
 * Return a copy of \p reg with its negation source modifier toggled.
 *
 * Toggling is not meaningful for fixed hardware registers or immediates,
 * hence the assert.
 */
static inline fs_reg
negate(fs_reg reg)
{
   assert(reg.file != HW_REG && reg.file != IMM);
   reg.negate = !reg.negate;
   return reg;
}
|
|
|
|
|
2014-02-19 14:21:07 +00:00
|
|
|
/**
 * Return a copy of \p reg reinterpreted with register type \p type.
 *
 * Both the IR-level type and the fixed hardware register's type field are
 * updated so the two views stay in sync.
 */
static inline fs_reg
retype(fs_reg reg, enum brw_reg_type type)
{
   reg.type = type;
   reg.fixed_hw_reg.type = type;
   return reg;
}
|
|
|
|
|
2014-02-19 14:19:10 +00:00
|
|
|
/**
 * Return \p reg advanced by \p delta bytes.
 *
 * Whole 32-byte register steps carry into reg_offset (GRF/ATTR) or the
 * register number itself (MRF); the remainder lands in subreg_offset.
 * Files other than these (and BAD_FILE) cannot be byte-offset.
 */
static inline fs_reg
byte_offset(fs_reg reg, unsigned delta)
{
   if (reg.file == GRF || reg.file == ATTR) {
      reg.reg_offset += delta / 32;
   } else if (reg.file == MRF) {
      reg.reg += delta / 32;
   } else if (reg.file != BAD_FILE) {
      assert(delta == 0);
   }
   reg.subreg_offset += delta % 32;
   return reg;
}
|
|
|
|
|
2014-09-17 00:28:53 +01:00
|
|
|
/**
 * Return \p reg advanced horizontally by \p delta logical channels.
 *
 * BAD_FILE, UNIFORM, and IMM registers only have a single component that is
 * implicitly splatted across channels, so a horizontal offset is a harmless
 * no-op for them.  Any other file must not be offset.
 */
static inline fs_reg
horiz_offset(fs_reg reg, unsigned delta)
{
   if (reg.file == GRF || reg.file == MRF || reg.file == ATTR)
      return byte_offset(reg, delta * reg.stride * type_sz(reg.type));

   if (reg.file != BAD_FILE && reg.file != UNIFORM && reg.file != IMM)
      assert(delta == 0);

   return reg;
}
|
|
|
|
|
2013-12-08 03:57:08 +00:00
|
|
|
/**
 * Return \p reg advanced by \p delta whole logical register slots.
 *
 * For GRF/MRF/ATTR a slot spans width * stride elements of the register's
 * type; for uniforms a slot is one reg_offset step.  Other files must not
 * be offset (BAD_FILE tolerates any delta as a no-op).
 */
static inline fs_reg
offset(fs_reg reg, unsigned delta)
{
   assert(reg.stride > 0);

   if (reg.file == GRF || reg.file == MRF || reg.file == ATTR)
      return byte_offset(reg,
                         delta * reg.width * reg.stride * type_sz(reg.type));

   if (reg.file == UNIFORM)
      reg.reg_offset += delta;
   else if (reg.file != BAD_FILE)
      assert(delta == 0);

   return reg;
}
|
|
|
|
|
2014-09-12 00:15:10 +01:00
|
|
|
/**
 * Return a width-1 register selecting channel \p idx of \p reg.
 *
 * The channel is addressed through the byte-granular subreg_offset, so the
 * register must not already carry a sub-register offset.
 */
static inline fs_reg
component(fs_reg reg, unsigned idx)
{
   assert(reg.subreg_offset == 0);
   assert(idx < reg.width);
   reg.subreg_offset = idx * type_sz(reg.type);
   reg.width = 1;
   return reg;
}
|
|
|
|
|
2014-01-15 21:21:50 +00:00
|
|
|
/**
 * Get either of the 8-component halves of a 16-component register.
 *
 * Note: this also works if \c reg represents a SIMD16 pair of registers.
 */
static inline fs_reg
half(fs_reg reg, unsigned idx)
{
   assert(idx < 2);

   /* Uniforms are single values splatted across all channels; either half
    * reads the same value, so return the register unchanged.
    */
   if (reg.file == UNIFORM)
      return reg;

   /* Only the first half of a HW_REG/IMM can be taken without offsetting. */
   assert(idx == 0 || (reg.file != HW_REG && reg.file != IMM));
   assert(reg.width == 16);
   reg.width = 8;
   return horiz_offset(reg, 8 * idx);
}
|
|
|
|
|
2010-11-19 09:44:35 +00:00
|
|
|
/* Default-constructed sentinel used where an instruction source is absent. */
static const fs_reg reg_undef;
|
|
|
|
|
2012-10-03 21:01:23 +01:00
|
|
|
class fs_inst : public backend_instruction {
|
2014-02-20 17:14:40 +00:00
|
|
|
fs_inst &operator=(const fs_inst &);
|
|
|
|
|
2014-08-14 21:56:24 +01:00
|
|
|
void init(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
|
|
|
|
fs_reg *src, int sources);
|
|
|
|
|
2010-10-10 23:42:37 +01:00
|
|
|
public:
|
2013-09-21 00:33:55 +01:00
|
|
|
DECLARE_RALLOC_CXX_OPERATORS(fs_inst)
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2014-08-14 21:56:24 +01:00
|
|
|
fs_inst();
|
|
|
|
fs_inst(enum opcode opcode, uint8_t exec_size);
|
|
|
|
fs_inst(enum opcode opcode, const fs_reg &dst);
|
|
|
|
fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
|
|
|
|
const fs_reg &src0);
|
2014-05-27 18:25:05 +01:00
|
|
|
fs_inst(enum opcode opcode, const fs_reg &dst, const fs_reg &src0);
|
2014-08-14 21:56:24 +01:00
|
|
|
fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
|
|
|
|
const fs_reg &src0, const fs_reg &src1);
|
2014-05-27 18:25:05 +01:00
|
|
|
fs_inst(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
|
|
|
|
const fs_reg &src1);
|
2014-08-14 21:56:24 +01:00
|
|
|
fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
|
|
|
|
const fs_reg &src0, const fs_reg &src1, const fs_reg &src2);
|
2014-05-27 18:25:05 +01:00
|
|
|
fs_inst(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
|
|
|
|
const fs_reg &src1, const fs_reg &src2);
|
2014-05-27 02:44:17 +01:00
|
|
|
fs_inst(enum opcode opcode, const fs_reg &dst, fs_reg src[], int sources);
|
2014-08-14 21:56:24 +01:00
|
|
|
fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
|
|
|
|
fs_reg src[], int sources);
|
2014-02-20 17:40:02 +00:00
|
|
|
fs_inst(const fs_inst &that);
|
2010-10-08 23:11:42 +01:00
|
|
|
|
2014-02-20 21:14:05 +00:00
|
|
|
void resize_sources(uint8_t num_sources);
|
|
|
|
|
2014-03-27 16:40:30 +00:00
|
|
|
bool equals(fs_inst *inst) const;
|
|
|
|
bool overwrites_reg(const fs_reg ®) const;
|
|
|
|
bool is_send_from_grf() const;
|
|
|
|
bool is_partial_write() const;
|
|
|
|
int regs_read(fs_visitor *v, int arg) const;
|
2014-06-24 05:57:31 +01:00
|
|
|
bool can_do_source_mods(struct brw_context *brw);
|
2014-03-27 16:40:30 +00:00
|
|
|
|
|
|
|
bool reads_flag() const;
|
|
|
|
bool writes_flag() const;
|
2013-10-20 19:32:01 +01:00
|
|
|
|
2010-10-10 23:42:37 +01:00
|
|
|
fs_reg dst;
|
2014-02-20 05:18:44 +00:00
|
|
|
fs_reg *src;
|
2014-02-20 01:22:55 +00:00
|
|
|
|
2014-02-20 16:18:22 +00:00
|
|
|
uint8_t sources; /**< Number of fs_reg sources. */
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2014-08-14 21:56:24 +01:00
|
|
|
/**
|
|
|
|
* Execution size of the instruction. This is used by the generator to
|
|
|
|
* generate the correct binary for the given fs_inst. Current valid
|
|
|
|
* values are 1, 8, 16.
|
|
|
|
*/
|
|
|
|
uint8_t exec_size;
|
|
|
|
|
2012-12-06 18:36:11 +00:00
|
|
|
/* Chooses which flag subregister (f0.0 or f0.1) is used for conditional
|
|
|
|
* mod and predication.
|
|
|
|
*/
|
|
|
|
uint8_t flag_subreg;
|
|
|
|
|
2014-02-20 01:12:57 +00:00
|
|
|
uint8_t regs_written; /**< Number of vgrfs written by a SEND message, or 1 */
|
2014-02-20 03:49:46 +00:00
|
|
|
bool eot:1;
|
|
|
|
bool force_uncompressed:1;
|
|
|
|
bool force_sechalf:1;
|
2013-11-18 08:13:13 +00:00
|
|
|
bool pi_noperspective:1; /**< Pixel interpolator noperspective flag */
|
2010-10-10 23:42:37 +01:00
|
|
|
};
|
|
|
|
|
2012-11-09 09:05:47 +00:00
|
|
|
/**
|
|
|
|
* The fragment shader front-end.
|
|
|
|
*
|
|
|
|
* Translates either GLSL IR or Mesa IR (for ARB_fragment_program) into FS IR.
|
|
|
|
*/
|
2012-10-03 21:01:23 +01:00
|
|
|
class fs_visitor : public backend_visitor
|
2010-10-10 23:42:37 +01:00
|
|
|
{
|
|
|
|
public:
|
2014-09-10 19:28:27 +01:00
|
|
|
const fs_reg reg_null_f;
|
|
|
|
const fs_reg reg_null_d;
|
|
|
|
const fs_reg reg_null_ud;
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2012-11-20 22:46:56 +00:00
|
|
|
fs_visitor(struct brw_context *brw,
|
2014-05-14 09:21:02 +01:00
|
|
|
void *mem_ctx,
|
2014-05-14 08:41:41 +01:00
|
|
|
const struct brw_wm_prog_key *key,
|
|
|
|
struct brw_wm_prog_data *prog_data,
|
2013-04-09 01:17:44 +01:00
|
|
|
struct gl_shader_program *shader_prog,
|
2012-11-20 22:41:21 +00:00
|
|
|
struct gl_fragment_program *fp,
|
2012-11-20 22:26:49 +00:00
|
|
|
unsigned dispatch_width);
|
2014-10-28 05:42:50 +00:00
|
|
|
|
|
|
|
fs_visitor(struct brw_context *brw,
|
|
|
|
void *mem_ctx,
|
|
|
|
const struct brw_vs_prog_key *key,
|
|
|
|
struct brw_vs_prog_data *prog_data,
|
|
|
|
struct gl_shader_program *shader_prog,
|
|
|
|
struct gl_vertex_program *cp,
|
|
|
|
unsigned dispatch_width);
|
|
|
|
|
2012-07-04 21:12:50 +01:00
|
|
|
~fs_visitor();
|
2014-08-31 03:10:27 +01:00
|
|
|
void init();
|
2010-10-10 23:42:37 +01:00
|
|
|
|
|
|
|
fs_reg *variable_storage(ir_variable *var);
|
|
|
|
int virtual_grf_alloc(int size);
|
2011-07-26 02:13:04 +01:00
|
|
|
void import_uniforms(fs_visitor *v);
|
2014-10-28 05:42:50 +00:00
|
|
|
void setup_uniform_clipplane_values();
|
|
|
|
void compute_clip_distance();
|
2010-10-10 23:42:37 +01:00
|
|
|
|
|
|
|
void visit(ir_variable *ir);
|
|
|
|
void visit(ir_assignment *ir);
|
|
|
|
void visit(ir_dereference_variable *ir);
|
|
|
|
void visit(ir_dereference_record *ir);
|
|
|
|
void visit(ir_dereference_array *ir);
|
|
|
|
void visit(ir_expression *ir);
|
|
|
|
void visit(ir_texture *ir);
|
|
|
|
void visit(ir_if *ir);
|
|
|
|
void visit(ir_constant *ir);
|
|
|
|
void visit(ir_swizzle *ir);
|
|
|
|
void visit(ir_return *ir);
|
|
|
|
void visit(ir_loop *ir);
|
|
|
|
void visit(ir_loop_jump *ir);
|
|
|
|
void visit(ir_discard *ir);
|
|
|
|
void visit(ir_call *ir);
|
|
|
|
void visit(ir_function *ir);
|
|
|
|
void visit(ir_function_signature *ir);
|
2013-02-15 15:26:35 +00:00
|
|
|
void visit(ir_emit_vertex *);
|
|
|
|
void visit(ir_end_primitive *);
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2014-08-02 02:08:08 +01:00
|
|
|
uint32_t gather_channel(int orig_chan, uint32_t sampler);
|
2014-08-02 02:05:37 +01:00
|
|
|
void swizzle_result(ir_texture_opcode op, int dest_components,
|
|
|
|
fs_reg orig_val, uint32_t sampler);
|
2011-06-10 22:45:24 +01:00
|
|
|
|
2012-11-09 20:01:05 +00:00
|
|
|
fs_inst *emit(fs_inst *inst);
|
2012-11-09 00:06:24 +00:00
|
|
|
void emit(exec_list list);
|
2011-03-13 08:23:40 +00:00
|
|
|
|
2012-07-04 21:12:50 +01:00
|
|
|
fs_inst *emit(enum opcode opcode);
|
2014-06-29 07:11:22 +01:00
|
|
|
fs_inst *emit(enum opcode opcode, const fs_reg &dst);
|
|
|
|
fs_inst *emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0);
|
|
|
|
fs_inst *emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
|
|
|
|
const fs_reg &src1);
|
|
|
|
fs_inst *emit(enum opcode opcode, const fs_reg &dst,
|
|
|
|
const fs_reg &src0, const fs_reg &src1, const fs_reg &src2);
|
|
|
|
fs_inst *emit(enum opcode opcode, const fs_reg &dst,
|
2014-05-27 02:44:17 +01:00
|
|
|
fs_reg src[], int sources);
|
2011-03-13 08:23:40 +00:00
|
|
|
|
2014-06-28 21:40:52 +01:00
|
|
|
fs_inst *MOV(const fs_reg &dst, const fs_reg &src);
|
|
|
|
fs_inst *NOT(const fs_reg &dst, const fs_reg &src);
|
|
|
|
fs_inst *RNDD(const fs_reg &dst, const fs_reg &src);
|
|
|
|
fs_inst *RNDE(const fs_reg &dst, const fs_reg &src);
|
|
|
|
fs_inst *RNDZ(const fs_reg &dst, const fs_reg &src);
|
|
|
|
fs_inst *FRC(const fs_reg &dst, const fs_reg &src);
|
|
|
|
fs_inst *ADD(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *MUL(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *MACH(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *MAC(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *SHL(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *SHR(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *ASR(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *AND(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *OR(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *XOR(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
2014-06-30 01:58:59 +01:00
|
|
|
fs_inst *IF(enum brw_predicate predicate);
|
2014-06-30 01:50:20 +01:00
|
|
|
fs_inst *IF(const fs_reg &src0, const fs_reg &src1,
|
|
|
|
enum brw_conditional_mod condition);
|
2012-11-09 20:01:05 +00:00
|
|
|
fs_inst *CMP(fs_reg dst, fs_reg src0, fs_reg src1,
|
2014-06-30 01:50:20 +01:00
|
|
|
enum brw_conditional_mod condition);
|
2014-06-28 21:40:52 +01:00
|
|
|
fs_inst *LRP(const fs_reg &dst, const fs_reg &a, const fs_reg &y,
|
|
|
|
const fs_reg &x);
|
2013-02-05 23:46:22 +00:00
|
|
|
fs_inst *DEP_RESOLVE_MOV(int grf);
|
2014-06-28 21:40:52 +01:00
|
|
|
fs_inst *BFREV(const fs_reg &dst, const fs_reg &value);
|
|
|
|
fs_inst *BFE(const fs_reg &dst, const fs_reg &bits, const fs_reg &offset,
|
|
|
|
const fs_reg &value);
|
|
|
|
fs_inst *BFI1(const fs_reg &dst, const fs_reg &bits, const fs_reg &offset);
|
|
|
|
fs_inst *BFI2(const fs_reg &dst, const fs_reg &bfi1_dst,
|
|
|
|
const fs_reg &insert, const fs_reg &base);
|
|
|
|
fs_inst *FBH(const fs_reg &dst, const fs_reg &value);
|
|
|
|
fs_inst *FBL(const fs_reg &dst, const fs_reg &value);
|
|
|
|
fs_inst *CBIT(const fs_reg &dst, const fs_reg &value);
|
|
|
|
fs_inst *MAD(const fs_reg &dst, const fs_reg &c, const fs_reg &b,
|
|
|
|
const fs_reg &a);
|
|
|
|
fs_inst *ADDC(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *SUBB(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
|
|
|
fs_inst *SEL(const fs_reg &dst, const fs_reg &src0, const fs_reg &src1);
|
2012-11-09 20:01:05 +00:00
|
|
|
|
2011-05-25 00:45:17 +01:00
|
|
|
int type_size(const struct glsl_type *type);
|
2012-03-10 21:48:42 +00:00
|
|
|
fs_inst *get_instruction_generating_reg(fs_inst *start,
|
|
|
|
fs_inst *end,
|
2014-02-20 04:31:14 +00:00
|
|
|
const fs_reg ®);
|
2011-05-25 00:45:17 +01:00
|
|
|
|
2014-05-28 02:47:40 +01:00
|
|
|
fs_inst *LOAD_PAYLOAD(const fs_reg &dst, fs_reg *src, int sources);
|
|
|
|
|
2014-02-20 04:31:14 +00:00
|
|
|
exec_list VARYING_PULL_CONSTANT_LOAD(const fs_reg &dst,
|
|
|
|
const fs_reg &surf_index,
|
|
|
|
const fs_reg &varying_offset,
|
2013-03-13 19:27:17 +00:00
|
|
|
uint32_t const_offset);
|
2012-11-09 00:06:24 +00:00
|
|
|
|
2014-10-28 06:36:31 +00:00
|
|
|
bool run_fs();
|
2014-10-28 05:42:50 +00:00
|
|
|
bool run_vs();
|
2014-11-14 00:28:18 +00:00
|
|
|
void optimize();
|
2014-11-14 00:28:19 +00:00
|
|
|
void allocate_registers();
|
2013-10-02 22:07:40 +01:00
|
|
|
void assign_binding_table_offsets();
|
2012-11-19 22:59:14 +00:00
|
|
|
void setup_payload_gen4();
|
2012-11-14 03:36:18 +00:00
|
|
|
void setup_payload_gen6();
|
2014-10-28 05:42:50 +00:00
|
|
|
void setup_vs_payload();
|
2010-10-10 23:42:37 +01:00
|
|
|
void assign_curb_setup();
|
|
|
|
void calculate_urb_setup();
|
|
|
|
void assign_urb_setup();
|
2014-10-28 05:42:50 +00:00
|
|
|
void assign_vs_urb_setup();
|
2013-11-07 01:38:23 +00:00
|
|
|
bool assign_regs(bool allow_spilling);
|
2010-10-10 23:42:37 +01:00
|
|
|
void assign_regs_trivial();
|
2013-10-29 19:18:10 +00:00
|
|
|
void get_used_mrfs(bool *mrf_used);
|
2012-10-02 23:01:24 +01:00
|
|
|
void setup_payload_interference(struct ra_graph *g, int payload_reg_count,
|
|
|
|
int first_payload_node);
|
2012-10-03 00:31:51 +01:00
|
|
|
void setup_mrf_hack_interference(struct ra_graph *g,
|
|
|
|
int first_mrf_hack_node);
|
2010-10-19 17:25:51 +01:00
|
|
|
int choose_spill_reg(struct ra_graph *g);
|
|
|
|
void spill_reg(int spill_reg);
|
2010-10-14 04:17:15 +01:00
|
|
|
void split_virtual_grfs();
|
2014-09-16 21:14:09 +01:00
|
|
|
bool compact_virtual_grfs();
|
2012-11-09 00:06:24 +00:00
|
|
|
void move_uniform_array_access_to_pull_constants();
|
2014-03-11 21:35:27 +00:00
|
|
|
void assign_constant_locations();
|
2014-03-12 05:24:39 +00:00
|
|
|
void demote_pull_constants();
|
2014-09-01 18:54:00 +01:00
|
|
|
void invalidate_live_intervals();
|
2014-07-12 04:54:52 +01:00
|
|
|
void calculate_live_intervals();
|
2013-08-05 07:27:14 +01:00
|
|
|
void calculate_register_pressure();
|
2011-07-23 00:45:15 +01:00
|
|
|
bool opt_algebraic();
|
2012-05-11 00:10:15 +01:00
|
|
|
bool opt_cse();
|
2014-07-12 04:35:31 +01:00
|
|
|
bool opt_cse_local(bblock_t *block);
|
2012-05-08 21:01:52 +01:00
|
|
|
bool opt_copy_propagate();
|
2012-06-06 18:57:54 +01:00
|
|
|
bool try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry);
|
2014-09-24 01:22:09 +01:00
|
|
|
bool try_constant_propagate(fs_inst *inst, acp_entry *entry);
|
i965/fs: Add support for global copy propagation.
It is common for complicated shaders, particularly code-generated ones, to
have a big array of uniforms or attributes, and a prologue in the shader that
dereferences from the big array to more informatively-named local variables.
Then there will be some small control flow operation (like a ? : statement),
and then use of those informatively-named variables. We were emitting extra
MOVs in these cases, because copy propagation couldn't reach across control
flow.
Instead, implement dataflow analysis on the output of the first copy
propagation pass and re-run it to propagate those extra MOVs out.
On one future Steam release, reduces VS+FS instruction count from 42837 to
41437. No statistically significant performance difference (n=48), though, at
least at the low resolution I'm running it at.
shader-db results:
total instructions in shared programs: 722170 -> 702545 (-2.72%)
instructions in affected programs: 260618 -> 240993 (-7.53%)
Some shaders do get hurt by up to 2 instructions, because a choice to copy
propagate instead of coalesce or something like that results in a dead write
sticking around. Given that we already have instances of those instructions
in the affected programs (particularly unigine), we should just improve dead
code elimination to fix the problem.
2012-10-30 18:09:59 +00:00
|
|
|
bool opt_copy_propagate_local(void *mem_ctx, bblock_t *block,
|
|
|
|
exec_list *acp);
|
2014-04-14 23:01:37 +01:00
|
|
|
bool opt_register_renaming();
|
2013-11-30 06:16:14 +00:00
|
|
|
bool register_coalesce();
|
2010-10-08 22:00:14 +01:00
|
|
|
bool compute_to_mrf();
|
2010-10-10 23:42:37 +01:00
|
|
|
bool dead_code_eliminate();
|
2010-11-19 07:57:05 +00:00
|
|
|
bool remove_duplicate_mrf_writes();
|
2010-10-10 23:42:37 +01:00
|
|
|
bool virtual_grf_interferes(int a, int b);
|
2013-11-07 01:38:23 +00:00
|
|
|
void schedule_instructions(instruction_scheduler_mode mode);
|
2013-02-05 23:46:22 +00:00
|
|
|
void insert_gen4_send_dependency_workarounds();
|
2014-08-25 03:07:01 +01:00
|
|
|
void insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
|
|
|
|
fs_inst *inst);
|
|
|
|
void insert_gen4_post_send_dependency_workarounds(bblock_t *block,
|
|
|
|
fs_inst *inst);
|
i965: Accurately bail on SIMD16 compiles.
Ideally, we'd like to never even attempt the SIMD16 compile if we could
know ahead of time that it won't succeed---it's purely a waste of time.
This is especially important for state-based recompiles, which happen at
draw time.
The fragment shader compiler has a number of checks like:
if (dispatch_width == 16)
fail("...some reason...");
This patch introduces a new no16() function which replaces the above
pattern. In the SIMD8 compile, it sets a "SIMD16 will never work" flag.
Then, brw_wm_fs_emit can check that flag, skip the SIMD16 compile, and
issue a helpful performance warning if INTEL_DEBUG=perf is set. (In
SIMD16 mode, no16() calls fail(), for safety's sake.)
The great part is that this is not a heuristic---if the flag is set, we
know with 100% certainty that the SIMD16 compile would fail. (It might
fail anyway if we run out of registers, but it's always worth trying.)
v2: Fix missing va_end in early-return case (caught by Ilia Mirkin).
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Chris Forbes <chrisf@ijw.co.nz> [v1]
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com> [v1]
Reviewed-by: Eric Anholt <eric@anholt.net>
2014-03-07 08:49:45 +00:00
|
|
|
void vfail(const char *msg, va_list args);
|
2011-03-13 20:43:05 +00:00
|
|
|
void fail(const char *msg, ...);
|
i965: Accurately bail on SIMD16 compiles.
Ideally, we'd like to never even attempt the SIMD16 compile if we could
know ahead of time that it won't succeed---it's purely a waste of time.
This is especially important for state-based recompiles, which happen at
draw time.
The fragment shader compiler has a number of checks like:
if (dispatch_width == 16)
fail("...some reason...");
This patch introduces a new no16() function which replaces the above
pattern. In the SIMD8 compile, it sets a "SIMD16 will never work" flag.
Then, brw_wm_fs_emit can check that flag, skip the SIMD16 compile, and
issue a helpful performance warning if INTEL_DEBUG=perf is set. (In
SIMD16 mode, no16() calls fail(), for safety's sake.)
The great part is that this is not a heuristic---if the flag is set, we
know with 100% certainty that the SIMD16 compile would fail. (It might
fail anyway if we run out of registers, but it's always worth trying.)
v2: Fix missing va_end in early-return case (caught by Ilia Mirkin).
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Chris Forbes <chrisf@ijw.co.nz> [v1]
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com> [v1]
Reviewed-by: Eric Anholt <eric@anholt.net>
2014-03-07 08:49:45 +00:00
|
|
|
void no16(const char *msg, ...);
|
2013-02-16 03:26:48 +00:00
|
|
|
void lower_uniform_pull_constant_loads();
|
2014-04-18 19:56:46 +01:00
|
|
|
bool lower_load_payload();
|
2011-01-19 01:16:49 +00:00
|
|
|
|
2010-10-10 23:42:37 +01:00
|
|
|
void emit_dummy_fs();
|
2014-09-26 22:47:03 +01:00
|
|
|
void emit_repclear_shader();
|
2014-08-05 19:02:02 +01:00
|
|
|
fs_reg *emit_fragcoord_interpolation(bool pixel_center_integer,
|
|
|
|
bool origin_upper_left);
|
2012-06-21 19:33:22 +01:00
|
|
|
fs_inst *emit_linterp(const fs_reg &attr, const fs_reg &interp,
|
2012-06-18 21:52:02 +01:00
|
|
|
glsl_interp_qualifier interpolation_mode,
|
2014-01-06 21:59:18 +00:00
|
|
|
bool is_centroid, bool is_sample);
|
2014-08-05 18:29:00 +01:00
|
|
|
fs_reg *emit_frontfacing_interpolation();
|
2014-08-05 19:10:07 +01:00
|
|
|
fs_reg *emit_samplepos_setup();
|
2014-10-17 20:59:18 +01:00
|
|
|
fs_reg *emit_sampleid_setup();
|
2010-10-10 23:42:37 +01:00
|
|
|
fs_reg *emit_general_interpolation(ir_variable *ir);
|
2014-10-28 05:42:50 +00:00
|
|
|
fs_reg *emit_vs_system_value(enum brw_reg_type type, int location);
|
2010-10-10 23:42:37 +01:00
|
|
|
void emit_interpolation_setup_gen4();
|
|
|
|
void emit_interpolation_setup_gen6();
|
2013-10-24 23:53:05 +01:00
|
|
|
void compute_sample_position(fs_reg dst, fs_reg int_sample_pos);
|
2014-08-01 23:03:03 +01:00
|
|
|
fs_reg rescale_texcoord(fs_reg coordinate, const glsl_type *coord_type,
|
2014-08-03 10:13:02 +01:00
|
|
|
bool is_rect, uint32_t sampler, int texunit);
|
2014-08-02 00:47:58 +01:00
|
|
|
fs_inst *emit_texture_gen4(ir_texture_opcode op, fs_reg dst,
|
2014-08-01 23:46:11 +01:00
|
|
|
fs_reg coordinate, int coord_components,
|
2014-08-02 00:24:44 +01:00
|
|
|
fs_reg shadow_comp,
|
|
|
|
fs_reg lod, fs_reg lod2, int grad_components,
|
2014-08-03 10:13:02 +01:00
|
|
|
uint32_t sampler);
|
2014-08-02 00:47:58 +01:00
|
|
|
fs_inst *emit_texture_gen5(ir_texture_opcode op, fs_reg dst,
|
2014-08-01 23:46:11 +01:00
|
|
|
fs_reg coordinate, int coord_components,
|
2014-08-02 00:24:44 +01:00
|
|
|
fs_reg shadow_comp,
|
|
|
|
fs_reg lod, fs_reg lod2, int grad_components,
|
2014-08-01 22:13:31 +01:00
|
|
|
fs_reg sample_index, uint32_t sampler,
|
|
|
|
bool has_offset);
|
2014-08-02 00:47:58 +01:00
|
|
|
fs_inst *emit_texture_gen7(ir_texture_opcode op, fs_reg dst,
|
2014-08-01 23:46:11 +01:00
|
|
|
fs_reg coordinate, int coord_components,
|
2014-08-02 00:24:44 +01:00
|
|
|
fs_reg shadow_comp,
|
|
|
|
fs_reg lod, fs_reg lod2, int grad_components,
|
2014-09-01 09:58:06 +01:00
|
|
|
fs_reg sample_index, fs_reg mcs, fs_reg sampler,
|
|
|
|
fs_reg offset_value);
|
2014-10-10 10:41:20 +01:00
|
|
|
void emit_texture(ir_texture_opcode op,
|
|
|
|
const glsl_type *dest_type,
|
|
|
|
fs_reg coordinate, const struct glsl_type *coord_type,
|
|
|
|
fs_reg shadow_c,
|
|
|
|
fs_reg lod, fs_reg dpdy, int grad_components,
|
|
|
|
fs_reg sample_index,
|
|
|
|
fs_reg offset, unsigned offset_components,
|
|
|
|
fs_reg mcs,
|
|
|
|
int gather_component,
|
|
|
|
bool is_cube_array,
|
|
|
|
bool is_rect,
|
|
|
|
uint32_t sampler,
|
|
|
|
fs_reg sampler_reg,
|
|
|
|
int texunit);
|
2014-08-01 22:46:31 +01:00
|
|
|
fs_reg emit_mcs_fetch(fs_reg coordinate, int components, fs_reg sampler);
|
2014-02-03 09:15:16 +00:00
|
|
|
void emit_gen6_gather_wa(uint8_t wa, fs_reg dst);
|
2012-11-28 19:39:08 +00:00
|
|
|
fs_reg fix_math_operand(fs_reg src);
|
2011-05-03 18:55:50 +01:00
|
|
|
fs_inst *emit_math(enum opcode op, fs_reg dst, fs_reg src0);
|
|
|
|
fs_inst *emit_math(enum opcode op, fs_reg dst, fs_reg src0, fs_reg src1);
|
2014-02-20 04:31:14 +00:00
|
|
|
void emit_lrp(const fs_reg &dst, const fs_reg &x, const fs_reg &y,
|
|
|
|
const fs_reg &a);
|
2014-06-30 01:50:20 +01:00
|
|
|
void emit_minmax(enum brw_conditional_mod conditionalmod, const fs_reg &dst,
|
2014-02-20 04:31:14 +00:00
|
|
|
const fs_reg &src0, const fs_reg &src1);
|
2010-11-19 02:36:06 +00:00
|
|
|
bool try_emit_saturate(ir_expression *ir);
|
2014-04-02 00:49:13 +01:00
|
|
|
bool try_emit_line(ir_expression *ir);
|
2014-04-02 00:42:36 +01:00
|
|
|
bool try_emit_mad(ir_expression *ir);
|
i965/fs: Optimize IF/MOV/ELSE/MOV/ENDIF to SEL when possible.
Many GLSL shaders contain code of the form:
x = condition ? foo : bar
The compiler emits an ir_if tree for this, since each subexpression
might be a complex tree that could have side-effects and short-circuit
logic operations.
However, the common case is to simply pick one of two constants or
variable's values---which is exactly what SEL is for. Replacing IF/ELSE
with SEL also simplifies the control flow graph, making optimization
passes which work on basic blocks more effective.
The shader-db statistics:
total instructions in shared programs: 1655247 -> 1503234 (-9.18%)
instructions in affected programs: 949188 -> 797175 (-16.02%)
2,970 shaders were helped, none hurt. Gained 181 SIMD16 programs.
This helps Valve's Source Engine games (max -41.33%), The Cave
(max -33.33%), Serious Sam 3 (max -18.64%), Yo Frankie! (max -30.19%),
Zen Bound (max -22.22%), GStreamer (max -6.12%), and GLBenchmark 2.7
(max -1.94%).
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Matt Turner <mattst88@gmail.com>
2013-08-04 10:05:43 +01:00
|
|
|
void try_replace_with_sel();
|
2013-10-23 01:51:28 +01:00
|
|
|
bool opt_peephole_sel();
|
2013-10-23 01:51:28 +01:00
|
|
|
bool opt_peephole_predicated_break();
|
2013-12-12 08:30:16 +00:00
|
|
|
bool opt_saturate_propagation();
|
2010-10-14 19:11:29 +01:00
|
|
|
void emit_bool_to_cond_code(ir_rvalue *condition);
|
2010-10-19 20:32:55 +01:00
|
|
|
void emit_if_gen6(ir_if *ir);
|
2014-07-15 19:45:20 +01:00
|
|
|
void emit_unspill(bblock_t *block, fs_inst *inst, fs_reg reg,
|
|
|
|
uint32_t spill_offset, int count);
|
2014-08-18 22:27:55 +01:00
|
|
|
void emit_spill(bblock_t *block, fs_inst *inst, fs_reg reg,
|
|
|
|
uint32_t spill_offset, int count);
|
2010-10-08 22:35:34 +01:00
|
|
|
|
2012-08-27 22:35:01 +01:00
|
|
|
void emit_fragment_program_code();
|
|
|
|
void setup_fp_regs();
|
|
|
|
fs_reg get_fp_src_reg(const prog_src_register *src);
|
|
|
|
fs_reg get_fp_dst_reg(const prog_dst_register *dst);
|
|
|
|
void emit_fp_alu1(enum opcode opcode,
|
|
|
|
const struct prog_instruction *fpi,
|
|
|
|
fs_reg dst, fs_reg src);
|
|
|
|
void emit_fp_alu2(enum opcode opcode,
|
|
|
|
const struct prog_instruction *fpi,
|
|
|
|
fs_reg dst, fs_reg src0, fs_reg src1);
|
|
|
|
void emit_fp_scalar_write(const struct prog_instruction *fpi,
|
|
|
|
fs_reg dst, fs_reg src);
|
|
|
|
void emit_fp_scalar_math(enum opcode opcode,
|
|
|
|
const struct prog_instruction *fpi,
|
|
|
|
fs_reg dst, fs_reg src);
|
|
|
|
|
|
|
|
void emit_fp_minmax(const struct prog_instruction *fpi,
|
|
|
|
fs_reg dst, fs_reg src0, fs_reg src1);
|
|
|
|
|
2014-06-30 01:50:20 +01:00
|
|
|
void emit_fp_sop(enum brw_conditional_mod conditional_mod,
|
2012-08-27 22:35:01 +01:00
|
|
|
const struct prog_instruction *fpi,
|
|
|
|
fs_reg dst, fs_reg src0, fs_reg src1, fs_reg one);
|
|
|
|
|
2014-09-13 00:17:37 +01:00
|
|
|
int setup_color_payload(fs_reg *dst, fs_reg color, unsigned components);
|
2013-10-27 00:32:03 +01:00
|
|
|
void emit_alpha_test();
|
2014-09-13 01:49:49 +01:00
|
|
|
fs_inst *emit_single_fb_write(fs_reg color1, fs_reg color2,
|
|
|
|
fs_reg src0_alpha, unsigned components);
|
2010-10-10 23:42:37 +01:00
|
|
|
void emit_fb_writes();
|
2014-10-28 05:42:50 +00:00
|
|
|
void emit_urb_writes();
|
2012-11-27 22:10:52 +00:00
|
|
|
|
|
|
|
void emit_shader_time_begin();
|
|
|
|
void emit_shader_time_end();
|
|
|
|
void emit_shader_time_write(enum shader_time_shader_type type,
|
2012-12-10 17:21:34 +00:00
|
|
|
fs_reg value);
|
2012-11-27 22:10:52 +00:00
|
|
|
|
2013-09-26 00:30:20 +01:00
|
|
|
void emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
|
|
|
|
fs_reg dst, fs_reg offset, fs_reg src0,
|
|
|
|
fs_reg src1);
|
|
|
|
|
|
|
|
void emit_untyped_surface_read(unsigned surf_index, fs_reg dst,
|
|
|
|
fs_reg offset);
|
|
|
|
|
2013-11-17 07:00:00 +00:00
|
|
|
void emit_interpolate_expression(ir_expression *ir);
|
|
|
|
|
2011-08-26 20:24:43 +01:00
|
|
|
bool try_rewrite_rhs_to_dst(ir_assignment *ir,
|
|
|
|
fs_reg dst,
|
|
|
|
fs_reg src,
|
|
|
|
fs_inst *pre_rhs_inst,
|
|
|
|
fs_inst *last_rhs_inst);
|
2010-10-10 23:42:37 +01:00
|
|
|
void emit_assignment_writes(fs_reg &l, fs_reg &r,
|
|
|
|
const glsl_type *type, bool predicated);
|
2011-10-03 23:12:10 +01:00
|
|
|
void resolve_ud_negate(fs_reg *reg);
|
2012-04-24 00:48:09 +01:00
|
|
|
void resolve_bool_comparison(ir_rvalue *rvalue, fs_reg *reg);
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2012-11-27 22:10:52 +00:00
|
|
|
fs_reg get_timestamp();
|
|
|
|
|
2010-10-10 23:42:37 +01:00
|
|
|
struct brw_reg interp_reg(int location, int channel);
|
2012-11-21 01:43:31 +00:00
|
|
|
void setup_uniform_values(ir_variable *ir);
|
2010-10-10 23:42:37 +01:00
|
|
|
void setup_builtin_uniform_values(ir_variable *ir);
|
2010-11-19 07:57:05 +00:00
|
|
|
int implied_mrf_writes(fs_inst *inst);
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2013-08-05 07:34:01 +01:00
|
|
|
virtual void dump_instructions();
|
2014-05-29 21:08:59 +01:00
|
|
|
virtual void dump_instructions(const char *name);
|
2013-04-29 22:21:14 +01:00
|
|
|
void dump_instruction(backend_instruction *inst);
|
2014-05-29 19:45:15 +01:00
|
|
|
void dump_instruction(backend_instruction *inst, FILE *file);
|
2012-10-30 22:35:44 +00:00
|
|
|
|
2013-09-26 00:30:20 +01:00
|
|
|
void visit_atomic_counter_intrinsic(ir_call *ir);
|
|
|
|
|
2014-08-29 20:50:46 +01:00
|
|
|
const void *const key;
|
2014-08-29 20:50:46 +01:00
|
|
|
struct brw_stage_prog_data *prog_data;
|
2012-11-21 21:11:32 +00:00
|
|
|
unsigned int sanity_param_count;
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2014-02-19 14:36:48 +00:00
|
|
|
int *param_size;
|
2011-01-18 00:02:58 +00:00
|
|
|
|
2010-10-10 23:42:37 +01:00
|
|
|
int *virtual_grf_sizes;
|
2012-07-06 21:45:53 +01:00
|
|
|
int virtual_grf_count;
|
2010-10-10 23:42:37 +01:00
|
|
|
int virtual_grf_array_size;
|
2013-04-30 23:00:40 +01:00
|
|
|
int *virtual_grf_start;
|
|
|
|
int *virtual_grf_end;
|
2012-06-05 19:37:22 +01:00
|
|
|
brw::fs_live_variables *live_intervals;
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2013-08-05 07:27:14 +01:00
|
|
|
int *regs_live_at_ip;
|
|
|
|
|
2014-02-19 14:27:01 +00:00
|
|
|
/** Number of uniform variable components visited. */
|
|
|
|
unsigned uniforms;
|
|
|
|
|
2014-05-14 05:00:35 +01:00
|
|
|
/** Byte-offset for the next available spot in the scratch space buffer. */
|
|
|
|
unsigned last_scratch;
|
|
|
|
|
2014-03-07 10:10:14 +00:00
|
|
|
/**
|
|
|
|
* Array mapping UNIFORM register numbers to the pull parameter index,
|
|
|
|
* or -1 if this uniform register isn't being uploaded as a pull constant.
|
|
|
|
*/
|
|
|
|
int *pull_constant_loc;
|
|
|
|
|
2014-03-11 21:35:27 +00:00
|
|
|
/**
|
|
|
|
* Array mapping UNIFORM register numbers to the push parameter index,
|
|
|
|
* or -1 if this uniform register isn't being uploaded as a push constant.
|
2011-07-26 02:13:04 +01:00
|
|
|
*/
|
2014-03-11 21:35:27 +00:00
|
|
|
int *push_constant_loc;
|
2011-07-26 02:13:04 +01:00
|
|
|
|
2010-10-10 23:42:37 +01:00
|
|
|
struct hash_table *variable_ht;
|
2012-09-18 17:12:48 +01:00
|
|
|
fs_reg frag_depth;
|
2013-10-25 00:21:13 +01:00
|
|
|
fs_reg sample_mask;
|
2014-10-28 05:42:50 +00:00
|
|
|
fs_reg outputs[VARYING_SLOT_MAX];
|
|
|
|
unsigned output_components[VARYING_SLOT_MAX];
|
2012-04-25 21:58:07 +01:00
|
|
|
fs_reg dual_src_output;
|
2014-03-25 23:46:12 +00:00
|
|
|
bool do_dual_src;
|
2010-10-10 23:42:37 +01:00
|
|
|
int first_non_payload_grf;
|
2012-10-02 00:39:54 +01:00
|
|
|
/** Either BRW_MAX_GRF or GEN7_MRF_HACK_START */
|
2012-01-27 20:54:11 +00:00
|
|
|
int max_grf;
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2012-08-27 22:35:01 +01:00
|
|
|
fs_reg *fp_temp_regs;
|
|
|
|
fs_reg *fp_input_regs;
|
|
|
|
|
2010-10-10 23:42:37 +01:00
|
|
|
/** @{ debug annotation info */
|
|
|
|
const char *current_annotation;
|
2012-08-27 22:35:01 +01:00
|
|
|
const void *base_ir;
|
2010-10-10 23:42:37 +01:00
|
|
|
/** @} */
|
|
|
|
|
2011-03-13 20:43:05 +00:00
|
|
|
bool failed;
|
2011-05-16 23:10:26 +01:00
|
|
|
char *fail_msg;
|
i965: Accurately bail on SIMD16 compiles.
Ideally, we'd like to never even attempt the SIMD16 compile if we could
know ahead of time that it won't succeed---it's purely a waste of time.
This is especially important for state-based recompiles, which happen at
draw time.
The fragment shader compiler has a number of checks like:
if (dispatch_width == 16)
fail("...some reason...");
This patch introduces a new no16() function which replaces the above
pattern. In the SIMD8 compile, it sets a "SIMD16 will never work" flag.
Then, brw_wm_fs_emit can check that flag, skip the SIMD16 compile, and
issue a helpful performance warning if INTEL_DEBUG=perf is set. (In
SIMD16 mode, no16() calls fail(), for safety's sake.)
The great part is that this is not a heuristic---if the flag is set, we
know with 100% certainty that the SIMD16 compile would fail. (It might
fail anyway if we run out of registers, but it's always worth trying.)
v2: Fix missing va_end in early-return case (caught by Ilia Mirkin).
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Chris Forbes <chrisf@ijw.co.nz> [v1]
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com> [v1]
Reviewed-by: Eric Anholt <eric@anholt.net>
2014-03-07 08:49:45 +00:00
|
|
|
bool simd16_unsupported;
|
|
|
|
char *no16_msg;
|
2010-10-10 23:42:37 +01:00
|
|
|
|
2011-08-26 20:05:07 +01:00
|
|
|
/* Result of last visit() method. */
|
2010-10-10 23:42:37 +01:00
|
|
|
fs_reg result;
|
|
|
|
|
2014-05-14 05:52:51 +01:00
|
|
|
/** Register numbers for thread payload fields. */
|
|
|
|
struct {
|
|
|
|
uint8_t source_depth_reg;
|
|
|
|
uint8_t source_w_reg;
|
|
|
|
uint8_t aa_dest_stencil_reg;
|
|
|
|
uint8_t dest_depth_reg;
|
|
|
|
uint8_t sample_pos_reg;
|
|
|
|
uint8_t sample_mask_in_reg;
|
|
|
|
uint8_t barycentric_coord_reg[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT];
|
|
|
|
|
|
|
|
/** The number of thread payload registers the hardware will supply. */
|
|
|
|
uint8_t num_regs;
|
|
|
|
} payload;
|
|
|
|
|
2014-05-14 08:08:58 +01:00
|
|
|
bool source_depth_to_render_target;
|
|
|
|
bool runtime_check_aads_emit;
|
|
|
|
|
2010-10-10 23:42:37 +01:00
|
|
|
fs_reg pixel_x;
|
|
|
|
fs_reg pixel_y;
|
|
|
|
fs_reg wpos_w;
|
|
|
|
fs_reg pixel_w;
|
2011-10-22 01:20:32 +01:00
|
|
|
fs_reg delta_x[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT];
|
|
|
|
fs_reg delta_y[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT];
|
2012-11-27 22:10:52 +00:00
|
|
|
fs_reg shader_start_time;
|
2014-10-28 05:42:50 +00:00
|
|
|
fs_reg userplane[MAX_CLIP_PLANES];
|
2010-10-10 23:42:37 +01:00
|
|
|
|
|
|
|
int grf_used;
|
2013-10-29 19:46:18 +00:00
|
|
|
bool spilled_any_registers;
|
2011-03-12 03:19:01 +00:00
|
|
|
|
2012-11-20 21:50:52 +00:00
|
|
|
const unsigned dispatch_width; /**< 8 or 16 */
|
2010-10-10 23:42:37 +01:00
|
|
|
};
|
|
|
|
|
2012-11-09 09:05:47 +00:00
|
|
|
/**
|
|
|
|
* The fragment shader code generator.
|
|
|
|
*
|
|
|
|
* Translates FS IR to actual i965 assembly code.
|
|
|
|
*/
|
|
|
|
class fs_generator
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
fs_generator(struct brw_context *brw,
|
2014-05-14 09:21:02 +01:00
|
|
|
void *mem_ctx,
|
2014-10-21 06:53:31 +01:00
|
|
|
const void *key,
|
|
|
|
struct brw_stage_prog_data *prog_data,
|
|
|
|
struct gl_program *fp,
|
2015-01-13 22:28:13 +00:00
|
|
|
bool runtime_check_aads_emit,
|
|
|
|
const char *stage_abbrev);
|
2012-11-09 09:05:47 +00:00
|
|
|
~fs_generator();
|
|
|
|
|
2014-10-28 02:40:47 +00:00
|
|
|
void enable_debug(const char *shader_name);
|
2014-11-14 00:28:08 +00:00
|
|
|
int generate_code(const cfg_t *cfg, int dispatch_width);
|
|
|
|
const unsigned *get_assembly(unsigned int *assembly_size);
|
2012-11-09 09:05:47 +00:00
|
|
|
|
|
|
|
private:
|
2014-06-05 14:03:08 +01:00
|
|
|
void fire_fb_write(fs_inst *inst,
|
2014-09-16 23:16:20 +01:00
|
|
|
struct brw_reg payload,
|
2014-06-05 14:03:08 +01:00
|
|
|
struct brw_reg implied_header,
|
|
|
|
GLuint nr);
|
2014-09-16 23:16:20 +01:00
|
|
|
void generate_fb_write(fs_inst *inst, struct brw_reg payload);
|
2014-10-21 07:00:50 +01:00
|
|
|
void generate_urb_write(fs_inst *inst, struct brw_reg payload);
|
2013-12-17 12:00:50 +00:00
|
|
|
void generate_blorp_fb_write(fs_inst *inst);
|
2012-11-09 09:05:47 +00:00
|
|
|
void generate_pixel_xy(struct brw_reg dst, bool is_x);
|
|
|
|
void generate_linterp(fs_inst *inst, struct brw_reg dst,
|
|
|
|
struct brw_reg *src);
|
2014-08-03 10:23:31 +01:00
|
|
|
void generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
|
|
|
|
struct brw_reg sampler_index);
|
2014-06-07 10:21:47 +01:00
|
|
|
void generate_math_gen6(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg src0,
|
|
|
|
struct brw_reg src1);
|
2012-11-09 09:05:47 +00:00
|
|
|
void generate_math_gen4(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg src);
|
2013-03-30 07:15:54 +00:00
|
|
|
void generate_math_g45(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg src);
|
2014-11-08 09:39:14 +00:00
|
|
|
void generate_ddx(enum opcode op, struct brw_reg dst, struct brw_reg src);
|
|
|
|
void generate_ddy(enum opcode op, struct brw_reg dst, struct brw_reg src,
|
|
|
|
bool negate_value);
|
2013-10-16 19:45:06 +01:00
|
|
|
void generate_scratch_write(fs_inst *inst, struct brw_reg src);
|
|
|
|
void generate_scratch_read(fs_inst *inst, struct brw_reg dst);
|
2013-10-16 19:51:22 +01:00
|
|
|
void generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst);
|
2012-11-07 18:42:34 +00:00
|
|
|
void generate_uniform_pull_constant_load(fs_inst *inst, struct brw_reg dst,
|
|
|
|
struct brw_reg index,
|
|
|
|
struct brw_reg offset);
|
2012-12-05 08:06:30 +00:00
|
|
|
void generate_uniform_pull_constant_load_gen7(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg surf_index,
|
|
|
|
struct brw_reg offset);
|
2012-11-07 19:18:34 +00:00
|
|
|
void generate_varying_pull_constant_load(fs_inst *inst, struct brw_reg dst,
|
2013-03-18 17:16:42 +00:00
|
|
|
struct brw_reg index,
|
|
|
|
struct brw_reg offset);
|
2012-11-07 19:18:34 +00:00
|
|
|
void generate_varying_pull_constant_load_gen7(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg index,
|
|
|
|
struct brw_reg offset);
|
2012-12-06 18:36:11 +00:00
|
|
|
void generate_mov_dispatch_to_flags(fs_inst *inst);
|
2013-10-25 00:17:08 +01:00
|
|
|
|
2013-11-18 08:13:13 +00:00
|
|
|
void generate_pixel_interpolator_query(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg src,
|
|
|
|
struct brw_reg msg_data,
|
|
|
|
unsigned msg_type);
|
|
|
|
|
2013-10-25 00:21:13 +01:00
|
|
|
void generate_set_omask(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg sample_mask);
|
|
|
|
|
2013-10-25 00:17:08 +01:00
|
|
|
void generate_set_sample_id(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg src0,
|
|
|
|
struct brw_reg src1);
|
|
|
|
|
2013-03-06 22:47:22 +00:00
|
|
|
void generate_set_simd4x2_offset(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg offset);
|
2012-12-06 18:15:08 +00:00
|
|
|
void generate_discard_jump(fs_inst *inst);
|
|
|
|
|
2013-01-09 19:46:42 +00:00
|
|
|
void generate_pack_half_2x16_split(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg x,
|
|
|
|
struct brw_reg y);
|
|
|
|
void generate_unpack_half_2x16_split(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
|
|
|
struct brw_reg src);
|
|
|
|
|
2013-03-19 22:28:11 +00:00
|
|
|
void generate_shader_time_add(fs_inst *inst,
|
|
|
|
struct brw_reg payload,
|
|
|
|
struct brw_reg offset,
|
|
|
|
struct brw_reg value);
|
|
|
|
|
2013-09-11 22:01:50 +01:00
|
|
|
void generate_untyped_atomic(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
2014-09-12 00:13:15 +01:00
|
|
|
struct brw_reg payload,
|
2013-09-11 22:01:50 +01:00
|
|
|
struct brw_reg atomic_op,
|
|
|
|
struct brw_reg surf_index);
|
|
|
|
|
2013-09-11 22:03:13 +01:00
|
|
|
void generate_untyped_surface_read(fs_inst *inst,
|
|
|
|
struct brw_reg dst,
|
2014-09-12 00:43:37 +01:00
|
|
|
struct brw_reg payload,
|
2013-09-11 22:03:13 +01:00
|
|
|
struct brw_reg surf_index);
|
|
|
|
|
2014-05-16 21:06:45 +01:00
|
|
|
bool patch_discard_jumps_to_fb_writes();
|
2012-11-09 09:05:47 +00:00
|
|
|
|
|
|
|
struct brw_context *brw;
|
|
|
|
struct gl_context *ctx;
|
|
|
|
|
|
|
|
struct brw_compile *p;
|
2014-08-29 20:50:46 +01:00
|
|
|
const void * const key;
|
2014-08-29 20:50:46 +01:00
|
|
|
struct brw_stage_prog_data * const prog_data;
|
2012-11-09 09:05:47 +00:00
|
|
|
|
2014-08-29 20:50:46 +01:00
|
|
|
const struct gl_program *prog;
|
2012-11-09 09:05:47 +00:00
|
|
|
|
|
|
|
unsigned dispatch_width; /**< 8 or 16 */
|
|
|
|
|
2012-12-06 18:15:08 +00:00
|
|
|
exec_list discard_halt_patches;
|
2014-06-05 14:03:06 +01:00
|
|
|
bool runtime_check_aads_emit;
|
2014-10-28 02:40:47 +00:00
|
|
|
bool debug_flag;
|
|
|
|
const char *shader_name;
|
2015-01-13 22:28:13 +00:00
|
|
|
const char *stage_abbrev;
|
2012-11-09 09:05:47 +00:00
|
|
|
void *mem_ctx;
|
|
|
|
};

/* Lowering passes run on the GLSL IR before FS IR generation. */
bool brw_do_channel_expressions(struct exec_list *instructions);
bool brw_do_vector_splitting(struct exec_list *instructions);