r600g: remove deprecated state management code

It's nice to see so much code that did pretty much nothing go away.

Reviewed-by: Jerome Glisse <jglisse@redhat.com>
This commit is contained in:
Marek Olšák 2013-03-02 17:36:05 +01:00
parent 65cbf89567
commit 1724ef8908
10 changed files with 2 additions and 560 deletions

View File

@ -430,7 +430,6 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags);
ctx->pm4_dirty_cdwords = 0;
ctx->flags = 0;
COMPUTE_DBG(ctx->screen, "shader started\n");

View File

@ -58,7 +58,6 @@ struct evergreen_compute_resource {
};
/* Compute sampler CSO: wraps the gallium sampler state together with the
 * (deprecated) r600_pipe_state register list used to emit it. */
struct compute_sampler_state {
	struct r600_pipe_state base;
	struct pipe_sampler_state state;
};

View File

@ -28,22 +28,6 @@
#include "util/u_memory.h"
#include "util/u_math.h"
/* One-time hw-context setup for evergreen-class chips: flatten the
 * register-block hash tables into ctx->blocks and record the number of
 * depth buffers (8 on evergreen).  Returns 0 or a negative error code. */
int evergreen_context_init(struct r600_context *ctx)
{
	int r = r600_setup_block_table(ctx);

	if (r) {
		/* Undo whatever the partial setup created before failing. */
		r600_context_fini(ctx);
		return r;
	}

	ctx->max_db = 8;
	return 0;
}
void evergreen_flush_vgt_streamout(struct r600_context *ctx)
{
struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

View File

@ -63,63 +63,6 @@ struct r600_resource {
struct util_range valid_buffer_range;
};
/* Limits for one register block (see struct r600_block). */
#define R600_BLOCK_MAX_BO		32	/* max buffer relocations per block */
#define R600_BLOCK_MAX_REG		128	/* max pm4 dwords / registers per block */

/* each range covers 9 bits of dword space = 512 dwords = 2k bytes */
/* there is a block entry for each register so 512 blocks */
/* we have no registers to read/write below 0x8000 (0x2000 in dw space) */
/* we use some fake offsets at 0x40000 to do evergreen sampler borders so take 0x42000 as a max bound */
#define RANGE_OFFSET_START	0x8000
#define HASH_SHIFT		9
#define NUM_RANGES		(0x42000 - RANGE_OFFSET_START) / (4 << HASH_SHIFT) /* 128 << 9 = 64k */

/* Map a register byte offset to its range index... */
#define CTX_RANGE_ID(offset) ((((offset - RANGE_OFFSET_START) >> 2) >> HASH_SHIFT) & 255)
/* ...and to the block slot within that range's hash table. */
#define CTX_BLOCK_ID(offset) (((offset - RANGE_OFFSET_START) >> 2) & ((1 << HASH_SHIFT) - 1))
/* One queued register write: the value, the block that owns the register,
 * an optional buffer to relocate, and the register's index inside the
 * block's payload (see _r600_pipe_state_add_reg_bo). */
struct r600_pipe_reg {
	uint32_t value;			/* value to write */
	struct r600_block *block;	/* owning block */
	struct r600_resource *bo;	/* optional buffer for relocation */
	enum radeon_bo_usage bo_usage;
	uint32_t id;			/* register index within the block */
};
/* A fixed-capacity list of register writes, built with
 * r600_pipe_state_add_reg*() and applied through
 * r600_context_pipe_state_set() or r600_context_pipe_state_emit(). */
struct r600_pipe_state {
	unsigned id;
	unsigned nregs;			/* number of valid entries in regs[] */
	struct r600_pipe_reg regs[R600_BLOCK_MAX_REG];
};
/* Block status bits. */
#define R600_BLOCK_STATUS_ENABLED	(1 << 0)	/* on ctx->enable_list */
#define R600_BLOCK_STATUS_DIRTY		(1 << 1)	/* must be re-emitted */

/* One buffer relocation owned by a block: the buffer, how the GPU uses
 * it, and the index of the dword inside pm4[] that gets patched with the
 * relocation at emit time (see r600_context_block_emit_dirty). */
struct r600_block_reloc {
	struct r600_resource *bo;
	enum radeon_bo_usage bo_usage;
	unsigned bo_pm4_index;		/* dword in pm4[] to patch */
};
/* A group of consecutive registers emitted with one prebuilt SET_* packet.
 * pm4[] holds the whole packet; reg points at the payload dwords inside
 * pm4[] so register values can be updated in place (see r600_init_block). */
struct r600_block {
	struct list_head list;		/* link in ctx->dirty */
	struct list_head enable_list;	/* link in ctx->enable_list */
	unsigned status;		/* R600_BLOCK_STATUS_* */
	unsigned flags;			/* REG_FLAG_* inherited from its registers */
	unsigned start_offset;		/* byte offset of the first register */
	unsigned pm4_ndwords;		/* total dwords used in pm4[] */
	unsigned nbo;			/* relocations in use (indices start at 1) */
	uint16_t nreg;			/* registers in this block */
	uint16_t nreg_dirty;		/* dirty prefix length for partial emits */
	uint32_t *reg;			/* payload area inside pm4[] */
	uint32_t pm4[R600_BLOCK_MAX_REG];
	unsigned pm4_bo_index[R600_BLOCK_MAX_REG]; /* per-register reloc index, 0 = none */
	struct r600_block_reloc reloc[R600_BLOCK_MAX_BO];
};
/* One hash bucket range: a lazily allocated array of (1 << HASH_SHIFT)
 * block pointers, indexed by CTX_BLOCK_ID(offset). */
struct r600_range {
	struct r600_block **blocks;
};
struct r600_query_buffer {
/* The buffer where query results are stored. */
struct r600_resource *buf;
@ -169,10 +112,6 @@ struct r600_context;
struct r600_screen;
void r600_get_backend_mask(struct r600_context *ctx);
int r600_context_init(struct r600_context *ctx);
void r600_context_fini(struct r600_context *ctx);
void r600_context_pipe_state_emit(struct r600_context *ctx, struct r600_pipe_state *state, unsigned pkt_flags);
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
void r600_context_flush(struct r600_context *ctx, unsigned flags);
void r600_begin_new_cs(struct r600_context *ctx);
@ -208,27 +147,9 @@ boolean evergreen_dma_blit(struct pipe_context *ctx,
struct pipe_resource *src,
unsigned src_level,
const struct pipe_box *src_box);
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block, unsigned pkt_flags);
void r600_cp_dma_copy_buffer(struct r600_context *rctx,
struct pipe_resource *dst, uint64_t dst_offset,
struct pipe_resource *src, uint64_t src_offset,
unsigned size);
int evergreen_context_init(struct r600_context *ctx);
void _r600_pipe_state_add_reg_bo(struct r600_context *ctx,
struct r600_pipe_state *state,
uint32_t offset, uint32_t value,
uint32_t range_id, uint32_t block_id,
struct r600_resource *bo,
enum radeon_bo_usage usage);
void _r600_pipe_state_add_reg(struct r600_context *ctx,
struct r600_pipe_state *state,
uint32_t offset, uint32_t value,
uint32_t range_id, uint32_t block_id);
#define r600_pipe_state_add_reg_bo(state, offset, value, bo, usage) _r600_pipe_state_add_reg_bo(rctx, state, offset, value, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo, usage)
#define r600_pipe_state_add_reg(state, offset, value) _r600_pipe_state_add_reg(rctx, state, offset, value, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset))
#endif

View File

@ -114,182 +114,6 @@ err:
return;
}
/*
 * Fill in one freshly allocated block covering registers
 * reg[index..index+nreg).  Prebuilds the SET_* packet in pm4[]: header,
 * offset dword, nreg payload dwords, plus a NOP pair for every register
 * that needs a buffer relocation patched in at emit time.
 */
static void r600_init_block(struct r600_context *ctx,
			    struct r600_block *block,
			    const struct r600_reg *reg, int index, int nreg,
			    unsigned opcode, unsigned offset_base)
{
	int i = index;
	int j, n = nreg;

	/* initialize block */
	block->flags = 0;
	block->status |= R600_BLOCK_STATUS_DIRTY; /* dirty all blocks at start */
	block->start_offset = reg[i].offset;
	block->pm4[block->pm4_ndwords++] = PKT3(opcode, n, 0);
	/* packet offset field is in dwords relative to the register base */
	block->pm4[block->pm4_ndwords++] = (block->start_offset - offset_base) >> 2;
	block->reg = &block->pm4[block->pm4_ndwords];
	block->pm4_ndwords += n;
	block->nreg = n;
	block->nreg_dirty = n;
	LIST_INITHEAD(&block->list);
	LIST_INITHEAD(&block->enable_list);

	for (j = 0; j < n; j++) {
		if (reg[i+j].flags & REG_FLAG_DIRTY_ALWAYS) {
			block->flags |= REG_FLAG_DIRTY_ALWAYS;
		}
		if (reg[i+j].flags & REG_FLAG_ENABLE_ALWAYS) {
			if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
				block->status |= R600_BLOCK_STATUS_ENABLED;
				LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
				LIST_ADDTAIL(&block->list,&ctx->dirty);
			}
		}
		if (reg[i+j].flags & REG_FLAG_FLUSH_CHANGE) {
			block->flags |= REG_FLAG_FLUSH_CHANGE;
		}

		if (reg[i+j].flags & REG_FLAG_NEED_BO) {
			/* reloc indices start at 1; reserve a NOP + zero dword
			 * in pm4[] to hold the relocation written at emit time */
			block->nbo++;
			assert(block->nbo < R600_BLOCK_MAX_BO);
			block->pm4_bo_index[j] = block->nbo;
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
			block->pm4[block->pm4_ndwords++] = 0x00000000;
			block->reloc[block->nbo].bo_pm4_index = block->pm4_ndwords - 1;
		}
	}

	/* check that we stay in limit */
	assert(block->pm4_ndwords < R600_BLOCK_MAX_REG);
}
/*
 * Partition a register table into r600_blocks and register each block in
 * the per-range hash tables (ctx->range).  Runs of consecutive offsets
 * are grouped into one block; a GROUP_FORCE_NEW_BLOCK entry forces a
 * split.  Returns 0 on success, a negative value on allocation failure.
 */
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
			   unsigned opcode, unsigned offset_base)
{
	struct r600_block *block;
	struct r600_range *range;
	int offset;

	for (unsigned i = 0, n = 0; i < nreg; i += n) {
		/* ignore new block balise */
		if (reg[i].offset == GROUP_FORCE_NEW_BLOCK) {
			n = 1;
			continue;
		}

		/* register that need relocation are in their own group */
		/* find number of consecutive registers */
		n = 0;
		offset = reg[i].offset;
		while (reg[i + n].offset == offset) {
			n++;
			offset += 4;
			if ((n + i) >= nreg)
				break;
			/* leave room for the 2 packet header dwords */
			if (n >= (R600_BLOCK_MAX_REG - 2))
				break;
		}

		/* allocate new block */
		block = calloc(1, sizeof(struct r600_block));
		if (block == NULL) {
			return -ENOMEM;
		}
		ctx->nblocks++;
		/* point every covered register's hash slot at this block */
		for (int j = 0; j < n; j++) {
			range = &ctx->range[CTX_RANGE_ID(reg[i + j].offset)];
			/* create block table if it doesn't exist */
			if (!range->blocks)
				range->blocks = calloc(1 << HASH_SHIFT, sizeof(void *));
			if (!range->blocks) {
				free(block);
				return -1;
			}

			range->blocks[CTX_BLOCK_ID(reg[i + j].offset)] = block;
		}

		r600_init_block(ctx, block, reg, i, n, opcode, offset_base);
	}
	return 0;
}
/* Tear down the register-block state: unregister every block from the
 * range hash tables, drop its buffer references, and free the flat block
 * table.  Safe to call on a partially initialized context. */
void r600_context_fini(struct r600_context *ctx)
{
	struct r600_block *block;
	struct r600_range *range;

	if (ctx->range) {
		for (int i = 0; i < NUM_RANGES; i++) {
			if (!ctx->range[i].blocks)
				continue;
			for (int j = 0; j < (1 << HASH_SHIFT); j++) {
				block = ctx->range[i].blocks[j];
				if (block) {
					/* a block occupies one hash slot per register;
					 * clear them all so it is freed exactly once */
					for (int k = 0, offset = block->start_offset; k < block->nreg; k++, offset += 4) {
						range = &ctx->range[CTX_RANGE_ID(offset)];
						range->blocks[CTX_BLOCK_ID(offset)] = NULL;
					}
					/* reloc indices start at 1 (0 means "no reloc") */
					for (int k = 1; k <= block->nbo; k++) {
						pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
					}
					free(block);
				}
			}
			free(ctx->range[i].blocks);
		}
	}
	free(ctx->blocks);
}
/*
 * Flatten the range hash tables into ctx->blocks[], a deduplicated array
 * of every block (a block fills one hash slot per register, so duplicates
 * must be skipped).  Returns 0 or -ENOMEM.
 */
int r600_setup_block_table(struct r600_context *ctx)
{
	/* setup block table */
	int c = 0;

	ctx->blocks = calloc(ctx->nblocks, sizeof(void*));
	if (!ctx->blocks)
		return -ENOMEM;

	for (int i = 0; i < NUM_RANGES; i++) {
		if (!ctx->range[i].blocks)
			continue;
		for (int j = 0, add; j < (1 << HASH_SHIFT); j++) {
			if (!ctx->range[i].blocks[j])
				continue;
			/* only record a block the first time it is seen */
			add = 1;
			for (int k = 0; k < c; k++) {
				if (ctx->blocks[k] == ctx->range[i].blocks[j]) {
					add = 0;
					break;
				}
			}
			if (add) {
				assert(c < ctx->nblocks);
				ctx->blocks[c++] = ctx->range[i].blocks[j];
				/* the block fills nreg consecutive slots; skip them */
				j += (ctx->range[i].blocks[j]->nreg) - 1;
			}
		}
	}
	return 0;
}
/* One-time hw-context setup for r600/r700-class chips: flatten the
 * register-block hash tables into ctx->blocks and record the number of
 * depth buffers (4 on these chips).  Returns 0 or a negative error code. */
int r600_context_init(struct r600_context *ctx)
{
	int r = r600_setup_block_table(ctx);

	if (r) {
		/* Undo whatever the partial setup created before failing. */
		r600_context_fini(ctx);
		return r;
	}

	ctx->max_db = 4;
	return 0;
}
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
boolean count_draw_in)
{
@ -321,8 +145,6 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
}
}
num_dw += ctx->pm4_dirty_cdwords;
/* The upper-bound of how much space a draw command would take. */
num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
#if R600_TRACE_CS
@ -362,191 +184,6 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
}
}
/*
 * Mark (part of) a block dirty so it is re-emitted on the next draw.
 * index is the register index within the block; nreg_dirty records the
 * highest dirty register so only a prefix of the block needs emitting.
 */
void r600_context_dirty_block(struct r600_context *ctx,
			      struct r600_block *block,
			      int dirty, int index)
{
	if ((index + 1) > block->nreg_dirty)
		block->nreg_dirty = index + 1;

	if ((dirty != (block->status & R600_BLOCK_STATUS_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
		block->status |= R600_BLOCK_STATUS_DIRTY;
		/* budget the dwords this block will add to the next CS */
		ctx->pm4_dirty_cdwords += block->pm4_ndwords;
		if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
			block->status |= R600_BLOCK_STATUS_ENABLED;
			LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
		}
		LIST_ADDTAIL(&block->list,&ctx->dirty);

		if (block->flags & REG_FLAG_FLUSH_CHANGE) {
			/* this register change requires the 3D engine to idle first */
			ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
	}
}
/**
 * If reg needs a reloc, this function will add it to its block's reloc list.
 * @return true if reg needs a reloc, false otherwise
 */
static bool r600_reg_set_block_reloc(struct r600_pipe_reg *reg)
{
	unsigned reloc_id;

	/* index 0 means the register carries no buffer */
	if (!reg->block->pm4_bo_index[reg->id]) {
		return false;
	}

	/* find relocation */
	reloc_id = reg->block->pm4_bo_index[reg->id];
	/* take a reference on the new buffer (and drop the old one) */
	pipe_resource_reference(
		(struct pipe_resource**)&reg->block->reloc[reloc_id].bo,
		&reg->bo->b.b);
	reg->block->reloc[reloc_id].bo_usage = reg->bo_usage;

	return true;
}
/**
 * This function will emit all the registers in state directly to the command
 * stream allowing you to bypass the r600_context dirty list.
 *
 * This is used for dispatching compute shaders to avoid mixing compute and
 * 3D states in the context's dirty list.
 *
 * @param pkt_flags Should be either 0 or RADEON_CP_PACKET3_COMPUTE_MODE. This
 * value will be passed on to r600_context_block_emit_dirty and or'd against
 * the PKT3 headers.
 */
void r600_context_pipe_state_emit(struct r600_context *ctx,
			  struct r600_pipe_state *state,
			  unsigned pkt_flags)
{
	unsigned i;

	/* Mark all blocks as dirty:
	 * Since two registers can be in the same block, we need to make sure
	 * we mark all the blocks dirty before we emit any of them.  If we were
	 * to mark blocks dirty and emit them in the same loop, like this:
	 *
	 * foreach (reg in state->regs) {
	 *     mark_dirty(reg->block)
	 *     emit_block(reg->block)
	 * }
	 *
	 * Then if we have two registers in this state that are in the same
	 * block, we would end up emitting that block twice.
	 */
	for (i = 0; i < state->nregs; i++) {
		struct r600_pipe_reg *reg = &state->regs[i];
		/* Mark all the registers in the block as dirty */
		reg->block->nreg_dirty = reg->block->nreg;
		reg->block->status |= R600_BLOCK_STATUS_DIRTY;
		/* Update the reloc for this register if necessary. */
		r600_reg_set_block_reloc(reg);
	}

	/* Emit the registers writes */
	for (i = 0; i < state->nregs; i++) {
		struct r600_pipe_reg *reg = &state->regs[i];
		if (reg->block->status & R600_BLOCK_STATUS_DIRTY) {
			/* emitting clears DIRTY, so a block shared by two
			 * registers in this state is only written once */
			r600_context_block_emit_dirty(ctx, reg->block, pkt_flags);
		}
	}
}
/*
 * Apply a r600_pipe_state through the dirty-list path: copy each register
 * value into its owning block and mark the block dirty when anything
 * changed, so it is emitted on the next draw.
 */
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state)
{
	struct r600_block *block;
	int dirty;

	for (int i = 0; i < state->nregs; i++) {
		unsigned id;
		struct r600_pipe_reg *reg = &state->regs[i];

		block = reg->block;
		id = reg->id;

		dirty = block->status & R600_BLOCK_STATUS_DIRTY;

		/* only a real value change makes the block dirty... */
		if (reg->value != block->reg[id]) {
			block->reg[id] = reg->value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
		/* ...unless the block must be re-emitted unconditionally */
		if (block->flags & REG_FLAG_DIRTY_ALWAYS)
			dirty |= R600_BLOCK_STATUS_DIRTY;
		if (r600_reg_set_block_reloc(reg)) {
			/* always force dirty for relocs for now */
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}

		if (dirty)
			r600_context_dirty_block(ctx, block, dirty, id);
	}
}
/**
 * Write a dirty block's prebuilt packet into the gfx command stream,
 * patching in buffer relocations first, then clear its dirty state.
 *
 * @param pkt_flags should be set to RADEON_CP_PACKET3_COMPUTE_MODE if this
 * block will be used for compute shaders.
 */
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block,
	unsigned pkt_flags)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	/* a block with no buffers and no always-dirty registers may be
	 * emitted partially (dirty prefix only) or skipped entirely */
	int optional = block->nbo == 0 && !(block->flags & REG_FLAG_DIRTY_ALWAYS);
	int cp_dwords = block->pm4_ndwords, start_dword = 0;
	int new_dwords = 0;
	int nbo = block->nbo;

	if (block->nreg_dirty == 0 && optional) {
		goto out;
	}

	if (nbo) {
		for (int j = 0; j < block->nreg; j++) {
			if (block->pm4_bo_index[j]) {
				/* find relocation */
				struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
				if (reloc->bo) {
					block->pm4[reloc->bo_pm4_index] =
							r600_context_bo_reloc(ctx, &ctx->rings.gfx, reloc->bo, reloc->bo_usage);
				} else {
					block->pm4[reloc->bo_pm4_index] = 0;
				}
				nbo--;
				if (nbo == 0)
					break;
			}
		}
	}

	optional &= (block->nreg_dirty != block->nreg);
	if (optional) {
		/* emit only the dirty prefix: 2 header dwords + dirty registers */
		new_dwords = block->nreg_dirty;
		start_dword = cs->cdw;
		cp_dwords = new_dwords + 2;
	}
	memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);

	/* We are applying the pkt_flags after copying the register block to
	 * the command stream, because it is possible this block will be
	 * emitted with a different pkt_flags, and we don't want to store the
	 * pkt_flags in the block.
	 */
	cs->buf[cs->cdw] |= pkt_flags;

	cs->cdw += cp_dwords;

	if (optional) {
		/* patch the packet header's count field for the shorter emit */
		uint32_t newword;

		newword = cs->buf[start_dword];
		newword &= PKT_COUNT_C;
		newword |= PKT_COUNT_S(new_dwords);
		cs->buf[start_dword] = newword;
	}
out:
	/* blocks reaching here have DIRTY set, so the XOR clears it */
	block->status ^= R600_BLOCK_STATUS_DIRTY;
	block->nreg_dirty = 0;
	LIST_DELINIT(&block->list);
}
void r600_flush_emit(struct r600_context *rctx)
{
struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
@ -737,10 +374,8 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags)
void r600_begin_new_cs(struct r600_context *ctx)
{
struct r600_block *enable_block = NULL;
unsigned shader;
ctx->pm4_dirty_cdwords = 0;
ctx->flags = 0;
ctx->gtt = 0;
ctx->vram = 0;
@ -806,18 +441,6 @@ void r600_begin_new_cs(struct r600_context *ctx)
r600_resume_nontimer_queries(ctx);
}
/* set all valid group as dirty so they get reemited on
* next draw command
*/
LIST_FOR_EACH_ENTRY(enable_block, &ctx->enable_list, enable_list) {
if(!(enable_block->status & R600_BLOCK_STATUS_DIRTY)) {
LIST_ADDTAIL(&enable_block->list,&ctx->dirty);
enable_block->status |= R600_BLOCK_STATUS_DIRTY;
}
ctx->pm4_dirty_cdwords += enable_block->pm4_ndwords;
enable_block->nreg_dirty = enable_block->nreg;
}
/* Re-emit the draw state. */
ctx->last_primitive_type = -1;
ctx->last_start_instance = -1;

View File

@ -33,29 +33,6 @@
#define R600_MAX_DRAW_CS_DWORDS 34
#define R600_TRACE_CS_DWORDS 7
/* these flags are used in register flags and added into block flags */
#define REG_FLAG_NEED_BO		1	/* register carries a buffer relocation */
#define REG_FLAG_DIRTY_ALWAYS		2	/* re-emit the block on every state set */
#define REG_FLAG_ENABLE_ALWAYS		16	/* keep the block on the enable list */
#define REG_FLAG_FLUSH_CHANGE		64	/* changing it requires a 3D-idle wait */

/* Sentinel offset that forces a new block in r600_context_add_block(). */
#define GROUP_FORCE_NEW_BLOCK		0

/* One entry of the static register tables fed to r600_context_add_block(). */
struct r600_reg {
	unsigned			offset;		/* register byte offset */
	unsigned			flags;		/* REG_FLAG_* */
	unsigned			reserved;
};
/*
* r600_hw_context.c
*/
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
unsigned opcode, unsigned offset_base);
void r600_context_dirty_block(struct r600_context *ctx, struct r600_block *block,
int dirty, int index);
int r600_setup_block_table(struct r600_context *ctx);
/*
* evergreen_hw_context.c
*/

View File

@ -318,8 +318,6 @@ static void r600_destroy_context(struct pipe_context *context)
}
util_unreference_framebuffer_state(&rctx->framebuffer.state);
r600_context_fini(rctx);
if (rctx->blitter) {
util_blitter_destroy(rctx->blitter);
}
@ -343,7 +341,6 @@ static void r600_destroy_context(struct pipe_context *context)
rctx->ws->cs_destroy(rctx->rings.dma.cs);
}
FREE(rctx->range);
FREE(rctx);
}
@ -372,19 +369,12 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
rctx->keep_tiling_flags = rscreen->info.drm_minor >= 12;
LIST_INITHEAD(&rctx->active_nontimer_queries);
LIST_INITHEAD(&rctx->dirty);
LIST_INITHEAD(&rctx->enable_list);
rctx->range = CALLOC(NUM_RANGES, sizeof(struct r600_range));
if (!rctx->range)
goto fail;
r600_init_blit_functions(rctx);
r600_init_query_functions(rctx);
r600_init_context_resource_functions(rctx);
r600_init_surface_functions(rctx);
rctx->context.create_video_decoder = vl_create_decoder;
rctx->context.create_video_buffer = vl_video_buffer_create;
@ -395,8 +385,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
case R700:
r600_init_state_functions(rctx);
r600_init_atom_start_cs(rctx);
if (r600_context_init(rctx))
goto fail;
rctx->max_db = 4;
rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
rctx->custom_blend_resolve = rctx->chip_class == R700 ? r700_create_resolve_blend(rctx)
: r600_create_resolve_blend(rctx);
@ -412,8 +401,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
evergreen_init_state_functions(rctx);
evergreen_init_atom_start_cs(rctx);
evergreen_init_atom_start_compute_cs(rctx);
if (evergreen_context_init(rctx))
goto fail;
rctx->max_db = 8;
rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);

View File

@ -568,14 +568,6 @@ struct r600_context {
unsigned current_render_cond_mode;
boolean predicate_drawing;
/* Deprecated state management. */
struct r600_range *range;
unsigned nblocks;
struct r600_block **blocks;
struct list_head dirty;
struct list_head enable_list;
unsigned pm4_dirty_cdwords;
struct r600_isa *isa;
};

View File

@ -76,7 +76,6 @@ struct r600_pipe_shader {
struct r600_pipe_shader *next_variant;
struct r600_shader shader;
struct r600_command_buffer command_buffer; /* register writes */
struct r600_pipe_state rstate;
struct r600_resource *bo;
unsigned sprite_coord_enable;
unsigned flatshade;

View File

@ -1319,7 +1319,6 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
struct pipe_draw_info info = *dinfo;
struct pipe_index_buffer ib = {};
unsigned i;
struct r600_block *dirty_block = NULL, *next_block = NULL;
struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
if (!info.count && (info.indexed || !info.count_from_stream_output)) {
@ -1410,10 +1409,6 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
}
r600_emit_atom(rctx, rctx->atoms[i]);
}
LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->dirty,list) {
r600_context_block_emit_dirty(rctx, dirty_block, 0 /* pkt_flags */);
}
rctx->pm4_dirty_cdwords = 0;
/* Update start instance. */
if (rctx->last_start_instance != info.start_instance) {
@ -1585,41 +1580,6 @@ void r600_draw_rectangle(struct blitter_context *blitter,
pipe_resource_reference(&buf, NULL);
}
/*
 * Append one register write (optionally with a buffer to relocate) to a
 * r600_pipe_state.  range_id/block_id locate the owning block in the
 * ctx->range hash tables; offset must fall inside that block.
 */
void _r600_pipe_state_add_reg_bo(struct r600_context *ctx,
				 struct r600_pipe_state *state,
				 uint32_t offset, uint32_t value,
				 uint32_t range_id, uint32_t block_id,
				 struct r600_resource *bo,
				 enum radeon_bo_usage usage)
{
	struct r600_range *range;
	struct r600_block *block;

	/* a buffer must always come with a usage flag */
	if (bo) assert(usage);

	range = &ctx->range[range_id];
	block = range->blocks[block_id];
	state->regs[state->nregs].block = block;
	/* register index within the block, in dwords */
	state->regs[state->nregs].id = (offset - block->start_offset) >> 2;
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}
/* Convenience wrapper: append a register write with no buffer relocation. */
void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      uint32_t offset, uint32_t value,
			      uint32_t range_id, uint32_t block_id)
{
	_r600_pipe_state_add_reg_bo(ctx, state, offset, value,
				    range_id, block_id, NULL, 0);
}
uint32_t r600_translate_stencil_op(int s_op)
{
switch (s_op) {