Change all debug_assert calls to assert

Acked-By: Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Reviewed-by: Emma Anholt <emma@anholt.net>
Acked-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17403>
This commit is contained in:
Marek Olšák 2022-07-07 13:29:53 -04:00 committed by Marge Bot
parent 2f01a22fe4
commit c9ca8abe4f
88 changed files with 278 additions and 287 deletions

View File

@@ -161,7 +161,7 @@ emit_const(struct fd_ringbuffer *ring, struct kernel *kernel, uint32_t constid,
{
uint32_t align_sz;
debug_assert((constid % 4) == 0);
assert((constid % 4) == 0);
/* Overwrite appropriate entries with buffer addresses */
struct fd_bo **replacements = calloc(sizedwords, sizeof(struct fd_bo *));

View File

@@ -232,7 +232,7 @@ emit_const(struct fd_ringbuffer *ring, uint32_t regid, uint32_t sizedwords,
{
uint32_t align_sz;
debug_assert((regid % 4) == 0);
assert((regid % 4) == 0);
align_sz = align(sizedwords, 4);

View File

@@ -72,16 +72,16 @@ struct fd_ringbuffer *
fd_submit_new_ringbuffer(struct fd_submit *submit, uint32_t size,
enum fd_ringbuffer_flags flags)
{
debug_assert(!(flags & _FD_RINGBUFFER_OBJECT));
assert(!(flags & _FD_RINGBUFFER_OBJECT));
if (flags & FD_RINGBUFFER_STREAMING) {
debug_assert(!(flags & FD_RINGBUFFER_GROWABLE));
debug_assert(!(flags & FD_RINGBUFFER_PRIMARY));
assert(!(flags & FD_RINGBUFFER_GROWABLE));
assert(!(flags & FD_RINGBUFFER_PRIMARY));
}
struct fd_ringbuffer *ring =
submit->funcs->new_ringbuffer(submit, size, flags);
if (flags & FD_RINGBUFFER_PRIMARY) {
debug_assert(!submit->primary);
assert(!submit->primary);
submit->primary = fd_ringbuffer_ref(ring);
}

View File

@@ -252,7 +252,7 @@ fd_ringbuffer_size(struct fd_ringbuffer *ring)
* do what you expect for growable rb's.. so lets just restrict
* this to stateobj's for now:
*/
debug_assert(!(ring->flags & FD_RINGBUFFER_GROWABLE));
assert(!(ring->flags & FD_RINGBUFFER_GROWABLE));
return offset_bytes(ring->cur, ring->start);
}
@@ -287,7 +287,7 @@ OUT_RELOC(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t offset,
fprintf(stderr, "ring[%p]: OUT_RELOC %04x: %p+%u << %d", ring,
(uint32_t)(ring->cur - ring->start), bo, offset, shift);
}
debug_assert(offset < fd_bo_size(bo));
assert(offset < fd_bo_size(bo));
uint64_t iova = fd_bo_get_iova(bo) + offset;

View File

@@ -391,7 +391,7 @@ fd_submit_sp_destroy(struct fd_submit *submit)
_mesa_hash_table_destroy(fd_submit->bo_table, NULL);
// TODO it would be nice to have a way to debug_assert() if all
// TODO it would be nice to have a way to assert() if all
// rb's haven't been free'd back to the slab, because that is
// an indication that we are leaking bo's
slab_destroy_child(&fd_submit->ring_pool);
@ -445,7 +445,7 @@ fd_pipe_sp_ringpool_fini(struct fd_pipe *pipe)
static void
finalize_current_cmd(struct fd_ringbuffer *ring)
{
debug_assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
struct fd_ringbuffer_sp *fd_ring = to_fd_ringbuffer_sp(ring);
APPEND(&fd_ring->u, cmds,
@ -461,7 +461,7 @@ fd_ringbuffer_sp_grow(struct fd_ringbuffer *ring, uint32_t size)
struct fd_ringbuffer_sp *fd_ring = to_fd_ringbuffer_sp(ring);
struct fd_pipe *pipe = fd_ring->u.submit->pipe;
debug_assert(ring->flags & FD_RINGBUFFER_GROWABLE);
assert(ring->flags & FD_RINGBUFFER_GROWABLE);
finalize_current_cmd(ring);
@ -581,7 +581,7 @@ fd_ringbuffer_sp_init(struct fd_ringbuffer_sp *fd_ring, uint32_t size,
{
struct fd_ringbuffer *ring = &fd_ring->base;
debug_assert(fd_ring->ring_bo);
assert(fd_ring->ring_bo);
uint8_t *base = fd_bo_map(fd_ring->ring_bo);
ring->start = (void *)(base + fd_ring->offset);

View File

@@ -303,12 +303,12 @@ msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
struct fd_ringbuffer *ring = (void *)entry->key;
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
debug_assert(i < nr_cmds);
assert(i < nr_cmds);
// TODO handle relocs:
if (ring->flags & _FD_RINGBUFFER_OBJECT) {
debug_assert(o < nr_objs);
assert(o < nr_objs);
void *relocs = handle_stateobj_relocs(msm_submit, msm_ring);
obj_relocs[o++] = relocs;
@ -399,7 +399,7 @@ msm_submit_destroy(struct fd_submit *submit)
_mesa_hash_table_destroy(msm_submit->bo_table, NULL);
_mesa_set_destroy(msm_submit->ring_set, unref_rings);
// TODO it would be nice to have a way to debug_assert() if all
// TODO it would be nice to have a way to assert() if all
// rb's haven't been free'd back to the slab, because that is
// an indication that we are leaking bo's
slab_destroy(&msm_submit->ring_pool);
@ -442,12 +442,12 @@ finalize_current_cmd(struct fd_ringbuffer *ring)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
debug_assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
if (!msm_ring->cmd)
return;
debug_assert(msm_ring->cmd->ring_bo == msm_ring->ring_bo);
assert(msm_ring->cmd->ring_bo == msm_ring->ring_bo);
msm_ring->cmd->size = offset_bytes(ring->cur, ring->start);
APPEND(&msm_ring->u, cmds, msm_ring->cmd);
@ -460,7 +460,7 @@ msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
struct fd_pipe *pipe = msm_ring->u.submit->pipe;
debug_assert(ring->flags & FD_RINGBUFFER_GROWABLE);
assert(ring->flags & FD_RINGBUFFER_GROWABLE);
finalize_current_cmd(ring);
@ -531,7 +531,7 @@ append_stateobj_rings(struct msm_submit *submit, struct fd_ringbuffer *target)
{
struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
debug_assert(target->flags & _FD_RINGBUFFER_OBJECT);
assert(target->flags & _FD_RINGBUFFER_OBJECT);
set_foreach (msm_target->u.ring_set, entry) {
struct fd_ringbuffer *ring = (void *)entry->key;
@ -661,7 +661,7 @@ msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
{
struct fd_ringbuffer *ring = &msm_ring->base;
debug_assert(msm_ring->ring_bo);
assert(msm_ring->ring_bo);
uint8_t *base = fd_bo_map(msm_ring->ring_bo);
ring->start = (void *)(base + msm_ring->offset);

View File

@@ -567,7 +567,7 @@ ir3_src_create(struct ir3_instruction *instr, int num, int flags)
{
struct ir3 *shader = instr->block->shader;
#ifdef DEBUG
debug_assert(instr->srcs_count < instr->srcs_max);
assert(instr->srcs_count < instr->srcs_max);
#endif
struct ir3_register *reg = reg_create(shader, num, flags);
instr->srcs[instr->srcs_count++] = reg;
@ -579,7 +579,7 @@ ir3_dst_create(struct ir3_instruction *instr, int num, int flags)
{
struct ir3 *shader = instr->block->shader;
#ifdef DEBUG
debug_assert(instr->dsts_count < instr->dsts_max);
assert(instr->dsts_count < instr->dsts_max);
#endif
struct ir3_register *reg = reg_create(shader, num, flags);
instr->dsts[instr->dsts_count++] = reg;
@ -612,21 +612,21 @@ ir3_instr_set_address(struct ir3_instruction *instr,
if (!instr->address) {
struct ir3 *ir = instr->block->shader;
debug_assert(instr->block == addr->block);
assert(instr->block == addr->block);
instr->address =
ir3_src_create(instr, addr->dsts[0]->num, addr->dsts[0]->flags);
instr->address->def = addr->dsts[0];
debug_assert(reg_num(addr->dsts[0]) == REG_A0);
assert(reg_num(addr->dsts[0]) == REG_A0);
unsigned comp = reg_comp(addr->dsts[0]);
if (comp == 0) {
array_insert(ir, ir->a0_users, instr);
} else {
debug_assert(comp == 1);
assert(comp == 1);
array_insert(ir, ir->a1_users, instr);
}
} else {
debug_assert(instr->address->def->instr == addr);
assert(instr->address->def->instr == addr);
}
}

View File

@ -1158,7 +1158,7 @@ dest_regs(struct ir3_instruction *instr)
if (instr->dsts_count == 0)
return 0;
debug_assert(instr->dsts_count == 1);
assert(instr->dsts_count == 1);
return util_last_bit(instr->dsts[0]->wrmask);
}
@ -1994,7 +1994,7 @@ ir3_MOV(struct ir3_block *block, struct ir3_instruction *src, type_t type)
} else {
__ssa_src(instr, src, src->dsts[0]->flags & IR3_REG_SHARED);
}
debug_assert(!(src->dsts[0]->flags & IR3_REG_RELATIV));
assert(!(src->dsts[0]->flags & IR3_REG_RELATIV));
instr->cat1.src_type = type;
instr->cat1.dst_type = type;
return instr;
@ -2008,13 +2008,13 @@ ir3_COV(struct ir3_block *block, struct ir3_instruction *src, type_t src_type,
unsigned dst_flags = (type_size(dst_type) < 32) ? IR3_REG_HALF : 0;
unsigned src_flags = (type_size(src_type) < 32) ? IR3_REG_HALF : 0;
debug_assert((src->dsts[0]->flags & IR3_REG_HALF) == src_flags);
assert((src->dsts[0]->flags & IR3_REG_HALF) == src_flags);
__ssa_dst(instr)->flags |= dst_flags;
__ssa_src(instr, src, 0);
instr->cat1.src_type = src_type;
instr->cat1.dst_type = dst_type;
debug_assert(!(src->dsts[0]->flags & IR3_REG_ARRAY));
assert(!(src->dsts[0]->flags & IR3_REG_ARRAY));
return instr;
}

View File

@@ -232,7 +232,7 @@ get_image_offset(struct ir3_context *ctx, const nir_intrinsic_instr *instr,
unsigned cb;
if (ctx->compiler->gen > 4) {
const struct ir3_const_state *const_state = ir3_const_state(ctx->so);
debug_assert(const_state->image_dims.mask & (1 << index));
assert(const_state->image_dims.mask & (1 << index));
cb = regid(const_state->offsets.image_dims, 0) +
const_state->image_dims.off[index];
@ -308,7 +308,7 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
break;
default:
/* For some reason even more 32-bit components don't work. */
debug_assert(0);
assert(0);
break;
}
}

View File

@ -284,7 +284,7 @@ shader_debug_enabled(gl_shader_stage type)
case MESA_SHADER_KERNEL:
return !!(ir3_shader_debug & IR3_DBG_SHADER_CS);
default:
debug_assert(0);
assert(0);
return false;
}
}

View File

@ -1896,7 +1896,7 @@ create_multidst_mov(struct ir3_block *block, struct ir3_register *dst)
ir3_src_create(mov, INVALID_REG, IR3_REG_SSA | src_flags);
src->wrmask = dst->wrmask;
src->def = dst;
debug_assert(!(dst->flags & IR3_REG_RELATIV));
assert(!(dst->flags & IR3_REG_RELATIV));
mov->cat1.src_type = mov->cat1.dst_type =
(dst->flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
return mov;
@ -3220,7 +3220,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
bits = 10;
break;
default:
debug_assert(0);
assert(0);
}
sam->cat5.type = TYPE_F32;
@ -3881,7 +3881,7 @@ emit_function(struct ir3_context *ctx, nir_function_impl *impl)
if ((ctx->compiler->gen < 5) &&
(ctx->so->stream_output.num_outputs > 0) &&
!ctx->so->binning_pass) {
debug_assert(ctx->so->type == MESA_SHADER_VERTEX);
assert(ctx->so->type == MESA_SHADER_VERTEX);
emit_stream_out(ctx);
}
@ -4153,7 +4153,7 @@ setup_output(struct ir3_context *ctx, nir_intrinsic_instr *intr)
break;
case VARYING_SLOT_PRIMITIVE_ID:
case VARYING_SLOT_GS_VERTEX_FLAGS_IR3:
debug_assert(ctx->so->type == MESA_SHADER_GEOMETRY);
assert(ctx->so->type == MESA_SHADER_GEOMETRY);
FALLTHROUGH;
case VARYING_SLOT_COL0:
case VARYING_SLOT_COL1:
@ -4755,7 +4755,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
unsigned n = i / 4;
unsigned c = i % 4;
debug_assert(n < so->nonbinning->inputs_count);
assert(n < so->nonbinning->inputs_count);
if (so->nonbinning->inputs[n].sysval)
continue;

View File

@ -344,7 +344,7 @@ ir3_create_collect(struct ir3_block *block, struct ir3_instruction *const *arr,
elem = ir3_MOV(block, elem, type);
}
debug_assert(dest_flags(elem) == flags);
assert(dest_flags(elem) == flags);
__ssa_src(collect, elem, flags);
}
@ -368,7 +368,7 @@ ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
}
if (src->opc == OPC_META_COLLECT) {
debug_assert((base + n) <= src->srcs_count);
assert((base + n) <= src->srcs_count);
for (int i = 0; i < n; i++) {
dst[i] = ssa(src->srcs[i + base]);

View File

@ -248,7 +248,7 @@ lower_immed(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr, unsigned n,
static void
unuse(struct ir3_instruction *instr)
{
debug_assert(instr->use_count > 0);
assert(instr->use_count > 0);
if (--instr->use_count == 0) {
struct ir3_block *block = instr->block;
@ -260,7 +260,7 @@ unuse(struct ir3_instruction *instr)
* be things like array store's)
*/
for (unsigned i = 0; i < block->keeps_count; i++) {
debug_assert(block->keeps[i] != instr);
assert(block->keeps[i] != instr);
}
}
}
@ -360,7 +360,7 @@ reg_cp(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr,
if (ir3_valid_flags(instr, n, new_flags)) {
if (new_flags & IR3_REG_ARRAY) {
debug_assert(!(reg->flags & IR3_REG_ARRAY));
assert(!(reg->flags & IR3_REG_ARRAY));
reg->array = src_reg->array;
}
reg->flags = new_flags;
@ -475,7 +475,7 @@ reg_cp(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr,
if (src_reg->flags & IR3_REG_IMMED) {
int32_t iim_val = src_reg->iim_val;
debug_assert((opc_cat(instr->opc) == 1) ||
assert((opc_cat(instr->opc) == 1) ||
(opc_cat(instr->opc) == 2) ||
(opc_cat(instr->opc) == 6) ||
is_meta(instr) ||
@ -530,7 +530,7 @@ eliminate_output_mov(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr)
struct ir3_register *reg = instr->srcs[0];
if (!(reg->flags & IR3_REG_ARRAY)) {
struct ir3_instruction *src_instr = ssa(reg);
debug_assert(src_instr);
assert(src_instr);
ctx->progress = true;
return src_instr;
}
@ -648,7 +648,7 @@ instr_cp(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr)
*/
struct ir3_instruction *samp_tex = ssa(instr->srcs[0]);
debug_assert(samp_tex->opc == OPC_META_COLLECT);
assert(samp_tex->opc == OPC_META_COLLECT);
struct ir3_register *samp = samp_tex->srcs[0];
struct ir3_register *tex = samp_tex->srcs[1];
@ -690,7 +690,7 @@ ir3_cp(struct ir3 *ir, struct ir3_shader_variant *so)
/* by the way, we don't account for false-dep's, so the CP
* pass should always happen before false-dep's are inserted
*/
debug_assert(instr->deps_count == 0);
assert(instr->deps_count == 0);
foreach_ssa_src (src, instr) {
src->use_count++;

View File

@ -563,7 +563,7 @@ retarget_jump(struct ir3_instruction *instr, struct ir3_block *new_target)
if (cur_block->successors[0] == old_target) {
cur_block->successors[0] = new_target;
} else {
debug_assert(cur_block->successors[1] == old_target);
assert(cur_block->successors[1] == old_target);
cur_block->successors[1] = new_target;
}
@ -571,7 +571,7 @@ retarget_jump(struct ir3_instruction *instr, struct ir3_block *new_target)
if (cur_block->physical_successors[0] == old_target) {
cur_block->physical_successors[0] = new_target;
} else {
debug_assert(cur_block->physical_successors[1] == old_target);
assert(cur_block->physical_successors[1] == old_target);
cur_block->physical_successors[1] = new_target;
}
@ -753,7 +753,7 @@ block_sched(struct ir3 *ir)
* to follow it with an inverted branch, so follow it by an
* unconditional branch.
*/
debug_assert(!block->condition);
assert(!block->condition);
if (block->brtype == IR3_BRANCH_GETONE)
br1 = ir3_GETONE(block);
else
@ -763,7 +763,7 @@ block_sched(struct ir3 *ir)
br2 = ir3_JUMP(block);
br2->cat0.target = block->successors[0];
} else {
debug_assert(block->condition);
assert(block->condition);
/* create "else" branch first (since "then" block should
* frequently/always end up being a fall-thru):
@ -914,7 +914,7 @@ nop_sched(struct ir3 *ir, struct ir3_shader_variant *so)
}
if (delay > 0) {
debug_assert(delay <= 6);
assert(delay <= 6);
ir3_NOP(block)->repeat = delay - 1;
}

View File

@@ -959,7 +959,7 @@ ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
const_state->num_ubos = nir->info.num_ubos;
debug_assert((const_state->ubo_state.size % 16) == 0);
assert((const_state->ubo_state.size % 16) == 0);
unsigned constoff = v->num_reserved_user_consts +
const_state->ubo_state.size / 16 +
const_state->preamble_size;

View File

@ -314,7 +314,7 @@ lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
: nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
}
debug_assert(!(const_offset & 0x3));
assert(!(const_offset & 0x3));
const_offset >>= 2;
const int range_offset = ((int)range->offset - (int)range->start) / 4;
@ -453,7 +453,7 @@ ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
for (uint32_t i = 0; i < state->num_enabled; i++) {
uint32_t range_size = state->range[i].end - state->range[i].start;
debug_assert(offset <= max_upload);
assert(offset <= max_upload);
state->range[i].offset = offset;
assert(offset <= max_upload);
offset += range_size;

View File

@ -45,7 +45,7 @@ static int
get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
uint8_t *offset_src_idx)
{
debug_assert(offset_src_idx);
assert(offset_src_idx);
*offset_src_idx = 1;
@ -86,7 +86,7 @@ static nir_ssa_def *
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
int32_t direction, int32_t shift)
{
debug_assert(alu_instr->src[1].src.is_ssa);
assert(alu_instr->src[1].src.is_ssa);
nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;
/* Only propagate if the shift is a const value so we can check value range
@ -217,7 +217,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
/* 'offset_src_idx' holds the index of the source that represent the offset. */
new_intrinsic = nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);
debug_assert(intrinsic->src[offset_src_idx].is_ssa);
assert(intrinsic->src[offset_src_idx].is_ssa);
nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;
/* Since we don't have value range checking, we first try to propagate
@ -237,7 +237,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
*target_src = nir_src_for_ssa(offset);
if (has_dest) {
debug_assert(intrinsic->dest.is_ssa);
assert(intrinsic->dest.is_ssa);
nir_ssa_def *dest = &intrinsic->dest.ssa;
nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
dest->num_components, dest->bit_size, NULL);

View File

@ -95,7 +95,7 @@ ir3_nir_lower_load_barycentric_at_offset_filter(const nir_instr *instr,
bool
ir3_nir_lower_load_barycentric_at_offset(nir_shader *shader)
{
debug_assert(shader->info.stage == MESA_SHADER_FRAGMENT);
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
return nir_shader_lower_instructions(
shader, ir3_nir_lower_load_barycentric_at_offset_filter,

View File

@ -86,7 +86,7 @@ ir3_nir_lower_load_barycentric_at_sample_filter(const nir_instr *instr,
bool
ir3_nir_lower_load_barycentric_at_sample(nir_shader *shader)
{
debug_assert(shader->info.stage == MESA_SHADER_FRAGMENT);
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
return nir_shader_lower_instructions(
shader, ir3_nir_lower_load_barycentric_at_sample_filter,

View File

@ -181,7 +181,7 @@ lower_tex_prefetch_block(nir_block *block)
int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
/* First source should be the sampling coordinate. */
nir_tex_src *coord = &tex->src[idx];
debug_assert(coord->src.is_ssa);
assert(coord->src.is_ssa);
if (ir3_nir_coord_offset(coord->src.ssa) >= 0) {
tex->op = nir_texop_tex_prefetch;

View File

@ -125,7 +125,7 @@ static bool
move_src(nir_src *src, void *state)
{
/* At this point we shouldn't have any non-ssa src: */
debug_assert(src->is_ssa);
assert(src->is_ssa);
move_instruction_to_start_block(state, src->ssa->parent_instr);
return true;
}
@ -169,7 +169,7 @@ move_varying_inputs_block(state *state, nir_block *block)
continue;
}
debug_assert(intr->dest.is_ssa);
assert(intr->dest.is_ssa);
move_instruction_to_start_block(state, instr);
@ -184,7 +184,7 @@ ir3_nir_move_varying_inputs(nir_shader *shader)
{
bool progress = false;
debug_assert(shader->info.stage == MESA_SHADER_FRAGMENT);
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_foreach_function (function, shader) {
precond_state state;

View File

@ -107,7 +107,7 @@ has_ss_src(struct ir3_instruction *instr)
static void
schedule(struct ir3_postsched_ctx *ctx, struct ir3_instruction *instr)
{
debug_assert(ctx->block == instr->block);
assert(ctx->block == instr->block);
/* remove from unscheduled_list:
*/
@ -687,7 +687,7 @@ sched_block(struct ir3_postsched_ctx *ctx, struct ir3_block *block)
unsigned delay = node_delay(ctx, instr->data);
d("delay=%u", delay);
debug_assert(delay <= 6);
assert(delay <= 6);
schedule(ctx, instr);
}

View File

@ -261,24 +261,24 @@ cycle_count(struct ir3_instruction *instr)
static void
schedule(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
debug_assert(ctx->block == instr->block);
assert(ctx->block == instr->block);
/* remove from depth list:
*/
list_delinit(&instr->node);
if (writes_addr0(instr)) {
debug_assert(ctx->addr0 == NULL);
assert(ctx->addr0 == NULL);
ctx->addr0 = instr;
}
if (writes_addr1(instr)) {
debug_assert(ctx->addr1 == NULL);
assert(ctx->addr1 == NULL);
ctx->addr1 = instr;
}
if (writes_pred(instr)) {
debug_assert(ctx->pred == NULL);
assert(ctx->pred == NULL);
ctx->pred = instr;
}
@ -416,7 +416,7 @@ static bool
check_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
struct ir3_instruction *instr)
{
debug_assert(!is_scheduled(instr));
assert(!is_scheduled(instr));
if (instr == ctx->split) {
/* Don't schedule instructions created by splitting a a0.x/a1.x/p0.x
@ -474,19 +474,19 @@ check_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
* free:
*/
if (writes_addr0(instr) && ctx->addr0) {
debug_assert(ctx->addr0 != instr);
assert(ctx->addr0 != instr);
notes->addr0_conflict = true;
return false;
}
if (writes_addr1(instr) && ctx->addr1) {
debug_assert(ctx->addr1 != instr);
assert(ctx->addr1 != instr);
notes->addr1_conflict = true;
return false;
}
if (writes_pred(instr) && ctx->pred) {
debug_assert(ctx->pred != instr);
assert(ctx->pred != instr);
notes->pred_conflict = true;
return false;
}
@ -919,7 +919,7 @@ split_addr(struct ir3_sched_ctx *ctx, struct ir3_instruction **addr,
struct ir3_instruction *new_addr = NULL;
unsigned i;
debug_assert(*addr);
assert(*addr);
for (i = 0; i < users_count; i++) {
struct ir3_instruction *indirect = users[i];
@ -966,7 +966,7 @@ split_pred(struct ir3_sched_ctx *ctx)
struct ir3_instruction *new_pred = NULL;
unsigned i;
debug_assert(ctx->pred);
assert(ctx->pred);
ir = ctx->pred->block->shader;
@ -1038,7 +1038,7 @@ sched_node_add_dep(struct ir3_instruction *instr, struct ir3_instruction *src,
/* we could have false-dep's that end up unused: */
if (src->flags & IR3_INSTR_UNUSED) {
debug_assert(__is_false_dep(instr, i));
assert(__is_false_dep(instr, i));
return;
}
@ -1234,7 +1234,7 @@ sched_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
unsigned delay = node_delay(ctx, instr->data);
d("delay=%u", delay);
debug_assert(delay <= 6);
assert(delay <= 6);
schedule(ctx, instr);
@ -1263,7 +1263,7 @@ sched_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
d("unscheduled_list:");
foreach_instr (instr, &ctx->unscheduled_list)
di(instr, "unscheduled: ");
debug_assert(0);
assert(0);
ctx->error = true;
return;
}

View File

@ -1044,7 +1044,7 @@ ir3_link_add(struct ir3_shader_linkage *l, uint8_t slot, uint8_t regid_,
if (regid_ != regid(63, 0)) {
int i = l->cnt++;
debug_assert(i < ARRAY_SIZE(l->var));
assert(i < ARRAY_SIZE(l->var));
l->var[i].slot = slot;
l->var[i].regid = regid_;

View File

@ -821,7 +821,7 @@ tu6_setup_streamout(struct tu_cs *cs,
if (l->var[idx].slot == v->outputs[k].slot)
break;
debug_assert(idx < l->cnt);
assert(idx < l->cnt);
for (unsigned j = 0; j < out->num_components; j++) {
unsigned c = j + out->start_component;

View File

@@ -386,8 +386,8 @@ void draw_set_viewport_states( struct draw_context *draw,
const struct pipe_viewport_state *viewport = vps;
draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);
debug_assert(start_slot < PIPE_MAX_VIEWPORTS);
debug_assert((start_slot + num_viewports) <= PIPE_MAX_VIEWPORTS);
assert(start_slot < PIPE_MAX_VIEWPORTS);
assert((start_slot + num_viewports) <= PIPE_MAX_VIEWPORTS);
memcpy(draw->viewports + start_slot, vps,
sizeof(struct pipe_viewport_state) * num_viewports);
@ -455,11 +455,11 @@ draw_set_mapped_constant_buffer(struct draw_context *draw,
const void *buffer,
unsigned size )
{
debug_assert(shader_type == PIPE_SHADER_VERTEX ||
assert(shader_type == PIPE_SHADER_VERTEX ||
shader_type == PIPE_SHADER_GEOMETRY ||
shader_type == PIPE_SHADER_TESS_CTRL ||
shader_type == PIPE_SHADER_TESS_EVAL);
debug_assert(slot < PIPE_MAX_CONSTANT_BUFFERS);
assert(slot < PIPE_MAX_CONSTANT_BUFFERS);
draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);
@ -492,11 +492,11 @@ draw_set_mapped_shader_buffer(struct draw_context *draw,
const void *buffer,
unsigned size )
{
debug_assert(shader_type == PIPE_SHADER_VERTEX ||
assert(shader_type == PIPE_SHADER_VERTEX ||
shader_type == PIPE_SHADER_GEOMETRY ||
shader_type == PIPE_SHADER_TESS_CTRL ||
shader_type == PIPE_SHADER_TESS_EVAL);
debug_assert(slot < PIPE_MAX_SHADER_BUFFERS);
assert(slot < PIPE_MAX_SHADER_BUFFERS);
draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);
@ -1010,7 +1010,7 @@ draw_current_shader_clipvertex_output(const struct draw_context *draw)
uint
draw_current_shader_ccdistance_output(const struct draw_context *draw, int index)
{
debug_assert(index < PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
assert(index < PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
if (draw->gs.geometry_shader)
return draw->gs.geometry_shader->ccdistance_output[index];
if (draw->tes.tess_eval_shader)
@ -1099,8 +1099,8 @@ draw_set_sampler_views(struct draw_context *draw,
{
unsigned i;
debug_assert(shader_stage < PIPE_SHADER_TYPES);
debug_assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
assert(shader_stage < PIPE_SHADER_TYPES);
assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
@ -1120,8 +1120,8 @@ draw_set_samplers(struct draw_context *draw,
{
unsigned i;
debug_assert(shader_stage < PIPE_SHADER_TYPES);
debug_assert(num <= PIPE_MAX_SAMPLERS);
assert(shader_stage < PIPE_SHADER_TYPES);
assert(num <= PIPE_MAX_SAMPLERS);
draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
@ -1146,8 +1146,8 @@ draw_set_images(struct draw_context *draw,
{
unsigned i;
debug_assert(shader_stage < PIPE_SHADER_TYPES);
debug_assert(num <= PIPE_MAX_SHADER_IMAGES);
assert(shader_stage < PIPE_SHADER_TYPES);
assert(num <= PIPE_MAX_SHADER_IMAGES);
draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

View File

@ -332,8 +332,8 @@ llvm_fetch_gs_outputs(struct draw_geometry_shader *shader,
}
#endif
debug_assert(current_verts <= shader->max_output_vertices);
debug_assert(next_verts <= shader->max_output_vertices);
assert(current_verts <= shader->max_output_vertices);
assert(next_verts <= shader->max_output_vertices);
if (next_verts) {
memmove(output_ptr + (vertex_count + current_verts) * shader->vertex_size,
output_ptr + ((i + 1) * next_prim_boundary) * shader->vertex_size,
@ -419,7 +419,7 @@ static void gs_flush(struct draw_geometry_shader *shader)
shader->draw->statistics.gs_invocations += input_primitives;
}
debug_assert(input_primitives > 0 &&
assert(input_primitives > 0 &&
input_primitives <= 4);
for (unsigned invocation = 0; invocation < shader->num_invocations; invocation++) {
@ -592,7 +592,7 @@ int draw_geometry_shader_run(struct draw_geometry_shader *shader,
(struct vertex_header *)MALLOC(output_verts[i].vertex_size *
total_verts_per_buffer * shader->num_invocations +
DRAW_EXTRA_VERTICES_PADDING);
debug_assert(output_verts[i].verts);
assert(output_verts[i].verts);
}
#if 0
@ -669,7 +669,7 @@ int draw_geometry_shader_run(struct draw_geometry_shader *shader,
if (shader->fetched_prim_count > 0) {
gs_flush(shader);
}
debug_assert(shader->fetched_prim_count == 0);
assert(shader->fetched_prim_count == 0);
/* Update prim_info:
*/
@ -856,7 +856,7 @@ draw_create_geometry_shader(struct draw_context *draw,
gs->clipvertex_output = i;
}
if (gs->info.output_semantic_name[i] == TGSI_SEMANTIC_CLIPDIST) {
debug_assert(gs->info.output_semantic_index[i] <
assert(gs->info.output_semantic_index[i] <
PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
gs->ccdistance_output[gs->info.output_semantic_index[i]] = i;
}

View File

@ -16,7 +16,7 @@
case PIPE_PRIM_QUADS: \
case PIPE_PRIM_QUAD_STRIP: \
case PIPE_PRIM_POLYGON: \
debug_assert(!"unexpected primitive type in GS"); \
assert(!"unexpected primitive type in GS"); \
return; \
default: \
break; \

View File

@ -1251,7 +1251,7 @@ store_aos_array(struct gallivm_state *gallivm,
int vector_length = soa_type.length;
int i;
debug_assert(TGSI_NUM_CHANNELS == 4);
assert(TGSI_NUM_CHANNELS == 4);
for (i = 0; i < vector_length; i++) {
linear_inds[i] = lp_build_const_int32(gallivm, i);

View File

@ -113,7 +113,7 @@ draw_llvm_texture_member(const struct lp_sampler_dynamic_state *base,
LLVMValueRef ptr;
LLVMValueRef res;
debug_assert(texture_unit < PIPE_MAX_SHADER_SAMPLER_VIEWS);
assert(texture_unit < PIPE_MAX_SHADER_SAMPLER_VIEWS);
/* context[0] */
indices[0] = lp_build_const_int32(gallivm, 0);
@ -164,7 +164,7 @@ draw_llvm_sampler_member(const struct lp_sampler_dynamic_state *base,
LLVMValueRef ptr;
LLVMValueRef res;
debug_assert(sampler_unit < PIPE_MAX_SAMPLERS);
assert(sampler_unit < PIPE_MAX_SAMPLERS);
/* context[0] */
indices[0] = lp_build_const_int32(gallivm, 0);
@ -210,7 +210,7 @@ draw_llvm_image_member(const struct lp_sampler_dynamic_state *base,
LLVMValueRef ptr;
LLVMValueRef res;
debug_assert(image_unit < PIPE_MAX_SHADER_IMAGES);
assert(image_unit < PIPE_MAX_SHADER_IMAGES);
/* context[0] */
indices[0] = lp_build_const_int32(gallivm, 0);

View File

@ -59,7 +59,7 @@ static void user_cull_point( struct draw_stage *stage,
draw_current_shader_num_written_clipdistances(stage->draw);
unsigned i;
debug_assert(num_written_culldistances);
assert(num_written_culldistances);
for (i = 0; i < num_written_culldistances; ++i) {
unsigned cull_idx = (num_written_clipdistances + i) / 4;
@ -89,7 +89,7 @@ static void user_cull_line( struct draw_stage *stage,
draw_current_shader_num_written_clipdistances(stage->draw);
unsigned i;
debug_assert(num_written_culldistances);
assert(num_written_culldistances);
for (i = 0; i < num_written_culldistances; ++i) {
unsigned cull_idx = (num_written_clipdistances + i) / 4;
@ -120,7 +120,7 @@ static void user_cull_tri( struct draw_stage *stage,
draw_current_shader_num_written_clipdistances(stage->draw);
unsigned i;
debug_assert(num_written_culldistances);
assert(num_written_culldistances);
/* Do the distance culling */
for (i = 0; i < num_written_culldistances; ++i) {

View File

@ -12,7 +12,7 @@
const boolean last_vertex_last = !asmblr->draw->rasterizer->flatshade_first; \
switch (prim) { \
case PIPE_PRIM_POLYGON: \
debug_assert(!"unexpected primitive type in prim assembler"); \
assert(!"unexpected primitive type in prim assembler"); \
return; \
default: \
break; \

View File

@ -446,7 +446,7 @@ resolve_draw_info(const struct pipe_draw_info *raw_info,
target->internal_offset / vertex_buffer->stride;
/* Stream output draw can not be indexed */
debug_assert(!info->index_size);
assert(!info->index_size);
info->max_index = draw->count - 1;
}

View File

@ -17,7 +17,7 @@
case PIPE_PRIM_LINE_STRIP_ADJACENCY: \
case PIPE_PRIM_TRIANGLES_ADJACENCY: \
case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY: \
debug_assert(!"unexpected primitive type in stream output"); \
assert(!"unexpected primitive type in stream output"); \
return; \
default: \
break; \

View File

@ -568,7 +568,7 @@ draw_create_tess_eval_shader(struct draw_context *draw,
tes->clipvertex_output = i;
}
if (tes->info.output_semantic_name[i] == TGSI_SEMANTIC_CLIPDIST) {
debug_assert(tes->info.output_semantic_index[i] <
assert(tes->info.output_semantic_index[i] <
PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
tes->ccdistance_output[tes->info.output_semantic_index[i]] = i;
}

View File

@ -103,7 +103,7 @@ draw_create_vertex_shader(struct draw_context *draw,
} else if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX)
vs->viewport_index_output = i;
else if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_CLIPDIST) {
debug_assert(vs->info.output_semantic_index[i] <
assert(vs->info.output_semantic_index[i] <
PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
vs->ccdistance_output[vs->info.output_semantic_index[i]] = i;
}

View File

@ -68,7 +68,7 @@ vs_exec_prepare(struct draw_vertex_shader *shader,
{
struct exec_vertex_shader *evs = exec_vertex_shader(shader);
debug_assert(!draw->llvm);
assert(!draw->llvm);
/* Specify the vertex program to interpret/execute.
* Avoid rebinding when possible.
*/
@ -105,7 +105,7 @@ vs_exec_run_linear(struct draw_vertex_shader *shader,
unsigned slot;
boolean clamp_vertex_color = shader->draw->rasterizer->clamp_vertex_color;
debug_assert(!shader->draw->llvm);
assert(!shader->draw->llvm);
tgsi_exec_set_constant_buffers(machine, PIPE_MAX_CONSTANT_BUFFERS,
constants, const_size);

View File

@ -60,7 +60,7 @@ vs_llvm_run_linear( struct draw_vertex_shader *shader,
{
/* we should never get here since the entire pipeline is
* generated in draw_pt_fetch_shade_pipeline_llvm.c */
debug_assert(0);
assert(0);
}

View File

@ -71,14 +71,14 @@ build_binary_int_overflow(struct gallivm_state *gallivm,
LLVMValueRef oresult;
LLVMTypeRef otype;
debug_assert(LLVMTypeOf(a) == LLVMTypeOf(b));
assert(LLVMTypeOf(a) == LLVMTypeOf(b));
type_ref = LLVMTypeOf(a);
type_kind = LLVMGetTypeKind(type_ref);
debug_assert(type_kind == LLVMIntegerTypeKind);
assert(type_kind == LLVMIntegerTypeKind);
type_width = LLVMGetIntTypeWidth(type_ref);
debug_assert(type_width == 16 || type_width == 32 || type_width == 64);
assert(type_width == 16 || type_width == 32 || type_width == 64);
snprintf(intr_str, sizeof intr_str, "%s.i%u",
intr_prefix, type_width);

View File

@ -58,7 +58,7 @@ convert_to_soa(struct gallivm_state *gallivm,
LLVMValueRef aos_channels[4];
unsigned pixels_per_channel = soa_type.length / 4;
debug_assert((soa_type.length % 4) == 0);
assert((soa_type.length % 4) == 0);
aos_channel_type.length >>= 1;

View File

@ -2671,10 +2671,7 @@ lp_build_sample_common(struct lp_build_sample_context *bld,
switch (mip_filter) {
default:
debug_assert(0 && "bad mip_filter value in lp_build_sample_soa()");
#if defined(NDEBUG) || defined(DEBUG)
FALLTHROUGH;
#endif
assert(0 && "bad mip_filter value in lp_build_sample_soa()");
case PIPE_TEX_MIPFILTER_NONE:
/* always use mip level 0 */
first_level = bld->dynamic_state->first_level(bld->dynamic_state,

View File

@ -4573,7 +4573,7 @@ lp_build_tgsi_soa(struct gallivm_state *gallivm,
/* There's no specific value for this because it should always
* be set, but apps using ext_geometry_shader4 quite often
* were forgetting so we're using MAX_VERTEX_VARYING from
* that spec even though we could debug_assert if it's not
* that spec even though we could assert if it's not
* set, but that's a lot uglier. */
uint max_output_vertices;

View File

@ -210,8 +210,8 @@ pb_debug_buffer_check(struct pb_debug_buffer *buf)
if(underflow || overflow)
debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);
debug_assert(!underflow);
debug_assert(!overflow);
assert(!underflow);
assert(!overflow);
/* re-fill if not aborted */
if(underflow)

View File

@ -1876,7 +1876,7 @@ emit_primitive(struct tgsi_exec_machine *mach,
prim_count = &mach->OutputPrimCount[stream_id];
if (mach->ExecMask) {
++(*prim_count);
debug_assert((*prim_count * mach->NumOutputs) < TGSI_MAX_TOTAL_VERTICES);
assert((*prim_count * mach->NumOutputs) < TGSI_MAX_TOTAL_VERTICES);
mach->Primitives[stream_id][*prim_count] = 0;
}
}

View File

@ -91,7 +91,7 @@ struct u_transfer {
static inline struct u_transfer *
u_transfer(struct pipe_transfer *ptrans)
{
debug_assert(handle_transfer(ptrans->resource));
assert(handle_transfer(ptrans->resource));
return (struct u_transfer *)ptrans;
}
@ -259,7 +259,7 @@ u_transfer_helper_transfer_map(struct pipe_context *pctx,
if (helper->msaa_map && (prsc->nr_samples > 1))
return transfer_map_msaa(pctx, prsc, level, usage, box, pptrans);
debug_assert(box->depth == 1);
assert(box->depth == 1);
trans = calloc(1, sizeof(*trans));
if (!trans)
@ -601,7 +601,7 @@ u_transfer_helper_deinterleave_transfer_map(struct pipe_context *pctx,
if (!need_interleave_path(helper, format))
return helper->vtbl->transfer_map(pctx, prsc, level, usage, box, pptrans);
debug_assert(box->depth == 1);
assert(box->depth == 1);
trans = calloc(1, sizeof(*trans));
if (!trans)

View File

@ -89,7 +89,7 @@ perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch) assert_dt
const struct fd_perfcntr_group *g = &screen->perfcntr_groups[entry->gid];
unsigned counter_idx = counters_per_group[entry->gid]++;
debug_assert(counter_idx < g->num_counters);
assert(counter_idx < g->num_counters);
OUT_PKT0(ring, g->counters[counter_idx].select_reg, 1);
OUT_RING(ring, g->countables[entry->cid].selector);

View File

@ -106,7 +106,7 @@ fd3_emit_const_ptrs(struct fd_ringbuffer *ring, gl_shader_stage type,
uint32_t anum = align(num, 4);
uint32_t i;
debug_assert((regid % 4) == 0);
assert((regid % 4) == 0);
OUT_PKT3(ring, CP_LOAD_STATE, 2 + anum);
OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid / 2) |
@ -330,7 +330,7 @@ fd3_emit_gmem_restore_tex(struct fd_ringbuffer *ring,
/* note: PIPE_BUFFER disallowed for surfaces */
unsigned lvl = psurf[i]->u.tex.level;
debug_assert(psurf[i]->u.tex.first_layer == psurf[i]->u.tex.last_layer);
assert(psurf[i]->u.tex.first_layer == psurf[i]->u.tex.last_layer);
OUT_RING(ring, A3XX_TEX_CONST_0_TILE_MODE(rsc->layout.tile_mode) |
A3XX_TEX_CONST_0_FMT(fd3_pipe2tex(format)) |
@ -424,7 +424,7 @@ fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit)
uint32_t off = vb->buffer_offset + elem->src_offset;
uint32_t fs = util_format_get_blocksize(pfmt);
debug_assert(fmt != VFMT_NONE);
assert(fmt != VFMT_NONE);
OUT_PKT0(ring, REG_A3XX_VFD_FETCH(j), 2);
OUT_RING(ring, A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(fs - 1) |

View File

@ -99,7 +99,7 @@ emit_mrt(struct fd_ringbuffer *ring, unsigned nr_bufs,
else
pformat = util_format_linear(pformat);
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
offset = fd_resource_offset(rsc, psurf->u.tex.level,
psurf->u.tex.first_layer);
@ -350,7 +350,7 @@ emit_gmem2mem_surf(struct fd_batch *batch,
fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
uint32_t pitch = fd_resource_pitch(rsc, psurf->u.tex.level);
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
OUT_PKT0(ring, REG_A3XX_RB_COPY_CONTROL, 4);
OUT_RING(ring, A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(MSAA_ONE) |

View File

@ -104,7 +104,7 @@ fd3_program_emit(struct fd_ringbuffer *ring, struct fd3_emit *emit, int nr,
int constmode;
int i, j;
debug_assert(nr <= ARRAY_SIZE(color_regid));
assert(nr <= ARRAY_SIZE(color_regid));
vp = fd3_emit_get_vp(emit);
fp = fd3_emit_get_fp(emit);

View File

@ -98,7 +98,7 @@ fd4_emit_const_ptrs(struct fd_ringbuffer *ring, gl_shader_stage type,
uint32_t anum = align(num, 4);
uint32_t i;
debug_assert((regid % 4) == 0);
assert((regid % 4) == 0);
OUT_PKT3(ring, CP_LOAD_STATE4, 2 + anum);
OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid / 4) |
@ -241,7 +241,7 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
view = tex->textures[idx] ? fd4_pipe_sampler_view(tex->textures[idx])
: &dummy_view;
debug_assert(view->texconst0 & A4XX_TEX_CONST_0_SRGB);
assert(view->texconst0 & A4XX_TEX_CONST_0_SRGB);
OUT_RING(ring, view->texconst0 & ~A4XX_TEX_CONST_0_SRGB);
OUT_RING(ring, view->texconst1);
@ -320,7 +320,7 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
break;
default:
debug_assert(0);
assert(0);
}
}
@ -339,8 +339,8 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
OUT_RING(ring, 0x00000000);
}
} else {
debug_assert(v->astc_srgb.count == 0);
debug_assert(v->tg4.count == 0);
assert(v->astc_srgb.count == 0);
assert(v->tg4.count == 0);
}
if (needs_border) {
@ -428,7 +428,7 @@ fd4_emit_gmem_restore_tex(struct fd_ringbuffer *ring, unsigned nr_bufs,
(format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT))
mrt_comp[i] = 0;
debug_assert(bufs[i]->u.tex.first_layer == bufs[i]->u.tex.last_layer);
assert(bufs[i]->u.tex.first_layer == bufs[i]->u.tex.last_layer);
OUT_RING(ring, A4XX_TEX_CONST_0_FMT(fd4_pipe2tex(format)) |
A4XX_TEX_CONST_0_TYPE(A4XX_TEX_2D) |
@ -569,7 +569,7 @@ fd4_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd4_emit *emit)
uint32_t fs = util_format_get_blocksize(pfmt);
uint32_t off = vb->buffer_offset + elem->src_offset;
uint32_t size = vb->buffer.resource->width0 - off;
debug_assert(fmt != VFMT4_NONE);
assert(fmt != VFMT4_NONE);
OUT_PKT0(ring, REG_A4XX_VFD_FETCH(j), 4);
OUT_RING(ring, A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(fs - 1) |

View File

@ -103,7 +103,7 @@ emit_mrt(struct fd_ringbuffer *ring, unsigned nr_bufs,
else
pformat = util_format_linear(pformat);
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
offset = fd_resource_offset(rsc, psurf->u.tex.level,
psurf->u.tex.first_layer);
@ -170,7 +170,7 @@ emit_gmem2mem_surf(struct fd_batch *batch, bool stencil, uint32_t base,
return;
if (stencil) {
debug_assert(rsc->stencil);
assert(rsc->stencil);
rsc = rsc->stencil;
pformat = rsc->b.b.format;
}
@ -179,7 +179,7 @@ emit_gmem2mem_surf(struct fd_batch *batch, bool stencil, uint32_t base,
fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
pitch = fd_resource_pitch(rsc, psurf->u.tex.level);
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
OUT_PKT0(ring, REG_A4XX_RB_COPY_CONTROL, 4);
OUT_RING(ring, A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(MSAA_ONE) |

View File

@ -162,7 +162,7 @@ fd4_program_emit(struct fd_ringbuffer *ring, struct fd4_emit *emit, int nr,
int constmode;
int i, j;
debug_assert(nr <= ARRAY_SIZE(color_regid));
assert(nr <= ARRAY_SIZE(color_regid));
if (emit->binning_pass)
nr = 0;

View File

@ -53,7 +53,7 @@ occlusion_get_sample(struct fd_batch *batch, struct fd_ringbuffer *ring)
/* low bits of sample addr should be zero (since they are control
* flags in RB_SAMPLE_COUNT_CONTROL):
*/
debug_assert((samp->offset & 0x3) == 0);
assert((samp->offset & 0x3) == 0);
/* Set RB_SAMPLE_COUNT_ADDR to samp->offset plus value of
* HW_QUERY_BASE_REG register:
@ -135,7 +135,7 @@ time_elapsed_get_sample(struct fd_batch *batch,
const int sample_off = 128;
const int addr_off = sample_off + 8;
debug_assert(batch->ctx->screen->max_freq > 0);
assert(batch->ctx->screen->max_freq > 0);
/* Basic issue is that we need to read counter value to a relative
* destination (with per-tile offset) rather than absolute dest

View File

@ -250,7 +250,7 @@ fd4_set_sampler_views(struct pipe_context *pctx, enum pipe_shader_type shader,
} else if (shader == PIPE_SHADER_COMPUTE) {
sampler_swizzles = fd4_ctx->csampler_swizzles;
} else {
debug_assert(0);
assert(0);
sampler_swizzles = fd4_ctx->csampler_swizzles;
}
@ -279,7 +279,7 @@ fd4_set_sampler_views(struct pipe_context *pctx, enum pipe_shader_type shader,
sampler_swizzles[start + i] |= 0x4000;
break;
default:
debug_assert(0);
assert(0);
}
}
}

View File

@ -118,9 +118,9 @@ can_do_blit(const struct pipe_blit_info *info)
if (!ok_dims(info->dst.resource, &info->dst.box, info->dst.level))
return false;
debug_assert(info->dst.box.width >= 0);
debug_assert(info->dst.box.height >= 0);
debug_assert(info->dst.box.depth >= 0);
assert(info->dst.box.width >= 0);
assert(info->dst.box.height >= 0);
assert(info->dst.box.depth >= 0);
if ((info->dst.resource->nr_samples > 1) ||
(info->src.resource->nr_samples > 1))
@ -198,16 +198,16 @@ emit_blit_buffer(struct fd_ringbuffer *ring, const struct pipe_blit_info *info)
src = fd_resource(info->src.resource);
dst = fd_resource(info->dst.resource);
debug_assert(src->layout.cpp == 1);
debug_assert(dst->layout.cpp == 1);
debug_assert(info->src.resource->format == info->dst.resource->format);
debug_assert((sbox->y == 0) && (sbox->height == 1));
debug_assert((dbox->y == 0) && (dbox->height == 1));
debug_assert((sbox->z == 0) && (sbox->depth == 1));
debug_assert((dbox->z == 0) && (dbox->depth == 1));
debug_assert(sbox->width == dbox->width);
debug_assert(info->src.level == 0);
debug_assert(info->dst.level == 0);
assert(src->layout.cpp == 1);
assert(dst->layout.cpp == 1);
assert(info->src.resource->format == info->dst.resource->format);
assert((sbox->y == 0) && (sbox->height == 1));
assert((dbox->y == 0) && (dbox->height == 1));
assert((sbox->z == 0) && (sbox->depth == 1));
assert((dbox->z == 0) && (dbox->depth == 1));
assert(sbox->width == dbox->width);
assert(info->src.level == 0);
assert(info->dst.level == 0);
/*
* Buffers can have dimensions bigger than max width, remap into
@ -239,8 +239,8 @@ emit_blit_buffer(struct fd_ringbuffer *ring, const struct pipe_blit_info *info)
w = MIN2(sbox->width - off, (0x4000 - 0x40));
p = align(w, 64);
debug_assert((soff + w) <= fd_bo_size(src->bo));
debug_assert((doff + w) <= fd_bo_size(dst->bo));
assert((soff + w) <= fd_bo_size(src->bo));
assert((doff + w) <= fd_bo_size(dst->bo));
OUT_PKT7(ring, CP_SET_RENDER_MODE, 1);
OUT_RING(ring, CP_SET_RENDER_MODE_0_MODE(BLIT2D));
@ -336,7 +336,7 @@ emit_blit(struct fd_ringbuffer *ring, const struct pipe_blit_info *info)
* dst swap mode (so we don't change component order)
*/
if (stile || dtile) {
debug_assert(info->src.format == info->dst.format);
assert(info->src.format == info->dst.format);
sswap = dswap = WZYX;
}
@ -357,8 +357,8 @@ emit_blit(struct fd_ringbuffer *ring, const struct pipe_blit_info *info)
unsigned soff = fd_resource_offset(src, info->src.level, sbox->z + i);
unsigned doff = fd_resource_offset(dst, info->dst.level, dbox->z + i);
debug_assert((soff + (sbox->height * spitch)) <= fd_bo_size(src->bo));
debug_assert((doff + (dbox->height * dpitch)) <= fd_bo_size(dst->bo));
assert((soff + (sbox->height * spitch)) <= fd_bo_size(src->bo));
assert((doff + (dbox->height * dpitch)) <= fd_bo_size(dst->bo));
OUT_PKT7(ring, CP_SET_RENDER_MODE, 1);
OUT_RING(ring, CP_SET_RENDER_MODE_0_MODE(BLIT2D));
@ -455,8 +455,8 @@ fd5_blitter_blit(struct fd_context *ctx,
emit_blit_buffer(batch->draw, info);
} else {
/* I don't *think* we need to handle blits between buffer <-> !buffer */
debug_assert(info->src.resource->target != PIPE_BUFFER);
debug_assert(info->dst.resource->target != PIPE_BUFFER);
assert(info->src.resource->target != PIPE_BUFFER);
assert(info->dst.resource->target != PIPE_BUFFER);
emit_blit(batch->draw, info);
}

View File

@ -101,7 +101,7 @@ fd5_emit_const_ptrs(struct fd_ringbuffer *ring, gl_shader_stage type,
uint32_t anum = align(num, 2);
uint32_t i;
debug_assert((regid % 4) == 0);
assert((regid % 4) == 0);
OUT_PKT7(ring, CP_LOAD_STATE4, 3 + (2 * anum));
OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid / 4) |
@ -488,7 +488,7 @@ fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit)
bool isint = util_format_is_pure_integer(pfmt);
uint32_t off = vb->buffer_offset + elem->src_offset;
uint32_t size = vb->buffer.resource->width0 - off;
debug_assert(fmt != VFMT5_NONE);
assert(fmt != VFMT5_NONE);
OUT_PKT4(ring, REG_A5XX_VFD_FETCH(j), 4);
OUT_RELOC(ring, rsc->bo, off, 0, 0);

View File

@ -77,7 +77,7 @@ emit_mrt(struct fd_ringbuffer *ring, unsigned nr_bufs,
sint = util_format_is_pure_sint(pformat);
uint = util_format_is_pure_uint(pformat);
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
offset = fd_resource_offset(rsc, psurf->u.tex.level,
psurf->u.tex.first_layer);
@ -503,7 +503,7 @@ emit_mem2gmem_surf(struct fd_batch *batch, uint32_t base,
struct fd_resource *rsc = fd_resource(psurf->texture);
uint32_t stride, size;
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
if (buf == BLIT_S)
rsc = rsc->stencil;
@ -634,7 +634,7 @@ emit_gmem2mem_surf(struct fd_batch *batch, uint32_t base,
fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
pitch = fd_resource_pitch(rsc, psurf->u.tex.level);
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
OUT_PKT4(ring, REG_A5XX_RB_BLIT_FLAG_DST_LO, 4);
OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_LO */

View File

@ -108,7 +108,7 @@ emit_stream_out(struct fd_ringbuffer *ring, const struct ir3_shader_variant *v,
if (l->var[idx].slot == v->outputs[k].slot)
break;
debug_assert(idx < l->cnt);
assert(idx < l->cnt);
for (unsigned j = 0; j < out->num_components; j++) {
unsigned c = j + out->start_component;

View File

@ -283,7 +283,7 @@ perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch) assert_dt
const struct fd_perfcntr_group *g = &screen->perfcntr_groups[entry->gid];
unsigned counter_idx = counters_per_group[entry->gid]++;
debug_assert(counter_idx < g->num_counters);
assert(counter_idx < g->num_counters);
OUT_PKT4(ring, g->counters[counter_idx].select_reg, 1);
OUT_RING(ring, g->countables[entry->cid].selector);

View File

@ -198,16 +198,16 @@ can_do_blit(const struct pipe_blit_info *info)
fail_if(!ok_format(info->src.format));
fail_if(!ok_format(info->dst.format));
debug_assert(!util_format_is_compressed(info->src.format));
debug_assert(!util_format_is_compressed(info->dst.format));
assert(!util_format_is_compressed(info->src.format));
assert(!util_format_is_compressed(info->dst.format));
fail_if(!ok_dims(info->src.resource, &info->src.box, info->src.level));
fail_if(!ok_dims(info->dst.resource, &info->dst.box, info->dst.level));
debug_assert(info->dst.box.width >= 0);
debug_assert(info->dst.box.height >= 0);
debug_assert(info->dst.box.depth >= 0);
assert(info->dst.box.width >= 0);
assert(info->dst.box.height >= 0);
assert(info->dst.box.depth >= 0);
fail_if(info->dst.resource->nr_samples > 1);
@ -315,16 +315,16 @@ emit_blit_buffer(struct fd_context *ctx, struct fd_ringbuffer *ring,
src = fd_resource(info->src.resource);
dst = fd_resource(info->dst.resource);
debug_assert(src->layout.cpp == 1);
debug_assert(dst->layout.cpp == 1);
debug_assert(info->src.resource->format == info->dst.resource->format);
debug_assert((sbox->y == 0) && (sbox->height == 1));
debug_assert((dbox->y == 0) && (dbox->height == 1));
debug_assert((sbox->z == 0) && (sbox->depth == 1));
debug_assert((dbox->z == 0) && (dbox->depth == 1));
debug_assert(sbox->width == dbox->width);
debug_assert(info->src.level == 0);
debug_assert(info->dst.level == 0);
assert(src->layout.cpp == 1);
assert(dst->layout.cpp == 1);
assert(info->src.resource->format == info->dst.resource->format);
assert((sbox->y == 0) && (sbox->height == 1));
assert((dbox->y == 0) && (dbox->height == 1));
assert((sbox->z == 0) && (sbox->depth == 1));
assert((dbox->z == 0) && (dbox->depth == 1));
assert(sbox->width == dbox->width);
assert(info->src.level == 0);
assert(info->dst.level == 0);
/*
* Buffers can have dimensions bigger than max width, remap into
@ -358,8 +358,8 @@ emit_blit_buffer(struct fd_context *ctx, struct fd_ringbuffer *ring,
w = MIN2(sbox->width - off, (0x4000 - 0x40));
p = align(w, 64);
debug_assert((soff + w) <= fd_bo_size(src->bo));
debug_assert((doff + w) <= fd_bo_size(dst->bo));
assert((soff + w) <= fd_bo_size(src->bo));
assert((doff + w) <= fd_bo_size(dst->bo));
/*
* Emit source:
@ -905,7 +905,7 @@ handle_rgba_blit(struct fd_context *ctx,
{
struct fd_batch *batch;
debug_assert(!(info->mask & PIPE_MASK_ZS));
assert(!(info->mask & PIPE_MASK_ZS));
if (!can_do_blit(info))
return false;
@ -950,8 +950,8 @@ handle_rgba_blit(struct fd_context *ctx,
emit_blit_buffer(ctx, batch->draw, info);
} else {
/* I don't *think* we need to handle blits between buffer <-> !buffer */
debug_assert(info->src.resource->target != PIPE_BUFFER);
debug_assert(info->dst.resource->target != PIPE_BUFFER);
assert(info->src.resource->target != PIPE_BUFFER);
assert(info->dst.resource->target != PIPE_BUFFER);
emit_blit_texture(ctx, batch->draw, info, sample_0);
}
@ -992,7 +992,7 @@ do_rewritten_blit(struct fd_context *ctx,
mesa_logw("sample averaging on fallback blit when we shouldn't.");
success = fd_blitter_blit(ctx, info);
}
debug_assert(success); /* fallback should never fail! */
assert(success); /* fallback should never fail! */
return success;
}
@ -1019,7 +1019,7 @@ handle_zs_blit(struct fd_context *ctx,
switch (info->dst.format) {
case PIPE_FORMAT_S8_UINT:
debug_assert(info->mask == PIPE_MASK_S);
assert(info->mask == PIPE_MASK_S);
blit.mask = PIPE_MASK_R;
blit.src.format = PIPE_FORMAT_R8_UINT;
blit.dst.format = PIPE_FORMAT_R8_UINT;
@ -1052,7 +1052,7 @@ handle_zs_blit(struct fd_context *ctx,
case PIPE_FORMAT_Z32_UNORM:
case PIPE_FORMAT_Z32_FLOAT:
debug_assert(info->mask == PIPE_MASK_Z);
assert(info->mask == PIPE_MASK_Z);
blit.mask = PIPE_MASK_R;
blit.src.format = PIPE_FORMAT_R32_UINT;
blit.dst.format = PIPE_FORMAT_R32_UINT;
@ -1107,7 +1107,7 @@ handle_compressed_blit(struct fd_context *ctx,
if (util_format_get_blocksize(info->src.format) == 8) {
blit.src.format = blit.dst.format = PIPE_FORMAT_R16G16B16A16_UINT;
} else {
debug_assert(util_format_get_blocksize(info->src.format) == 16);
assert(util_format_get_blocksize(info->src.format) == 16);
blit.src.format = blit.dst.format = PIPE_FORMAT_R32G32B32A32_UINT;
}
@ -1119,16 +1119,16 @@ handle_compressed_blit(struct fd_context *ctx,
* be:
*/
debug_assert((blit.src.box.x % bw) == 0);
debug_assert((blit.src.box.y % bh) == 0);
assert((blit.src.box.x % bw) == 0);
assert((blit.src.box.y % bh) == 0);
blit.src.box.x /= bw;
blit.src.box.y /= bh;
blit.src.box.width = DIV_ROUND_UP(blit.src.box.width, bw);
blit.src.box.height = DIV_ROUND_UP(blit.src.box.height, bh);
debug_assert((blit.dst.box.x % bw) == 0);
debug_assert((blit.dst.box.y % bh) == 0);
assert((blit.dst.box.x % bw) == 0);
assert((blit.dst.box.y % bh) == 0);
blit.dst.box.x /= bw;
blit.dst.box.y /= bh;

View File

@ -88,7 +88,7 @@ fd6_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
enum pipe_format pfmt = elem->src_format;
enum a6xx_format fmt = fd6_vertex_format(pfmt);
bool isint = util_format_is_pure_integer(pfmt);
debug_assert(fmt != FMT6_NONE);
assert(fmt != FMT6_NONE);
OUT_RING(ring, A6XX_VFD_DECODE_INSTR_IDX(elem->vertex_buffer_index) |
A6XX_VFD_DECODE_INSTR_OFFSET(elem->src_offset) |

View File

@ -497,7 +497,7 @@ fd6_emit_combined_textures(struct fd_ringbuffer *ring, struct fd6_emit *emit,
[PIPE_SHADER_FRAGMENT] = {FD6_GROUP_FS_TEX, ENABLE_DRAW},
};
debug_assert(s[type].state_id);
assert(s[type].state_id);
if (!v->image_mapping.num_tex && !v->fb_read) {
/* in the fast-path, when we don't have to mix in any image/SSBO
@ -830,11 +830,11 @@ build_ibo(struct fd6_emit *emit) assert_dt
struct fd_context *ctx = emit->ctx;
if (emit->hs) {
debug_assert(ir3_shader_nibo(emit->hs) == 0);
debug_assert(ir3_shader_nibo(emit->ds) == 0);
assert(ir3_shader_nibo(emit->hs) == 0);
assert(ir3_shader_nibo(emit->ds) == 0);
}
if (emit->gs) {
debug_assert(ir3_shader_nibo(emit->gs) == 0);
assert(ir3_shader_nibo(emit->gs) == 0);
}
struct fd_ringbuffer *ibo_state =
@ -1154,7 +1154,7 @@ fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit)
struct fd6_state_group *g = &emit->groups[i];
unsigned n = g->stateobj ? fd_ringbuffer_size(g->stateobj) / 4 : 0;
debug_assert((g->enable_mask & ~ENABLE_ALL) == 0);
assert((g->enable_mask & ~ENABLE_ALL) == 0);
if (n == 0) {
OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |

View File

@ -133,7 +133,7 @@ static inline void
fd6_emit_take_group(struct fd6_emit *emit, struct fd_ringbuffer *stateobj,
enum fd6_state_id group_id, unsigned enable_mask)
{
debug_assert(emit->num_groups < ARRAY_SIZE(emit->groups));
assert(emit->num_groups < ARRAY_SIZE(emit->groups));
struct fd6_state_group *g = &emit->groups[emit->num_groups++];
g->stateobj = stateobj;
g->group_id = group_id;

View File

@ -122,7 +122,7 @@ emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
max_layer_index = psurf->u.tex.last_layer - psurf->u.tex.first_layer;
debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));
assert((offset + slice->size0) <= fd_bo_size(rsc->bo));
OUT_REG(
ring,
@ -482,8 +482,8 @@ emit_vsc_overflow_test(struct fd_batch *batch)
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
debug_assert((fd6_ctx->vsc_draw_strm_pitch & 0x3) == 0);
debug_assert((fd6_ctx->vsc_prim_strm_pitch & 0x3) == 0);
assert((fd6_ctx->vsc_draw_strm_pitch & 0x3) == 0);
assert((fd6_ctx->vsc_prim_strm_pitch & 0x3) == 0);
/* Check for overflow, write vsc_scratch if detected: */
for (int i = 0; i < gmem->num_vsc_pipes; i++) {
@ -687,7 +687,7 @@ emit_binning_pass(struct fd_batch *batch) assert_dt
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_screen *screen = batch->ctx->screen;
debug_assert(!batch->tessellation);
assert(!batch->tessellation);
set_scissor(ring, 0, 0, gmem->width - 1, gmem->height - 1);
@ -988,7 +988,7 @@ emit_blit(struct fd_batch *batch, struct fd_ringbuffer *ring, uint32_t base,
uint32_t offset;
bool ubwc_enabled;
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
/* separate stencil case: */
if (stencil) {
@ -1000,7 +1000,7 @@ emit_blit(struct fd_batch *batch, struct fd_ringbuffer *ring, uint32_t base,
fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
uint32_t tile_mode = fd_resource_tile_mode(&rsc->b.b, psurf->u.tex.level);
enum a6xx_format format = fd6_color_format(pfmt, tile_mode);

View File

@ -216,7 +216,7 @@ setup_stream_out(struct fd_context *ctx, struct fd6_program_state *state,
if (l->var[idx].slot == v->outputs[k].slot)
break;
debug_assert(idx < l->cnt);
assert(idx < l->cnt);
for (unsigned j = 0; j < out->num_components; j++) {
unsigned c = j + out->start_component;
@ -302,7 +302,7 @@ setup_config_stateobj(struct fd_context *ctx, struct fd6_program_state *state)
.fs_state = true, .cs_state = true,
.gfx_ibo = true, .cs_ibo = true, ));
debug_assert(state->vs->constlen >= state->bs->constlen);
assert(state->vs->constlen >= state->bs->constlen);
OUT_PKT4(ring, REG_A6XX_HLSQ_VS_CNTL, 4);
OUT_RING(ring, A6XX_HLSQ_VS_CNTL_CONSTLEN(state->vs->constlen) |
@ -655,7 +655,7 @@ setup_stateobj(struct fd_ringbuffer *ring, struct fd_context *ctx,
setup_stream_out_disable(ctx);
}
debug_assert(l.cnt <= 32);
assert(l.cnt <= 32);
if (gs)
OUT_PKT4(ring, REG_A6XX_SP_GS_OUT_REG(0), DIV_ROUND_UP(l.cnt, 2));
else if (ds)
@ -1263,7 +1263,7 @@ fd6_program_create(void *data, struct ir3_shader_variant *bs,
for (unsigned i = 0; i < bs->inputs_count; i++) {
if (vs->inputs[i].sysval)
continue;
debug_assert(bs->inputs[i].regid == vs->inputs[i].regid);
assert(bs->inputs[i].regid == vs->inputs[i].regid);
}
}
#endif

View File

@ -482,7 +482,7 @@ perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch) assert_dt
const struct fd_perfcntr_group *g = &screen->perfcntr_groups[entry->gid];
unsigned counter_idx = counters_per_group[entry->gid]++;
debug_assert(counter_idx < g->num_counters);
assert(counter_idx < g->num_counters);
OUT_PKT4(ring, g->counters[counter_idx].select_reg, 1);
OUT_RING(ring, g->countables[entry->cid].selector);

View File

@ -256,7 +256,7 @@ batch_reset_resources(struct fd_batch *batch)
set_foreach (batch->resources, entry) {
struct fd_resource *rsc = (struct fd_resource *)entry->key;
_mesa_set_remove(batch->resources, entry);
debug_assert(rsc->track->batch_mask & (1 << batch->idx));
assert(rsc->track->batch_mask & (1 << batch->idx));
rsc->track->batch_mask &= ~(1 << batch->idx);
if (rsc->track->write_batch == batch)
fd_batch_reference_locked(&rsc->track->write_batch, NULL);
@ -297,12 +297,12 @@ __fd_batch_destroy(struct fd_batch *batch)
fd_bc_invalidate_batch(batch, true);
batch_reset_resources(batch);
debug_assert(batch->resources->entries == 0);
assert(batch->resources->entries == 0);
_mesa_set_destroy(batch->resources, NULL);
fd_screen_unlock(ctx->screen);
batch_reset_dependencies(batch);
debug_assert(batch->dependents_mask == 0);
assert(batch->dependents_mask == 0);
util_copy_framebuffer_state(&batch->framebuffer, NULL);
batch_fini(batch);
@ -367,7 +367,7 @@ batch_flush(struct fd_batch *batch) assert_dt
fd_gmem_render_tiles(batch);
debug_assert(batch->reference.count > 0);
assert(batch->reference.count > 0);
cleanup_submit(batch);
fd_batch_unlock_submit(batch);
@ -412,7 +412,7 @@ fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
return;
/* a loop should not be possible */
debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));
assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));
struct fd_batch *other = NULL;
fd_batch_reference_locked(&other, dep);
@ -438,11 +438,11 @@ fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
if (likely(fd_batch_references_resource(batch, rsc))) {
debug_assert(_mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc));
assert(_mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc));
return;
}
debug_assert(!_mesa_set_search(batch->resources, rsc));
assert(!_mesa_set_search(batch->resources, rsc));
_mesa_set_add_pre_hashed(batch->resources, rsc->hash, rsc);
rsc->track->batch_mask |= (1 << batch->idx);

View File

@ -400,7 +400,7 @@ alloc_batch_locked(struct fd_batch_cache *cache, struct fd_context *ctx,
batch->idx = idx;
cache->batch_mask |= (1 << idx);
debug_assert(cache->batches[idx] == NULL);
assert(cache->batches[idx] == NULL);
cache->batches[idx] = batch;
return batch;

View File

@ -421,7 +421,7 @@ fd_get_reset_count(struct fd_context *ctx, bool per_context)
uint64_t val;
enum fd_param_id param = per_context ? FD_CTX_FAULTS : FD_GLOBAL_FAULTS;
int ret = fd_pipe_get_param(ctx->pipe, param, &val);
debug_assert(!ret);
assert(!ret);
return val;
}

View File

@ -371,7 +371,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
if (FD_DBG(DDRAW))
fd_context_all_dirty(ctx);
debug_assert(!batch->flushed);
assert(!batch->flushed);
fd_batch_unlock_submit(batch);
fd_batch_check_size(batch);
@ -484,7 +484,7 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
}
}
debug_assert(!batch->flushed);
assert(!batch->flushed);
fd_batch_unlock_submit(batch);

View File

@ -73,7 +73,7 @@ fence_flush(struct pipe_context *pctx, struct pipe_fence_handle *fence,
util_queue_fence_wait(&fence->submit_fence.ready);
debug_assert(!fence->batch);
assert(!fence->batch);
return true;
}

View File

@ -716,7 +716,7 @@ fd_gmem_render_tiles(struct fd_batch *batch)
* bypass.
*/
if (batch->tessellation) {
debug_assert(ctx->emit_sysmem_prep);
assert(ctx->emit_sysmem_prep);
sysmem = true;
}

View File

@ -169,7 +169,7 @@ fd_prog_blit_fs(struct pipe_context *pctx, int rts, bool depth)
struct ureg_src tc;
struct ureg_program *ureg;
debug_assert(rts <= MAX_RENDER_TARGETS);
assert(rts <= MAX_RENDER_TARGETS);
ureg = ureg_create(PIPE_SHADER_FRAGMENT);
if (!ureg)

View File

@ -294,7 +294,7 @@ fd_hw_sample_init(struct fd_batch *batch, uint32_t size)
struct fd_hw_sample *samp = slab_alloc_st(&batch->ctx->sample_pool);
pipe_reference_init(&samp->reference, 1);
samp->size = size;
debug_assert(util_is_power_of_two_or_zero(size));
assert(util_is_power_of_two_or_zero(size));
batch->next_sample_offset = align(batch->next_sample_offset, size);
samp->offset = batch->next_sample_offset;
/* NOTE: slab_alloc_st() does not zero out the buffer: */

View File

@ -453,7 +453,7 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
* by any batches, but the existing rsc (probably) is. We need to
* transfer those references over:
*/
debug_assert(shadow->track->batch_mask == 0);
assert(shadow->track->batch_mask == 0);
foreach_batch (batch, &ctx->screen->batch_cache, rsc->track->batch_mask) {
struct set_entry *entry = _mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc);
_mesa_set_remove(batch->resources, entry);
@ -558,7 +558,7 @@ fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc, bool lin
bool success = fd_try_shadow_resource(ctx, rsc, 0, NULL, modifier);
/* shadow should not fail in any cases where we need to uncompress: */
debug_assert(success);
assert(success);
}
/**
@ -1078,9 +1078,9 @@ fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
{
struct fd_resource *rsc = fd_resource(prsc);
debug_assert(prsc->width0 == 0);
debug_assert(prsc->target == PIPE_BUFFER);
debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
assert(prsc->width0 == 0);
assert(prsc->target == PIPE_BUFFER);
assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
prsc->width0 = sz;
realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
@ -1283,7 +1283,7 @@ fd_resource_allocate_and_resolve(struct pipe_screen *pscreen,
*/
if (size == 0) {
/* note, semi-intention == instead of & */
debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
*psize = 0;
return prsc;
}

View File

@ -295,7 +295,7 @@ static inline uint32_t
fd_resource_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
uint32_t offset = fdl_surface_offset(&rsc->layout, level, layer);
debug_assert(offset < fd_bo_size(rsc->bo));
assert(offset < fd_bo_size(rsc->bo));
return offset;
}
@ -303,7 +303,7 @@ static inline uint32_t
fd_resource_ubwc_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
uint32_t offset = fdl_ubwc_offset(&rsc->layout, level, layer);
debug_assert(offset < fd_bo_size(rsc->bo));
assert(offset < fd_bo_size(rsc->bo));
return offset;
}
@ -312,7 +312,7 @@ static inline bool
fd_resource_level_linear(const struct pipe_resource *prsc, int level)
{
struct fd_screen *screen = fd_screen(prsc->screen);
debug_assert(!is_a3xx(screen));
assert(!is_a3xx(screen));
return fdl_level_linear(&fd_resource_const(prsc)->layout, level);
}

View File

@ -131,7 +131,7 @@ fd_screen_get_timestamp(struct pipe_screen *pscreen)
if (screen->has_timestamp) {
uint64_t n;
fd_pipe_get_param(screen->pipe, FD_TIMESTAMP, &n);
debug_assert(screen->max_freq > 0);
assert(screen->max_freq > 0);
return n * 1000000000 / screen->max_freq;
} else {
int64_t cpu_time = os_time_get() * 1000;

View File

@ -571,7 +571,7 @@ fd_set_stream_output_targets(struct pipe_context *pctx, unsigned num_targets,
struct fd_streamout_stateobj *so = &ctx->streamout;
unsigned i;
debug_assert(num_targets <= ARRAY_SIZE(so->targets));
assert(num_targets <= ARRAY_SIZE(so->targets));
/* Older targets need sw stats enabled for streamout emulation in VS: */
if (ctx->screen->gen < 5) {

View File

@ -441,10 +441,7 @@ fd_msaa_samples(unsigned samples)
{
switch (samples) {
default:
debug_assert(0);
#if defined(NDEBUG) || defined(DEBUG)
FALLTHROUGH;
#endif
assert(0);
case 0:
case 1:
return MSAA_ONE;

View File

@ -98,7 +98,7 @@ ir3_cache_lookup(struct ir3_cache *cache, const struct ir3_cache_key *key,
}
if (key->hs)
debug_assert(key->ds);
assert(key->ds);
struct ir3_shader *shaders[MESA_SHADER_STAGES] = {
[MESA_SHADER_VERTEX] = ir3_get_shader(key->vs),

View File

@ -186,9 +186,9 @@ ir3_emit_user_consts(struct fd_screen *screen,
continue;
/* things should be aligned to vec4: */
debug_assert((state->range[i].offset % 16) == 0);
debug_assert((size % 16) == 0);
debug_assert((offset % 16) == 0);
assert((state->range[i].offset % 16) == 0);
assert((size % 16) == 0);
assert((offset % 16) == 0);
if (cb->user_buffer) {
emit_const_user(ring, v, state->range[i].offset / 4, size / 4,
@ -538,7 +538,7 @@ ir3_emit_vs_consts(const struct ir3_shader_variant *v,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count_bias *draw) assert_dt
{
debug_assert(v->type == MESA_SHADER_VERTEX);
assert(v->type == MESA_SHADER_VERTEX);
emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX);
@ -553,7 +553,7 @@ static inline void
ir3_emit_fs_consts(const struct ir3_shader_variant *v,
struct fd_ringbuffer *ring, struct fd_context *ctx) assert_dt
{
debug_assert(v->type == MESA_SHADER_FRAGMENT);
assert(v->type == MESA_SHADER_FRAGMENT);
emit_common_consts(v, ring, ctx, PIPE_SHADER_FRAGMENT);
}
@ -564,7 +564,7 @@ ir3_emit_cs_consts(const struct ir3_shader_variant *v,
struct fd_ringbuffer *ring, struct fd_context *ctx,
const struct pipe_grid_info *info) assert_dt
{
debug_assert(gl_shader_stage_is_compute(v->type));
assert(gl_shader_stage_is_compute(v->type));
emit_common_consts(v, ring, ctx, PIPE_SHADER_COMPUTE);
emit_kernel_params(ctx, v, ring, info);

View File

@ -299,7 +299,7 @@ ir3_shader_compute_state_create(struct pipe_context *pctx,
ir3_finalize_nir(compiler, nir);
} else {
debug_assert(cso->ir_type == PIPE_SHADER_IR_TGSI);
assert(cso->ir_type == PIPE_SHADER_IR_TGSI);
if (ir3_shader_debug & IR3_DBG_DISASM) {
tgsi_dump(cso->prog, 0);
}
@ -357,7 +357,7 @@ ir3_shader_state_create(struct pipe_context *pctx,
/* we take ownership of the reference: */
nir = cso->ir.nir;
} else {
debug_assert(cso->type == PIPE_SHADER_IR_TGSI);
assert(cso->type == PIPE_SHADER_IR_TGSI);
if (ir3_shader_debug & IR3_DBG_DISASM) {
tgsi_dump(cso->tokens, 0);
}

View File

@ -71,8 +71,8 @@ llvmpipe_set_scissor_states(struct pipe_context *pipe,
draw_flush(llvmpipe->draw);
debug_assert(start_slot < PIPE_MAX_VIEWPORTS);
debug_assert((start_slot + num_scissors) <= PIPE_MAX_VIEWPORTS);
assert(start_slot < PIPE_MAX_VIEWPORTS);
assert((start_slot + num_scissors) <= PIPE_MAX_VIEWPORTS);
memcpy(llvmpipe->scissors + start_slot, scissors,
sizeof(struct pipe_scissor_state) * num_scissors);

View File

@ -71,8 +71,8 @@ softpipe_set_scissor_states(struct pipe_context *pipe,
draw_flush(softpipe->draw);
debug_assert(start_slot < PIPE_MAX_VIEWPORTS);
debug_assert((start_slot + num_scissors) <= PIPE_MAX_VIEWPORTS);
assert(start_slot < PIPE_MAX_VIEWPORTS);
assert((start_slot + num_scissors) <= PIPE_MAX_VIEWPORTS);
memcpy(softpipe->scissors + start_slot, scissors,
sizeof(struct pipe_scissor_state) * num_scissors);

View File

@ -999,7 +999,7 @@ int virgl_encode_sampler_view(struct virgl_context *ctx,
virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
} else {
if (res->metadata.plane) {
debug_assert(state->u.tex.first_layer == 0 && state->u.tex.last_layer == 0);
assert(state->u.tex.first_layer == 0 && state->u.tex.last_layer == 0);
virgl_encoder_write_dword(ctx->cbuf, res->metadata.plane);
} else {
virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);

View File

@ -144,10 +144,7 @@ FormatYCBCRToPipe(VdpYCbCrFormat vdpau_format)
#endif
default:
/* NOTE: Can't be "unreachable", as it's quite reachable. */
debug_assert(!"unexpected VdpYCbCrFormat");
#if defined(NDEBUG) || defined(DEBUG)
FALLTHROUGH;
#endif
assert(!"unexpected VdpYCbCrFormat");
#ifdef VDP_YCBCR_FORMAT_Y_UV_444
case VDP_YCBCR_FORMAT_Y_UV_444:
#endif

View File

@ -489,7 +489,7 @@ xa_shaders_get(struct xa_shaders *sc, unsigned vs_traits, unsigned fs_traits)
fs = shader_from_cache(sc->r->pipe, PIPE_SHADER_FRAGMENT,
&sc->fs_hash, fs_traits);
debug_assert(vs && fs);
assert(vs && fs);
if (!vs || !fs)
return shader;

View File

@ -175,7 +175,7 @@ debug_free(const char *file, unsigned line, const char *function,
debug_printf("%s:%u:%s: freeing bad or corrupted memory %p\n",
file, line, function,
ptr);
debug_assert(0);
assert(0);
return;
}
@ -184,7 +184,7 @@ debug_free(const char *file, unsigned line, const char *function,
debug_printf("%s:%u:%s: buffer overflow %p\n",
hdr->file, hdr->line, hdr->function,
ptr);
debug_assert(0);
assert(0);
}
#if DEBUG_FREED_MEMORY
@ -239,7 +239,7 @@ debug_realloc(const char *file, unsigned line, const char *function,
debug_printf("%s:%u:%s: reallocating bad or corrupted memory %p\n",
file, line, function,
old_ptr);
debug_assert(0);
assert(0);
return NULL;
}
@ -248,7 +248,7 @@ debug_realloc(const char *file, unsigned line, const char *function,
debug_printf("%s:%u:%s: buffer overflow %p\n",
old_hdr->file, old_hdr->line, old_hdr->function,
old_ptr);
debug_assert(0);
assert(0);
}
/* alloc new */
@ -318,7 +318,7 @@ debug_memory_end(unsigned long start_no)
debug_printf("%s:%u:%s: bad or corrupted memory %p\n",
hdr->file, hdr->line, hdr->function,
ptr);
debug_assert(0);
assert(0);
}
if ((start_no <= hdr->no && hdr->no < last_no) ||
@ -336,7 +336,7 @@ debug_memory_end(unsigned long start_no)
debug_printf("%s:%u:%s: buffer overflow %p\n",
hdr->file, hdr->line, hdr->function,
ptr);
debug_assert(0);
assert(0);
}
}
@ -365,7 +365,7 @@ debug_memory_tag(void *ptr, unsigned tag)
hdr = header_from_data(ptr);
if (hdr->magic != DEBUG_MEMORY_MAGIC) {
debug_printf("%s corrupted memory at %p\n", __FUNCTION__, ptr);
debug_assert(0);
assert(0);
}
hdr->tag = tag;
@ -390,13 +390,13 @@ debug_memory_check_block(void *ptr)
if (hdr->magic != DEBUG_MEMORY_MAGIC) {
debug_printf("%s:%u:%s: bad or corrupted memory %p\n",
hdr->file, hdr->line, hdr->function, ptr);
debug_assert(0);
assert(0);
}
if (ftr->magic != DEBUG_MEMORY_MAGIC) {
debug_printf("%s:%u:%s: buffer overflow %p\n",
hdr->file, hdr->line, hdr->function, ptr);
debug_assert(0);
assert(0);
}
}
@ -424,13 +424,13 @@ debug_memory_check(void)
if (hdr->magic != DEBUG_MEMORY_MAGIC) {
debug_printf("%s:%u:%s: bad or corrupted memory %p\n",
hdr->file, hdr->line, hdr->function, ptr);
debug_assert(0);
assert(0);
}
if (ftr->magic != DEBUG_MEMORY_MAGIC) {
debug_printf("%s:%u:%s: buffer overflow %p\n",
hdr->file, hdr->line, hdr->function, ptr);
debug_assert(0);
assert(0);
}
#if DEBUG_FREED_MEMORY