intel: fix INTEL_DEBUG environment variable on 32-bit systems
INTEL_DEBUG is defined (since commit 4015e1876a) as:
#define INTEL_DEBUG __builtin_expect(intel_debug, 0)
which unfortunately chops off upper 32 bits from intel_debug
on platforms where sizeof(long) != sizeof(uint64_t) because
__builtin_expect is defined only for the long type.
Fix this by changing the definition of INTEL_DEBUG to be a function-like
macro with a "flags" argument. The new definition returns 1 when any of
the given flags are set in intel_debug, and 0 otherwise.
Most of the changes in this commit were generated using:
for c in `git grep INTEL_DEBUG | grep "&" | grep -v i915 | awk -F: '{print $1}' | sort | uniq`; do
perl -pi -e "s/INTEL_DEBUG & ([A-Z0-9a-z_]+)/INTEL_DBG(\1)/" $c
perl -pi -e "s/INTEL_DEBUG & (\([A-Z0-9_ |]+\))/INTEL_DBG\1/" $c
done
but it didn't handle all cases and required minor manual cleanups (such as
removing parentheses that were no longer needed).
Signed-off-by: Marcin Ślusarz <marcin.slusarz@intel.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13334>
This commit is contained in:
parent
182237e1e8
commit
d05f7b4a2c
|
@ -216,7 +216,7 @@ crocus_init_batch(struct crocus_context *ice,
|
|||
if (devinfo->ver == 6)
|
||||
batch->valid_reloc_flags |= EXEC_OBJECT_NEEDS_GTT;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
/* The shadow doesn't get relocs written so state decode fails. */
|
||||
batch->use_shadow_copy = false;
|
||||
} else
|
||||
|
@ -247,12 +247,12 @@ crocus_init_batch(struct crocus_context *ice,
|
|||
batch->other_batches[j++] = &ice->batches[i];
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
|
||||
batch->state_sizes = _mesa_hash_table_u64_create(NULL);
|
||||
const unsigned decode_flags =
|
||||
INTEL_BATCH_DECODE_FULL |
|
||||
((INTEL_DEBUG & DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
|
||||
(INTEL_DEBUG(DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
|
||||
INTEL_BATCH_DECODE_OFFSETS | INTEL_BATCH_DECODE_FLOATS;
|
||||
|
||||
intel_batch_decode_ctx_init(&batch->decoder, &screen->devinfo, stderr,
|
||||
|
@ -940,7 +940,7 @@ _crocus_batch_flush(struct crocus_batch *batch, const char *file, int line)
|
|||
finish_growing_bos(&batch->state);
|
||||
int ret = submit_batch(batch);
|
||||
|
||||
if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL)) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL)) {
|
||||
int bytes_for_commands = crocus_batch_bytes_used(batch);
|
||||
int second_bytes = 0;
|
||||
if (batch->command.bo != batch->exec_bos[0]) {
|
||||
|
@ -958,12 +958,12 @@ _crocus_batch_flush(struct crocus_batch *batch, const char *file, int line)
|
|||
batch->command.relocs.reloc_count,
|
||||
batch->state.relocs.reloc_count);
|
||||
|
||||
if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT)) {
|
||||
dump_fence_list(batch);
|
||||
dump_validation_list(batch);
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
decode_batch(batch);
|
||||
}
|
||||
}
|
||||
|
@ -984,7 +984,7 @@ _crocus_batch_flush(struct crocus_batch *batch, const char *file, int line)
|
|||
|
||||
util_dynarray_clear(&batch->exec_fences);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SYNC) {
|
||||
if (INTEL_DEBUG(DEBUG_SYNC)) {
|
||||
dbg_printf("waiting for idle\n");
|
||||
crocus_bo_wait_rendering(batch->command.bo); /* if execbuf failed; this is a nop */
|
||||
}
|
||||
|
@ -1008,7 +1008,7 @@ _crocus_batch_flush(struct crocus_batch *batch, const char *file, int line)
|
|||
|
||||
if (ret < 0) {
|
||||
#ifdef DEBUG
|
||||
const bool color = INTEL_DEBUG & DEBUG_COLOR;
|
||||
const bool color = INTEL_DEBUG(DEBUG_COLOR);
|
||||
fprintf(stderr, "%scrocus: Failed to submit batchbuffer: %-80s%s\n",
|
||||
color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
|
||||
#endif
|
||||
|
|
|
@ -73,7 +73,7 @@ can_fast_clear_color(struct crocus_context *ice,
|
|||
{
|
||||
struct crocus_resource *res = (void *) p_res;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_FAST_CLEAR)
|
||||
if (INTEL_DEBUG(DEBUG_NO_FAST_CLEAR))
|
||||
return false;
|
||||
|
||||
if (!isl_aux_usage_has_fast_clears(res->aux.usage))
|
||||
|
@ -407,7 +407,7 @@ can_fast_clear_depth(struct crocus_context *ice,
|
|||
if (devinfo->ver < 6)
|
||||
return false;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_FAST_CLEAR)
|
||||
if (INTEL_DEBUG(DEBUG_NO_FAST_CLEAR))
|
||||
return false;
|
||||
|
||||
/* Check for partial clears */
|
||||
|
|
|
@ -754,7 +754,7 @@ struct crocus_context {
|
|||
};
|
||||
|
||||
#define perf_debug(dbg, ...) do { \
|
||||
if (INTEL_DEBUG & DEBUG_PERF) \
|
||||
if (INTEL_DEBUG(DEBUG_PERF)) \
|
||||
dbg_printf(__VA_ARGS__); \
|
||||
if (unlikely(dbg)) \
|
||||
pipe_debug_message(dbg, PERF_INFO, __VA_ARGS__); \
|
||||
|
|
|
@ -237,7 +237,7 @@ void
|
|||
crocus_disk_cache_init(struct crocus_screen *screen)
|
||||
{
|
||||
#ifdef ENABLE_SHADER_CACHE
|
||||
if (INTEL_DEBUG & DEBUG_DISK_CACHE_DISABLE_MASK)
|
||||
if (INTEL_DEBUG(DEBUG_DISK_CACHE_DISABLE_MASK))
|
||||
return;
|
||||
|
||||
/* array length = print length + nul char + 1 extra to verify it's unused */
|
||||
|
|
|
@ -405,7 +405,7 @@ crocus_draw_vbo(struct pipe_context *ctx,
|
|||
/* We can't safely re-emit 3DSTATE_SO_BUFFERS because it may zero the
|
||||
* write offsets, changing the behavior.
|
||||
*/
|
||||
if (INTEL_DEBUG & DEBUG_REEMIT) {
|
||||
if (INTEL_DEBUG(DEBUG_REEMIT)) {
|
||||
ice->state.dirty |= CROCUS_ALL_DIRTY_FOR_RENDER & ~(CROCUS_DIRTY_GEN7_SO_BUFFERS | CROCUS_DIRTY_GEN6_SVBI);
|
||||
ice->state.stage_dirty |= CROCUS_ALL_STAGE_DIRTY_FOR_RENDER;
|
||||
}
|
||||
|
@ -484,7 +484,7 @@ crocus_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
|
|||
if (!crocus_check_conditional_render(ice))
|
||||
return;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_REEMIT) {
|
||||
if (INTEL_DEBUG(DEBUG_REEMIT)) {
|
||||
ice->state.dirty |= CROCUS_ALL_DIRTY_FOR_COMPUTE;
|
||||
ice->state.stage_dirty |= CROCUS_ALL_STAGE_DIRTY_FOR_COMPUTE;
|
||||
}
|
||||
|
|
|
@ -951,7 +951,7 @@ crocus_setup_binding_table(const struct intel_device_info *devinfo,
|
|||
}
|
||||
bt->size_bytes = next * 4;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BT) {
|
||||
if (INTEL_DEBUG(DEBUG_BT)) {
|
||||
crocus_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
|
||||
}
|
||||
|
||||
|
|
|
@ -434,11 +434,11 @@ crocus_resource_configure_aux(struct crocus_screen *screen,
|
|||
isl_surf_get_mcs_surf(&screen->isl_dev, &res->surf, &res->aux.surf);
|
||||
|
||||
const bool has_hiz = devinfo->ver >= 6 && !res->mod_info &&
|
||||
!(INTEL_DEBUG & DEBUG_NO_HIZ) &&
|
||||
!INTEL_DEBUG(DEBUG_NO_HIZ) &&
|
||||
isl_surf_get_hiz_surf(&screen->isl_dev, &res->surf, &res->aux.surf);
|
||||
|
||||
const bool has_ccs =
|
||||
((devinfo->ver >= 7 && !res->mod_info && !(INTEL_DEBUG & DEBUG_NO_RBC)) ||
|
||||
((devinfo->ver >= 7 && !res->mod_info && !INTEL_DEBUG(DEBUG_NO_RBC)) ||
|
||||
(res->mod_info && res->mod_info->aux_usage != ISL_AUX_USAGE_NONE)) &&
|
||||
isl_surf_get_ccs_surf(&screen->isl_dev, &res->surf, NULL,
|
||||
&res->aux.surf, 0);
|
||||
|
|
|
@ -694,7 +694,7 @@ crocus_shader_perf_log(void *data, unsigned *id, const char *fmt, ...)
|
|||
va_list args;
|
||||
va_start(args, fmt);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_PERF) {
|
||||
if (INTEL_DEBUG(DEBUG_PERF)) {
|
||||
va_list args_copy;
|
||||
va_copy(args_copy, args);
|
||||
vfprintf(stderr, fmt, args_copy);
|
||||
|
|
|
@ -771,12 +771,12 @@ crocus_calculate_urb_fence(struct crocus_batch *batch, unsigned csize,
|
|||
exit(1);
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & (DEBUG_URB|DEBUG_PERF))
|
||||
if (INTEL_DEBUG(DEBUG_URB|DEBUG_PERF))
|
||||
fprintf(stderr, "URB CONSTRAINED\n");
|
||||
}
|
||||
|
||||
done:
|
||||
if (INTEL_DEBUG & DEBUG_URB)
|
||||
if (INTEL_DEBUG(DEBUG_URB))
|
||||
fprintf(stderr,
|
||||
"URB fence: %d ..VS.. %d ..GS.. %d ..CLP.. %d ..SF.. %d ..CS.. %d\n",
|
||||
ice->urb.vs_start,
|
||||
|
@ -1197,7 +1197,7 @@ emit_l3_state(struct crocus_batch *batch, bool compute)
|
|||
compute ? batch->screen->l3_config_cs : batch->screen->l3_config_3d;
|
||||
|
||||
setup_l3_config(batch, cfg);
|
||||
if (INTEL_DEBUG & DEBUG_L3) {
|
||||
if (INTEL_DEBUG(DEBUG_L3)) {
|
||||
intel_dump_l3_config(cfg, stderr);
|
||||
}
|
||||
}
|
||||
|
@ -8809,7 +8809,7 @@ crocus_emit_raw_pipe_control(struct crocus_batch *batch,
|
|||
|
||||
/* Emit --------------------------------------------------------------- */
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
|
||||
if (INTEL_DEBUG(DEBUG_PIPE_CONTROL)) {
|
||||
fprintf(stderr,
|
||||
" PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
|
||||
(flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
|
||||
|
|
|
@ -216,10 +216,10 @@ iris_init_batch(struct iris_context *ice,
|
|||
batch->other_batches[j++] = &ice->batches[i];
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG) {
|
||||
if (INTEL_DEBUG(DEBUG_ANY)) {
|
||||
const unsigned decode_flags =
|
||||
INTEL_BATCH_DECODE_FULL |
|
||||
((INTEL_DEBUG & DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
|
||||
(INTEL_DEBUG(DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
|
||||
INTEL_BATCH_DECODE_OFFSETS |
|
||||
INTEL_BATCH_DECODE_FLOATS;
|
||||
|
||||
|
@ -463,7 +463,7 @@ iris_batch_free(struct iris_batch *batch)
|
|||
|
||||
_mesa_hash_table_destroy(batch->cache.render, NULL);
|
||||
|
||||
if (INTEL_DEBUG)
|
||||
if (INTEL_DEBUG(DEBUG_ANY))
|
||||
intel_batch_decode_ctx_finish(&batch->decoder);
|
||||
}
|
||||
|
||||
|
@ -784,12 +784,12 @@ submit_batch(struct iris_batch *batch)
|
|||
|
||||
free(index_for_handle);
|
||||
|
||||
if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT)) {
|
||||
dump_fence_list(batch);
|
||||
dump_bo_list(batch);
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
decode_batch(batch);
|
||||
}
|
||||
|
||||
|
@ -875,7 +875,7 @@ _iris_batch_flush(struct iris_batch *batch, const char *file, int line)
|
|||
|
||||
update_batch_syncobjs(batch);
|
||||
|
||||
if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL)) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL)) {
|
||||
const char *basefile = strstr(file, "iris/");
|
||||
if (basefile)
|
||||
file = basefile + 5;
|
||||
|
@ -917,7 +917,7 @@ _iris_batch_flush(struct iris_batch *batch, const char *file, int line)
|
|||
|
||||
util_dynarray_clear(&batch->exec_fences);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SYNC) {
|
||||
if (INTEL_DEBUG(DEBUG_SYNC)) {
|
||||
dbg_printf("waiting for idle\n");
|
||||
iris_bo_wait_rendering(batch->bo); /* if execbuf failed; this is a nop */
|
||||
}
|
||||
|
@ -942,7 +942,7 @@ _iris_batch_flush(struct iris_batch *batch, const char *file, int line)
|
|||
|
||||
if (ret < 0) {
|
||||
#ifdef DEBUG
|
||||
const bool color = INTEL_DEBUG & DEBUG_COLOR;
|
||||
const bool color = INTEL_DEBUG(DEBUG_COLOR);
|
||||
fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
|
||||
color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
|
||||
#endif
|
||||
|
|
|
@ -74,7 +74,7 @@ can_fast_clear_color(struct iris_context *ice,
|
|||
{
|
||||
struct iris_resource *res = (void *) p_res;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_FAST_CLEAR)
|
||||
if (INTEL_DEBUG(DEBUG_NO_FAST_CLEAR))
|
||||
return false;
|
||||
|
||||
if (!isl_aux_usage_has_fast_clears(res->aux.usage))
|
||||
|
@ -424,7 +424,7 @@ can_fast_clear_depth(struct iris_context *ice,
|
|||
struct iris_screen *screen = (void *) ctx->screen;
|
||||
const struct intel_device_info *devinfo = &screen->devinfo;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_FAST_CLEAR)
|
||||
if (INTEL_DEBUG(DEBUG_NO_FAST_CLEAR))
|
||||
return false;
|
||||
|
||||
/* Check for partial clears */
|
||||
|
|
|
@ -359,7 +359,7 @@ iris_create_context(struct pipe_screen *pscreen, void *priv, unsigned flags)
|
|||
if (flags & PIPE_CONTEXT_LOW_PRIORITY)
|
||||
priority = INTEL_CONTEXT_LOW_PRIORITY;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH)
|
||||
if (INTEL_DEBUG(DEBUG_BATCH))
|
||||
ice->state.sizes = _mesa_hash_table_u64_create(ice);
|
||||
|
||||
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
|
||||
|
|
|
@ -841,7 +841,7 @@ struct iris_context {
|
|||
};
|
||||
|
||||
#define perf_debug(dbg, ...) do { \
|
||||
if (INTEL_DEBUG & DEBUG_PERF) \
|
||||
if (INTEL_DEBUG(DEBUG_PERF)) \
|
||||
dbg_printf(__VA_ARGS__); \
|
||||
if (unlikely(dbg)) \
|
||||
pipe_debug_message(dbg, PERF_INFO, __VA_ARGS__); \
|
||||
|
|
|
@ -265,7 +265,7 @@ void
|
|||
iris_disk_cache_init(struct iris_screen *screen)
|
||||
{
|
||||
#ifdef ENABLE_SHADER_CACHE
|
||||
if (INTEL_DEBUG & DEBUG_DISK_CACHE_DISABLE_MASK)
|
||||
if (INTEL_DEBUG(DEBUG_DISK_CACHE_DISABLE_MASK))
|
||||
return;
|
||||
|
||||
/* array length = print length + nul char + 1 extra to verify it's unused */
|
||||
|
|
|
@ -275,7 +275,7 @@ iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info,
|
|||
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
|
||||
return;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_REEMIT) {
|
||||
if (INTEL_DEBUG(DEBUG_REEMIT)) {
|
||||
ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
|
||||
ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
|
||||
}
|
||||
|
@ -386,7 +386,7 @@ iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
|
|||
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
|
||||
return;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_REEMIT) {
|
||||
if (INTEL_DEBUG(DEBUG_REEMIT)) {
|
||||
ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
|
||||
ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
|
||||
}
|
||||
|
|
|
@ -252,11 +252,11 @@ iris_fence_flush(struct pipe_context *ctx,
|
|||
if (flags & PIPE_FLUSH_END_OF_FRAME) {
|
||||
ice->frame++;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SUBMIT) {
|
||||
if (INTEL_DEBUG(DEBUG_SUBMIT)) {
|
||||
fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
|
||||
(INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
|
||||
INTEL_DEBUG(DEBUG_COLOR) ? BLUE_HEADER : "",
|
||||
ice->frame, ctx, ' ',
|
||||
(INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
|
||||
INTEL_DEBUG(DEBUG_COLOR) ? NORMAL : "");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1016,7 +1016,7 @@ iris_setup_binding_table(const struct intel_device_info *devinfo,
|
|||
}
|
||||
bt->size_bytes = next * 4;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BT) {
|
||||
if (INTEL_DEBUG(DEBUG_BT)) {
|
||||
iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
|
||||
}
|
||||
|
||||
|
|
|
@ -126,7 +126,7 @@ modifier_is_supported(const struct intel_device_info *devinfo,
|
|||
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
|
||||
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
|
||||
case I915_FORMAT_MOD_Y_TILED_CCS: {
|
||||
if (INTEL_DEBUG & DEBUG_NO_RBC)
|
||||
if (INTEL_DEBUG(DEBUG_NO_RBC))
|
||||
return false;
|
||||
|
||||
enum isl_format rt_format =
|
||||
|
@ -739,11 +739,11 @@ iris_resource_configure_aux(struct iris_screen *screen,
|
|||
const bool has_mcs = !res->mod_info &&
|
||||
isl_surf_get_mcs_surf(&screen->isl_dev, &res->surf, &res->aux.surf);
|
||||
|
||||
const bool has_hiz = !res->mod_info && !(INTEL_DEBUG & DEBUG_NO_HIZ) &&
|
||||
const bool has_hiz = !res->mod_info && !INTEL_DEBUG(DEBUG_NO_HIZ) &&
|
||||
isl_surf_get_hiz_surf(&screen->isl_dev, &res->surf, &res->aux.surf);
|
||||
|
||||
const bool has_ccs =
|
||||
((!res->mod_info && !(INTEL_DEBUG & DEBUG_NO_RBC)) ||
|
||||
((!res->mod_info && !INTEL_DEBUG(DEBUG_NO_RBC)) ||
|
||||
(res->mod_info && res->mod_info->aux_usage != ISL_AUX_USAGE_NONE)) &&
|
||||
iris_get_ccs_surf(&screen->isl_dev, &res->surf, &res->aux.surf,
|
||||
&res->aux.extra_aux.surf, 0);
|
||||
|
|
|
@ -705,7 +705,7 @@ iris_shader_perf_log(void *data, unsigned *id, const char *fmt, ...)
|
|||
va_list args;
|
||||
va_start(args, fmt);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_PERF) {
|
||||
if (INTEL_DEBUG(DEBUG_PERF)) {
|
||||
va_list args_copy;
|
||||
va_copy(args_copy, args);
|
||||
vfprintf(stderr, fmt, args_copy);
|
||||
|
|
|
@ -7846,7 +7846,7 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
|
|||
|
||||
/* Emit --------------------------------------------------------------- */
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
|
||||
if (INTEL_DEBUG(DEBUG_PIPE_CONTROL)) {
|
||||
fprintf(stderr,
|
||||
" PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
|
||||
(flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
|
||||
|
|
|
@ -85,7 +85,7 @@ brw_compile_clip(const struct brw_compiler *compiler,
|
|||
|
||||
const unsigned *program = brw_get_program(&c.func, final_assembly_size);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_CLIP) {
|
||||
if (INTEL_DEBUG(DEBUG_CLIP)) {
|
||||
fprintf(stderr, "clip:\n");
|
||||
brw_disassemble_with_labels(compiler->devinfo,
|
||||
program, 0, *final_assembly_size, stderr);
|
||||
|
|
|
@ -648,7 +648,7 @@ brw_compile_ff_gs_prog(struct brw_compiler *compiler,
|
|||
*/
|
||||
program = brw_get_program(&c.func, final_assembly_size);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_GS) {
|
||||
if (INTEL_DEBUG(DEBUG_GS)) {
|
||||
fprintf(stderr, "gs:\n");
|
||||
brw_disassemble_with_labels(compiler->devinfo, c.func.store,
|
||||
0, *final_assembly_size, stderr);
|
||||
|
|
|
@ -868,7 +868,7 @@ brw_compile_sf(const struct brw_compiler *compiler,
|
|||
|
||||
const unsigned *program = brw_get_program(&c.func, final_assembly_size);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SF) {
|
||||
if (INTEL_DEBUG(DEBUG_SF)) {
|
||||
fprintf(stderr, "sf:\n");
|
||||
brw_disassemble_with_labels(compiler->devinfo,
|
||||
program, 0, *final_assembly_size, stderr);
|
||||
|
|
|
@ -110,7 +110,7 @@ brw_compiler_create(void *mem_ctx, const struct intel_device_info *devinfo)
|
|||
|
||||
compiler->use_tcs_8_patch =
|
||||
devinfo->ver >= 12 ||
|
||||
(devinfo->ver >= 9 && (INTEL_DEBUG & DEBUG_TCS_EIGHT_PATCH));
|
||||
(devinfo->ver >= 9 && INTEL_DEBUG(DEBUG_TCS_EIGHT_PATCH));
|
||||
|
||||
/* Default to the sampler since that's what we've done since forever */
|
||||
compiler->indirect_ubos_use_sampler = true;
|
||||
|
@ -142,7 +142,7 @@ brw_compiler_create(void *mem_ctx, const struct intel_device_info *devinfo)
|
|||
nir_lower_dsub |
|
||||
nir_lower_ddiv;
|
||||
|
||||
if (!devinfo->has_64bit_float || (INTEL_DEBUG & DEBUG_SOFT64)) {
|
||||
if (!devinfo->has_64bit_float || INTEL_DEBUG(DEBUG_SOFT64)) {
|
||||
int64_options |= (nir_lower_int64_options)~0;
|
||||
fp64_options |= nir_lower_fp64_full_software;
|
||||
}
|
||||
|
@ -226,11 +226,10 @@ brw_get_compiler_config_value(const struct brw_compiler *compiler)
|
|||
insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_EVAL]);
|
||||
insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
|
||||
}
|
||||
uint64_t debug_bits = INTEL_DEBUG;
|
||||
uint64_t mask = DEBUG_DISK_CACHE_MASK;
|
||||
while (mask != 0) {
|
||||
const uint64_t bit = 1ULL << (ffsll(mask) - 1);
|
||||
insert_u64_bit(&config, (debug_bits & bit) != 0);
|
||||
insert_u64_bit(&config, INTEL_DEBUG(bit));
|
||||
mask &= ~bit;
|
||||
}
|
||||
return config;
|
||||
|
|
|
@ -141,7 +141,7 @@ disasm_annotate(struct disasm_info *disasm,
|
|||
exec_list_get_tail_raw(&disasm->group_list), link);
|
||||
}
|
||||
|
||||
if ((INTEL_DEBUG & DEBUG_ANNOTATION) != 0) {
|
||||
if (INTEL_DEBUG(DEBUG_ANNOTATION)) {
|
||||
group->ir = inst->ir;
|
||||
group->annotation = inst->annotation;
|
||||
}
|
||||
|
|
|
@ -529,7 +529,7 @@ brw_disassemble(const struct intel_device_info *devinfo,
|
|||
const void *assembly, int start, int end,
|
||||
const struct brw_label *root_label, FILE *out)
|
||||
{
|
||||
bool dump_hex = (INTEL_DEBUG & DEBUG_HEX) != 0;
|
||||
bool dump_hex = INTEL_DEBUG(DEBUG_HEX);
|
||||
|
||||
for (int offset = start; offset < end;) {
|
||||
const brw_inst *insn = (const brw_inst *)((char *)assembly + offset);
|
||||
|
|
|
@ -2447,7 +2447,7 @@ void
|
|||
brw_compact_instructions(struct brw_codegen *p, int start_offset,
|
||||
struct disasm_info *disasm)
|
||||
{
|
||||
if (INTEL_DEBUG & DEBUG_NO_COMPACTION)
|
||||
if (INTEL_DEBUG(DEBUG_NO_COMPACTION))
|
||||
return;
|
||||
|
||||
const struct intel_device_info *devinfo = p->devinfo;
|
||||
|
@ -2484,7 +2484,7 @@ brw_compact_instructions(struct brw_codegen *p, int start_offset,
|
|||
if (try_compact_instruction(&c, dst, &inst)) {
|
||||
compacted_count++;
|
||||
|
||||
if (INTEL_DEBUG) {
|
||||
if (INTEL_DEBUG(DEBUG_ANY)) {
|
||||
brw_inst uncompacted;
|
||||
uncompact_instruction(&c, &uncompacted, dst);
|
||||
if (memcmp(&saved, &uncompacted, sizeof(uncompacted))) {
|
||||
|
|
|
@ -8489,7 +8489,7 @@ fs_visitor::optimize()
|
|||
pass_num++; \
|
||||
bool this_progress = pass(args); \
|
||||
\
|
||||
if ((INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
|
||||
if (INTEL_DEBUG(DEBUG_OPTIMIZER) && this_progress) { \
|
||||
char filename[64]; \
|
||||
snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
|
||||
stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
|
||||
|
@ -8503,7 +8503,7 @@ fs_visitor::optimize()
|
|||
this_progress; \
|
||||
})
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_OPTIMIZER) {
|
||||
if (INTEL_DEBUG(DEBUG_OPTIMIZER)) {
|
||||
char filename[64];
|
||||
snprintf(filename, 64, "%s%d-%s-00-00-start",
|
||||
stage_abbrev, dispatch_width, nir->info.name);
|
||||
|
@ -8862,7 +8862,7 @@ fs_visitor::allocate_registers(bool allow_spilling)
|
|||
"lifo"
|
||||
};
|
||||
|
||||
bool spill_all = allow_spilling && (INTEL_DEBUG & DEBUG_SPILL_FS);
|
||||
bool spill_all = allow_spilling && INTEL_DEBUG(DEBUG_SPILL_FS);
|
||||
|
||||
/* Try each scheduling heuristic to see if it can successfully register
|
||||
* allocate without spilling. They should be ordered by decreasing
|
||||
|
@ -9714,7 +9714,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
|
|||
struct brw_wm_prog_data *prog_data = params->prog_data;
|
||||
bool allow_spilling = params->allow_spilling;
|
||||
const bool debug_enabled =
|
||||
INTEL_DEBUG & (params->debug_flag ? params->debug_flag : DEBUG_WM);
|
||||
INTEL_DEBUG(params->debug_flag ? params->debug_flag : DEBUG_WM);
|
||||
|
||||
prog_data->base.stage = MESA_SHADER_FRAGMENT;
|
||||
|
||||
|
@ -9762,7 +9762,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
|
|||
params->error_str = ralloc_strdup(mem_ctx, v8->fail_msg);
|
||||
delete v8;
|
||||
return NULL;
|
||||
} else if (!(INTEL_DEBUG & DEBUG_NO8)) {
|
||||
} else if (!INTEL_DEBUG(DEBUG_NO8)) {
|
||||
simd8_cfg = v8->cfg;
|
||||
prog_data->base.dispatch_grf_start_reg = v8->payload.num_regs;
|
||||
prog_data->reg_blocks_8 = brw_register_blocks(v8->grf_used);
|
||||
|
@ -9776,7 +9776,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
|
|||
* See: https://gitlab.freedesktop.org/mesa/mesa/-/issues/1917
|
||||
*/
|
||||
if (devinfo->ver == 8 && prog_data->dual_src_blend &&
|
||||
!(INTEL_DEBUG & DEBUG_NO8)) {
|
||||
!INTEL_DEBUG(DEBUG_NO8)) {
|
||||
assert(!params->use_rep_send);
|
||||
v8->limit_dispatch_width(8, "gfx8 workaround: "
|
||||
"using SIMD8 when dual src blending.\n");
|
||||
|
@ -9793,7 +9793,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
|
|||
|
||||
if (!has_spilled &&
|
||||
v8->max_dispatch_width >= 16 &&
|
||||
(!(INTEL_DEBUG & DEBUG_NO16) || params->use_rep_send)) {
|
||||
(!INTEL_DEBUG(DEBUG_NO16) || params->use_rep_send)) {
|
||||
/* Try a SIMD16 compile */
|
||||
v16 = new fs_visitor(compiler, params->log_data, mem_ctx, &key->base,
|
||||
&prog_data->base, nir, 16,
|
||||
|
@ -9821,7 +9821,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
|
|||
if (!has_spilled &&
|
||||
v8->max_dispatch_width >= 32 && !params->use_rep_send &&
|
||||
devinfo->ver >= 6 && !simd16_failed &&
|
||||
!(INTEL_DEBUG & DEBUG_NO32)) {
|
||||
!INTEL_DEBUG(DEBUG_NO32)) {
|
||||
/* Try a SIMD32 compile */
|
||||
v32 = new fs_visitor(compiler, params->log_data, mem_ctx, &key->base,
|
||||
&prog_data->base, nir, 32,
|
||||
|
@ -9835,7 +9835,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
|
|||
} else {
|
||||
const performance &perf = v32->performance_analysis.require();
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_DO32) && throughput >= perf.throughput) {
|
||||
if (!INTEL_DEBUG(DEBUG_DO32) && throughput >= perf.throughput) {
|
||||
brw_shader_perf_log(compiler, params->log_data,
|
||||
"SIMD32 shader inefficient\n");
|
||||
} else {
|
||||
|
@ -10102,7 +10102,7 @@ brw_compile_cs(const struct brw_compiler *compiler,
|
|||
int shader_time_index = params->shader_time ? params->shader_time_index : -1;
|
||||
|
||||
const bool debug_enabled =
|
||||
INTEL_DEBUG & (params->debug_flag ? params->debug_flag : DEBUG_CS);
|
||||
INTEL_DEBUG(params->debug_flag ? params->debug_flag : DEBUG_CS);
|
||||
|
||||
prog_data->base.stage = MESA_SHADER_COMPUTE;
|
||||
prog_data->base.total_shared = nir->info.shared_size;
|
||||
|
@ -10166,7 +10166,7 @@ brw_compile_cs(const struct brw_compiler *compiler,
|
|||
fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
|
||||
fs_visitor *v = NULL;
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO8) &&
|
||||
if (!INTEL_DEBUG(DEBUG_NO8) &&
|
||||
min_dispatch_width <= 8 && max_dispatch_width >= 8) {
|
||||
nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
|
||||
nir, 8, debug_enabled);
|
||||
|
@ -10189,7 +10189,7 @@ brw_compile_cs(const struct brw_compiler *compiler,
|
|||
cs_fill_push_const_info(compiler->devinfo, prog_data);
|
||||
}
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO16) &&
|
||||
if (!INTEL_DEBUG(DEBUG_NO16) &&
|
||||
(generate_all || !prog_data->prog_spilled) &&
|
||||
min_dispatch_width <= 16 && max_dispatch_width >= 16) {
|
||||
/* Try a SIMD16 compile */
|
||||
|
@ -10231,10 +10231,10 @@ brw_compile_cs(const struct brw_compiler *compiler,
|
|||
* TODO: Use performance_analysis and drop this boolean.
|
||||
*/
|
||||
const bool needs_32 = v == NULL ||
|
||||
(INTEL_DEBUG & DEBUG_DO32) ||
|
||||
INTEL_DEBUG(DEBUG_DO32) ||
|
||||
generate_all;
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO32) &&
|
||||
if (!INTEL_DEBUG(DEBUG_NO32) &&
|
||||
(generate_all || !prog_data->prog_spilled) &&
|
||||
needs_32 &&
|
||||
min_dispatch_width <= 32 && max_dispatch_width >= 32) {
|
||||
|
@ -10272,7 +10272,7 @@ brw_compile_cs(const struct brw_compiler *compiler,
|
|||
}
|
||||
}
|
||||
|
||||
if (unlikely(!v) && (INTEL_DEBUG & (DEBUG_NO8 | DEBUG_NO16 | DEBUG_NO32))) {
|
||||
if (unlikely(!v) && INTEL_DEBUG(DEBUG_NO8 | DEBUG_NO16 | DEBUG_NO32)) {
|
||||
params->error_str =
|
||||
ralloc_strdup(mem_ctx,
|
||||
"Cannot satisfy INTEL_DEBUG flags SIMD restrictions");
|
||||
|
@ -10350,7 +10350,7 @@ brw_cs_simd_size_for_group_size(const struct intel_device_info *devinfo,
|
|||
static const unsigned simd16 = 1 << 1;
|
||||
static const unsigned simd32 = 1 << 2;
|
||||
|
||||
if ((INTEL_DEBUG & DEBUG_DO32) && (mask & simd32))
|
||||
if (INTEL_DEBUG(DEBUG_DO32) && (mask & simd32))
|
||||
return 32;
|
||||
|
||||
const uint32_t max_threads = devinfo->max_cs_workgroup_threads;
|
||||
|
@ -10408,7 +10408,7 @@ compile_single_bs(const struct brw_compiler *compiler, void *log_data,
|
|||
int *prog_offset,
|
||||
char **error_str)
|
||||
{
|
||||
const bool debug_enabled = INTEL_DEBUG & DEBUG_RT;
|
||||
const bool debug_enabled = INTEL_DEBUG(DEBUG_RT);
|
||||
|
||||
prog_data->base.stage = shader->info.stage;
|
||||
prog_data->max_stack_size = MAX2(prog_data->max_stack_size,
|
||||
|
@ -10423,7 +10423,7 @@ compile_single_bs(const struct brw_compiler *compiler, void *log_data,
|
|||
bool has_spilled = false;
|
||||
|
||||
uint8_t simd_size = 0;
|
||||
if (!(INTEL_DEBUG & DEBUG_NO8)) {
|
||||
if (!INTEL_DEBUG(DEBUG_NO8)) {
|
||||
v8 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
|
||||
&prog_data->base, shader,
|
||||
8, -1 /* shader time */, debug_enabled);
|
||||
|
@ -10441,7 +10441,7 @@ compile_single_bs(const struct brw_compiler *compiler, void *log_data,
|
|||
}
|
||||
}
|
||||
|
||||
if (!has_spilled && !(INTEL_DEBUG & DEBUG_NO16)) {
|
||||
if (!has_spilled && !INTEL_DEBUG(DEBUG_NO16)) {
|
||||
v16 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
|
||||
&prog_data->base, shader,
|
||||
16, -1 /* shader time */, debug_enabled);
|
||||
|
@ -10469,7 +10469,7 @@ compile_single_bs(const struct brw_compiler *compiler, void *log_data,
|
|||
}
|
||||
|
||||
if (unlikely(v == NULL)) {
|
||||
assert(INTEL_DEBUG & (DEBUG_NO8 | DEBUG_NO16));
|
||||
assert(INTEL_DEBUG(DEBUG_NO8 | DEBUG_NO16));
|
||||
if (error_str) {
|
||||
*error_str = ralloc_strdup(mem_ctx,
|
||||
"Cannot satisfy INTEL_DEBUG flags SIMD restrictions");
|
||||
|
@ -10516,7 +10516,7 @@ brw_compile_bs(const struct brw_compiler *compiler, void *log_data,
|
|||
struct brw_compile_stats *stats,
|
||||
char **error_str)
|
||||
{
|
||||
const bool debug_enabled = INTEL_DEBUG & DEBUG_RT;
|
||||
const bool debug_enabled = INTEL_DEBUG(DEBUG_RT);
|
||||
|
||||
prog_data->base.stage = shader->info.stage;
|
||||
prog_data->max_stack_size = 0;
|
||||
|
@ -10540,7 +10540,7 @@ brw_compile_bs(const struct brw_compiler *compiler, void *log_data,
|
|||
|
||||
uint64_t *resume_sbt = ralloc_array(mem_ctx, uint64_t, num_resume_shaders);
|
||||
for (unsigned i = 0; i < num_resume_shaders; i++) {
|
||||
if (INTEL_DEBUG & DEBUG_RT) {
|
||||
if (INTEL_DEBUG(DEBUG_RT)) {
|
||||
char *name = ralloc_asprintf(mem_ctx, "%s %s resume(%u) shader %s",
|
||||
shader->info.label ?
|
||||
shader->info.label : "unnamed",
|
||||
|
|
|
@ -1294,14 +1294,14 @@ backend_shader::dump_instructions(const char *name) const
|
|||
if (cfg) {
|
||||
int ip = 0;
|
||||
foreach_block_and_inst(block, backend_instruction, inst, cfg) {
|
||||
if (!(INTEL_DEBUG & DEBUG_OPTIMIZER))
|
||||
if (!INTEL_DEBUG(DEBUG_OPTIMIZER))
|
||||
fprintf(file, "%4d: ", ip++);
|
||||
dump_instruction(inst, file);
|
||||
}
|
||||
} else {
|
||||
int ip = 0;
|
||||
foreach_in_list(backend_instruction, inst, &instructions) {
|
||||
if (!(INTEL_DEBUG & DEBUG_OPTIMIZER))
|
||||
if (!INTEL_DEBUG(DEBUG_OPTIMIZER))
|
||||
fprintf(file, "%4d: ", ip++);
|
||||
dump_instruction(inst, file);
|
||||
}
|
||||
|
@ -1340,7 +1340,7 @@ brw_compile_tes(const struct brw_compiler *compiler,
|
|||
{
|
||||
const struct intel_device_info *devinfo = compiler->devinfo;
|
||||
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_EVAL];
|
||||
const bool debug_enabled = INTEL_DEBUG & DEBUG_TES;
|
||||
const bool debug_enabled = INTEL_DEBUG(DEBUG_TES);
|
||||
const unsigned *assembly;
|
||||
|
||||
prog_data->base.base.stage = MESA_SHADER_TESS_EVAL;
|
||||
|
|
|
@ -2748,7 +2748,7 @@ vec4_visitor::run()
|
|||
pass_num++; \
|
||||
bool this_progress = pass(args); \
|
||||
\
|
||||
if ((INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
|
||||
if (INTEL_DEBUG(DEBUG_OPTIMIZER) && this_progress) { \
|
||||
char filename[64]; \
|
||||
snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass, \
|
||||
stage_abbrev, nir->info.name, iteration, pass_num); \
|
||||
|
@ -2761,7 +2761,7 @@ vec4_visitor::run()
|
|||
})
|
||||
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_OPTIMIZER) {
|
||||
if (INTEL_DEBUG(DEBUG_OPTIMIZER)) {
|
||||
char filename[64];
|
||||
snprintf(filename, 64, "%s-%s-00-00-start",
|
||||
stage_abbrev, nir->info.name);
|
||||
|
@ -2824,7 +2824,7 @@ vec4_visitor::run()
|
|||
|
||||
setup_payload();
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SPILL_VEC4) {
|
||||
if (INTEL_DEBUG(DEBUG_SPILL_VEC4)) {
|
||||
/* Debug of register spilling: Go spill everything. */
|
||||
const int grf_count = alloc.count;
|
||||
float spill_costs[alloc.count];
|
||||
|
@ -2893,7 +2893,7 @@ brw_compile_vs(const struct brw_compiler *compiler,
|
|||
const struct brw_vs_prog_key *key = params->key;
|
||||
struct brw_vs_prog_data *prog_data = params->prog_data;
|
||||
const bool debug_enabled =
|
||||
INTEL_DEBUG & (params->debug_flag ? params->debug_flag : DEBUG_VS);
|
||||
INTEL_DEBUG(params->debug_flag ? params->debug_flag : DEBUG_VS);
|
||||
|
||||
prog_data->base.base.stage = MESA_SHADER_VERTEX;
|
||||
|
||||
|
|
|
@ -221,7 +221,7 @@ vec4_gs_visitor::emit_thread_end()
|
|||
vec4_instruction *inst = emit(MOV(mrf_reg, r0));
|
||||
inst->force_writemask_all = true;
|
||||
emit(GS_OPCODE_SET_VERTEX_COUNT, mrf_reg, this->vertex_count);
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME))
|
||||
emit_shader_time_end();
|
||||
inst = emit(GS_OPCODE_THREAD_END);
|
||||
inst->base_mrf = base_mrf;
|
||||
|
@ -597,7 +597,7 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
|
|||
c.key = *key;
|
||||
|
||||
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_GEOMETRY];
|
||||
const bool debug_enabled = INTEL_DEBUG & DEBUG_GS;
|
||||
const bool debug_enabled = INTEL_DEBUG(DEBUG_GS);
|
||||
|
||||
prog_data->base.base.stage = MESA_SHADER_GEOMETRY;
|
||||
|
||||
|
@ -853,7 +853,7 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
|
|||
* dual object mode.
|
||||
*/
|
||||
if (prog_data->invocations <= 1 &&
|
||||
!(INTEL_DEBUG & DEBUG_NO_DUAL_OBJECT_GS)) {
|
||||
!INTEL_DEBUG(DEBUG_NO_DUAL_OBJECT_GS)) {
|
||||
prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;
|
||||
|
||||
brw::vec4_gs_visitor v(compiler, log_data, &c, prog_data, nir,
|
||||
|
|
|
@ -143,7 +143,7 @@ vec4_tcs_visitor::emit_thread_end()
|
|||
emit(BRW_OPCODE_ENDIF);
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME))
|
||||
emit_shader_time_end();
|
||||
|
||||
inst = emit(TCS_OPCODE_THREAD_END);
|
||||
|
@ -368,7 +368,7 @@ brw_compile_tcs(const struct brw_compiler *compiler,
|
|||
const struct intel_device_info *devinfo = compiler->devinfo;
|
||||
struct brw_vue_prog_data *vue_prog_data = &prog_data->base;
|
||||
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];
|
||||
const bool debug_enabled = INTEL_DEBUG & DEBUG_TCS;
|
||||
const bool debug_enabled = INTEL_DEBUG(DEBUG_TCS);
|
||||
const unsigned *assembly;
|
||||
|
||||
vue_prog_data->base.stage = MESA_SHADER_TESS_CTRL;
|
||||
|
@ -495,7 +495,7 @@ brw_compile_tcs(const struct brw_compiler *compiler,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_TCS)
|
||||
if (INTEL_DEBUG(DEBUG_TCS))
|
||||
v.dump_instructions();
|
||||
|
||||
|
||||
|
|
|
@ -106,7 +106,7 @@ vec4_tes_visitor::emit_urb_write_opcode(bool complete)
|
|||
{
|
||||
/* For DS, the URB writes end the thread. */
|
||||
if (complete) {
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME))
|
||||
emit_shader_time_end();
|
||||
}
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ vec4_vs_visitor::emit_urb_write_opcode(bool complete)
|
|||
{
|
||||
/* For VS, the URB writes end the thread. */
|
||||
if (complete) {
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME))
|
||||
emit_shader_time_end();
|
||||
}
|
||||
|
||||
|
|
|
@ -42,7 +42,8 @@ extern "C" {
|
|||
|
||||
extern uint64_t intel_debug;
|
||||
|
||||
#define INTEL_DEBUG __builtin_expect(intel_debug, 0)
|
||||
/* Returns 0/1, not the matching bit mask. */
|
||||
#define INTEL_DEBUG(flags) unlikely(intel_debug & (flags))
|
||||
|
||||
#define DEBUG_TEXTURE (1ull << 0)
|
||||
#define DEBUG_STATE (1ull << 1)
|
||||
|
@ -94,6 +95,8 @@ extern uint64_t intel_debug;
|
|||
#define DEBUG_NO32 (1ull << 47)
|
||||
#define DEBUG_RT (1ull << 48)
|
||||
|
||||
#define DEBUG_ANY (~0ull)
|
||||
|
||||
/* These flags are not compatible with the disk shader cache */
|
||||
#define DEBUG_DISK_CACHE_DISABLE_MASK DEBUG_SHADER_TIME
|
||||
|
||||
|
@ -118,9 +121,9 @@ extern uint64_t intel_debug;
|
|||
#define dbg_printf(...) fprintf(stderr, __VA_ARGS__)
|
||||
#endif /* HAVE_ANDROID_PLATFORM */
|
||||
|
||||
#define DBG(...) do { \
|
||||
if (INTEL_DEBUG & FILE_DEBUG_FLAG) \
|
||||
dbg_printf(__VA_ARGS__); \
|
||||
#define DBG(...) do { \
|
||||
if (INTEL_DEBUG(FILE_DEBUG_FLAG)) \
|
||||
dbg_printf(__VA_ARGS__); \
|
||||
} while(0)
|
||||
|
||||
extern uint64_t intel_debug_flag_for_shader_stage(gl_shader_stage stage);
|
||||
|
|
|
@ -160,7 +160,7 @@ isl_drm_modifier_get_score(const struct intel_device_info *devinfo,
|
|||
if (devinfo->ver >= 12)
|
||||
return 0;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_RBC)
|
||||
if (INTEL_DEBUG(DEBUG_NO_RBC))
|
||||
return 0;
|
||||
|
||||
return 4;
|
||||
|
|
|
@ -78,7 +78,7 @@ get_sysfs_dev_dir(struct intel_perf_config *perf, int fd)
|
|||
|
||||
perf->sysfs_dev_dir[0] = '\0';
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_OACONFIG)
|
||||
if (INTEL_DEBUG(DEBUG_NO_OACONFIG))
|
||||
return true;
|
||||
|
||||
if (fstat(fd, &sb)) {
|
||||
|
@ -389,7 +389,7 @@ init_oa_sys_vars(struct intel_perf_config *perf,
|
|||
{
|
||||
uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG)) {
|
||||
if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
|
||||
if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
|
||||
return false;
|
||||
|
||||
|
@ -750,7 +750,7 @@ load_oa_metrics(struct intel_perf_config *perf, int fd,
|
|||
*/
|
||||
oa_register(perf);
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG)) {
|
||||
if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
|
||||
if (kernel_has_dynamic_config_support(perf, fd))
|
||||
init_oa_configs(perf, fd, devinfo);
|
||||
else
|
||||
|
|
|
@ -1929,7 +1929,7 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
|
|||
submit->cmd_buffer_count &&
|
||||
submit->perf_query_pool;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SUBMIT) {
|
||||
if (INTEL_DEBUG(DEBUG_SUBMIT)) {
|
||||
fprintf(stderr, "Batch offset=0x%x len=0x%x on queue 0\n",
|
||||
execbuf.execbuf.batch_start_offset, execbuf.execbuf.batch_len);
|
||||
for (uint32_t i = 0; i < execbuf.bo_count; i++) {
|
||||
|
@ -1940,7 +1940,7 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
|
|||
}
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
fprintf(stderr, "Batch on queue %d\n", (int)(queue - device->queues));
|
||||
if (submit->cmd_buffer_count) {
|
||||
if (has_perf_query) {
|
||||
|
@ -2009,7 +2009,7 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
|
|||
/* Some performance queries just the pipeline statistic HW, no need for
|
||||
* OA in that case, so no need to reconfigure.
|
||||
*/
|
||||
if ((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0 &&
|
||||
if (!INTEL_DEBUG(DEBUG_NO_OACONFIG) &&
|
||||
(query_info->kind == INTEL_PERF_QUERY_TYPE_OA ||
|
||||
query_info->kind == INTEL_PERF_QUERY_TYPE_RAW)) {
|
||||
int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
|
||||
|
|
|
@ -106,7 +106,7 @@ compiler_perf_log(UNUSED void *data, UNUSED unsigned *id, const char *fmt, ...)
|
|||
va_list args;
|
||||
va_start(args, fmt);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_PERF)
|
||||
if (INTEL_DEBUG(DEBUG_PERF))
|
||||
mesa_logd_v(fmt, args);
|
||||
|
||||
va_end(args);
|
||||
|
@ -206,7 +206,7 @@ get_device_extensions(const struct anv_physical_device *device,
|
|||
.KHR_performance_query =
|
||||
device->use_softpin && device->perf &&
|
||||
(device->perf->i915_perf_version >= 3 ||
|
||||
INTEL_DEBUG & DEBUG_NO_OACONFIG) &&
|
||||
INTEL_DEBUG(DEBUG_NO_OACONFIG)) &&
|
||||
device->use_call_secondary,
|
||||
.KHR_pipeline_executable_properties = true,
|
||||
.KHR_push_descriptor = true,
|
||||
|
@ -924,7 +924,7 @@ anv_physical_device_try_create(struct anv_instance *instance,
|
|||
device->has_reg_timestamp = anv_gem_reg_read(fd, TIMESTAMP | I915_REG_READ_8B_WA,
|
||||
&u64_ignore) == 0;
|
||||
|
||||
device->always_flush_cache = (INTEL_DEBUG & DEBUG_SYNC) ||
|
||||
device->always_flush_cache = INTEL_DEBUG(DEBUG_SYNC) ||
|
||||
driQueryOptionb(&instance->dri_options, "always_flush_cache");
|
||||
|
||||
device->has_mmap_offset =
|
||||
|
@ -2960,10 +2960,10 @@ VkResult anv_CreateDevice(
|
|||
if (result != VK_SUCCESS)
|
||||
goto fail_alloc;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
const unsigned decode_flags =
|
||||
INTEL_BATCH_DECODE_FULL |
|
||||
((INTEL_DEBUG & DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
|
||||
(INTEL_DEBUG(DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
|
||||
INTEL_BATCH_DECODE_OFFSETS |
|
||||
INTEL_BATCH_DECODE_FLOATS;
|
||||
|
||||
|
@ -3381,7 +3381,7 @@ void anv_DestroyDevice(
|
|||
|
||||
anv_gem_destroy_context(device, device->context_id);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH)
|
||||
if (INTEL_DEBUG(DEBUG_BATCH))
|
||||
intel_batch_decode_ctx_finish(&device->decoder_ctx);
|
||||
|
||||
close(device->fd);
|
||||
|
|
|
@ -551,7 +551,7 @@ add_aux_surface_if_supported(struct anv_device *device,
|
|||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_HIZ)
|
||||
if (INTEL_DEBUG(DEBUG_NO_HIZ))
|
||||
return VK_SUCCESS;
|
||||
|
||||
ok = isl_surf_get_hiz_surf(&device->isl_dev,
|
||||
|
@ -592,7 +592,7 @@ add_aux_surface_if_supported(struct anv_device *device,
|
|||
return add_aux_state_tracking_buffer(device, image, plane);
|
||||
} else if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_RBC)
|
||||
if (INTEL_DEBUG(DEBUG_NO_RBC))
|
||||
return VK_SUCCESS;
|
||||
|
||||
if (!isl_surf_supports_ccs(&device->isl_dev,
|
||||
|
@ -648,7 +648,7 @@ add_aux_surface_if_supported(struct anv_device *device,
|
|||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_RBC)
|
||||
if (INTEL_DEBUG(DEBUG_NO_RBC))
|
||||
return VK_SUCCESS;
|
||||
|
||||
ok = isl_surf_get_ccs_surf(&device->isl_dev,
|
||||
|
@ -2238,7 +2238,7 @@ anv_layout_to_fast_clear_type(const struct intel_device_info * const devinfo,
|
|||
const VkImageAspectFlagBits aspect,
|
||||
const VkImageLayout layout)
|
||||
{
|
||||
if (INTEL_DEBUG & DEBUG_NO_FAST_CLEAR)
|
||||
if (INTEL_DEBUG(DEBUG_NO_FAST_CLEAR))
|
||||
return ANV_FAST_CLEAR_NONE;
|
||||
|
||||
const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
|
||||
|
|
|
@ -68,7 +68,7 @@ anv_physical_device_init_perf(struct anv_physical_device *device, int fd)
|
|||
/* We need DRM_I915_PERF_PROP_HOLD_PREEMPTION support, only available in
|
||||
* perf revision 2.
|
||||
*/
|
||||
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG)) {
|
||||
if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
|
||||
if (!intel_perf_has_hold_preemption(perf))
|
||||
goto err;
|
||||
}
|
||||
|
@ -225,7 +225,7 @@ VkResult anv_AcquirePerformanceConfigurationINTEL(
|
|||
if (!config)
|
||||
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG)) {
|
||||
if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
|
||||
config->register_config =
|
||||
intel_perf_load_configuration(device->physical->perf, device->fd,
|
||||
INTEL_PERF_QUERY_GUID_MDAPI);
|
||||
|
@ -258,7 +258,7 @@ VkResult anv_ReleasePerformanceConfigurationINTEL(
|
|||
ANV_FROM_HANDLE(anv_device, device, _device);
|
||||
ANV_FROM_HANDLE(anv_performance_configuration_intel, config, _configuration);
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG))
|
||||
if (!INTEL_DEBUG(DEBUG_NO_OACONFIG))
|
||||
intel_ioctl(device->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config->config_id);
|
||||
|
||||
ralloc_free(config->register_config);
|
||||
|
@ -276,7 +276,7 @@ VkResult anv_QueueSetPerformanceConfigurationINTEL(
|
|||
ANV_FROM_HANDLE(anv_performance_configuration_intel, config, _configuration);
|
||||
struct anv_device *device = queue->device;
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG)) {
|
||||
if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
|
||||
if (device->perf_fd < 0) {
|
||||
device->perf_fd = anv_device_perf_open(device, config->config_id);
|
||||
if (device->perf_fd < 0)
|
||||
|
@ -405,7 +405,7 @@ VkResult anv_AcquireProfilingLockKHR(
|
|||
|
||||
assert(device->perf_fd == -1);
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG)) {
|
||||
if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
|
||||
fd = anv_device_perf_open(device, first_metric_set->oa_metrics_set_id);
|
||||
if (fd < 0)
|
||||
return VK_TIMEOUT;
|
||||
|
@ -420,7 +420,7 @@ void anv_ReleaseProfilingLockKHR(
|
|||
{
|
||||
ANV_FROM_HANDLE(anv_device, device, _device);
|
||||
|
||||
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG)) {
|
||||
if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
|
||||
assert(device->perf_fd >= 0);
|
||||
close(device->perf_fd);
|
||||
}
|
||||
|
|
|
@ -203,7 +203,7 @@ anv_shader_compile_to_nir(struct anv_device *device,
|
|||
};
|
||||
NIR_PASS_V(nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);
|
||||
|
||||
if (INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage)) {
|
||||
if (INTEL_DEBUG(intel_debug_flag_for_shader_stage(stage))) {
|
||||
fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
|
||||
gl_shader_stage_name(stage));
|
||||
nir_print_shader(nir, stderr);
|
||||
|
|
|
@ -406,7 +406,7 @@ void __anv_perf_warn(struct anv_device *device,
|
|||
#define anv_perf_warn(objects_macro, format, ...) \
|
||||
do { \
|
||||
static bool reported = false; \
|
||||
if (!reported && (INTEL_DEBUG & DEBUG_PERF)) { \
|
||||
if (!reported && INTEL_DEBUG(DEBUG_PERF)) { \
|
||||
__vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT, \
|
||||
VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, \
|
||||
objects_macro, __FILE__, __LINE__, \
|
||||
|
@ -4684,7 +4684,7 @@ anv_add_pending_pipe_bits(struct anv_cmd_buffer* cmd_buffer,
|
|||
const char* reason)
|
||||
{
|
||||
cmd_buffer->state.pending_pipe_bits |= bits;
|
||||
if ((INTEL_DEBUG & DEBUG_PIPE_CONTROL) && bits)
|
||||
if (INTEL_DEBUG(DEBUG_PIPE_CONTROL) && bits)
|
||||
{
|
||||
fputs("pc: add ", stderr);
|
||||
anv_dump_pipe_bits(bits);
|
||||
|
|
|
@ -72,7 +72,7 @@ convert_pc_to_bits(struct GENX(PIPE_CONTROL) *pc) {
|
|||
}
|
||||
|
||||
#define anv_debug_dump_pc(pc) \
|
||||
if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) { \
|
||||
if (INTEL_DEBUG(DEBUG_PIPE_CONTROL)) { \
|
||||
fputs("pc: emit PC=( ", stderr); \
|
||||
anv_dump_pipe_bits(convert_pc_to_bits(&(pc))); \
|
||||
fprintf(stderr, ") reason: %s\n", __FUNCTION__); \
|
||||
|
@ -2056,7 +2056,7 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
|
|||
*/
|
||||
assert(cfg == cmd_buffer->device->l3_config);
|
||||
#else
|
||||
if (INTEL_DEBUG & DEBUG_L3) {
|
||||
if (INTEL_DEBUG(DEBUG_L3)) {
|
||||
mesa_logd("L3 config transition: ");
|
||||
intel_dump_l3_config(cfg, stderr);
|
||||
}
|
||||
|
|
|
@ -138,7 +138,7 @@ brw_batch_init(struct brw_context *brw)
|
|||
struct brw_batch *batch = &brw->batch;
|
||||
const struct intel_device_info *devinfo = &screen->devinfo;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
/* The shadow doesn't get relocs written so state decode fails. */
|
||||
batch->use_shadow_copy = false;
|
||||
} else
|
||||
|
@ -157,13 +157,13 @@ brw_batch_init(struct brw_context *brw)
|
|||
malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));
|
||||
batch->contains_fence_signal = false;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
batch->state_batch_sizes =
|
||||
_mesa_hash_table_u64_create(NULL);
|
||||
|
||||
const unsigned decode_flags =
|
||||
INTEL_BATCH_DECODE_FULL |
|
||||
((INTEL_DEBUG & DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
|
||||
(INTEL_DEBUG(DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
|
||||
INTEL_BATCH_DECODE_OFFSETS |
|
||||
INTEL_BATCH_DECODE_FLOATS;
|
||||
|
||||
|
@ -600,7 +600,7 @@ brw_new_batch(struct brw_context *brw)
|
|||
* while, because many programs won't cleanly destroy our context, so the
|
||||
* end-of-run printout may not happen.
|
||||
*/
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME))
|
||||
brw_collect_and_report_shader_time(brw);
|
||||
|
||||
brw_batch_maybe_noop(brw);
|
||||
|
@ -850,7 +850,7 @@ submit_batch(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
|
|||
throttle(brw);
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
intel_print_batch(&batch->decoder, batch->batch.map,
|
||||
4 * USED_BATCH(*batch),
|
||||
batch->batch.bo->gtt_offset, false);
|
||||
|
@ -899,7 +899,7 @@ _brw_batch_flush_fence(struct brw_context *brw,
|
|||
brw_bo_reference(brw->throttle_batch[0]);
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT)) {
|
||||
int bytes_for_commands = 4 * USED_BATCH(brw->batch);
|
||||
int bytes_for_state = brw->batch.state_used;
|
||||
fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%) (pkt),"
|
||||
|
@ -917,7 +917,7 @@ _brw_batch_flush_fence(struct brw_context *brw,
|
|||
|
||||
ret = submit_batch(brw, in_fence_fd, out_fence_fd);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SYNC) {
|
||||
if (INTEL_DEBUG(DEBUG_SYNC)) {
|
||||
fprintf(stderr, "waiting for idle\n");
|
||||
brw_bo_wait_rendering(brw->batch.batch.bo);
|
||||
}
|
||||
|
@ -1087,7 +1087,7 @@ brw_state_batch(struct brw_context *brw,
|
|||
assert(offset + size < batch->state.bo->size);
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BATCH) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH)) {
|
||||
_mesa_hash_table_u64_insert(batch->state_batch_sizes,
|
||||
offset, (void *) (uintptr_t) size);
|
||||
}
|
||||
|
|
|
@ -66,7 +66,7 @@ brw_upload_binding_table(struct brw_context *brw,
|
|||
stage_state->bind_bo_offset = 0;
|
||||
} else {
|
||||
/* Upload a new binding table. */
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME)) {
|
||||
brw_emit_buffer_surface_state(
|
||||
brw, &stage_state->surf_offset[
|
||||
prog_data->binding_table.shader_time_start],
|
||||
|
|
|
@ -1209,7 +1209,7 @@ do_single_blorp_clear(struct brw_context *brw, struct gl_framebuffer *fb,
|
|||
|
||||
bool can_fast_clear = !partial_clear;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_FAST_CLEAR)
|
||||
if (INTEL_DEBUG(DEBUG_NO_FAST_CLEAR))
|
||||
can_fast_clear = false;
|
||||
|
||||
uint8_t color_write_disable = 0;
|
||||
|
|
|
@ -62,7 +62,7 @@ debug_mask(const char *name, GLbitfield mask)
|
|||
{
|
||||
GLuint i;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_BLIT) {
|
||||
if (INTEL_DEBUG(DEBUG_BLIT)) {
|
||||
DBG("%s clear:", name);
|
||||
for (i = 0; i < BUFFER_COUNT; i++) {
|
||||
if (mask & (1 << i))
|
||||
|
@ -107,7 +107,7 @@ brw_fast_clear_depth(struct gl_context *ctx)
|
|||
struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];
|
||||
const struct intel_device_info *devinfo = &brw->screen->devinfo;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_FAST_CLEAR)
|
||||
if (INTEL_DEBUG(DEBUG_NO_FAST_CLEAR))
|
||||
return false;
|
||||
|
||||
if (devinfo->ver < 6)
|
||||
|
|
|
@ -885,7 +885,7 @@ brw_process_driconf_options(struct brw_context *brw)
|
|||
struct gl_context *ctx = &brw->ctx;
|
||||
const driOptionCache *const options = &brw->screen->optionCache;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_NO_HIZ) {
|
||||
if (INTEL_DEBUG(DEBUG_NO_HIZ)) {
|
||||
brw->has_hiz = false;
|
||||
/* On gfx6, you can only do separate stencil with HIZ. */
|
||||
if (devinfo->ver == 6)
|
||||
|
@ -1074,7 +1074,7 @@ brw_create_context(gl_api api,
|
|||
|
||||
_mesa_meta_init(ctx);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_PERF)
|
||||
if (INTEL_DEBUG(DEBUG_PERF))
|
||||
brw->perf_debug = true;
|
||||
|
||||
brw_initialize_cs_context_constants(brw);
|
||||
|
@ -1172,7 +1172,7 @@ brw_create_context(gl_api api,
|
|||
ctx->Const.RobustAccess = GL_TRUE;
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME))
|
||||
brw_init_shader_time(brw);
|
||||
|
||||
_mesa_override_extensions(ctx);
|
||||
|
@ -1251,7 +1251,7 @@ brw_destroy_context(__DRIcontext *driContextPriv)
|
|||
|
||||
_mesa_meta_free(&brw->ctx);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME)) {
|
||||
/* Force a report. */
|
||||
brw->shader_time.report_time = 0;
|
||||
|
||||
|
@ -1510,7 +1510,7 @@ brw_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
|
|||
* thus ignore the invalidate. */
|
||||
drawable->lastStamp = drawable->dri2.stamp;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_DRI)
|
||||
if (INTEL_DEBUG(DEBUG_DRI))
|
||||
fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
|
||||
|
||||
brw_query_dri2_buffers(brw, drawable, &buffers, &count);
|
||||
|
@ -1563,7 +1563,7 @@ brw_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
|
|||
* thus ignore the invalidate. */
|
||||
drawable->lastStamp = drawable->dri2.stamp;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_DRI)
|
||||
if (INTEL_DEBUG(DEBUG_DRI))
|
||||
fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
|
||||
|
||||
if (dri_screen->image.loader)
|
||||
|
@ -1742,7 +1742,7 @@ brw_process_dri2_buffer(struct brw_context *brw,
|
|||
if (old_name == buffer->name)
|
||||
return;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_DRI) {
|
||||
if (INTEL_DEBUG(DEBUG_DRI)) {
|
||||
fprintf(stderr,
|
||||
"attaching buffer %d, at %d, cpp %d, pitch %d\n",
|
||||
buffer->name, buffer->attachment,
|
||||
|
|
|
@ -376,7 +376,7 @@ struct brw_cache {
|
|||
|
||||
#define perf_debug(...) do { \
|
||||
static GLuint msg_id = 0; \
|
||||
if (INTEL_DEBUG & DEBUG_PERF) \
|
||||
if (INTEL_DEBUG(DEBUG_PERF)) \
|
||||
dbg_printf(__VA_ARGS__); \
|
||||
if (brw->perf_debug) \
|
||||
_mesa_gl_debugf(&brw->ctx, &msg_id, \
|
||||
|
|
|
@ -94,7 +94,7 @@ brw_codegen_cs_prog(struct brw_context *brw,
|
|||
.log_data = brw,
|
||||
};
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME)) {
|
||||
params.shader_time = true;
|
||||
params.shader_time_index =
|
||||
brw_get_shader_time_index(brw, &cp->program, ST_CS, true);
|
||||
|
|
|
@ -49,7 +49,7 @@ debug_enabled_for_stage(gl_shader_stage stage)
|
|||
DEBUG_VS, DEBUG_TCS, DEBUG_TES, DEBUG_GS, DEBUG_WM, DEBUG_CS,
|
||||
};
|
||||
assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_debug_flags));
|
||||
return (INTEL_DEBUG & stage_debug_flags[stage]) != 0;
|
||||
return INTEL_DEBUG(stage_debug_flags[stage]);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -391,7 +391,7 @@ void
|
|||
brw_disk_cache_init(struct brw_screen *screen)
|
||||
{
|
||||
#ifdef ENABLE_SHADER_CACHE
|
||||
if (INTEL_DEBUG & DEBUG_DISK_CACHE_DISABLE_MASK)
|
||||
if (INTEL_DEBUG(DEBUG_DISK_CACHE_DISABLE_MASK))
|
||||
return;
|
||||
|
||||
/* array length: print length + null char + 1 extra to verify it is unused */
|
||||
|
|
|
@ -256,7 +256,7 @@ brw_get_vertex_surface_type(struct brw_context *brw,
|
|||
const bool is_ivybridge_or_older =
|
||||
devinfo->verx10 <= 70 && !devinfo->is_baytrail;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_VERTS)
|
||||
if (INTEL_DEBUG(DEBUG_VERTS))
|
||||
fprintf(stderr, "type %s size %d normalized %d\n",
|
||||
_mesa_enum_to_string(glformat->Type),
|
||||
glformat->Size, glformat->Normalized);
|
||||
|
|
|
@ -120,7 +120,7 @@ brw_codegen_gs_prog(struct brw_context *brw,
|
|||
&prog_data);
|
||||
|
||||
int st_index = -1;
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME))
|
||||
st_index = brw_get_shader_time_index(brw, &gp->program, ST_GS, true);
|
||||
|
||||
if (unlikely(brw->perf_debug)) {
|
||||
|
|
|
@ -248,7 +248,7 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
|
|||
prog->ShadowSamplers = shader->shadow_samplers;
|
||||
|
||||
bool debug_enabled =
|
||||
(INTEL_DEBUG & intel_debug_flag_for_shader_stage(shader->Stage));
|
||||
INTEL_DEBUG(intel_debug_flag_for_shader_stage(shader->Stage));
|
||||
|
||||
if (debug_enabled && shader->ir) {
|
||||
fprintf(stderr, "GLSL IR for native %s shader %d:\n",
|
||||
|
|
|
@ -170,7 +170,7 @@ brw_miptree_choose_aux_usage(struct brw_context *brw,
|
|||
if (_mesa_is_format_color_format(mt->format)) {
|
||||
if (mt->surf.samples > 1) {
|
||||
mt->aux_usage = ISL_AUX_USAGE_MCS;
|
||||
} else if (!(INTEL_DEBUG & DEBUG_NO_RBC) &&
|
||||
} else if (!INTEL_DEBUG(DEBUG_NO_RBC) &&
|
||||
format_supports_ccs_e(brw, mt->format)) {
|
||||
mt->aux_usage = ISL_AUX_USAGE_CCS_E;
|
||||
} else if (brw->mesa_format_supports_render[mt->format]) {
|
||||
|
|
|
@ -249,7 +249,7 @@ brw_begin_perf_query(struct gl_context *ctx,
|
|||
|
||||
bool ret = intel_perf_begin_query(perf_ctx, obj);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_PERFMON)
|
||||
if (INTEL_DEBUG(DEBUG_PERFMON))
|
||||
dump_perf_queries(brw);
|
||||
|
||||
return ret;
|
||||
|
@ -315,7 +315,7 @@ brw_get_perf_query_data(struct gl_context *ctx,
|
|||
|
||||
DBG("GetData(%d)\n", o->Id);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_PERFMON)
|
||||
if (INTEL_DEBUG(DEBUG_PERFMON))
|
||||
dump_perf_queries(brw);
|
||||
|
||||
/* We expect that the frontend only calls this hook when it knows
|
||||
|
|
|
@ -311,7 +311,7 @@ do_blit_bitmap(struct gl_context *ctx,
|
|||
}
|
||||
out:
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SYNC)
|
||||
if (INTEL_DEBUG(DEBUG_SYNC))
|
||||
brw_batch_flush(brw);
|
||||
|
||||
if (unpack->BufferObj) {
|
||||
|
|
|
@ -791,7 +791,7 @@ brw_assign_common_binding_table_offsets(const struct intel_device_info *devinfo,
|
|||
stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME)) {
|
||||
stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
|
||||
next_binding_table_offset++;
|
||||
} else {
|
||||
|
|
|
@ -374,7 +374,7 @@ modifier_is_supported(const struct intel_device_info *devinfo,
|
|||
|
||||
if (modinfo->aux_usage == ISL_AUX_USAGE_CCS_E) {
|
||||
/* If INTEL_DEBUG=norbc is set, don't support any CCS_E modifiers */
|
||||
if (INTEL_DEBUG & DEBUG_NO_RBC)
|
||||
if (INTEL_DEBUG(DEBUG_NO_RBC))
|
||||
return false;
|
||||
|
||||
/* CCS_E is not supported for planar images */
|
||||
|
@ -2489,7 +2489,7 @@ shader_perf_log_mesa(void *data, unsigned *msg_id, const char *fmt, ...)
|
|||
va_list args;
|
||||
va_start(args, fmt);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_PERF) {
|
||||
if (INTEL_DEBUG(DEBUG_PERF)) {
|
||||
va_list args_copy;
|
||||
va_copy(args_copy, args);
|
||||
vfprintf(stderr, fmt, args_copy);
|
||||
|
@ -2561,7 +2561,7 @@ __DRIconfig **brw_init_screen(__DRIscreen *dri_screen)
|
|||
|
||||
brw_process_intel_debug_variable();
|
||||
|
||||
if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && devinfo->ver < 7) {
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME) && devinfo->ver < 7) {
|
||||
fprintf(stderr,
|
||||
"shader_time debugging requires gfx7 (Ivybridge) or better.\n");
|
||||
intel_debug &= ~DEBUG_SHADER_TIME;
|
||||
|
@ -2812,7 +2812,7 @@ __DRIconfig **brw_init_screen(__DRIscreen *dri_screen)
|
|||
|
||||
brw_screen_init_surface_formats(screen);
|
||||
|
||||
if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {
|
||||
if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT)) {
|
||||
unsigned int caps = brw_get_integer(screen, I915_PARAM_HAS_SCHEDULER);
|
||||
if (caps) {
|
||||
fprintf(stderr, "Kernel scheduler detected: %08x\n", caps);
|
||||
|
|
|
@ -619,7 +619,7 @@ brw_upload_pipeline_state(struct brw_context *brw,
|
|||
if (pipeline == BRW_RENDER_PIPELINE && brw->current_hash_scale != 1)
|
||||
brw_emit_hashing_mode(brw, UINT_MAX, UINT_MAX, 1);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_REEMIT) {
|
||||
if (INTEL_DEBUG(DEBUG_REEMIT)) {
|
||||
/* Always re-emit all state. */
|
||||
brw->NewGLState = ~0;
|
||||
ctx->NewDriverState = ~0ull;
|
||||
|
@ -689,7 +689,7 @@ brw_upload_pipeline_state(struct brw_context *brw,
|
|||
brw_get_pipeline_atoms(brw, pipeline);
|
||||
const int num_atoms = brw->num_atoms[pipeline];
|
||||
|
||||
if (INTEL_DEBUG) {
|
||||
if (INTEL_DEBUG(DEBUG_ANY)) {
|
||||
/* Debug version which enforces various sanity checks on the
|
||||
* state flags which are generated and checked to help ensure
|
||||
* state atoms are ordered correctly in the list.
|
||||
|
@ -723,7 +723,7 @@ brw_upload_pipeline_state(struct brw_context *brw,
|
|||
}
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_STATE) {
|
||||
if (INTEL_DEBUG(DEBUG_STATE)) {
|
||||
STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);
|
||||
|
||||
brw_update_dirty_count(mesa_bits, state.mesa);
|
||||
|
|
|
@ -100,7 +100,7 @@ brw_codegen_tcs_prog(struct brw_context *brw, struct brw_program *tcp,
|
|||
}
|
||||
|
||||
int st_index = -1;
|
||||
if (((INTEL_DEBUG & DEBUG_SHADER_TIME) && tep))
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME) && tep)
|
||||
st_index = brw_get_shader_time_index(brw, &tep->program, ST_TCS, true);
|
||||
|
||||
if (unlikely(brw->perf_debug)) {
|
||||
|
|
|
@ -63,7 +63,7 @@ brw_codegen_tes_prog(struct brw_context *brw,
|
|||
}
|
||||
|
||||
int st_index = -1;
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME))
|
||||
st_index = brw_get_shader_time_index(brw, &tep->program, ST_TES, true);
|
||||
|
||||
if (unlikely(brw->perf_debug)) {
|
||||
|
|
|
@ -189,12 +189,12 @@ brw_calculate_urb_fence(struct brw_context *brw, unsigned csize,
|
|||
exit(1);
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & (DEBUG_URB|DEBUG_PERF))
|
||||
if (INTEL_DEBUG(DEBUG_URB|DEBUG_PERF))
|
||||
fprintf(stderr, "URB CONSTRAINED\n");
|
||||
}
|
||||
|
||||
done:
|
||||
if (INTEL_DEBUG & DEBUG_URB)
|
||||
if (INTEL_DEBUG(DEBUG_URB))
|
||||
fprintf(stderr,
|
||||
"URB fence: %d ..VS.. %d ..GS.. %d ..CLP.. %d ..SF.. %d ..CS.. %d\n",
|
||||
brw->urb.vs_start,
|
||||
|
|
|
@ -172,7 +172,7 @@ brw_codegen_vs_prog(struct brw_context *brw,
|
|||
start_time = get_time();
|
||||
}
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_VS) {
|
||||
if (INTEL_DEBUG(DEBUG_VS)) {
|
||||
if (vp->program.info.is_arb_asm)
|
||||
brw_dump_arb_asm("vertex", &vp->program);
|
||||
}
|
||||
|
@ -187,7 +187,7 @@ brw_codegen_vs_prog(struct brw_context *brw,
|
|||
.log_data = brw,
|
||||
};
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME)) {
|
||||
params.shader_time = true;
|
||||
params.shader_time_index =
|
||||
brw_get_shader_time_index(brw, &vp->program, ST_VS,
|
||||
|
|
|
@ -101,7 +101,7 @@ brw_codegen_wm_prog(struct brw_context *brw,
|
|||
} else {
|
||||
brw_nir_setup_arb_uniforms(mem_ctx, nir, &fp->program, &prog_data.base);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_WM)
|
||||
if (INTEL_DEBUG(DEBUG_WM))
|
||||
brw_dump_arb_asm("fragment", &fp->program);
|
||||
}
|
||||
|
||||
|
@ -122,7 +122,7 @@ brw_codegen_wm_prog(struct brw_context *brw,
|
|||
.log_data = brw,
|
||||
};
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME)) {
|
||||
params.shader_time = true;
|
||||
params.shader_time_index8 =
|
||||
brw_get_shader_time_index(brw, &fp->program, ST_FS8,
|
||||
|
@ -164,7 +164,7 @@ brw_codegen_wm_prog(struct brw_context *brw,
|
|||
|
||||
brw_alloc_stage_scratch(brw, &brw->wm.base, prog_data.base.total_scratch);
|
||||
|
||||
if (((INTEL_DEBUG & DEBUG_WM) && fp->program.info.is_arb_asm))
|
||||
if (INTEL_DEBUG(DEBUG_WM) && fp->program.info.is_arb_asm)
|
||||
fprintf(stderr, "\n");
|
||||
|
||||
/* The param and pull_param arrays will be freed by the shader cache. */
|
||||
|
|
|
@ -4273,7 +4273,7 @@ genX(upload_cs_state)(struct brw_context *brw)
|
|||
const struct brw_cs_dispatch_info dispatch =
|
||||
brw_cs_get_dispatch_info(devinfo, cs_prog_data, brw->compute.group_size);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
|
||||
if (INTEL_DEBUG(DEBUG_SHADER_TIME)) {
|
||||
brw_emit_buffer_surface_state(
|
||||
brw, &stage_state->surf_offset[
|
||||
prog_data->binding_table.shader_time_start],
|
||||
|
|
|
@ -242,7 +242,7 @@ brw_emit_l3_state(struct brw_context *brw)
|
|||
update_urb_size(brw, cfg);
|
||||
brw->l3.config = cfg;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_L3) {
|
||||
if (INTEL_DEBUG(DEBUG_L3)) {
|
||||
fprintf(stderr, "L3 config transition (%f > %f): ", dw, dw_threshold);
|
||||
intel_dump_l3_config(cfg, stderr);
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue