ac: rename num_render_backends -> max_render_backends

Acked-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7542>
Marek Olšák 2020-11-10 21:34:27 -05:00 committed by Marge Bot
parent f2977a162a
commit 603b5340b9
17 changed files with 49 additions and 49 deletions

View File

@@ -537,14 +537,14 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
info->has_tmz_support = has_tmz_support(dev, info, amdinfo);
info->pa_sc_tile_steering_override = device_info.pa_sc_tile_steering_override;
-info->num_render_backends = amdinfo->rb_pipes;
+info->max_render_backends = amdinfo->rb_pipes;
/* The value returned by the kernel driver was wrong. */
if (info->family == CHIP_KAVERI)
-info->num_render_backends = 2;
+info->max_render_backends = 2;
/* Guess the number of enabled SEs because the kernel doesn't tell us. */
if (info->chip_class >= GFX10_3 && info->max_se > 1) {
-unsigned num_rbs_per_se = info->num_render_backends / info->max_se;
+unsigned num_rbs_per_se = info->max_render_backends / info->max_se;
info->num_se = util_bitcount(amdinfo->enabled_rb_pipes_mask) / num_rbs_per_se;
} else {
info->num_se = info->max_se;
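
Note: for intuition, a worked example of the SE guess above with made-up numbers (util_bitcount is Mesa's popcount helper; __builtin_popcount stands in for it here):

    /* Made-up GFX10_3 harvest case: one whole shader engine disabled. */
    unsigned max_render_backends = 8;      /* physical RBs, incl. disabled */
    unsigned max_se = 2;                   /* physical shader engines */
    unsigned enabled_rb_pipes_mask = 0x0f; /* only the 4 RBs of SE0 enabled */

    unsigned num_rbs_per_se = max_render_backends / max_se;       /* 4 */
    unsigned num_se = __builtin_popcount(enabled_rb_pipes_mask)
                      / num_rbs_per_se;                           /* 1 */
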
@@ -730,7 +730,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
info->family == CHIP_RENOIR)) ||
(info->drm_minor >= 34 && (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14)) ||
info->chip_class >= GFX10_3) {
-if (info->num_render_backends == 1)
+if (info->max_render_backends == 1)
info->use_display_dcc_unaligned = true;
else
info->use_display_dcc_with_retile_blit = true;
@@ -796,7 +796,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
info->min_sgpr_alloc = 128;
info->sgpr_alloc_granularity = 128;
/* Don't use late alloc on small chips. */
-info->use_late_alloc = info->num_render_backends > 4;
+info->use_late_alloc = info->max_render_backends > 4;
} else if (info->chip_class >= GFX8) {
info->num_physical_sgprs_per_simd = 800;
info->min_sgpr_alloc = 16;
@@ -988,7 +988,7 @@ void ac_print_gpu_info(struct radeon_info *info, FILE *f)
fprintf(f, "Render backend info:\n");
fprintf(f, " pa_sc_tile_steering_override = 0x%x\n", info->pa_sc_tile_steering_override);
-fprintf(f, " num_render_backends = %i\n", info->num_render_backends);
+fprintf(f, " max_render_backends = %i\n", info->max_render_backends);
fprintf(f, " num_tile_pipes = %i\n", info->num_tile_pipes);
fprintf(f, " pipe_interleave_bytes = %i\n", info->pipe_interleave_bytes);
fprintf(f, " enabled_rb_mask = 0x%x\n", info->enabled_rb_mask);
@@ -1168,7 +1168,7 @@ void ac_get_harvested_configs(struct radeon_info *info, unsigned raster_config,
unsigned sh_per_se = MAX2(info->max_sh_per_se, 1);
unsigned num_se = MAX2(info->max_se, 1);
unsigned rb_mask = info->enabled_rb_mask;
-unsigned num_rb = MIN2(info->num_render_backends, 16);
+unsigned num_rb = MIN2(info->max_render_backends, 16);
unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
unsigned rb_per_se = num_rb / num_se;
unsigned se_mask[4];
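
Note: these derived values are easy to sanity-check by hand; a sketch with hypothetical numbers, not taken from this diff (MIN2 as in Mesa's macros.h):

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    /* Hypothetical GCN part: 8 RBs, 4 SEs, 1 SH per SE. */
    unsigned num_rb     = MIN2(8u, 16u);                        /* 8 */
    unsigned num_se     = 4, sh_per_se = 1;
    unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2); /* 2 */
    unsigned rb_per_se  = num_rb / num_se;                      /* 2 */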

View File

@@ -189,7 +189,7 @@ struct radeon_info {
uint32_t r600_num_banks;
uint32_t gb_addr_config;
uint32_t pa_sc_tile_steering_override; /* CLEAR_STATE also sets this */
-uint32_t num_render_backends;
+uint32_t max_render_backends; /* number of render backends incl. disabled ones */
uint32_t num_tile_pipes; /* pipe count from PIPE_CONFIG */
uint32_t pipe_interleave_bytes;
uint32_t enabled_rb_mask; /* GCN harvest config */
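
Note: the new comment captures the point of the rename: max_render_backends counts physical RBs including harvested ones, while enabled_rb_mask records which are actually active. A consumer wanting the enabled count has to consult the mask — a sketch with a made-up mask value (util_bitcount is Mesa's popcount; __builtin_popcount used here):

    uint32_t max_render_backends = 4;
    uint32_t enabled_rb_mask = 0xb; /* made up: RB2 harvested */
    unsigned num_enabled_rb = __builtin_popcount(enabled_rb_mask); /* 3 */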

View File

@@ -464,7 +464,7 @@ bool ac_get_supported_modifiers(const struct radeon_info *info,
AMD_FMT_MOD_SET(PIPE, pipes))
if (util_format_get_blocksize(format) == 4) {
-if (info->num_render_backends == 1) {
+if (info->max_render_backends == 1) {
ADD_MOD(AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
@@ -526,7 +526,7 @@ bool ac_get_supported_modifiers(const struct radeon_info *info,
if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14 || info->chip_class >= GFX10_3) {
bool independent_128b = info->chip_class >= GFX10_3;
-if (info->num_render_backends == 1) {
+if (info->max_render_backends == 1) {
ADD_MOD(AMD_FMT_MOD | common_dcc |
AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |

View File

@@ -521,7 +521,7 @@ int main()
testcases[i].init(&info);
-info.num_render_backends = 1u << (testcases[i].se +
+info.max_render_backends = 1u << (testcases[i].se +
testcases[i].rb_per_se);
switch(info.chip_class) {
case GFX10:

View File

@@ -440,7 +440,7 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
-unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
+unsigned num_db = cmd_buffer->device->physical_device->rad_info.max_render_backends;
unsigned fence_offset, eop_bug_offset;
void *fence_ptr;

View File

@@ -3715,7 +3715,7 @@ radv_gfx9_compute_bin_size(const struct radv_pipeline *pipeline, const VkGraphic
VkExtent2D extent = {512, 512};
unsigned log_num_rb_per_se =
-util_logbase2_ceil(pipeline->device->physical_device->rad_info.num_render_backends /
+util_logbase2_ceil(pipeline->device->physical_device->rad_info.max_render_backends /
pipeline->device->physical_device->rad_info.max_se);
unsigned log_num_se = util_logbase2_ceil(pipeline->device->physical_device->rad_info.max_se);
@@ -3783,7 +3783,7 @@ radv_gfx10_compute_bin_size(const struct radv_pipeline *pipeline, const VkGraphi
const unsigned fmask_tag_size = 256;
const unsigned fmask_tag_count = 44;
-const unsigned rb_count = pipeline->device->physical_device->rad_info.num_render_backends;
+const unsigned rb_count = pipeline->device->physical_device->rad_info.max_render_backends;
const unsigned pipe_count = MAX2(rb_count, pipeline->device->physical_device->rad_info.num_sdp_interfaces);
const unsigned db_tag_part = (db_tag_count * rb_count / pipe_count) * db_tag_size * pipe_count;
@@ -3910,7 +3910,7 @@ radv_get_binning_settings(const struct radv_physical_device *pdev)
{
struct radv_binning_settings settings;
if (pdev->rad_info.has_dedicated_vram) {
-if (pdev->rad_info.num_render_backends > 4) {
+if (pdev->rad_info.max_render_backends > 4) {
settings.context_states_per_bin = 1;
settings.persistent_states_per_bin = 1;
} else {

View File

@@ -164,7 +164,7 @@ build_occlusion_query_shader(struct radv_device *device) {
nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
unsigned enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
-unsigned db_count = device->physical_device->rad_info.num_render_backends;
+unsigned db_count = device->physical_device->rad_info.max_render_backends;
nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
@@ -1160,7 +1160,7 @@ VkResult radv_CreateQueryPool(
switch(pCreateInfo->queryType) {
case VK_QUERY_TYPE_OCCLUSION:
-pool->stride = 16 * device->physical_device->rad_info.num_render_backends;
+pool->stride = 16 * device->physical_device->rad_info.max_render_backends;
break;
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
pool->stride = pipelinestat_block_size * 2;
@@ -1266,7 +1266,7 @@ VkResult radv_GetQueryPoolResults(
}
case VK_QUERY_TYPE_OCCLUSION: {
uint64_t const *src64 = (uint64_t const *)src;
-uint32_t db_count = device->physical_device->rad_info.num_render_backends;
+uint32_t db_count = device->physical_device->rad_info.max_render_backends;
uint32_t enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
uint64_t sample_count = 0;
available = 1;
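
Note: all three hunks in this file rest on the same invariant: the occlusion results buffer has one 16-byte {begin, end} slot per physical RB (hence the stride of 16 * max_render_backends), but only enabled RBs ever write theirs. A simplified sketch of the accumulation pattern (sum_occlusion is a hypothetical helper, not the driver's code; the real code also tracks per-slot availability, omitted here):

    #include <stdint.h>

    static uint64_t sum_occlusion(const uint64_t *src64, uint32_t db_count,
                                  uint32_t enabled_rb_mask)
    {
       uint64_t sample_count = 0;
       for (uint32_t i = 0; i < db_count; i++) {
          if (!(enabled_rb_mask & (1u << i)))
             continue; /* harvested RB: its slot is never written */
          sample_count += src64[2 * i + 1] - src64[2 * i]; /* end - begin */
       }
       return sample_count;
    }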

View File

@@ -154,7 +154,7 @@ static void
si_set_raster_config(struct radv_physical_device *physical_device,
struct radeon_cmdbuf *cs)
{
-unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
+unsigned num_rb = MIN2(physical_device->rad_info.max_render_backends, 16);
unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
unsigned raster_config, raster_config_1;
@@ -426,7 +426,7 @@ si_emit_graphics(struct radv_device *device,
unsigned meta_write_policy, meta_read_policy;
/* TODO: investigate whether LRU improves performance on other chips too */
-if (physical_device->rad_info.num_render_backends <= 4) {
+if (physical_device->rad_info.max_render_backends <= 4) {
meta_write_policy = V_02807C_CACHE_LRU_WR; /* cache writes */
meta_read_policy = V_02807C_CACHE_LRU_RD; /* cache reads */
} else {

View File

@@ -127,7 +127,7 @@ static void radv_null_winsys_query_info(struct radeon_winsys *rws,
info->num_physical_wave64_vgprs_per_simd = info->chip_class >= GFX10 ? 512 : 256;
info->num_simd_per_compute_unit = info->chip_class >= GFX10 ? 2 : 4;
info->lds_size_per_workgroup = info->chip_class >= GFX10 ? 128 * 1024 : 64 * 1024;
-info->num_render_backends = gpu_info[info->family].num_render_backends;
+info->max_render_backends = gpu_info[info->family].num_render_backends;
info->has_dedicated_vram = gpu_info[info->family].has_dedicated_vram;
}

View File

@@ -1349,7 +1349,7 @@ bool r600_common_screen_init(struct r600_common_screen *rscreen,
printf("r600_gb_backend_map = %i\n", rscreen->info.r600_gb_backend_map);
printf("r600_gb_backend_map_valid = %i\n", rscreen->info.r600_gb_backend_map_valid);
printf("r600_num_banks = %i\n", rscreen->info.r600_num_banks);
-printf("num_render_backends = %i\n", rscreen->info.num_render_backends);
+printf("num_render_backends = %i\n", rscreen->info.max_render_backends);
printf("num_tile_pipes = %i\n", rscreen->info.num_tile_pipes);
printf("pipe_interleave_bytes = %i\n", rscreen->info.pipe_interleave_bytes);
printf("enabled_rb_mask = 0x%x\n", rscreen->info.enabled_rb_mask);

View File

@@ -430,7 +430,7 @@ static bool r600_query_sw_get_result(struct r600_common_context *rctx,
result->u32 = rctx->screen->info.num_good_compute_units;
return true;
case R600_QUERY_GPIN_NUM_RB:
-result->u32 = rctx->screen->info.num_render_backends;
+result->u32 = rctx->screen->info.max_render_backends;
return true;
case R600_QUERY_GPIN_NUM_SPI:
result->u32 = 1; /* all supported chips have one SPI per SE */
@@ -537,7 +537,7 @@ static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
-unsigned max_rbs = rscreen->info.num_render_backends;
+unsigned max_rbs = rscreen->info.max_render_backends;
unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
unsigned num_results;
unsigned i, j;
@@ -622,7 +622,7 @@ static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscree
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
-query->result_size = 16 * rscreen->info.num_render_backends;
+query->result_size = 16 * rscreen->info.max_render_backends;
query->result_size += 16; /* for the fence + alignment */
query->num_cs_dw_begin = 6;
query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
@@ -821,7 +821,7 @@ static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
-fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
+fence_va = va + ctx->screen->info.max_render_backends * 16 - 8;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
case PIPE_QUERY_PRIMITIVES_GENERATED:
@@ -1082,7 +1082,7 @@ static void r600_get_hw_query_params(struct r600_common_context *rctx,
struct r600_query_hw *rquery, int index,
struct r600_hw_query_params *params)
{
-unsigned max_rbs = rctx->screen->info.num_render_backends;
+unsigned max_rbs = rctx->screen->info.max_render_backends;
params->pair_stride = 0;
params->pair_count = 1;
@@ -1173,7 +1173,7 @@ static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
void *buffer,
union pipe_query_result *result)
{
-unsigned max_rbs = rscreen->info.num_render_backends;
+unsigned max_rbs = rscreen->info.max_render_backends;
switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER: {
@@ -1848,9 +1848,9 @@ void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
* written to. By increasing this number we'll write the
* status bit for these as per the normal disabled rb logic.
*/
-ctx->screen->info.num_render_backends = 8;
+ctx->screen->info.max_render_backends = 8;
}
-max_rbs = ctx->screen->info.num_render_backends;
+max_rbs = ctx->screen->info.max_render_backends;
assert(rscreen->chip_class <= CAYMAN);
@@ -2123,7 +2123,7 @@ void r600_query_init(struct r600_common_context *rctx)
rctx->b.get_query_result_resource = r600_get_query_result_resource;
rctx->render_cond_atom.emit = r600_emit_query_predication;
-if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
+if (((struct r600_common_screen*)rctx->b.screen)->info.max_render_backends > 0)
rctx->b.render_condition = r600_render_condition;
list_inithead(&rctx->active_queries);

View File

@@ -90,7 +90,7 @@ void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs, unsigne
struct si_resource *scratch = unlikely(ctx->ws->cs_is_secure(ctx->gfx_cs)) ?
ctx->eop_bug_scratch_tmz : ctx->eop_bug_scratch;
-assert(16 * ctx->screen->info.num_render_backends <= scratch->b.b.width0);
+assert(16 * ctx->screen->info.max_render_backends <= scratch->b.b.width0);
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
radeon_emit(cs, scratch->gpu_address);

View File

@@ -477,11 +477,11 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
if (sctx->chip_class == GFX7 || sctx->chip_class == GFX8 || sctx->chip_class == GFX9) {
sctx->eop_bug_scratch = si_aligned_buffer_create(
&sscreen->b, SI_RESOURCE_FLAG_DRIVER_INTERNAL,
-PIPE_USAGE_DEFAULT, 16 * sscreen->info.num_render_backends, 256);
+PIPE_USAGE_DEFAULT, 16 * sscreen->info.max_render_backends, 256);
if (sctx->screen->info.has_tmz_support)
sctx->eop_bug_scratch_tmz = si_aligned_buffer_create(
&sscreen->b, PIPE_RESOURCE_FLAG_ENCRYPTED | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
-PIPE_USAGE_DEFAULT, 16 * sscreen->info.num_render_backends, 256);
+PIPE_USAGE_DEFAULT, 16 * sscreen->info.max_render_backends, 256);
if (!sctx->eop_bug_scratch)
goto fail;
}
@@ -1235,7 +1235,7 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
if (sscreen->dpbb_allowed) {
if (sscreen->info.has_dedicated_vram) {
-if (sscreen->info.num_render_backends > 4) {
+if (sscreen->info.max_render_backends > 4) {
sscreen->pbb_context_states_per_bin = 1;
sscreen->pbb_persistent_states_per_bin = 1;
} else {

View File

@@ -529,7 +529,7 @@ static bool si_query_sw_get_result(struct si_context *sctx, struct si_query *squ
result->u32 = sctx->screen->info.num_good_compute_units;
return true;
case SI_QUERY_GPIN_NUM_RB:
-result->u32 = sctx->screen->info.num_render_backends;
+result->u32 = sctx->screen->info.max_render_backends;
return true;
case SI_QUERY_GPIN_NUM_SPI:
result->u32 = 1; /* all supported chips have one SPI per SE */
@@ -678,7 +678,7 @@ static bool si_query_hw_prepare_buffer(struct si_context *sctx, struct si_query_
if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
-unsigned max_rbs = screen->info.num_render_backends;
+unsigned max_rbs = screen->info.max_render_backends;
unsigned enabled_rb_mask = screen->info.enabled_rb_mask;
unsigned num_results;
unsigned i, j;
@@ -735,7 +735,7 @@ static struct pipe_query *si_query_hw_create(struct si_screen *sscreen, unsigned
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
-query->result_size = 16 * sscreen->info.num_render_backends;
+query->result_size = 16 * sscreen->info.max_render_backends;
query->result_size += 16; /* for the fence + alignment */
query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
break;
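
Note: as in the radv and r600 hunks, the 16 bytes per RB are a begin/end pair of 64-bit ZPASS_DONE counters, and the extra 16 bytes hold the fence plus alignment. A sketch of the assumed per-slot layout (occlusion_slot is an illustrative name, not a driver type):

    #include <stdint.h>

    /* One slot per physical RB; result_size = 16 * max_rbs + 16. */
    struct occlusion_slot {
       uint64_t begin; /* counter value at query begin */
       uint64_t end;   /* counter value at query end */
    };
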
@@ -912,7 +912,7 @@ static void si_query_hw_do_emit_stop(struct si_context *sctx, struct si_query_hw
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
-fence_va = va + sctx->screen->info.num_render_backends * 16 - 8;
+fence_va = va + sctx->screen->info.max_render_backends * 16 - 8;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
case PIPE_QUERY_PRIMITIVES_GENERATED:
@@ -1180,7 +1180,7 @@ bool si_query_hw_end(struct si_context *sctx, struct si_query *squery)
static void si_get_hw_query_params(struct si_context *sctx, struct si_query_hw *squery, int index,
struct si_hw_query_params *params)
{
-unsigned max_rbs = sctx->screen->info.num_render_backends;
+unsigned max_rbs = sctx->screen->info.max_render_backends;
params->pair_stride = 0;
params->pair_count = 1;
@@ -1264,7 +1264,7 @@ static unsigned si_query_read_result(void *map, unsigned start_index, unsigned e
static void si_query_hw_add_result(struct si_screen *sscreen, struct si_query_hw *query,
void *buffer, union pipe_query_result *result)
{
-unsigned max_rbs = sscreen->info.num_render_backends;
+unsigned max_rbs = sscreen->info.max_render_backends;
switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER: {

View File

@@ -2152,7 +2152,7 @@ static bool si_is_format_supported(struct pipe_screen *screen, enum pipe_format
/* Chips with 1 RB don't increment occlusion queries at 16x MSAA sample rate,
* so don't expose 16 samples there.
*/
-const unsigned max_eqaa_samples = sscreen->info.num_render_backends == 1 ? 8 : 16;
+const unsigned max_eqaa_samples = sscreen->info.max_render_backends == 1 ? 8 : 16;
const unsigned max_samples = 8;
/* MSAA support without framebuffer attachments. */
@@ -5067,7 +5067,7 @@ static void si_write_harvested_raster_configs(struct si_context *sctx, struct si
static void si_set_raster_config(struct si_context *sctx, struct si_pm4_state *pm4)
{
struct si_screen *sscreen = sctx->screen;
-unsigned num_rb = MIN2(sscreen->info.num_render_backends, 16);
+unsigned num_rb = MIN2(sscreen->info.max_render_backends, 16);
unsigned rb_mask = sscreen->info.enabled_rb_mask;
unsigned raster_config = sscreen->pa_sc_raster_config;
unsigned raster_config_1 = sscreen->pa_sc_raster_config_1;
@@ -5326,7 +5326,7 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
/* Enable CMASK/FMASK/HTILE/DCC caching in L2 for small chips. */
unsigned meta_write_policy, meta_read_policy;
-if (sscreen->info.num_render_backends <= 4) {
+if (sscreen->info.max_render_backends <= 4) {
meta_write_policy = V_02807C_CACHE_LRU_WR; /* cache writes */
meta_read_policy = V_02807C_CACHE_LRU_RD; /* cache reads */
} else {

View File

@@ -44,7 +44,7 @@ static struct uvec2 si_find_bin_size(struct si_screen *sscreen, const si_bin_siz
unsigned sum)
{
unsigned log_num_rb_per_se =
-util_logbase2_ceil(sscreen->info.num_render_backends / sscreen->info.max_se);
+util_logbase2_ceil(sscreen->info.max_render_backends / sscreen->info.max_se);
unsigned log_num_se = util_logbase2_ceil(sscreen->info.max_se);
unsigned i;
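
Note: a quick arithmetic check of the exponents above with hypothetical values (log2_ceil below mirrors Mesa's util_logbase2_ceil):

    /* ceil(log2(x)), mirroring util_logbase2_ceil(). */
    static unsigned log2_ceil(unsigned x)
    {
       unsigned log = 0;
       while ((1u << log) < x)
          log++;
       return log;
    }

    /* Hypothetical: 8 physical RBs across 2 shader engines gives
     * log_num_rb_per_se = log2_ceil(8 / 2) = 2 and log_num_se = log2_ceil(2) = 1. */
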
@@ -309,7 +309,7 @@ static void gfx10_get_bin_sizes(struct si_context *sctx, unsigned cb_target_enab
const unsigned FcTagSize = 256;
const unsigned FcReadTags = 44;
-const unsigned num_rbs = sctx->screen->info.num_render_backends;
+const unsigned num_rbs = sctx->screen->info.max_render_backends;
const unsigned num_pipes = MAX2(num_rbs, sctx->screen->info.num_sdp_interfaces);
const unsigned depthBinSizeTagPart =
@@ -470,7 +470,7 @@ void si_emit_dpbb_state(struct si_context *sctx)
G_02880C_DEPTH_BEFORE_SHADER(db_shader_control);
/* Disable DPBB when it's believed to be inefficient. */
-if (sscreen->info.num_render_backends > 4 && ps_can_kill && db_can_reject_z_trivially &&
+if (sscreen->info.max_render_backends > 4 && ps_can_kill && db_can_reject_z_trivially &&
sctx->framebuffer.state.zsbuf && dsa->db_can_write) {
si_emit_dpbb_disable(sctx);
return;

View File

@@ -404,7 +404,7 @@ static bool do_winsys_init(struct radeon_drm_winsys *ws)
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BACKENDS,
"num backends",
-&ws->info.num_render_backends))
+&ws->info.max_render_backends))
return false;
/* get the GPU counter frequency, failure is not fatal */
@@ -444,7 +444,7 @@ static bool do_winsys_init(struct radeon_drm_winsys *ws)
ws->info.r600_gb_backend_map_valid = true;
/* Default value. */
-ws->info.enabled_rb_mask = u_bit_consecutive(0, ws->info.num_render_backends);
+ws->info.enabled_rb_mask = u_bit_consecutive(0, ws->info.max_render_backends);
/*
* This fails (silently) on non-GCN or older kernels, overwriting the
* default enabled_rb_mask with the result of the last query.
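
Note: u_bit_consecutive(start, count) builds a mask of count consecutive set bits starting at bit start, so a 4-RB default yields enabled_rb_mask = 0xf. An illustrative stand-in, valid for count < 32 (Mesa's helper also handles the full-width case):

    #include <stdint.h>

    static inline uint32_t bit_consecutive(unsigned start, unsigned count)
    {
       return ((1u << count) - 1u) << start; /* e.g. (0, 4) -> 0x0000000f */
    }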