radeonsi: remove tabs from code

v2: fix indentation after if (Marek Olšák)

Signed-off-by: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17504>
Author: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
Date: 2022-07-13 08:49:52 +05:30 (committed by Marge Bot)
Commit: 2330c71751, parent 301bcbac0e
10 changed files with 115 additions and 116 deletions


@@ -829,12 +829,12 @@ static struct ruvd_vc1 get_vc1_msg(struct pipe_vc1_picture_desc *pic)
result.chroma_format = 1;
#if 0
//(((unsigned int)(pPicParams->advance.reserved1)) << SPS_INFO_VC1_RESERVED_SHIFT)
uint32_t slice_count
uint8_t picture_type
uint8_t frame_coding_mode
uint8_t deblockEnable
uint8_t pquant
//(((unsigned int)(pPicParams->advance.reserved1)) << SPS_INFO_VC1_RESERVED_SHIFT)
uint32_t slice_count
uint8_t picture_type
uint8_t frame_coding_mode
uint8_t deblockEnable
uint8_t pquant
#endif
return result;
@@ -952,14 +952,14 @@ static struct ruvd_mpeg4 get_mpeg4_msg(struct ruvd_decoder *dec,
}
/*
int32_t trd [2]
int32_t trb [2]
uint8_t vop_coding_type
uint8_t vop_fcode_forward
uint8_t vop_fcode_backward
uint8_t rounding_control
uint8_t alternate_vertical_scan_flag
uint8_t top_field_first
int32_t trd [2]
int32_t trb [2]
uint8_t vop_coding_type
uint8_t vop_fcode_forward
uint8_t vop_fcode_backward
uint8_t rounding_control
uint8_t alternate_vertical_scan_flag
uint8_t top_field_first
*/
return result;


@@ -628,25 +628,25 @@ static rvcn_dec_message_vp9_t get_vp9_msg(struct radeon_decoder *dec,
//clear the dec->render list if it is not used as a reference
for (i = 0; i < ARRAY_SIZE(dec->render_pic_list); i++) {
if (dec->render_pic_list[i]) {
for (j=0;j<8;j++) {
for (j=0;j<8;j++) {
if (dec->render_pic_list[i] == pic->ref[j])
break;
}
if(j == 8)
dec->render_pic_list[i] = NULL;
break;
}
if (j == 8)
dec->render_pic_list[i] = NULL;
}
}
for (i = 0; i < ARRAY_SIZE(dec->render_pic_list); ++i) {
if (dec->render_pic_list[i] && dec->render_pic_list[i] == target) {
if (target->codec != NULL){
result.curr_pic_idx =(uintptr_t)vl_video_buffer_get_associated_data(target, &dec->base);
} else {
result.curr_pic_idx = i;
vl_video_buffer_set_associated_data(target, &dec->base, (void *)(uintptr_t)i,
&radeon_dec_destroy_associated_data);
}
break;
if (target->codec != NULL) {
result.curr_pic_idx =(uintptr_t)vl_video_buffer_get_associated_data(target, &dec->base);
} else {
result.curr_pic_idx = i;
vl_video_buffer_set_associated_data(target, &dec->base, (void *)(uintptr_t)i,
&radeon_dec_destroy_associated_data);
}
break;
} else if (!dec->render_pic_list[i]) {
dec->render_pic_list[i] = target;
result.curr_pic_idx = i;
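
The hunk above only reindents this code; what it implements is a find-or-assign pass over a fixed-size picture list: entries no longer referenced by pic->ref[] are cleared, and the decode target then reuses its existing slot or claims a free one. A minimal standalone C sketch of that pattern (hypothetical names and sizes, omitting the driver's associated-data path) could look like:

#include <stddef.h>

#define NUM_SLOTS 16   /* illustrative size of the render list */
#define NUM_REFS   8   /* illustrative number of reference slots */

/* Clear list entries that no ref[] slot points to, then return the slot
 * index used for 'target': its existing slot if present, otherwise the
 * first free one.  Returns -1 if the list is full. */
static int update_render_list(const void *list[NUM_SLOTS],
                              const void *const refs[NUM_REFS],
                              const void *target)
{
   int i, j;

   for (i = 0; i < NUM_SLOTS; i++) {
      if (!list[i])
         continue;
      for (j = 0; j < NUM_REFS; j++) {
         if (list[i] == refs[j])
            break;
      }
      if (j == NUM_REFS)   /* no longer referenced -> free the slot */
         list[i] = NULL;
   }

   for (i = 0; i < NUM_SLOTS; i++) {
      if (list[i] == target)   /* already tracked: keep the same index */
         return i;
   }
   for (i = 0; i < NUM_SLOTS; i++) {
      if (!list[i]) {   /* otherwise take the first empty slot */
         list[i] = target;
         return i;
      }
   }
   return -1;
}
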
@@ -1317,33 +1317,33 @@ static void rvcn_av1_default_coef_probs(void *prob, int index)
static void rvcn_vcn4_av1_default_coef_probs(void *prob, int index)
{
rvcn_av1_vcn4_frame_context_t * fc = (rvcn_av1_vcn4_frame_context_t*)prob;
void *p;
int i, j;
unsigned size;
rvcn_av1_vcn4_frame_context_t *fc = (rvcn_av1_vcn4_frame_context_t*)prob;
void *p;
int i, j;
unsigned size;
memcpy(fc->txb_skip_cdf, av1_default_txb_skip_cdfs[index], sizeof(av1_default_txb_skip_cdfs[index]));
memcpy(fc->txb_skip_cdf, av1_default_txb_skip_cdfs[index], sizeof(av1_default_txb_skip_cdfs[index]));
p = (void *)fc->eob_extra_cdf;
size = sizeof(av1_default_eob_extra_cdfs[0][0][0][0]) * EOB_COEF_CONTEXTS_VCN4;
for (i = 0; i < AV1_TX_SIZES; i++) {
for ( j = 0; j < AV1_PLANE_TYPES; j++) {
memcpy(p, &av1_default_eob_extra_cdfs[index][i][j][3], size);
p += size;
}
}
p = (void *)fc->eob_extra_cdf;
size = sizeof(av1_default_eob_extra_cdfs[0][0][0][0]) * EOB_COEF_CONTEXTS_VCN4;
for (i = 0; i < AV1_TX_SIZES; i++) {
for ( j = 0; j < AV1_PLANE_TYPES; j++) {
memcpy(p, &av1_default_eob_extra_cdfs[index][i][j][3], size);
p += size;
}
}
memcpy(fc->dc_sign_cdf, av1_default_dc_sign_cdfs[index], sizeof(av1_default_dc_sign_cdfs[index]));
memcpy(fc->coeff_br_cdf, av1_default_coeff_lps_multi_cdfs[index], sizeof(av1_default_coeff_lps_multi_cdfs[index]));
memcpy(fc->coeff_base_cdf, av1_default_coeff_base_multi_cdfs[index], sizeof(av1_default_coeff_base_multi_cdfs[index]));
memcpy(fc->coeff_base_eob_cdf, av1_default_coeff_base_eob_multi_cdfs[index], sizeof(av1_default_coeff_base_eob_multi_cdfs[index]));
memcpy(fc->eob_flag_cdf16, av1_default_eob_multi16_cdfs[index], sizeof(av1_default_eob_multi16_cdfs[index]));
memcpy(fc->eob_flag_cdf32, av1_default_eob_multi32_cdfs[index], sizeof(av1_default_eob_multi32_cdfs[index]));
memcpy(fc->eob_flag_cdf64, av1_default_eob_multi64_cdfs[index], sizeof(av1_default_eob_multi64_cdfs[index]));
memcpy(fc->eob_flag_cdf128, av1_default_eob_multi128_cdfs[index], sizeof(av1_default_eob_multi128_cdfs[index]));
memcpy(fc->eob_flag_cdf256, av1_default_eob_multi256_cdfs[index], sizeof(av1_default_eob_multi256_cdfs[index]));
memcpy(fc->eob_flag_cdf512, av1_default_eob_multi512_cdfs[index], sizeof(av1_default_eob_multi512_cdfs[index]));
memcpy(fc->eob_flag_cdf1024, av1_default_eob_multi1024_cdfs[index], sizeof(av1_default_eob_multi1024_cdfs[index]));
memcpy(fc->dc_sign_cdf, av1_default_dc_sign_cdfs[index], sizeof(av1_default_dc_sign_cdfs[index]));
memcpy(fc->coeff_br_cdf, av1_default_coeff_lps_multi_cdfs[index], sizeof(av1_default_coeff_lps_multi_cdfs[index]));
memcpy(fc->coeff_base_cdf, av1_default_coeff_base_multi_cdfs[index], sizeof(av1_default_coeff_base_multi_cdfs[index]));
memcpy(fc->coeff_base_eob_cdf, av1_default_coeff_base_eob_multi_cdfs[index], sizeof(av1_default_coeff_base_eob_multi_cdfs[index]));
memcpy(fc->eob_flag_cdf16, av1_default_eob_multi16_cdfs[index], sizeof(av1_default_eob_multi16_cdfs[index]));
memcpy(fc->eob_flag_cdf32, av1_default_eob_multi32_cdfs[index], sizeof(av1_default_eob_multi32_cdfs[index]));
memcpy(fc->eob_flag_cdf64, av1_default_eob_multi64_cdfs[index], sizeof(av1_default_eob_multi64_cdfs[index]));
memcpy(fc->eob_flag_cdf128, av1_default_eob_multi128_cdfs[index], sizeof(av1_default_eob_multi128_cdfs[index]));
memcpy(fc->eob_flag_cdf256, av1_default_eob_multi256_cdfs[index], sizeof(av1_default_eob_multi256_cdfs[index]));
memcpy(fc->eob_flag_cdf512, av1_default_eob_multi512_cdfs[index], sizeof(av1_default_eob_multi512_cdfs[index]));
memcpy(fc->eob_flag_cdf1024, av1_default_eob_multi1024_cdfs[index], sizeof(av1_default_eob_multi1024_cdfs[index]));
}
static unsigned calc_ctx_size_h265_main(struct radeon_decoder *dec)
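
The rvcn_vcn4_av1_default_coef_probs() hunk is likewise indentation-only; the shape worth noting is the eob_extra_cdf copy, which takes only a trimmed slice of each (tx size, plane type) row and advances the write pointer by that trimmed size so the destination stays densely packed. A rough sketch of that strided copy, with placeholder dimensions, element type, and starting offset, might be:

#include <stdint.h>
#include <string.h>

#define TX_SIZES    5    /* placeholder outer dimension */
#define PLANE_TYPES 2    /* placeholder inner dimension */
#define SRC_CTXS    22   /* placeholder row length of the default table */
#define DST_CTXS    19   /* placeholder row length kept in the packed output */
#define SRC_OFFSET  3    /* first source element that is kept */

/* Copy a trimmed slice of every (tx size, plane) row into a densely
 * packed destination buffer, advancing by the trimmed row size. */
static void pack_cdf_rows(void *dst,
                          const uint16_t src[TX_SIZES][PLANE_TYPES][SRC_CTXS])
{
   uint8_t *p = dst;
   const size_t row_size = sizeof(src[0][0][0]) * DST_CTXS;

   for (int i = 0; i < TX_SIZES; i++) {
      for (int j = 0; j < PLANE_TYPES; j++) {
         memcpy(p, &src[i][j][SRC_OFFSET], row_size);
         p += row_size;
      }
   }
}
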
@@ -2033,10 +2033,9 @@ static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec,
index_codec->message_id = RDECODE_MESSAGE_AV1;
if (dec->ctx.res == NULL) {
unsigned frame_ctxt_size = dec->av1_version == RDECODE_AV1_VER_0
? align(sizeof(rvcn_av1_frame_context_t), 2048)
: align(sizeof(rvcn_av1_vcn4_frame_context_t), 2048);
? align(sizeof(rvcn_av1_frame_context_t), 2048)
: align(sizeof(rvcn_av1_vcn4_frame_context_t), 2048);
unsigned ctx_size = (9 + 4) * frame_ctxt_size + 9 * 64 * 34 * 512 + 9 * 64 * 34 * 256 * 5;
int num_64x64_CTB_8k = 68;


@@ -129,13 +129,13 @@ si_create_shadowing_ib_preamble(struct si_context *sctx)
S_586_GLK_INV(1) | S_586_GLI_INV(V_586_GLI_ALL);
si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
si_pm4_cmd_add(pm4, 0); /* CP_COHER_CNTL */
si_pm4_cmd_add(pm4, 0); /* CP_COHER_CNTL */
si_pm4_cmd_add(pm4, 0xffffffff); /* CP_COHER_SIZE */
si_pm4_cmd_add(pm4, 0xffffff); /* CP_COHER_SIZE_HI */
si_pm4_cmd_add(pm4, 0); /* CP_COHER_BASE */
si_pm4_cmd_add(pm4, 0); /* CP_COHER_BASE_HI */
si_pm4_cmd_add(pm4, 0xffffff); /* CP_COHER_SIZE_HI */
si_pm4_cmd_add(pm4, 0); /* CP_COHER_BASE */
si_pm4_cmd_add(pm4, 0); /* CP_COHER_BASE_HI */
si_pm4_cmd_add(pm4, 0x0000000A); /* POLL_INTERVAL */
si_pm4_cmd_add(pm4, gcr_cntl); /* GCR_CNTL */
si_pm4_cmd_add(pm4, gcr_cntl); /* GCR_CNTL */
si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
si_pm4_cmd_add(pm4, 0);
@@ -149,9 +149,9 @@ si_create_shadowing_ib_preamble(struct si_context *sctx)
si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
si_pm4_cmd_add(pm4, cp_coher_cntl); /* CP_COHER_CNTL */
si_pm4_cmd_add(pm4, 0xffffffff); /* CP_COHER_SIZE */
si_pm4_cmd_add(pm4, 0xffffff); /* CP_COHER_SIZE_HI */
si_pm4_cmd_add(pm4, 0); /* CP_COHER_BASE */
si_pm4_cmd_add(pm4, 0); /* CP_COHER_BASE_HI */
si_pm4_cmd_add(pm4, 0xffffff); /* CP_COHER_SIZE_HI */
si_pm4_cmd_add(pm4, 0); /* CP_COHER_BASE */
si_pm4_cmd_add(pm4, 0); /* CP_COHER_BASE_HI */
si_pm4_cmd_add(pm4, 0x0000000A); /* POLL_INTERVAL */
si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
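
Both ACQUIRE_MEM hunks above merely realign the per-dword comments; the packet itself is built by appending dwords in a fixed order. A self-contained sketch of that shape, using a hypothetical append helper instead of si_pm4_cmd_add() and placeholder values, could be:

#include <stdint.h>

struct dw_buf {
   uint32_t dw[64];
   unsigned ndw;
};

/* Hypothetical stand-in for si_pm4_cmd_add(): append one dword. */
static void emit_dw(struct dw_buf *b, uint32_t value)
{
   if (b->ndw < 64)
      b->dw[b->ndw++] = value;
}

/* Emit the GCR_CNTL flavour of ACQUIRE_MEM: a header plus seven payload
 * dwords, in the same order as the comments in the hunk above. */
static void emit_acquire_mem(struct dw_buf *b, uint32_t header, uint32_t gcr_cntl)
{
   emit_dw(b, header);       /* PKT3(PKT3_ACQUIRE_MEM, 6, 0) in the driver */
   emit_dw(b, 0);            /* CP_COHER_CNTL */
   emit_dw(b, 0xffffffff);   /* CP_COHER_SIZE */
   emit_dw(b, 0xffffff);     /* CP_COHER_SIZE_HI */
   emit_dw(b, 0);            /* CP_COHER_BASE */
   emit_dw(b, 0);            /* CP_COHER_BASE_HI */
   emit_dw(b, 0x0000000A);   /* POLL_INTERVAL */
   emit_dw(b, gcr_cntl);     /* GCR_CNTL */
}
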


@@ -276,13 +276,13 @@ static void si_set_buf_desc_address(struct si_resource *buf, uint64_t offset, ui
/* Set texture descriptor fields that can be changed by reallocations.
*
* \param tex texture
* \param base_level_info information of the level of BASE_ADDRESS
* \param base_level the level of BASE_ADDRESS
* \param first_level pipe_sampler_view.u.tex.first_level
* \param block_width util_format_get_blockwidth()
* \param is_stencil select between separate Z & Stencil
* \param state descriptor to update
* \param tex texture
* \param base_level_info information of the level of BASE_ADDRESS
* \param base_level the level of BASE_ADDRESS
* \param first_level pipe_sampler_view.u.tex.first_level
* \param block_width util_format_get_blockwidth()
* \param is_stencil select between separate Z & Stencil
* \param state descriptor to update
*/
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture *tex,
const struct legacy_surf_level *base_level_info,


@@ -54,15 +54,15 @@ struct si_fence {
/**
* Write an EOP event.
*
* \param event EVENT_TYPE_*
* \param event_flags Optional cache flush flags (TC)
* \param dst_sel MEM or TC_L2
* \param int_sel NONE or SEND_DATA_AFTER_WR_CONFIRM
* \param data_sel DISCARD, VALUE_32BIT, TIMESTAMP, or GDS
* \param buf Buffer
* \param va GPU address
* \param old_value Previous fence value (for a bug workaround)
* \param new_value Fence value to write for this event.
* \param event EVENT_TYPE_*
* \param event_flags Optional cache flush flags (TC)
* \param dst_sel MEM or TC_L2
* \param int_sel NONE or SEND_DATA_AFTER_WR_CONFIRM
* \param data_sel DISCARD, VALUE_32BIT, TIMESTAMP, or GDS
* \param buf Buffer
* \param va GPU address
* \param old_value Previous fence value (for a bug workaround)
* \param new_value Fence value to write for this event.
*/
void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs, unsigned event,
unsigned event_flags, unsigned dst_sel, unsigned int_sel, unsigned data_sel,
@@ -490,10 +490,10 @@ static void si_flush_all_queues(struct pipe_context *ctx,
tc_driver_internal_flush_notify(sctx->tc);
} else {
/* Instead of flushing, create a deferred fence. Constraints:
* - the gallium frontend must allow a deferred flush.
* - the gallium frontend must request a fence.
* - the gallium frontend must allow a deferred flush.
* - the gallium frontend must request a fence.
* - fence_get_fd is not allowed.
* Thread safety in fence_finish must be ensured by the gallium frontend.
* Thread safety in fence_finish must be ensured by the gallium frontend.
*/
if (flags & PIPE_FLUSH_DEFERRED && !(flags & PIPE_FLUSH_FENCE_FD) && fence) {
gfx_fence = sctx->ws->cs_get_next_fence(&sctx->gfx_cs);


@@ -1923,15 +1923,15 @@ bool si_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compi
/**
* Create, compile and return a shader part (prolog or epilog).
*
* \param sscreen screen
* \param list list of shader parts of the same category
* \param type shader type
* \param key shader part key
* \param prolog whether the part being requested is a prolog
* \param tm LLVM target machine
* \param debug debug callback
* \param build the callback responsible for building the main function
* \return non-NULL on success
* \param sscreen screen
* \param list list of shader parts of the same category
* \param type shader type
* \param key shader part key
* \param prolog whether the part being requested is a prolog
* \param tm LLVM target machine
* \param debug debug callback
* \param build the callback responsible for building the main function
* \return non-NULL on success
*/
static struct si_shader_part *
si_get_shader_part(struct si_screen *sscreen, struct si_shader_part **list,
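
The doxygen block above describes a cache lookup: si_get_shader_part() searches the given list for a part matching the key and otherwise builds and stores a new one. A generic, self-contained sketch of that find-or-create pattern (hypothetical types and build callback, not the actual radeonsi structures) could look like:

#include <string.h>

struct part_key { unsigned type; unsigned flags; };

struct shader_part {
   struct shader_part *next;
   struct part_key key;
   /* ... the compiled prolog/epilog binary would live here ... */
};

static struct shader_part *
get_shader_part(struct shader_part **list, const struct part_key *key,
                struct shader_part *(*build)(const struct part_key *key))
{
   /* Fast path: an identical key means the cached part can be reused. */
   for (struct shader_part *iter = *list; iter; iter = iter->next) {
      if (memcmp(&iter->key, key, sizeof(*key)) == 0)
         return iter;
   }

   /* Slow path: compile the missing part and prepend it for later lookups. */
   struct shader_part *part = build(key);
   if (part) {
      part->key = *key;
      part->next = *list;
      *list = part;
   }
   return part;
}
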


@@ -127,15 +127,15 @@ static LLVMValueRef si_build_fs_interp(struct si_shader_context *ctx, unsigned a
/**
* Interpolate a fragment shader input.
*
* @param ctx context
* @param input_index index of the input in hardware
* @param semantic_index semantic index
* @param num_interp_inputs number of all interpolated inputs (= BCOLOR offset)
* @param colors_read_mask color components read (4 bits for each color, 8 bits in total)
* @param interp_param interpolation weights (i,j)
* @param prim_mask SI_PARAM_PRIM_MASK
* @param face SI_PARAM_FRONT_FACE
* @param result the return value (4 components)
* @param ctx context
* @param input_index index of the input in hardware
* @param semantic_index semantic index
* @param num_interp_inputs number of all interpolated inputs (= BCOLOR offset)
* @param colors_read_mask color components read (4 bits for each color, 8 bits in total)
* @param interp_param interpolation weights (i,j)
* @param prim_mask SI_PARAM_PRIM_MASK
* @param face SI_PARAM_FRONT_FACE
* @param result the return value (4 components)
*/
static void interp_fs_color(struct si_shader_context *ctx, unsigned input_index,
unsigned semantic_index, unsigned num_interp_inputs,


@@ -49,7 +49,7 @@ LLVMValueRef si_get_rel_patch_id(struct si_shader_context *ctx)
* The LDS layout is:
* - TCS inputs for patch 0
* - TCS inputs for patch 1
* - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
* - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
* - ...
* - TCS outputs for patch 0 = get_tcs_out_patch0_offset
* - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
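
The layout comment above stacks all per-patch TCS inputs first and places the TCS outputs after them. As a rough model with hypothetical strides (in the driver they come from shader state), the named offsets could be computed like this:

#include <stdint.h>

/* Placeholder description of the LS/HS LDS regions; only the stacking
 * order mirrors the layout comment above. */
struct lshs_lds_layout {
   uint32_t in_patch_stride;    /* dwords of TCS inputs per patch */
   uint32_t out_patch_stride;   /* dwords of TCS outputs per patch */
   uint32_t num_patches;        /* patches handled by the threadgroup */
};

/* get_tcs_in_current_patch_offset: inputs are stacked one patch after
 * another, so the offset is rel_patch_id times the input stride. */
static uint32_t tcs_in_patch_offset(const struct lshs_lds_layout *l,
                                    uint32_t rel_patch_id)
{
   return rel_patch_id * l->in_patch_stride;
}

/* get_tcs_out_patch0_offset: outputs begin only after the inputs of
 * every patch, i.e. after num_patches * input stride dwords. */
static uint32_t tcs_out_patch0_offset(const struct lshs_lds_layout *l)
{
   return l->num_patches * l->in_patch_stride;
}
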
@@ -183,9 +183,9 @@ static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
/**
* Load from LSHS LDS storage.
*
* \param type output value type
* \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
* \param dw_addr address in dwords
* \param type output value type
* \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
* \param dw_addr address in dwords
*/
static LLVMValueRef lshs_lds_load(struct si_shader_context *ctx, LLVMTypeRef type, unsigned swizzle,
LLVMValueRef dw_addr)


@@ -871,7 +871,7 @@ static void si_emit_clip_regs(struct si_context *sctx)
radeon_begin(&sctx->gfx_cs);
radeon_opt_set_context_reg(sctx, R_02881C_PA_CL_VS_OUT_CNTL, SI_TRACKED_PA_CL_VS_OUT_CNTL,
pa_cl_cntl | vs->pa_cl_vs_out_cntl);
pa_cl_cntl | vs->pa_cl_vs_out_cntl);
radeon_opt_set_context_reg(sctx, R_028810_PA_CL_CLIP_CNTL, SI_TRACKED_PA_CL_CLIP_CNTL,
rs->pa_cl_clip_cntl | ucp_mask | S_028810_CLIP_DISABLE(window_space));
radeon_end_update_context_roll(sctx);
@@ -4527,9 +4527,9 @@ static void si_make_texture_descriptor(struct si_screen *screen, struct si_textu
/**
* Create a sampler view.
*
* @param ctx context
* @param texture texture
* @param state sampler view template
* @param ctx context
* @param texture texture
* @param state sampler view template
*/
static struct pipe_sampler_view *si_create_sampler_view(struct pipe_context *ctx,
struct pipe_resource *texture,


@@ -891,14 +891,14 @@ void si_print_texture_info(struct si_screen *sscreen, struct si_texture *tex,
/**
* Common function for si_texture_create and si_texture_from_handle.
*
* \param screen screen
* \param base resource template
* \param surface radeon_surf
* \param plane0 if a non-zero plane is being created, this is the first plane
* \param imported_buf from si_texture_from_handle
* \param offset offset for non-zero planes or imported buffers
* \param alloc_size the size to allocate if plane0 != NULL
* \param alignment alignment for the allocation
* \param screen screen
* \param base resource template
* \param surface radeon_surf
* \param plane0 if a non-zero plane is being created, this is the first plane
* \param imported_buf from si_texture_from_handle
* \param offset offset for non-zero planes or imported buffers
* \param alloc_size the size to allocate if plane0 != NULL
* \param alignment alignment for the allocation
*/
static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
const struct pipe_resource *base,
@@ -1190,7 +1190,7 @@ static enum radeon_surf_mode si_choose_tiling(struct si_screen *sscreen,
*/
if (!force_tiling && !is_depth_stencil && !util_format_is_compressed(templ->format)) {
if (sscreen->debug_flags & DBG(NO_TILING) ||
(templ->bind & PIPE_BIND_SCANOUT && sscreen->debug_flags & DBG(NO_DISPLAY_TILING)))
(templ->bind & PIPE_BIND_SCANOUT && sscreen->debug_flags & DBG(NO_DISPLAY_TILING)))
return RADEON_SURF_MODE_LINEAR_ALIGNED;
/* Tiling doesn't work with the 422 (SUBSAMPLED) formats. */