Commit b2b39dae authored by Anton Mitrofanov, committed by Henrik Gramner

Cosmetics

Also make x264_weighted_reference_duplicate() static.
parent 9c82d2b6
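
For context on the second change: giving x264_weighted_reference_duplicate() internal linkage means the symbol is no longer exported from its translation unit, so it cannot be referenced from other files and the compiler is free to inline or discard it. A minimal sketch of the pattern follows; the function name, parameter, and body are invented for illustration and are not the real x264 prototype.

    /* Illustrative only -- a hypothetical helper, not the actual
     * x264_weighted_reference_duplicate() signature. */
    static int duplicate_ref_example( int i_ref )
    {
        /* Before such a change the definition would lack `static`, exporting
         * the symbol; with `static` it is private to this .c file. */
        return i_ref;
    }
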
@@ -154,7 +154,7 @@ static void mc_luma_neon( uint8_t *dst, intptr_t i_dst_stride,
int qpel_idx = ((mvy&3)<<2) + (mvx&3);
intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
if ( (mvy&3) == 3 ) // explict if() to force conditional add
if( (mvy&3) == 3 ) // explict if() to force conditional add
src1 += i_src_stride;
if( qpel_idx & 5 ) /* qpel interpolation needed */
@@ -180,7 +180,7 @@ static uint8_t *get_ref_neon( uint8_t *dst, intptr_t *i_dst_stride,
int qpel_idx = ((mvy&3)<<2) + (mvx&3);
intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
if ( (mvy&3) == 3 ) // explict if() to force conditional add
if( (mvy&3) == 3 ) // explict if() to force conditional add
src1 += i_src_stride;
if( qpel_idx & 5 ) /* qpel interpolation needed */
......
@@ -54,13 +54,13 @@ void x264_predict_16x16_p_neon( uint8_t *src );
void x264_predict_4x4_init_aarch64( int cpu, x264_predict_t pf[12] )
{
#if !HIGH_BIT_DEPTH
if (cpu&X264_CPU_ARMV8)
if( cpu&X264_CPU_ARMV8 )
{
pf[I_PRED_4x4_H] = x264_predict_4x4_h_aarch64;
pf[I_PRED_4x4_V] = x264_predict_4x4_v_aarch64;
}
if (cpu&X264_CPU_NEON)
if( cpu&X264_CPU_NEON )
{
pf[I_PRED_4x4_DC] = x264_predict_4x4_dc_neon;
pf[I_PRED_4x4_DC_TOP] = x264_predict_4x4_dc_top_neon;
@@ -73,11 +73,12 @@ void x264_predict_4x4_init_aarch64( int cpu, x264_predict_t pf[12] )
void x264_predict_8x8c_init_aarch64( int cpu, x264_predict_t pf[7] )
{
#if !HIGH_BIT_DEPTH
if (cpu&X264_CPU_ARMV8) {
if( cpu&X264_CPU_ARMV8 )
{
pf[I_PRED_CHROMA_V] = x264_predict_8x8c_v_aarch64;
}
if (!(cpu&X264_CPU_NEON))
if( !(cpu&X264_CPU_NEON) )
return;
pf[I_PRED_CHROMA_DC] = x264_predict_8x8c_dc_neon;
@@ -91,7 +92,7 @@ void x264_predict_8x8c_init_aarch64( int cpu, x264_predict_t pf[7] )
void x264_predict_8x16c_init_aarch64( int cpu, x264_predict_t pf[7] )
{
if (!(cpu&X264_CPU_NEON))
if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
@@ -106,7 +107,7 @@ void x264_predict_8x16c_init_aarch64( int cpu, x264_predict_t pf[7] )
void x264_predict_8x8_init_aarch64( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_filter )
{
if (!(cpu&X264_CPU_NEON))
if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
@@ -124,7 +125,7 @@ void x264_predict_8x8_init_aarch64( int cpu, x264_predict8x8_t pf[12], x264_pred
void x264_predict_16x16_init_aarch64( int cpu, x264_predict_t pf[7] )
{
if (!(cpu&X264_CPU_NEON))
if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
......
@@ -161,7 +161,7 @@ static void mc_luma_neon( uint8_t *dst, intptr_t i_dst_stride,
int qpel_idx = ((mvy&3)<<2) + (mvx&3);
intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
if ( (mvy&3) == 3 ) // explict if() to force conditional add
if( (mvy&3) == 3 ) // explict if() to force conditional add
src1 += i_src_stride;
if( qpel_idx & 5 ) /* qpel interpolation needed */
@@ -187,7 +187,7 @@ static uint8_t *get_ref_neon( uint8_t *dst, intptr_t *i_dst_stride,
int qpel_idx = ((mvy&3)<<2) + (mvx&3);
intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
if ( (mvy&3) == 3 ) // explict if() to force conditional add
if( (mvy&3) == 3 ) // explict if() to force conditional add
src1 += i_src_stride;
if( qpel_idx & 5 ) /* qpel interpolation needed */
......
@@ -29,7 +29,7 @@
void x264_predict_4x4_init_arm( int cpu, x264_predict_t pf[12] )
{
if (!(cpu&X264_CPU_ARMV6))
if( !(cpu&X264_CPU_ARMV6) )
return;
#if !HIGH_BIT_DEPTH
@@ -38,7 +38,7 @@ void x264_predict_4x4_init_arm( int cpu, x264_predict_t pf[12] )
pf[I_PRED_4x4_DC] = x264_predict_4x4_dc_armv6;
pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_armv6;
if (!(cpu&X264_CPU_NEON))
if( !(cpu&X264_CPU_NEON) )
return;
pf[I_PRED_4x4_DC_TOP] = x264_predict_4x4_dc_top_neon;
@@ -48,7 +48,7 @@ void x264_predict_4x4_init_arm( int cpu, x264_predict_t pf[12] )
void x264_predict_8x8c_init_arm( int cpu, x264_predict_t pf[7] )
{
if (!(cpu&X264_CPU_NEON))
if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
@@ -63,7 +63,7 @@ void x264_predict_8x8c_init_arm( int cpu, x264_predict_t pf[7] )
void x264_predict_8x16c_init_arm( int cpu, x264_predict_t pf[7] )
{
if (!(cpu&X264_CPU_NEON))
if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
@@ -76,7 +76,7 @@ void x264_predict_8x16c_init_arm( int cpu, x264_predict_t pf[7] )
void x264_predict_8x8_init_arm( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_filter )
{
if (!(cpu&X264_CPU_NEON))
if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
@@ -94,7 +94,7 @@ void x264_predict_8x8_init_arm( int cpu, x264_predict8x8_t pf[12], x264_predict_
void x264_predict_16x16_init_arm( int cpu, x264_predict_t pf[7] )
{
if (!(cpu&X264_CPU_NEON))
if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
......
@@ -50,8 +50,8 @@ void x264_cabac_block_residual_8x8_rd_internal_sse2 ( dctcoef *l, int b_in
void x264_cabac_block_residual_8x8_rd_internal_sse2_lzcnt ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_8x8_rd_internal_ssse3 ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_8x8_rd_internal_ssse3_lzcnt( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_internal_sse2 ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_internal_sse2_lzcnt ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_internal_sse2 ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_internal_sse2_lzcnt( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_internal_avx2_bmi2 ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
uint8_t *x264_nal_escape_neon( uint8_t *dst, uint8_t *src, uint8_t *end );
......
@@ -68,7 +68,7 @@ extern const vlc_t x264_total_zeros_2x4_dc[7][8];
typedef struct
{
uint8_t *(*nal_escape) ( uint8_t *dst, uint8_t *src, uint8_t *end );
uint8_t *(*nal_escape)( uint8_t *dst, uint8_t *src, uint8_t *end );
void (*cabac_block_residual_internal)( dctcoef *l, int b_interlaced,
intptr_t ctx_block_cat, x264_cabac_t *cb );
void (*cabac_block_residual_rd_internal)( dctcoef *l, int b_interlaced,
......
@@ -58,7 +58,7 @@ extern const uint16_t x264_cabac_entropy[128];
void x264_cabac_context_init( x264_t *h, x264_cabac_t *cb, int i_slice_type, int i_qp, int i_model );
void x264_cabac_encode_init_core( x264_cabac_t *cb );
void x264_cabac_encode_init ( x264_cabac_t *cb, uint8_t *p_data, uint8_t *p_end );
void x264_cabac_encode_init( x264_cabac_t *cb, uint8_t *p_data, uint8_t *p_end );
void x264_cabac_encode_decision_c( x264_cabac_t *cb, int i_ctx, int b );
void x264_cabac_encode_decision_asm( x264_cabac_t *cb, int i_ctx, int b );
void x264_cabac_encode_bypass_c( x264_cabac_t *cb, int b );
......
@@ -610,7 +610,7 @@ int x264_param_parse( x264_param_t *p, const char *name, const char *value )
#define OPT(STR) else if( !strcmp( name, STR ) )
#define OPT2(STR0, STR1) else if( !strcmp( name, STR0 ) || !strcmp( name, STR1 ) )
if(0);
if( 0 );
OPT("asm")
{
p->cpu = isdigit(value[0]) ? atoi(value) :
......
@@ -36,7 +36,7 @@
#define X264_MAX3(a,b,c) X264_MAX((a),X264_MAX((b),(c)))
#define X264_MIN4(a,b,c,d) X264_MIN((a),X264_MIN3((b),(c),(d)))
#define X264_MAX4(a,b,c,d) X264_MAX((a),X264_MAX3((b),(c),(d)))
#define XCHG(type,a,b) do{ type t = a; a = b; b = t; } while(0)
#define XCHG(type,a,b) do { type t = a; a = b; b = t; } while( 0 )
#define IS_DISPOSABLE(type) ( type == X264_TYPE_B )
#define FIX8(f) ((int)(f*(1<<8)+.5))
#define ALIGN(x,a) (((x)+((a)-1))&~((a)-1))
@@ -70,14 +70,14 @@ do {\
var = (void*)prealloc_size;\
preallocs[prealloc_idx++] = (uint8_t**)&var;\
prealloc_size += ALIGN(size, NATIVE_ALIGN);\
} while(0)
} while( 0 )
#define PREALLOC_END( ptr )\
do {\
CHECKED_MALLOC( ptr, prealloc_size );\
while( prealloc_idx-- )\
*preallocs[prealloc_idx] += (intptr_t)ptr;\
} while(0)
} while( 0 )
#define ARRAY_SIZE(array) (sizeof(array)/sizeof(array[0]))
......
@@ -36,22 +36,22 @@ typedef struct
// pix1 stride = FENC_STRIDE
// pix2 stride = FDEC_STRIDE
// p_dst stride = FDEC_STRIDE
void (*sub4x4_dct) ( dctcoef dct[16], pixel *pix1, pixel *pix2 );
void (*add4x4_idct) ( pixel *p_dst, dctcoef dct[16] );
void (*sub4x4_dct) ( dctcoef dct[16], pixel *pix1, pixel *pix2 );
void (*add4x4_idct)( pixel *p_dst, dctcoef dct[16] );
void (*sub8x8_dct) ( dctcoef dct[4][16], pixel *pix1, pixel *pix2 );
void (*sub8x8_dct_dc)( dctcoef dct[4], pixel *pix1, pixel *pix2 );
void (*add8x8_idct) ( pixel *p_dst, dctcoef dct[4][16] );
void (*add8x8_idct_dc) ( pixel *p_dst, dctcoef dct[4] );
void (*sub8x8_dct) ( dctcoef dct[4][16], pixel *pix1, pixel *pix2 );
void (*sub8x8_dct_dc) ( dctcoef dct[4], pixel *pix1, pixel *pix2 );
void (*add8x8_idct) ( pixel *p_dst, dctcoef dct[4][16] );
void (*add8x8_idct_dc)( pixel *p_dst, dctcoef dct[4] );
void (*sub8x16_dct_dc)( dctcoef dct[8], pixel *pix1, pixel *pix2 );
void (*sub16x16_dct) ( dctcoef dct[16][16], pixel *pix1, pixel *pix2 );
void (*add16x16_idct)( pixel *p_dst, dctcoef dct[16][16] );
void (*add16x16_idct_dc) ( pixel *p_dst, dctcoef dct[16] );
void (*sub16x16_dct) ( dctcoef dct[16][16], pixel *pix1, pixel *pix2 );
void (*add16x16_idct) ( pixel *p_dst, dctcoef dct[16][16] );
void (*add16x16_idct_dc)( pixel *p_dst, dctcoef dct[16] );
void (*sub8x8_dct8) ( dctcoef dct[64], pixel *pix1, pixel *pix2 );
void (*add8x8_idct8) ( pixel *p_dst, dctcoef dct[64] );
void (*sub8x8_dct8) ( dctcoef dct[64], pixel *pix1, pixel *pix2 );
void (*add8x8_idct8)( pixel *p_dst, dctcoef dct[64] );
void (*sub16x16_dct8) ( dctcoef dct[4][64], pixel *pix1, pixel *pix2 );
void (*add16x16_idct8)( pixel *p_dst, dctcoef dct[4][64] );
......
@@ -442,7 +442,7 @@ void x264_frame_deblock_row( x264_t *h, int mb_y )
stride2uv, bs[dir][edge], chroma_qp, a, b, 1,\
h->loopf.deblock_chroma##intra[dir] );\
}\
} while(0)
} while( 0 )
if( h->mb.i_neighbour & MB_LEFT )
{
@@ -638,7 +638,7 @@ void x264_macroblock_deblock( x264_t *h )
FDEC_STRIDE, bs[dir][edge], qpc, a, b, 0,\
h->loopf.deblock_luma[dir] );\
}\
} while(0)
} while( 0 )
if( !transform_8x8 ) FILTER( 0, 1 );
FILTER( 0, 2 );
......
@@ -357,7 +357,7 @@ static int get_plane_ptr( x264_t *h, x264_picture_t *src, uint8_t **pix, int *st
return 0;
}
#define get_plane_ptr(...) do{ if( get_plane_ptr(__VA_ARGS__) < 0 ) return -1; }while(0)
#define get_plane_ptr(...) do { if( get_plane_ptr(__VA_ARGS__) < 0 ) return -1; } while( 0 )
int x264_frame_copy_picture( x264_t *h, x264_frame_t *dst, x264_picture_t *src )
{
......
@@ -211,9 +211,9 @@ typedef struct
x264_deblock_intra_t deblock_chroma_intra_mbaff;
x264_deblock_intra_t deblock_chroma_420_intra_mbaff;
x264_deblock_intra_t deblock_chroma_422_intra_mbaff;
void (*deblock_strength) ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4], int mvy_limit,
int bframe );
void (*deblock_strength)( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4], int mvy_limit,
int bframe );
} x264_deblock_function_t;
void x264_frame_delete( x264_frame_t *frame );
......
@@ -1183,7 +1183,7 @@ static void ALWAYS_INLINE x264_macroblock_cache_load( x264_t *h, int mb_x, int m
M16( h->mb.cache.mvd[l][x264_scan8[0]-1+0*8] ) = 0;
M16( h->mb.cache.mvd[l][x264_scan8[0]-1+1*8] ) = 0;
}
if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1+2*8] >=0) )
if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1+2*8] >= 0) )
{
CP16( h->mb.cache.mvd[l][x264_scan8[8 ] - 1], mvd[left[LBOT]][left_index_table->intra[2]] );
CP16( h->mb.cache.mvd[l][x264_scan8[10] - 1], mvd[left[LBOT]][left_index_table->intra[3]] );
......
@@ -108,10 +108,10 @@ static const uint8_t x264_mb_type_list_table[X264_MBTYPE_MAX][2][2] =
{{0,0},{0,0}} /* B_SKIP */
};
#define IS_SUB4x4(type) ( (type ==D_L0_4x4)||(type ==D_L1_4x4)||(type ==D_BI_4x4))
#define IS_SUB4x8(type) ( (type ==D_L0_4x8)||(type ==D_L1_4x8)||(type ==D_BI_4x8))
#define IS_SUB8x4(type) ( (type ==D_L0_8x4)||(type ==D_L1_8x4)||(type ==D_BI_8x4))
#define IS_SUB8x8(type) ( (type ==D_L0_8x8)||(type ==D_L1_8x8)||(type ==D_BI_8x8)||(type ==D_DIRECT_8x8))
#define IS_SUB4x4(type) ( (type == D_L0_4x4)||(type == D_L1_4x4)||(type == D_BI_4x4) )
#define IS_SUB4x8(type) ( (type == D_L0_4x8)||(type == D_L1_4x8)||(type == D_BI_4x8) )
#define IS_SUB8x4(type) ( (type == D_L0_8x4)||(type == D_L1_8x4)||(type == D_BI_8x4) )
#define IS_SUB8x8(type) ( (type == D_L0_8x8)||(type == D_L1_8x8)||(type == D_BI_8x8)||(type == D_DIRECT_8x8) )
enum mb_partition_e
{
/* sub partition type for P_8x8 and B_8x8 */
......
@@ -32,7 +32,7 @@ do\
{\
MC_CLIP_ADD((s)[0], (x)[0]);\
MC_CLIP_ADD((s)[1], (x)[1]);\
} while(0)
} while( 0 )
#define PROPAGATE_LIST(cpu)\
void x264_mbtree_propagate_list_internal_##cpu( int16_t (*mvs)[2], int16_t *propagate_amount,\
......
@@ -65,7 +65,7 @@ static uint32_t sad_4width_msa( uint8_t *p_src, int32_t i_src_stride,
v16u8 diff;
v8u16 sad = { 0 };
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LW4( p_src, i_src_stride, u_src0, u_src1, u_src2, u_src3 );
p_src += ( 4 * i_src_stride );
@@ -90,7 +90,7 @@ static uint32_t sad_8width_msa( uint8_t *p_src, int32_t i_src_stride,
v16u8 src0, src1, src2, src3, ref0, ref1, ref2, ref3;
v8u16 sad = { 0 };
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
@@ -113,7 +113,7 @@ static uint32_t sad_16width_msa( uint8_t *p_src, int32_t i_src_stride,
v16u8 src0, src1, ref0, ref1;
v8u16 sad = { 0 };
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB2( p_src, i_src_stride, src0, src1 );
p_src += ( 2 * i_src_stride );
@@ -147,7 +147,7 @@ static void sad_4width_x3d_msa( uint8_t *p_src, int32_t i_src_stride,
v8u16 sad1 = { 0 };
v8u16 sad2 = { 0 };
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LW4( p_src, i_src_stride, src0, src1, src2, src3 );
INSERT_W4_UB( src0, src1, src2, src3, src );
@@ -192,7 +192,7 @@ static void sad_8width_x3d_msa( uint8_t *p_src, int32_t i_src_stride,
v8u16 sad1 = { 0 };
v8u16 sad2 = { 0 };
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
@@ -233,7 +233,7 @@ static void sad_16width_x3d_msa( uint8_t *p_src, int32_t i_src_stride,
v8u16 sad1 = { 0 };
v8u16 sad2 = { 0 };
for ( i_ht_cnt = ( i_height >> 1 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 1 ); i_ht_cnt--; )
{
src = LD_UB( p_src );
p_src += i_src_stride;
@@ -298,7 +298,7 @@ static void sad_4width_x4d_msa( uint8_t *p_src, int32_t i_src_stride,
p_ref2 = p_aref[2];
p_ref3 = p_aref[3];
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LW4( p_src, i_src_stride, src0, src1, src2, src3 );
INSERT_W4_UB( src0, src1, src2, src3, src );
@@ -358,7 +358,7 @@ static void sad_8width_x4d_msa( uint8_t *p_src, int32_t i_src_stride,
p_ref2 = p_aref[2];
p_ref3 = p_aref[3];
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
@@ -408,7 +408,7 @@ static void sad_16width_x4d_msa( uint8_t *p_src, int32_t i_src_stride,
p_ref2 = p_aref[2];
p_ref3 = p_aref[3];
for ( i_ht_cnt = ( i_height >> 1 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 1 ); i_ht_cnt--; )
{
src = LD_UB( p_src );
p_src += i_src_stride;
@@ -465,7 +465,7 @@ static uint64_t avc_pixel_var16width_msa( uint8_t *p_pix, int32_t i_stride,
v8u16 add, pix_r, pix_l;
v4u32 sqr = { 0 };
for ( u_cnt = i_height; u_cnt--; )
for( u_cnt = i_height; u_cnt--; )
{
pix = LD_SB( p_pix );
p_pix += i_stride;
@@ -489,7 +489,7 @@ static uint64_t avc_pixel_var8width_msa( uint8_t *p_pix, int32_t i_stride,
v8u16 add, pix_r;
v4u32 sqr = { 0 };
for ( u_cnt = i_height; u_cnt--; )
for( u_cnt = i_height; u_cnt--; )
{
pix = LD_SB( p_pix );
p_pix += i_stride;
@@ -515,7 +515,7 @@ static uint32_t sse_diff_8width_msa( uint8_t *p_src, int32_t i_src_stride,
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
@@ -547,7 +547,7 @@ static uint32_t sse_4width_msa( uint8_t *p_src, int32_t i_src_stride,
v16u8 ref = { 0 };
v4i32 var = { 0 };
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LW4( p_src, i_src_stride, u_src0, u_src1, u_src2, u_src3 );
p_src += ( 4 * i_src_stride );
@@ -574,7 +574,7 @@ static uint32_t sse_8width_msa( uint8_t *p_src, int32_t i_src_stride,
v16u8 ref0, ref1, ref2, ref3;
v4i32 var = { 0 };
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
@@ -601,7 +601,7 @@ static uint32_t sse_16width_msa( uint8_t *p_src, int32_t i_src_stride,
v16u8 src, ref;
v4i32 var = { 0 };
for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
src = LD_UB( p_src );
p_src += i_src_stride;
@@ -715,7 +715,7 @@ static int32_t pixel_satd_4width_msa( uint8_t *p_src, int32_t i_src_stride,
v8i16 diff0, diff1, diff2, diff3;
v8i16 temp0, temp1, temp2, temp3;
for ( cnt = i_height >> 2; cnt--; )
for( cnt = i_height >> 2; cnt--; )
{
LD_SB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += 4 * i_src_stride;
@@ -759,7 +759,7 @@ static int32_t pixel_satd_8width_msa( uint8_t *p_src, int32_t i_src_stride,
v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7;
v8i16 temp0, temp1, temp2, temp3;
for ( cnt = i_height >> 2; cnt--; )
for( cnt = i_height >> 2; cnt--; )
{
LD_SB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += 4 * i_src_stride;
......
@@ -101,7 +101,7 @@ static void intra_predict_horiz_16x16_msa( uint8_t *p_src, int32_t i_src_stride,
uint8_t u_inp0, u_inp1, u_inp2, u_inp3;
v16u8 src0, src1, src2, src3;
for ( u_row = 4; u_row--; )
for( u_row = 4; u_row--; )
{
u_inp0 = p_src[0];
p_src += i_src_stride;
@@ -133,7 +133,7 @@ static void intra_predict_dc_4x4_msa( uint8_t *p_src_top, uint8_t *p_src_left,
v8u16 sum_above;
v4u32 sum;
if ( is_left && is_above )
if( is_left && is_above )
{
src_above = LD_UB( p_src_top );
@@ -141,7 +141,7 @@ static void intra_predict_dc_4x4_msa( uint8_t *p_src_top, uint8_t *p_src_left,
sum = __msa_hadd_u_w( sum_above, sum_above );
u_addition = __msa_copy_u_w( ( v4i32 ) sum, 0 );
for ( u_row = 0; u_row < 4; u_row++ )
for( u_row = 0; u_row < 4; u_row++ )
{
u_addition += p_src_left[u_row * i_src_stride_left];
}
@@ -149,9 +149,9 @@ static void intra_predict_dc_4x4_msa( uint8_t *p_src_top, uint8_t *p_src_left,
u_addition = ( u_addition + 4 ) >> 3;
store = ( v16u8 ) __msa_fill_b( u_addition );
}
else if ( is_left )
else if( is_left )
{
for ( u_row = 0; u_row < 4; u_row++ )
for( u_row = 0; u_row < 4; u_row++ )
{
u_addition += p_src_left[u_row * i_src_stride_left];
}
@@ -159,7 +159,7 @@ static void intra_predict_dc_4x4_msa( uint8_t *p_src_top, uint8_t *p_src_left,
u_addition = ( u_addition + 2 ) >> 2;
store = ( v16u8 ) __msa_fill_b( u_addition );
}
else if ( is_above )
else if( is_above )
{
src_above = LD_UB( p_src_top );
@@ -217,7 +217,7 @@ static void intra_predict_dc_16x16_msa( uint8_t *p_src_top, uint8_t *p_src_left,
v4u32 sum_top;
v2u64 sum;
if ( is_left && is_above )
if( is_left && is_above )
{
src_above = LD_UB( p_src_top );
@@ -228,7 +228,7 @@ static void intra_predict_dc_16x16_msa( uint8_t *p_src_top, uint8_t *p_src_left,
sum = __msa_hadd_u_d( sum_top, sum_top );
u_addition = __msa_copy_u_w( ( v4i32 ) sum, 0 );
for ( u_row = 0; u_row < 16; u_row++ )
for( u_row = 0; u_row < 16; u_row++ )
{
u_addition += p_src_left[u_row * i_src_stride_left];
}
@@ -236,9 +236,9 @@ static void intra_predict_dc_16x16_msa( uint8_t *p_src_top, uint8_t *p_src_left,
u_addition = ( u_addition + 16 ) >> 5;
store = ( v16u8 ) __msa_fill_b( u_addition );
}
else if ( is_left )
else if( is_left )
{
for ( u_row = 0; u_row < 16; u_row++ )
for( u_row = 0; u_row < 16; u_row++ )
{
u_addition += p_src_left[u_row * i_src_stride_left];
}
@@ -246,7 +246,7 @@ static void intra_predict_dc_16x16_msa( uint8_t *p_src_top, uint8_t *p_src_left,
u_addition = ( u_addition + 8 ) >> 4;
store = ( v16u8 ) __msa_fill_b( u_addition );
}
else if ( is_above )
else if( is_above )
{
src_above = LD_UB( p_src_top );
@@ -314,7 +314,7 @@ static void intra_predict_plane_8x8_msa( uint8_t *p_src, int32_t i_stride )
vec5 = vec8 * int_multiplier;
vec3 = vec8 * 4;
for ( u_lpcnt = 4; u_lpcnt--; )
for( u_lpcnt = 4; u_lpcnt--; )
{
vec0 = vec5;
vec0 += vec4;
@@ -391,7 +391,7 @@ static void intra_predict_plane_16x16_msa( uint8_t *p_src, int32_t i_stride )
vec6 = vec8 * 4;
vec7 = vec8 * int_multiplier;
for ( u_lpcnt = 16; u_lpcnt--; )
for( u_lpcnt = 16; u_lpcnt--; )
{
vec0 = vec7;
vec0 += vec4;
@@ -424,7 +424,7 @@ static void intra_predict_dc_4blk_8x8_msa( uint8_t *p_src, int32_t i_stride )
u_src0 = __msa_copy_u_w( ( v4i32 ) sum, 0 );
u_src1 = __msa_copy_u_w( ( v4i32 ) sum, 1 );
for ( u_lp_cnt = 0; u_lp_cnt < 4; u_lp_cnt++ )
for( u_lp_cnt = 0; u_lp_cnt < 4; u_lp_cnt++ )
{
u_src0 += p_src[u_lp_cnt * i_stride - 1];
u_src2 += p_src[( 4 + u_lp_cnt ) * i_stride - 1];
@@ -440,7 +440,7 @@ static void intra_predict_dc_4blk_8x8_msa( uint8_t *p_src, int32_t i_stride )
u_out2 = u_src2 * 0x01010101;
u_out3 = u_src3 * 0x01010101;
for ( u_lp_cnt = 4; u_lp_cnt--; )
for( u_lp_cnt = 4; u_lp_cnt--; )
{
SW( u_out0, p_src );
SW( u_out1, ( p_src + 4 ) );
......
@@ -40,7 +40,7 @@ static void avc_dequant_4x4_msa( int16_t *p_dct, int32_t pi_dequant_mf[6][16],
LD_SW2( pi_dequant_mf[i_mf], 4, dequant_m_f0, dequant_m_f1 );
LD_SW2( pi_dequant_mf[i_mf] + 8, 4, dequant_m_f2, dequant_m_f3 );
if ( q_bits >= 0 )
if( q_bits >= 0 )
{
v8i16 dequant_mf_h0, dequant_mf_h1, q_bits_vec;
@@ -106,7 +106,7 @@ static void avc_dequant_8x8_msa( int16_t *p_dct, int32_t pi_dequant_mf[6][64],
LD_SW2( pi_dequant_mf[i_mf] + 48, 4, dequant_m_f12, dequant_m_f13 );
LD_SW2( pi_dequant_mf[i_mf] + 56, 4, dequant_m_f14, dequant_m_f15 );
if ( q_bits >= 0 )
if( q_bits >= 0 )
{
v8i16 q_bits_vec;
v8i16 dequant_mf_h0, dequant_mf_h1, dequant_mf_h2, dequant_mf_h3;
@@ -220,7 +220,7 @@ static void avc_dequant_4x4_dc_msa( int16_t *p_dct,
LD_SH2( p_dct, 8, dct0, dct1 );
if ( q_bits >= 0 )
if( q_bits >= 0 )
{
i_dmf <<= q_bits;
......
@@ -135,7 +135,7 @@ static cl_program x264_opencl_cache_load( x264_t *h, const char *dev_name, const
rewind( fp );
CHECKED_MALLOC( binary, size );
if ( fread( binary, 1, size, fp ) != size )
if( fread( binary, 1, size, fp ) != size )
goto fail;
const uint8_t *ptr = (const uint8_t*)binary;
......
@@ -27,7 +27,7 @@ typedef uint32_t sum2_t;
#define INTRA_MBS 2
#define COPY2_IF_LT( x, y, a, b )\
if((y)<(x))\
if( (y) < (x) )\
{\
(x) = (y);\
(a) = (b);\
......
@@ -30,9 +30,9 @@
// SSD assumes all args aligned
// other cmp functions assume first arg aligned
typedef int (*x264_pixel_cmp_t) ( pixel *, intptr_t, pixel *, intptr_t );
typedef void (*x264_pixel_cmp_x3_t) ( pixel *, pixel *, pixel *, pixel *, intptr_t, int[3] );
typedef void (*x264_pixel_cmp_x4_t) ( pixel *, pixel *, pixel *, pixel *, pixel *, intptr_t, int[4] );
typedef int (*x264_pixel_cmp_t)( pixel *, intptr_t, pixel *, intptr_t );
typedef void (*x264_pixel_cmp_x3_t)( pixel *, pixel *, pixel *, pixel *, intptr_t, int[3] );
typedef void (*x264_pixel_cmp_x4_t)( pixel *, pixel *, pixel *, pixel *, pixel *, intptr_t, int[4] );
enum
{
......
@@ -248,14 +248,14 @@ static inline vec_u8_t h264_deblock_q1( register vec_u8_t p0, register vec_u8_t
finaltc0 = vec_and((vec_u8_t)tc0vec, mask); /* tc = tc0 */ \
\
p1mask = diff_lt_altivec(p2, p0, betavec); \
p1mask = vec_and(p1mask, mask); /* if( |p2 - p0| < beta) */ \
p1mask = vec_and(p1mask, mask); /* if( |p2 - p0| < beta ) */ \
tc0masked = vec_and(p1mask, (vec_u8_t)tc0vec); \
finaltc0 = vec_sub(finaltc0, p1mask); /* tc++ */ \
newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \
/*end if*/ \
\
q1mask = diff_lt_altivec(q2, q0, betavec); \
q1mask = vec_and(q1mask, mask); /* if ( |q2 - q0| < beta ) */\
q1mask = vec_and(q1mask, mask); /* if( |q2 - q0| < beta ) */ \
tc0masked = vec_and(q1mask, (vec_u8_t)tc0vec); \
finaltc0 = vec_sub(finaltc0, q1mask); /* tc++ */ \
newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked); \
......
@@ -1207,8 +1207,8 @@ static void pixel_sad_x3_8x8_altivec( uint8_t *fenc, uint8_t *pix0,
* SSD routines
**********************************************************************/
static int pixel_ssd_16x16_altivec ( uint8_t *pix1, intptr_t i_stride_pix1,
uint8_t *pix2, intptr_t i_stride_pix2 )
static int pixel_ssd_16x16_altivec( uint8_t *pix1, intptr_t i_stride_pix1,
uint8_t *pix2, intptr_t i_stride_pix2 )
{
ALIGNED_16( int sum );
@@ -1273,8 +1273,8 @@ static int pixel_ssd_16x16_altivec ( uint8_t *pix1, intptr_t i_stride_pix1,
return sum;
}
static int pixel_ssd_8x8_altivec ( uint8_t *pix1, intptr_t i_stride_pix1,
uint8_t *pix2, intptr_t i_stride_pix2 )
static int pixel_ssd_8x8_altivec( uint8_t *pix1, intptr_t i_stride_pix1,
uint8_t *pix2, intptr_t i_stride_pix2 )
{
ALIGNED_16( int sum );
......
@@ -26,7 +26,7 @@
#ifndef X264_PPC_PREDICT_H
#define X264_PPC_PREDICT_H
void x264_predict_16x16_init_altivec ( x264_predict_t pf[7] );
void x264_predict_16x16_init_altivec( x264_predict_t pf[7] );
void x264_predict_8x8c_init_altivec( x264_predict_t pf[7] );
#endif /* X264_PPC_PREDICT_H */
@@ -340,7 +340,7 @@ static void x264_predict_8x16c_dc_top_c( pixel *src )
{
int dc0 = 0, dc1 = 0;
for(int x = 0; x < 4; x++ )
for( int x = 0; x < 4; x++ )
{
dc0 += src[x - FDEC_STRIDE];
dc1 += src[x + 4 - FDEC_STRIDE];
......
@@ -29,7 +29,7 @@
typedef void (*x264_predict_t)( pixel *src );