Commit e8cc72c7 authored by Guillaume Poirier

Add Altivec implementations of add8x8_idct8, add16x16_idct8, sa8d_8x8 and sa8d_16x16

Note: these routines don't yet take advantage of some memory accesses that could be aligned, so there's still room for improvement


git-svn-id: svn://svn.videolan.org/x264/trunk@604 df754926-b1dd-0310-bc7b-ec298dee348c
parent 575b238c
@@ -442,6 +442,9 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf )
dctf->sub8x8_dct8 = x264_sub8x8_dct8_altivec;
dctf->sub16x16_dct8 = x264_sub16x16_dct8_altivec;
dctf->add8x8_idct8 = x264_add8x8_idct8_altivec;
dctf->add16x16_idct8 = x264_add16x16_idct8_altivec;
}
#endif
}
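
For context, callers never invoke the AltiVec routines directly; they go through the function table filled in above. A minimal caller-side sketch, with a hypothetical helper name and assuming the x264_dct_function_t / x264_dct_init() declarations from common/dct.h:

/* Hypothetical caller, not part of the patch. dst must use the FDEC_STRIDE
 * pitch these routines assume. */
static void add_luma_8x8_residual( uint8_t *dst, int16_t dct8[8][8], int cpu )
{
    x264_dct_function_t dctf;
    x264_dct_init( cpu, &dctf );      /* with the AltiVec CPU flag set, the entries
                                       * above now point at the new routines */
    dctf.add8x8_idct8( dst, dct8 );   /* dispatches to x264_add8x8_idct8_altivec */
}
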
@@ -228,3 +228,138 @@ void x264_sub16x16_dct8_altivec( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *p
x264_sub8x8_dct8_altivec( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}
/****************************************************************************
* IDCT transform:
****************************************************************************/
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7)\
{\
/* a0 = SRC(0) + SRC(4); */ \
vec_s16_t a0v = vec_add(s0, s4); \
/* a2 = SRC(0) - SRC(4); */ \
vec_s16_t a2v = vec_sub(s0, s4); \
/* a4 = (SRC(2)>>1) - SRC(6); */ \
vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \
/* a6 = (SRC(6)>>1) + SRC(2); */ \
vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \
/* b0 = a0 + a6; */ \
vec_s16_t b0v = vec_add(a0v, a6v); \
/* b2 = a2 + a4; */ \
vec_s16_t b2v = vec_add(a2v, a4v); \
/* b4 = a2 - a4; */ \
vec_s16_t b4v = vec_sub(a2v, a4v); \
/* b6 = a0 - a6; */ \
vec_s16_t b6v = vec_sub(a0v, a6v); \
/* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
/* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) );\
/* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
/* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
/* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
/* a5 = (SRC(7)-SRC(1)) + (SRC(5) + (SRC(5)>>1)); */ \
vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
/* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
/* b1 = (a7>>2) + a1; */ \
vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \
/* b3 = a3 + (a5>>2); */ \
vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \
/* b5 = (a3>>2) - a5; */ \
vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \
/* b7 = a7 - (a1>>2); */ \
vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
/* DST(0, b0 + b7); */ \
d0 = vec_add(b0v, b7v); \
/* DST(1, b2 + b5); */ \
d1 = vec_add(b2v, b5v); \
/* DST(2, b4 + b3); */ \
d2 = vec_add(b4v, b3v); \
/* DST(3, b6 + b1); */ \
d3 = vec_add(b6v, b1v); \
/* DST(4, b6 - b1); */ \
d4 = vec_sub(b6v, b1v); \
/* DST(5, b4 - b3); */ \
d5 = vec_sub(b4v, b3v); \
/* DST(6, b2 - b5); */ \
d6 = vec_sub(b2v, b5v); \
/* DST(7, b0 - b7); */ \
d7 = vec_sub(b0v, b7v); \
}
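
For readers following the comments, the macro is a straight AltiVec transcription of the scalar 8-point 1D transform below, applied to eight lanes at once. A minimal scalar sketch with hypothetical names, not part of the patch:

static void idct8_1d_scalar( const int16_t src[8], int16_t dst[8] )
{
    /* even half */
    int a0 = src[0] + src[4];
    int a2 = src[0] - src[4];
    int a4 = (src[2] >> 1) - src[6];
    int a6 = (src[6] >> 1) + src[2];
    int b0 = a0 + a6, b2 = a2 + a4;
    int b4 = a2 - a4, b6 = a0 - a6;
    /* odd half */
    int a1 = (src[5] - src[3]) - (src[7] + (src[7] >> 1));
    int a3 = (src[7] + src[1]) - (src[3] + (src[3] >> 1));
    int a5 = (src[7] - src[1]) + (src[5] + (src[5] >> 1));
    int a7 = (src[5] + src[3]) + (src[1] + (src[1] >> 1));
    int b1 = (a7 >> 2) + a1;
    int b3 = a3 + (a5 >> 2);
    int b5 = (a3 >> 2) - a5;
    int b7 = a7 - (a1 >> 2);
    /* final butterfly */
    dst[0] = b0 + b7;  dst[1] = b2 + b5;
    dst[2] = b4 + b3;  dst[3] = b6 + b1;
    dst[4] = b6 - b1;  dst[5] = b4 - b3;
    dst[6] = b2 - b5;  dst[7] = b0 - b7;
}
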
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel)\
{\
/* unaligned load */ \
vec_u8_t hv = vec_ld( 0, dest ); \
vec_u8_t lv = vec_ld( 7, dest ); \
vec_u8_t dstv = vec_perm( hv, lv, (vec_u8_t)perm_ldv ); \
vec_s16_t idct_sh6 = vec_sra(idctv, sixv); \
vec_u16_t dst16 = vec_mergeh(zero_u8v, dstv); \
vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \
vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum); \
/* unaligned store */ \
vec_u8_t bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\
vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
lv = vec_sel( lv, bodyv, edgelv ); \
vec_st( lv, 7, dest ); \
hv = vec_ld( 0, dest ); \
vec_u8_t edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
hv = vec_sel( hv, bodyv, edgehv ); \
vec_st( hv, 0, dest ); \
}
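
The arithmetic inside ALTIVEC_STORE_SUM_CLIP is simply "descale by 6 bits, add to the reconstructed pixels, clamp to 8 bits"; the lvsl/lvsr permutes and vec_sel exist only to cope with an unaligned dst. A scalar sketch of one row of 8 pixels, with hypothetical names, not part of the patch:

static void store_sum_clip_scalar( uint8_t *dest, const int16_t idct[8] )
{
    int i;
    for( i = 0; i < 8; i++ )
    {
        int v = dest[i] + ( idct[i] >> 6 );           /* add descaled residual */
        dest[i] = (uint8_t)( v < 0 ? 0 : v > 255 ? 255 : v );  /* clamp to [0,255] */
    }
}
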
void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[8][8] )
{
vec_s16_t onev = vec_splat_s16(1);
vec_s16_t twov = vec_splat_s16(2);
dct[0][0] += 32; // rounding for the >>6 at the end
vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
s0 = vec_ld(0x00, (int16_t*)dct);
s1 = vec_ld(0x10, (int16_t*)dct);
s2 = vec_ld(0x20, (int16_t*)dct);
s3 = vec_ld(0x30, (int16_t*)dct);
s4 = vec_ld(0x40, (int16_t*)dct);
s5 = vec_ld(0x50, (int16_t*)dct);
s6 = vec_ld(0x60, (int16_t*)dct);
s7 = vec_ld(0x70, (int16_t*)dct);
vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7);
vec_s16_t tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7;
VEC_TRANSPOSE_8( d0, d1, d2, d3, d4, d5, d6, d7,
tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7);
vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
IDCT8_1D_ALTIVEC(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7,
idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);
vec_u8_t perm_ldv = vec_lvsl(0, dst);
vec_u8_t perm_stv = vec_lvsr(8, dst);
vec_s16_t sixv = vec_splat_s16(6);
const vec_u8_t sel = (vec_u8_t) CV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1);
LOAD_ZERO;
ALTIVEC_STORE_SUM_CLIP(&dst[0*FDEC_STRIDE], idct0, perm_ldv, perm_stv, sel);
ALTIVEC_STORE_SUM_CLIP(&dst[1*FDEC_STRIDE], idct1, perm_ldv, perm_stv, sel);
ALTIVEC_STORE_SUM_CLIP(&dst[2*FDEC_STRIDE], idct2, perm_ldv, perm_stv, sel);
ALTIVEC_STORE_SUM_CLIP(&dst[3*FDEC_STRIDE], idct3, perm_ldv, perm_stv, sel);
ALTIVEC_STORE_SUM_CLIP(&dst[4*FDEC_STRIDE], idct4, perm_ldv, perm_stv, sel);
ALTIVEC_STORE_SUM_CLIP(&dst[5*FDEC_STRIDE], idct5, perm_ldv, perm_stv, sel);
ALTIVEC_STORE_SUM_CLIP(&dst[6*FDEC_STRIDE], idct6, perm_ldv, perm_stv, sel);
ALTIVEC_STORE_SUM_CLIP(&dst[7*FDEC_STRIDE], idct7, perm_ldv, perm_stv, sel);
}
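
A note on the dct[0][0] += 32 above: a DC-only input travels through the even half of the butterfly in both 1D passes as a flat offset, so the +32 reaches all 64 output samples, turning the >>6 in ALTIVEC_STORE_SUM_CLIP into a round-to-nearest division by 64. The identity being relied on (hypothetical helper, not part of the patch):

static inline int round_div64( int x )
{
    /* (x + 32) >> 6 gives the nearest integer to x/64 (halves rounded up),
     * which is why biasing only the DC coefficient is enough to round
     * every pixel. */
    return ( x + 32 ) >> 6;
}
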
void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][8][8] )
{
x264_add8x8_idct8_altivec( &dst[0], dct[0] );
x264_add8x8_idct8_altivec( &dst[8], dct[1] );
x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+0], dct[2] );
x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+8], dct[3] );
}
@@ -5,6 +5,7 @@
* $Id$
*
* Authors: Eric Petit <titer@m0k.org>
* Guillaume Poirier <gpoirier@mplayerhq.hu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -36,4 +37,7 @@ void x264_sub8x8_dct8_altivec( int16_t dct[8][8],
void x264_sub16x16_dct8_altivec( int16_t dct[4][8][8],
uint8_t *pix1, uint8_t *pix2 );
void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[8][8] );
void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][8][8] );
#endif
@@ -5,6 +5,7 @@
* $Id: pixel.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
*
* Authors: Eric Petit <titer@m0k.org>
* Guillaume Poirier <gpoirier@mplayerhq.hu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1603,6 +1604,146 @@ static int pixel_ssd_16x16_altivec ( uint8_t *pix1, int i_stride_pix1,
return sum;
}
/**********************************************************************
* SA8D routines: sum of 8x8 Hadamard transformed differences
**********************************************************************/
/* SA8D_1D unrolled by 8 in Altivec */
#define SA8D_1D_ALTIVEC( sa8d0v, sa8d1v, sa8d2v, sa8d3v, sa8d4v, sa8d5v, sa8d6v, sa8d7v )\
{\
/* int a0 = SRC(0) + SRC(4) */\
vec_s16_t a0v = vec_add(sa8d0v, sa8d4v); \
/* int a4 = SRC(0) - SRC(4) */\
vec_s16_t a4v = vec_sub(sa8d0v, sa8d4v); \
/* int a1 = SRC(1) + SRC(5) */\
vec_s16_t a1v = vec_add(sa8d1v, sa8d5v); \
/* int a5 = SRC(1) - SRC(5) */\
vec_s16_t a5v = vec_sub(sa8d1v, sa8d5v); \
/* int a2 = SRC(2) + SRC(6) */\
vec_s16_t a2v = vec_add(sa8d2v, sa8d6v); \
/* int a6 = SRC(2) - SRC(6) */\
vec_s16_t a6v = vec_sub(sa8d2v, sa8d6v); \
/* int a3 = SRC(3) + SRC(7) */\
vec_s16_t a3v = vec_add(sa8d3v, sa8d7v); \
/* int a7 = SRC(3) - SRC(7) */\
vec_s16_t a7v = vec_sub(sa8d3v, sa8d7v); \
\
/* int b0 = a0 + a2 */\
vec_s16_t b0v = vec_add(a0v, a2v); \
/* int b2 = a0 - a2; */\
vec_s16_t b2v = vec_sub(a0v, a2v);\
/* int b1 = a1 + a3; */\
vec_s16_t b1v = vec_add(a1v, a3v); \
/* int b3 = a1 - a3; */\
vec_s16_t b3v = vec_sub(a1v, a3v); \
/* int b4 = a4 + a6; */\
vec_s16_t b4v = vec_add(a4v, a6v); \
/* int b6 = a4 - a6; */\
vec_s16_t b6v = vec_sub(a4v, a6v); \
/* int b5 = a5 + a7; */\
vec_s16_t b5v = vec_add(a5v, a7v); \
/* int b7 = a5 - a7; */\
vec_s16_t b7v = vec_sub(a5v, a7v); \
\
/* DST(0, b0 + b1) */\
sa8d0v = vec_add(b0v, b1v); \
/* DST(1, b0 - b1) */\
sa8d1v = vec_sub(b0v, b1v); \
/* DST(2, b2 + b3) */\
sa8d2v = vec_add(b2v, b3v); \
/* DST(3, b2 - b3) */\
sa8d3v = vec_sub(b2v, b3v); \
/* DST(4, b4 + b5) */\
sa8d4v = vec_add(b4v, b5v); \
/* DST(5, b4 - b5) */\
sa8d5v = vec_sub(b4v, b5v); \
/* DST(6, b6 + b7) */\
sa8d6v = vec_add(b6v, b7v); \
/* DST(7, b6 - b7) */\
sa8d7v = vec_sub(b6v, b7v); \
}
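
As with the IDCT macro, this is the AltiVec form of a scalar 8-point Hadamard butterfly run on all eight lanes at once. A scalar sketch with hypothetical names, not part of the patch:

static void hadamard8_1d_scalar( int d[8] )
{
    int a0 = d[0] + d[4], a4 = d[0] - d[4];
    int a1 = d[1] + d[5], a5 = d[1] - d[5];
    int a2 = d[2] + d[6], a6 = d[2] - d[6];
    int a3 = d[3] + d[7], a7 = d[3] - d[7];
    int b0 = a0 + a2, b2 = a0 - a2;
    int b1 = a1 + a3, b3 = a1 - a3;
    int b4 = a4 + a6, b6 = a4 - a6;
    int b5 = a5 + a7, b7 = a5 - a7;
    d[0] = b0 + b1;  d[1] = b0 - b1;
    d[2] = b2 + b3;  d[3] = b2 - b3;
    d[4] = b4 + b5;  d[5] = b4 - b5;
    d[6] = b6 + b7;  d[7] = b6 - b7;
}
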
static int pixel_sa8d_8x8_core_altivec( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
int32_t i_satd=0;
PREP_DIFF;
vec_s16_t diff0v, diff1v, diff2v, diff3v, diff4v, diff5v, diff6v, diff7v;
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff0v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff1v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff2v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff3v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff4v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff5v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff6v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff7v );
vec_s16_t sa8d0v, sa8d1v, sa8d2v, sa8d3v, sa8d4v, sa8d5v, sa8d6v, sa8d7v;
SA8D_1D_ALTIVEC(diff0v, diff1v, diff2v, diff3v,
diff4v, diff5v, diff6v, diff7v);
VEC_TRANSPOSE_8(diff0v, diff1v, diff2v, diff3v,
diff4v, diff5v, diff6v, diff7v,
sa8d0v, sa8d1v, sa8d2v, sa8d3v,
sa8d4v, sa8d5v, sa8d6v, sa8d7v );
SA8D_1D_ALTIVEC(sa8d0v, sa8d1v, sa8d2v, sa8d3v,
sa8d4v, sa8d5v, sa8d6v, sa8d7v );
/* accumulate the absolute values of all elements of the resulting block */
vec_s16_t abs0v = VEC_ABS(sa8d0v);
vec_s16_t abs1v = VEC_ABS(sa8d1v);
vec_s16_t sum01v = vec_add(abs0v, abs1v);
vec_s16_t abs2v = VEC_ABS(sa8d2v);
vec_s16_t abs3v = VEC_ABS(sa8d3v);
vec_s16_t sum23v = vec_add(abs2v, abs3v);
vec_s16_t abs4v = VEC_ABS(sa8d4v);
vec_s16_t abs5v = VEC_ABS(sa8d5v);
vec_s16_t sum45v = vec_add(abs4v, abs5v);
vec_s16_t abs6v = VEC_ABS(sa8d6v);
vec_s16_t abs7v = VEC_ABS(sa8d7v);
vec_s16_t sum67v = vec_add(abs6v, abs7v);
vec_s16_t sum0123v = vec_add(sum01v, sum23v);
vec_s16_t sum4567v = vec_add(sum45v, sum67v);
vec_s32_t sumblocv;
sumblocv = vec_sum4s(sum0123v, (vec_s32_t)zerov );
sumblocv = vec_sum4s(sum4567v, sumblocv );
sumblocv = vec_sums(sumblocv, (vec_s32_t)zerov );
sumblocv = vec_splat(sumblocv, 3);
vec_ste(sumblocv, 0, &i_satd);
return i_satd;
}
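
The vec_sum4s/vec_sums tail above computes the plain reduction sketched below: the sum of the absolute values of all 64 transformed differences. A scalar sketch with hypothetical names, not part of the patch:

static int sa8d_reduce_scalar( const int16_t t[8][8] )
{
    int i, j, sum = 0;
    for( i = 0; i < 8; i++ )
        for( j = 0; j < 8; j++ )
            sum += t[i][j] < 0 ? -t[i][j] : t[i][j];   /* |t[i][j]| */
    return sum;
}

The wrappers that follow then apply the (x + 2) >> 2 normalisation used by the scalar sa8d; note that the 16x16 version sums the four raw cores before a single rounded shift rather than rounding each 8x8 block separately.
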
static int pixel_sa8d_8x8_altivec( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
int32_t i_satd;
i_satd = (pixel_sa8d_8x8_core_altivec( pix1, i_pix1, pix2, i_pix2 )+2)>>2;
return i_satd;
}
static int pixel_sa8d_16x16_altivec( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
int32_t i_satd;
i_satd = (pixel_sa8d_8x8_core_altivec( &pix1[0], i_pix1, &pix2[0], i_pix2 )
+ pixel_sa8d_8x8_core_altivec( &pix1[8], i_pix1, &pix2[8], i_pix2 )
+ pixel_sa8d_8x8_core_altivec( &pix1[8*i_pix1], i_pix1, &pix2[8*i_pix2], i_pix2 )
+ pixel_sa8d_8x8_core_altivec( &pix1[8*i_pix1+8], i_pix1, &pix2[8*i_pix2+8], i_pix2 ) +2)>>2;
return i_satd;
}
/****************************************************************************
* x264_pixel_init:
@@ -1633,4 +1774,7 @@ void x264_pixel_altivec_init( x264_pixel_function_t *pixf )
pixf->satd[PIXEL_4x4] = pixel_satd_4x4_altivec;
pixf->ssd[PIXEL_16x16] = pixel_ssd_16x16_altivec;
pixf->sa8d[PIXEL_16x16] = pixel_sa8d_16x16_altivec;
pixf->sa8d[PIXEL_8x8] = pixel_sa8d_8x8_altivec;
}