Commit 9a6815d3 authored by Eric Petit's avatar Eric Petit

Enables more aggressive optimizations (-fastf -mcpu=G4) on OS X.

Adds AltiVec interleaved SAD and SSD16x16.
Overall speedup of up to 20%.

Patch by anonymous


git-svn-id: svn://svn.videolan.org/x264/trunk@502 df754926-b1dd-0310-bc7b-ec298dee348c
parent 97ab2190
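For reference, the interleaved SAD_X routines added below score three or four candidate blocks against the same encoded block in a single pass. A minimal scalar sketch of the equivalent computation follows; the function name is illustrative only, not part of the patch, and it assumes x264's usual convention that fenc is laid out with FENC_STRIDE while the candidate pixels use i_stride:

#include <stdint.h>
#include <stdlib.h>  /* abs() */

/* Illustrative scalar equivalent of pixel_sad_x4_16x16 (not part of the patch).
 * FENC_STRIDE comes from x264's common headers. */
static void sad_x4_16x16_ref( uint8_t *fenc, uint8_t *pix[4],
                              int i_stride, int scores[4] )
{
    int i, x, y;
    for( i = 0; i < 4; i++ )
    {
        int sum = 0;
        for( y = 0; y < 16; y++ )
            for( x = 0; x < 16; x++ )
                sum += abs( fenc[y*FENC_STRIDE + x] - pix[i][y*i_stride + x] );
        scores[i] = sum;
    }
}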
@@ -65,6 +65,8 @@ PIXEL_SAD_ALTIVEC( pixel_sad_8x16_altivec, 8, 16, 2s, 1 )
PIXEL_SAD_ALTIVEC( pixel_sad_16x8_altivec, 16, 8, s, 3 )
PIXEL_SAD_ALTIVEC( pixel_sad_8x8_altivec, 8, 8, 2s, 1 )
/***********************************************************************
* SATD routines
**********************************************************************/
@@ -594,25 +596,1023 @@ static int pixel_satd_16x16_altivec( uint8_t *pix1, int i_pix1,
return i_satd / 2;
}
#define SAD_X( size ) \
static void pixel_sad_x3_##size( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, int i_stride, int scores[3] )\
{\
scores[0] = pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
scores[2] = pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void pixel_sad_x4_##size( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, uint8_t *pix3, int i_stride, int scores[4] )\
{\
scores[0] = pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
scores[2] = pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
scores[3] = pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}
/***********************************************************************
* Interleaved SAD routines
**********************************************************************/
static void pixel_sad_x4_16x16_altivec( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, uint8_t *pix3, int i_stride, int scores[4] )
{
DECLARE_ALIGNED( int, sum0, 16 );
DECLARE_ALIGNED( int, sum1, 16 );
DECLARE_ALIGNED( int, sum2, 16 );
DECLARE_ALIGNED( int, sum3, 16 );
int y;
vec_u8_t temp_lv, temp_hv;
vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
//vec_u8_t perm0v, perm1v, perm2v, perm3v;
vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB;
vec_u32_t sum0v, sum1v, sum2v, sum3v;
vec_s32_t zero;
zero = vec_splat_s32(0);
sum0v = vec_splat_u32(0);
sum1v = vec_splat_u32(0);
sum2v = vec_splat_u32(0);
sum3v = vec_splat_u32(0);
perm0vA = vec_lvsl(0, pix0);
perm1vA = vec_lvsl(0, pix1);
perm2vA = vec_lvsl(0, pix2);
perm3vA = vec_lvsl(0, pix3);
perm0vB = vec_lvsl(0, pix0 + i_stride);
perm1vB = vec_lvsl(0, pix1 + i_stride);
perm2vB = vec_lvsl(0, pix2 + i_stride);
perm3vB = vec_lvsl(0, pix3 + i_stride);
for (y = 0; y < 8; y++)
{
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
pix1 += i_stride;
fencv = vec_ld(0, fenc);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
pix2 += i_stride;
temp_lv = vec_ld(0, pix3);
temp_hv = vec_ld(16, pix3);
pix3v = vec_perm(temp_lv, temp_hv, perm3vA);
pix3 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
pix1 += i_stride;
fencv = vec_ld(0, fenc);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
pix2 += i_stride;
temp_lv = vec_ld(0, pix3);
temp_hv = vec_ld(16, pix3);
pix3v = vec_perm(temp_lv, temp_hv, perm3vB);
pix3 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
}
sum0v = vec_sums( sum0v, zero );
sum1v = vec_sums( sum1v, zero );
sum2v = vec_sums( sum2v, zero );
sum3v = vec_sums( sum3v, zero );
sum0v = vec_splat( sum0v, 3 );
sum1v = vec_splat( sum1v, 3 );
sum2v = vec_splat( sum2v, 3 );
sum3v = vec_splat( sum3v, 3 );
vec_ste( sum0v, 0, &sum0);
vec_ste( sum1v, 0, &sum1);
vec_ste( sum2v, 0, &sum2);
vec_ste( sum3v, 0, &sum3);
scores[0] = sum0;
scores[1] = sum1;
scores[2] = sum2;
scores[3] = sum3;
}
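The idiom repeated in every kernel here can be seen in isolation in the sketch below (illustrative only, not part of the patch): the byte-wise absolute difference is formed as max minus min, since classic AltiVec lacks an unsigned absolute-difference instruction; vec_sum4s accumulates the 16 byte differences into four 32-bit partial sums; and vec_sums, vec_splat and vec_ste fold those into an aligned scalar. Unlike the routines in this patch, which handle unaligned candidate pointers with vec_lvsl/vec_perm, this sketch assumes both inputs are 16-byte aligned; DECLARE_ALIGNED and the vec_*_t typedefs come from x264's ppc headers.

/* Standalone sketch of the 16-wide SAD accumulation/reduction pattern. */
static int sad_16xH_altivec_sketch( uint8_t *pix1, int i_stride1,
                                    uint8_t *pix2, int i_stride2, int height )
{
    DECLARE_ALIGNED( int, sum, 16 );
    vec_u8_t  v1, v2;
    vec_u32_t sumv = vec_splat_u32(0);
    vec_s32_t zero = vec_splat_s32(0);
    int y;

    for( y = 0; y < height; y++ )
    {
        v1 = vec_ld( 0, pix1 );
        v2 = vec_ld( 0, pix2 );
        /* |v1 - v2| for unsigned bytes: max - min */
        sumv = vec_sum4s( vec_sub( vec_max( v1, v2 ), vec_min( v1, v2 ) ), sumv );
        pix1 += i_stride1;
        pix2 += i_stride2;
    }
    /* Fold the four word sums into element 3 and store it to the aligned int. */
    sumv = (vec_u32_t) vec_sums( (vec_s32_t) sumv, zero );
    sumv = vec_splat( sumv, 3 );
    vec_ste( (vec_s32_t) sumv, 0, &sum );
    return sum;
}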
static void pixel_sad_x3_16x16_altivec( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, int i_stride, int scores[3] )
{
DECLARE_ALIGNED( int, sum0, 16 );
DECLARE_ALIGNED( int, sum1, 16 );
DECLARE_ALIGNED( int, sum2, 16 );
int y;
vec_u8_t temp_lv, temp_hv; // temporary load vectors
vec_u8_t fencv, pix0v, pix1v, pix2v;
vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB;
vec_u32_t sum0v, sum1v, sum2v;
vec_s32_t zero;
zero = vec_splat_s32(0);
sum0v = vec_splat_u32(0);
sum1v = vec_splat_u32(0);
sum2v = vec_splat_u32(0);
perm0vA = vec_lvsl(0, pix0);
perm1vA = vec_lvsl(0, pix1);
perm2vA = vec_lvsl(0, pix2);
perm0vB = vec_lvsl(0, pix0 + i_stride);
perm1vB = vec_lvsl(0, pix1 + i_stride);
perm2vB = vec_lvsl(0, pix2 + i_stride);
for (y = 0; y < 8; y++)
{
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
pix1 += i_stride;
fencv = vec_ld(0, fenc);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
pix2 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
pix1 += i_stride;
fencv = vec_ld(0, fenc);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
pix2 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
}
sum0v = vec_sums( sum0v, zero );
sum1v = vec_sums( sum1v, zero );
sum2v = vec_sums( sum2v, zero );
sum0v = vec_splat( sum0v, 3 );
sum1v = vec_splat( sum1v, 3 );
sum2v = vec_splat( sum2v, 3 );
vec_ste( sum0v, 0, &sum0);
vec_ste( sum1v, 0, &sum1);
vec_ste( sum2v, 0, &sum2);
scores[0] = sum0;
scores[1] = sum1;
scores[2] = sum2;
}
static void pixel_sad_x4_16x8_altivec( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, uint8_t *pix3, int i_stride, int scores[4] )
{
DECLARE_ALIGNED( int, sum0, 16 );
DECLARE_ALIGNED( int, sum1, 16 );
DECLARE_ALIGNED( int, sum2, 16 );
DECLARE_ALIGNED( int, sum3, 16 );
int y;
vec_u8_t temp_lv, temp_hv;
vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB;
vec_u32_t sum0v, sum1v, sum2v, sum3v;
vec_s32_t zero;
zero = vec_splat_s32(0);
sum0v = vec_splat_u32(0);
sum1v = vec_splat_u32(0);
sum2v = vec_splat_u32(0);
sum3v = vec_splat_u32(0);
perm0vA = vec_lvsl(0, pix0);
perm1vA = vec_lvsl(0, pix1);
perm2vA = vec_lvsl(0, pix2);
perm3vA = vec_lvsl(0, pix3);
perm0vB = vec_lvsl(0, pix0 + i_stride);
perm1vB = vec_lvsl(0, pix1 + i_stride);
perm2vB = vec_lvsl(0, pix2 + i_stride);
perm3vB = vec_lvsl(0, pix3 + i_stride);
for (y = 0; y < 4; y++)
{
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
pix1 += i_stride;
fencv = vec_ld(0, fenc);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
pix2 += i_stride;
temp_lv = vec_ld(0, pix3);
temp_hv = vec_ld(16, pix3);
pix3v = vec_perm(temp_lv, temp_hv, perm3vA);
pix3 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
pix1 += i_stride;
fencv = vec_ld(0, fenc);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
pix2 += i_stride;
temp_lv = vec_ld(0, pix3);
temp_hv = vec_ld(16, pix3);
pix3v = vec_perm(temp_lv, temp_hv, perm3vB);
pix3 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
}
sum0v = vec_sums( sum0v, zero );
sum1v = vec_sums( sum1v, zero );
sum2v = vec_sums( sum2v, zero );
sum3v = vec_sums( sum3v, zero );
sum0v = vec_splat( sum0v, 3 );
sum1v = vec_splat( sum1v, 3 );
sum2v = vec_splat( sum2v, 3 );
sum3v = vec_splat( sum3v, 3 );
vec_ste( sum0v, 0, &sum0);
vec_ste( sum1v, 0, &sum1);
vec_ste( sum2v, 0, &sum2);
vec_ste( sum3v, 0, &sum3);
scores[0] = sum0;
scores[1] = sum1;
scores[2] = sum2;
scores[3] = sum3;
}
static void pixel_sad_x3_16x8_altivec( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, int i_stride, int scores[3] )
{
DECLARE_ALIGNED( int, sum0, 16 );
DECLARE_ALIGNED( int, sum1, 16 );
DECLARE_ALIGNED( int, sum2, 16 );
int y;
vec_u8_t temp_lv, temp_hv;
vec_u8_t fencv, pix0v, pix1v, pix2v;
vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB;
vec_u32_t sum0v, sum1v, sum2v;
vec_s32_t zero;
zero = vec_splat_s32(0);
sum0v = vec_splat_u32(0);
sum1v = vec_splat_u32(0);
sum2v = vec_splat_u32(0);
perm0vA = vec_lvsl(0, pix0);
perm1vA = vec_lvsl(0, pix1);
perm2vA = vec_lvsl(0, pix2);
perm0vB = vec_lvsl(0, pix0 + i_stride);
perm1vB = vec_lvsl(0, pix1 + i_stride);
perm2vB = vec_lvsl(0, pix2 + i_stride);
for (y = 0; y < 4; y++)
{
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
pix1 += i_stride;
fencv = vec_ld(0, fenc);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
pix2 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
pix1 += i_stride;
fencv = vec_ld(0, fenc);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
pix2 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
}
sum0v = vec_sums( sum0v, zero );
sum1v = vec_sums( sum1v, zero );
sum2v = vec_sums( sum2v, zero );
sum0v = vec_splat( sum0v, 3 );
sum1v = vec_splat( sum1v, 3 );
sum2v = vec_splat( sum2v, 3 );
vec_ste( sum0v, 0, &sum0);
vec_ste( sum1v, 0, &sum1);
vec_ste( sum2v, 0, &sum2);
scores[0] = sum0;
scores[1] = sum1;
scores[2] = sum2;
}
static void pixel_sad_x4_8x16_altivec( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, uint8_t *pix3, int i_stride, int scores[4] )
{
DECLARE_ALIGNED( int, sum0, 16 );
DECLARE_ALIGNED( int, sum1, 16 );
DECLARE_ALIGNED( int, sum2, 16 );
DECLARE_ALIGNED( int, sum3, 16 );
int y;
vec_u8_t temp_lv, temp_hv;
vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB, permEncv;
vec_u32_t sum0v, sum1v, sum2v, sum3v;
vec_s32_t zero;
zero = vec_splat_s32(0);
sum0v = vec_splat_u32(0);
sum1v = vec_splat_u32(0);
sum2v = vec_splat_u32(0);
sum3v = vec_splat_u32(0);
permEncv = vec_lvsl(0, fenc);
perm0vA = vec_lvsl(0, pix0);
perm1vA = vec_lvsl(0, pix1);
perm2vA = vec_lvsl(0, pix2);
perm3vA = vec_lvsl(0, pix3);
perm0vB = vec_lvsl(0, pix0 + i_stride);
perm1vB = vec_lvsl(0, pix1 + i_stride);
perm2vB = vec_lvsl(0, pix2 + i_stride);
perm3vB = vec_lvsl(0, pix3 + i_stride);
for (y = 0; y < 8; y++)
{
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
pix1 += i_stride;
temp_lv = vec_ld(0, fenc);
fencv = vec_perm(temp_lv, temp_hv, permEncv);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
pix2 += i_stride;
temp_lv = vec_ld(0, pix3);
temp_hv = vec_ld(16, pix3);
pix3v = vec_perm(temp_lv, temp_hv, perm3vA);
pix3 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
pix1 += i_stride;
temp_lv = vec_ld(0, fenc);
fencv = vec_perm(temp_lv, temp_hv, permEncv);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
pix2 += i_stride;
temp_lv = vec_ld(0, pix3);
temp_hv = vec_ld(16, pix3);
pix3v = vec_perm(temp_lv, temp_hv, perm3vB);
pix3 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
}
sum0v = vec_sum2s( sum0v, zero );
sum1v = vec_sum2s( sum1v, zero );
sum2v = vec_sum2s( sum2v, zero );
sum3v = vec_sum2s( sum3v, zero );
sum0v = vec_splat( sum0v, 1 );
sum1v = vec_splat( sum1v, 1 );
sum2v = vec_splat( sum2v, 1 );
sum3v = vec_splat( sum3v, 1 );
vec_ste( sum0v, 0, &sum0);
vec_ste( sum1v, 0, &sum1);
vec_ste( sum2v, 0, &sum2);
vec_ste( sum3v, 0, &sum3);
scores[0] = sum0;
scores[1] = sum1;
scores[2] = sum2;
scores[3] = sum3;
}
static void pixel_sad_x3_8x16_altivec( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, int i_stride, int scores[3] )
{
DECLARE_ALIGNED( int, sum0, 16 );
DECLARE_ALIGNED( int, sum1, 16 );
DECLARE_ALIGNED( int, sum2, 16 );
int y;
vec_u8_t temp_lv, temp_hv;
vec_u8_t fencv, pix0v, pix1v, pix2v;
vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB, permEncv;
vec_u32_t sum0v, sum1v, sum2v;
vec_s32_t zero;
zero = vec_splat_s32(0);
sum0v = vec_splat_u32(0);
sum1v = vec_splat_u32(0);
sum2v = vec_splat_u32(0);
permEncv = vec_lvsl(0, fenc);
perm0vA = vec_lvsl(0, pix0);
perm1vA = vec_lvsl(0, pix1);
perm2vA = vec_lvsl(0, pix2);
perm0vB = vec_lvsl(0, pix0 + i_stride);
perm1vB = vec_lvsl(0, pix1 + i_stride);
perm2vB = vec_lvsl(0, pix2 + i_stride);
for (y = 0; y < 8; y++)
{
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
pix1 += i_stride;
temp_lv = vec_ld(0, fenc);
fencv = vec_perm(temp_lv, temp_hv, permEncv);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
pix2 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
temp_lv = vec_ld(0, pix0);
temp_hv = vec_ld(16, pix0);
pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
pix0 += i_stride;
temp_lv = vec_ld(0, pix1);
temp_hv = vec_ld(16, pix1);
pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
pix1 += i_stride;
temp_lv = vec_ld(0, fenc);
fencv = vec_perm(temp_lv, temp_hv, permEncv);
fenc += FENC_STRIDE;
temp_lv = vec_ld(0, pix2);
temp_hv = vec_ld(16, pix2);
pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
pix2 += i_stride;
sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
}
sum0v = vec_sum2s( sum0v, zero );
sum1v = vec_sum2s( sum1v, zero );
sum2v = vec_sum2s( sum2v, zero );
sum0v = vec_splat( sum0v, 1 );
sum1v = vec_splat( sum1v, 1 );
sum2v = vec_splat( sum2v, 1 );