Commit 809c516a authored by Eric Petit's avatar Eric Petit

Optimized subXxX_dct


git-svn-id: svn://svn.videolan.org/x264/trunk@233 df754926-b1dd-0310-bc7b-ec298dee348c
parent f025abc9
......@@ -34,7 +34,7 @@ endif
# AltiVec optims
ifeq ($(ARCH),PPC)
SRCS += common/ppc/mc.c common/ppc/pixel.c
SRCS += common/ppc/mc.c common/ppc/pixel.c common/ppc/dct.c
endif
OBJS = $(SRCS:%.c=%.o)
......
......@@ -35,6 +35,9 @@
#ifdef HAVE_MMXEXT
# include "i386/dct.h"
#endif
#ifdef ARCH_PPC
# include "ppc/dct.h"
#endif
static inline int clip_uint8( int a )
......@@ -290,5 +293,13 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf )
dctf->idct4x4dc = x264_idct4x4dc_mmxext;
}
#endif
#ifdef ARCH_PPC
if( cpu&X264_CPU_ALTIVEC )
{
dctf->sub4x4_dct = x264_sub4x4_dct_altivec;
dctf->sub8x8_dct = x264_sub8x8_dct_altivec;
dctf->sub16x16_dct = x264_sub16x16_dct_altivec;
}
#endif
}
/*****************************************************************************
* dct.c: h264 encoder library
*****************************************************************************
* Copyright (C) 2003 Laurent Aimar
* $Id$
*
* Authors: Eric Petit <titer@m0k.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
*****************************************************************************/
#ifdef SYS_LINUX
#include <altivec.h>
#endif
#include "common/common.h"
#include "ppccommon.h"
/***********************************************************************
 * VEC_DCT
 ***********************************************************************
 * One 1-D pass of the H.264 4x4 integer forward transform butterfly
 * on four s16 vectors (each vector = one row or column):
 *   b0 = (a0+a3) + (a1+a2)
 *   b1 = 2*(a0-a3) + (a1-a2)
 *   b2 = (a0+a3) - (a1+a2)
 *   b3 = (a0-a3) - 2*(a1-a2)
 * a0 and a1 are clobbered (used as scratch for the differences).
 **********************************************************************/
#define VEC_DCT(a0,a1,a2,a3,b0,b1,b2,b3) \
b1 = vec_add( a0, a3 ); /* b1 <- a0+a3 (temporary sum) */ \
b3 = vec_add( a1, a2 ); /* b3 <- a1+a2 (temporary sum) */ \
b0 = vec_add( b1, b3 ); \
b2 = vec_sub( b1, b3 ); \
a0 = vec_sub( a0, a3 ); /* a0 <- a0-a3 */ \
a1 = vec_sub( a1, a2 ); /* a1 <- a1-a2 */ \
b1 = vec_add( a0, a0 ); \
b1 = vec_add( b1, a1 ); /* b1 = 2*(a0-a3) + (a1-a2) */ \
b3 = vec_sub( a0, a1 ); \
b3 = vec_sub( b3, a1 ) /* b3 = (a0-a3) - 2*(a1-a2) */
/***********************************************************************
 * x264_sub4x4_dct_altivec
 ***********************************************************************
 * Computes the 2-D H.264 forward transform of the 4x4 residual
 * pix1 - pix2 into dct. pix1/pix2 are advanced row by row using the
 * strides i_pix1/i_pix2 (done inside VEC_DIFF_H).
 * The 2-D transform is: row pass, transpose, column pass, transpose.
 * Only the high 4 s16 elements of each vector carry data; VEC_STORE8
 * writes those 8 bytes (4 coefficients) per dct row.
 **********************************************************************/
void x264_sub4x4_dct_altivec( int16_t dct[4][4],
uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
PREP_DIFF;
PREP_STORE8;
vec_s16_t dct0v, dct1v, dct2v, dct3v;
vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v;
/* Load one 4-pixel row from each source, widen to s16 and diff */
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct0v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct1v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct2v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct3v );
/* First 1-D pass + transpose, second 1-D pass + transpose */
VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v,
dct0v, dct1v, dct2v, dct3v );
VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v,
dct0v, dct1v, dct2v, dct3v );
/* Store 4 s16 coefficients per row */
VEC_STORE8( dct0v, dct[0] );
VEC_STORE8( dct1v, dct[1] );
VEC_STORE8( dct2v, dct[2] );
VEC_STORE8( dct3v, dct[3] );
}
/***********************************************************************
 * x264_sub8x8_dct_altivec
 ***********************************************************************
 * Computes the forward transform of the 8x8 residual pix1 - pix2 as
 * four 4x4 sub-blocks into dct[0..3]. Each s16 vector holds one full
 * 8-coefficient row spanning two horizontally adjacent 4x4 blocks:
 * VEC_STORE8_H writes the left (high) 4 coefficients and VEC_STORE8_L
 * the right (low) 4. Sub-block order: dct[0]/dct[1] = top-left /
 * top-right, dct[2]/dct[3] = bottom-left / bottom-right.
 **********************************************************************/
void x264_sub8x8_dct_altivec( int16_t dct[4][4][4],
uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
PREP_DIFF;
PREP_STORE8_HL;
vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;
vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v, tmp4v, tmp5v, tmp6v, tmp7v;
/* Diff all 8 rows (8 pixels each) into s16 vectors */
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct0v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct1v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct2v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct3v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct4v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct5v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct6v );
VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct7v );
/* Row pass on top and bottom 4-row halves, then 8x8 transpose */
VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
VEC_TRANSPOSE_8( tmp0v, tmp1v, tmp2v, tmp3v,
tmp4v, tmp5v, tmp6v, tmp7v,
dct0v, dct1v, dct2v, dct3v,
dct4v, dct5v, dct6v, dct7v );
/* Column pass, then transpose back to row order */
VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
VEC_TRANSPOSE_8( tmp0v, tmp1v, tmp2v, tmp3v,
tmp4v, tmp5v, tmp6v, tmp7v,
dct0v, dct1v, dct2v, dct3v,
dct4v, dct5v, dct6v, dct7v );
/* Scatter each row's high/low halves into the four 4x4 sub-blocks */
VEC_STORE8_H( dct0v, dct[0][0] );
VEC_STORE8_L( dct0v, dct[1][0] );
VEC_STORE8_H( dct1v, dct[0][1] );
VEC_STORE8_L( dct1v, dct[1][1] );
VEC_STORE8_H( dct2v, dct[0][2] );
VEC_STORE8_L( dct2v, dct[1][2] );
VEC_STORE8_H( dct3v, dct[0][3] );
VEC_STORE8_L( dct3v, dct[1][3] );
VEC_STORE8_H( dct4v, dct[2][0] );
VEC_STORE8_L( dct4v, dct[3][0] );
VEC_STORE8_H( dct5v, dct[2][1] );
VEC_STORE8_L( dct5v, dct[3][1] );
VEC_STORE8_H( dct6v, dct[2][2] );
VEC_STORE8_L( dct6v, dct[3][2] );
VEC_STORE8_H( dct7v, dct[2][3] );
VEC_STORE8_L( dct7v, dct[3][3] );
}
/***********************************************************************
 * x264_sub16x16_dct_altivec
 ***********************************************************************
 * Computes the forward transform of the 16x16 residual pix1 - pix2 as
 * sixteen 4x4 sub-blocks. Each VEC_DIFF_HL loads one full 16-pixel
 * row and splits it into a "high" vector (left 8 columns, dcthNv) and
 * a "low" vector (right 8 columns, dctlNv); each half is then handled
 * exactly like one 8x8 in x264_sub8x8_dct_altivec.
 * Output sub-block layout: dct[0..3] = top-left 8x8, dct[4..7] =
 * top-right, dct[8..11] = bottom-left, dct[12..15] = bottom-right.
 * NOTE: VEC_DIFF_HL also clobbers temp0v..temp3v (they are named
 * inside the macro), so the diff loads must precede any use of the
 * temp vectors — statement order here is load-bearing.
 **********************************************************************/
void x264_sub16x16_dct_altivec( int16_t dct[16][4][4],
uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
PREP_DIFF;
PREP_STORE8_HL;
vec_s16_t dcth0v, dcth1v, dcth2v, dcth3v,
dcth4v, dcth5v, dcth6v, dcth7v,
dctl0v, dctl1v, dctl2v, dctl3v,
dctl4v, dctl5v, dctl6v, dctl7v;
vec_s16_t temp0v, temp1v, temp2v, temp3v,
temp4v, temp5v, temp6v, temp7v;
/* Rows 0-7: diff into left (dcth) and right (dctl) 8-column halves */
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth0v, dctl0v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth1v, dctl1v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth2v, dctl2v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth3v, dctl3v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth4v, dctl4v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth5v, dctl5v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth6v, dctl6v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth7v, dctl7v );
/* Top-left 8x8: row pass, transpose, column pass, transpose */
VEC_DCT( dcth0v, dcth1v, dcth2v, dcth3v,
temp0v, temp1v, temp2v, temp3v );
VEC_DCT( dcth4v, dcth5v, dcth6v, dcth7v,
temp4v, temp5v, temp6v, temp7v );
VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
temp4v, temp5v, temp6v, temp7v,
dcth0v, dcth1v, dcth2v, dcth3v,
dcth4v, dcth5v, dcth6v, dcth7v );
VEC_DCT( dcth0v, dcth1v, dcth2v, dcth3v,
temp0v, temp1v, temp2v, temp3v );
VEC_DCT( dcth4v, dcth5v, dcth6v, dcth7v,
temp4v, temp5v, temp6v, temp7v );
VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
temp4v, temp5v, temp6v, temp7v,
dcth0v, dcth1v, dcth2v, dcth3v,
dcth4v, dcth5v, dcth6v, dcth7v );
/* Store top-left 8x8 as sub-blocks dct[0..3] */
VEC_STORE8_H( dcth0v, dct[0][0] );
VEC_STORE8_L( dcth0v, dct[1][0] );
VEC_STORE8_H( dcth1v, dct[0][1] );
VEC_STORE8_L( dcth1v, dct[1][1] );
VEC_STORE8_H( dcth2v, dct[0][2] );
VEC_STORE8_L( dcth2v, dct[1][2] );
VEC_STORE8_H( dcth3v, dct[0][3] );
VEC_STORE8_L( dcth3v, dct[1][3] );
VEC_STORE8_H( dcth4v, dct[2][0] );
VEC_STORE8_L( dcth4v, dct[3][0] );
VEC_STORE8_H( dcth5v, dct[2][1] );
VEC_STORE8_L( dcth5v, dct[3][1] );
VEC_STORE8_H( dcth6v, dct[2][2] );
VEC_STORE8_L( dcth6v, dct[3][2] );
VEC_STORE8_H( dcth7v, dct[2][3] );
VEC_STORE8_L( dcth7v, dct[3][3] );
/* Top-right 8x8: same 2-D transform on the low halves */
VEC_DCT( dctl0v, dctl1v, dctl2v, dctl3v,
temp0v, temp1v, temp2v, temp3v );
VEC_DCT( dctl4v, dctl5v, dctl6v, dctl7v,
temp4v, temp5v, temp6v, temp7v );
VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
temp4v, temp5v, temp6v, temp7v,
dctl0v, dctl1v, dctl2v, dctl3v,
dctl4v, dctl5v, dctl6v, dctl7v );
VEC_DCT( dctl0v, dctl1v, dctl2v, dctl3v,
temp0v, temp1v, temp2v, temp3v );
VEC_DCT( dctl4v, dctl5v, dctl6v, dctl7v,
temp4v, temp5v, temp6v, temp7v );
VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
temp4v, temp5v, temp6v, temp7v,
dctl0v, dctl1v, dctl2v, dctl3v,
dctl4v, dctl5v, dctl6v, dctl7v );
/* Store top-right 8x8 as sub-blocks dct[4..7] */
VEC_STORE8_H( dctl0v, dct[4][0] );
VEC_STORE8_L( dctl0v, dct[5][0] );
VEC_STORE8_H( dctl1v, dct[4][1] );
VEC_STORE8_L( dctl1v, dct[5][1] );
VEC_STORE8_H( dctl2v, dct[4][2] );
VEC_STORE8_L( dctl2v, dct[5][2] );
VEC_STORE8_H( dctl3v, dct[4][3] );
VEC_STORE8_L( dctl3v, dct[5][3] );
VEC_STORE8_H( dctl4v, dct[6][0] );
VEC_STORE8_L( dctl4v, dct[7][0] );
VEC_STORE8_H( dctl5v, dct[6][1] );
VEC_STORE8_L( dctl5v, dct[7][1] );
VEC_STORE8_H( dctl6v, dct[6][2] );
VEC_STORE8_L( dctl6v, dct[7][2] );
VEC_STORE8_H( dctl7v, dct[6][3] );
VEC_STORE8_L( dctl7v, dct[7][3] );
/* Rows 8-15: diff the bottom half (pix1/pix2 were advanced above) */
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth0v, dctl0v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth1v, dctl1v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth2v, dctl2v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth3v, dctl3v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth4v, dctl4v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth5v, dctl5v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth6v, dctl6v );
VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth7v, dctl7v );
/* Bottom-left 8x8 */
VEC_DCT( dcth0v, dcth1v, dcth2v, dcth3v,
temp0v, temp1v, temp2v, temp3v );
VEC_DCT( dcth4v, dcth5v, dcth6v, dcth7v,
temp4v, temp5v, temp6v, temp7v );
VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
temp4v, temp5v, temp6v, temp7v,
dcth0v, dcth1v, dcth2v, dcth3v,
dcth4v, dcth5v, dcth6v, dcth7v );
VEC_DCT( dcth0v, dcth1v, dcth2v, dcth3v,
temp0v, temp1v, temp2v, temp3v );
VEC_DCT( dcth4v, dcth5v, dcth6v, dcth7v,
temp4v, temp5v, temp6v, temp7v );
VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
temp4v, temp5v, temp6v, temp7v,
dcth0v, dcth1v, dcth2v, dcth3v,
dcth4v, dcth5v, dcth6v, dcth7v );
/* Store bottom-left 8x8 as sub-blocks dct[8..11] */
VEC_STORE8_H( dcth0v, dct[8][0] );
VEC_STORE8_L( dcth0v, dct[9][0] );
VEC_STORE8_H( dcth1v, dct[8][1] );
VEC_STORE8_L( dcth1v, dct[9][1] );
VEC_STORE8_H( dcth2v, dct[8][2] );
VEC_STORE8_L( dcth2v, dct[9][2] );
VEC_STORE8_H( dcth3v, dct[8][3] );
VEC_STORE8_L( dcth3v, dct[9][3] );
VEC_STORE8_H( dcth4v, dct[10][0] );
VEC_STORE8_L( dcth4v, dct[11][0] );
VEC_STORE8_H( dcth5v, dct[10][1] );
VEC_STORE8_L( dcth5v, dct[11][1] );
VEC_STORE8_H( dcth6v, dct[10][2] );
VEC_STORE8_L( dcth6v, dct[11][2] );
VEC_STORE8_H( dcth7v, dct[10][3] );
VEC_STORE8_L( dcth7v, dct[11][3] );
/* Bottom-right 8x8 */
VEC_DCT( dctl0v, dctl1v, dctl2v, dctl3v,
temp0v, temp1v, temp2v, temp3v );
VEC_DCT( dctl4v, dctl5v, dctl6v, dctl7v,
temp4v, temp5v, temp6v, temp7v );
VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
temp4v, temp5v, temp6v, temp7v,
dctl0v, dctl1v, dctl2v, dctl3v,
dctl4v, dctl5v, dctl6v, dctl7v );
VEC_DCT( dctl0v, dctl1v, dctl2v, dctl3v,
temp0v, temp1v, temp2v, temp3v );
VEC_DCT( dctl4v, dctl5v, dctl6v, dctl7v,
temp4v, temp5v, temp6v, temp7v );
VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
temp4v, temp5v, temp6v, temp7v,
dctl0v, dctl1v, dctl2v, dctl3v,
dctl4v, dctl5v, dctl6v, dctl7v );
/* Store bottom-right 8x8 as sub-blocks dct[12..15] */
VEC_STORE8_H( dctl0v, dct[12][0] );
VEC_STORE8_L( dctl0v, dct[13][0] );
VEC_STORE8_H( dctl1v, dct[12][1] );
VEC_STORE8_L( dctl1v, dct[13][1] );
VEC_STORE8_H( dctl2v, dct[12][2] );
VEC_STORE8_L( dctl2v, dct[13][2] );
VEC_STORE8_H( dctl3v, dct[12][3] );
VEC_STORE8_L( dctl3v, dct[13][3] );
VEC_STORE8_H( dctl4v, dct[14][0] );
VEC_STORE8_L( dctl4v, dct[15][0] );
VEC_STORE8_H( dctl5v, dct[14][1] );
VEC_STORE8_L( dctl5v, dct[15][1] );
VEC_STORE8_H( dctl6v, dct[14][2] );
VEC_STORE8_L( dctl6v, dct[15][2] );
VEC_STORE8_H( dctl7v, dct[14][3] );
VEC_STORE8_L( dctl7v, dct[15][3] );
}
/*****************************************************************************
* dct.h: h264 encoder library
*****************************************************************************
* Copyright (C) 2003 Laurent Aimar
* $Id$
*
* Authors: Eric Petit <titer@m0k.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
*****************************************************************************/
#ifndef _PPC_DCT_H
#define _PPC_DCT_H 1
void x264_sub4x4_dct_altivec( int16_t dct[4][4],
uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 );
void x264_sub8x8_dct_altivec( int16_t dct[4][4][4],
uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 );
void x264_sub16x16_dct_altivec( int16_t dct[16][4][4],
uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 );
#endif
......@@ -21,19 +21,11 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
*****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdarg.h>
#ifdef SYS_LINUX
#include <altivec.h>
#endif
#include "x264.h"
#include "common/pixel.h"
#include "pixel.h"
#include "common/common.h"
#include "ppccommon.h"
/***********************************************************************
......@@ -77,56 +69,6 @@ PIXEL_SAD_ALTIVEC( pixel_sad_8x8_altivec, 8, 8, 2s, 1 )
* SATD routines
**********************************************************************/
/***********************************************************************
* VEC_TRANSPOSE_8
***********************************************************************
* Transposes a 8x8 matrix of s16 vectors
**********************************************************************/
#define VEC_TRANSPOSE_8(a0,a1,a2,a3,a4,a5,a6,a7,b0,b1,b2,b3,b4,b5,b6,b7) \
b0 = vec_mergeh( a0, a4 ); \
b1 = vec_mergel( a0, a4 ); \
b2 = vec_mergeh( a1, a5 ); \
b3 = vec_mergel( a1, a5 ); \
b4 = vec_mergeh( a2, a6 ); \
b5 = vec_mergel( a2, a6 ); \
b6 = vec_mergeh( a3, a7 ); \
b7 = vec_mergel( a3, a7 ); \
a0 = vec_mergeh( b0, b4 ); \
a1 = vec_mergel( b0, b4 ); \
a2 = vec_mergeh( b1, b5 ); \
a3 = vec_mergel( b1, b5 ); \
a4 = vec_mergeh( b2, b6 ); \
a5 = vec_mergel( b2, b6 ); \
a6 = vec_mergeh( b3, b7 ); \
a7 = vec_mergel( b3, b7 ); \
b0 = vec_mergeh( a0, a4 ); \
b1 = vec_mergel( a0, a4 ); \
b2 = vec_mergeh( a1, a5 ); \
b3 = vec_mergel( a1, a5 ); \
b4 = vec_mergeh( a2, a6 ); \
b5 = vec_mergel( a2, a6 ); \
b6 = vec_mergeh( a3, a7 ); \
b7 = vec_mergel( a3, a7 )
/***********************************************************************
* VEC_TRANSPOSE_4
***********************************************************************
* Transposes a 4x4 matrix of s16 vectors
**********************************************************************/
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
b0 = vec_mergeh( a0, a0 ); \
b1 = vec_mergeh( a1, a0 ); \
b2 = vec_mergeh( a2, a0 ); \
b3 = vec_mergeh( a3, a0 ); \
a0 = vec_mergeh( b0, b2 ); \
a1 = vec_mergel( b0, b2 ); \
a2 = vec_mergeh( b1, b3 ); \
a3 = vec_mergel( b1, b3 ); \
b0 = vec_mergeh( a0, a2 ); \
b1 = vec_mergel( a0, a2 ); \
b2 = vec_mergeh( a1, a3 ); \
b3 = vec_mergel( a1, a3 )
/***********************************************************************
* VEC_HADAMAR
***********************************************************************
......@@ -145,53 +87,6 @@ PIXEL_SAD_ALTIVEC( pixel_sad_8x8_altivec, 8, 8, 2s, 1 )
b2 = vec_sub( a0, a2 ); \
b3 = vec_add( a0, a2 )
/***********************************************************************
* VEC_DIFF_H
***********************************************************************
* p1, p2: u8 *
* i1, i2, n: int
* d: s16v
*
* Loads n bytes from p1 and p2, do the diff of the high elements into
* d, increments p1 and p2 by i1 and i2
**********************************************************************/
#define PREP_DIFF \
LOAD_ZERO; \
PREP_LOAD; \
vec_s16_t pix1v, pix2v;
#define VEC_DIFF_H(p1,i1,p2,i2,n,d) \
VEC_LOAD( p1, pix1v, n, vec_s16_t ); \
pix1v = vec_u8_to_s16( pix1v ); \
VEC_LOAD( p2, pix2v, n, vec_s16_t ); \
pix2v = vec_u8_to_s16( pix2v ); \
d = vec_sub( pix1v, pix2v ); \
p1 += i1; \
p2 += i2
/***********************************************************************
* VEC_DIFF_HL
***********************************************************************
* p1, p2: u8 *
* i1, i2: int
* dh, dl: s16v
*
* Loads 16 bytes from p1 and p2, do the diff of the high elements into
* dh, the diff of the low elements into dl, increments p1 and p2 by i1
* and i2
**********************************************************************/
#define VEC_DIFF_HL(p1,i1,p2,i2,dh,dl) \
VEC_LOAD( p1, pix1v, 16, vec_s16_t ); \
temp0v = vec_u8_to_s16_h( pix1v ); \
temp1v = vec_u8_to_s16_l( pix1v ); \
VEC_LOAD( p2, pix2v, 16, vec_s16_t ); \
temp2v = vec_u8_to_s16_h( pix2v ); \
temp3v = vec_u8_to_s16_l( pix2v ); \
dh = vec_sub( temp0v, temp2v ); \
dl = vec_sub( temp1v, temp3v ); \
p1 += i1; \
p2 += i2
/***********************************************************************
* VEC_ABS
***********************************************************************
......
......@@ -93,28 +93,47 @@
_lv = vec_ld( 15, p ); \
_tmp1v = vec_perm( _lv, _hv, _tmp2v ); \
_tmp2v = vec_lvsr( 0, p ); \
_lv = vec_perm( v, _tmp1v, _tmp2v ); \
vec_st( _lv, 15, p ); \
_hv = vec_perm( _tmp1v, v, _tmp2v ); \
vec_st( _hv, 0, p )
_lv = vec_perm( (vec_u8_t) v, _tmp1v, _tmp2v ); \
vec_st( _lv, 15, (uint8_t *) p ); \
_hv = vec_perm( _tmp1v, (vec_u8_t) v, _tmp2v ); \
vec_st( _hv, 0, (uint8_t *) p )
#define PREP_STORE8 \
PREP_STORE16; \
vec_u8_t _tmp3v; \
const vec_u8_t sel = \
vec_u8_t _tmp3v, _tmp4v; \
const vec_u8_t sel_h = \
(vec_u8_t) CV(-1,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0)
#define VEC_STORE8( v, p ) \
_tmp3v = vec_lvsr( 0, p ); \
v = vec_perm( v, v, _tmp3v ); \
_lv = vec_ld( 7, p ); \
_tmp1v = vec_perm( sel, zero_u8v, _tmp3v ); \
_lv = vec_sel( _lv, v, _tmp1v ); \
vec_st( _lv, 7, p ); \
_hv = vec_ld( 0, p ); \
_tmp2v = vec_perm( zero_u8v, sel, _tmp3v ); \
_hv = vec_sel( _hv, v, _tmp2v ); \
vec_st( _hv, 0, p )
#define PREP_STORE8_HL \
PREP_STORE8; \
const vec_u8_t sel_l = \
(vec_u8_t) CV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1)
#define VEC_STORE8 VEC_STORE8_H
#define VEC_STORE8_H( v, p ) \
_tmp3v = vec_lvsr( 0, (uint8_t *) p ); \
_tmp4v = vec_perm( (vec_u8_t) v, (vec_u8_t) v, _tmp3v ); \
_lv = vec_ld( 7, (uint8_t *) p ); \
_tmp1v = vec_perm( sel_h, zero_u8v, _tmp3v ); \
_lv = vec_sel( _lv, _tmp4v, _tmp1v ); \
vec_st( _lv, 7, (uint8_t *) p ); \
_hv = vec_ld( 0, (uint8_t *) p ); \
_tmp2v = vec_perm( zero_u8v, sel_h, _tmp3v ); \
_hv = vec_sel( _hv, _tmp4v, _tmp2v ); \
vec_st( _hv, 0, (uint8_t *) p )
#define VEC_STORE8_L( v, p ) \
_tmp3v = vec_lvsr( 8, (uint8_t *) p ); \
_tmp4v = vec_perm( (vec_u8_t) v, (vec_u8_t) v, _tmp3v ); \
_lv = vec_ld( 7, (uint8_t *) p ); \
_tmp1v = vec_perm( sel_l, zero_u8v, _tmp3v ); \
_lv = vec_sel( _lv, _tmp4v, _tmp1v ); \
vec_st( _lv, 7, (uint8_t *) p ); \
_hv = vec_ld( 0, (uint8_t *) p ); \
_tmp2v = vec_perm( zero_u8v, sel_l, _tmp3v ); \
_hv = vec_sel( _hv, _tmp4v, _tmp2v ); \
vec_st( _hv, 0, (uint8_t *) p )
#define PREP_STORE4 \
PREP_STORE16; \
......@@ -134,3 +153,102 @@
_hv = vec_sel( _hv, v, _tmp2v ); \
vec_st( _hv, 0, p )
/***********************************************************************
* VEC_TRANSPOSE_8
***********************************************************************
* Transposes a 8x8 matrix of s16 vectors
**********************************************************************/
#define VEC_TRANSPOSE_8(a0,a1,a2,a3,a4,a5,a6,a7,b0,b1,b2,b3,b4,b5,b6,b7) \
b0 = vec_mergeh( a0, a4 ); \
b1 = vec_mergel( a0, a4 ); \
b2 = vec_mergeh( a1, a5 ); \
b3 = vec_mergel( a1, a5 ); \
b4 = vec_mergeh( a2, a6 ); \
b5 = vec_mergel( a2, a6 ); \
b6 = vec_mergeh( a3, a7 ); \
b7 = vec_mergel( a3, a7 ); \
a0 = vec_mergeh( b0, b4 ); \
a1 = vec_mergel( b0, b4 ); \
a2 = vec_mergeh( b1, b5 ); \
a3 = vec_mergel( b1, b5 ); \
a4 = vec_mergeh( b2, b6 ); \
a5 = vec_mergel( b2, b6 ); \
a6 = vec_mergeh( b3, b7 ); \
a7 = vec_mergel( b3, b7 ); \
b0 = vec_mergeh( a0, a4 ); \
b1 = vec_mergel( a0, a4 ); \
b2 = vec_mergeh( a1, a5 ); \
b3 = vec_mergel( a1, a5 ); \
b4 = vec_mergeh( a2, a6 ); \
b5 = vec_mergel( a2, a6 ); \
b6 = vec_mergeh( a3, a7 ); \
b7 = vec_mergel( a3, a7 )
/***********************************************************************
* VEC_TRANSPOSE_4
***********************************************************************
* Transposes a 4x4 matrix of s16 vectors.
* Actually source and destination are 8x4. The low elements of the
* source are discarded and the low elements of the destination mustn't
* be used.
**********************************************************************/
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
b0 = vec_mergeh( a0, a0 ); \
b1 = vec_mergeh( a1, a0 ); \
b2 = vec_mergeh( a2, a0 ); \
b3 = vec_mergeh( a3, a0 ); \
a0 = vec_mergeh( b0, b2 ); \
a1 = vec_mergel( b0, b2 ); \
a2 = vec_mergeh( b1, b3 ); \
a3 = vec_mergel( b1, b3 ); \
b0 = vec_mergeh( a0, a2 ); \
b1 = vec_mergel( a0, a2 ); \
b2 = vec_mergeh( a1, a3 ); \
b3 = vec_mergel( a1, a3 )
/***********************************************************************
* VEC_DIFF_H
***********************************************************************
* p1, p2: u8 *
* i1, i2, n: int
* d: s16v
*
* Loads n bytes from p1 and p2, do the diff of the high elements into
* d, increments p1 and p2 by i1 and i2
**********************************************************************/
#define PREP_DIFF \
LOAD_ZERO; \
PREP_LOAD; \
vec_s16_t pix1v, pix2v;
#define VEC_DIFF_H(p1,i1,p2,i2,n,d) \
VEC_LOAD( p1, pix1v, n, vec_s16_t ); \
pix1v = vec_u8_to_s16( pix1v ); \
VEC_LOAD( p2, pix2v, n, vec_s16_t ); \
pix2v = vec_u8_to_s16( pix2v ); \
d = vec_sub( pix1v, pix2v ); \
p1 += i1; \
p2 += i2