Commit f9bc2de2 authored by Steven Walters, committed by Fiona Glaser

Preprocessing cosmetics

Unify input/output defines to HAVE_* format.
Define values as 1 to simplify conditionals.
parent 691e2db1
......@@ -23,7 +23,7 @@ SRCSO =
CONFIG := $(shell cat config.h)
# Optional muxer module sources
ifneq ($(findstring AVS_INPUT, $(CONFIG)),)
ifneq ($(findstring HAVE_AVS, $(CONFIG)),)
SRCCLI += input/avs.c
endif
......@@ -31,15 +31,15 @@ ifneq ($(findstring HAVE_PTHREAD, $(CONFIG)),)
SRCCLI += input/thread.c
endif
ifneq ($(findstring LAVF_INPUT, $(CONFIG)),)
ifneq ($(findstring HAVE_LAVF, $(CONFIG)),)
SRCCLI += input/lavf.c
endif
ifneq ($(findstring FFMS_INPUT, $(CONFIG)),)
ifneq ($(findstring HAVE_FFMS, $(CONFIG)),)
SRCCLI += input/ffms.c
endif
ifneq ($(findstring MP4_OUTPUT, $(CONFIG)),)
ifneq ($(findstring HAVE_GPAC, $(CONFIG)),)
SRCCLI += output/mp4.c
endif
......
......@@ -121,7 +121,7 @@ static inline void bs_write( bs_t *s, int i_count, uint32_t i_bits )
s->i_left -= i_count;
if( s->i_left <= 32 )
{
#ifdef WORDS_BIGENDIAN
#if WORDS_BIGENDIAN
M32( s->p ) = s->cur_bits >> (32 - s->i_left);
#else
M32( s->p ) = endian_fix( s->cur_bits << s->i_left );
......
......@@ -62,7 +62,7 @@ void x264_cabac_encode_terminal_asm( x264_cabac_t *cb );
void x264_cabac_encode_ue_bypass( x264_cabac_t *cb, int exp_bits, int val );
void x264_cabac_encode_flush( x264_t *h, x264_cabac_t *cb );
#ifdef HAVE_MMX
#if HAVE_MMX
#define x264_cabac_encode_decision x264_cabac_encode_decision_asm
#define x264_cabac_encode_bypass x264_cabac_encode_bypass_asm
#define x264_cabac_encode_terminal x264_cabac_encode_terminal_asm
......
......@@ -26,7 +26,7 @@
#include <stdarg.h>
#include <ctype.h>
#ifdef HAVE_MALLOC_H
#if HAVE_MALLOC_H
#include <malloc.h>
#endif
......@@ -780,7 +780,7 @@ int x264_param_parse( x264_param_t *p, const char *name, const char *value )
}
OPT("log")
p->i_log_level = atoi(value);
#ifdef HAVE_VISUALIZE
#if HAVE_VISUALIZE
OPT("visualize")
p->b_visualize = atobool(value);
#endif
......@@ -1040,10 +1040,10 @@ void x264_picture_clean( x264_picture_t *pic )
void *x264_malloc( int i_size )
{
uint8_t *align_buf = NULL;
#ifdef SYS_MACOSX
#if SYS_MACOSX
/* Mac OS X always returns 16 bytes aligned memory */
align_buf = malloc( i_size );
#elif defined( HAVE_MALLOC_H )
#elif HAVE_MALLOC_H
align_buf = memalign( 16, i_size );
#else
uint8_t *buf = malloc( i_size + 15 + sizeof(void **) + sizeof(int) );
......@@ -1067,7 +1067,7 @@ void x264_free( void *p )
{
if( p )
{
#if defined( HAVE_MALLOC_H ) || defined( SYS_MACOSX )
#if HAVE_MALLOC_H || SYS_MACOSX
free( p );
#else
free( *( ( ( void **) p ) - 1 ) );
......
......@@ -809,7 +809,7 @@ struct x264_t
x264_deblock_function_t loopf;
x264_bitstream_function_t bsf;
#ifdef HAVE_VISUALIZE
#if HAVE_VISUALIZE
struct visualize_t *visualize;
#endif
x264_lookahead_t *lookahead;
......@@ -819,7 +819,7 @@ struct x264_t
#include "macroblock.h"
#include "rectangle.h"
#ifdef HAVE_MMX
#if HAVE_MMX
#include "x86/util.h"
#endif
......
......@@ -26,17 +26,17 @@
#include "common.h"
#include "cpu.h"
#if defined(HAVE_PTHREAD) && defined(SYS_LINUX)
#if HAVE_PTHREAD && SYS_LINUX
#include <sched.h>
#endif
#ifdef SYS_BEOS
#if SYS_BEOS
#include <kernel/OS.h>
#endif
#if defined(SYS_MACOSX) || defined(SYS_FREEBSD)
#if SYS_MACOSX || SYS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif
#ifdef SYS_OPENBSD
#if SYS_OPENBSD
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
......@@ -69,7 +69,7 @@ const x264_cpu_name_t x264_cpu_names[] = {
{"", 0},
};
#if (defined(ARCH_PPC) && defined(SYS_LINUX)) || (defined(ARCH_ARM) && !defined(HAVE_NEON))
#if (ARCH_PPC && SYS_LINUX) || (ARCH_ARM && !HAVE_NEON)
#include <signal.h>
#include <setjmp.h>
static sigjmp_buf jmpbuf;
......@@ -88,7 +88,7 @@ static void sigill_handler( int sig )
}
#endif
#ifdef HAVE_MMX
#if HAVE_MMX
int x264_cpu_cpuid_test( void );
uint32_t x264_cpu_cpuid( uint32_t op, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx );
......@@ -100,7 +100,7 @@ uint32_t x264_cpu_detect( void )
int max_extended_cap;
int cache;
#ifndef ARCH_X86_64
#if !ARCH_X86_64
if( !x264_cpu_cpuid_test() )
return 0;
#endif
......@@ -225,22 +225,22 @@ uint32_t x264_cpu_detect( void )
x264_log( NULL, X264_LOG_WARNING, "unable to determine cacheline size\n" );
}
#ifdef BROKEN_STACK_ALIGNMENT
#if BROKEN_STACK_ALIGNMENT
cpu |= X264_CPU_STACK_MOD4;
#endif
return cpu;
}
#elif defined( ARCH_PPC )
#elif ARCH_PPC
#if defined(SYS_MACOSX) || defined(SYS_OPENBSD)
#if SYS_MACOSX || SYS_OPENBSD
#include <sys/sysctl.h>
uint32_t x264_cpu_detect( void )
{
/* Thank you VLC */
uint32_t cpu = 0;
#ifdef SYS_OPENBSD
#if SYS_OPENBSD
int selectors[2] = { CTL_MACHDEP, CPU_ALTIVEC };
#else
int selectors[2] = { CTL_HW, HW_VECTORUNIT };
......@@ -255,7 +255,7 @@ uint32_t x264_cpu_detect( void )
return cpu;
}
#elif defined( SYS_LINUX )
#elif SYS_LINUX
uint32_t x264_cpu_detect( void )
{
......@@ -281,7 +281,7 @@ uint32_t x264_cpu_detect( void )
}
#endif
#elif defined( ARCH_ARM )
#elif ARCH_ARM
void x264_cpu_neon_test();
int x264_cpu_fast_neon_mrc_test();
......@@ -289,11 +289,11 @@ int x264_cpu_fast_neon_mrc_test();
uint32_t x264_cpu_detect( void )
{
int flags = 0;
#ifdef HAVE_ARMV6
#if HAVE_ARMV6
flags |= X264_CPU_ARMV6;
// don't do this hack if compiled with -mfpu=neon
#ifndef HAVE_NEON
#if !HAVE_NEON
static void (* oldsig)( int );
oldsig = signal( SIGILL, sigill_handler );
if( sigsetjmp( jmpbuf, 1 ) )
......@@ -334,13 +334,13 @@ uint32_t x264_cpu_detect( void )
int x264_cpu_num_processors( void )
{
#if !defined(HAVE_PTHREAD)
#if !HAVE_PTHREAD
return 1;
#elif defined(_WIN32)
return pthread_num_processors_np();
#elif defined(SYS_LINUX)
#elif SYS_LINUX
unsigned int bit;
int np;
cpu_set_t p_aff;
......@@ -350,15 +350,15 @@ int x264_cpu_num_processors( void )
np += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;
return np;
#elif defined(SYS_BEOS)
#elif SYS_BEOS
system_info info;
get_system_info( &info );
return info.cpu_count;
#elif defined(SYS_MACOSX) || defined(SYS_FREEBSD) || defined(SYS_OPENBSD)
#elif SYS_MACOSX || SYS_FREEBSD || SYS_OPENBSD
int numberOfCPUs;
size_t length = sizeof( numberOfCPUs );
#ifdef SYS_OPENBSD
#if SYS_OPENBSD
int mib[2] = { CTL_HW, HW_NCPU };
if( sysctl(mib, 2, &numberOfCPUs, &length, NULL, 0) )
#else
......
......@@ -25,7 +25,7 @@ uint32_t x264_cpu_detect( void );
int x264_cpu_num_processors( void );
void x264_cpu_emms( void );
void x264_cpu_sfence( void );
#ifdef HAVE_MMX
#if HAVE_MMX
#define x264_emms() x264_cpu_emms()
#else
#define x264_emms()
......@@ -41,7 +41,7 @@ void x264_cpu_mask_misalign_sse( void );
* problem, but I don't want to require such a new version.
* This applies only to x86_32, since other architectures that need alignment
* either have ABIs that ensure aligned stack, or don't support it at all. */
#if defined(ARCH_X86) && defined(HAVE_MMX)
#if ARCH_X86 && HAVE_MMX
int x264_stack_align( void (*func)(), ... );
#define x264_stack_align(func,...) x264_stack_align((void (*)())func, __VA_ARGS__)
#else
......
......@@ -22,13 +22,13 @@
*****************************************************************************/
#include "common.h"
#ifdef HAVE_MMX
#if HAVE_MMX
# include "x86/dct.h"
#endif
#ifdef ARCH_PPC
#if ARCH_PPC
# include "ppc/dct.h"
#endif
#ifdef ARCH_ARM
#if ARCH_ARM
# include "arm/dct.h"
#endif
......@@ -418,7 +418,7 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf )
dctf->dct4x4dc = dct4x4dc;
dctf->idct4x4dc = idct4x4dc;
#ifdef HAVE_MMX
#if HAVE_MMX
if( cpu&X264_CPU_MMX )
{
dctf->sub4x4_dct = x264_sub4x4_dct_mmx;
......@@ -429,7 +429,7 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf )
dctf->idct4x4dc = x264_idct4x4dc_mmx;
dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_mmxext;
#ifndef ARCH_X86_64
#if !ARCH_X86_64
dctf->sub8x8_dct = x264_sub8x8_dct_mmx;
dctf->sub16x16_dct = x264_sub16x16_dct_mmx;
dctf->add8x8_idct = x264_add8x8_idct_mmx;
......@@ -473,7 +473,7 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf )
#endif //HAVE_MMX
#ifdef HAVE_ALTIVEC
#if HAVE_ALTIVEC
if( cpu&X264_CPU_ALTIVEC )
{
dctf->sub4x4_dct = x264_sub4x4_dct_altivec;
......@@ -492,7 +492,7 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf )
}
#endif
#ifdef HAVE_ARMV6
#if HAVE_ARMV6
if( cpu&X264_CPU_NEON )
{
dctf->sub4x4_dct = x264_sub4x4_dct_neon;
......@@ -709,7 +709,7 @@ void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf, int b_interlaced )
pf->sub_8x8 = zigzag_sub_8x8_field;
pf->sub_4x4 = zigzag_sub_4x4_field;
pf->sub_4x4ac = zigzag_sub_4x4ac_field;
#ifdef HAVE_MMX
#if HAVE_MMX
if( cpu&X264_CPU_MMXEXT )
{
pf->scan_4x4 = x264_zigzag_scan_4x4_field_mmxext;
......@@ -722,7 +722,7 @@ void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf, int b_interlaced )
}
#endif
#ifdef HAVE_ALTIVEC
#if HAVE_ALTIVEC
if( cpu&X264_CPU_ALTIVEC )
pf->scan_4x4 = x264_zigzag_scan_4x4_field_altivec;
#endif
......@@ -734,7 +734,7 @@ void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf, int b_interlaced )
pf->sub_8x8 = zigzag_sub_8x8_frame;
pf->sub_4x4 = zigzag_sub_4x4_frame;
pf->sub_4x4ac = zigzag_sub_4x4ac_frame;
#ifdef HAVE_MMX
#if HAVE_MMX
if( cpu&X264_CPU_MMX )
pf->scan_4x4 = x264_zigzag_scan_4x4_frame_mmx;
if( cpu&X264_CPU_MMXEXT )
......@@ -751,18 +751,18 @@ void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf, int b_interlaced )
}
#endif
#ifdef HAVE_ALTIVEC
#if HAVE_ALTIVEC
if( cpu&X264_CPU_ALTIVEC )
pf->scan_4x4 = x264_zigzag_scan_4x4_frame_altivec;
#endif
#ifdef HAVE_ARMV6
#if HAVE_ARMV6
if( cpu&X264_CPU_NEON )
pf->scan_4x4 = x264_zigzag_scan_4x4_frame_neon;
#endif
}
pf->interleave_8x8_cavlc = zigzag_interleave_8x8_cavlc;
#ifdef HAVE_MMX
#if HAVE_MMX
if( cpu&X264_CPU_MMX )
pf->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_mmx;
if( cpu&X264_CPU_SHUFFLE_IS_FAST )
......
......@@ -385,7 +385,7 @@ void x264_frame_deblock_row( x264_t *h, int mb_y )
}
}
#ifdef HAVE_MMX
#if HAVE_MMX
void x264_deblock_v_chroma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_chroma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_chroma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta );
......@@ -404,7 +404,7 @@ void x264_deblock_strength_sse2 ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X
void x264_deblock_strength_ssse3 ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][4][4],
int mvy_limit, int bframe );
#ifdef ARCH_X86
#if ARCH_X86
void x264_deblock_h_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta );
......@@ -423,12 +423,12 @@ static void x264_deblock_v_luma_intra_mmxext( uint8_t *pix, int stride, int alph
#endif
#endif
#ifdef ARCH_PPC
#if ARCH_PPC
void x264_deblock_v_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
#endif // ARCH_PPC
#ifdef HAVE_ARMV6
#if HAVE_ARMV6
void x264_deblock_v_luma_neon( uint8_t *, int, int, int, int8_t * );
void x264_deblock_h_luma_neon( uint8_t *, int, int, int, int8_t * );
void x264_deblock_v_chroma_neon( uint8_t *, int, int, int, int8_t * );
......@@ -447,14 +447,14 @@ void x264_deblock_init( int cpu, x264_deblock_function_t *pf )
pf->deblock_chroma_intra[0] = deblock_h_chroma_intra_c;
pf->deblock_strength = deblock_strength_c;
#ifdef HAVE_MMX
#if HAVE_MMX
if( cpu&X264_CPU_MMXEXT )
{
pf->deblock_chroma[1] = x264_deblock_v_chroma_mmxext;
pf->deblock_chroma[0] = x264_deblock_h_chroma_mmxext;
pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_mmxext;
pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_mmxext;
#ifdef ARCH_X86
#if ARCH_X86
pf->deblock_luma[1] = x264_deblock_v_luma_mmxext;
pf->deblock_luma[0] = x264_deblock_h_luma_mmxext;
pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_mmxext;
......@@ -477,7 +477,7 @@ void x264_deblock_init( int cpu, x264_deblock_function_t *pf )
}
#endif
#ifdef HAVE_ALTIVEC
#if HAVE_ALTIVEC
if( cpu&X264_CPU_ALTIVEC )
{
pf->deblock_luma[1] = x264_deblock_v_luma_altivec;
......@@ -485,7 +485,7 @@ void x264_deblock_init( int cpu, x264_deblock_function_t *pf )
}
#endif // HAVE_ALTIVEC
#ifdef HAVE_ARMV6
#if HAVE_ARMV6
if( cpu&X264_CPU_NEON )
{
pf->deblock_luma[1] = x264_deblock_v_luma_neon;
......
......@@ -153,7 +153,7 @@ void disp_gray( int num, char *data, int width, int height, int stride, const un
int dpy_depth = DefaultDepth( disp_display, screen );
XImage *ximage = XCreateImage( disp_display, visual, dpy_depth, ZPixmap, 0, &dummy, width, height, 8, 0 );
disp_chkerror( !ximage, "no ximage" );
#ifdef WORDS_BIGENDIAN
#if WORDS_BIGENDIAN
ximage->byte_order = MSBFirst;
ximage->bitmap_bit_order = MSBFirst;
#else
......
......@@ -310,7 +310,7 @@ void x264_mb_mc_8x8( x264_t *h, int i8 );
static ALWAYS_INLINE uint32_t pack16to32( int a, int b )
{
#ifdef WORDS_BIGENDIAN
#if WORDS_BIGENDIAN
return b + (a<<16);
#else
return a + (b<<16);
......@@ -318,7 +318,7 @@ static ALWAYS_INLINE uint32_t pack16to32( int a, int b )
}
static ALWAYS_INLINE uint32_t pack8to16( int a, int b )
{
#ifdef WORDS_BIGENDIAN
#if WORDS_BIGENDIAN
return b + (a<<8);
#else
return a + (b<<8);
......@@ -326,7 +326,7 @@ static ALWAYS_INLINE uint32_t pack8to16( int a, int b )
}
static ALWAYS_INLINE uint32_t pack8to32( int a, int b, int c, int d )
{
#ifdef WORDS_BIGENDIAN
#if WORDS_BIGENDIAN
return d + (c<<8) + (b<<16) + (a<<24);
#else
return a + (b<<8) + (c<<16) + (d<<24);
......@@ -334,7 +334,7 @@ static ALWAYS_INLINE uint32_t pack8to32( int a, int b, int c, int d )
}
static ALWAYS_INLINE uint32_t pack16to32_mask( int a, int b )
{
#ifdef WORDS_BIGENDIAN
#if WORDS_BIGENDIAN
return (b&0xFFFF) + (a<<16);
#else
return (a&0xFFFF) + (b<<16);
......
......@@ -23,13 +23,13 @@
#include "common.h"
#ifdef HAVE_MMX
#if HAVE_MMX
#include "x86/mc.h"
#endif
#ifdef ARCH_PPC
#if ARCH_PPC
#include "ppc/mc.h"
#endif
#ifdef ARCH_ARM
#if ARCH_ARM
#include "arm/mc.h"
#endif
......@@ -404,7 +404,7 @@ static void frame_init_lowres_core( pixel *src0, pixel *dst0, pixel *dsth, pixel
}
}
#if defined(__GNUC__) && (defined(ARCH_X86) || defined(ARCH_X86_64))
#if defined(__GNUC__) && (ARCH_X86 || ARCH_X86_64)
// gcc isn't smart enough to use the "idiv" instruction
static ALWAYS_INLINE int32_t div_64_32(int64_t x, int32_t y)
{
......@@ -474,14 +474,14 @@ void x264_mc_init( int cpu, x264_mc_functions_t *pf )
pf->mbtree_propagate_cost = mbtree_propagate_cost;
#ifdef HAVE_MMX
#if HAVE_MMX
x264_mc_init_mmx( cpu, pf );
#endif
#ifdef HAVE_ALTIVEC
#if HAVE_ALTIVEC
if( cpu&X264_CPU_ALTIVEC )
x264_mc_altivec_init( pf );
#endif
#ifdef HAVE_ARMV6
#if HAVE_ARMV6
x264_mc_init_arm( cpu, pf );
#endif
}
......
......@@ -31,13 +31,13 @@
#include "config.h"
#ifdef HAVE_STDINT_H
#if HAVE_STDINT_H
#include <stdint.h>
#else
#include <inttypes.h>
#endif
#ifndef HAVE_LOG2F
#if !HAVE_LOG2F
#define log2f(x) (logf(x)/0.693147180559945f)
#define log2(x) (log(x)/0.693147180559945)
#endif
......@@ -47,7 +47,7 @@
#include <fcntl.h> // _O_BINARY
#endif
#if (defined(SYS_OPENBSD) && !defined(isfinite)) || defined(SYS_SunOS)
#if (SYS_OPENBSD && !defined(isfinite)) || SYS_SunOS
#define isfinite finite
#endif
#ifdef _WIN32
......@@ -68,7 +68,7 @@
// - armcc can't either, but is nice enough to actually tell you so
// - Apple gcc only maintains 4 byte alignment
// - llvm can align the stack, but only in svn and (unrelated) it exposes bugs in all released GNU binutils...
#if defined(ARCH_ARM) && defined(SYS_MACOSX)
#if ARCH_ARM && SYS_MACOSX
#define ALIGNED_ARRAY_8( type, name, sub1, ... )\
uint8_t name##_u [sizeof(type sub1 __VA_ARGS__) + 7]; \
type (*name) __VA_ARGS__ = (void*)((intptr_t)(name##_u+7) & ~7)
......@@ -77,7 +77,7 @@
ALIGNED_8( type name sub1 __VA_ARGS__ )
#endif
#ifdef ARCH_ARM
#if ARCH_ARM
#define ALIGNED_ARRAY_16( type, name, sub1, ... )\
uint8_t name##_u [sizeof(type sub1 __VA_ARGS__) + 15];\
type (*name) __VA_ARGS__ = (void*)((intptr_t)(name##_u+15) & ~15)
......@@ -103,7 +103,7 @@
#endif
/* threads */
#if defined(SYS_BEOS)
#if SYS_BEOS
#include <kernel/OS.h>
#define x264_pthread_t thread_id
static inline int x264_pthread_create( x264_pthread_t *t, void *a, void *(*f)(void *), void *d )
......@@ -121,9 +121,9 @@ static inline int x264_pthread_create( x264_pthread_t *t, void *a, void *(*f)(vo
#endif
#define HAVE_PTHREAD 1
#elif defined(HAVE_PTHREAD)
#elif HAVE_PTHREAD
#include <pthread.h>
#define USE_REAL_PTHREAD
#define USE_REAL_PTHREAD 1
#else
#define x264_pthread_t int
......@@ -131,7 +131,7 @@ static inline int x264_pthread_create( x264_pthread_t *t, void *a, void *(*f)(vo
#define x264_pthread_join(t,s)
#endif //SYS_*
#ifdef USE_REAL_PTHREAD
#if USE_REAL_PTHREAD
#define x264_pthread_t pthread_t
#define x264_pthread_create pthread_create
#define x264_pthread_join pthread_join
......@@ -172,23 +172,23 @@ static inline int x264_pthread_create( x264_pthread_t *t, void *a, void *(*f)(vo
#if !defined(_WIN64) && !defined(__LP64__)
#if defined(__INTEL_COMPILER)
#define BROKEN_STACK_ALIGNMENT /* define it if stack is not mod16 */
#define BROKEN_STACK_ALIGNMENT 1 /* define it if stack is not mod16 */
#endif
#endif
#ifdef WORDS_BIGENDIAN
#if WORDS_BIGENDIAN
#define endian_fix(x) (x)
#define endian_fix64(x) (x)
#define endian_fix32(x) (x)
#define endian_fix16(x) (x)
#else
#if defined(__GNUC__) && defined(HAVE_MMX)
#if defined(__GNUC__) && HAVE_MMX
static ALWAYS_INLINE uint32_t endian_fix32( uint32_t x )
{
asm("bswap %0":"+r"(x));
return x;
}
#elif defined(__GNUC__) && defined(HAVE_ARMV6)
#elif defined(__GNUC__) && HAVE_ARMV6
static ALWAYS_INLINE uint32_t endian_fix32( uint32_t x )
{
asm("rev %0, %0":"+r"(x));
......@@ -200,7 +200,7 @@ static ALWAYS_INLINE uint32_t endian_fix32( uint32_t x )
return (x<<24) + ((x<<8)&0xff0000) + ((x>>8)&0xff00) + (x>>24);
}
#endif
#if defined(__GNUC__) && defined(ARCH_X86_64)
#if defined(__GNUC__) && ARCH_X86_64
static ALWAYS_INLINE uint64_t endian_fix64( uint64_t x )
{
asm("bswap %0":"+r"(x));
......@@ -251,7 +251,7 @@ static int ALWAYS_INLINE x264_ctz( uint32_t x )
}
#endif
#if defined(__GNUC__) && defined(HAVE_MMX)
#if defined(__GNUC__) && HAVE_MMX
/* Don't use __builtin_prefetch; even as recent as 4.3.4, GCC seems incapable of
* using complex address modes properly unless we use inline asm. */
static ALWAYS_INLINE void x264_prefetch( void *p )
......@@ -261,14 +261,14 @@ static ALWAYS_INLINE void x264_prefetch( void *p )
/* We require that prefetch not fault on invalid reads, so we only enable it on
* known architectures. */
#elif defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ > 1) &&\
(defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_ARM) || defined(ARCH_PPC))
(ARCH_X86 || ARCH_X86_64 || ARCH_ARM || ARCH_PPC)
#define x264_prefetch(x) __builtin_prefetch(x)
#else
#define x264_prefetch(x)
#endif
#ifdef USE_REAL_PTHREAD
#ifdef SYS_MINGW
#if USE_REAL_PTHREAD
#if SYS_MINGW
#define x264_lower_thread_priority(p)\
{\
x264_pthread_t handle = pthread_self();\
......
......@@ -23,16 +23,16 @@
#include "common.h"
#ifdef HAVE_MMX
#if HAVE_MMX
# include "x86/pixel.h"
#endif
#ifdef ARCH_PPC
#if ARCH_PPC
# include "ppc/pixel.h"
#endif
#ifdef ARCH_ARM
#if ARCH_ARM
# include "arm/pixel.h"
#endif
#ifdef ARCH_UltraSparc
#if ARCH_UltraSparc
# include "sparc/pixel.h"
#endif
......@@ -406,7 +406,7 @@ SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )
#ifdef ARCH_UltraSparc
#if ARCH_UltraSparc
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
......@@ -444,14 +444,14 @@ SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )
SATD_X_DECL7()
#ifdef HAVE_MMX
#if HAVE_MMX
SATD_X_DECL7( _mmxext )
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
#endif
#ifdef HAVE_ARMV6
#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
......@@ -678,7 +678,7 @@ void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16;
pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;
#ifdef HAVE_MMX
#if HAVE_MMX
if( cpu&X264_CPU_MMX )
{
INIT7( ssd, _mmx );
......@@ -697,7 +697,7 @@ void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
INIT_ADS( _mmxext );
pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_mmxext;
#ifdef ARCH_X86
#if ARCH_X86
pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_mmxext;
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
......@@ -746,7 +746,7 @@ void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
pixf->ssim_end4 = x264_pixel_ssim_end4_sse2;
pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse2;
#ifdef ARCH_X86_64
#if ARCH_X86_64
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
......@@ -770,7 +770,7 @@ void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
if( cpu&X264_CPU_CACHELINE_64 )
{
INIT2( ssd, _sse2); /* faster for width 16 on p4 */
#ifdef ARCH_X86
#if ARCH_X86
INIT2( sad, _cache64_sse2 );
INIT2( sad_x3, _cache64_sse2 );
INIT2( sad_x4, _cache64_sse2 );
......@@ -829,7 +829,7 @@ void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_ssse3;
pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_ssse3;
pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_ssse3;
#ifdef ARCH_X86_64
#if ARCH_X86_64
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
......@@ -859,7 +859,7 @@ void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
}
#endif //HAVE_MMX
#ifdef HAVE_ARMV6
#if HAVE_ARMV6
if( cpu&X264_CPU_ARMV6 )
{
pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
......@@ -900,13 +900,13 @@ void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
}
}
#endif
#ifdef HAVE_ALTIVEC
#if HAVE_ALTIVEC
if( cpu&X264_CPU_ALTIVEC )
{
x264_pixel_altivec_init( pixf );
}
#endif
#ifdef ARCH_UltraSparc
#if ARCH_UltraSparc
INIT4( sad, _vis );
INIT4( sad_x3, _vis );
INIT4( sad_x4, _vis );
......
......@@ -18,7 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*****************************************************************************/
#ifdef HAVE_ALTIVEC_H
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
......
......@@ -27,13 +27,13 @@
#include "common.h"
#ifdef HAVE_MMX