/*****************************************************************************
 * cpu.h: cpu detection
 *****************************************************************************
 * Copyright (C) 2004-2013 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#ifndef X264_CPU_H
#define X264_CPU_H

uint32_t x264_cpu_detect( void );
int      x264_cpu_num_processors( void );
void     x264_cpu_emms( void );
void     x264_cpu_sfence( void );
#if HAVE_MMX
/* There is no way to forbid the compiler from using float instructions
 * before the emms so miscompilation could theoretically occur in the
 * unlikely event that the compiler reorders emms and float instructions. */
#if HAVE_X86_INLINE_ASM
/* Clobbering memory makes the compiler less likely to reorder code. */
#define x264_emms() asm volatile( "emms":::"memory","st","st(1)","st(2)", \
                                  "st(3)","st(4)","st(5)","st(6)","st(7)" )
#else
#define x264_emms() x264_cpu_emms()
#endif
#else
#define x264_emms()
#endif
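/* Usage sketch (illustrative addition, not part of the upstream header):
 * code that has just executed MMX instructions calls x264_emms() before any
 * float math, matching the reordering caveat above.  The routine name below
 * is hypothetical and used only for illustration.
 *
 *     some_mmx_kernel( dst, src );   // hypothetical MMX routine
 *     x264_emms();                   // restore x87 state before float math
 *     float f = (float)cost / range;
 */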
#define x264_sfence x264_cpu_sfence
void     x264_cpu_mask_misalign_sse( void );
void     x264_safe_intel_cpu_indicator_init( void );

/* kluge:
 * gcc can't give variables any greater alignment than the stack frame has.
 * We need 16 byte alignment for SSE2, so here we make sure that the stack is
 * aligned to 16 bytes.
 * gcc 4.2 introduced __attribute__((force_align_arg_pointer)) to fix this
 * problem, but I don't want to require such a new version.
 * This applies only to x86_32, since other architectures that need alignment
 * either have ABIs that ensure aligned stack, or don't support it at all. */
#if ARCH_X86 && HAVE_MMX
int x264_stack_align( void (*func)(), ... );
#define x264_stack_align(func,...) x264_stack_align((void (*)())func, __VA_ARGS__)
#else
#define x264_stack_align(func,...) func(__VA_ARGS__)
#endif
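/* Usage sketch (illustrative addition, not part of the upstream header):
 * on x86_32 a call whose callee needs a 16-byte-aligned stack for SSE2 is
 * routed through the wrapper, which realigns the stack before invoking the
 * target; on other targets the macro expands to a direct call.  The names
 * below are hypothetical and used only for illustration.
 *
 *     x264_stack_align( analyse_block, ctx, block );
 */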

typedef struct
{
    const char name[16];
    uint32_t flags;
} x264_cpu_name_t;
extern const x264_cpu_name_t x264_cpu_names[];
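/* Usage sketch (illustrative addition, not part of the upstream header):
 * the bitmask returned by x264_cpu_detect() can be matched against
 * x264_cpu_names[], assuming here that the table ends with an entry whose
 * flags field is zero.
 *
 *     uint32_t cpu = x264_cpu_detect();
 *     for( int i = 0; x264_cpu_names[i].flags; i++ )
 *         if( (cpu & x264_cpu_names[i].flags) == x264_cpu_names[i].flags )
 *             printf( "%s ", x264_cpu_names[i].name );
 */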

#endif