Removing clang_includes; now in ycmd repo

Strahinja Val Markovic 2014-05-21 13:10:36 -07:00
parent 08da35472e
commit 26ab52e7a7
46 changed files with 0 additions and 24760 deletions


@@ -1,784 +0,0 @@
/* ===-------- Intrin.h ---------------------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
/* Only include this if we're compiling for the windows platform. */
#ifndef _MSC_VER
#include_next <Intrin.h>
#else
#ifndef __INTRIN_H
#define __INTRIN_H
/* First include the standard intrinsics. */
#include <x86intrin.h>
#ifdef __cplusplus
extern "C" {
#endif
/* And the random ones that aren't in those files. */
__m64 _m_from_float(float);
__m64 _m_from_int(int _l);
void _m_prefetch(void *);
float _m_to_float(__m64);
int _m_to_int(__m64 _M);
/* Other assorted instruction intrinsics. */
void __addfsbyte(unsigned long, unsigned char);
void __addfsdword(unsigned long, unsigned long);
void __addfsword(unsigned long, unsigned short);
void __code_seg(const char *);
void __cpuid(int[4], int);
void __cpuidex(int[4], int, int);
void __debugbreak(void);
__int64 __emul(int, int);
unsigned __int64 __emulu(unsigned int, unsigned int);
void __cdecl __fastfail(unsigned int);
unsigned int __getcallerseflags(void);
void __halt(void);
unsigned char __inbyte(unsigned short);
void __inbytestring(unsigned short, unsigned char *, unsigned long);
void __incfsbyte(unsigned long);
void __incfsdword(unsigned long);
void __incfsword(unsigned long);
unsigned long __indword(unsigned short);
void __indwordstring(unsigned short, unsigned long *, unsigned long);
void __int2c(void);
void __invlpg(void *);
unsigned short __inword(unsigned short);
void __inwordstring(unsigned short, unsigned short *, unsigned long);
void __lidt(void *);
unsigned __int64 __ll_lshift(unsigned __int64, int);
__int64 __ll_rshift(__int64, int);
void __llwpcb(void *);
unsigned char __lwpins32(unsigned int, unsigned int, unsigned int);
void __lwpval32(unsigned int, unsigned int, unsigned int);
unsigned int __lzcnt(unsigned int);
unsigned short __lzcnt16(unsigned short);
void __movsb(unsigned char *, unsigned char const *, size_t);
void __movsd(unsigned long *, unsigned long const *, size_t);
void __movsw(unsigned short *, unsigned short const *, size_t);
void __nop(void);
void __nvreg_restore_fence(void);
void __nvreg_save_fence(void);
void __outbyte(unsigned short, unsigned char);
void __outbytestring(unsigned short, unsigned char *, unsigned long);
void __outdword(unsigned short, unsigned long);
void __outdwordstring(unsigned short, unsigned long *, unsigned long);
void __outword(unsigned short, unsigned short);
void __outwordstring(unsigned short, unsigned short *, unsigned long);
static __inline__
unsigned int __popcnt(unsigned int);
static __inline__
unsigned short __popcnt16(unsigned short);
unsigned __int64 __rdtsc(void);
unsigned __int64 __rdtscp(unsigned int *);
unsigned long __readcr0(void);
unsigned long __readcr2(void);
unsigned long __readcr3(void);
unsigned long __readcr4(void);
unsigned long __readcr8(void);
unsigned int __readdr(unsigned int);
unsigned int __readeflags(void);
unsigned char __readfsbyte(unsigned long);
unsigned long __readfsdword(unsigned long);
unsigned __int64 __readfsqword(unsigned long);
unsigned short __readfsword(unsigned long);
unsigned __int64 __readmsr(unsigned long);
unsigned __int64 __readpmc(unsigned long);
unsigned long __segmentlimit(unsigned long);
void __sidt(void *);
void *__slwpcb(void);
void __stosb(unsigned char *, unsigned char, size_t);
void __stosd(unsigned long *, unsigned long, size_t);
void __stosw(unsigned short *, unsigned short, size_t);
void __svm_clgi(void);
void __svm_invlpga(void *, int);
void __svm_skinit(int);
void __svm_stgi(void);
void __svm_vmload(size_t);
void __svm_vmrun(size_t);
void __svm_vmsave(size_t);
void __ud2(void);
unsigned __int64 __ull_rshift(unsigned __int64, int);
void __vmx_off(void);
void __vmx_vmptrst(unsigned __int64 *);
void __wbinvd(void);
void __writecr0(unsigned int);
void __writecr3(unsigned int);
void __writecr4(unsigned int);
void __writecr8(unsigned int);
void __writedr(unsigned int, unsigned int);
void __writeeflags(unsigned int);
void __writefsbyte(unsigned long, unsigned char);
void __writefsdword(unsigned long, unsigned long);
void __writefsqword(unsigned long, unsigned __int64);
void __writefsword(unsigned long, unsigned short);
void __writemsr(unsigned long, unsigned __int64);
static __inline__
void *_AddressOfReturnAddress(void);
unsigned int _andn_u32(unsigned int, unsigned int);
unsigned int _bextr_u32(unsigned int, unsigned int, unsigned int);
unsigned int _bextri_u32(unsigned int, unsigned int);
static __inline__
unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
static __inline__
unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
static __inline__
unsigned char _bittest(long const *, long);
static __inline__
unsigned char _bittestandcomplement(long *, long);
static __inline__
unsigned char _bittestandreset(long *, long);
static __inline__
unsigned char _bittestandset(long *, long);
unsigned int _blcfill_u32(unsigned int);
unsigned int _blci_u32(unsigned int);
unsigned int _blcic_u32(unsigned int);
unsigned int _blcmsk_u32(unsigned int);
unsigned int _blcs_u32(unsigned int);
unsigned int _blsfill_u32(unsigned int);
unsigned int _blsi_u32(unsigned int);
unsigned int _blsic_u32(unsigned int);
unsigned int _blsmsk_u32(unsigned int);
unsigned int _blsr_u32(unsigned int);
unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
unsigned long __cdecl _byteswap_ulong(unsigned long);
unsigned short __cdecl _byteswap_ushort(unsigned short);
unsigned _bzhi_u32(unsigned int, unsigned int);
void __cdecl _disable(void);
void __cdecl _enable(void);
void __cdecl _fxrstor(void const *);
void __cdecl _fxsave(void *);
long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
static __inline__
long _InterlockedAnd(long volatile *_Value, long _Mask);
static __inline__
short _InterlockedAnd16(short volatile *_Value, short _Mask);
static __inline__
char _InterlockedAnd8(char volatile *_Value, char _Mask);
unsigned char _interlockedbittestandreset(long volatile *, long);
unsigned char _interlockedbittestandset(long volatile *, long);
static __inline__
long __cdecl _InterlockedCompareExchange(long volatile *_Destination,
long _Exchange, long _Comparand);
long _InterlockedCompareExchange_HLEAcquire(long volatile *, long, long);
long _InterlockedCompareExchange_HLERelease(long volatile *, long, long);
static __inline__
short _InterlockedCompareExchange16(short volatile *_Destination,
short _Exchange, short _Comparand);
static __inline__
__int64 _InterlockedCompareExchange64(__int64 volatile *_Destination,
__int64 _Exchange, __int64 _Comparand);
__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
__int64);
__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
__int64);
static __inline__
char _InterlockedCompareExchange8(char volatile *_Destination, char _Exchange,
char _Comparand);
void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
void *);
void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
void *);
static __inline__
long __cdecl _InterlockedDecrement(long volatile *_Addend);
static __inline__
short _InterlockedDecrement16(short volatile *_Addend);
static __inline__
long __cdecl _InterlockedExchange(long volatile *_Target, long _Value);
static __inline__
short _InterlockedExchange16(short volatile *_Target, short _Value);
static __inline__
char _InterlockedExchange8(char volatile *_Target, char _Value);
static __inline__
long __cdecl _InterlockedExchangeAdd(long volatile *_Addend, long _Value);
long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
static __inline__
char _InterlockedExchangeAdd8(char volatile *_Addend, char _Value);
static __inline__
long __cdecl _InterlockedIncrement(long volatile *_Addend);
static __inline__
short _InterlockedIncrement16(short volatile *_Addend);
static __inline__
long _InterlockedOr(long volatile *_Value, long _Mask);
static __inline__
short _InterlockedOr16(short volatile *_Value, short _Mask);
static __inline__
char _InterlockedOr8(char volatile *_Value, char _Mask);
static __inline__
long _InterlockedXor(long volatile *_Value, long _Mask);
static __inline__
short _InterlockedXor16(short volatile *_Value, short _Mask);
static __inline__
char _InterlockedXor8(char volatile *_Value, char _Mask);
void __cdecl _invpcid(unsigned int, void *);
static __inline__
unsigned long __cdecl _lrotl(unsigned long, int);
static __inline__
unsigned long __cdecl _lrotr(unsigned long, int);
static __inline__
unsigned int _lzcnt_u32(unsigned int);
static __inline__
void _ReadBarrier(void);
static __inline__
void _ReadWriteBarrier(void);
static __inline__
void *_ReturnAddress(void);
unsigned int _rorx_u32(unsigned int, const unsigned int);
int __cdecl _rdrand16_step(unsigned short *);
int __cdecl _rdrand32_step(unsigned int *);
static __inline__
unsigned int __cdecl _rotl(unsigned int _Value, int _Shift);
static __inline__
unsigned short _rotl16(unsigned short _Value, unsigned char _Shift);
static __inline__
unsigned __int64 __cdecl _rotl64(unsigned __int64 _Value, int _Shift);
static __inline__
unsigned char _rotl8(unsigned char _Value, unsigned char _Shift);
static __inline__
unsigned int __cdecl _rotr(unsigned int _Value, int _Shift);
static __inline__
unsigned short _rotr16(unsigned short _Value, unsigned char _Shift);
static __inline__
unsigned __int64 __cdecl _rotr64(unsigned __int64 _Value, int _Shift);
static __inline__
unsigned char _rotr8(unsigned char _Value, unsigned char _Shift);
int _sarx_i32(int, unsigned int);
/* FIXME: Need definition for jmp_buf.
int __cdecl _setjmp(jmp_buf); */
unsigned int _shlx_u32(unsigned int, unsigned int);
unsigned int _shrx_u32(unsigned int, unsigned int);
void _Store_HLERelease(long volatile *, long);
void _Store64_HLERelease(__int64 volatile *, __int64);
void _StorePointer_HLERelease(void *volatile *, void *);
unsigned int _t1mskc_u32(unsigned int);
unsigned int _tzcnt_u32(unsigned int);
unsigned int _tzmsk_u32(unsigned int);
static __inline__
void _WriteBarrier(void);
void _xabort(const unsigned int imm);
unsigned __int32 _xbegin(void);
void _xend(void);
unsigned __int64 __cdecl _xgetbv(unsigned int);
void __cdecl _xrstor(void const *, unsigned __int64);
void __cdecl _xsave(void *, unsigned __int64);
void __cdecl _xsaveopt(void *, unsigned __int64);
void __cdecl _xsetbv(unsigned int, unsigned __int64);
unsigned char _xtest(void);
/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
#ifdef __x86_64__
void __addgsbyte(unsigned long, unsigned char);
void __addgsdword(unsigned long, unsigned long);
void __addgsqword(unsigned long, unsigned __int64);
void __addgsword(unsigned long, unsigned short);
void __faststorefence(void);
void __incgsbyte(unsigned long);
void __incgsdword(unsigned long);
void __incgsqword(unsigned long);
void __incgsword(unsigned long);
unsigned __int64 __popcnt64(unsigned __int64);
unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
unsigned __int64 _HighPart,
unsigned char _Shift);
unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
unsigned __int64 _HighPart,
unsigned char _Shift);
void __stosq(unsigned __int64 *, unsigned __int64, size_t);
unsigned __int64 _andn_u64(unsigned __int64, unsigned __int64);
unsigned __int64 _bextr_u64(unsigned __int64, unsigned int, unsigned int);
unsigned __int64 _bextri_u64(unsigned __int64, unsigned int);
static __inline__
unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
static __inline__
unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
static __inline__
unsigned char _bittest64(__int64 const *, __int64);
static __inline__
unsigned char _bittestandcomplement64(__int64 *, __int64);
static __inline__
unsigned char _bittestandreset64(__int64 *, __int64);
static __inline__
unsigned char _bittestandset64(__int64 *, __int64);
unsigned __int64 _blcfill_u64(unsigned __int64);
unsigned __int64 _blci_u64(unsigned __int64);
unsigned __int64 _blcic_u64(unsigned __int64);
unsigned __int64 _blcmsk_u64(unsigned __int64);
unsigned __int64 _blcs_u64(unsigned __int64);
unsigned __int64 _blsfill_u64(unsigned __int64);
unsigned __int64 _blsi_u64(unsigned __int64);
unsigned __int64 _blsic_u64(unsigned __int64);
unsigned __int64 _blsmsk_u64(unsigned __int64);
unsigned __int64 _blsr_u64(unsigned __int64);
unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
unsigned __int64 _bzhi_u64(unsigned __int64, unsigned int);
void __cdecl _fxrstor64(void const *);
void __cdecl _fxsave64(void *);
long _InterlockedAnd_np(long volatile *_Value, long _Mask);
short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
long _Comparand);
unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
__int64 _ExchangeHigh,
__int64 _ExchangeLow,
__int64 *_ComparandResult);
unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
__int64 _ExchangeHigh,
__int64 _ExchangeLow,
__int64 *_ComparandResult);
short _InterlockedCompareExchange16_np(short volatile *_Destination,
short _Exchange, short _Comparand);
__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
__int64 _Exchange, __int64 _Comparand);
void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
void *_Exchange, void *_Comparand);
long _InterlockedOr_np(long volatile *_Value, long _Mask);
short _InterlockedOr16_np(short volatile *_Value, short _Mask);
__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
char _InterlockedOr8_np(char volatile *_Value, char _Mask);
long _InterlockedXor_np(long volatile *_Value, long _Mask);
short _InterlockedXor16_np(short volatile *_Value, short _Mask);
__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
char _InterlockedXor8_np(char volatile *_Value, char _Mask);
unsigned __int64 _lzcnt_u64(unsigned __int64);
__int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand,
__int64 *_HighProduct);
unsigned int __cdecl _readfsbase_u32(void);
unsigned __int64 __cdecl _readfsbase_u64(void);
unsigned int __cdecl _readgsbase_u32(void);
unsigned __int64 __cdecl _readgsbase_u64(void);
unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
unsigned __int64 _tzcnt_u64(unsigned __int64);
unsigned __int64 _tzmsk_u64(unsigned __int64);
unsigned __int64 _umul128(unsigned __int64 _Multiplier,
unsigned __int64 _Multiplicand,
unsigned __int64 *_HighProduct);
void __cdecl _writefsbase_u32(unsigned int);
void __cdecl _writefsbase_u64(unsigned __int64);
void __cdecl _writegsbase_u32(unsigned int);
void __cdecl _writegsbase_u64(unsigned __int64);
void __cdecl _xrstor64(void const *, unsigned __int64);
void __cdecl _xsave64(void *, unsigned __int64);
void __cdecl _xsaveopt64(void *, unsigned __int64);
#endif /* __x86_64__ */
/*----------------------------------------------------------------------------*\
|* Bit Twiddling
\*----------------------------------------------------------------------------*/
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_rotl8(unsigned char _Value, unsigned char _Shift) {
_Shift &= 0x7;
return _Shift ? (_Value << _Shift) | (_Value >> (8 - _Shift)) : _Value;
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_rotr8(unsigned char _Value, unsigned char _Shift) {
_Shift &= 0x7;
return _Shift ? (_Value >> _Shift) | (_Value << (8 - _Shift)) : _Value;
}
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
_rotl16(unsigned short _Value, unsigned char _Shift) {
_Shift &= 0xf;
return _Shift ? (_Value << _Shift) | (_Value >> (16 - _Shift)) : _Value;
}
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
_rotr16(unsigned short _Value, unsigned char _Shift) {
_Shift &= 0xf;
return _Shift ? (_Value >> _Shift) | (_Value << (16 - _Shift)) : _Value;
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_rotl(unsigned int _Value, int _Shift) {
_Shift &= 0x1f;
return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_rotr(unsigned int _Value, int _Shift) {
_Shift &= 0x1f;
return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
}
static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
_lrotl(unsigned long _Value, int _Shift) {
_Shift &= 0x1f;
return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
}
static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
_lrotr(unsigned long _Value, int _Shift) {
_Shift &= 0x1f;
return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
}
static
__inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
_rotl64(unsigned __int64 _Value, int _Shift) {
_Shift &= 0x3f;
return _Shift ? (_Value << _Shift) | (_Value >> (64 - _Shift)) : _Value;
}
static
__inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
_rotr64(unsigned __int64 _Value, int _Shift) {
_Shift &= 0x3f;
return _Shift ? (_Value >> _Shift) | (_Value << (64 - _Shift)) : _Value;
}
/*----------------------------------------------------------------------------*\
|* Bit Counting and Testing
\*----------------------------------------------------------------------------*/
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_BitScanForward(unsigned long *_Index, unsigned long _Mask) {
if (!_Mask)
return 0;
*_Index = __builtin_ctzl(_Mask);
return 1;
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_BitScanReverse(unsigned long *_Index, unsigned long _Mask) {
if (!_Mask)
return 0;
*_Index = 31 - __builtin_clzl(_Mask);
return 1;
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_lzcnt_u32(unsigned int a) {
if (!a)
return 32;
return __builtin_clz(a);
}
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
__popcnt16(unsigned short value) {
return __builtin_popcount((int)value);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__popcnt(unsigned int value) {
return __builtin_popcount(value);
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_bittest(long const *a, long b) {
return (*a >> b) & 1;
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_bittestandcomplement(long *a, long b) {
unsigned char x = (*a >> b) & 1;
*a = *a ^ (1 << b);
return x;
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_bittestandreset(long *a, long b) {
unsigned char x = (*a >> b) & 1;
*a = *a & ~(1 << b);
return x;
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_bittestandset(long *a, long b) {
unsigned char x = (*a >> b) & 1;
*a = *a | (1 << b);
return x;
}
#ifdef __x86_64__
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
if (!_Mask)
return 0;
*_Index = __builtin_ctzll(_Mask);
return 1;
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) {
if (!_Mask)
return 0;
*_Index = 63 - __builtin_clzll(_Mask);
return 1;
}
static
__inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
_lzcnt_u64(unsigned __int64 a) {
if (!a)
return 64;
return __builtin_clzll(a);
}
static __inline__
unsigned __int64 __attribute__((__always_inline__, __nodebug__))
__popcnt64(unsigned __int64 value) {
return __builtin_popcountll(value);
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_bittest64(__int64 const *a, __int64 b) {
return (*a >> b) & 1;
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_bittestandcomplement64(__int64 *a, __int64 b) {
unsigned char x = (*a >> b) & 1;
*a = *a ^ (1ll << b);
return x;
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_bittestandreset64(__int64 *a, __int64 b) {
unsigned char x = (*a >> b) & 1;
*a = *a & ~(1ll << b);
return x;
}
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_bittestandset64(__int64 *a, __int64 b) {
unsigned char x = (*a >> b) & 1;
*a = *a | (1ll << b);
return x;
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Exchange Add
\*----------------------------------------------------------------------------*/
static __inline__ char __attribute__((__always_inline__, __nodebug__))
_InterlockedExchangeAdd8(char volatile *_Addend, char _Value) {
return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
}
static __inline__ short __attribute__((__always_inline__, __nodebug__))
_InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
}
static __inline__ long __attribute__((__always_inline__, __nodebug__))
_InterlockedExchangeAdd(long volatile *_Addend, long _Value) {
return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Exchange Sub
\*----------------------------------------------------------------------------*/
static __inline__ char __attribute__((__always_inline__, __nodebug__))
_InterlockedExchangeSub8(char volatile *_Subend, char _Value) {
return __atomic_sub_fetch(_Subend, _Value, 0) + _Value;
}
static __inline__ short __attribute__((__always_inline__, __nodebug__))
_InterlockedExchangeSub16(short volatile *_Subend, short _Value) {
return __atomic_sub_fetch(_Subend, _Value, 0) + _Value;
}
static __inline__ long __attribute__((__always_inline__, __nodebug__))
_InterlockedExchangeSub(long volatile *_Subend, long _Value) {
return __atomic_sub_fetch(_Subend, _Value, 0) + _Value;
}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
return __atomic_sub_fetch(_Subend, _Value, 0) + _Value;
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Increment
\*----------------------------------------------------------------------------*/
static __inline__ short __attribute__((__always_inline__, __nodebug__))
_InterlockedIncrement16(short volatile *_Value) {
return __atomic_add_fetch(_Value, 1, 0);
}
static __inline__ long __attribute__((__always_inline__, __nodebug__))
_InterlockedIncrement(long volatile *_Value) {
return __atomic_add_fetch(_Value, 1, 0);
}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedIncrement64(__int64 volatile *_Value) {
return __atomic_add_fetch(_Value, 1, 0);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Decrement
\*----------------------------------------------------------------------------*/
static __inline__ short __attribute__((__always_inline__, __nodebug__))
_InterlockedDecrement16(short volatile *_Value) {
return __atomic_sub_fetch(_Value, 1, 0);
}
static __inline__ long __attribute__((__always_inline__, __nodebug__))
_InterlockedDecrement(long volatile *_Value) {
return __atomic_sub_fetch(_Value, 1, 0);
}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedDecrement64(__int64 volatile *_Value) {
return __atomic_sub_fetch(_Value, 1, 0);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked And
\*----------------------------------------------------------------------------*/
static __inline__ char __attribute__((__always_inline__, __nodebug__))
_InterlockedAnd8(char volatile *_Value, char _Mask) {
return __atomic_and_fetch(_Value, _Mask, 0);
}
static __inline__ short __attribute__((__always_inline__, __nodebug__))
_InterlockedAnd16(short volatile *_Value, short _Mask) {
return __atomic_and_fetch(_Value, _Mask, 0);
}
static __inline__ long __attribute__((__always_inline__, __nodebug__))
_InterlockedAnd(long volatile *_Value, long _Mask) {
return __atomic_and_fetch(_Value, _Mask, 0);
}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
return __atomic_and_fetch(_Value, _Mask, 0);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Or
\*----------------------------------------------------------------------------*/
static __inline__ char __attribute__((__always_inline__, __nodebug__))
_InterlockedOr8(char volatile *_Value, char _Mask) {
return __atomic_or_fetch(_Value, _Mask, 0);
}
static __inline__ short __attribute__((__always_inline__, __nodebug__))
_InterlockedOr16(short volatile *_Value, short _Mask) {
return __atomic_or_fetch(_Value, _Mask, 0);
}
static __inline__ long __attribute__((__always_inline__, __nodebug__))
_InterlockedOr(long volatile *_Value, long _Mask) {
return __atomic_or_fetch(_Value, _Mask, 0);
}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
return __atomic_or_fetch(_Value, _Mask, 0);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Xor
\*----------------------------------------------------------------------------*/
static __inline__ char __attribute__((__always_inline__, __nodebug__))
_InterlockedXor8(char volatile *_Value, char _Mask) {
return __atomic_xor_fetch(_Value, _Mask, 0);
}
static __inline__ short __attribute__((__always_inline__, __nodebug__))
_InterlockedXor16(short volatile *_Value, short _Mask) {
return __atomic_xor_fetch(_Value, _Mask, 0);
}
static __inline__ long __attribute__((__always_inline__, __nodebug__))
_InterlockedXor(long volatile *_Value, long _Mask) {
return __atomic_xor_fetch(_Value, _Mask, 0);
}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
return __atomic_xor_fetch(_Value, _Mask, 0);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Exchange
\*----------------------------------------------------------------------------*/
static __inline__ char __attribute__((__always_inline__, __nodebug__))
_InterlockedExchange8(char volatile *_Target, char _Value) {
__atomic_exchange(_Target, &_Value, &_Value, 0);
return _Value;
}
static __inline__ short __attribute__((__always_inline__, __nodebug__))
_InterlockedExchange16(short volatile *_Target, short _Value) {
__atomic_exchange(_Target, &_Value, &_Value, 0);
return _Value;
}
static __inline__ long __attribute__((__always_inline__, __nodebug__))
_InterlockedExchange(long volatile *_Target, long _Value) {
__atomic_exchange(_Target, &_Value, &_Value, 0);
return _Value;
}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
__atomic_exchange(_Target, &_Value, &_Value, 0);
return _Value;
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Compare Exchange
\*----------------------------------------------------------------------------*/
static __inline__ char __attribute__((__always_inline__, __nodebug__))
_InterlockedCompareExchange8(char volatile *_Destination,
char _Exchange, char _Comparand) {
__atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
return _Comparand;
}
static __inline__ short __attribute__((__always_inline__, __nodebug__))
_InterlockedCompareExchange16(short volatile *_Destination,
short _Exchange, short _Comparand) {
__atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
return _Comparand;
}
static __inline__ long __attribute__((__always_inline__, __nodebug__))
_InterlockedCompareExchange(long volatile *_Destination,
long _Exchange, long _Comparand) {
__atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
return _Comparand;
}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedCompareExchange64(__int64 volatile *_Destination,
__int64 _Exchange, __int64 _Comparand) {
__atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
return _Comparand;
}
#endif
/*----------------------------------------------------------------------------*\
|* Barriers
\*----------------------------------------------------------------------------*/
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__attribute__((deprecated("use other intrinsics or C++11 atomics instead")))
_ReadWriteBarrier(void) {
__asm__ volatile ("" : : : "memory");
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__attribute__((deprecated("use other intrinsics or C++11 atomics instead")))
_ReadBarrier(void) {
__asm__ volatile ("" : : : "memory");
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__attribute__((deprecated("use other intrinsics or C++11 atomics instead")))
_WriteBarrier(void) {
__asm__ volatile ("" : : : "memory");
}
/*----------------------------------------------------------------------------*\
|* Misc
\*----------------------------------------------------------------------------*/
static __inline__ void * __attribute__((__always_inline__, __nodebug__))
_AddressOfReturnAddress(void) {
return (void*)((char*)__builtin_frame_address(0) + sizeof(void*));
}
static __inline__ void * __attribute__((__always_inline__, __nodebug__))
_ReturnAddress(void) {
return __builtin_return_address(0);
}
#ifdef __cplusplus
}
#endif
#endif /* __INTRIN_H */
#endif /* _MSC_VER */
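
A quick standalone check of the masked-shift rotate pattern used by the fallbacks above (a hypothetical test harness, not part of the header):

#include <assert.h>
#include <stdio.h>

/* Same masked-shift rotate as the header's _rotl8 fallback. */
static unsigned char rotl8(unsigned char _Value, unsigned char _Shift) {
  _Shift &= 0x7;
  return _Shift ? (_Value << _Shift) | (_Value >> (8 - _Shift)) : _Value;
}

int main(void) {
  assert(rotl8(0x81, 1) == 0x03); /* 10000001 -> 00000011 */
  assert(rotl8(0xAB, 0) == 0xAB); /* zero shift is the identity */
  assert(rotl8(0xAB, 8) == 0xAB); /* shift counts are taken mod 8 */
  printf("rotl8 fallback ok\n");
  return 0;
}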


@@ -1,67 +0,0 @@
/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef _WMMINTRIN_AES_H
#define _WMMINTRIN_AES_H
#include <emmintrin.h>
#if !defined (__AES__)
# error "AES instructions not enabled"
#else
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_aesenc_si128(__m128i __V, __m128i __R)
{
return (__m128i)__builtin_ia32_aesenc128(__V, __R);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_aesenclast_si128(__m128i __V, __m128i __R)
{
return (__m128i)__builtin_ia32_aesenclast128(__V, __R);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_aesdec_si128(__m128i __V, __m128i __R)
{
return (__m128i)__builtin_ia32_aesdec128(__V, __R);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_aesdeclast_si128(__m128i __V, __m128i __R)
{
return (__m128i)__builtin_ia32_aesdeclast128(__V, __R);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_aesimc_si128(__m128i __V)
{
return (__m128i)__builtin_ia32_aesimc128(__V);
}
#define _mm_aeskeygenassist_si128(C, R) \
__builtin_ia32_aeskeygenassist128((C), (R))
#endif
#endif /* _WMMINTRIN_AES_H */
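
For reference, a single AES round through these wrappers (assumes an AES-NI-capable CPU and compilation with -maes; the input values are arbitrary):

#include <stdio.h>
#include <wmmintrin.h>

int main(void) {
  __m128i state = _mm_set_epi32(0x03020100, 0x07060504,
                                0x0b0a0908, 0x0f0e0d0c);
  __m128i rkey = _mm_setzero_si128();
  /* One forward round: SubBytes, ShiftRows, MixColumns, AddRoundKey. */
  __m128i out = _mm_aesenc_si128(state, rkey);
  unsigned char buf[16];
  _mm_storeu_si128((__m128i *)buf, out);
  for (int i = 0; i < 16; ++i)
    printf("%02x", buf[i]);
  printf("\n");
  return 0;
}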


@@ -1,34 +0,0 @@
/*===---- __wmmintrin_pclmul.h - PCLMUL intrinsics -------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef _WMMINTRIN_PCLMUL_H
#define _WMMINTRIN_PCLMUL_H
#if !defined (__PCLMUL__)
# error "PCLMUL instruction is not enabled"
#else
#define _mm_clmulepi64_si128(__X, __Y, __I) \
((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
(__v2di)(__m128i)(__Y), (char)(__I)))
#endif
#endif /* _WMMINTRIN_PCLMUL_H */
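
The macro performs a carry-less (GF(2)[x]) multiply of one 64-bit lane from each operand. A small sanity check, assuming -mpclmul and a PCLMULQDQ-capable CPU (x86-64 shown): (x + 1)(x^2 + 1) = x^3 + x^2 + x + 1, i.e. clmul(3, 5) = 15.

#include <stdio.h>
#include <wmmintrin.h>

int main(void) {
  __m128i a = _mm_set_epi64x(0, 3); /* x + 1 */
  __m128i b = _mm_set_epi64x(0, 5); /* x^2 + 1 */
  /* Immediate 0x00 selects the low 64-bit lane of both operands. */
  __m128i r = _mm_clmulepi64_si128(a, b, 0x00);
  printf("clmul(3, 5) = %lld\n", (long long)_mm_cvtsi128_si64(r));
  return 0;
}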

File diff suppressed because it is too large


@@ -1,68 +0,0 @@
/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __AMMINTRIN_H
#define __AMMINTRIN_H
#ifndef __SSE4A__
#error "SSE4A instruction set not enabled"
#else
#include <pmmintrin.h>
#define _mm_extracti_si64(x, len, idx) \
((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \
(char)(len), (char)(idx)))
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_extract_si64(__m128i __x, __m128i __y)
{
return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
}
#define _mm_inserti_si64(x, y, len, idx) \
((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \
(__v2di)(__m128i)(y), \
(char)(len), (char)(idx)))
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_insert_si64(__m128i __x, __m128i __y)
{
return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_sd(double *__p, __m128d __a)
{
__builtin_ia32_movntsd(__p, (__v2df)__a);
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_ss(float *__p, __m128 __a)
{
__builtin_ia32_movntss(__p, (__v4sf)__a);
}
#endif /* __SSE4A__ */
#endif /* __AMMINTRIN_H */
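
As a rough plain-C model of the bit-field extraction _mm_extracti_si64 performs on the low 64 bits (an assumption based on the EXTRQ field semantics, restricted here to lengths 1-63):

#include <stdio.h>

/* Extract 'len' bits starting at bit 'idx', zero-extended. */
static unsigned long long extract_field(unsigned long long x,
                                        unsigned len, unsigned idx) {
  return (x >> idx) & ((1ULL << len) - 1);
}

int main(void) {
  /* Pull the 8 bits starting at bit 4 out of 0xABCD: prints bc. */
  printf("%llx\n", extract_field(0xABCD, 8, 4));
  return 0;
}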

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,94 +0,0 @@
/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __BMI2__
# error "BMI2 instruction set not enabled"
#endif /* __BMI2__ */
#ifndef __BMI2INTRIN_H
#define __BMI2INTRIN_H
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_bzhi_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_bzhi_si(__X, __Y);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_pdep_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_pdep_si(__X, __Y);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_pext_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_pext_si(__X, __Y);
}
#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_bzhi_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_bzhi_di(__X, __Y);
}
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_pdep_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_pdep_di(__X, __Y);
}
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_pext_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_pext_di(__X, __Y);
}
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_mulx_u64 (unsigned long long __X, unsigned long long __Y,
unsigned long long *__P)
{
unsigned __int128 __res = (unsigned __int128) __X * __Y;
*__P = (unsigned long long) (__res >> 64);
return (unsigned long long) __res;
}
#else /* !__x86_64__ */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
{
unsigned long long __res = (unsigned long long) __X * __Y;
*__P = (unsigned int) (__res >> 32);
return (unsigned int) __res;
}
#endif /* !__x86_64__ */
#endif /* __BMI2INTRIN_H */
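
The _mulx_u32 fallback above is simply a widening multiply split into halves; the same computation in portable C:

#include <stdio.h>

int main(void) {
  unsigned int x = 0xFFFFFFFFu, y = 0xFFFFFFFFu;
  unsigned long long res = (unsigned long long)x * y; /* 64-bit product */
  unsigned int hi = (unsigned int)(res >> 32);
  unsigned int lo = (unsigned int)res;
  /* 0xFFFFFFFF * 0xFFFFFFFF = 0xFFFFFFFE00000001 */
  printf("hi=%08x lo=%08x\n", hi, lo);
  return 0;
}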


@@ -1,115 +0,0 @@
/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __BMI__
# error "BMI instruction set not enabled"
#endif /* __BMI__ */
#ifndef __BMIINTRIN_H
#define __BMIINTRIN_H
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
__tzcnt_u16(unsigned short __X)
{
return __builtin_ctzs(__X);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__andn_u32(unsigned int __X, unsigned int __Y)
{
return ~__X & __Y;
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__bextr_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_bextr_u32(__X, __Y);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blsi_u32(unsigned int __X)
{
return __X & -__X;
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blsmsk_u32(unsigned int __X)
{
return __X ^ (__X - 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blsr_u32(unsigned int __X)
{
return __X & (__X - 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__tzcnt_u32(unsigned int __X)
{
return __builtin_ctz(__X);
}
#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__andn_u64 (unsigned long long __X, unsigned long long __Y)
{
return ~__X & __Y;
}
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__bextr_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_bextr_u64(__X, __Y);
}
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__blsi_u64(unsigned long long __X)
{
return __X & -__X;
}
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__blsmsk_u64(unsigned long long __X)
{
return __X ^ (__X - 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__blsr_u64(unsigned long long __X)
{
return __X & (__X - 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__tzcnt_u64(unsigned long long __X)
{
return __builtin_ctzll(__X);
}
#endif
#endif /* __BMIINTRIN_H */
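
The BMI fallbacks above are classic two's-complement identities. For example, with __X = 0x2C (binary 101100):

#include <stdio.h>

int main(void) {
  unsigned int x = 0x2Cu;                 /* 101100 */
  printf("blsi   = %#x\n", x & -x);       /* isolate lowest set bit: 000100 */
  printf("blsmsk = %#x\n", x ^ (x - 1));  /* mask through that bit:  000111 */
  printf("blsr   = %#x\n", x & (x - 1));  /* clear that bit:         101000 */
  return 0;
}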


@@ -1,156 +0,0 @@
/*===---- cpuid.h - X86 cpu model detection --------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#if !(__x86_64__ || __i386__)
#error this header is for x86 only
#endif
/* Features in %ecx for level 1 */
#define bit_SSE3 0x00000001
#define bit_PCLMULQDQ 0x00000002
#define bit_DTES64 0x00000004
#define bit_MONITOR 0x00000008
#define bit_DSCPL 0x00000010
#define bit_VMX 0x00000020
#define bit_SMX 0x00000040
#define bit_EIST 0x00000080
#define bit_TM2 0x00000100
#define bit_SSSE3 0x00000200
#define bit_CNXTID 0x00000400
#define bit_FMA 0x00001000
#define bit_CMPXCHG16B 0x00002000
#define bit_xTPR 0x00004000
#define bit_PDCM 0x00008000
#define bit_PCID 0x00020000
#define bit_DCA 0x00040000
#define bit_SSE41 0x00080000
#define bit_SSE42 0x00100000
#define bit_x2APIC 0x00200000
#define bit_MOVBE 0x00400000
#define bit_POPCNT 0x00800000
#define bit_TSCDeadline 0x01000000
#define bit_AESNI 0x02000000
#define bit_XSAVE 0x04000000
#define bit_OSXSAVE 0x08000000
#define bit_AVX 0x10000000
#define bit_RDRAND 0x40000000
/* Features in %edx for level 1 */
#define bit_FPU 0x00000001
#define bit_VME 0x00000002
#define bit_DE 0x00000004
#define bit_PSE 0x00000008
#define bit_TSC 0x00000010
#define bit_MSR 0x00000020
#define bit_PAE 0x00000040
#define bit_MCE 0x00000080
#define bit_CX8 0x00000100
#define bit_APIC 0x00000200
#define bit_SEP 0x00000800
#define bit_MTRR 0x00001000
#define bit_PGE 0x00002000
#define bit_MCA 0x00004000
#define bit_CMOV 0x00008000
#define bit_PAT 0x00010000
#define bit_PSE36 0x00020000
#define bit_PSN 0x00040000
#define bit_CLFSH 0x00080000
#define bit_DS 0x00200000
#define bit_ACPI 0x00400000
#define bit_MMX 0x00800000
#define bit_FXSR 0x01000000
#define bit_SSE 0x02000000
#define bit_SSE2 0x04000000
#define bit_SS 0x08000000
#define bit_HTT 0x10000000
#define bit_TM 0x20000000
#define bit_PBE 0x80000000
/* Features in %ebx for level 7 sub-leaf 0 */
#define bit_FSGSBASE 0x00000001
#define bit_SMEP 0x00000080
#define bit_ENH_MOVSB 0x00000200
/* PIC on i386 uses %ebx, so preserve it. */
#if __i386__
#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
__asm(" pushl %%ebx\n" \
" cpuid\n" \
" mov %%ebx,%1\n" \
" popl %%ebx" \
: "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__level))
#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
__asm(" pushl %%ebx\n" \
" cpuid\n" \
" mov %%ebx,%1\n" \
" popl %%ebx" \
: "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__level), "2"(__count))
#else
#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
__asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__level))
#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
__asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__level), "2"(__count))
#endif
static __inline int __get_cpuid (unsigned int __level, unsigned int *__eax,
unsigned int *__ebx, unsigned int *__ecx,
unsigned int *__edx) {
__cpuid(__level, *__eax, *__ebx, *__ecx, *__edx);
return 1;
}
static __inline int __get_cpuid_max (unsigned int __level, unsigned int *__sig)
{
unsigned int __eax, __ebx, __ecx, __edx;
#if __i386__
int __cpuid_supported;
__asm(" pushfl\n"
" popl %%eax\n"
" movl %%eax,%%ecx\n"
" xorl $0x00200000,%%eax\n"
" pushl %%eax\n"
" popfl\n"
" pushfl\n"
" popl %%eax\n"
" movl $0,%0\n"
" cmpl %%eax,%%ecx\n"
" je 1f\n"
" movl $1,%0\n"
"1:"
: "=r" (__cpuid_supported) : : "eax", "ecx");
if (!__cpuid_supported)
return 0;
#endif
__cpuid(__level, __eax, __ebx, __ecx, __edx);
if (__sig)
*__sig = __ebx;
return __eax;
}
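
Typical usage of these helpers, checking the SSE4.2 bit from CPUID level 1 (a minimal sketch that relies only on the definitions in this header):

#include <cpuid.h>
#include <stdio.h>

int main(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 1; /* CPUID level 1 not available */
  printf("SSE4.2: %s\n", (ecx & bit_SSE42) ? "yes" : "no");
  return 0;
}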

File diff suppressed because it is too large


@@ -1,58 +0,0 @@
/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <f16cintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __F16C__
# error "F16C instruction is not enabled"
#endif /* __F16C__ */
#ifndef __F16CINTRIN_H
#define __F16CINTRIN_H
typedef float __v8sf __attribute__ ((__vector_size__ (32)));
typedef float __m256 __attribute__ ((__vector_size__ (32)));
#define _mm_cvtps_ph(a, imm) __extension__ ({ \
__m128 __a = (a); \
(__m128i)__builtin_ia32_vcvtps2ph((__v4sf)__a, (imm)); })
#define _mm256_cvtps_ph(a, imm) __extension__ ({ \
__m256 __a = (a); \
(__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)__a, (imm)); })
static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtph_ps(__m128i __a)
{
return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
}
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_cvtph_ps(__m128i __a)
{
return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
}
#endif /* __F16CINTRIN_H */
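
A float32 -> float16 -> float32 round trip through these conversions (assumes an F16C-capable CPU and -mf16c; <immintrin.h> exposes the same intrinsics):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128 f = _mm_set_ps(0.0f, -2.0f, 0.5f, 1.0f);
  __m128i h = _mm_cvtps_ph(f, 0); /* imm 0: round to nearest even */
  __m128 g = _mm_cvtph_ps(h);     /* back to single precision */
  float out[4];
  _mm_storeu_ps(out, g);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}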


@@ -1,124 +0,0 @@
/*===---- float.h - Characteristics of floating point types ----------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __FLOAT_H
#define __FLOAT_H
/* If we're on MinGW, fall back to the system's float.h, which might have
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
*/
#if (defined(__MINGW32__) || defined(_MSC_VER)) && \
defined(__has_include_next) && __has_include_next(<float.h>)
# include_next <float.h>
/* Undefine anything that we'll be redefining below. */
# undef FLT_EVAL_METHOD
# undef FLT_ROUNDS
# undef FLT_RADIX
# undef FLT_MANT_DIG
# undef DBL_MANT_DIG
# undef LDBL_MANT_DIG
# undef DECIMAL_DIG
# undef FLT_DIG
# undef DBL_DIG
# undef LDBL_DIG
# undef FLT_MIN_EXP
# undef DBL_MIN_EXP
# undef LDBL_MIN_EXP
# undef FLT_MIN_10_EXP
# undef DBL_MIN_10_EXP
# undef LDBL_MIN_10_EXP
# undef FLT_MAX_EXP
# undef DBL_MAX_EXP
# undef LDBL_MAX_EXP
# undef FLT_MAX_10_EXP
# undef DBL_MAX_10_EXP
# undef LDBL_MAX_10_EXP
# undef FLT_MAX
# undef DBL_MAX
# undef LDBL_MAX
# undef FLT_EPSILON
# undef DBL_EPSILON
# undef LDBL_EPSILON
# undef FLT_MIN
# undef DBL_MIN
# undef LDBL_MIN
# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
# undef FLT_TRUE_MIN
# undef DBL_TRUE_MIN
# undef LDBL_TRUE_MIN
# endif
#endif
/* Characteristics of floating point types, C99 5.2.4.2.2 */
#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
#define FLT_ROUNDS (__builtin_flt_rounds())
#define FLT_RADIX __FLT_RADIX__
#define FLT_MANT_DIG __FLT_MANT_DIG__
#define DBL_MANT_DIG __DBL_MANT_DIG__
#define LDBL_MANT_DIG __LDBL_MANT_DIG__
#define DECIMAL_DIG __DECIMAL_DIG__
#define FLT_DIG __FLT_DIG__
#define DBL_DIG __DBL_DIG__
#define LDBL_DIG __LDBL_DIG__
#define FLT_MIN_EXP __FLT_MIN_EXP__
#define DBL_MIN_EXP __DBL_MIN_EXP__
#define LDBL_MIN_EXP __LDBL_MIN_EXP__
#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
#define FLT_MAX_EXP __FLT_MAX_EXP__
#define DBL_MAX_EXP __DBL_MAX_EXP__
#define LDBL_MAX_EXP __LDBL_MAX_EXP__
#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
#define FLT_MAX __FLT_MAX__
#define DBL_MAX __DBL_MAX__
#define LDBL_MAX __LDBL_MAX__
#define FLT_EPSILON __FLT_EPSILON__
#define DBL_EPSILON __DBL_EPSILON__
#define LDBL_EPSILON __LDBL_EPSILON__
#define FLT_MIN __FLT_MIN__
#define DBL_MIN __DBL_MIN__
#define LDBL_MIN __LDBL_MIN__
#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
# define FLT_TRUE_MIN __FLT_DENORM_MIN__
# define DBL_TRUE_MIN __DBL_DENORM_MIN__
# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
#endif
#endif /* __FLOAT_H */
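
These macros expand to compiler builtins; a quick demonstration of FLT_EPSILON as the spacing just above 1.0f:

#include <float.h>
#include <stdio.h>

int main(void) {
  printf("FLT_EPSILON = %g\n", FLT_EPSILON);
  printf("1.0f + FLT_EPSILON > 1.0f: %d\n", 1.0f + FLT_EPSILON > 1.0f);
  printf("FLT_MANT_DIG = %d, DBL_MANT_DIG = %d\n",
         FLT_MANT_DIG, DBL_MANT_DIG);
  return 0;
}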


@@ -1,231 +0,0 @@
/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __X86INTRIN_H
#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __FMA4INTRIN_H
#define __FMA4INTRIN_H
#ifndef __FMA4__
# error "FMA4 instruction set is not enabled"
#else
#include <pmmintrin.h>
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
}
#endif /* __FMA4__ */
#endif /* __FMA4INTRIN_H */
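/* Usage sketch (illustrative only; assumes compilation with -mfma4 on an
 * FMA4-capable AMD CPU): _mm_macc_ps computes a*b + c per lane with a single
 * rounding step. */
#include <x86intrin.h>

static __m128 axpy4(__m128 a, __m128 x, __m128 y)
{
  return _mm_macc_ps(a, x, y); /* four lanes of a*x + y, fused */
}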

View File

@ -1,229 +0,0 @@
/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
#error "Never use <fmaintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __FMAINTRIN_H
#define __FMAINTRIN_H
#ifndef __FMA__
# error "FMA instruction set is not enabled"
#else
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
}
#endif /* __FMA__ */
#endif /* __FMAINTRIN_H */
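/* Usage sketch (illustrative only; assumes -mfma): _mm_fmadd_ps is the FMA3
 * counterpart of the FMA4 _mm_macc_ps above, again fusing the multiply and
 * the add into one rounding step. */
#include <immintrin.h>

static __m128 fma_axpy(__m128 a, __m128 x, __m128 y)
{
  return _mm_fmadd_ps(a, x, y); /* four lanes of a*x + y */
}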

View File

@ -1,118 +0,0 @@
/*===---- immintrin.h - Intel intrinsics -----------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
#define __IMMINTRIN_H
#ifdef __MMX__
#include <mmintrin.h>
#endif
#ifdef __SSE__
#include <xmmintrin.h>
#endif
#ifdef __SSE2__
#include <emmintrin.h>
#endif
#ifdef __SSE3__
#include <pmmintrin.h>
#endif
#ifdef __SSSE3__
#include <tmmintrin.h>
#endif
#if defined (__SSE4_2__) || defined (__SSE4_1__)
#include <smmintrin.h>
#endif
#if defined (__AES__)
#include <wmmintrin.h>
#endif
#ifdef __AVX__
#include <avxintrin.h>
#endif
#ifdef __AVX2__
#include <avx2intrin.h>
#endif
#ifdef __BMI__
#include <bmiintrin.h>
#endif
#ifdef __BMI2__
#include <bmi2intrin.h>
#endif
#ifdef __LZCNT__
#include <lzcntintrin.h>
#endif
#ifdef __FMA__
#include <fmaintrin.h>
#endif
#ifdef __RDRND__
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdrand16_step(unsigned short *__p)
{
return __builtin_ia32_rdrand16_step(__p);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdrand32_step(unsigned int *__p)
{
return __builtin_ia32_rdrand32_step(__p);
}
#ifdef __x86_64__
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdrand64_step(unsigned long long *__p)
{
return __builtin_ia32_rdrand64_step(__p);
}
#endif
#endif /* __RDRND__ */
#ifdef __RTM__
#include <rtmintrin.h>
#endif
/* FIXME: check __HLE__ as well when HLE is supported. */
#if defined (__RTM__)
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_xtest(void)
{
return __builtin_ia32_xtest();
}
#endif
#ifdef __SHA__
#include <shaintrin.h>
#endif
#endif /* __IMMINTRIN_H */
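/* Usage sketch (illustrative only; assumes -mrdrnd): the _rdrand*_step
 * helpers return 0 when the hardware generator is temporarily out of random
 * bits, so callers are expected to retry. */
#include <immintrin.h>

static int rdrand32_retry(unsigned int *out, int tries)
{
  while (tries-- > 0)
    if (_rdrand32_step(out))
      return 1; /* *out now holds a random value */
  return 0;     /* hardware never delivered one */
}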

View File

@ -1,43 +0,0 @@
/*===---- iso646.h - Standard header for alternate spellings of operators---===
*
* Copyright (c) 2008 Eli Friedman
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __ISO646_H
#define __ISO646_H
#ifndef __cplusplus
#define and &&
#define and_eq &=
#define bitand &
#define bitor |
#define compl ~
#define not !
#define not_eq !=
#define or ||
#define or_eq |=
#define xor ^
#define xor_eq ^=
#endif
#endif /* __ISO646_H */
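/* Usage sketch (illustrative only): in C these alternate spellings are plain
 * macros, so after including <iso646.h> the two forms are interchangeable. */
#include <iso646.h>

static int in_range(int x, int lo, int hi)
{
  return x >= lo and x <= hi; /* expands to: x >= lo && x <= hi */
}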

View File

@ -1,119 +0,0 @@
/*===---- limits.h - Standard header for integer sizes --------------------===*\
*
* Copyright (c) 2009 Chris Lattner
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
\*===----------------------------------------------------------------------===*/
#ifndef __CLANG_LIMITS_H
#define __CLANG_LIMITS_H
/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
Avert this #include_next madness. */
#if defined __GNUC__ && !defined _GCC_LIMITS_H_
#define _GCC_LIMITS_H_
#endif
/* System headers include a number of constants from POSIX in <limits.h>.
Include it if we're hosted. */
#if __STDC_HOSTED__ && \
defined(__has_include_next) && __has_include_next(<limits.h>)
#include_next <limits.h>
#endif
/* Many system headers try to "help us out" by defining these. No really, we
know how big each datatype is. */
#undef SCHAR_MIN
#undef SCHAR_MAX
#undef UCHAR_MAX
#undef SHRT_MIN
#undef SHRT_MAX
#undef USHRT_MAX
#undef INT_MIN
#undef INT_MAX
#undef UINT_MAX
#undef LONG_MIN
#undef LONG_MAX
#undef ULONG_MAX
#undef CHAR_BIT
#undef CHAR_MIN
#undef CHAR_MAX
/* C90/99 5.2.4.2.1 */
#define SCHAR_MAX __SCHAR_MAX__
#define SHRT_MAX __SHRT_MAX__
#define INT_MAX __INT_MAX__
#define LONG_MAX __LONG_MAX__
#define SCHAR_MIN (-__SCHAR_MAX__-1)
#define SHRT_MIN (-__SHRT_MAX__ -1)
#define INT_MIN (-__INT_MAX__ -1)
#define LONG_MIN (-__LONG_MAX__ -1L)
#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
#define USHRT_MAX (__SHRT_MAX__ *2 +1)
#define UINT_MAX (__INT_MAX__ *2U +1U)
#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
#ifndef MB_LEN_MAX
#define MB_LEN_MAX 1
#endif
#define CHAR_BIT __CHAR_BIT__
#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */
#define CHAR_MIN 0
#define CHAR_MAX UCHAR_MAX
#else
#define CHAR_MIN SCHAR_MIN
#define CHAR_MAX __SCHAR_MAX__
#endif
/* C99 5.2.4.2.1: Added long long.
C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
*/
#if __STDC_VERSION__ >= 199901 || __cplusplus >= 201103L
#undef LLONG_MIN
#undef LLONG_MAX
#undef ULLONG_MAX
#define LLONG_MAX __LONG_LONG_MAX__
#define LLONG_MIN (-__LONG_LONG_MAX__-1LL)
#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
#endif
/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension. It's too bad
that we don't have something like #pragma poison that could be used to
deprecate a macro - the code should just use LLONG_MAX and friends.
*/
#if defined(__GNU_LIBRARY__) ? defined(__USE_GNU) : !defined(__STRICT_ANSI__)
#undef LONG_LONG_MIN
#undef LONG_LONG_MAX
#undef ULONG_LONG_MAX
#define LONG_LONG_MAX __LONG_LONG_MAX__
#define LONG_LONG_MIN (-__LONG_LONG_MAX__-1LL)
#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
#endif
#endif /* __CLANG_LIMITS_H */
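/* Usage sketch (illustrative only): the *_MAX/*_MIN macros support portable
 * overflow checks that stay inside defined behavior. */
#include <limits.h>

static int add_would_overflow(int a, int b)
{
  return (b > 0 && a > INT_MAX - b) || (b < 0 && a < INT_MIN - b);
}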

View File

@ -1,55 +0,0 @@
/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __LZCNT__
# error "LZCNT instruction is not enabled"
#endif /* __LZCNT__ */
#ifndef __LZCNTINTRIN_H
#define __LZCNTINTRIN_H
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
__lzcnt16(unsigned short __X)
{
return __builtin_clzs(__X);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__lzcnt32(unsigned int __X)
{
return __builtin_clz(__X);
}
#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__lzcnt64(unsigned long long __X)
{
return __builtin_clzll(__X);
}
#endif
#endif /* __LZCNTINTRIN_H */
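/* Usage sketch (illustrative only; assumes -mlzcnt): unlike a raw
 * __builtin_clz, the lzcnt instruction is defined for a zero input, where it
 * returns the operand width. */
#include <x86intrin.h>

static unsigned int bit_width(unsigned int x)
{
  return 32 - __lzcnt32(x); /* 0 -> 0, 1 -> 1, 0xFF -> 8, ... */
}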

View File

@ -1,162 +0,0 @@
/*===---- mm3dnow.h - 3DNow! intrinsics ------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef _MM3DNOW_H_INCLUDED
#define _MM3DNOW_H_INCLUDED
#include <mmintrin.h>
#include <prfchwintrin.h>
typedef float __v2sf __attribute__((__vector_size__(8)));
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_m_femms(void) {
__builtin_ia32_femms();
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pavgusb(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pf2id(__m64 __m) {
return (__m64)__builtin_ia32_pf2id((__v2sf)__m);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfacc(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfadd(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfcmpeq(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfcmpge(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfcmpgt(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfmax(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfmin(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfmul(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfrcp(__m64 __m) {
return (__m64)__builtin_ia32_pfrcp((__v2sf)__m);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfrcpit1(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfrcpit2(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfrsqrt(__m64 __m) {
return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfrsqrtit1(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfsub(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfsubr(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pi2fd(__m64 __m) {
return (__m64)__builtin_ia32_pi2fd((__v2si)__m);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pmulhrw(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pf2iw(__m64 __m) {
return (__m64)__builtin_ia32_pf2iw((__v2sf)__m);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfnacc(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfpnacc(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pi2fw(__m64 __m) {
return (__m64)__builtin_ia32_pi2fw((__v2si)__m);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pswapdsf(__m64 __m) {
return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pswapdsi(__m64 __m) {
return (__m64)__builtin_ia32_pswapdsi((__v2si)__m);
}
#endif
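/* Usage sketch (illustrative only; assumes -m3dnow on a 3DNow!-era AMD CPU):
 * the pf* intrinsics operate on two packed floats in an __m64, and
 * _m_femms() must run before later x87/SSE floating-point code. */
#include <mm3dnow.h>

static __m64 add2f(__m64 a, __m64 b)
{
  __m64 r = _m_pfadd(a, b); /* two float lanes */
  _m_femms();               /* clear the shared MMX/x87 state */
  return r;
}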

View File

@ -1,75 +0,0 @@
/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __MM_MALLOC_H
#define __MM_MALLOC_H
#include <stdlib.h>
#ifdef _WIN32
#include <malloc.h>
#else
#ifndef __cplusplus
extern int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
#else
// Some systems (e.g. those with GNU libc) declare posix_memalign with an
// exception specifier. Via an "egregious workaround" in
// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid
// redeclaration of glibc's declaration.
extern "C" int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
#endif
#endif
#if !(defined(_WIN32) && defined(_mm_malloc))
static __inline__ void *__attribute__((__always_inline__, __nodebug__,
__malloc__))
_mm_malloc(size_t __size, size_t __align)
{
if (__align == 1) {
return malloc(__size);
}
if (!(__align & (__align - 1)) && __align < sizeof(void *))
__align = sizeof(void *);
void *__mallocedMemory;
#if defined(__MINGW32__)
__mallocedMemory = __mingw_aligned_malloc(__size, __align);
#elif defined(_WIN32)
__mallocedMemory = _aligned_malloc(__size, __align);
#else
if (posix_memalign(&__mallocedMemory, __align, __size))
return 0;
#endif
return __mallocedMemory;
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_free(void *__p)
{
free(__p);
}
#endif
#endif /* __MM_MALLOC_H */
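/* Usage sketch (illustrative only): storage from _mm_malloc must be released
 * with the matching _mm_free. */
#include <mm_malloc.h>

static float *alloc_vec(size_t n)
{
  return (float *)_mm_malloc(n * sizeof(float), 16); /* 16-byte aligned for SSE */
}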

View File

@ -1,503 +0,0 @@
/*===---- mmintrin.h - MMX intrinsics --------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __MMINTRIN_H
#define __MMINTRIN_H
#ifndef __MMX__
#error "MMX instruction set not enabled"
#else
typedef long long __m64 __attribute__((__vector_size__(8)));
typedef int __v2si __attribute__((__vector_size__(8)));
typedef short __v4hi __attribute__((__vector_size__(8)));
typedef char __v8qi __attribute__((__vector_size__(8)));
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_empty(void)
{
__builtin_ia32_emms();
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_si64(int __i)
{
return (__m64)__builtin_ia32_vec_init_v2si(__i, 0);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_si32(__m64 __m)
{
return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_m64(long long __i)
{
return (__m64)__i;
}
static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvtm64_si64(__m64 __m)
{
return (long long)__m;
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_packs_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_packs_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_packs_pu16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_adds_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_adds_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_adds_pu8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_adds_pu16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_subs_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_subs_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_subs_pu8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_subs_pu16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_madd_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mullo_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sll_pi16(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_slli_pi16(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sll_pi32(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_slli_pi32(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sll_si64(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psllq(__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_slli_si64(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psllqi(__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sra_pi16(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srai_pi16(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sra_pi32(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srai_pi32(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srl_pi16(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srli_pi16(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srl_pi32(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srli_pi32(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srl_si64(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psrlq(__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srli_si64(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psrlqi(__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_and_si64(__m64 __m1, __m64 __m2)
{
return __builtin_ia32_pand(__m1, __m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_andnot_si64(__m64 __m1, __m64 __m2)
{
return __builtin_ia32_pandn(__m1, __m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_or_si64(__m64 __m1, __m64 __m2)
{
return __builtin_ia32_por(__m1, __m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_xor_si64(__m64 __m1, __m64 __m2)
{
return __builtin_ia32_pxor(__m1, __m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setzero_si64(void)
{
return (__m64){ 0LL };
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set_pi32(int __i1, int __i0)
{
return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
{
return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
char __b1, char __b0)
{
return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3,
__b4, __b5, __b6, __b7);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi32(int __i)
{
return _mm_set_pi32(__i, __i);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi16(short __w)
{
return _mm_set_pi16(__w, __w, __w, __w);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi8(char __b)
{
return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setr_pi32(int __i0, int __i1)
{
return _mm_set_pi32(__i1, __i0);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setr_pi16(short __w0, short __w1, short __w2, short __w3)
{
return _mm_set_pi16(__w3, __w2, __w1, __w0);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
char __b6, char __b7)
{
return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}
/* Aliases for compatibility. */
#define _m_empty _mm_empty
#define _m_from_int _mm_cvtsi32_si64
#define _m_to_int _mm_cvtsi64_si32
#define _m_packsswb _mm_packs_pi16
#define _m_packssdw _mm_packs_pi32
#define _m_packuswb _mm_packs_pu16
#define _m_punpckhbw _mm_unpackhi_pi8
#define _m_punpckhwd _mm_unpackhi_pi16
#define _m_punpckhdq _mm_unpackhi_pi32
#define _m_punpcklbw _mm_unpacklo_pi8
#define _m_punpcklwd _mm_unpacklo_pi16
#define _m_punpckldq _mm_unpacklo_pi32
#define _m_paddb _mm_add_pi8
#define _m_paddw _mm_add_pi16
#define _m_paddd _mm_add_pi32
#define _m_paddsb _mm_adds_pi8
#define _m_paddsw _mm_adds_pi16
#define _m_paddusb _mm_adds_pu8
#define _m_paddusw _mm_adds_pu16
#define _m_psubb _mm_sub_pi8
#define _m_psubw _mm_sub_pi16
#define _m_psubd _mm_sub_pi32
#define _m_psubsb _mm_subs_pi8
#define _m_psubsw _mm_subs_pi16
#define _m_psubusb _mm_subs_pu8
#define _m_psubusw _mm_subs_pu16
#define _m_pmaddwd _mm_madd_pi16
#define _m_pmulhw _mm_mulhi_pi16
#define _m_pmullw _mm_mullo_pi16
#define _m_psllw _mm_sll_pi16
#define _m_psllwi _mm_slli_pi16
#define _m_pslld _mm_sll_pi32
#define _m_pslldi _mm_slli_pi32
#define _m_psllq _mm_sll_si64
#define _m_psllqi _mm_slli_si64
#define _m_psraw _mm_sra_pi16
#define _m_psrawi _mm_srai_pi16
#define _m_psrad _mm_sra_pi32
#define _m_psradi _mm_srai_pi32
#define _m_psrlw _mm_srl_pi16
#define _m_psrlwi _mm_srli_pi16
#define _m_psrld _mm_srl_pi32
#define _m_psrldi _mm_srli_pi32
#define _m_psrlq _mm_srl_si64
#define _m_psrlqi _mm_srli_si64
#define _m_pand _mm_and_si64
#define _m_pandn _mm_andnot_si64
#define _m_por _mm_or_si64
#define _m_pxor _mm_xor_si64
#define _m_pcmpeqb _mm_cmpeq_pi8
#define _m_pcmpeqw _mm_cmpeq_pi16
#define _m_pcmpeqd _mm_cmpeq_pi32
#define _m_pcmpgtb _mm_cmpgt_pi8
#define _m_pcmpgtw _mm_cmpgt_pi16
#define _m_pcmpgtd _mm_cmpgt_pi32
#endif /* __MMX__ */
#endif /* __MMINTRIN_H */
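/* Usage sketch (illustrative only; assumes -mmmx): MMX state aliases the x87
 * registers, so _mm_empty() belongs between MMX code and any later
 * floating-point use. */
#include <mmintrin.h>

static __m64 add4_sat(short a, short b)
{
  __m64 r = _mm_adds_pi16(_mm_set1_pi16(a), _mm_set1_pi16(b)); /* 4 lanes, saturating */
  _mm_empty(); /* leave MMX state */
  return r;
}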

View File

@ -1,146 +0,0 @@
module _Builtin_intrinsics [system] {
explicit module altivec {
requires altivec
header "altivec.h"
}
explicit module intel {
requires x86
export *
header "immintrin.h"
header "x86intrin.h"
explicit module mm_malloc {
header "mm_malloc.h"
export * // note: for <stdlib.h> dependency
}
explicit module cpuid {
requires x86
header "cpuid.h"
}
explicit module mmx {
requires mmx
header "mmintrin.h"
}
explicit module f16c {
requires f16c
header "f16cintrin.h"
}
explicit module sse {
requires sse
export mmx
export * // note: for hackish <emmintrin.h> dependency
header "xmmintrin.h"
}
explicit module sse2 {
requires sse2
export sse
header "emmintrin.h"
}
explicit module sse3 {
requires sse3
export sse2
header "pmmintrin.h"
}
explicit module ssse3 {
requires ssse3
export sse3
header "tmmintrin.h"
}
explicit module sse4_1 {
requires sse41
export ssse3
header "smmintrin.h"
}
explicit module sse4_2 {
requires sse42
export sse4_1
header "nmmintrin.h"
}
explicit module sse4a {
requires sse4a
export sse3
header "ammintrin.h"
}
explicit module avx {
requires avx
export sse4_2
header "avxintrin.h"
}
explicit module avx2 {
requires avx2
export avx
header "avx2intrin.h"
}
explicit module bmi {
requires bmi
header "bmiintrin.h"
}
explicit module bmi2 {
requires bmi2
header "bmi2intrin.h"
}
explicit module fma {
requires fma
header "fmaintrin.h"
}
explicit module fma4 {
requires fma4
export sse3
header "fma4intrin.h"
}
explicit module lzcnt {
requires lzcnt
header "lzcntintrin.h"
}
explicit module popcnt {
requires popcnt
header "popcntintrin.h"
}
explicit module mm3dnow {
requires mm3dnow
header "mm3dnow.h"
}
explicit module xop {
requires xop
export fma4
header "xopintrin.h"
}
explicit module aes_pclmul {
requires aes, pclmul
header "wmmintrin.h"
}
explicit module aes {
requires aes
header "__wmmintrin_aes.h"
}
explicit module pclmul {
requires pclmul
header "__wmmintrin_pclmul.h"
}
}
}
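// Usage sketch (illustrative only): with -fmodules, a translation unit can
// import a single submodule, e.g. in Objective-C syntax:
//
//   @import _Builtin_intrinsics.intel.sse2;
//
// and the "requires sse2" clause above turns the import into an error when
// SSE2 is not enabled for the target.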

View File

@ -1,35 +0,0 @@
/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef _NMMINTRIN_H
#define _NMMINTRIN_H
#ifndef __SSE4_2__
#error "SSE4.2 instruction set not enabled"
#else
/* To match the expectations of gcc, the SSE4.2 definitions live in
 <smmintrin.h>, so we simply include that header here. */
#include <smmintrin.h>
#endif /* __SSE4_2__ */
#endif /* _NMMINTRIN_H */
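/* Note (illustrative only): this header is a pure compatibility shim; for
 * SSE4.2 code, including <nmmintrin.h> and <smmintrin.h> is equivalent. */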

View File

@ -1,117 +0,0 @@
/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __PMMINTRIN_H
#define __PMMINTRIN_H
#ifndef __SSE3__
#error "SSE3 instruction set not enabled"
#else
#include <emmintrin.h>
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_lddqu_si128(__m128i const *__p)
{
return (__m128i)__builtin_ia32_lddqu((char const *)__p);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_addsub_ps(__m128 __a, __m128 __b)
{
return __builtin_ia32_addsubps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_hadd_ps(__m128 __a, __m128 __b)
{
return __builtin_ia32_haddps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_hsub_ps(__m128 __a, __m128 __b)
{
return __builtin_ia32_hsubps(__a, __b);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_movehdup_ps(__m128 __a)
{
return __builtin_shufflevector(__a, __a, 1, 1, 3, 3);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_moveldup_ps(__m128 __a)
{
return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_addsub_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_addsubpd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_hadd_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_haddpd(__a, __b);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_hsub_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_hsubpd(__a, __b);
}
#define _mm_loaddup_pd(dp) _mm_load1_pd(dp)
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_movedup_pd(__m128d __a)
{
return __builtin_shufflevector(__a, __a, 0, 0);
}
#define _MM_DENORMALS_ZERO_ON (0x0040)
#define _MM_DENORMALS_ZERO_OFF (0x0000)
#define _MM_DENORMALS_ZERO_MASK (0x0040)
#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
{
__builtin_ia32_monitor((void *)__p, __extensions, __hints);
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_mwait(unsigned __extensions, unsigned __hints)
{
__builtin_ia32_mwait(__extensions, __hints);
}
#endif /* __SSE3__ */
#endif /* __PMMINTRIN_H */
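/* Usage sketch (illustrative only; assumes -msse3): two rounds of
 * _mm_hadd_ps collapse a __m128 into its horizontal sum. */
#include <pmmintrin.h>

static float hsum_ps(__m128 v)
{
  v = _mm_hadd_ps(v, v); /* (a+b, c+d, a+b, c+d) */
  v = _mm_hadd_ps(v, v); /* full sum in every lane */
  return _mm_cvtss_f32(v);
}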

View File

@ -1,45 +0,0 @@
/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __POPCNT__
#error "POPCNT instruction set not enabled"
#endif
#ifndef _POPCNTINTRIN_H
#define _POPCNTINTRIN_H
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_popcnt_u32(unsigned int __A)
{
return __builtin_popcount(__A);
}
#ifdef __x86_64__
static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_popcnt_u64(unsigned long long __A)
{
return __builtin_popcountll(__A);
}
#endif /* __x86_64__ */
#endif /* _POPCNTINTRIN_H */
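/* Usage sketch (illustrative only; assumes -mpopcnt): popcount composes
 * directly into bit-set measures such as Hamming distance. */
#include <x86intrin.h>

static int hamming32(unsigned int a, unsigned int b)
{
  return _mm_popcnt_u32(a ^ b); /* number of differing bits */
}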

View File

@ -1,39 +0,0 @@
/*===---- prfchwintrin.h - PREFETCHW intrinsic -----------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED)
#error "Never use <prfchwintrin.h> directly; include <x86intrin.h> or <mm3dnow.h> instead."
#endif
#ifndef __PRFCHWINTRIN_H
#define __PRFCHWINTRIN_H
#if defined(__PRFCHW__) || defined(__3dNOW__)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_m_prefetchw(void *__P)
{
__builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
}
#endif
#endif /* __PRFCHWINTRIN_H */

View File

@ -1,52 +0,0 @@
/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __X86INTRIN_H
#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __RDSEEDINTRIN_H
#define __RDSEEDINTRIN_H
#ifdef __RDSEED__
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdseed16_step(unsigned short *__p)
{
return __builtin_ia32_rdseed16_step(__p);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdseed32_step(unsigned int *__p)
{
return __builtin_ia32_rdseed32_step(__p);
}
#ifdef __x86_64__
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdseed64_step(unsigned long long *__p)
{
return __builtin_ia32_rdseed64_step(__p);
}
#endif
#endif /* __RDSEED__ */
#endif /* __RDSEEDINTRIN_H */
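/* Illustrative sketch (not part of the original header): RDSEED can fail
 * transiently -- the intrinsics above return 0 when no entropy is
 * available -- so callers typically retry. Assumes -mrdseed and C99; the
 * retry bound is arbitrary. */
#include <x86intrin.h>

static int get_seed32(unsigned int *out)
{
  for (int i = 0; i < 10; ++i)
    if (_rdseed32_step(out))
      return 1;                  /* success: *out holds a fresh seed */
  return 0;                      /* no entropy after 10 attempts */
}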

View File

@ -1,54 +0,0 @@
/*===---- rtmintrin.h - RTM intrinsics -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
#error "Never use <rtmintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __RTMINTRIN_H
#define __RTMINTRIN_H
#define _XBEGIN_STARTED (~0u)
#define _XABORT_EXPLICIT (1 << 0)
#define _XABORT_RETRY (1 << 1)
#define _XABORT_CONFLICT (1 << 2)
#define _XABORT_CAPACITY (1 << 3)
#define _XABORT_DEBUG (1 << 4)
#define _XABORT_NESTED (1 << 5)
#define _XABORT_CODE(x) (((x) >> 24) & 0xFF)
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_xbegin(void)
{
return __builtin_ia32_xbegin();
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_xend(void)
{
__builtin_ia32_xend();
}
#define _xabort(imm) __builtin_ia32_xabort((imm))
#endif /* __RTMINTRIN_H */
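/* Illustrative sketch (not part of the original header): the usual RTM
 * pattern is to attempt a transaction and fall back to a lock when
 * _xbegin() reports an abort. Assumes -mrtm; take_lock/drop_lock are
 * hypothetical placeholders, and a production version would also have to
 * read the fallback lock inside the transaction to stay race-free. */
#include <immintrin.h>

extern void take_lock(void);     /* hypothetical fallback lock */
extern void drop_lock(void);

static void increment(long *counter)
{
  if (_xbegin() == _XBEGIN_STARTED) {
    ++*counter;                  /* runs transactionally */
    _xend();
  } else {                       /* aborted: retry under the lock */
    take_lock();
    ++*counter;
    drop_lock();
  }
}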

View File

@ -1,74 +0,0 @@
/*===---- shaintrin.h - SHA intrinsics -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
#error "Never use <shaintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __SHAINTRIN_H
#define __SHAINTRIN_H
#if !defined (__SHA__)
# error "SHA instructions not enabled"
#endif
#define _mm_sha1rnds4_epu32(V1, V2, M) __extension__ ({ \
__builtin_ia32_sha1rnds4((V1), (V2), (M)); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
{
return __builtin_ia32_sha1nexte(__X, __Y);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
{
return __builtin_ia32_sha1msg1(__X, __Y);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
{
return __builtin_ia32_sha1msg2(__X, __Y);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
{
return __builtin_ia32_sha256rnds2(__X, __Y, __Z);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
{
return __builtin_ia32_sha256msg1(__X, __Y);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
{
return __builtin_ia32_sha256msg2(__X, __Y);
}
#endif /* __SHAINTRIN_H */

View File

@ -1,468 +0,0 @@
/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H
#ifndef __SSE4_1__
#error "SSE4.1 instruction set not enabled"
#else
#include <tmmintrin.h>
/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04
#define _MM_FROUND_RAISE_EXC 0x00
#define _MM_FROUND_NO_EXC 0x08
#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
#define _mm_round_ps(X, M) __extension__ ({ \
__m128 __X = (X); \
(__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })
#define _mm_round_ss(X, Y, M) __extension__ ({ \
__m128 __X = (X); \
__m128 __Y = (Y); \
(__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })
#define _mm_round_pd(X, M) __extension__ ({ \
__m128d __X = (X); \
(__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })
#define _mm_round_sd(X, Y, M) __extension__ ({ \
__m128d __X = (X); \
__m128d __Y = (Y); \
(__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })
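/* Illustrative sketch (not part of the original header): the ceil/floor
 * macros above are just _mm_round_* with a fixed rounding field, e.g.
 * truncating four floats toward zero in one instruction. Assumes -msse4.1. */
static __m128 trunc4(__m128 v)
{
  return _mm_round_ps(v, _MM_FROUND_TRUNC);  /* roundps with imm8 = 0x03 */
}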
/* SSE4 Packed Blending Intrinsics. */
#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
__m128d __V1 = (V1); \
__m128d __V2 = (V2); \
(__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, (M)); })
#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
__m128 __V1 = (V1); \
__m128 __V2 = (V2); \
(__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, (M)); })
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
{
return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
(__v2df)__M);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
{
return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
(__v4sf)__M);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
{
return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
(__v16qi)__M);
}
#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
__m128i __V1 = (V1); \
__m128i __V2 = (V2); \
(__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, (M)); })
/* SSE4 Dword Multiply Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
{
return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epi32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Floating Point Dot Product Instructions. */
#define _mm_dp_ps(X, Y, M) __extension__ ({ \
__m128 __X = (X); \
__m128 __Y = (Y); \
(__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })
#define _mm_dp_pd(X, Y, M) __extension__ ({\
__m128d __X = (X); \
__m128d __Y = (Y); \
(__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
/* SSE4 Streaming Load Hint Instruction. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_stream_load_si128 (__m128i *__V)
{
return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V);
}
/* SSE4 Packed Integer Min/Max Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi8 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi8 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu16 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu16 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
}
/* SSE4 Insertion and Extraction from XMM Register Instructions. */
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
#define _mm_extract_ps(X, N) (__extension__ \
({ union { int __i; float __f; } __t; \
__v4sf __a = (__v4sf)(X); \
__t.__f = __a[(N) & 3]; \
__t.__i;}))
/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
(D) = __a[N]; }))
/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
/* Extract a float from X at index N into the first index of the return. */
#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
_MM_MK_INSERTPS_NDX((N), 0, 0x0e))
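/* Illustrative note (not part of the original header): the insertps
 * immediate built by _MM_MK_INSERTPS_NDX packs bits 7:6 = source lane,
 * bits 5:4 = destination lane, bits 3:0 = zero mask. So
 * _MM_MK_INSERTPS_NDX(2, 0, 0x0e) == 0x8e, which is what
 * _MM_PICK_OUT_PS(X, 2) uses: copy source lane 2 into lane 0 and zero
 * lanes 1-3. A pre-C11 compile-time check of that arithmetic: */
typedef char __insertps_ndx_check[_MM_MK_INSERTPS_NDX(2, 0, 0x0e) == 0x8e ? 1 : -1];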
/* Insert int into packed integer array at index. */
#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
__a[(N) & 15] = (I); \
__a;}))
#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
__a[(N) & 3] = (I); \
__a;}))
#ifdef __x86_64__
#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
__a[(N) & 1] = (I); \
__a;}))
#endif /* __x86_64__ */
/* Extract int from packed integer array at index. This returns the element
* as a zero extended value, so it is unsigned.
*/
#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
(int)(unsigned char) \
__a[(N) & 15];}))
#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
__a[(N) & 3];}))
#ifdef __x86_64__
#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
__a[(N) & 1];}))
#endif /* __x86_64__ */
/* SSE4 128-bit Packed Integer Comparisons. */
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testz_si128(__m128i __M, __m128i __V)
{
return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testc_si128(__m128i __M, __m128i __V)
{
return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_si128(__m128i __M, __m128i __V)
{
return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}
#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
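/* Illustrative sketch (not part of the original header): the ptest helpers
 * above make whole-register predicates cheap, e.g. comparing two 16-byte
 * blocks for equality. Assumes -msse4.1. */
static int blocks_equal(__m128i a, __m128i b)
{
  __m128i diff = _mm_xor_si128(a, b);     /* nonzero bits where a != b */
  return _mm_test_all_zeros(diff, diff);  /* 1 iff every bit is zero */
}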
/* SSE4 64-bit Packed Integer Comparisons. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
{
return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
/* SSE4 Packed Integer Sign-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi16(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxbw128((__v16qi) __V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi32(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxbd128((__v16qi) __V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxbq128((__v16qi) __V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi32(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxwd128((__v8hi) __V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxwq128((__v8hi)__V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxdq128((__v4si)__V);
}
/* SSE4 Packed Integer Zero-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi16(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi32(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi32(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu32_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
}
/* SSE4 Pack with Unsigned Saturation. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packus_epi32(__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
__m128i __X = (X); \
__m128i __Y = (Y); \
(__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_minpos_epu16(__m128i __V)
{
return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
}
/* These definitions are normally in nmmintrin.h, but gcc puts them in here
so we'll do the same. */
#ifdef __SSE4_2__
/* These specify the type of data that we're comparing. */
#define _SIDD_UBYTE_OPS 0x00
#define _SIDD_UWORD_OPS 0x01
#define _SIDD_SBYTE_OPS 0x02
#define _SIDD_SWORD_OPS 0x03
/* These specify the type of comparison operation. */
#define _SIDD_CMP_EQUAL_ANY 0x00
#define _SIDD_CMP_RANGES 0x04
#define _SIDD_CMP_EQUAL_EACH 0x08
#define _SIDD_CMP_EQUAL_ORDERED 0x0c
/* These macros specify the polarity of the operation. */
#define _SIDD_POSITIVE_POLARITY 0x00
#define _SIDD_NEGATIVE_POLARITY 0x10
#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
/* These macros are used in _mm_cmpXstri() to specify the return. */
#define _SIDD_LEAST_SIGNIFICANT 0x00
#define _SIDD_MOST_SIGNIFICANT 0x40
/* These macros are used in _mm_cmpXstrm() to specify the return. */
#define _SIDD_BIT_MASK 0x00
#define _SIDD_UNIT_MASK 0x40
/* SSE4.2 Packed Comparison Intrinsics. */
#define _mm_cmpistrm(A, B, M) __builtin_ia32_pcmpistrm128((A), (B), (M))
#define _mm_cmpistri(A, B, M) __builtin_ia32_pcmpistri128((A), (B), (M))
#define _mm_cmpestrm(A, LA, B, LB, M) \
__builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
#define _mm_cmpestri(A, LA, B, LB, M) \
__builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
#define _mm_cmpistra(A, B, M) \
__builtin_ia32_pcmpistria128((A), (B), (M))
#define _mm_cmpistrc(A, B, M) \
__builtin_ia32_pcmpistric128((A), (B), (M))
#define _mm_cmpistro(A, B, M) \
__builtin_ia32_pcmpistrio128((A), (B), (M))
#define _mm_cmpistrs(A, B, M) \
__builtin_ia32_pcmpistris128((A), (B), (M))
#define _mm_cmpistrz(A, B, M) \
__builtin_ia32_pcmpistriz128((A), (B), (M))
#define _mm_cmpestra(A, LA, B, LB, M) \
__builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrc(A, LA, B, LB, M) \
__builtin_ia32_pcmpestric128((A), (LA), (B), (LB), (M))
#define _mm_cmpestro(A, LA, B, LB, M) \
__builtin_ia32_pcmpestrio128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrs(A, LA, B, LB, M) \
__builtin_ia32_pcmpestris128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrz(A, LA, B, LB, M) \
__builtin_ia32_pcmpestriz128((A), (LA), (B), (LB), (M))
/* SSE4.2 Compare Packed Data -- Greater Than. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
{
return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}
/* SSE4.2 Accumulate CRC32. */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u8(unsigned int __C, unsigned char __D)
{
return __builtin_ia32_crc32qi(__C, __D);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u16(unsigned int __C, unsigned short __D)
{
return __builtin_ia32_crc32hi(__C, __D);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u32(unsigned int __C, unsigned int __D)
{
return __builtin_ia32_crc32si(__C, __D);
}
#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
{
return __builtin_ia32_crc32di(__C, __D);
}
#endif /* __x86_64__ */
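/* Illustrative sketch (not part of the original header): folding a byte
 * buffer through the CRC32 intrinsics above. Note these compute CRC-32C
 * (the Castagnoli polynomial), not zlib's CRC-32. Assumes -msse4.2. */
static unsigned int crc32c_bytes(const unsigned char *p, unsigned n)
{
  unsigned int crc = 0xFFFFFFFFu;        /* customary initial value */
  while (n--)
    crc = _mm_crc32_u8(crc, *p++);
  return crc ^ 0xFFFFFFFFu;              /* customary final inversion */
}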
#ifdef __POPCNT__
#include <popcntintrin.h>
#endif
#endif /* __SSE4_2__ */
#endif /* __SSE4_1__ */
#endif /* _SMMINTRIN_H */

View File

@ -1,35 +0,0 @@
/*===---- stdalign.h - Standard header for alignment ------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __STDALIGN_H
#define __STDALIGN_H
#ifndef __cplusplus
#define alignas _Alignas
#define alignof _Alignof
#endif
#define __alignas_is_defined 1
#define __alignof_is_defined 1
#endif /* __STDALIGN_H */

View File

@ -1,50 +0,0 @@
/*===---- stdarg.h - Variable argument handling ----------------------------===
*
* Copyright (c) 2008 Eli Friedman
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __STDARG_H
#define __STDARG_H
#ifndef _VA_LIST
typedef __builtin_va_list va_list;
#define _VA_LIST
#endif
#define va_start(ap, param) __builtin_va_start(ap, param)
#define va_end(ap) __builtin_va_end(ap)
#define va_arg(ap, type) __builtin_va_arg(ap, type)
/* GCC always defines __va_copy, but only defines va_copy in C99 mode or when
 * -ansi is not specified, since it was not part of C90.
*/
#define __va_copy(d,s) __builtin_va_copy(d,s)
#if __STDC_VERSION__ >= 199900L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
#define va_copy(dest, src) __builtin_va_copy(dest, src)
#endif
/* Hack required to make standard headers work, at least on Ubuntu */
#define __GNUC_VA_LIST 1
typedef __builtin_va_list __gnuc_va_list;
#endif /* __STDARG_H */
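/* Illustrative sketch (not part of the original header): the canonical
 * va_start/va_arg/va_end use of the macros defined above. */
#include <stdarg.h>

static int sum_ints(int count, ...)
{
  va_list ap;
  int total = 0;
  va_start(ap, count);
  while (count-- > 0)
    total += va_arg(ap, int);  /* e.g. sum_ints(3, 1, 2, 3) == 6 */
  va_end(ap);
  return total;
}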

View File

@ -1,44 +0,0 @@
/*===---- stdbool.h - Standard header for booleans -------------------------===
*
* Copyright (c) 2008 Eli Friedman
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __STDBOOL_H
#define __STDBOOL_H
/* Don't define bool, true, and false in C++, except as a GNU extension. */
#ifndef __cplusplus
#define bool _Bool
#define true 1
#define false 0
#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* Define _Bool, bool, false, true as a GNU extension. */
#define _Bool bool
#define bool bool
#define false false
#define true true
#endif
#define __bool_true_false_are_defined 1
#endif /* __STDBOOL_H */

View File

@ -1,102 +0,0 @@
/*===---- stddef.h - Basic type definitions --------------------------------===
*
* Copyright (c) 2008 Eli Friedman
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __STDDEF_H
#define __STDDEF_H
#if !defined(_PTRDIFF_T) || __has_feature(modules)
/* Always define ptrdiff_t when modules are available. */
#if !__has_feature(modules)
#define _PTRDIFF_T
#endif
typedef __PTRDIFF_TYPE__ ptrdiff_t;
#endif
#if !defined(_SIZE_T) || __has_feature(modules)
/* Always define size_t when modules are available. */
#if !__has_feature(modules)
#define _SIZE_T
#endif
typedef __SIZE_TYPE__ size_t;
#endif
/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is
* enabled. */
#if (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1 && \
!defined(_RSIZE_T)) || __has_feature(modules)
/* Always define rsize_t when modules are available. */
#if !__has_feature(modules)
#define _RSIZE_T
#endif
typedef __SIZE_TYPE__ rsize_t;
#endif
#ifndef __cplusplus
/* Always define wchar_t when modules are available. */
#if !defined(_WCHAR_T) || __has_feature(modules)
#if !__has_feature(modules)
#define _WCHAR_T
#if defined(_MSC_EXTENSIONS)
#define _WCHAR_T_DEFINED
#endif
#endif
typedef __WCHAR_TYPE__ wchar_t;
#endif
#endif
#undef NULL
#ifdef __cplusplus
# if !defined(__MINGW32__) && !defined(_MSC_VER)
# define NULL __null
# else
# define NULL 0
# endif
#else
# define NULL ((void*)0)
#endif
#ifdef __cplusplus
#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)
namespace std { typedef decltype(nullptr) nullptr_t; }
using ::std::nullptr_t;
#endif
#endif
#define offsetof(t, d) __builtin_offsetof(t, d)
#endif /* __STDDEF_H */
/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will use
__WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */
#if defined(__need_wint_t)
/* Always define wint_t when modules are available. */
#if !defined(_WINT_T) || __has_feature(modules)
#if !__has_feature(modules)
#define _WINT_T
#endif
typedef __WINT_TYPE__ wint_t;
#endif
#undef __need_wint_t
#endif /* __need_wint_t */
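/* Illustrative sketch (not part of the original header): offsetof as
 * defined above expands to __builtin_offsetof, which folds to an integer
 * constant at compile time. */
#include <stddef.h>

struct pair { char tag; int value; };
/* typically 4 on x86, since int is 4-byte aligned after the 1-byte tag */
static const size_t value_off = offsetof(struct pair, value);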

View File

@ -1,708 +0,0 @@
/*===---- stdint.h - Standard header for sized integer types --------------===*\
*
* Copyright (c) 2009 Chris Lattner
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
\*===----------------------------------------------------------------------===*/
#ifndef __CLANG_STDINT_H
#define __CLANG_STDINT_H
/* If we're hosted, fall back to the system's stdint.h, which might have
* additional definitions.
*/
#if __STDC_HOSTED__ && \
defined(__has_include_next) && __has_include_next(<stdint.h>)
// C99 7.18.3 Limits of other integer types
//
// Footnote 219, 220: C++ implementations should define these macros only when
// __STDC_LIMIT_MACROS is defined before <stdint.h> is included.
//
// Footnote 222: C++ implementations should define these macros only when
// __STDC_CONSTANT_MACROS is defined before <stdint.h> is included.
//
// C++11 [cstdint.syn]p2:
//
// The macros defined by <cstdint> are provided unconditionally. In particular,
// the symbols __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS (mentioned in
// footnotes 219, 220, and 222 in the C standard) play no role in C++.
//
// C11 removed the problematic footnotes.
//
// Work around this inconsistency by always defining those macros in C++ mode,
// so that a C library implementation which follows the C99 standard can be
// used in C++.
# ifdef __cplusplus
# if !defined(__STDC_LIMIT_MACROS)
# define __STDC_LIMIT_MACROS
# define __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
# endif
# if !defined(__STDC_CONSTANT_MACROS)
# define __STDC_CONSTANT_MACROS
# define __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
# endif
# endif
# include_next <stdint.h>
# ifdef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
# undef __STDC_LIMIT_MACROS
# undef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
# endif
# ifdef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
# undef __STDC_CONSTANT_MACROS
# undef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
# endif
#else
/* C99 7.18.1.1 Exact-width integer types.
* C99 7.18.1.2 Minimum-width integer types.
* C99 7.18.1.3 Fastest minimum-width integer types.
*
 * The standard requires that exact-width types be defined for 8-, 16-, 32-, and
 * 64-bit types if they are implemented. Other exact-width types are optional.
 * This implementation defines an exact-width type for every integer width
 * that is represented in the standard integer types.
*
* The standard also requires minimum-width types be defined for 8-, 16-, 32-,
* and 64-bit widths regardless of whether there are corresponding exact-width
* types.
*
* To accommodate targets that are missing types that are exactly 8, 16, 32, or
* 64 bits wide, this implementation takes an approach of cascading
 * redefinitions, redefining __int_leastN_t to successively smaller exact-width
* types. It is therefore important that the types are defined in order of
* descending widths.
*
* We currently assume that the minimum-width types and the fastest
* minimum-width types are the same. This is allowed by the standard, but is
* suboptimal.
*
* In violation of the standard, some targets do not implement a type that is
* wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
* To accommodate these targets, a required minimum-width type is only
* defined if there exists an exact-width type of equal or greater width.
*/
#ifdef __INT64_TYPE__
# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t */
typedef signed __INT64_TYPE__ int64_t;
# endif /* __int8_t_defined */
typedef unsigned __INT64_TYPE__ uint64_t;
# define __int_least64_t int64_t
# define __uint_least64_t uint64_t
# define __int_least32_t int64_t
# define __uint_least32_t uint64_t
# define __int_least16_t int64_t
# define __uint_least16_t uint64_t
# define __int_least8_t int64_t
# define __uint_least8_t uint64_t
#endif /* __INT64_TYPE__ */
#ifdef __int_least64_t
typedef __int_least64_t int_least64_t;
typedef __uint_least64_t uint_least64_t;
typedef __int_least64_t int_fast64_t;
typedef __uint_least64_t uint_fast64_t;
#endif /* __int_least64_t */
#ifdef __INT56_TYPE__
typedef signed __INT56_TYPE__ int56_t;
typedef unsigned __INT56_TYPE__ uint56_t;
typedef int56_t int_least56_t;
typedef uint56_t uint_least56_t;
typedef int56_t int_fast56_t;
typedef uint56_t uint_fast56_t;
# define __int_least32_t int56_t
# define __uint_least32_t uint56_t
# define __int_least16_t int56_t
# define __uint_least16_t uint56_t
# define __int_least8_t int56_t
# define __uint_least8_t uint56_t
#endif /* __INT56_TYPE__ */
#ifdef __INT48_TYPE__
typedef signed __INT48_TYPE__ int48_t;
typedef unsigned __INT48_TYPE__ uint48_t;
typedef int48_t int_least48_t;
typedef uint48_t uint_least48_t;
typedef int48_t int_fast48_t;
typedef uint48_t uint_fast48_t;
# define __int_least32_t int48_t
# define __uint_least32_t uint48_t
# define __int_least16_t int48_t
# define __uint_least16_t uint48_t
# define __int_least8_t int48_t
# define __uint_least8_t uint48_t
#endif /* __INT48_TYPE__ */
#ifdef __INT40_TYPE__
typedef signed __INT40_TYPE__ int40_t;
typedef unsigned __INT40_TYPE__ uint40_t;
typedef int40_t int_least40_t;
typedef uint40_t uint_least40_t;
typedef int40_t int_fast40_t;
typedef uint40_t uint_fast40_t;
# define __int_least32_t int40_t
# define __uint_least32_t uint40_t
# define __int_least16_t int40_t
# define __uint_least16_t uint40_t
# define __int_least8_t int40_t
# define __uint_least8_t uint40_t
#endif /* __INT40_TYPE__ */
#ifdef __INT32_TYPE__
# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t */
typedef signed __INT32_TYPE__ int32_t;
# endif /* __int8_t_defined */
# ifndef __uint32_t_defined /* more glibc compatibility */
# define __uint32_t_defined
typedef unsigned __INT32_TYPE__ uint32_t;
# endif /* __uint32_t_defined */
# define __int_least32_t int32_t
# define __uint_least32_t uint32_t
# define __int_least16_t int32_t
# define __uint_least16_t uint32_t
# define __int_least8_t int32_t
# define __uint_least8_t uint32_t
#endif /* __INT32_TYPE__ */
#ifdef __int_least32_t
typedef __int_least32_t int_least32_t;
typedef __uint_least32_t uint_least32_t;
typedef __int_least32_t int_fast32_t;
typedef __uint_least32_t uint_fast32_t;
#endif /* __int_least32_t */
#ifdef __INT24_TYPE__
typedef signed __INT24_TYPE__ int24_t;
typedef unsigned __INT24_TYPE__ uint24_t;
typedef int24_t int_least24_t;
typedef uint24_t uint_least24_t;
typedef int24_t int_fast24_t;
typedef uint24_t uint_fast24_t;
# define __int_least16_t int24_t
# define __uint_least16_t uint24_t
# define __int_least8_t int24_t
# define __uint_least8_t uint24_t
#endif /* __INT24_TYPE__ */
#ifdef __INT16_TYPE__
#ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t */
typedef signed __INT16_TYPE__ int16_t;
#endif /* __int8_t_defined */
typedef unsigned __INT16_TYPE__ uint16_t;
# define __int_least16_t int16_t
# define __uint_least16_t uint16_t
# define __int_least8_t int16_t
# define __uint_least8_t uint16_t
#endif /* __INT16_TYPE__ */
#ifdef __int_least16_t
typedef __int_least16_t int_least16_t;
typedef __uint_least16_t uint_least16_t;
typedef __int_least16_t int_fast16_t;
typedef __uint_least16_t uint_fast16_t;
#endif /* __int_least16_t */
#ifdef __INT8_TYPE__
#ifndef __int8_t_defined /* glibc sys/types.h also defines int8_t */
typedef signed __INT8_TYPE__ int8_t;
#endif /* __int8_t_defined */
typedef unsigned __INT8_TYPE__ uint8_t;
# define __int_least8_t int8_t
# define __uint_least8_t uint8_t
#endif /* __INT8_TYPE__ */
#ifdef __int_least8_t
typedef __int_least8_t int_least8_t;
typedef __uint_least8_t uint_least8_t;
typedef __int_least8_t int_fast8_t;
typedef __uint_least8_t uint_fast8_t;
#endif /* __int_least8_t */
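/* Illustrative note (not part of the original header): on a hypothetical
 * target that defines __INT32_TYPE__ and __INT64_TYPE__ but no 8- or
 * 16-bit types, the cascade above leaves __int_least16_t and
 * __int_least8_t pointing at int32_t, so int_least8_t and int_fast8_t
 * come out 32 bits wide while int8_t is simply absent -- exactly the
 * C99-permitted behaviour described in the comment at the top of this
 * section. */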
/* prevent glibc sys/types.h from defining conflicting types */
#ifndef __int8_t_defined
# define __int8_t_defined
#endif /* __int8_t_defined */
/* C99 7.18.1.4 Integer types capable of holding object pointers.
*/
#define __stdint_join3(a,b,c) a ## b ## c
#define __intn_t(n) __stdint_join3( int, n, _t)
#define __uintn_t(n) __stdint_join3(uint, n, _t)
#ifndef _INTPTR_T
#ifndef __intptr_t_defined
typedef __intn_t(__INTPTR_WIDTH__) intptr_t;
#define __intptr_t_defined
#define _INTPTR_T
#endif
#endif
#ifndef _UINTPTR_T
typedef __uintn_t(__INTPTR_WIDTH__) uintptr_t;
#define _UINTPTR_T
#endif
/* C99 7.18.1.5 Greatest-width integer types.
*/
typedef __INTMAX_TYPE__ intmax_t;
typedef __UINTMAX_TYPE__ uintmax_t;
/* C99 7.18.4 Macros for minimum-width integer constants.
*
* The standard requires that integer constant macros be defined for all the
* minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width
* types are required, the corresponding integer constant macros are defined
* here. This implementation also defines minimum-width types for every other
* integer width that the target implements, so corresponding macros are
* defined below, too.
*
* These macros are defined using the same successive-shrinking approach as
* the type definitions above. It is likewise important that macros are defined
 * in order of descending width.
*
* Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
* claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
*/
#define __int_c_join(a, b) a ## b
#define __int_c(v, suffix) __int_c_join(v, suffix)
#define __uint_c(v, suffix) __int_c_join(v##U, suffix)
#ifdef __INT64_TYPE__
# ifdef __INT64_C_SUFFIX__
# define __int64_c_suffix __INT64_C_SUFFIX__
# define __int32_c_suffix __INT64_C_SUFFIX__
# define __int16_c_suffix __INT64_C_SUFFIX__
# define __int8_c_suffix __INT64_C_SUFFIX__
# else
# undef __int64_c_suffix
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# endif /* __INT64_C_SUFFIX__ */
#endif /* __INT64_TYPE__ */
#ifdef __int_least64_t
# ifdef __int64_c_suffix
# define INT64_C(v) __int_c(v, __int64_c_suffix)
# define UINT64_C(v) __uint_c(v, __int64_c_suffix)
# else
# define INT64_C(v) v
# define UINT64_C(v) v ## U
# endif /* __int64_c_suffix */
#endif /* __int_least64_t */
#ifdef __INT56_TYPE__
# ifdef __INT56_C_SUFFIX__
# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
# define __int32_c_suffix __INT56_C_SUFFIX__
# define __int16_c_suffix __INT56_C_SUFFIX__
# define __int8_c_suffix __INT56_C_SUFFIX__
# else
# define INT56_C(v) v
# define UINT56_C(v) v ## U
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# endif /* __INT56_C_SUFFIX__ */
#endif /* __INT56_TYPE__ */
#ifdef __INT48_TYPE__
# ifdef __INT48_C_SUFFIX__
# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
# define __int32_c_suffix __INT48_C_SUFFIX__
# define __int16_c_suffix __INT48_C_SUFFIX__
# define __int8_c_suffix __INT48_C_SUFFIX__
# else
# define INT48_C(v) v
# define UINT48_C(v) v ## U
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# endif /* __INT48_C_SUFFIX__ */
#endif /* __INT48_TYPE__ */
#ifdef __INT40_TYPE__
# ifdef __INT40_C_SUFFIX__
# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
# define __int32_c_suffix __INT40_C_SUFFIX__
# define __int16_c_suffix __INT40_C_SUFFIX__
# define __int8_c_suffix __INT40_C_SUFFIX__
# else
# define INT40_C(v) v
# define UINT40_C(v) v ## U
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# endif /* __INT40_C_SUFFIX__ */
#endif /* __INT40_TYPE__ */
#ifdef __INT32_TYPE__
# ifdef __INT32_C_SUFFIX__
# define __int32_c_suffix __INT32_C_SUFFIX__
# define __int16_c_suffix __INT32_C_SUFFIX__
# define __int8_c_suffix __INT32_C_SUFFIX__
# else
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# endif /* __INT32_C_SUFFIX__ */
#endif /* __INT32_TYPE__ */
#ifdef __int_least32_t
# ifdef __int32_c_suffix
# define INT32_C(v) __int_c(v, __int32_c_suffix)
# define UINT32_C(v) __uint_c(v, __int32_c_suffix)
# else
# define INT32_C(v) v
# define UINT32_C(v) v ## U
# endif /* __int32_c_suffix */
#endif /* __int_least32_t */
#ifdef __INT24_TYPE__
# ifdef __INT24_C_SUFFIX__
# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
# define __int16_c_suffix __INT24_C_SUFFIX__
# define __int8_c_suffix __INT24_C_SUFFIX__
# else
# define INT24_C(v) v
# define UINT24_C(v) v ## U
# undef __int16_c_suffix
# undef __int8_c_suffix
# endif /* __INT24_C_SUFFIX__ */
#endif /* __INT24_TYPE__ */
#ifdef __INT16_TYPE__
# ifdef __INT16_C_SUFFIX__
# define __int16_c_suffix __INT16_C_SUFFIX__
# define __int8_c_suffix __INT16_C_SUFFIX__
# else
# undef __int16_c_suffix
# undef __int8_c_suffix
# endif /* __INT16_C_SUFFIX__ */
#endif /* __INT16_TYPE__ */
#ifdef __int_least16_t
# ifdef __int16_c_suffix
# define INT16_C(v) __int_c(v, __int16_c_suffix)
# define UINT16_C(v) __uint_c(v, __int16_c_suffix)
# else
# define INT16_C(v) v
# define UINT16_C(v) v ## U
# endif /* __int16_c_suffix */
#endif /* __int_least16_t */
#ifdef __INT8_TYPE__
# ifdef __INT8_C_SUFFIX__
# define __int8_c_suffix __INT8_C_SUFFIX__
# else
# undef __int8_c_suffix
# endif /* __INT8_C_SUFFIX__ */
#endif /* __INT8_TYPE__ */
#ifdef __int_least8_t
# ifdef __int8_c_suffix
# define INT8_C(v) __int_c(v, __int8_c_suffix)
# define UINT8_C(v) __uint_c(v, __int8_c_suffix)
# else
# define INT8_C(v) v
# define UINT8_C(v) v ## U
# endif /* __int8_c_suffix */
#endif /* __int_least8_t */
/* C99 7.18.2.1 Limits of exact-width integer types.
* C99 7.18.2.2 Limits of minimum-width integer types.
* C99 7.18.2.3 Limits of fastest minimum-width integer types.
*
 * The presence of limit macros is completely optional in C99. This
* implementation defines limits for all of the types (exact- and
* minimum-width) that it defines above, using the limits of the minimum-width
* type for any types that do not have exact-width representations.
*
* As in the type definitions, this section takes an approach of
* successive-shrinking to determine which limits to use for the standard (8,
* 16, 32, 64) bit widths when they don't have exact representations. It is
 * therefore important that the definitions be kept in order of descending
* widths.
*
* Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
* claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
*/
#ifdef __INT64_TYPE__
# define INT64_MAX INT64_C( 9223372036854775807)
# define INT64_MIN (-INT64_C( 9223372036854775807)-1)
# define UINT64_MAX UINT64_C(18446744073709551615)
# define __INT_LEAST64_MIN INT64_MIN
# define __INT_LEAST64_MAX INT64_MAX
# define __UINT_LEAST64_MAX UINT64_MAX
# define __INT_LEAST32_MIN INT64_MIN
# define __INT_LEAST32_MAX INT64_MAX
# define __UINT_LEAST32_MAX UINT64_MAX
# define __INT_LEAST16_MIN INT64_MIN
# define __INT_LEAST16_MAX INT64_MAX
# define __UINT_LEAST16_MAX UINT64_MAX
# define __INT_LEAST8_MIN INT64_MIN
# define __INT_LEAST8_MAX INT64_MAX
# define __UINT_LEAST8_MAX UINT64_MAX
#endif /* __INT64_TYPE__ */
#ifdef __INT_LEAST64_MIN
# define INT_LEAST64_MIN __INT_LEAST64_MIN
# define INT_LEAST64_MAX __INT_LEAST64_MAX
# define UINT_LEAST64_MAX __UINT_LEAST64_MAX
# define INT_FAST64_MIN __INT_LEAST64_MIN
# define INT_FAST64_MAX __INT_LEAST64_MAX
# define UINT_FAST64_MAX __UINT_LEAST64_MAX
#endif /* __INT_LEAST64_MIN */
#ifdef __INT56_TYPE__
# define INT56_MAX INT56_C(36028797018963967)
# define INT56_MIN (-INT56_C(36028797018963967)-1)
# define UINT56_MAX UINT56_C(72057594037927935)
# define INT_LEAST56_MIN INT56_MIN
# define INT_LEAST56_MAX INT56_MAX
# define UINT_LEAST56_MAX UINT56_MAX
# define INT_FAST56_MIN INT56_MIN
# define INT_FAST56_MAX INT56_MAX
# define UINT_FAST56_MAX UINT56_MAX
# define __INT_LEAST32_MIN INT56_MIN
# define __INT_LEAST32_MAX INT56_MAX
# define __UINT_LEAST32_MAX UINT56_MAX
# define __INT_LEAST16_MIN INT56_MIN
# define __INT_LEAST16_MAX INT56_MAX
# define __UINT_LEAST16_MAX UINT56_MAX
# define __INT_LEAST8_MIN INT56_MIN
# define __INT_LEAST8_MAX INT56_MAX
# define __UINT_LEAST8_MAX UINT56_MAX
#endif /* __INT56_TYPE__ */
#ifdef __INT48_TYPE__
# define INT48_MAX INT48_C(140737488355327)
# define INT48_MIN (-INT48_C(140737488355327)-1)
# define UINT48_MAX UINT48_C(281474976710655)
# define INT_LEAST48_MIN INT48_MIN
# define INT_LEAST48_MAX INT48_MAX
# define UINT_LEAST48_MAX UINT48_MAX
# define INT_FAST48_MIN INT48_MIN
# define INT_FAST48_MAX INT48_MAX
# define UINT_FAST48_MAX UINT48_MAX
# define __INT_LEAST32_MIN INT48_MIN
# define __INT_LEAST32_MAX INT48_MAX
# define __UINT_LEAST32_MAX UINT48_MAX
# define __INT_LEAST16_MIN INT48_MIN
# define __INT_LEAST16_MAX INT48_MAX
# define __UINT_LEAST16_MAX UINT48_MAX
# define __INT_LEAST8_MIN INT48_MIN
# define __INT_LEAST8_MAX INT48_MAX
# define __UINT_LEAST8_MAX UINT48_MAX
#endif /* __INT48_TYPE__ */
#ifdef __INT40_TYPE__
# define INT40_MAX INT40_C(549755813887)
# define INT40_MIN (-INT40_C(549755813887)-1)
# define UINT40_MAX UINT40_C(1099511627775)
# define INT_LEAST40_MIN INT40_MIN
# define INT_LEAST40_MAX INT40_MAX
# define UINT_LEAST40_MAX UINT40_MAX
# define INT_FAST40_MIN INT40_MIN
# define INT_FAST40_MAX INT40_MAX
# define UINT_FAST40_MAX UINT40_MAX
# define __INT_LEAST32_MIN INT40_MIN
# define __INT_LEAST32_MAX INT40_MAX
# define __UINT_LEAST32_MAX UINT40_MAX
# define __INT_LEAST16_MIN INT40_MIN
# define __INT_LEAST16_MAX INT40_MAX
# define __UINT_LEAST16_MAX UINT40_MAX
# define __INT_LEAST8_MIN INT40_MIN
# define __INT_LEAST8_MAX INT40_MAX
# define __UINT_LEAST8_MAX UINT40_MAX
#endif /* __INT40_TYPE__ */
#ifdef __INT32_TYPE__
# define INT32_MAX INT32_C(2147483647)
# define INT32_MIN (-INT32_C(2147483647)-1)
# define UINT32_MAX UINT32_C(4294967295)
# define __INT_LEAST32_MIN INT32_MIN
# define __INT_LEAST32_MAX INT32_MAX
# define __UINT_LEAST32_MAX UINT32_MAX
# define __INT_LEAST16_MIN INT32_MIN
# define __INT_LEAST16_MAX INT32_MAX
# define __UINT_LEAST16_MAX UINT32_MAX
# define __INT_LEAST8_MIN INT32_MIN
# define __INT_LEAST8_MAX INT32_MAX
# define __UINT_LEAST8_MAX UINT32_MAX
#endif /* __INT32_TYPE__ */
#ifdef __INT_LEAST32_MIN
# define INT_LEAST32_MIN __INT_LEAST32_MIN
# define INT_LEAST32_MAX __INT_LEAST32_MAX
# define UINT_LEAST32_MAX __UINT_LEAST32_MAX
# define INT_FAST32_MIN __INT_LEAST32_MIN
# define INT_FAST32_MAX __INT_LEAST32_MAX
# define UINT_FAST32_MAX __UINT_LEAST32_MAX
#endif /* __INT_LEAST32_MIN */
#ifdef __INT24_TYPE__
# define INT24_MAX INT24_C(8388607)
# define INT24_MIN (-INT24_C(8388607)-1)
# define UINT24_MAX UINT24_C(16777215)
# define INT_LEAST24_MIN INT24_MIN
# define INT_LEAST24_MAX INT24_MAX
# define UINT_LEAST24_MAX UINT24_MAX
# define INT_FAST24_MIN INT24_MIN
# define INT_FAST24_MAX INT24_MAX
# define UINT_FAST24_MAX UINT24_MAX
# define __INT_LEAST16_MIN INT24_MIN
# define __INT_LEAST16_MAX INT24_MAX
# define __UINT_LEAST16_MAX UINT24_MAX
# define __INT_LEAST8_MIN INT24_MIN
# define __INT_LEAST8_MAX INT24_MAX
# define __UINT_LEAST8_MAX UINT24_MAX
#endif /* __INT24_TYPE__ */
#ifdef __INT16_TYPE__
#define INT16_MAX INT16_C(32767)
#define INT16_MIN (-INT16_C(32767)-1)
#define UINT16_MAX UINT16_C(65535)
# define __INT_LEAST16_MIN INT16_MIN
# define __INT_LEAST16_MAX INT16_MAX
# define __UINT_LEAST16_MAX UINT16_MAX
# define __INT_LEAST8_MIN INT16_MIN
# define __INT_LEAST8_MAX INT16_MAX
# define __UINT_LEAST8_MAX UINT16_MAX
#endif /* __INT16_TYPE__ */
#ifdef __INT_LEAST16_MIN
# define INT_LEAST16_MIN __INT_LEAST16_MIN
# define INT_LEAST16_MAX __INT_LEAST16_MAX
# define UINT_LEAST16_MAX __UINT_LEAST16_MAX
# define INT_FAST16_MIN __INT_LEAST16_MIN
# define INT_FAST16_MAX __INT_LEAST16_MAX
# define UINT_FAST16_MAX __UINT_LEAST16_MAX
#endif /* __INT_LEAST16_MIN */
#ifdef __INT8_TYPE__
# define INT8_MAX INT8_C(127)
# define INT8_MIN (-INT8_C(127)-1)
# define UINT8_MAX UINT8_C(255)
# define __INT_LEAST8_MIN INT8_MIN
# define __INT_LEAST8_MAX INT8_MAX
# define __UINT_LEAST8_MAX UINT8_MAX
#endif /* __INT8_TYPE__ */
#ifdef __INT_LEAST8_MIN
# define INT_LEAST8_MIN __INT_LEAST8_MIN
# define INT_LEAST8_MAX __INT_LEAST8_MAX
# define UINT_LEAST8_MAX __UINT_LEAST8_MAX
# define INT_FAST8_MIN __INT_LEAST8_MIN
# define INT_FAST8_MAX __INT_LEAST8_MAX
# define UINT_FAST8_MAX __UINT_LEAST8_MAX
#endif /* __INT_LEAST8_MIN */
/* Some utility macros */
#define __INTN_MIN(n) __stdint_join3( INT, n, _MIN)
#define __INTN_MAX(n) __stdint_join3( INT, n, _MAX)
#define __UINTN_MAX(n) __stdint_join3(UINT, n, _MAX)
#define __INTN_C(n, v) __stdint_join3( INT, n, _C(v))
#define __UINTN_C(n, v) __stdint_join3(UINT, n, _C(v))
/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */
/* C99 7.18.3 Limits of other integer types. */
#define INTPTR_MIN __INTN_MIN(__INTPTR_WIDTH__)
#define INTPTR_MAX __INTN_MAX(__INTPTR_WIDTH__)
#define UINTPTR_MAX __UINTN_MAX(__INTPTR_WIDTH__)
#define PTRDIFF_MIN __INTN_MIN(__PTRDIFF_WIDTH__)
#define PTRDIFF_MAX __INTN_MAX(__PTRDIFF_WIDTH__)
#define SIZE_MAX __UINTN_MAX(__SIZE_WIDTH__)
/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
* is enabled. */
#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
#define RSIZE_MAX (SIZE_MAX >> 1)
#endif
/* C99 7.18.2.5 Limits of greatest-width integer types. */
#define INTMAX_MIN __INTN_MIN(__INTMAX_WIDTH__)
#define INTMAX_MAX __INTN_MAX(__INTMAX_WIDTH__)
#define UINTMAX_MAX __UINTN_MAX(__INTMAX_WIDTH__)
/* C99 7.18.3 Limits of other integer types. */
#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
#ifdef __WINT_UNSIGNED__
# define WINT_MIN __UINTN_C(__WINT_WIDTH__, 0)
# define WINT_MAX __UINTN_MAX(__WINT_WIDTH__)
#else
# define WINT_MIN __INTN_MIN(__WINT_WIDTH__)
# define WINT_MAX __INTN_MAX(__WINT_WIDTH__)
#endif
#ifndef WCHAR_MAX
# define WCHAR_MAX __WCHAR_MAX__
#endif
#ifndef WCHAR_MIN
# if __WCHAR_MAX__ == __INTN_MAX(__WCHAR_WIDTH__)
# define WCHAR_MIN __INTN_MIN(__WCHAR_WIDTH__)
# else
# define WCHAR_MIN __UINTN_C(__WCHAR_WIDTH__, 0)
# endif
#endif
/* C99 7.18.4.2 Macros for greatest-width integer constants. */
#define INTMAX_C(v) __INTN_C(__INTMAX_WIDTH__, v)
#define UINTMAX_C(v) __UINTN_C(__INTMAX_WIDTH__, v)
#endif /* __STDC_HOSTED__ */
#endif /* __CLANG_STDINT_H */
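/* Illustrative sketch (not part of the original header): the INTn_C /
 * UINTn_C macros above attach whatever suffix the target's types need,
 * avoiding literals that would be truncated to int before being widened. */
#include <stdint.h>

static const uint64_t high_bit = UINT64_C(1) << 63;  /* plain 1 << 63 would overflow int */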

View File

@ -1,30 +0,0 @@
/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __STDNORETURN_H
#define __STDNORETURN_H
#define noreturn _Noreturn
#define __noreturn_is_defined 1
#endif /* __STDNORETURN_H */

View File

@ -1,158 +0,0 @@
/*===---- tbmintrin.h - TBM intrinsics -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __TBM__
#error "TBM instruction set is not enabled"
#endif
#ifndef __X86INTRIN_H
#error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __TBMINTRIN_H
#define __TBMINTRIN_H
#define __bextri_u32(a, b) (__builtin_ia32_bextri_u32((a), (b)))
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blcfill_u32(unsigned int a)
{
return a & (a + 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blci_u32(unsigned int a)
{
return a | ~(a + 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blcic_u32(unsigned int a)
{
return ~a & (a + 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blcmsk_u32(unsigned int a)
{
return a ^ (a + 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blcs_u32(unsigned int a)
{
return a | (a + 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blsfill_u32(unsigned int a)
{
return a | (a - 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blsic_u32(unsigned int a)
{
return ~a | (a - 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__t1mskc_u32(unsigned int a)
{
return ~a | (a + 1);
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__tzmsk_u32(unsigned int a)
{
return ~a & (a - 1);
}
#ifdef __x86_64__
#define __bextri_u64(a, b) (__builtin_ia32_bextri_u64((a), (int)(b)))
static __inline__ unsigned long long __attribute__((__always_inline__,
__nodebug__))
__blcfill_u64(unsigned long long a)
{
return a & (a + 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__,
__nodebug__))
__blci_u64(unsigned long long a)
{
return a | ~(a + 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__,
__nodebug__))
__blcic_u64(unsigned long long a)
{
return ~a & (a + 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__,
__nodebug__))
__blcmsk_u64(unsigned long long a)
{
return a ^ (a + 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__,
__nodebug__))
__blcs_u64(unsigned long long a)
{
return a | (a + 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__,
__nodebug__))
__blsfill_u64(unsigned long long a)
{
return a | (a - 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__,
__nodebug__))
__blsic_u64(unsigned long long a)
{
return ~a | (a - 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__,
__nodebug__))
__t1mskc_u64(unsigned long long a)
{
return ~a | (a + 1);
}
static __inline__ unsigned long long __attribute__((__always_inline__,
__nodebug__))
__tzmsk_u64(unsigned long long a)
{
return ~a & (a - 1);
}
#endif
#endif /* __TBMINTRIN_H */
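
Each TBM helper above is a pure bit identity, so its behavior can be sanity-checked without TBM hardware. A sketch restating two of the identities in plain C (blsfill and tzmsk are illustrative stand-ins, not the intrinsics themselves):

#include <assert.h>

/* __blsfill_u32: set every bit at or below the lowest set bit. */
static unsigned blsfill(unsigned a) { return a | (a - 1); }
/* __tzmsk_u32: mask covering only the trailing zero bits. */
static unsigned tzmsk(unsigned a)   { return ~a & (a - 1); }

int main(void) {
  assert(blsfill(0x28u) == 0x2Fu); /* 0b101000 -> 0b101111 */
  assert(tzmsk(0x28u)   == 0x07u); /* three trailing zeros -> 0b111 */
  return 0;
}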

File diff suppressed because it is too large

View File

@@ -1,225 +0,0 @@
/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __TMMINTRIN_H
#define __TMMINTRIN_H
#ifndef __SSSE3__
#error "SSSE3 instruction set not enabled"
#else
#include <pmmintrin.h>
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_abs_pi8(__m64 __a)
{
return (__m64)__builtin_ia32_pabsb((__v8qi)__a);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_abs_epi8(__m128i __a)
{
return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_abs_pi16(__m64 __a)
{
return (__m64)__builtin_ia32_pabsw((__v4hi)__a);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_abs_epi16(__m128i __a)
{
return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_abs_pi32(__m64 __a)
{
return (__m64)__builtin_ia32_pabsd((__v2si)__a);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_abs_epi32(__m128i __a)
{
return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
}
#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
__m128i __a = (a); \
__m128i __b = (b); \
(__m128i)__builtin_ia32_palignr128((__v16qi)__a, (__v16qi)__b, (n)); })
#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
__m64 __a = (a); \
__m64 __b = (b); \
(__m64)__builtin_ia32_palignr((__v8qi)__a, (__v8qi)__b, (n)); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hadd_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hadd_epi32(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hadd_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hadd_pi32(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hadds_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hadds_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsub_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsub_epi32(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hsub_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hsub_pi32(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsubs_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hsubs_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maddubs_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_maddubs_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mulhrs_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mulhrs_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_shuffle_epi8(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_shuffle_pi8(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sign_epi8(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sign_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sign_epi32(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sign_pi8(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sign_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sign_pi32(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_psignd((__v2si)__a, (__v2si)__b);
}
#endif /* __SSSE3__ */
#endif /* __TMMINTRIN_H */
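
A usage sketch for the horizontal-add intrinsic defined above; it assumes an x86 target compiled with -mssse3:

#include <tmmintrin.h>
#include <stdio.h>

int main(void) {
  /* _mm_hadd_epi16 adds adjacent pairs within each source vector:
   * the low four lanes come from a, the high four from b. */
  __m128i a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
  __m128i b = _mm_setr_epi16(10, 20, 30, 40, 50, 60, 70, 80);
  __m128i h = _mm_hadd_epi16(a, b);
  short out[8];
  _mm_storeu_si128((__m128i *)out, h);
  for (int i = 0; i < 8; ++i)
    printf("%d ", out[i]);  /* 3 7 11 15 30 70 110 150 */
  printf("\n");
  return 0;
}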

View File

@@ -1,280 +0,0 @@
/*===---- unwind.h - Stack unwinding ----------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
#ifndef __CLANG_UNWIND_H
#define __CLANG_UNWIND_H
#if __has_include_next(<unwind.h>)
/* Darwin (from 11.x on) and libunwind provide an unwind.h. If that's available,
* use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
* so define that around the include.*/
# ifndef _GNU_SOURCE
# define _SHOULD_UNDEFINE_GNU_SOURCE
# define _GNU_SOURCE
# endif
// libunwind's unwind.h reflects the current visibility. However, Mozilla
// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
// visibility to default and export its contents. gcc also allows users to
// override its override by #defining HIDE_EXPORTS (but note, this only obeys
// the user's -fvisibility setting; it doesn't hide any exports on its own). We
// imitate gcc's header here:
# ifdef HIDE_EXPORTS
# include_next <unwind.h>
# else
# pragma GCC visibility push(default)
# include_next <unwind.h>
# pragma GCC visibility pop
# endif
# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
# undef _GNU_SOURCE
# undef _SHOULD_UNDEFINE_GNU_SOURCE
# endif
#else
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* It is a bit strange for a header to play with the visibility of the
symbols it declares, but this matches gcc's behavior and some programs
depend on it */
#ifndef HIDE_EXPORTS
#pragma GCC visibility push(default)
#endif
typedef uintptr_t _Unwind_Word;
typedef intptr_t _Unwind_Sword;
typedef uintptr_t _Unwind_Ptr;
typedef uintptr_t _Unwind_Internal_Ptr;
typedef uint64_t _Unwind_Exception_Class;
typedef intptr_t _sleb128_t;
typedef uintptr_t _uleb128_t;
struct _Unwind_Context;
struct _Unwind_Exception;
typedef enum {
_URC_NO_REASON = 0,
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
_URC_FATAL_PHASE2_ERROR = 2,
_URC_FATAL_PHASE1_ERROR = 3,
_URC_NORMAL_STOP = 4,
_URC_END_OF_STACK = 5,
_URC_HANDLER_FOUND = 6,
_URC_INSTALL_CONTEXT = 7,
_URC_CONTINUE_UNWIND = 8
} _Unwind_Reason_Code;
typedef enum {
_UA_SEARCH_PHASE = 1,
_UA_CLEANUP_PHASE = 2,
_UA_HANDLER_FRAME = 4,
_UA_FORCE_UNWIND = 8,
_UA_END_OF_STACK = 16 /* gcc extension to C++ ABI */
} _Unwind_Action;
typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
struct _Unwind_Exception *);
struct _Unwind_Exception {
_Unwind_Exception_Class exception_class;
_Unwind_Exception_Cleanup_Fn exception_cleanup;
_Unwind_Word private_1;
_Unwind_Word private_2;
/* The Itanium ABI requires that _Unwind_Exception objects are "double-word
* aligned". GCC has interpreted this to mean "use the maximum useful
* alignment for the target"; so do we. */
} __attribute__((__aligned__));
typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)(int, _Unwind_Action,
_Unwind_Exception_Class,
struct _Unwind_Exception *,
struct _Unwind_Context *,
void *);
typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn)(
int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
struct _Unwind_Context *);
typedef _Unwind_Personality_Fn __personality_routine;
typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
void *);
#if defined(__arm__) && !defined(__APPLE__)
typedef enum {
_UVRSC_CORE = 0, /* integer register */
_UVRSC_VFP = 1, /* vfp */
_UVRSC_WMMXD = 3, /* Intel WMMX data register */
_UVRSC_WMMXC = 4 /* Intel WMMX control register */
} _Unwind_VRS_RegClass;
typedef enum {
_UVRSD_UINT32 = 0,
_UVRSD_VFPX = 1,
_UVRSD_UINT64 = 3,
_UVRSD_FLOAT = 4,
_UVRSD_DOUBLE = 5
} _Unwind_VRS_DataRepresentation;
typedef enum {
_UVRSR_OK = 0,
_UVRSR_NOT_IMPLEMENTED = 1,
_UVRSR_FAILED = 2
} _Unwind_VRS_Result;
_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context,
_Unwind_VRS_RegClass __regclass,
uint32_t __regno,
_Unwind_VRS_DataRepresentation __representation,
void *__valuep);
_Unwind_VRS_Result _Unwind_VRS_Set(struct _Unwind_Context *__context,
_Unwind_VRS_RegClass __regclass,
uint32_t __regno,
_Unwind_VRS_DataRepresentation __representation,
void *__valuep);
static __inline__
_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *__context, int __index) {
_Unwind_Word __value;
_Unwind_VRS_Get(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
return __value;
}
static __inline__
void _Unwind_SetGR(struct _Unwind_Context *__context, int __index,
_Unwind_Word __value) {
_Unwind_VRS_Set(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
}
static __inline__
_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *__context) {
_Unwind_Word __ip = _Unwind_GetGR(__context, 15);
return __ip & ~(_Unwind_Word)(0x1); /* Remove thumb mode bit. */
}
static __inline__
void _Unwind_SetIP(struct _Unwind_Context *__context, _Unwind_Word __value) {
_Unwind_Word __thumb_mode_bit = _Unwind_GetGR(__context, 15) & 0x1;
_Unwind_SetGR(__context, 15, __value | __thumb_mode_bit);
}
#else
_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *, int);
void _Unwind_SetGR(struct _Unwind_Context *, int, _Unwind_Word);
_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *);
void _Unwind_SetIP(struct _Unwind_Context *, _Unwind_Word);
#endif
_Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *);
_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *);
void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *);
_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *);
/* DWARF EH functions; currently not available on Darwin/ARM */
#if !defined(__APPLE__) || !defined(__arm__)
_Unwind_Reason_Code _Unwind_RaiseException(struct _Unwind_Exception *);
_Unwind_Reason_Code _Unwind_ForcedUnwind(struct _Unwind_Exception *,
_Unwind_Stop_Fn, void *);
void _Unwind_DeleteException(struct _Unwind_Exception *);
void _Unwind_Resume(struct _Unwind_Exception *);
_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(struct _Unwind_Exception *);
#endif
_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void *);
/* setjmp(3)/longjmp(3) stuff */
typedef struct SjLj_Function_Context *_Unwind_FunctionContext_t;
void _Unwind_SjLj_Register(_Unwind_FunctionContext_t);
void _Unwind_SjLj_Unregister(_Unwind_FunctionContext_t);
_Unwind_Reason_Code _Unwind_SjLj_RaiseException(struct _Unwind_Exception *);
_Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind(struct _Unwind_Exception *,
_Unwind_Stop_Fn, void *);
void _Unwind_SjLj_Resume(struct _Unwind_Exception *);
_Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow(struct _Unwind_Exception *);
void *_Unwind_FindEnclosingFunction(void *);
#ifdef __APPLE__
_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *)
__attribute__((unavailable));
_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *)
__attribute__((unavailable));
/* Darwin-specific functions */
void __register_frame(const void *);
void __deregister_frame(const void *);
struct dwarf_eh_bases {
uintptr_t tbase;
uintptr_t dbase;
uintptr_t func;
};
void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *);
void __register_frame_info_bases(const void *, void *, void *, void *)
__attribute__((unavailable));
void __register_frame_info(const void *, void *) __attribute__((unavailable));
void __register_frame_info_table_bases(const void *, void*, void *, void *)
__attribute__((unavailable));
void __register_frame_info_table(const void *, void *)
__attribute__((unavailable));
void __register_frame_table(const void *) __attribute__((unavailable));
void __deregister_frame_info(const void *) __attribute__((unavailable));
void __deregister_frame_info_bases(const void *)__attribute__((unavailable));
#else
_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *);
_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *);
#endif
#ifndef HIDE_EXPORTS
#pragma GCC visibility pop
#endif
#ifdef __cplusplus
}
#endif
#endif
#endif /* __CLANG_UNWIND_H */
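
The portable heart of this interface is _Unwind_Backtrace, which walks the stack and invokes a callback per frame. A minimal backtrace sketch against the declarations above (trace_cb is an illustrative name):

#include <unwind.h>
#include <stdio.h>

/* Called once per frame; _Unwind_GetIP extracts that frame's PC. */
static _Unwind_Reason_Code trace_cb(struct _Unwind_Context *ctx, void *arg) {
  int *count = (int *)arg;
  printf("frame %2d: ip = %p\n", (*count)++, (void *)_Unwind_GetIP(ctx));
  return _URC_NO_REASON;  /* keep unwinding */
}

int main(void) {
  int count = 0;
  _Unwind_Backtrace(trace_cb, &count);
  return 0;
}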

View File

@@ -1,26 +0,0 @@
/*===---- varargs.h - Variable argument handling -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __VARARGS_H
#define __VARARGS_H
#error "Please use <stdarg.h> instead of <varargs.h>"
#endif
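
The #error above redirects to <stdarg.h>; for reference, the stdarg idiom it points at (sum is an illustrative variadic helper):

#include <stdarg.h>
#include <stdio.h>

/* Sum n int arguments using the stdarg protocol. */
static int sum(int n, ...) {
  va_list ap;
  va_start(ap, n);  /* must name the last fixed parameter */
  int total = 0;
  for (int i = 0; i < n; ++i)
    total += va_arg(ap, int);
  va_end(ap);
  return total;
}

int main(void) {
  printf("%d\n", sum(3, 1, 2, 3));  /* prints 6 */
  return 0;
}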

View File

@@ -1,42 +0,0 @@
/*===---- wmmintrin.h - AES intrinsics ------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef _WMMINTRIN_H
#define _WMMINTRIN_H
#include <emmintrin.h>
#if !defined (__AES__) && !defined (__PCLMUL__)
# error "AES/PCLMUL instructions not enabled"
#else
#ifdef __AES__
#include <__wmmintrin_aes.h>
#endif /* __AES__ */
#ifdef __PCLMUL__
#include <__wmmintrin_pclmul.h>
#endif /* __PCLMUL__ */
#endif /* __AES__ || __PCLMUL__ */
#endif /* _WMMINTRIN_H */
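
A brief sketch of the AES path this umbrella header enables (assumes -maes; aes_round is an illustrative wrapper):

#include <wmmintrin.h>

/* _mm_aesenc_si128 performs one full AES encryption round:
 * ShiftRows, SubBytes, MixColumns, then XOR with the round key. */
__m128i aes_round(__m128i state, __m128i round_key) {
  return _mm_aesenc_si128(state, round_key);
}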

View File

@@ -1,79 +0,0 @@
/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __X86INTRIN_H
#define __X86INTRIN_H
#include <immintrin.h>
#ifdef __3dNOW__
#include <mm3dnow.h>
#endif
#ifdef __BMI__
#include <bmiintrin.h>
#endif
#ifdef __BMI2__
#include <bmi2intrin.h>
#endif
#ifdef __LZCNT__
#include <lzcntintrin.h>
#endif
#ifdef __POPCNT__
#include <popcntintrin.h>
#endif
#ifdef __RDSEED__
#include <rdseedintrin.h>
#endif
#ifdef __PRFCHW__
#include <prfchwintrin.h>
#endif
#ifdef __SSE4A__
#include <ammintrin.h>
#endif
#ifdef __FMA4__
#include <fma4intrin.h>
#endif
#ifdef __XOP__
#include <xopintrin.h>
#endif
#ifdef __TBM__
#include <tbmintrin.h>
#endif
#ifdef __F16C__
#include <f16cintrin.h>
#endif
// FIXME: LWP
#endif /* __X86INTRIN_H */
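
Because every sub-header here is gated on its feature macro, user code can mirror the same guards. A sketch using the POPCNT path with a portable fallback (bits_set is an illustrative name; -mpopcnt enables the fast path):

#include <x86intrin.h>

unsigned bits_set(unsigned v) {
#ifdef __POPCNT__
  return (unsigned)_mm_popcnt_u32(v);  /* hardware popcount */
#else
  unsigned n = 0;                      /* portable fallback */
  for (; v; v &= v - 1)                /* clear lowest set bit */
    ++n;
  return n;
#endif
}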

File diff suppressed because it is too large

View File

@@ -1,804 +0,0 @@
/*===---- xopintrin.h - XOP intrinsics -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __X86INTRIN_H
#error "Never use <xopintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __XOPINTRIN_H
#define __XOPINTRIN_H
#ifndef __XOP__
# error "XOP instruction set is not enabled"
#else
#include <fma4intrin.h>
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddw_epi8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddd_epi8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddq_epi8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddd_epi16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddq_epi16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddq_epi32(__m128i __A)
{
return (__m128i)__builtin_ia32_vphadddq((__v4si)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddw_epu8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddd_epu8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddq_epu8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddd_epu16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddq_epu16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_haddq_epu32(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsubw_epi8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsubd_epi16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsubq_epi32(__m128i __A)
{
return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpcmov(__A, __B, __C);
}
static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
{
return (__m256i)__builtin_ia32_vpcmov_256(__A, __B, __C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_rot_epi8(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_rot_epi16(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_rot_epi32(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_rot_epi64(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
}
#define _mm_roti_epi8(A, N) __extension__ ({ \
__m128i __A = (A); \
(__m128i)__builtin_ia32_vprotbi((__v16qi)__A, (N)); })
#define _mm_roti_epi16(A, N) __extension__ ({ \
__m128i __A = (A); \
(__m128i)__builtin_ia32_vprotwi((__v8hi)__A, (N)); })
#define _mm_roti_epi32(A, N) __extension__ ({ \
__m128i __A = (A); \
(__m128i)__builtin_ia32_vprotdi((__v4si)__A, (N)); })
#define _mm_roti_epi64(A, N) __extension__ ({ \
__m128i __A = (A); \
(__m128i)__builtin_ia32_vprotqi((__v2di)__A, (N)); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_shl_epi8(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_shl_epi16(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_shl_epi32(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_shl_epi64(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha_epi8(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha_epi16(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha_epi32(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sha_epi64(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);
}
#define _mm_com_epu8(A, B, N) __extension__ ({ \
__m128i __A = (A); \
__m128i __B = (B); \
(__m128i)__builtin_ia32_vpcomub((__v16qi)__A, (__v16qi)__B, (N)); })
#define _mm_com_epu16(A, B, N) __extension__ ({ \
__m128i __A = (A); \
__m128i __B = (B); \
(__m128i)__builtin_ia32_vpcomuw((__v8hi)__A, (__v8hi)__B, (N)); })
#define _mm_com_epu32(A, B, N) __extension__ ({ \
__m128i __A = (A); \
__m128i __B = (B); \
(__m128i)__builtin_ia32_vpcomud((__v4si)__A, (__v4si)__B, (N)); })
#define _mm_com_epu64(A, B, N) __extension__ ({ \
__m128i __A = (A); \
__m128i __B = (B); \
(__m128i)__builtin_ia32_vpcomuq((__v2di)__A, (__v2di)__B, (N)); })
#define _mm_com_epi8(A, B, N) __extension__ ({ \
__m128i __A = (A); \
__m128i __B = (B); \
(__m128i)__builtin_ia32_vpcomb((__v16qi)__A, (__v16qi)__B, (N)); })
#define _mm_com_epi16(A, B, N) __extension__ ({ \
__m128i __A = (A); \
__m128i __B = (B); \
(__m128i)__builtin_ia32_vpcomw((__v8hi)__A, (__v8hi)__B, (N)); })
#define _mm_com_epi32(A, B, N) __extension__ ({ \
__m128i __A = (A); \
__m128i __B = (B); \
(__m128i)__builtin_ia32_vpcomd((__v4si)__A, (__v4si)__B, (N)); })
#define _mm_com_epi64(A, B, N) __extension__ ({ \
__m128i __A = (A); \
__m128i __B = (B); \
(__m128i)__builtin_ia32_vpcomq((__v2di)__A, (__v2di)__B, (N)); })
#define _MM_PCOMCTRL_LT 0
#define _MM_PCOMCTRL_LE 1
#define _MM_PCOMCTRL_GT 2
#define _MM_PCOMCTRL_GE 3
#define _MM_PCOMCTRL_EQ 4
#define _MM_PCOMCTRL_NEQ 5
#define _MM_PCOMCTRL_FALSE 6
#define _MM_PCOMCTRL_TRUE 7
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comlt_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comle_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comgt_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comge_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comeq_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comneq_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comfalse_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comtrue_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comlt_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comle_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comgt_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comge_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comeq_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comneq_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comfalse_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comtrue_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comlt_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comle_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comgt_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comge_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comeq_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comneq_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comfalse_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comtrue_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comlt_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comle_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comgt_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comge_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comeq_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comneq_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comfalse_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comtrue_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comlt_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comle_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comgt_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comge_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comeq_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comneq_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comfalse_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comtrue_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comlt_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comle_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comgt_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comge_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comeq_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comneq_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comfalse_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comtrue_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comlt_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comle_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comgt_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comge_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comeq_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comneq_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comfalse_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comtrue_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comlt_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comle_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comgt_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comge_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comeq_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comneq_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comfalse_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_comtrue_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE);
}
#define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \
__m128d __X = (X); \
__m128d __Y = (Y); \
__m128i __C = (C); \
(__m128d)__builtin_ia32_vpermil2pd((__v2df)__X, (__v2df)__Y, \
(__v2di)__C, (I)); })
#define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \
__m256d __X = (X); \
__m256d __Y = (Y); \
__m256i __C = (C); \
(__m256d)__builtin_ia32_vpermil2pd256((__v4df)__X, (__v4df)__Y, \
(__v4di)__C, (I)); })
#define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \
__m128 __X = (X); \
__m128 __Y = (Y); \
__m128i __C = (C); \
(__m128)__builtin_ia32_vpermil2ps((__v4sf)__X, (__v4sf)__Y, \
(__v4si)__C, (I)); })
#define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \
__m256 __X = (X); \
__m256 __Y = (Y); \
__m256i __C = (C); \
(__m256)__builtin_ia32_vpermil2ps256((__v8sf)__X, (__v8sf)__Y, \
(__v8si)__C, (I)); })
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_frcz_ss(__m128 __A)
{
return (__m128)__builtin_ia32_vfrczss((__v4sf)__A);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_frcz_sd(__m128d __A)
{
return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_frcz_ps(__m128 __A)
{
return (__m128)__builtin_ia32_vfrczps((__v4sf)__A);
}
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_frcz_pd(__m128d __A)
{
return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);
}
static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_frcz_ps(__m256 __A)
{
return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A);
}
static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_frcz_pd(__m256d __A)
{
return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);
}
#endif /* __XOP__ */
#endif /* __XOPINTRIN_H */
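
A usage sketch for the comparison family above: _mm_comlt_epu8 sets a byte lane to all-ones where the unsigned less-than holds. It assumes an XOP-capable target compiled with -mxop:

#include <x86intrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_setr_epi8(1, 2, 3, 4, 5, 6, 7, 8,
                            9, 10, 11, 12, 13, 14, 15, 16);
  __m128i b = _mm_set1_epi8(9);
  /* lanes holding 1..8 compare less-than 9 -> 0xff; the rest -> 0x00 */
  __m128i lt = _mm_comlt_epu8(a, b);
  unsigned char out[16];
  _mm_storeu_si128((__m128i *)out, lt);
  for (int i = 0; i < 16; ++i)
    printf("%02x ", out[i]);  /* ff x8 then 00 x8 */
  printf("\n");
  return 0;
}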