static __inline void
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}
static __inline void
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
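/* Usage sketch (not part of the original header): selecting a rounding
   mode and enabling flush-to-zero.  _MM_ROUND_TOWARD_ZERO and
   _MM_FLUSH_ZERO_ON are defined earlier in this header; the function
   below is only an illustration.  */
#if 0
static void
__example_csr_modes (void)
{
  /* Truncate results instead of rounding to nearest.  */
  _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
  /* Treat denormal results as zero for speed.  */
  _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON);
}
#endif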
/* Create a vector with element 0 as *P and the rest zero. */
static __inline __m128
_mm_load_ss (float const *__P)
{
return (__m128) __builtin_ia32_loadss (__P);
}
/* Create a vector with all four elements equal to *P. */
static __inline __m128
_mm_load1_ps (float const *__P)
{
__v4sf __tmp = __builtin_ia32_loadss (__P);
return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}
static __inline __m128
_mm_load_ps1 (float const *__P)
{
return _mm_load1_ps (__P);
}
/* Load four SPFP values from P. The address must be 16-byte aligned. */
static __inline __m128
_mm_load_ps (float const *__P)
{
return (__m128) __builtin_ia32_loadaps (__P);
}
/* Load four SPFP values from P. The address need not be 16-byte aligned. */
static __inline __m128
_mm_loadu_ps (float const *__P)
{
return (__m128) __builtin_ia32_loadups (__P);
}
/* Load four SPFP values in reverse order. The address must be aligned. */
static __inline __m128
_mm_loadr_ps (float const *__P)
{
__v4sf __tmp = __builtin_ia32_loadaps (__P);
return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}
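/* Usage sketch (not part of the original header): the different load
   forms side by side.  The aligned local buffer is illustrative.  */
#if 0
static void
__example_loads (void)
{
  float buf[4] __attribute__ ((aligned (16))) = { 1.0f, 2.0f, 3.0f, 4.0f };
  __m128 a = _mm_load_ps (buf);     /* {1, 2, 3, 4}; address must be aligned  */
  __m128 u = _mm_loadu_ps (buf);    /* same values, any address               */
  __m128 r = _mm_loadr_ps (buf);    /* {4, 3, 2, 1}                           */
  __m128 s = _mm_load_ss (buf);     /* {1, 0, 0, 0}                           */
  __m128 one = _mm_load1_ps (buf);  /* {1, 1, 1, 1}                           */
}
#endif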
/* Create a vector with element 0 as F and the rest zero. */
static __inline __m128
_mm_set_ss (float __F)
{
return (__m128) __builtin_ia32_loadss (&__F);
}
/* Create a vector with all four elements equal to F. */
static __inline __m128
_mm_set1_ps (float __F)
{
__v4sf __tmp = __builtin_ia32_loadss (&__F);
return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}
static __inline __m128
_mm_set_ps1 (float __F)
{
return _mm_set1_ps (__F);
}
/* Create the vector [Z Y X W]. */
static __inline __m128
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
return (__v4sf) {__W, __X, __Y, __Z};
}
/* Create the vector [W X Y Z]. */
static __inline __m128
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
return _mm_set_ps (__W, __X, __Y, __Z);
}
/* Create a vector of zeros. */
static __inline __m128
_mm_setzero_ps (void)
{
return (__m128) __builtin_ia32_setzerops ();
}
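/* Usage sketch (not part of the original header): element order of the
   set forms.  _mm_set_ps lists elements from highest to lowest,
   _mm_setr_ps from lowest to highest.  */
#if 0
static void
__example_sets (void)
{
  __m128 a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);   /* memory order {1, 2, 3, 4} */
  __m128 b = _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f);  /* memory order {1, 2, 3, 4} */
  __m128 c = _mm_set1_ps (5.0f);                    /* {5, 5, 5, 5}              */
  __m128 z = _mm_setzero_ps ();                     /* {0, 0, 0, 0}              */
}
#endif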
/* Stores the lower SPFP value. */
static __inline void
_mm_store_ss (float *__P, __m128 __A)
{
__builtin_ia32_storess (__P, (__v4sf)__A);
}
/* Store the lower SPFP value across four words. */
static __inline void
_mm_store1_ps (float *__P, __m128 __A)
{
__v4sf __va = (__v4sf)__A;
__v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
__builtin_ia32_storeaps (__P, __tmp);
}
static __inline void
_mm_store_ps1 (float *__P, __m128 __A)
{
_mm_store1_ps (__P, __A);
}
/* Store four SPFP values. The address must be 16-byte aligned. */
static __inline void
_mm_store_ps (float *__P, __m128 __A)
{
__builtin_ia32_storeaps (__P, (__v4sf)__A);
}
/* Store four SPFP values. The address need not be 16-byte aligned. */
static __inline void
_mm_storeu_ps (float *__P, __m128 __A)
{
__builtin_ia32_storeups (__P, (__v4sf)__A);
}
/* Store four SPFP values in reverse order. The address must be aligned. */
static __inline void
_mm_storer_ps (float *__P, __m128 __A)
{
__v4sf __va = (__v4sf)__A;
__v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
__builtin_ia32_storeaps (__P, __tmp);
}
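/* Usage sketch (not part of the original header): the store forms
   mirror the loads.  */
#if 0
static void
__example_stores (__m128 __v)
{
  float buf[4] __attribute__ ((aligned (16)));
  float lo;
  _mm_store_ps (buf, __v);    /* four values; address must be aligned  */
  _mm_storeu_ps (buf, __v);   /* four values; any address              */
  _mm_storer_ps (buf, __v);   /* four values in reverse order          */
  _mm_store1_ps (buf, __v);   /* low value replicated into all four    */
  _mm_store_ss (&lo, __v);    /* low value only                        */
}
#endif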
/* Sets the low SPFP value of A from the low value of B. */
static __inline __m128
_mm_move_ss (__m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}
/* Extracts one of the four words of A. The selector N must be immediate. */
#if 0
static __inline int
_mm_extract_pi16 (__m64 __A, int __N)
{
return __builtin_ia32_pextrw ((__v4hi)__A, __N);
}
static __inline int
_m_pextrw (__m64 __A, int __N)
{
return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
__builtin_ia32_pextrw ((__v4hi)(A), (N))
#define _m_pextrw(A, N) _mm_extract_pi16((A), (N))
#endif
/* Inserts word D into one of four words of A. The selector N must be
immediate. */
#if 0
static __inline __m64
_mm_insert_pi16 (__m64 __A, int __D, int __N)
{
return (__m64)__builtin_ia32_pinsrw ((__v4hi)__A, __D, __N);
}
static __inline __m64
_m_pinsrw (__m64 __A, int __D, int __N)
{
return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N))
#endif
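/* Usage sketch (not part of the original header): N selects one of the
   four 16-bit lanes and must be a compile-time constant.  _mm_set_pi16
   and _mm_empty come from <mmintrin.h>, which this header includes.  */
#if 0
static int
__example_extract_insert (void)
{
  __m64 v = _mm_set_pi16 (4, 3, 2, 1);   /* lanes, low to high: 1 2 3 4 */
  int w = _mm_extract_pi16 (v, 2);       /* w == 3                      */
  v = _mm_insert_pi16 (v, 9, 0);         /* lanes become 9 2 3 4        */
  _mm_empty ();
  return w;
}
#endif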
/* Compute the element-wise maximum of signed 16-bit values. */
static __inline __m64
_mm_max_pi16 (__m64 __A, __m64 __B)
{
return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}
static __inline __m64
_m_pmaxsw (__m64 __A, __m64 __B)
{
return _mm_max_pi16 (__A, __B);
}
/* Compute the element-wise maximum of unsigned 8-bit values. */
static __inline __m64
_mm_max_pu8 (__m64 __A, __m64 __B)
{
return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}
static __inline __m64
_m_pmaxub (__m64 __A, __m64 __B)
{
return _mm_max_pu8 (__A, __B);
}
/* Compute the element-wise minimum of signed 16-bit values. */
static __inline __m64
_mm_min_pi16 (__m64 __A, __m64 __B)
{
return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}
static __inline __m64
_m_pminsw (__m64 __A, __m64 __B)
{
return _mm_min_pi16 (__A, __B);
}
/* Compute the element-wise minimum of unsigned 8-bit values. */
static __inline __m64
_mm_min_pu8 (__m64 __A, __m64 __B)
{
return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}
static __inline __m64
_m_pminub (__m64 __A, __m64 __B)
{
return _mm_min_pu8 (__A, __B);
}
/* Create an 8-bit mask of the signs of 8-bit values. */
static __inline int
_mm_movemask_pi8 (__m64 __A)
{
return __builtin_ia32_pmovmskb ((__v8qi)__A);
}
static __inline int
_m_pmovmskb (__m64 __A)
{
return _mm_movemask_pi8 (__A);
}
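/* Usage sketch (not part of the original header): combining the
   element-wise unsigned maximum with the byte sign mask.  _mm_set_pi8
   and _mm_empty come from <mmintrin.h>.  */
#if 0
static int
__example_max_movemask (void)
{
  __m64 a = _mm_set_pi8 (0, 0, 0, 0, 0, 0, 0, -1);
  __m64 b = _mm_set_pi8 (0, 0, 0, 0, 0, 0, -1, 0);
  __m64 m = _mm_max_pu8 (a, b);      /* both 0xFF bytes survive the max */
  int mask = _mm_movemask_pi8 (m);   /* mask == 0x03                    */
  _mm_empty ();
  return mask;
}
#endif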
/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
in B and produce the high 16 bits of the 32-bit results. */
static __inline __m64
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}
static __inline __m64
_m_pmulhuw (__m64 __A, __m64 __B)
{
return _mm_mulhi_pu16 (__A, __B);
}
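/* Usage sketch (not part of the original header): for 40000 * 40000 the
   full 32-bit product is 1600000000 (0x5F5E1000), so pmulhuw returns
   0x5F5E in each lane here.  _mm_set1_pi16 comes from <mmintrin.h>.  */
#if 0
static __m64
__example_mulhi (void)
{
  __m64 a = _mm_set1_pi16 ((short) 0x9C40);   /* 0x9C40 == 40000 unsigned */
  return _mm_mulhi_pu16 (a, a);               /* each lane == 0x5F5E      */
}
#endif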
/* Return a combination of the four 16-bit values in A. The selector
must be an immediate. */
#if 0
static __inline __m64
_mm_shuffle_pi16 (__m64 __A, int __N)
{
return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}
static __inline __m64
_m_pshufw (__m64 __A, int __N)
{
return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N))
#endif
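/* Usage sketch (not part of the original header): _MM_SHUFFLE (defined
   earlier in this header) builds the immediate selector; here every
   lane is filled with lane 0.  */
#if 0
static __m64
__example_shuffle (__m64 __v)
{
  return _mm_shuffle_pi16 (__v, _MM_SHUFFLE (0, 0, 0, 0));   /* broadcast lane 0 */
}
#endif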
/* Conditionally store byte elements of A into P. The high bit of each
byte in the selector N determines whether the corresponding byte from
A is stored. */
static __inline void
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
__builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}
static __inline void
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
_mm_maskmove_si64 (__A, __N, __P);
}
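/* Usage sketch (not part of the original header): only the bytes whose
   mask byte has its high bit set are written, so this stores the two
   low bytes of V and leaves the rest of BUF untouched.  */
#if 0
static void
__example_maskmove (__m64 __v, char *__buf)
{
  __m64 mask = _mm_set_pi8 (0, 0, 0, 0, 0, 0, -1, -1);
  _mm_maskmove_si64 (__v, mask, __buf);
  _mm_empty ();
}
#endif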
/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
static __inline __m64
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}
static __inline __m64
_m_pavgb (__m64 __A, __m64 __B)
{
return _mm_avg_pu8 (__A, __B);
}
/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
static __inline __m64
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}
static __inline __m64
_m_pavgw (__m64 __A, __m64 __B)
{
return _mm_avg_pu16 (__A, __B);
}
/* Compute the sum of the absolute differences of the unsigned 8-bit
values in A and B. Return the value in the lower 16-bit word; the
upper words are cleared. */
static __inline __m64
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}
static __inline __m64
_m_psadbw (__m64 __A, __m64 __B)
{
return _mm_sad_pu8 (__A, __B);
}
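/* Usage sketch (not part of the original header): psadbw sums
   |a[i] - b[i]| over the eight bytes; here the result is 8.
   _mm_set1_pi8 and _mm_cvtsi64_si32 come from <mmintrin.h>.  */
#if 0
static int
__example_sad (void)
{
  __m64 a = _mm_set1_pi8 (3);
  __m64 b = _mm_set1_pi8 (2);
  int sum = _mm_cvtsi64_si32 (_mm_sad_pu8 (a, b));   /* 8 * |3 - 2| == 8 */
  _mm_empty ();
  return sum;
}
#endif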
/* Loads one cache line from address P to a location "closer" to the
processor. The selector I specifies the type of prefetch operation. */
#if 0
static __inline void
_mm_prefetch (void *__P, enum _mm_hint __I)
{
__builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
__builtin_prefetch ((P), 0, (I))
#endif
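/* Usage sketch (not part of the original header): prefetching ahead of
   the element being processed.  _MM_HINT_T0 is defined earlier in this
   header; the distance of 16 floats is an arbitrary illustration.  */
#if 0
static float
__example_prefetch (float const *__p, int __n)
{
  float sum = 0.0f;
  int i;
  for (i = 0; i < __n; i++)
    {
      _mm_prefetch ((char const *) (__p + i + 16), _MM_HINT_T0);
      sum += __p[i];
    }
  return sum;
}
#endif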
/* Stores the data in A to the address P without polluting the caches. */
static __inline void
_mm_stream_pi (__m64 *__P, __m64 __A)
{
__builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}
/* Likewise. The address must be 16-byte aligned. */
static __inline void
_mm_stream_ps (float *__P, __m128 __A)
{
__builtin_ia32_movntps (__P, (__v4sf)__A);
}
/* Guarantees that every preceding store is globally visible before
any subsequent store. */
static __inline void
_mm_sfence (void)
{
__builtin_ia32_sfence ();
}
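/* Usage sketch (not part of the original header): non-temporal stores
   bypass the cache, so an sfence is issued before the data is handed to
   another agent (another thread or a device).  */
#if 0
static void
__example_stream (float *__dst, __m128 __v)   /* __dst must be 16-byte aligned */
{
  _mm_stream_ps (__dst, __v);
  _mm_sfence ();
}
#endif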
/* The execution of the next instruction is delayed by an implementation
specific amount of time. The instruction does not modify the
architectural state. */
static __inline void
_mm_pause (void)
{
__asm__ __volatile__ ("rep; nop" : : );
}
/* Transpose the 4x4 matrix composed of row[0-3]. */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
__v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
__v4sf __t0 = __builtin_ia32_shufps (__r0, __r1, 0x44); \
__v4sf __t2 = __builtin_ia32_shufps (__r0, __r1, 0xEE); \
__v4sf __t1 = __builtin_ia32_shufps (__r2, __r3, 0x44); \
__v4sf __t3 = __builtin_ia32_shufps (__r2, __r3, 0xEE); \
(row0) = __builtin_ia32_shufps (__t0, __t1, 0x88); \
(row1) = __builtin_ia32_shufps (__t0, __t1, 0xDD); \
(row2) = __builtin_ia32_shufps (__t2, __t3, 0x88); \
(row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD); \
} while (0)
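/* Usage sketch (not part of the original header): transposing a 4x4
   matrix held in four __m128 rows; the macro updates its arguments in
   place.  */
#if 0
static void
__example_transpose (__m128 *__rows)   /* __rows points to four rows */
{
  _MM_TRANSPOSE4_PS (__rows[0], __rows[1], __rows[2], __rows[3]);
}
#endif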
/* For backward source compatibility. */
#include <emmintrin.h>
#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */