/* mmintrin.h
   (page chrome from the hosting site; "字号" = "font size")  */
_mm_subs_pu16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psubusw (__m64 __m1, __m64 __m2){ return _mm_subs_pu16 (__m1, __m2);}/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing four 32-bit intermediate results, which are then summed by pairs to produce two 32-bit results. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_madd_pi16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pmaddwd (__m64 __m1, __m64 __m2){ return _mm_madd_pi16 (__m1, __m2);}/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in M2 and produce the high 16 bits of the 32-bit results. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_mulhi_pi16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pmulhw (__m64 __m1, __m64 __m2){ return _mm_mulhi_pi16 (__m1, __m2);}/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce the low 16 bits of the results. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_mullo_pi16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pmullw (__m64 __m1, __m64 __m2){ return _mm_mullo_pi16 (__m1, __m2);}/* Shift four 16-bit values in M left by COUNT. 
*/static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_sll_pi16 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psllw (__m64 __m, __m64 __count){ return _mm_sll_pi16 (__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_slli_pi16 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psllwi (__m64 __m, int __count){ return _mm_slli_pi16 (__m, __count);}/* Shift two 32-bit values in M left by COUNT. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_sll_pi32 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_pslld ((__v2si)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pslld (__m64 __m, __m64 __count){ return _mm_sll_pi32 (__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_slli_pi32 (__m64 __m, int __count){ return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pslldi (__m64 __m, int __count){ return _mm_slli_pi32 (__m, __count);}/* Shift the 64-bit value in M left by COUNT. 
*/static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_sll_si64 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psllq (__m64 __m, __m64 __count){ return _mm_sll_si64 (__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_slli_si64 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psllqi (__m64 __m, int __count){ return _mm_slli_si64 (__m, __count);}/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_sra_pi16 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psraw (__m64 __m, __m64 __count){ return _mm_sra_pi16 (__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_srai_pi16 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psrawi (__m64 __m, int __count){ return _mm_srai_pi16 (__m, __count);}/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. 
*/static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_sra_pi32 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psrad ((__v2si)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psrad (__m64 __m, __m64 __count){ return _mm_sra_pi32 (__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_srai_pi32 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psradi (__m64 __m, int __count){ return _mm_srai_pi32 (__m, __count);}/* Shift four 16-bit values in M right by COUNT; shift in zeros. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_srl_pi16 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psrlw (__m64 __m, __m64 __count){ return _mm_srl_pi16 (__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_srli_pi16 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psrlwi (__m64 __m, int __count){ return _mm_srli_pi16 (__m, __count);}/* Shift two 32-bit values in M right by COUNT; shift in zeros. 
*/static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_srl_pi32 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psrld ((__v2si)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psrld (__m64 __m, __m64 __count){ return _mm_srl_pi32 (__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_srli_pi32 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psrldi (__m64 __m, int __count){ return _mm_srli_pi32 (__m, __count);}/* Shift the 64-bit value in M left by COUNT; shift in zeros. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_srl_si64 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psrlq (__m64 __m, __m64 __count){ return _mm_srl_si64 (__m, __count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_srli_si64 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_psrlqi (__m64 __m, int __count){ return _mm_srli_si64 (__m, __count);}/* Bit-wise AND the 64-bit values in M1 and M2. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_and_si64 (__m64 __m1, __m64 __m2){ return __builtin_ia32_pand (__m1, __m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pand (__m64 __m1, __m64 __m2){ return _mm_and_si64 (__m1, __m2);}/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the 64-bit value in M2. 
*/static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_andnot_si64 (__m64 __m1, __m64 __m2){ return __builtin_ia32_pandn (__m1, __m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pandn (__m64 __m1, __m64 __m2){ return _mm_andnot_si64 (__m1, __m2);}/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_or_si64 (__m64 __m1, __m64 __m2){ return __builtin_ia32_por (__m1, __m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_por (__m64 __m1, __m64 __m2){ return _mm_or_si64 (__m1, __m2);}/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_xor_si64 (__m64 __m1, __m64 __m2){ return __builtin_ia32_pxor (__m1, __m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pxor (__m64 __m1, __m64 __m2){ return _mm_xor_si64 (__m1, __m2);}/* Compare eight 8-bit values. The result of the comparison is 0xFF if the test is true and zero if false. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pcmpeqb (__m64 __m1, __m64 __m2){ return _mm_cmpeq_pi8 (__m1, __m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pcmpgtb (__m64 __m1, __m64 __m2){ return _mm_cmpgt_pi8 (__m1, __m2);}/* Compare four 16-bit values. The result of the comparison is 0xFFFF if the test is true and zero if false. 
*/static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pcmpeqw (__m64 __m1, __m64 __m2){ return _mm_cmpeq_pi16 (__m1, __m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pcmpgtw (__m64 __m1, __m64 __m2){ return _mm_cmpgt_pi16 (__m1, __m2);}/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if the test is true and zero if false. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pcmpeqd (__m64 __m1, __m64 __m2){ return _mm_cmpeq_pi32 (__m1, __m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_m_pcmpgtd (__m64 __m1, __m64 __m2){ return _mm_cmpgt_pi32 (__m1, __m2);}/* Creates a 64-bit zero. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_setzero_si64 (void){ return (__m64)0LL;}/* Creates a vector of two 32-bit values; I0 is least significant. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_set_pi32 (int __i1, int __i0){ return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);}/* Creates a vector of four 16-bit values; W0 is least significant. 
*/static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0){ return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3);}/* Creates a vector of eight 8-bit values; B0 is least significant. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0){ return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7);}/* Similar, but with the arguments in reverse order. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_setr_pi32 (int __i0, int __i1){ return _mm_set_pi32 (__i1, __i0);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3){ return _mm_set_pi16 (__w3, __w2, __w1, __w0);}static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7){ return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);}/* Creates a vector of two 32-bit values, both elements containing I. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_set1_pi32 (int __i){ return _mm_set_pi32 (__i, __i);}/* Creates a vector of four 16-bit values, all elements containing W. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_set1_pi16 (short __w){ return _mm_set_pi16 (__w, __w, __w, __w);}/* Creates a vector of eight 8-bit values, all elements containing B. */static __inline __m64 __attribute__((__always_inline__, __nodebug__))_mm_set1_pi8 (char __b){ return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b);}/* APPLE LOCAL end radar 4152603 */#endif /* __MMX__ */#endif /* _MMINTRIN_H_INCLUDED */
/* Site UI help text from the hosting page (not part of the header):
   Keyboard shortcuts — Copy code: Ctrl+C; Search code: Ctrl+F;
   Full-screen mode: F11; Toggle theme: Ctrl+Shift+D; Show shortcuts: ?;
   Increase font size: Ctrl+=; Decrease font size: Ctrl+-  */