/* mmintrin.h — Intel MMX intrinsics (excerpt of a GCC-style header).  */
static __inline __m64_mm_mulhi_pi16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64_m_pmulhw (__m64 __m1, __m64 __m2){ return _mm_mulhi_pi16 (__m1, __m2);}/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce the low 16 bits of the results. */static __inline __m64_mm_mullo_pi16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64_m_pmullw (__m64 __m1, __m64 __m2){ return _mm_mullo_pi16 (__m1, __m2);}/* Shift four 16-bit values in M left by COUNT. */static __inline __m64_mm_sll_pi16 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (long long)__count);}static __inline __m64_m_psllw (__m64 __m, __m64 __count){ return _mm_sll_pi16 (__m, __count);}static __inline __m64_mm_slli_pi16 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count);}static __inline __m64_m_psllwi (__m64 __m, int __count){ return _mm_slli_pi16 (__m, __count);}/* Shift two 32-bit values in M left by COUNT. */static __inline __m64_mm_sll_pi32 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_pslld ((__v2si)__m, (long long)__count);}static __inline __m64_m_pslld (__m64 __m, __m64 __count){ return _mm_sll_pi32 (__m, __count);}static __inline __m64_mm_slli_pi32 (__m64 __m, int __count){ return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count);}static __inline __m64_m_pslldi (__m64 __m, int __count){ return _mm_slli_pi32 (__m, __count);}/* Shift the 64-bit value in M left by COUNT. 
*/static __inline __m64_mm_sll_si64 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);}static __inline __m64_m_psllq (__m64 __m, __m64 __count){ return _mm_sll_si64 (__m, __count);}static __inline __m64_mm_slli_si64 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);}static __inline __m64_m_psllqi (__m64 __m, int __count){ return _mm_slli_si64 (__m, __count);}/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */static __inline __m64_mm_sra_pi16 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (long long)__count);}static __inline __m64_m_psraw (__m64 __m, __m64 __count){ return _mm_sra_pi16 (__m, __count);}static __inline __m64_mm_srai_pi16 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count);}static __inline __m64_m_psrawi (__m64 __m, int __count){ return _mm_srai_pi16 (__m, __count);}/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */static __inline __m64_mm_sra_pi32 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psrad ((__v2si)__m, (long long)__count);}static __inline __m64_m_psrad (__m64 __m, __m64 __count){ return _mm_sra_pi32 (__m, __count);}static __inline __m64_mm_srai_pi32 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count);}static __inline __m64_m_psradi (__m64 __m, int __count){ return _mm_srai_pi32 (__m, __count);}/* Shift four 16-bit values in M right by COUNT; shift in zeros. 
*/static __inline __m64_mm_srl_pi16 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (long long)__count);}static __inline __m64_m_psrlw (__m64 __m, __m64 __count){ return _mm_srl_pi16 (__m, __count);}static __inline __m64_mm_srli_pi16 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count);}static __inline __m64_m_psrlwi (__m64 __m, int __count){ return _mm_srli_pi16 (__m, __count);}/* Shift two 32-bit values in M right by COUNT; shift in zeros. */static __inline __m64_mm_srl_pi32 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psrld ((__v2si)__m, (long long)__count);}static __inline __m64_m_psrld (__m64 __m, __m64 __count){ return _mm_srl_pi32 (__m, __count);}static __inline __m64_mm_srli_pi32 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count);}static __inline __m64_m_psrldi (__m64 __m, int __count){ return _mm_srli_pi32 (__m, __count);}/* Shift the 64-bit value in M left by COUNT; shift in zeros. */static __inline __m64_mm_srl_si64 (__m64 __m, __m64 __count){ return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);}static __inline __m64_m_psrlq (__m64 __m, __m64 __count){ return _mm_srl_si64 (__m, __count);}static __inline __m64_mm_srli_si64 (__m64 __m, int __count){ return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);}static __inline __m64_m_psrlqi (__m64 __m, int __count){ return _mm_srli_si64 (__m, __count);}/* Bit-wise AND the 64-bit values in M1 and M2. */static __inline __m64_mm_and_si64 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pand ((long long)__m1, (long long)__m2);}static __inline __m64_m_pand (__m64 __m1, __m64 __m2){ return _mm_and_si64 (__m1, __m2);}/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the 64-bit value in M2. 
*/static __inline __m64_mm_andnot_si64 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pandn ((long long)__m1, (long long)__m2);}static __inline __m64_m_pandn (__m64 __m1, __m64 __m2){ return _mm_andnot_si64 (__m1, __m2);}/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */static __inline __m64_mm_or_si64 (__m64 __m1, __m64 __m2){ return (__m64)__builtin_ia32_por ((long long)__m1, (long long)__m2);}static __inline __m64_m_por (__m64 __m1, __m64 __m2){ return _mm_or_si64 (__m1, __m2);}/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */static __inline __m64_mm_xor_si64 (__m64 __m1, __m64 __m2){ return (__m64)__builtin_ia32_pxor ((long long)__m1, (long long)__m2);}static __inline __m64_m_pxor (__m64 __m1, __m64 __m2){ return _mm_xor_si64 (__m1, __m2);}/* Compare eight 8-bit values. The result of the comparison is 0xFF if the test is true and zero if false. */static __inline __m64_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2);}static __inline __m64_m_pcmpeqb (__m64 __m1, __m64 __m2){ return _mm_cmpeq_pi8 (__m1, __m2);}static __inline __m64_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2);}static __inline __m64_m_pcmpgtb (__m64 __m1, __m64 __m2){ return _mm_cmpgt_pi8 (__m1, __m2);}/* Compare four 16-bit values. The result of the comparison is 0xFFFF if the test is true and zero if false. */static __inline __m64_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64_m_pcmpeqw (__m64 __m1, __m64 __m2){ return _mm_cmpeq_pi16 (__m1, __m2);}static __inline __m64_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2);}static __inline __m64_m_pcmpgtw (__m64 __m1, __m64 __m2){ return _mm_cmpgt_pi16 (__m1, __m2);}/* Compare two 32-bit values. 
The result of the comparison is 0xFFFFFFFF if the test is true and zero if false. */static __inline __m64_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2);}static __inline __m64_m_pcmpeqd (__m64 __m1, __m64 __m2){ return _mm_cmpeq_pi32 (__m1, __m2);}static __inline __m64_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2){ return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);}static __inline __m64_m_pcmpgtd (__m64 __m1, __m64 __m2){ return _mm_cmpgt_pi32 (__m1, __m2);}/* Creates a 64-bit zero. */static __inline __m64_mm_setzero_si64 (void){ return (__m64)__builtin_ia32_mmx_zero ();}/* Creates a vector of two 32-bit values; I0 is least significant. */static __inline __m64_mm_set_pi32 (int __i1, int __i0){ union { __m64 __q; struct { unsigned int __i0; unsigned int __i1; } __s; } __u; __u.__s.__i0 = __i0; __u.__s.__i1 = __i1; return __u.__q;}/* Creates a vector of four 16-bit values; W0 is least significant. */static __inline __m64_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0){ unsigned int __i1 = (unsigned short)__w3 << 16 | (unsigned short)__w2; unsigned int __i0 = (unsigned short)__w1 << 16 | (unsigned short)__w0; return _mm_set_pi32 (__i1, __i0); }/* Creates a vector of eight 8-bit values; B0 is least significant. */static __inline __m64_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0){ unsigned int __i1, __i0; __i1 = (unsigned char)__b7; __i1 = __i1 << 8 | (unsigned char)__b6; __i1 = __i1 << 8 | (unsigned char)__b5; __i1 = __i1 << 8 | (unsigned char)__b4; __i0 = (unsigned char)__b3; __i0 = __i0 << 8 | (unsigned char)__b2; __i0 = __i0 << 8 | (unsigned char)__b1; __i0 = __i0 << 8 | (unsigned char)__b0; return _mm_set_pi32 (__i1, __i0);}/* Similar, but with the arguments in reverse order. 
*/static __inline __m64_mm_setr_pi32 (int __i0, int __i1){ return _mm_set_pi32 (__i1, __i0);}static __inline __m64_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3){ return _mm_set_pi16 (__w3, __w2, __w1, __w0);}static __inline __m64_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7){ return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);}/* Creates a vector of two 32-bit values, both elements containing I. */static __inline __m64_mm_set1_pi32 (int __i){ return _mm_set_pi32 (__i, __i);}/* Creates a vector of four 16-bit values, all elements containing W. */static __inline __m64_mm_set1_pi16 (short __w){ unsigned int __i = (unsigned short)__w << 16 | (unsigned short)__w; return _mm_set1_pi32 (__i);}/* Creates a vector of eight 8-bit values, all elements containing B. */static __inline __m64_mm_set1_pi8 (char __b){ unsigned int __w = (unsigned char)__b << 8 | (unsigned char)__b; unsigned int __i = __w << 16 | __w; return _mm_set1_pi32 (__i);}#endif /* __MMX__ */#endif /* _MMINTRIN_H_INCLUDED */
/* NOTE(review): the text below was page chrome from the code-hosting site
   (keyboard-shortcut help), not part of the header; translated and wrapped
   in a comment so the file remains valid C:
   copy code: Ctrl+C · search code: Ctrl+F · fullscreen: F11 ·
   toggle theme: Ctrl+Shift+D · show shortcuts: ? ·
   increase font size: Ctrl+= · decrease font size: Ctrl+-  */