
📄 i420_yuy2.h

📁 VLC Player Source Code
💻 C header (H)
📖 Page 1 of 2
punpckhbw %%xmm1, %%xmm0  #                     v3 y7 u3 y6 v2 y5 u2 y4   \n\
movdqu    %%xmm0, 16(%0)  # Store high YUYV                               \n\
movdqa    %%xmm3, %%xmm4  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
punpcklbw %%xmm1, %%xmm4  #                     v1 Y3 u1 Y2 v0 Y1 u0 Y0   \n\
movdqu    %%xmm4, (%1)    # Store low YUYV                                \n\
punpckhbw %%xmm1, %%xmm3  #                     v3 Y7 u3 Y6 v2 Y5 u2 Y4   \n\
movdqu    %%xmm3, 16(%1)  # Store high YUYV                               \n\
"

#define SSE2_YUV420_YVYU_ALIGNED "                                          \n\
movdqa      (%2), %%xmm0  # Load 16 Y           y7 y6 y5 y4 y3 y2 y1 y0   \n\
movdqa      (%3), %%xmm3  # Load 16 Y           Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
punpcklbw %%xmm1, %%xmm2  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
movdqa    %%xmm0, %%xmm1  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
punpcklbw %%xmm2, %%xmm1  #                     u1 y3 v1 y2 u0 y1 v0 y0   \n\
movntdq   %%xmm1, (%0)    # Store low YUYV                                \n\
punpckhbw %%xmm2, %%xmm0  #                     u3 y7 v3 y6 u2 y5 v2 y4   \n\
movntdq   %%xmm0, 16(%0)  # Store high YUYV                               \n\
movdqa    %%xmm3, %%xmm4  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
punpcklbw %%xmm2, %%xmm4  #                     u1 Y3 v1 Y2 u0 Y1 v0 Y0   \n\
movntdq   %%xmm4, (%1)    # Store low YUYV                                \n\
punpckhbw %%xmm2, %%xmm3  #                     u3 Y7 v3 Y6 u2 Y5 v2 Y4   \n\
movntdq   %%xmm3, 16(%1)  # Store high YUYV                               \n\
"

#define SSE2_YUV420_YVYU_UNALIGNED "                                        \n\
movdqu      (%2), %%xmm0  # Load 16 Y           y7 y6 y5 y4 y3 y2 y1 y0   \n\
movdqu      (%3), %%xmm3  # Load 16 Y           Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
prefetchnta (%0)          # Tell CPU not to cache output YVYU data        \n\
prefetchnta (%1)          # Tell CPU not to cache output YVYU data        \n\
punpcklbw %%xmm1, %%xmm2  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
movdqu    %%xmm0, %%xmm1  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
punpcklbw %%xmm2, %%xmm1  #                     u1 y3 v1 y2 u0 y1 v0 y0   \n\
movdqu    %%xmm1, (%0)    # Store low YUYV                                \n\
punpckhbw %%xmm2, %%xmm0  #                     u3 y7 v3 y6 u2 y5 v2 y4   \n\
movdqu    %%xmm0, 16(%0)  # Store high YUYV                               \n\
movdqu    %%xmm3, %%xmm4  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
punpcklbw %%xmm2, %%xmm4  #                     u1 Y3 v1 Y2 u0 Y1 v0 Y0   \n\
movdqu    %%xmm4, (%1)    # Store low YUYV                                \n\
punpckhbw %%xmm2, %%xmm3  #                     u3 Y7 v3 Y6 u2 Y5 v2 Y4   \n\
movdqu    %%xmm3, 16(%1)  # Store high YUYV                               \n\
"

#define SSE2_YUV420_UYVY_ALIGNED "                                          \n\
movdqa      (%2), %%xmm0  # Load 16 Y           y7 y6 y5 y4 y3 y2 y1 y0   \n\
movdqa      (%3), %%xmm3  # Load 16 Y           Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
movdqa    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpcklbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
movntdq   %%xmm2, (%0)    # Store low UYVY                                \n\
movdqa    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpckhbw %%xmm0, %%xmm2  #                     y7 v3 y6 u3 y5 v2 y4 u2   \n\
movntdq   %%xmm2, 16(%0)  # Store high UYVY                               \n\
movdqa    %%xmm1, %%xmm4  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpcklbw %%xmm3, %%xmm4  #                     Y3 v1 Y2 u1 Y1 v0 Y0 u0   \n\
movntdq   %%xmm4, (%1)    # Store low UYVY                                \n\
punpckhbw %%xmm3, %%xmm1  #                     Y7 v3 Y6 u3 Y5 v2 Y4 u2   \n\
movntdq   %%xmm1, 16(%1)  # Store high UYVY                               \n\
"

#define SSE2_YUV420_UYVY_UNALIGNED "                                        \n\
movdqu      (%2), %%xmm0  # Load 16 Y           y7 y6 y5 y4 y3 y2 y1 y0   \n\
movdqu      (%3), %%xmm3  # Load 16 Y           Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
prefetchnta (%0)          # Tell CPU not to cache output UYVY data        \n\
prefetchnta (%1)          # Tell CPU not to cache output UYVY data        \n\
punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
movdqu    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpcklbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
movdqu    %%xmm2, (%0)    # Store low UYVY                                \n\
movdqu    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpckhbw %%xmm0, %%xmm2  #                     y7 v3 y6 u3 y5 v2 y4 u2   \n\
movdqu    %%xmm2, 16(%0)  # Store high UYVY                               \n\
movdqu    %%xmm1, %%xmm4  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpcklbw %%xmm3, %%xmm4  #                     Y3 v1 Y2 u1 Y1 v0 Y0 u0   \n\
movdqu    %%xmm4, (%1)    # Store low UYVY                                \n\
punpckhbw %%xmm3, %%xmm1  #                     Y7 v3 Y6 u3 Y5 v2 Y4 u2   \n\
movdqu    %%xmm1, 16(%1)  # Store high UYVY                               \n\
"

#elif defined(HAVE_SSE2_INTRINSICS)

/* SSE2 intrinsics */

#include <emmintrin.h>

#define SSE2_CALL(SSE2_INSTRUCTIONS)            \
    do {                                        \
        __m128i xmm0, xmm1, xmm2, xmm3, xmm4;   \
        SSE2_INSTRUCTIONS                       \
        p_line1 += 32; p_line2 += 32;           \
        p_y1 += 16; p_y2 += 16;                 \
        p_u += 8; p_v += 8;                     \
    } while(0)

#define SSE2_END  _mm_sfence()

#define SSE2_YUV420_YUYV_ALIGNED                    \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm0 = _mm_load_si128((__m128i *)p_y1);         \
    xmm3 = _mm_load_si128((__m128i *)p_y2);         \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm0;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line1), xmm2);    \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line1+16), xmm0); \
    xmm4 = xmm3;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line2), xmm4);    \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_YUYV_UNALIGNED                  \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm0 = _mm_loadu_si128((__m128i *)p_y1);        \
    xmm3 = _mm_loadu_si128((__m128i *)p_y2);        \
    _mm_prefetch(p_line1, _MM_HINT_NTA);            \
    _mm_prefetch(p_line2, _MM_HINT_NTA);            \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm0;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line1), xmm2);    \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line1+16), xmm0); \
    xmm4 = xmm3;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line2), xmm4);    \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_YVYU_ALIGNED                    \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm0 = _mm_load_si128((__m128i *)p_y1);         \
    xmm3 = _mm_load_si128((__m128i *)p_y2);         \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm0;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line1), xmm2);    \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line1+16), xmm0); \
    xmm4 = xmm3;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line2), xmm4);    \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_YVYU_UNALIGNED                  \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm0 = _mm_loadu_si128((__m128i *)p_y1);        \
    xmm3 = _mm_loadu_si128((__m128i *)p_y2);        \
    _mm_prefetch(p_line1, _MM_HINT_NTA);            \
    _mm_prefetch(p_line2, _MM_HINT_NTA);            \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm0;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line1), xmm2);    \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line1+16), xmm0); \
    xmm4 = xmm3;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line2), xmm4);    \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_UYVY_ALIGNED                    \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm0 = _mm_load_si128((__m128i *)p_y1);         \
    xmm3 = _mm_load_si128((__m128i *)p_y2);         \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm1;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);           \
    _mm_stream_si128((__m128i*)(p_line1), xmm2);    \
    xmm2 = xmm1;                                    \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
    _mm_stream_si128((__m128i*)(p_line1+16), xmm2); \
    xmm4 = xmm1;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm3);           \
    _mm_stream_si128((__m128i*)(p_line2), xmm4);    \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
    _mm_stream_si128((__m128i*)(p_line2+16), xmm1);

#define SSE2_YUV420_UYVY_UNALIGNED                  \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm0 = _mm_loadu_si128((__m128i *)p_y1);        \
    xmm3 = _mm_loadu_si128((__m128i *)p_y2);        \
    _mm_prefetch(p_line1, _MM_HINT_NTA);            \
    _mm_prefetch(p_line2, _MM_HINT_NTA);            \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm1;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);           \
    _mm_storeu_si128((__m128i*)(p_line1), xmm2);    \
    xmm2 = xmm1;                                    \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
    _mm_storeu_si128((__m128i*)(p_line1+16), xmm2); \
    xmm4 = xmm1;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm3);           \
    _mm_storeu_si128((__m128i*)(p_line2), xmm4);    \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
    _mm_storeu_si128((__m128i*)(p_line2+16), xmm1);

#endif

#endif

/* Used in both accelerated and C modules */

#define C_YUV420_YVYU( )                                                    \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \

#define C_YUV420_Y211( )                                                    \
    *(p_line1)++ = *(p_y1); p_y1 += 2;                                      \
    *(p_line2)++ = *(p_y2); p_y2 += 2;                                      \
    *(p_line1)++ = *(p_line2)++ = *(p_u) - 0x80; p_u += 2;                  \
    *(p_line1)++ = *(p_y1); p_y1 += 2;                                      \
    *(p_line2)++ = *(p_y2); p_y2 += 2;                                      \
    *(p_line1)++ = *(p_line2)++ = *(p_v) - 0x80; p_v += 2;                  \

#define C_YUV420_YUYV( )                                                    \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;                      \

#define C_YUV420_UYVY( )                                                    \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
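
Usage note: the C_YUV420_* macros above are textual fragments, not functions. They assume that output pointers p_line1/p_line2 and input pointers p_y1, p_y2, p_u, p_v already exist in the enclosing scope, and they advance all of them as a side effect. Below is a minimal, self-contained sketch of a driver loop; the function name I420ToYUYV, the assumption of tightly packed planes, and the pitch arithmetic are illustrative guesses for this example, not VLC's actual i420_yuy2.c code.

#include <stdint.h>

/* C_YUV420_YUYV as defined above: writes two YUYV pixel pairs, one per
 * output row, sharing one U/V sample pair, and advances every pointer. */
#define C_YUV420_YUYV( )                                                    \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;

/* Hypothetical driver: convert a tightly packed I420 frame (even width and
 * height, no row padding) into packed YUYV at 2 bytes per pixel. */
static void I420ToYUYV( uint8_t *p_dst, const uint8_t *p_y_plane,
                        const uint8_t *p_u_plane, const uint8_t *p_v_plane,
                        int i_width, int i_height )
{
    for( int i_y = 0; i_y < i_height; i_y += 2 )
    {
        /* In 4:2:0, two luma rows share one chroma row. */
        const uint8_t *p_y1 = p_y_plane + i_y * i_width;
        const uint8_t *p_y2 = p_y1 + i_width;
        const uint8_t *p_u  = p_u_plane + (i_y / 2) * (i_width / 2);
        const uint8_t *p_v  = p_v_plane + (i_y / 2) * (i_width / 2);
        uint8_t *p_line1 = p_dst + i_y * i_width * 2;
        uint8_t *p_line2 = p_line1 + i_width * 2;

        for( int i_x = 0; i_x < i_width; i_x += 2 )
        {
            /* Braces are required: the macro expands to several statements. */
            C_YUV420_YUYV( );
        }
    }
}

The accelerated paths walk the same two-rows-at-a-time pattern but process 16 Y samples per step: each SSE2_* macro body is meant to be expanded inside SSE2_CALL, which declares the xmm temporaries and bumps the pointers, and SSE2_END (_mm_sfence) orders the non-temporal stores after the loop. Assuming the same pointer variables as the sketch above, an inner loop for the intrinsics build might look like:

    for( int i_x = 0; i_x < i_width; i_x += 16 )
        SSE2_CALL( SSE2_YUV420_YUYV_ALIGNED );  /* requires 16-byte-aligned buffers */
    SSE2_END;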
