i420_rgb_mmx.h
    *(uint64_t *)p_buffer = (uint64_t)mm0;      \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4);           \
    mm5 = _mm_unpackhi_pi8(mm5, mm1);           \
    mm7 = _mm_slli_pi16(mm7, 2);                \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4));    \
    mm5 = _mm_or_si64(mm5, mm7);                \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4));    \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

#define MMX_UNPACK_16                           \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8);    \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc);    \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8);    \
    mm0 = _mm_srli_pi16(mm0, 3);                \
    mm4 = _mm_setzero_si64();                   \
    mm5 = mm0;                                  \
    mm7 = mm2;                                  \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4);           \
    mm0 = _mm_unpacklo_pi8(mm0, mm1);           \
    mm2 = _mm_slli_pi16(mm2, 3);                \
    mm0 = _mm_or_si64(mm0, mm2);                \
    mm6 = (__m64)*(uint64_t *)(p_y + 8);        \
    *(uint64_t *)p_buffer = (uint64_t)mm0;      \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4);           \
    mm5 = _mm_unpackhi_pi8(mm5, mm1);           \
    mm7 = _mm_slli_pi16(mm7, 3);                \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4));    \
    mm5 = _mm_or_si64(mm5, mm7);                \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4));    \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

#define MMX_UNPACK_32_ARGB                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm0;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm2);           \
    mm5 = mm1;                                  \
    mm5 = _mm_unpacklo_pi8(mm5, mm3);           \
    mm6 = mm4;                                  \
    mm4 = _mm_unpacklo_pi16(mm4, mm5);          \
    *(uint64_t *)p_buffer = (uint64_t)mm4;      \
    mm6 = _mm_unpackhi_pi16(mm6, mm5);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;    \
    mm0 = _mm_unpackhi_pi8(mm0, mm2);           \
    mm1 = _mm_unpackhi_pi8(mm1, mm3);           \
    mm5 = mm0;                                  \
    mm5 = _mm_unpacklo_pi16(mm5, mm1);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;    \
    mm0 = _mm_unpackhi_pi16(mm0, mm1);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_RGBA                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm2;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm1);           \
    mm3 = _mm_unpacklo_pi8(mm3, mm0);           \
    mm5 = mm3;                                  \
    mm3 = _mm_unpacklo_pi16(mm3, mm4);          \
    *(uint64_t *)p_buffer = (uint64_t)mm3;      \
    mm5 = _mm_unpackhi_pi16(mm5, mm4);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;    \
    mm6 = _mm_setzero_si64();                   \
    mm2 = _mm_unpackhi_pi8(mm2, mm1);           \
    mm6 = _mm_unpackhi_pi8(mm6, mm0);           \
    mm0 = mm6;                                  \
    mm6 = _mm_unpacklo_pi16(mm6, mm2);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;    \
    mm0 = _mm_unpackhi_pi16(mm0, mm2);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_BGRA                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm2;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm0);           \
    mm3 = _mm_unpacklo_pi8(mm3, mm1);           \
    mm5 = mm3;                                  \
    mm3 = _mm_unpacklo_pi16(mm3, mm4);          \
    *(uint64_t *)p_buffer = (uint64_t)mm3;      \
    mm5 = _mm_unpackhi_pi16(mm5, mm4);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;    \
    mm6 = _mm_setzero_si64();                   \
    mm2 = _mm_unpackhi_pi8(mm2, mm0);           \
    mm6 = _mm_unpackhi_pi8(mm6, mm1);           \
    mm0 = mm6;                                  \
    mm6 = _mm_unpacklo_pi16(mm6, mm2);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;    \
    mm0 = _mm_unpackhi_pi16(mm0, mm2);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_ABGR                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm1;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm2);           \
    mm5 = mm0;                                  \
    mm5 = _mm_unpacklo_pi8(mm5, mm3);           \
    mm6 = mm4;                                  \
    mm4 = _mm_unpacklo_pi16(mm4, mm5);          \
    *(uint64_t *)p_buffer = (uint64_t)mm4;      \
    mm6 = _mm_unpackhi_pi16(mm6, mm5);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;    \
    mm1 = _mm_unpackhi_pi8(mm1, mm2);           \
    mm0 = _mm_unpackhi_pi8(mm0, mm3);           \
    mm2 = mm1;                                  \
    mm1 = _mm_unpacklo_pi16(mm1, mm0);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm1;    \
    mm2 = _mm_unpackhi_pi16(mm2, mm0);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm2;

#endif

#elif defined( MODULE_NAME_IS_i420_rgb_sse2 )

#if defined(CAN_COMPILE_SSE2)

/* SSE2 assembly */
#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
    do {                                \
    __asm__ __volatile__(               \
        ".p2align 3 \n\t"               \
        SSE2_INSTRUCTIONS               \
        :                               \
        : "r" (p_y), "r" (p_u),         \
          "r" (p_v), "r" (p_buffer)     \
        : "eax" );                      \
    } while(0)

#define SSE2_END  __asm__ __volatile__ ( "sfence" ::: "memory" )

#define SSE2_INIT_16_ALIGNED "                                              \n\
movq        (%1), %%xmm0        # Load 8 Cb       00 00 00 00 u3 u2 u1 u0   \n\
movq        (%2), %%xmm1        # Load 8 Cr       00 00 00 00 v3 v2 v1 v0   \n\
pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
movdqa      (%0), %%xmm6        # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
"

#define SSE2_INIT_16_UNALIGNED "                                            \n\
movq        (%1), %%xmm0        # Load 8 Cb       00 00 00 00 u3 u2 u1 u0   \n\
movq        (%2), %%xmm1        # Load 8 Cr       00 00 00 00 v3 v2 v1 v0   \n\
pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
movdqu      (%0), %%xmm6        # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
prefetchnta (%3)                # Tell CPU not to cache output RGB data     \n\
"

#define SSE2_INIT_32_ALIGNED "                                              \n\
movq        (%1), %%xmm0        # Load 8 Cb       00 00 00 00 u3 u2 u1 u0   \n\
movq        (%2), %%xmm1        # Load 8 Cr       00 00 00 00 v3 v2 v1 v0   \n\
pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
movdqa      (%0), %%xmm6        # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
"

#define SSE2_INIT_32_UNALIGNED "                                            \n\
movq        (%1), %%xmm0        # Load 8 Cb       00 00 00 00 u3 u2 u1 u0   \n\
movq        (%2), %%xmm1        # Load 8 Cr       00 00 00 00 v3 v2 v1 v0   \n\
pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
movdqu      (%0), %%xmm6        # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
prefetchnta (%3)                # Tell CPU not to cache output RGB data     \n\
"

#define SSE2_YUV_MUL "                                                      \n\
# convert the chroma part                                                    \n\
punpcklbw %%xmm4, %%xmm0        # scatter 8 Cb    00 u3 00 u2 00 u1 00 u0   \n\
punpcklbw %%xmm4, %%xmm1        # scatter 8 Cr    00 v3 00 v2 00 v1 00 v0   \n\
movl      $0x00800080, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to 0080 0080 ... 0080 0080       \n\
psubsw    %%xmm5, %%xmm0        # Cb -= 128                                 \n\
psubsw    %%xmm5, %%xmm1        # Cr -= 128                                 \n\
psllw     $3, %%xmm0            # Promote precision                         \n\
psllw     $3, %%xmm1            # Promote precision                         \n\
movdqa    %%xmm0, %%xmm2        # Copy 8 Cb       00 u3 00 u2 00 u1 00 u0   \n\
movdqa    %%xmm1, %%xmm3        # Copy 8 Cr       00 v3 00 v2 00 v1 00 v0   \n\
movl      $0xf37df37d, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to f37d f37d ... f37d f37d       \n\
pmulhw    %%xmm5, %%xmm2        # Mul Cb with green coeff -> Cb green       \n\
movl      $0xe5fce5fc, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to e5fc e5fc ... e5fc e5fc       \n\
pmulhw    %%xmm5, %%xmm3        # Mul Cr with green coeff -> Cr green       \n\
movl      $0x40934093, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to 4093 4093 ... 4093 4093       \n\
pmulhw    %%xmm5, %%xmm0        # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
movl      $0x33123312, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to 3312 3312 ... 3312 3312       \n\
pmulhw    %%xmm5, %%xmm1        # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
paddsw    %%xmm3, %%xmm2        # Cb green + Cr green -> Cgreen             \n\
                                                                             \n\
# convert the luma part                                                      \n\
movl      $0x10101010, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to 1010 1010 ... 1010 1010       \n\
psubusb   %%xmm5, %%xmm6        # Y -= 16                                   \n\
movdqa    %%xmm6, %%xmm7        # Copy 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movl      $0x00ff00ff, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to 00ff 00ff ... 00ff 00ff       \n\
pand      %%xmm5, %%xmm6        # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%xmm7            # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%xmm6            # Promote precision                         \n\
psllw     $3, %%xmm7            # Promote precision                         \n\
movl      $0x253f253f, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to 253f 253f ... 253f 253f       \n\
pmulhw    %%xmm5, %%xmm6        # Mul 8 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    %%xmm5, %%xmm7        # Mul 8 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
"

#define SSE2_YUV_ADD "                                                      \n\
# Do horizontal and vertical scaling                                         \n\
movdqa    %%xmm0, %%xmm3        # Copy Cblue                                 \n\
movdqa    %%xmm1, %%xmm4        # Copy Cred                                  \n\
movdqa    %%xmm2, %%xmm5        # Copy Cgreen                                \n\
paddsw    %%xmm6, %%xmm0        # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
paddsw    %%xmm7, %%xmm3        # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
paddsw    %%xmm6, %%xmm1        # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
paddsw    %%xmm7, %%xmm4        # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
paddsw    %%xmm6, %%xmm2        # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
paddsw    %%xmm7, %%xmm5        # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
                                                                             \n\
# Limit RGB even to 0..255                                                   \n\
packuswb  %%xmm0, %%xmm0        # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
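
For reference, the 16-bit path above (MMX_UNPACK_16) is an eight-pixel-wide version of ordinary RGB565 packing: blue in mm0, red in mm1 and green in mm2 are masked with f8/fc, shifted into place and OR-ed together. A minimal scalar sketch of that packing, not part of the original header (pack_rgb565 is a hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

/* Illustration only: pack one 8-bit R/G/B triple into a 16-bit RGB565 word.
 * MMX_UNPACK_16 performs the same masking, shifting and OR-ing, but on
 * eight pixels per iteration. */
static uint16_t pack_rgb565(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r & 0xf8u) << 8) |  /* R -> bits 15..11 */
                      ((g & 0xfcu) << 3) |  /* G -> bits 10..5  */
                      (b >> 3));            /* B -> bits  4..0  */
}

int main(void)
{
    printf("0x%04x\n", pack_rgb565(255, 128, 0));  /* prints 0xfc00 */
    return 0;
}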
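The 16-bit constants broadcast into xmm5 in SSE2_YUV_MUL (253f, 3312, 4093, e5fc, f37d) appear to be the BT.601 YCbCr-to-RGB coefficients in fixed point: the operands were promoted with psllw $3 and pmulhw keeps the signed high 16 bits of the product, so each constant acts as coeff / 8192. A small check program, assuming that interpretation (not from the original file):

#include <stdio.h>

int main(void)
{
    /* Effective factor of each pmulhw constant after the <<3 promotion. */
    printf("Y  scale : %.3f\n", 0x253f / 8192.0);              /* ~1.164 (255/219) */
    printf("Cr -> R  : %.3f\n", 0x3312 / 8192.0);              /* ~1.596 */
    printf("Cb -> B  : %.3f\n", 0x4093 / 8192.0);              /* ~2.018 */
    printf("Cb -> G  : %.3f\n", (0x10000 - 0xf37d) / 8192.0);  /* ~0.391, applied negatively */
    printf("Cr -> G  : %.3f\n", (0x10000 - 0xe5fc) / 8192.0);  /* ~0.813, applied negatively */
    return 0;
}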