; dct-a.asm.svn-base
    ; in: mm1, mm4, mm0, mm2  out: mm1, mm2, mm3, mm0
    MMX_TRANSPOSE   mm1, mm4, mm0, mm2, mm3

    MMX_SUMSUB_BA   mm3, mm1            ; mm3=s02  mm1=d02
    MMX_SUMSUBD2_AB mm2, mm0, mm5, mm4  ; mm2=s13  mm4=d13 ( well 1 + 3>>1 and 1>>1 + 3)
    MMX_SUMSUB_BADC mm2, mm3, mm4, mm1  ; mm2=s02+s13  mm3=s02-s13  mm4=d02+d13  mm1=d02-d13

    MMX_ZERO        mm7
    movq            mm6, [pw_32 GLOBAL]

    MMX_STORE_DIFF_4P  mm2, mm0, mm6, mm7, [parm1q+0*FDEC_STRIDE]
    MMX_STORE_DIFF_4P  mm4, mm0, mm6, mm7, [parm1q+1*FDEC_STRIDE]
    MMX_STORE_DIFF_4P  mm1, mm0, mm6, mm7, [parm1q+2*FDEC_STRIDE]
    MMX_STORE_DIFF_4P  mm3, mm0, mm6, mm7, [parm1q+3*FDEC_STRIDE]
    ret

; =============================================================================
; 8x8 Transform
; =============================================================================

; in:  ABCDEFGH
; out: FBCGEDHI
%macro DCT8_1D 10
    MMX_SUMSUB_BA  %8, %1 ; %8=s07, %1=d07
    MMX_SUMSUB_BA  %7, %2 ; %7=s16, %2=d16
    MMX_SUMSUB_BA  %6, %3 ; %6=s25, %3=d25
    MMX_SUMSUB_BA  %5, %4 ; %5=s34, %4=d34

    MMX_SUMSUB_BA  %5, %8 ; %5=a0, %8=a2
    MMX_SUMSUB_BA  %6, %7 ; %6=a1, %7=a3

    movdqa  %9, %1
    psraw   %9, 1
    paddw   %9, %1
    paddw   %9, %2
    paddw   %9, %3 ; %9=a4

    movdqa  %10, %4
    psraw   %10, 1
    paddw   %10, %4
    paddw   %10, %2
    psubw   %10, %3 ; %10=a7

    MMX_SUMSUB_BA  %4, %1
    psubw   %1, %3
    psubw   %4, %2
    psraw   %3, 1
    psraw   %2, 1
    psubw   %1, %3 ; %1=a5
    psubw   %4, %2 ; %4=a6

    MMX_SUMSUB_BA  %6, %5 ; %6=b0, %5=b4

    movdqa  %2, %10
    psraw   %2, 2
    paddw   %2, %9  ; %2=b1
    psraw   %9, 2
    psubw   %9, %10 ; %9=b7

    movdqa  %3, %7
    psraw   %3, 1
    paddw   %3, %8 ; %3=b2
    psraw   %8, 1
    psubw   %8, %7 ; %8=b6

    movdqa  %7, %4
    psraw   %7, 2
    paddw   %7, %1 ; %7=b3
    psraw   %1, 2
    psubw   %4, %1 ; %4=b5
%endmacro

;-----------------------------------------------------------------------------
; void __cdecl x264_sub8x8_dct8_sse2( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal x264_sub8x8_dct8_sse2
    MMX_ZERO  xmm9

    MMX_LOAD_DIFF_8P  xmm0, xmm8, xmm9, [parm2q+0*FENC_STRIDE], [parm3q+0*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm1, xmm8, xmm9, [parm2q+1*FENC_STRIDE], [parm3q+1*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm2, xmm8, xmm9, [parm2q+2*FENC_STRIDE], [parm3q+2*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm3, xmm8, xmm9, [parm2q+3*FENC_STRIDE], [parm3q+3*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm4, xmm8, xmm9, [parm2q+4*FENC_STRIDE], [parm3q+4*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm5, xmm8, xmm9, [parm2q+5*FENC_STRIDE], [parm3q+5*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm6, xmm8, xmm9, [parm2q+6*FENC_STRIDE], [parm3q+6*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm7, xmm8, xmm9, [parm2q+7*FENC_STRIDE], [parm3q+7*FDEC_STRIDE]

    DCT8_1D           xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9
    SSE2_TRANSPOSE8x8 xmm5, xmm1, xmm2, xmm6, xmm4, xmm3, xmm7, xmm8, xmm0
    DCT8_1D           xmm5, xmm3, xmm8, xmm6, xmm0, xmm4, xmm2, xmm1, xmm7, xmm9

    movdqa  [parm1q+0x00], xmm4
    movdqa  [parm1q+0x10], xmm3
    movdqa  [parm1q+0x20], xmm8
    movdqa  [parm1q+0x30], xmm2
    movdqa  [parm1q+0x40], xmm0
    movdqa  [parm1q+0x50], xmm6
    movdqa  [parm1q+0x60], xmm1
    movdqa  [parm1q+0x70], xmm7
    ret
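; For reference, a scalar C sketch of the DCT8_1D butterfly above, using the
; same a0..a7 / b0..b7 names as the register comments. The function name and
; the in-place int16_t interface are illustrative, not part of this file:
;
;   static void dct8_1d( int16_t d[8] )
;   {
;       int s07 = d[0]+d[7], s16 = d[1]+d[6], s25 = d[2]+d[5], s34 = d[3]+d[4];
;       int d07 = d[0]-d[7], d16 = d[1]-d[6], d25 = d[2]-d[5], d34 = d[3]-d[4];
;       int a0 = s07 + s34, a2 = s07 - s34;
;       int a1 = s16 + s25, a3 = s16 - s25;
;       int a4 = d16 + d25 + d07 + (d07>>1);
;       int a5 = d07 - d34 - d25 - (d25>>1);
;       int a6 = d07 + d34 - d16 - (d16>>1);
;       int a7 = d16 - d25 + d34 + (d34>>1);
;       d[0] = a0 + a1;      /* b0 */
;       d[1] = a4 + (a7>>2); /* b1 */
;       d[2] = a2 + (a3>>1); /* b2 */
;       d[3] = a5 + (a6>>2); /* b3 */
;       d[4] = a0 - a1;      /* b4 */
;       d[5] = a6 - (a5>>2); /* b5 */
;       d[6] = (a2>>1) - a3; /* b6 */
;       d[7] = (a4>>2) - a7; /* b7 */
;   }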
; in:  ABCDEFGH
; out: IBHDEACG
%macro IDCT8_1D 10
    MMX_SUMSUB_BA  %5, %1 ; %5=a0, %1=a2

    movdqa  %10, %3
    psraw   %3, 1
    psubw   %3, %7  ; %3=a4
    psraw   %7, 1
    paddw   %7, %10 ; %7=a6

    movdqa  %9, %2
    psraw   %9, 1
    paddw   %9, %2
    paddw   %9, %4
    paddw   %9, %6 ; %9=a7

    movdqa  %10, %6
    psraw   %10, 1
    paddw   %10, %6
    paddw   %10, %8
    psubw   %10, %2 ; %10=a5

    psubw   %2, %4
    psubw   %6, %4
    paddw   %2, %8
    psubw   %6, %8
    psraw   %4, 1
    psraw   %8, 1
    psubw   %2, %4 ; %2=a3
    psubw   %6, %8 ; %6=a1

    MMX_SUMSUB_BA  %7, %5 ; %7=b0, %5=b6
    MMX_SUMSUB_BA  %3, %1 ; %3=b2, %1=b4

    movdqa  %4, %9
    psraw   %4, 2
    paddw   %4, %6 ; %4=b1
    psraw   %6, 2
    psubw   %9, %6 ; %9=b7

    movdqa  %8, %10
    psraw   %8, 2
    paddw   %8, %2  ; %8=b3
    psraw   %2, 2
    psubw   %2, %10 ; %2=b5

    MMX_SUMSUB_BA  %9, %7 ; %9=c0, %7=c7
    MMX_SUMSUB_BA  %2, %3 ; %2=c1, %3=c6
    MMX_SUMSUB_BA  %8, %1 ; %8=c2, %1=c5
    MMX_SUMSUB_BA  %4, %5 ; %4=c3, %5=c4
%endmacro

;-----------------------------------------------------------------------------
; void __cdecl x264_add8x8_idct8_sse2( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
cglobal x264_add8x8_idct8_sse2
    movdqa  xmm0, [parm2q+0x00]
    movdqa  xmm1, [parm2q+0x10]
    movdqa  xmm2, [parm2q+0x20]
    movdqa  xmm3, [parm2q+0x30]
    movdqa  xmm4, [parm2q+0x40]
    movdqa  xmm5, [parm2q+0x50]
    movdqa  xmm6, [parm2q+0x60]
    movdqa  xmm7, [parm2q+0x70]

    IDCT8_1D          xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm9, xmm8
    SSE2_TRANSPOSE8x8 xmm9, xmm1, xmm7, xmm3, xmm4, xmm0, xmm2, xmm6, xmm5
    paddw             xmm9, [pw_32 GLOBAL] ; rounding for the >>6 at the end
    IDCT8_1D          xmm9, xmm0, xmm6, xmm3, xmm5, xmm4, xmm7, xmm1, xmm8, xmm2

    MMX_ZERO  xmm15
    MMX_STORE_DIFF_8P  xmm8, xmm14, xmm15, [parm1q+0*FDEC_STRIDE]
    MMX_STORE_DIFF_8P  xmm0, xmm14, xmm15, [parm1q+1*FDEC_STRIDE]
    MMX_STORE_DIFF_8P  xmm1, xmm14, xmm15, [parm1q+2*FDEC_STRIDE]
    MMX_STORE_DIFF_8P  xmm3, xmm14, xmm15, [parm1q+3*FDEC_STRIDE]
    MMX_STORE_DIFF_8P  xmm5, xmm14, xmm15, [parm1q+4*FDEC_STRIDE]
    MMX_STORE_DIFF_8P  xmm9, xmm14, xmm15, [parm1q+5*FDEC_STRIDE]
    MMX_STORE_DIFF_8P  xmm6, xmm14, xmm15, [parm1q+6*FDEC_STRIDE]
    MMX_STORE_DIFF_8P  xmm7, xmm14, xmm15, [parm1q+7*FDEC_STRIDE]
    ret

;-----------------------------------------------------------------------------
; void __cdecl x264_sub8x8_dct_mmx( int16_t dct[4][4][4],
;                                   uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
%macro SUB_NxN_DCT 6
cglobal %1
    call %2
    add  parm1q, %3
    add  parm2q, %4-%5*FENC_STRIDE
    add  parm3q, %4-%5*FDEC_STRIDE
    call %2
    add  parm1q, %3
    add  parm2q, %4*FENC_STRIDE-%6
    add  parm3q, %4*FDEC_STRIDE-%6
    call %2
    add  parm1q, %3
    add  parm2q, %4-%5*FENC_STRIDE
    add  parm3q, %4-%5*FDEC_STRIDE
    jmp  %2
%endmacro

;-----------------------------------------------------------------------------
; void __cdecl x264_add8x8_idct_mmx( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
%macro ADD_NxN_IDCT 6
cglobal %1
    call %2
    add  parm1q, %4-%5*FDEC_STRIDE
    add  parm2q, %3
    call %2
    add  parm1q, %4*FDEC_STRIDE-%6
    add  parm2q, %3
    call %2
    add  parm1q, %4-%5*FDEC_STRIDE
    add  parm2q, %3
    jmp  %2
%endmacro

SUB_NxN_DCT  x264_sub8x8_dct_mmx,    x264_sub4x4_dct_mmx,  32, 4, 0,  4
ADD_NxN_IDCT x264_add8x8_idct_mmx,   x264_add4x4_idct_mmx, 32, 4, 0,  4
SUB_NxN_DCT  x264_sub16x16_dct_mmx,  x264_sub8x8_dct_mmx,  32, 4, 4, 12
ADD_NxN_IDCT x264_add16x16_idct_mmx, x264_add8x8_idct_mmx, 32, 4, 4, 12

SUB_NxN_DCT  x264_sub16x16_dct8_sse2,  x264_sub8x8_dct8_sse2,  128, 8, 0, 8
ADD_NxN_IDCT x264_add16x16_idct8_sse2, x264_add8x8_idct8_sse2, 128, 8, 0, 8

;-----------------------------------------------------------------------------
; void __cdecl x264_zigzag_scan_4x4_field_sse2( int level[16], int16_t dct[4][4] )
;-----------------------------------------------------------------------------
cglobal x264_zigzag_scan_4x4_field_sse2
    ; interleave each dct word into the high half of a dword, then shift:
    ; sign-extends the 16 coefficients from 16 to 32 bits
    punpcklwd xmm0, [parm2q]
    punpckhwd xmm1, [parm2q]
    punpcklwd xmm2, [parm2q+16]
    punpckhwd xmm3, [parm2q+16]
    psrad   xmm0, 16
    psrad   xmm1, 16
    psrad   xmm2, 16
    psrad   xmm3, 16
    ; store in raster order, then patch elements 2..4 into field-scan order
    ; with two overlapping unaligned stores
    movq    [parm1q   ], xmm0
    movdqa  [parm1q+16], xmm1
    movdqa  [parm1q+32], xmm2
    movhlps xmm0, xmm0
    movdqa  [parm1q+48], xmm3
    movq    [parm1q+12], xmm0
    movd    [parm1q+ 8], xmm1
    ret
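; A scalar C sketch of what the field-scan routine above produces, derived
; from its store pattern (the _c name and flat dct[16] interface are
; illustrative): a raster copy with coefficients 2..4 permuted, i.e. scan
; order 0,1,4,2,3,5,6,...,15.
;
;   static void zigzag_scan_4x4_field_c( int level[16], const int16_t dct[16] )
;   {
;       for( int i = 0; i < 16; i++ )
;           level[i] = dct[i]; /* sign-extend to 32 bits, raster order */
;       level[2] = dct[4];     /* patched by the movd [parm1q+8]  */
;       level[3] = dct[2];     /* patched by the movq [parm1q+12] */
;       level[4] = dct[3];
;   }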