⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 predict-a.asm

📁 DM642 H.264 codec
💻 ASM
📖 第 1 页 / 共 2 页
字号:
;=============================================================================
; NOTE(review): this chunk starts mid-function — the label and opening
; instructions of the preceding predictor are above the visible region.
; Also defined elsewhere in this file (not visible here):
;   PRED8x8_LOWPASS / PRED8x8_LOWPASS_XMM  — (a + 2*b + c + 2) >> 2 filter
;   STORE8x8 / STORE16x16                  — broadcast-store macros
;   pw_2, pw_3210, pw_8                    — word constants; GLOBAL = PIC wrap
;   FDEC_STRIDE                            — stride of the decoded-frame buffer
;   parm1q/parm2d/...                      — ABI-abstraction aliases for the
;                                            argument registers (presumably
;                                            rdi/esi/... on SysV — TODO confirm
;                                            against the including header)
;=============================================================================

; Tail of the preceding 8x8 predictor: shift the filtered pixels down one
; byte per row and store 8 bytes to each of the 8 destination rows.
%assign Y 0
%rep 8
    psrldq      xmm0, 1
    movq        [parm1q+Y*FDEC_STRIDE], xmm0
%assign Y (Y+1)
%endrep
    ret

;-----------------------------------------------------------------------------
; void predict_8x8_ddr_sse2( uint8_t *src, uint8_t *edge )
;-----------------------------------------------------------------------------
; 8x8 diagonal-down-right intra prediction.  Low-pass filters 16 edge pixels
; (two unaligned loads offset by 1 give the b/c filter taps), then writes the
; result diagonally: each pair of rows reuses the same vector shifted right
; by 2 bytes, so row r sees the filtered edge shifted by (7 - r) bytes.
ALIGN 16
predict_8x8_ddr_sse2:
    movdqu      xmm3, [parm2q+8]        ; edge[8..23]
    movdqu      xmm1, [parm2q+7]        ; edge[7..22] (same data shifted by 1)
    movdqa      xmm2, xmm3
    psrldq      xmm2, 1                 ; edge[9..23]
    PRED8x8_LOWPASS_XMM xmm0, xmm1, xmm2, xmm3, xmm4
    movdqa      xmm1, xmm0
    psrldq      xmm1, 1                 ; xmm1 = xmm0 >> 8 bits (next diagonal)
%assign Y 7
%rep 3
    ; store rows Y and Y-1, then advance both vectors two diagonals
    movq        [parm1q+Y*FDEC_STRIDE], xmm0
    movq        [parm1q+(Y-1)*FDEC_STRIDE], xmm1
    psrldq      xmm0, 2
    psrldq      xmm1, 2
%assign Y (Y-2)
%endrep
    movq        [parm1q+1*FDEC_STRIDE], xmm0
    movq        [parm1q+0*FDEC_STRIDE], xmm1
    ret

;-----------------------------------------------------------------------------
; void predict_8x8_vl_sse2( uint8_t *src, uint8_t *edge )
;-----------------------------------------------------------------------------
; 8x8 vertical-left intra prediction.  Even rows come from the 2-tap average
; (xmm3), odd rows from the 3-tap low-pass (xmm0); each output row pair is
; the previous pair shifted one pixel further into the top edge.
ALIGN 16
predict_8x8_vl_sse2:
    movdqa      xmm4, [parm2q+16]       ; top edge t0..t15 (aligned)
    movdqa      xmm2, xmm4
    movdqa      xmm1, xmm4
    movdqa      xmm3, xmm4
    psrldq      xmm2, 1                 ; t1..t15
    pslldq      xmm1, 1                 ; t0.. shifted left (t(-1) slot)
    pavgb       xmm3, xmm2              ; rounded 2-tap average
    PRED8x8_LOWPASS_XMM xmm0, xmm1, xmm2, xmm4, xmm5
; xmm0: (t0 + 2*t1 + t2 + 2) >> 2
; xmm3: (t0 + t1 + 1) >> 1
%assign Y 0
%rep 3
    ; rows Y (average) and Y+1 (low-pass), then shift both one pixel
    psrldq      xmm0, 1
    movq        [parm1q+ Y   *FDEC_STRIDE], xmm3
    movq        [parm1q+(Y+1)*FDEC_STRIDE], xmm0
    psrldq      xmm3, 1
%assign Y (Y+2)
%endrep
    psrldq      xmm0, 1
    movq        [parm1q+ Y   *FDEC_STRIDE], xmm3
    movq        [parm1q+(Y+1)*FDEC_STRIDE], xmm0
    ret

;-----------------------------------------------------------------------------
; void predict_8x8_vr_core_mmxext( uint8_t *src, uint8_t *edge )
;-----------------------------------------------------------------------------
; 8x8 vertical-right prediction, core only ("_core": the left-border pixels
; are presumably filled by C caller code — TODO confirm).
; fills only some pixels:
; f01234567
; 0........
; 1,,,,,,,,
; 2 .......
; 3 ,,,,,,,
; 4  ......
; 5  ,,,,,,
; 6   .....
; 7   ,,,,,
ALIGN 16
predict_8x8_vr_core_mmxext:
    movq        mm2, [parm2q+16]        ; edge[16..23]
    movq        mm3, [parm2q+15]        ; edge[15..22]
    movq        mm1, [parm2q+14]        ; edge[14..21]
    movq        mm4, mm3
    pavgb       mm3, mm2                ; 2-tap rounded average ('.' rows)
    PRED8x8_LOWPASS mm0, mm1, mm2, mm4, mm7  ; 3-tap low-pass (',' rows)
%assign Y 0
%rep 3
    ; store a row pair, then shift both vectors left one pixel so the next
    ; pair starts one column further right (the diagonal staircase above)
    movq        [parm1q+ Y   *FDEC_STRIDE], mm3
    movq        [parm1q+(Y+1)*FDEC_STRIDE], mm0
    psllq       mm3, 8
    psllq       mm0, 8
%assign Y (Y+2)
%endrep
    movq        [parm1q+ Y   *FDEC_STRIDE], mm3
    movq        [parm1q+(Y+1)*FDEC_STRIDE], mm0
    ret

;-----------------------------------------------------------------------------
; void predict_8x8c_v_mmx( uint8_t *src )
;-----------------------------------------------------------------------------
; 8x8 chroma vertical prediction: replicate the row above src into all 8 rows.
ALIGN 16
predict_8x8c_v_mmx :
    movq        mm0, [parm1q - FDEC_STRIDE]
    STORE8x8    mm0, mm0
    ret

;-----------------------------------------------------------------------------
; void predict_8x8c_dc_core_mmxext( uint8_t *src, int s2, int s3 )
;-----------------------------------------------------------------------------
; 8x8 chroma DC prediction.  s0/s1 = sums of the left/right halves of the top
; row (computed here via psadbw); s2/s3 are the left-column sums passed in by
; the caller.  Produces the four per-quadrant DC bytes and broadcasts them:
;   dc0 = (s0+s2+4)>>3   dc1 = (s1+2)>>2   (top-left / top-right)
;   dc2 = (s3+2)>>2      dc3 = (s1+s3+4)>>3 (bottom-left / bottom-right)
; (rounding constants live in parm2d and pw_2 — presumably s2 already carries
; the +4 bias from the caller; TODO confirm against the C wrapper)
ALIGN 16
predict_8x8c_dc_core_mmxext:
    movq        mm0, [parm1q - FDEC_STRIDE]  ; top row, 8 pixels
    pxor        mm1, mm1
    pxor        mm2, mm2
    punpckhbw   mm1, mm0                ; high 4 pixels (as high bytes)
    punpcklbw   mm0, mm2                ; low 4 pixels zero-extended
    psadbw      mm1, mm2        ; s1
    psadbw      mm0, mm2        ; s0
    movd        mm4, parm2d             ; s2 (left-top sum + bias)
    movd        mm5, parm3d             ; s3 (left-bottom sum)
    paddw       mm0, mm4
    pshufw      mm2, mm5, 0             ; broadcast s3
    psrlw       mm0, 3
    paddw       mm1, [pw_2 GLOBAL]      ; s1 + 2 (round for >>2)
    movq        mm3, mm2
    pshufw      mm1, mm1, 0
    pshufw      mm0, mm0, 0     ; dc0 (w)
    paddw       mm3, mm1
    psrlw       mm3, 3          ; dc3 (w)
    psrlw       mm2, 2          ; dc2 (w)
    psrlw       mm1, 2          ; dc1 (w)
    packuswb    mm0, mm1        ; dc0,dc1 (b)
    packuswb    mm2, mm3        ; dc2,dc3 (b)
    STORE8x8    mm0, mm2                ; rows 0-3 from mm0, 4-7 from mm2
    ret

;-----------------------------------------------------------------------------
; void predict_8x8c_p_core_mmxext( uint8_t *src, int i00, int b, int c )
;-----------------------------------------------------------------------------
; 8x8 chroma plane prediction: pixel(x,y) = clip((i00 + b*x + c*y) >> 5).
; Two word vectors hold columns 0-3 and 4-7; each loop iteration shifts,
; packs and stores one row, then adds c to advance one row down.
ALIGN 16
predict_8x8c_p_core_mmxext:
    movd        mm0, parm2d             ; i00
    movd        mm2, parm3d             ; b
    movd        mm4, parm4d             ; c
    pshufw      mm0, mm0, 0
    pshufw      mm2, mm2, 0
    pshufw      mm4, mm4, 0
    movq        mm1, mm2
    pmullw      mm2, [pw_3210 GLOBAL]   ; {0*b, 1*b, 2*b, 3*b}
    psllw       mm1, 2                  ; 4*b broadcast
    paddsw      mm0, mm2        ; mm0 = {i+0*b, i+1*b, i+2*b, i+3*b}
    paddsw      mm1, mm0        ; mm1 = {i+4*b, i+5*b, i+6*b, i+7*b}
    mov         eax, 8                  ; 8 rows
ALIGN 4
.loop:
    movq        mm5, mm0
    movq        mm6, mm1
    psraw       mm5, 5                  ; >>5 with sign
    psraw       mm6, 5
    packuswb    mm5, mm6                ; saturate to [0,255] and pack 8 pixels
    movq        [parm1q], mm5
    paddsw      mm0, mm4                ; += c : next row
    paddsw      mm1, mm4
    add         parm1q, FDEC_STRIDE
    dec         eax
    jg          .loop
    nop
    ret

;-----------------------------------------------------------------------------
; void predict_16x16_p_core_mmxext( uint8_t *src, int i00, int b, int c )
;-----------------------------------------------------------------------------
; 16x16 luma plane prediction — same scheme as the 8x8c version, but with
; four word vectors covering columns 0-3 / 4-7 / 8-11 / 12-15 and 16 rows.
ALIGN 16
predict_16x16_p_core_mmxext:
    movd        mm0, parm2d             ; i00
    movd        mm2, parm3d             ; b
    movd        mm4, parm4d             ; c
    pshufw      mm0, mm0, 0
    pshufw      mm2, mm2, 0
    pshufw      mm4, mm4, 0
    movq        mm5, mm2
    movq        mm1, mm2
    pmullw      mm5, [pw_3210 GLOBAL]   ; {0*b, 1*b, 2*b, 3*b}
    psllw       mm2, 3                  ; 8*b
    psllw       mm1, 2                  ; 4*b
    movq        mm3, mm2
    paddsw      mm0, mm5        ; mm0 = {i+ 0*b, i+ 1*b, i+ 2*b, i+ 3*b}
    paddsw      mm1, mm0        ; mm1 = {i+ 4*b, i+ 5*b, i+ 6*b, i+ 7*b}
    paddsw      mm2, mm0        ; mm2 = {i+ 8*b, i+ 9*b, i+10*b, i+11*b}
    paddsw      mm3, mm1        ; mm3 = {i+12*b, i+13*b, i+14*b, i+15*b}
    mov         eax, 16                 ; 16 rows
ALIGN 4
.loop:
    ; left half of the row (columns 0-7)
    movq        mm5, mm0
    movq        mm6, mm1
    psraw       mm5, 5
    psraw       mm6, 5
    packuswb    mm5, mm6
    movq        [parm1q], mm5
    ; right half of the row (columns 8-15)
    movq        mm5, mm2
    movq        mm6, mm3
    psraw       mm5, 5
    psraw       mm6, 5
    packuswb    mm5, mm6
    movq        [parm1q+8], mm5
    ; advance all four column vectors one row down
    paddsw      mm0, mm4
    paddsw      mm1, mm4
    paddsw      mm2, mm4
    paddsw      mm3, mm4
    add         parm1q, FDEC_STRIDE
    dec         eax
    jg          .loop
    nop
    ret

;-----------------------------------------------------------------------------
; void predict_16x16_v_mmx( uint8_t *src )
;-----------------------------------------------------------------------------
; 16x16 vertical prediction: replicate the 16-pixel row above src everywhere.
ALIGN 16
predict_16x16_v_mmx :
    sub         parm1q, FDEC_STRIDE
    movq        mm0, [parm1q]
    movq        mm1, [parm1q + 8]
    STORE16x16  mm0, mm1
    ret

;-----------------------------------------------------------------------------
; void predict_16x16_dc_core_mmxext( uint8_t *src, int i_dc_left )
;-----------------------------------------------------------------------------
; Shared body for the two 16x16 DC predictors below.
; %1 = rounding bias (plus left-column sum, if any), %2 = final shift.
; Sums the 16 top-row pixels with two psadbw, adds the bias, shifts,
; broadcasts the DC byte, and fills the whole 16x16 block.
%macro PRED16x16_DC 2
    sub         parm1q, FDEC_STRIDE
    pxor        mm0, mm0
    pxor        mm1, mm1
    psadbw      mm0, [parm1q]           ; sum of top pixels 0-7
    psadbw      mm1, [parm1q + 8]       ; sum of top pixels 8-15
    paddusw     mm0, mm1
    paddusw     mm0, %1
    psrlw       mm0, %2                       ; dc
    pshufw      mm0, mm0, 0
    packuswb    mm0, mm0                      ; dc in bytes
    STORE16x16  mm0, mm0
%endmacro

; top + left available: dc = (sum_top + i_dc_left) >> 5
; (i_dc_left presumably carries sum_left + 16 bias — TODO confirm in caller)
ALIGN 16
predict_16x16_dc_core_mmxext:
    movd         mm2, parm2d
    PRED16x16_DC mm2, 5
    ret

; top only: dc = (sum_top + 8) >> 4
ALIGN 16
predict_16x16_dc_top_mmxext:
    PRED16x16_DC [pw_8 GLOBAL], 4
    ret

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -