⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 pixel-sse2.asm

📁 linux下编译已经通过
💻 ASM
📖 第 1 页 / 共 2 页
字号:
    ; NOTE(review): the eleven SBUTTERFLY lines below are the tail of a
    ; transpose macro whose %macro header lies before this chunk of the file.
    ; SBUTTERFLY itself is defined elsewhere (presumably a punpckl/h interleave
    ; helper — TODO confirm against the full file). Kept verbatim.
    SBUTTERFLY dqa, wd, %3, %4, %2
    SBUTTERFLY dqa, wd, %5, %6, %4
    SBUTTERFLY dqa, wd, %7, %8, %6
    SBUTTERFLY dqa, dq, %1, %3, %8
    SBUTTERFLY dqa, dq, %9, %2, %3
    SBUTTERFLY dqa, dq, %5, %7, %2
    SBUTTERFLY dqa, dq, %4, %6, %7
    SBUTTERFLY dqa, qdq, %1, %5, %6
    SBUTTERFLY dqa, qdq, %9, %4, %5
    SBUTTERFLY dqa, qdq, %8, %2, %4
    SBUTTERFLY dqa, qdq, %3, %7, %2
%endmacro

; Load 8 pixels from each of two rows and produce their difference as
; 8 signed words: %1 = pix1[i] - pix2[i].
; The interleave trick avoids needing a zero register for byte->word
; widening: after the two punpcklbw ops each word of %1 is (a | b<<8) and
; each word of %2 is (b | b<<8), so psubw yields exactly a-b per word.
%macro LOAD_DIFF_8P 4  ; MMP, MMT, [pix1], [pix2]
    movq        %1, %3
    movq        %2, %4
    punpcklbw   %1, %2
    punpcklbw   %2, %2
    psubw       %1, %2
%endmacro

; Load four rows of 8-pixel differences into %1-%4 (temps %5, %6).
; parm1q/parm3q are the pix1/pix2 base pointers, parm2q/parm4q their
; strides; r10/r11 must already hold 3*stride (set by SATD_START or the
; sa8d prologue). parm*q are register aliases defined elsewhere in the
; file (from the code below, parm1q = rdi and parm3q = rdx).
%macro LOAD_DIFF_4x8P 6 ; 4x dest, 2x temp
    LOAD_DIFF_8P %1, %5, [parm1q],          [parm3q]
    LOAD_DIFF_8P %2, %6, [parm1q+parm2q],   [parm3q+parm4q]
    LOAD_DIFF_8P %3, %5, [parm1q+2*parm2q], [parm3q+2*parm4q]
    LOAD_DIFF_8P %4, %6, [parm1q+r10],      [parm3q+r11]
%endmacro

; Accumulate |%1| into %3 with unsigned saturation.
; abs(x) is computed as max(x, -x): negate into the junk reg, pmaxsw.
%macro SUM1x8_SSE2 3    ; 01 junk sum
    pxor    %2, %2
    psubw   %2, %1
    pmaxsw  %1, %2
    paddusw %3, %1
%endmacro

; Accumulate |%1| + |%2| into %4 (same max(x,-x) idiom, shared junk reg).
%macro SUM4x4_SSE2 4    ; 02 13 junk sum
    pxor    %3, %3
    psubw   %3, %1
    pmaxsw  %1, %3
    pxor    %3, %3
    psubw   %3, %2
    pmaxsw  %2, %3
    paddusw %4, %1
    paddusw %4, %2
%endmacro

; Accumulate |%1|+|%2|+|%4|+|%5| into %7 (two junk regs, interleaved for
; instruction-level parallelism). SSE2 fallback for SUM8x4 below.
%macro SUM8x4_SSE2 7    ; a02 a13 junk1 b02 b13 junk2 (1=4 2=5 3=6) sum
    pxor    %3, %3
    pxor    %6, %6
    psubw   %3, %1
    psubw   %6, %4
    pmaxsw  %1, %3
    pmaxsw  %4, %6
    pxor    %3, %3
    pxor    %6, %6
    psubw   %3, %2
    psubw   %6, %5
    pmaxsw  %2, %3
    pmaxsw  %5, %6
    paddusw %1, %2
    paddusw %4, %5
    paddusw %7, %1
    paddusw %7, %4
%endmacro

; SSSE3 variant of SUM8x4: pabsw replaces the pxor/psubw/pmaxsw sequence,
; so the junk registers (%3, %6) are unused here.
%macro SUM8x4_SSSE3 7    ; a02 a13 . b02 b13 . sum
    pabsw   %1, %1
    pabsw   %2, %2
    pabsw   %4, %4
    pabsw   %5, %5
    paddusw %1, %2
    paddusw %4, %5
    paddusw %7, %1
    paddusw %7, %4
%endmacro

; One SATD step over two 8x4 half-blocks: load diffs, advance both pixel
; pointers by 4 rows, Hadamard-transform rows, transpose, Hadamard again,
; then accumulate absolute values into xmm6 via SUM8x4 (%define'd below to
; the SSE2 or SSSE3 implementation). HADAMARD1x4 and TRANSPOSE2x4x4W are
; defined elsewhere in the file.
%macro SATD_TWO_SSE2 0
    LOAD_DIFF_4x8P    xmm0, xmm1, xmm2, xmm3, xmm4, xmm5
    lea     parm1q, [parm1q+4*parm2q]
    lea     parm3q, [parm3q+4*parm4q]
    HADAMARD1x4       xmm0, xmm1, xmm2, xmm3
    TRANSPOSE2x4x4W   xmm0, xmm1, xmm2, xmm3, xmm4
    HADAMARD1x4       xmm0, xmm1, xmm2, xmm3
    SUM8x4            xmm0, xmm1, xmm4, xmm2, xmm3, xmm5, xmm6
%endmacro

; Common SATD prologue: clear the xmm6 accumulator and precompute
; 3*stride for both inputs (used by LOAD_DIFF_4x8P's fourth row).
%macro SATD_START 0
    pxor    xmm6, xmm6
    lea     r10,  [3*parm2q]
    lea     r11,  [3*parm4q]
%endmacro

; Common SATD epilogue: halve each word sum, horizontally add the eight
; words of xmm6 (HADDW is defined elsewhere; xmm7 is its temp) and return
; the scalar result in eax.
%macro SATD_END 0
    psrlw   xmm6, 1
    HADDW   xmm6, xmm7
    movd    eax,  xmm6
    ret
%endmacro

; Template instantiating the whole SATD/SA8D function family; %1 is the
; ISA suffix (sse2 or ssse3) and selects the SUM8x4 flavor via %define.
%macro SATDS 1
;-----------------------------------------------------------------------------
;   int x264_pixel_satd_16x16_%1 (uint8_t *, int, uint8_t *, int )
;-----------------------------------------------------------------------------
cglobal x264_pixel_satd_16x16_%1
    SATD_START
    mov     r8,  rdi                ; save pix1 base (parm1q == rdi)
    mov     r9,  rdx                ; save pix2 base (parm3q == rdx)
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    lea     rdi, [r8+8]             ; right 8-pixel column of each block
    lea     rdx, [r9+8]
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_END
;-----------------------------------------------------------------------------
;   int x264_pixel_satd_8x16_%1 (uint8_t *, int, uint8_t *, int )
;-----------------------------------------------------------------------------
cglobal x264_pixel_satd_8x16_%1
    SATD_START
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_END
;-----------------------------------------------------------------------------
;   int x264_pixel_satd_16x8_%1 (uint8_t *, int, uint8_t *, int )
;-----------------------------------------------------------------------------
cglobal x264_pixel_satd_16x8_%1
    SATD_START
    mov     r8,  rdi
    mov     r9,  rdx
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    lea     rdi, [r8+8]
    lea     rdx, [r9+8]
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_END
;-----------------------------------------------------------------------------
;   int x264_pixel_satd_8x8_%1 (uint8_t *, int, uint8_t *, int )
;-----------------------------------------------------------------------------
cglobal x264_pixel_satd_8x8_%1
    SATD_START
    SATD_TWO_SSE2
    SATD_TWO_SSE2
    SATD_END
;-----------------------------------------------------------------------------
;   int x264_pixel_satd_8x4_%1 (uint8_t *, int, uint8_t *, int )
;-----------------------------------------------------------------------------
cglobal x264_pixel_satd_8x4_%1
    SATD_START
    SATD_TWO_SSE2
    SATD_END
;-----------------------------------------------------------------------------
;   int x264_pixel_sa8d_8x8_%1( uint8_t *, int, uint8_t *, int )
;-----------------------------------------------------------------------------
; 8x8 Hadamard on the full difference block, both directions, then sum of
; absolute values. Also adds the raw (pre-rounding) sum into r8d so the
; 16x16 wrapper below can round once over all four sub-blocks.
; HADAMARD1x8 / TRANSPOSE8x8 are defined elsewhere in the file; note the
; second HADAMARD1x8 consumes the registers in transpose-permuted order.
cglobal x264_pixel_sa8d_8x8_%1
    lea  r10, [3*parm2q]
    lea  r11, [3*parm4q]
    LOAD_DIFF_4x8P xmm0, xmm1, xmm2, xmm3, xmm8, xmm8
    lea  parm1q, [parm1q+4*parm2q]
    lea  parm3q, [parm3q+4*parm4q]
    LOAD_DIFF_4x8P xmm4, xmm5, xmm6, xmm7, xmm8, xmm8
    HADAMARD1x8  xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
    TRANSPOSE8x8 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8
    HADAMARD1x8  xmm0, xmm5, xmm7, xmm3, xmm8, xmm4, xmm2, xmm1
    pxor            xmm10, xmm10
    SUM8x4          xmm0, xmm1, xmm6, xmm2, xmm3, xmm9, xmm10
    SUM8x4          xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10
    psrlw           xmm10, 1
    HADDW           xmm10, xmm0
    movd eax, xmm10
    add r8d, eax ; preserve rounding for 16x16
    add eax, 1   ; return (sum+1)>>1
    shr eax, 1
    ret
;-----------------------------------------------------------------------------
;   int x264_pixel_sa8d_16x16_%1( uint8_t *, int, uint8_t *, int )
;-----------------------------------------------------------------------------
;; violates calling convention: accumulates across the four sa8d_8x8 calls
;; in r8d and relies on the callee leaving parm1q/parm3q advanced by 8 rows.
cglobal x264_pixel_sa8d_16x16_%1
    xor  r8d, r8d
    call x264_pixel_sa8d_8x8_%1 ; pix[0]
    lea  parm1q, [parm1q+4*parm2q]
    lea  parm3q, [parm3q+4*parm4q]
    call x264_pixel_sa8d_8x8_%1 ; pix[8*stride]
    lea  r10, [3*parm2q-2]      ; rewind to top and step 8 pixels right:
    lea  r11, [3*parm4q-2]      ; (3*stride-2)<<2 == 12*stride - 8
    shl  r10, 2
    shl  r11, 2
    sub  parm1q, r10
    sub  parm3q, r11
    call x264_pixel_sa8d_8x8_%1 ; pix[8]
    lea  parm1q, [parm1q+4*parm2q]
    lea  parm3q, [parm3q+4*parm4q]
    call x264_pixel_sa8d_8x8_%1 ; pix[8*stride+8]
    mov  eax, r8d               ; round the combined sum once: (sum+1)>>1
    add  eax, 1
    shr  eax, 1
    ret
%endmacro ; SATDS

; Instantiate the family for SSE2, and for SSSE3 when available.
%define SUM8x4 SUM8x4_SSE2
SATDS sse2
%ifdef HAVE_SSE3
%define SUM8x4 SUM8x4_SSSE3
SATDS ssse3
%endif

;-----------------------------------------------------------------------------
;  void x264_intra_sa8d_x3_8x8_core_sse2( uint8_t *fenc, int16_t edges[2][8], int *res )
;-----------------------------------------------------------------------------
; Computes the sa8d cost of the V, H and DC 8x8 intra predictions in one
; pass: one 8x8 Hadamard of the source block, then the edge terms are
; folded in before the per-mode absolute sums. Results are stored to
; res[0]=V, res[1]=H, res[2]=DC. edges[0] is the left edge, edges[1] the
; top edge (from the loads at [parm2q+0] / [parm2q+16]).
cglobal x264_intra_sa8d_x3_8x8_core_sse2
    ; 8x8 hadamard
    pxor        xmm4, xmm4
    movq        xmm0, [parm1q+0*FENC_STRIDE]
    movq        xmm7, [parm1q+1*FENC_STRIDE]
    movq        xmm6, [parm1q+2*FENC_STRIDE]
    movq        xmm3, [parm1q+3*FENC_STRIDE]
    movq        xmm5, [parm1q+4*FENC_STRIDE]
    movq        xmm1, [parm1q+5*FENC_STRIDE]
    movq        xmm8, [parm1q+6*FENC_STRIDE]
    movq        xmm2, [parm1q+7*FENC_STRIDE]
    punpcklbw   xmm0, xmm4          ; widen all 8 rows u8 -> u16
    punpcklbw   xmm7, xmm4
    punpcklbw   xmm6, xmm4
    punpcklbw   xmm3, xmm4
    punpcklbw   xmm5, xmm4
    punpcklbw   xmm1, xmm4
    punpcklbw   xmm8, xmm4
    punpcklbw   xmm2, xmm4
    HADAMARD1x8 xmm0, xmm7, xmm6, xmm3, xmm5, xmm1, xmm8, xmm2
    TRANSPOSE8x8 xmm0, xmm7, xmm6, xmm3, xmm5, xmm1, xmm8, xmm2, xmm4
    HADAMARD1x8 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
    ; dc
    ; edi = (((left_dc + top_dc + 8) & -16) << 2); the edge words at
    ; offsets 0 and 16 presumably hold precomputed edge sums — TODO
    ; confirm against the caller.
    movzx       edi, word [parm2q+0]
    add          di, word [parm2q+16]
    add         edi, 8
    and         edi, -16
    shl         edi, 2
    pxor        xmm15, xmm15
    ; Sum |coeff| over rows 1..7 (all but the DC row xmm0) into xmm15,
    ; working on copies so xmm1..xmm7 stay live for the per-mode terms.
    movdqa      xmm8, xmm2
    movdqa      xmm9, xmm3
    movdqa      xmm10, xmm4
    movdqa      xmm11, xmm5
    SUM8x4_SSE2 xmm8, xmm9, xmm12, xmm10, xmm11, xmm13, xmm15
    movdqa      xmm8, xmm6
    movdqa      xmm9, xmm7
    SUM4x4_SSE2 xmm8, xmm9, xmm10, xmm15
    movdqa      xmm8, xmm1
    SUM1x8_SSE2 xmm8, xmm10, xmm15
    movdqa      xmm14, xmm15 ; 7x8 sum
    movdqa      xmm8, [parm2q+0] ; left edge
    movd        xmm9, edi
    psllw       xmm8, 3
    psubw       xmm8, xmm0       ; H mode: 8*left_edge - dc row
    psubw       xmm9, xmm0       ; DC mode: dc term - dc row
    SUM1x8_SSE2 xmm8, xmm10, xmm14
    SUM1x8_SSE2 xmm9, xmm11, xmm15 ; 1x8 sum
    ; Gather the first column of the coefficient matrix into xmm0
    punpcklwd   xmm0, xmm1
    punpcklwd   xmm2, xmm3
    punpcklwd   xmm4, xmm5
    punpcklwd   xmm6, xmm7
    punpckldq   xmm0, xmm2
    punpckldq   xmm4, xmm6
    punpcklqdq  xmm0, xmm4 ; transpose
    movdqa      xmm1, [parm2q+16] ; top edge
    movdqa      xmm2, xmm15
    psllw       xmm1, 3
    psrldq      xmm2, 2     ; 8x7 sum
    psubw       xmm0, xmm1  ; 8x1 sum
    SUM1x8_SSE2 xmm0, xmm1, xmm2
    ; Store the three costs, each rounded as (sum+2)>>2
    HADDW       xmm14, xmm3
    movd        eax, xmm14
    add         eax, 2
    shr         eax, 2
    mov         [parm3q+4], eax ; i8x8_h sa8d
    HADDW       xmm15, xmm4
    movd        eax, xmm15
    add         eax, 2
    shr         eax, 2
    mov         [parm3q+8], eax ; i8x8_dc sa8d
    HADDW       xmm2, xmm5
    movd        eax, xmm2
    add         eax, 2
    shr         eax, 2
    mov         [parm3q+0], eax ; i8x8_v sa8d
    ret

;-----------------------------------------------------------------------------
; void x264_pixel_ssim_4x4x2_core_sse2( const uint8_t *pix1, int stride1,
;                                       const uint8_t *pix2, int stride2, int sums[2][4] );
;-----------------------------------------------------------------------------
; For two adjacent 4x4 blocks at once, accumulate the SSIM partial sums
; s1 (sum of pix1), s2 (sum of pix2), ss (sum of squares of both) and
; s12 (sum of cross products), then store them per-block into sums[2][4].
; Register roles in the loop: xmm1=s1, xmm2=s2, xmm3=ss, xmm4=s12,
; xmm0=zero for byte widening, xmm8=pw_1 for the horizontal word adds.
cglobal x264_pixel_ssim_4x4x2_core_sse2
    pxor      xmm0, xmm0
    pxor      xmm1, xmm1
    pxor      xmm2, xmm2
    pxor      xmm3, xmm3
    pxor      xmm4, xmm4
    movdqa    xmm8, [pw_1 GLOBAL]
%rep 4
    movq      xmm5, [parm1q]
    movq      xmm6, [parm3q]
    punpcklbw xmm5, xmm0
    punpcklbw xmm6, xmm0
    paddw     xmm1, xmm5
    paddw     xmm2, xmm6
    movdqa    xmm7, xmm5
    pmaddwd   xmm5, xmm5        ; pix1^2
    pmaddwd   xmm7, xmm6        ; pix1*pix2
    pmaddwd   xmm6, xmm6        ; pix2^2
    paddd     xmm3, xmm5
    paddd     xmm4, xmm7
    paddd     xmm3, xmm6        ; ss accumulates both squares
    add       parm1q, parm2q
    add       parm3q, parm4q
%endrep
    ; Horizontal reduction, equivalent to:
    ; PHADDW xmm1, xmm2
    ; PHADDD xmm3, xmm4
    pshufd    xmm5, xmm3, 0xB1
    pmaddwd   xmm1, xmm8
    pmaddwd   xmm2, xmm8
    pshufd    xmm6, xmm4, 0xB1
    packssdw  xmm1, xmm2
    paddd     xmm3, xmm5
    pmaddwd   xmm1, xmm8
    paddd     xmm4, xmm6
    pshufd    xmm1, xmm1, 0xD8
    movdqa    xmm5, xmm3
    punpckldq xmm3, xmm4
    punpckhdq xmm5, xmm4
    movq      [parm5q+ 0], xmm1     ; sums[0] = { s1, s2 } of block 0
    movq      [parm5q+ 8], xmm3
    psrldq    xmm1, 8
    movq      [parm5q+16], xmm1     ; sums[1] = { s1, s2 } of block 1
    movq      [parm5q+24], xmm5
    ret

;-----------------------------------------------------------------------------
; float x264_pixel_ssim_end4_sse2( int sum0[5][4], int sum1[5][4], int width );
;-----------------------------------------------------------------------------
; Combine the partial sums of up to four adjacent block pairs into SSIM
; values, mask off the lanes beyond 'width', and return the sum of the
; (up to four) SSIM terms as a scalar float in xmm0.
cglobal x264_pixel_ssim_end4_sse2
    movdqa   xmm0, [parm1q+ 0]
    movdqa   xmm1, [parm1q+16]
    movdqa   xmm2, [parm1q+32]
    movdqa   xmm3, [parm1q+48]
    movdqa   xmm4, [parm1q+64]
    paddd    xmm0, [parm2q+ 0]      ; pair each block with its neighbor
    paddd    xmm1, [parm2q+16]
    paddd    xmm2, [parm2q+32]
    paddd    xmm3, [parm2q+48]
    paddd    xmm4, [parm2q+64]
    paddd    xmm0, xmm1
    paddd    xmm1, xmm2
    paddd    xmm2, xmm3
    paddd    xmm3, xmm4
    movdqa   xmm5, [ssim_c1 GLOBAL]
    movdqa   xmm6, [ssim_c2 GLOBAL]
    TRANSPOSE4x4D  xmm0, xmm1, xmm2, xmm3, xmm4
;   s1=mm0, s2=mm3, ss=mm4, s12=mm2
    movdqa   xmm1, xmm3
    pslld    xmm3, 16
    pmaddwd  xmm1, xmm0  ; s1*s2
    por      xmm0, xmm3  ; pack s1|s2 as words so pmaddwd squares both
    pmaddwd  xmm0, xmm0  ; s1*s1 + s2*s2
    pslld    xmm1, 1
    pslld    xmm2, 7
    pslld    xmm4, 6
    psubd    xmm2, xmm1  ; covar*2
    psubd    xmm4, xmm0  ; vars
    paddd    xmm0, xmm5
    paddd    xmm1, xmm5
    paddd    xmm2, xmm6
    paddd    xmm4, xmm6
    cvtdq2ps xmm0, xmm0  ; (float)(s1*s1 + s2*s2 + ssim_c1)
    cvtdq2ps xmm1, xmm1  ; (float)(s1*s2*2 + ssim_c1)
    cvtdq2ps xmm2, xmm2  ; (float)(covar*2 + ssim_c2)
    cvtdq2ps xmm4, xmm4  ; (float)(vars + ssim_c2)
    mulps    xmm1, xmm2  ; ssim numerator
    mulps    xmm0, xmm4  ; ssim denominator
    divps    xmm1, xmm0  ; ssim
    ; Mask to the first 'width' lanes using a sliding window into mask_ff
    neg      parm3q
%ifdef __PIC__
    lea      rax,  [mask_ff + 16 GLOBAL]
    movdqu   xmm3, [rax + parm3q*4]
%else
    movdqu   xmm3, [mask_ff + parm3q*4 + 16]
%endif
    pand     xmm1, xmm3
    ; Horizontal sum of the four ssim floats into the low lane of xmm0
    movhlps  xmm0, xmm1
    addps    xmm0, xmm1
    pshuflw  xmm1, xmm0, 0xE
    addss    xmm0, xmm1
    ret

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -