⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 pixel-a.asm

📁 linux下编译已经通过
💻 ASM
📖 第 1 页 / 共 3 页
字号:
;*****************************************************************************
;* pixel.asm: h264 encoder library
;*****************************************************************************
;* Copyright (C) 2003 x264 project
;* $Id: pixel.asm,v 1.1 2004/06/03 19:27:07 fenrir Exp $
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.
;*****************************************************************************

BITS 32

;=============================================================================
; Macros and other preprocessor constants
;=============================================================================

%include "i386inc.asm"

; sad
;
; Register convention inside the plain SAD macros (set up by SAD_START below):
;   eax = pix1, ebx = stride1, ecx = pix2, edx = stride2, mm0 = running SAD.
; Each SAD_INC_2x<W>P macro accumulates the SAD of two rows of width W and
; advances both pixel pointers by two strides.

; Two rows of a 16-wide block: four 8-byte psadbw ops per pass.
%macro SAD_INC_2x16P 0
    movq    mm1,    [eax]
    movq    mm2,    [eax+8]
    movq    mm3,    [eax+ebx]
    movq    mm4,    [eax+ebx+8]
    psadbw  mm1,    [ecx]           ; psadbw: sum of |a-b| over 8 bytes
    psadbw  mm2,    [ecx+8]
    psadbw  mm3,    [ecx+edx]
    psadbw  mm4,    [ecx+edx+8]
    lea     eax,    [eax+2*ebx]     ; pix1 += 2*stride1
    paddw   mm1,    mm2
    paddw   mm3,    mm4
    lea     ecx,    [ecx+2*edx]     ; pix2 += 2*stride2
    paddw   mm0,    mm1
    paddw   mm0,    mm3
%endmacro

; Two rows of an 8-wide block.
%macro SAD_INC_2x8P 0
    movq    mm1,    [eax]
    movq    mm2,    [eax+ebx]
    psadbw  mm1,    [ecx]
    psadbw  mm2,    [ecx+edx]
    lea     eax,    [eax+2*ebx]
    paddw   mm0,    mm1
    paddw   mm0,    mm2
    lea     ecx,    [ecx+2*edx]
%endmacro

; Two rows of a 4-wide block: pack both 4-byte rows into one mmx reg
; (punpckldq) so a single psadbw covers both rows.
%macro SAD_INC_2x4P 0
    movd    mm1,    [eax]
    movd    mm2,    [ecx]
    punpckldq mm1,  [eax+ebx]
    punpckldq mm2,  [ecx+edx]
    psadbw  mm1,    mm2
    paddw   mm0,    mm1
    lea     eax,    [eax+2*ebx]
    lea     ecx,    [ecx+2*edx]
%endmacro

; sad x3 / x4
;
; The x3/x4 variants score one encode block (edi = fenc, fixed FENC_STRIDE
; row pitch) against 3 or 4 candidate blocks that share one stride (esi).
; Per the C prototype further below:
;   x3: edi=fenc  eax/ecx/edx = pix0/pix1/pix2      esi=i_stride
;   x4: edi=fenc  eax/ebx/ecx/edx = pix0..pix3      esi=i_stride
; Running sums: x3 in mm0/mm1/mm2; x4 in mm0/mm1/mm2/mm3.

; Load the cdecl stack args for the x3 functions (after saving edi/esi).
%macro SAD_X3_START 0
    push    edi
    push    esi
    mov     edi,    [esp+12]        ; fenc
    mov     eax,    [esp+16]        ; pix0
    mov     ecx,    [esp+20]        ; pix1
    mov     edx,    [esp+24]        ; pix2
    mov     esi,    [esp+28]        ; i_stride
%endmacro

; First row: initialize the three accumulators directly from psadbw.
%macro SAD_X3_START_1x8P 0
    movq    mm3,    [edi]
    movq    mm0,    [eax]
    movq    mm1,    [ecx]
    movq    mm2,    [edx]
    psadbw  mm0,    mm3
    psadbw  mm1,    mm3
    psadbw  mm2,    mm3
%endmacro

; One 8-wide row at fenc offset %1 / candidate offset %2, added to mm0..mm2.
%macro SAD_X3_1x8P 2
    movq    mm3,    [edi+%1]
    movq    mm4,    [eax+%2]
    movq    mm5,    [ecx+%2]
    movq    mm6,    [edx+%2]
    psadbw  mm4,    mm3
    psadbw  mm5,    mm3
    psadbw  mm6,    mm3
    paddw   mm0,    mm4
    paddw   mm1,    mm5
    paddw   mm2,    mm6
%endmacro

; Two 4-wide rows, results left in the three caller-chosen regs %1..%3
; (so the same macro can either initialize or feed the accumulators).
%macro SAD_X3_START_2x4P 3
    movd      mm3,  [edi]
    movd      %1,   [eax]
    movd      %2,   [ecx]
    movd      %3,   [edx]
    punpckldq mm3,  [edi+FENC_STRIDE]
    punpckldq %1,   [eax+esi]
    punpckldq %2,   [ecx+esi]
    punpckldq %3,   [edx+esi]
    psadbw    %1,   mm3
    psadbw    %2,   mm3
    psadbw    %3,   mm3
%endmacro

; Two rows, width 16. %1 != 0 marks the first iteration (loads args and
; initializes the accumulators); later iterations just accumulate.
%macro SAD_X3_2x16P 1
%if %1
    SAD_X3_START
    SAD_X3_START_1x8P
%else
    SAD_X3_1x8P 0, 0
%endif
    SAD_X3_1x8P 8, 8
    SAD_X3_1x8P FENC_STRIDE, esi
    SAD_X3_1x8P FENC_STRIDE+8, esi+8
    add     edi, 2*FENC_STRIDE
    lea     eax, [eax+2*esi]
    lea     ecx, [ecx+2*esi]
    lea     edx, [edx+2*esi]
%endmacro

; Two rows, width 8; same first-iteration convention as SAD_X3_2x16P.
%macro SAD_X3_2x8P 1
%if %1
    SAD_X3_START
    SAD_X3_START_1x8P
%else
    SAD_X3_1x8P 0, 0
%endif
    SAD_X3_1x8P FENC_STRIDE, esi
    add     edi, 2*FENC_STRIDE
    lea     eax, [eax+2*esi]
    lea     ecx, [ecx+2*esi]
    lea     edx, [edx+2*esi]
%endmacro

; Two rows, width 4; same first-iteration convention as SAD_X3_2x16P.
%macro SAD_X3_2x4P 1
%if %1
    SAD_X3_START
    SAD_X3_START_2x4P mm0, mm1, mm2
%else
    SAD_X3_START_2x4P mm4, mm5, mm6
    paddw   mm0, mm4
    paddw   mm1, mm5
    paddw   mm2, mm6
%endif
    add     edi, 2*FENC_STRIDE
    lea     eax, [eax+2*esi]
    lea     ecx, [ecx+2*esi]
    lea     edx, [edx+2*esi]
%endmacro

; Load the cdecl stack args for the x4 functions (ebx also saved: it holds
; the second candidate pointer, and is callee-saved in the i386 ABI).
%macro SAD_X4_START 0
    push    edi
    push    esi
    push    ebx
    mov     edi,    [esp+16]        ; fenc
    mov     eax,    [esp+20]        ; pix0
    mov     ebx,    [esp+24]        ; pix1
    mov     ecx,    [esp+28]        ; pix2
    mov     edx,    [esp+32]        ; pix3
    mov     esi,    [esp+36]        ; i_stride
%endmacro

; First row for x4: initialize accumulators mm0..mm3.
%macro SAD_X4_START_1x8P 0
    movq    mm7,    [edi]
    movq    mm0,    [eax]
    movq    mm1,    [ebx]
    movq    mm2,    [ecx]
    movq    mm3,    [edx]
    psadbw  mm0,    mm7
    psadbw  mm1,    mm7
    psadbw  mm2,    mm7
    psadbw  mm3,    mm7
%endmacro

; One 8-wide row for x4. Only mm4..mm7 are free, so the fenc row in mm7 is
; consumed in place by the fourth psadbw (psadbw mm7, [edx+%2]).
%macro SAD_X4_1x8P 2
    movq    mm7,    [edi+%1]
    movq    mm4,    [eax+%2]
    movq    mm5,    [ebx+%2]
    movq    mm6,    [ecx+%2]
    psadbw  mm4,    mm7
    psadbw  mm5,    mm7
    psadbw  mm6,    mm7
    psadbw  mm7,    [edx+%2]
    paddw   mm0,    mm4
    paddw   mm1,    mm5
    paddw   mm2,    mm6
    paddw   mm3,    mm7
%endmacro

; First two 4-wide rows for x4: initialize accumulators mm0..mm3.
%macro SAD_X4_START_2x4P 0
    movd      mm7,  [edi]
    movd      mm0,  [eax]
    movd      mm1,  [ebx]
    movd      mm2,  [ecx]
    movd      mm3,  [edx]
    punpckldq mm7,  [edi+FENC_STRIDE]
    punpckldq mm0,  [eax+esi]
    punpckldq mm1,  [ebx+esi]
    punpckldq mm2,  [ecx+esi]
    punpckldq mm3,  [edx+esi]
    psadbw    mm0,  mm7
    psadbw    mm1,  mm7
    psadbw    mm2,  mm7
    psadbw    mm3,  mm7
%endmacro

; Two 4-wide rows for x4, accumulating pass: mm4/mm5 are recycled for the
; second candidate pair since only two scratch regs are available.
%macro SAD_X4_INC_2x4P 0
    movd      mm7,  [edi]
    movd      mm4,  [eax]
    movd      mm5,  [ebx]
    punpckldq mm7,  [edi+FENC_STRIDE]
    punpckldq mm4,  [eax+esi]
    punpckldq mm5,  [ebx+esi]
    psadbw    mm4,  mm7
    psadbw    mm5,  mm7
    paddw     mm0,  mm4
    paddw     mm1,  mm5
    movd      mm4,  [ecx]
    movd      mm5,  [edx]
    punpckldq mm4,  [ecx+esi]
    punpckldq mm5,  [edx+esi]
    psadbw    mm4,  mm7
    psadbw    mm5,  mm7
    paddw     mm2,  mm4
    paddw     mm3,  mm5
%endmacro

; Two rows, width 16. %1 != 0 marks the first iteration (arg load + init).
%macro SAD_X4_2x16P 1
%if %1
    SAD_X4_START
    SAD_X4_START_1x8P
%else
    SAD_X4_1x8P 0, 0
%endif
    SAD_X4_1x8P 8, 8
    SAD_X4_1x8P FENC_STRIDE, esi
    SAD_X4_1x8P FENC_STRIDE+8, esi+8
    add     edi, 2*FENC_STRIDE
    lea     eax, [eax+2*esi]
    lea     ebx, [ebx+2*esi]
    lea     ecx, [ecx+2*esi]
    lea     edx, [edx+2*esi]
%endmacro

; Two rows, width 8; same first-iteration convention.
%macro SAD_X4_2x8P 1
%if %1
    SAD_X4_START
    SAD_X4_START_1x8P
%else
    SAD_X4_1x8P 0, 0
%endif
    SAD_X4_1x8P FENC_STRIDE, esi
    add     edi, 2*FENC_STRIDE
    lea     eax, [eax+2*esi]
    lea     ebx, [ebx+2*esi]
    lea     ecx, [ecx+2*esi]
    lea     edx, [edx+2*esi]
%endmacro

; Two rows, width 4; same first-iteration convention.
%macro SAD_X4_2x4P 1
%if %1
    SAD_X4_START
    SAD_X4_START_2x4P
%else
    SAD_X4_INC_2x4P
%endif
    add     edi, 2*FENC_STRIDE
    lea     eax, [eax+2*esi]
    lea     ebx, [ebx+2*esi]
    lea     ecx, [ecx+2*esi]
    lea     edx, [edx+2*esi]
%endmacro

; Store the three scores into the int scores[3] output arg and return.
%macro SAD_X3_END 0
    mov     eax,  [esp+32]          ; scores[]
    movd    [eax+0], mm0
    movd    [eax+4], mm1
    movd    [eax+8], mm2
    pop     esi
    pop     edi
    ret
%endmacro

; Store the four scores into the int scores[4] output arg and return.
%macro SAD_X4_END 0
    mov     eax,  [esp+40]          ; scores[]
    movd    [eax+0], mm0
    movd    [eax+4], mm1
    movd    [eax+8], mm2
    movd    [eax+12], mm3
    pop     ebx
    pop     esi
    pop     edi
    ret
%endmacro

; ssd
;
; MMX has no psubb-with-abs, so |a-b| is built from two saturating
; subtractions or'd together, then widened to 16 bit and squared with
; pmaddwd. Accumulator: mm0 (dwords); mm7 is assumed zero for the unpacks.

; One row of a 16-wide block.
%macro SSD_INC_1x16P 0
    movq    mm1,    [eax]
    movq    mm2,    [ecx]
    movq    mm3,    [eax+8]
    movq    mm4,    [ecx+8]
    movq    mm5,    mm2
    movq    mm6,    mm4
    psubusb mm2,    mm1
    psubusb mm4,    mm3
    psubusb mm1,    mm5
    psubusb mm3,    mm6
    por     mm1,    mm2             ; mm1 = |pix1-pix2| (low 8 bytes)
    por     mm3,    mm4             ; mm3 = |pix1-pix2| (high 8 bytes)
    movq    mm2,    mm1
    movq    mm4,    mm3
    punpcklbw mm1,  mm7
    punpcklbw mm3,  mm7
    punpckhbw mm2,  mm7
    punpckhbw mm4,  mm7
    pmaddwd mm1,    mm1             ; square and pairwise-add to dwords
    pmaddwd mm2,    mm2
    pmaddwd mm3,    mm3
    pmaddwd mm4,    mm4
    add     eax,    ebx
    add     ecx,    edx
    paddd   mm0,    mm1
    paddd   mm0,    mm2
    paddd   mm0,    mm3
    paddd   mm0,    mm4
%endmacro

; One row of an 8-wide block.
%macro SSD_INC_1x8P 0
    movq    mm1,    [eax]
    movq    mm2,    [ecx]
    movq    mm5,    mm2
    psubusb mm2,    mm1
    psubusb mm1,    mm5
    por     mm1,    mm2         ; mm1 = 8bit abs diff
    movq    mm2,    mm1
    punpcklbw mm1,  mm7
    punpckhbw mm2,  mm7         ; (mm1,mm2) = 16bit abs diff
    pmaddwd mm1,    mm1
    pmaddwd mm2,    mm2
    add     eax,    ebx
    add     ecx,    edx
    paddd   mm0,    mm1
    paddd   mm0,    mm2
%endmacro

; One row of a 4-wide block.
%macro SSD_INC_1x4P 0
    movd    mm1,    [eax]
    movd    mm2,    [ecx]
    movq    mm5,    mm2
    psubusb mm2,    mm1
    psubusb mm1,    mm5
    por     mm1,    mm2
    punpcklbw mm1,  mm7
    pmaddwd mm1,    mm1
    add     eax,    ebx
    add     ecx,    edx
    paddd   mm0,    mm1
%endmacro

; satd

; Butterfly step: (%1,%2) <- (%1+%2, %2-%1) and (%3,%4) <- (%3+%4, %4-%3),
; written with paddw/psubw only (the doubling of %2/%4 realizes b-a).
%macro SUMSUB_BADC 4
    paddw %1,   %2
    paddw %3,   %4
    paddw %2,   %2
    paddw %4,   %4
    psubw %2,   %1
    psubw %4,   %3
%endmacro

; Two butterfly levels = one 4-point Hadamard transform across 4 regs.
%macro HADAMARD4x4 4
    SUMSUB_BADC %1, %2, %3, %4
    SUMSUB_BADC %1, %3, %2, %4
%endmacro

; Interleave words of %1/%2; low half -> %1, high half -> %3 (tmp).
%macro SBUTTERFLYwd 3
    movq        %3, %1
    punpcklwd   %1, %2
    punpckhwd   %3, %2
%endmacro

; Interleave dwords of %1/%2; low half -> %1, high half -> %3 (tmp).
%macro SBUTTERFLYdq 3
    movq        %3, %1
    punpckldq   %1, %2
    punpckhdq   %3, %2
%endmacro

; 4x4 word transpose via butterflies. %5 is scratch.
%macro TRANSPOSE4x4 5   ; abcd-t -> adtc
    SBUTTERFLYwd %1, %2, %5
    SBUTTERFLYwd %3, %4, %2
    SBUTTERFLYdq %1, %3, %4
    SBUTTERFLYdq %5, %2, %3
%endmacro

; |%1| per 16-bit lane: max(x, -x). Clobbers %2.
%macro MMX_ABS 2        ; mma, tmp
    pxor    %2, %2
    psubw   %2, %1
    pmaxsw  %1, %2
%endmacro

; Absolute value of two regs at once; clobbers both tmps.
%macro MMX_ABS_TWO 4    ; mma, mmb, tmp0, tmp1
    pxor    %3, %3
    pxor    %4, %4
    psubw   %3, %1
    psubw   %4, %2
    pmaxsw  %1, %3
    pmaxsw  %2, %4
%endmacro

; Full 2-D 4x4 Hadamard of the diffs in mm4..mm7, then sum of absolute
; transform coefficients. pavgw halves the two partial sums in one op
; (the SATD normalization also rounds here).
%macro HADAMARD4x4_SUM 1    ; %1 - dest (row sum of one block)
    HADAMARD4x4 mm4, mm5, mm6, mm7
    TRANSPOSE4x4 mm4, mm5, mm6, mm7, %1
    HADAMARD4x4 mm4, mm7, %1, mm6
    MMX_ABS_TWO mm4, mm7, mm3, mm5
    MMX_ABS_TWO %1,  mm6, mm3, mm5
    paddw       %1,  mm4
    paddw       mm6, mm7
    pavgw       %1,  mm6
%endmacro

; Load 4 pixels from pix1 and pix2 at (dx, dy) and compute their 16-bit
; difference in %1. The punpcklbw %2,%2 trick makes the subsequent psubw
; cancel the interleaved high bytes.
%macro LOAD_DIFF_4P 4  ; mmp, mmt, dx, dy
    movd        %1, [eax+ebx*%4+%3]
    movd        %2, [ecx+edx*%4+%3]
    punpcklbw   %1, %2
    punpcklbw   %2, %2
    psubw       %1, %2
%endmacro

; in: %2 = horizontal offset
; in: %3 = whether we need to increment pix1 and pix2
; clobber: mm3..mm7
; out: %1 = satd
%macro LOAD_DIFF_HADAMARD_SUM 3
%if %3
    LOAD_DIFF_4P mm4, mm3, %2, 0
    LOAD_DIFF_4P mm5, mm3, %2, 1
    lea  eax, [eax+2*ebx]
    lea  ecx, [ecx+2*edx]
    LOAD_DIFF_4P mm6, mm3, %2, 0
    LOAD_DIFF_4P mm7, mm3, %2, 1
    lea  eax, [eax+2*ebx]
    lea  ecx, [ecx+2*edx]
%else
    ; non-incrementing path: rows 0..3 reached via stride*0/1/2 and one add
    LOAD_DIFF_4P mm4, mm3, %2, 0
    LOAD_DIFF_4P mm6, mm3, %2, 2
    add  eax, ebx
    add  ecx, edx
    LOAD_DIFF_4P mm5, mm3, %2, 0
    LOAD_DIFF_4P mm7, mm3, %2, 2
%endif
    HADAMARD4x4_SUM %1
%endmacro

;=============================================================================
; Code
;=============================================================================

SECTION .text

; Load the cdecl args of a plain SAD function and zero the accumulator.
%macro SAD_START 0
    push    ebx
    mov     eax,    [esp+ 8]    ; pix1
    mov     ebx,    [esp+12]    ; stride1
    mov     ecx,    [esp+16]    ; pix2
    mov     edx,    [esp+20]    ; stride2
    pxor    mm0,    mm0
%endmacro

; Return the accumulated SAD in eax.
%macro SAD_END 0
    movd eax,    mm0
    pop ebx
    ret
%endmacro

;-----------------------------------------------------------------------------
;   int __cdecl x264_pixel_sad_16x16_mmxext (uint8_t *, int, uint8_t *, int )
;-----------------------------------------------------------------------------
; Generator: emits x264_pixel_sad_%1x%2_mmxext by repeating the matching
; two-row macro %2/2 times.
%macro SAD 2
cglobal x264_pixel_sad_%1x%2_mmxext
    SAD_START
%rep %2/2
    SAD_INC_2x%1P
%endrep
    SAD_END
%endmacro

SAD 16, 16
SAD 16,  8
SAD  8, 16
SAD  8,  8
SAD  8,  4
SAD  4,  8
SAD  4,  4

;-----------------------------------------------------------------------------
;  void x264_pixel_sad_x3_16x16_mmxext( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
;                                       uint8_t *pix2, int i_stride, int scores[3] )
;-----------------------------------------------------------------------------
; Generator: emits x264_pixel_sad_x%1_%2x%3_mmxext; the first two-row pass
; (arg 1) loads parameters and initializes, the rest (arg 0) accumulate.
%macro SAD_X 3
cglobal x264_pixel_sad_x%1_%2x%3_mmxext
    SAD_X%1_2x%2P 1
%rep %3/2-1
    SAD_X%1_2x%2P 0
%endrep
    SAD_X%1_END
%endmacro

SAD_X 3, 16, 16
SAD_X 3, 16,  8
SAD_X 3,  8, 16
SAD_X 3,  8,  8
SAD_X 3,  8,  4
SAD_X 3,  4,  8
SAD_X 3,  4,  4
SAD_X 4, 16, 16
SAD_X 4, 16,  8
SAD_X 4,  8, 16
SAD_X 4,  8,  8
SAD_X 4,  8,  4
SAD_X 4,  4,  8
SAD_X 4,  4,  4

;-----------------------------------------------------------------------------
;   int __cdecl x264_pixel_sad_pde_16x16_mmxext (uint8_t *, int, uint8_t *, int, int )
;-----------------------------------------------------------------------------
; SAD with partial distortion elimination: after the first %2/4*2 rows
; (half the block), compare the partial SAD against prev_score and bail
; out early with 0xffff if it can no longer win.
%macro SAD_PDE 2
cglobal x264_pixel_sad_pde_%1x%2_mmxext
    SAD_START
%rep %2/4
    SAD_INC_2x%1P
%endrep
    movd ebx, mm0
    cmp  ebx, [esp+24] ; prev_score
    jl   .continue
    pop  ebx
    mov  eax, 0xffff
    ret
ALIGN 4
; NOTE(review): source is truncated here (page 1 of 3) — the .continue
; label, the remaining rows, and this macro's %endmacro are on the next page.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -