
📄 pixel-a.asm

📁 DM642 H.264 codec
💻 ASM
📖 Page 1 of 2
;*****************************************************************************
;* pixel.asm: h264 encoder library
;*****************************************************************************
;* Copyright (C) 2003 x264 project
;* $Id: pixel.asm,v 1.1 2004/06/03 19:27:07 fenrir Exp $
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.
;*****************************************************************************

BITS 64

;=============================================================================
; Macros and other preprocessor constants
;=============================================================================

%include "amd64inc.asm"

; sad

%macro SAD_INC_2x16P 0
    movq    mm1,    [parm1q]
    movq    mm2,    [parm1q+8]
    movq    mm3,    [parm1q+parm2q]
    movq    mm4,    [parm1q+parm2q+8]
    psadbw  mm1,    [parm3q]
    psadbw  mm2,    [parm3q+8]
    psadbw  mm3,    [parm3q+parm4q]
    psadbw  mm4,    [parm3q+parm4q+8]
    lea     parm1q, [parm1q+2*parm2q]
    paddw   mm1,    mm2
    paddw   mm3,    mm4
    lea     parm3q, [parm3q+2*parm4q]
    paddw   mm0,    mm1
    paddw   mm0,    mm3
%endmacro

%macro SAD_INC_2x8P 0
    movq    mm1,    [parm1q]
    movq    mm2,    [parm1q+parm2q]
    psadbw  mm1,    [parm3q]
    psadbw  mm2,    [parm3q+parm4q]
    lea     parm1q, [parm1q+2*parm2q]
    paddw   mm0,    mm1
    paddw   mm0,    mm2
    lea     parm3q, [parm3q+2*parm4q]
%endmacro

%macro SAD_INC_2x4P 0
    movd    mm1,    [parm1q]
    movd    mm2,    [parm3q]
    movd    mm3,    [parm1q+parm2q]
    movd    mm4,    [parm3q+parm4q]
    psadbw  mm1,    mm2
    psadbw  mm3,    mm4
    paddw   mm0,    mm1
    paddw   mm0,    mm3
    lea     parm1q, [parm1q+2*parm2q]
    lea     parm3q, [parm3q+2*parm4q]
%endmacro

; sad x3 / x4

%macro SAD_X3_START_1x8P 1
    mov%1   mm3,    [parm1q]
    mov%1   mm0,    [parm2q]
    mov%1   mm1,    [parm3q]
    mov%1   mm2,    [parm4q]
    psadbw  mm0,    mm3
    psadbw  mm1,    mm3
    psadbw  mm2,    mm3
%endmacro

%macro SAD_X3_1x8P 3
    mov%1   mm3,    [parm1q+%2]
    mov%1   mm4,    [parm2q+%3]
    mov%1   mm5,    [parm3q+%3]
    mov%1   mm6,    [parm4q+%3]
    psadbw  mm4,    mm3
    psadbw  mm5,    mm3
    psadbw  mm6,    mm3
    paddw   mm0,    mm4
    paddw   mm1,    mm5
    paddw   mm2,    mm6
%endmacro

%macro SAD_X3_2x16P 1
%if %1
    SAD_X3_START_1x8P q
%else
    SAD_X3_1x8P q, 0, 0
%endif
    SAD_X3_1x8P q, 8, 8
    SAD_X3_1x8P q, FENC_STRIDE, parm5q
    SAD_X3_1x8P q, FENC_STRIDE+8, parm5q+8
    add     parm1q, 2*FENC_STRIDE
    lea     parm2q, [parm2q+2*parm5q]
    lea     parm3q, [parm3q+2*parm5q]
    lea     parm4q, [parm4q+2*parm5q]
%endmacro

%macro SAD_X3_2x8P 1
%if %1
    SAD_X3_START_1x8P q
%else
    SAD_X3_1x8P q, 0, 0
%endif
    SAD_X3_1x8P q, FENC_STRIDE, parm5q
    add     parm1q, 2*FENC_STRIDE
    lea     parm2q, [parm2q+2*parm5q]
    lea     parm3q, [parm3q+2*parm5q]
    lea     parm4q, [parm4q+2*parm5q]
%endmacro

%macro SAD_X3_2x4P 1
%if %1
    SAD_X3_START_1x8P d
%else
    SAD_X3_1x8P d, 0, 0
%endif
    SAD_X3_1x8P d, FENC_STRIDE, parm5q
    add     parm1q, 2*FENC_STRIDE
    lea     parm2q, [parm2q+2*parm5q]
    lea     parm3q, [parm3q+2*parm5q]
    lea     parm4q, [parm4q+2*parm5q]
%endmacro

%macro SAD_X4_START_1x8P 1
    mov%1   mm7,    [parm1q]
    mov%1   mm0,    [parm2q]
    mov%1   mm1,    [parm3q]
    mov%1   mm2,    [parm4q]
    mov%1   mm3,    [parm5q]
    psadbw  mm0,    mm7
    psadbw  mm1,    mm7
    psadbw  mm2,    mm7
    psadbw  mm3,    mm7
%endmacro

%macro SAD_X4_1x8P 2
    movq    mm7,    [parm1q+%1]
    movq    mm4,    [parm2q+%2]
    movq    mm5,    [parm3q+%2]
    movq    mm6,    [parm4q+%2]
    psadbw  mm4,    mm7
    psadbw  mm5,    mm7
    psadbw  mm6,    mm7
    psadbw  mm7,    [parm5q+%2]
    paddw   mm0,    mm4
    paddw   mm1,    mm5
    paddw   mm2,    mm6
    paddw   mm3,    mm7
%endmacro

%macro SAD_X4_1x4P 2
    movd    mm7,    [parm1q+%1]
    movd    mm4,    [parm2q+%2]
    movd    mm5,    [parm3q+%2]
    movd    mm6,    [parm4q+%2]
    psadbw  mm4,    mm7
    psadbw  mm5,    mm7
    paddw   mm0,    mm4
    psadbw  mm6,    mm7
    movd    mm4,    [parm5q+%2]
    paddw   mm1,    mm5
    psadbw  mm4,    mm7
    paddw   mm2,    mm6
    paddw   mm3,    mm4
%endmacro

%macro SAD_X4_2x16P 1
%if %1
    SAD_X4_START_1x8P q
%else
    SAD_X4_1x8P 0, 0
%endif
    SAD_X4_1x8P 8, 8
    SAD_X4_1x8P FENC_STRIDE, parm6q
    SAD_X4_1x8P FENC_STRIDE+8, parm6q+8
    add     parm1q, 2*FENC_STRIDE
    lea     parm2q, [parm2q+2*parm6q]
    lea     parm3q, [parm3q+2*parm6q]
    lea     parm4q, [parm4q+2*parm6q]
    lea     parm5q, [parm5q+2*parm6q]
%endmacro

%macro SAD_X4_2x8P 1
%if %1
    SAD_X4_START_1x8P q
%else
    SAD_X4_1x8P 0, 0
%endif
    SAD_X4_1x8P FENC_STRIDE, parm6q
    add     parm1q, 2*FENC_STRIDE
    lea     parm2q, [parm2q+2*parm6q]
    lea     parm3q, [parm3q+2*parm6q]
    lea     parm4q, [parm4q+2*parm6q]
    lea     parm5q, [parm5q+2*parm6q]
%endmacro

%macro SAD_X4_2x4P 1
%if %1
    SAD_X4_START_1x8P d
%else
    SAD_X4_1x4P 0, 0
%endif
    SAD_X4_1x4P FENC_STRIDE, parm6q
    add     parm1q, 2*FENC_STRIDE
    lea     parm2q, [parm2q+2*parm6q]
    lea     parm3q, [parm3q+2*parm6q]
    lea     parm4q, [parm4q+2*parm6q]
    lea     parm5q, [parm5q+2*parm6q]
%endmacro

%macro SAD_X3_END 0
    movd    [parm6q+0], mm0
    movd    [parm6q+4], mm1
    movd    [parm6q+8], mm2
    ret
%endmacro

%macro SAD_X4_END 0
    mov     rax, parm7q
    movd    [rax+0], mm0
    movd    [rax+4], mm1
    movd    [rax+8], mm2
    movd    [rax+12], mm3
    ret
%endmacro

; ssd

%macro SSD_INC_1x16P 0
    movq    mm1,    [parm1q]
    movq    mm2,    [parm3q]
    movq    mm3,    [parm1q+8]
    movq    mm4,    [parm3q+8]
    movq    mm5,    mm2
    movq    mm6,    mm4
    psubusb mm2,    mm1
    psubusb mm4,    mm3
    psubusb mm1,    mm5
    psubusb mm3,    mm6
    por     mm1,    mm2
    por     mm3,    mm4
    movq    mm2,    mm1
    movq    mm4,    mm3
    punpcklbw mm1,  mm7
    punpcklbw mm3,  mm7
    punpckhbw mm2,  mm7
    punpckhbw mm4,  mm7
    pmaddwd mm1,    mm1
    pmaddwd mm2,    mm2
    pmaddwd mm3,    mm3
    pmaddwd mm4,    mm4
    add     parm1q, parm2q
    add     parm3q, parm4q
    paddd   mm0,    mm1
    paddd   mm0,    mm2
    paddd   mm0,    mm3
    paddd   mm0,    mm4
%endmacro

%macro SSD_INC_1x8P 0
    movq    mm1,    [parm1q]
    movq    mm2,    [parm3q]
    movq    mm5,    mm2
    psubusb mm2,    mm1
    psubusb mm1,    mm5
    por     mm1,    mm2         ; mm1 = 8bit abs diff
    movq    mm2,    mm1
    punpcklbw mm1,  mm7
    punpckhbw mm2,  mm7         ; (mm1,mm2) = 16bit abs diff
    pmaddwd mm1,    mm1
    pmaddwd mm2,    mm2
    add     parm1q, parm2q
    add     parm3q, parm4q
    paddd   mm0,    mm1
    paddd   mm0,    mm2
%endmacro

%macro SSD_INC_1x4P 0
    movd    mm1,    [parm1q]
    movd    mm2,    [parm3q]
    movq    mm5,    mm2
    psubusb mm2,    mm1
    psubusb mm1,    mm5
    por     mm1,    mm2
    punpcklbw mm1,  mm7
    pmaddwd mm1,    mm1
    add     parm1q, parm2q
    add     parm3q, parm4q
    paddd   mm0,    mm1
%endmacro

; satd

%macro LOAD_DIFF_4P 4  ; MMP, MMT, [pix1], [pix2]
    movd        %1, %3
    movd        %2, %4
    punpcklbw   %1, %2
    punpcklbw   %2, %2
    psubw       %1, %2
%endmacro

%macro HADAMARD4_SUB_BADC 4
    paddw %1,   %2
    paddw %3,   %4
    paddw %2,   %2
    paddw %4,   %4
    psubw %2,   %1
    psubw %4,   %3
%endmacro

%macro HADAMARD4x4 4
    HADAMARD4_SUB_BADC %1, %2, %3, %4
    HADAMARD4_SUB_BADC %1, %3, %2, %4
%endmacro

%macro SBUTTERFLYwd 3
    movq        %3, %1
    punpcklwd   %1, %2
    punpckhwd   %3, %2
%endmacro

%macro SBUTTERFLYdq 3
    movq        %3, %1
    punpckldq   %1, %2
    punpckhdq   %3, %2
%endmacro

%macro TRANSPOSE4x4 5   ; abcd-t -> adtc
    SBUTTERFLYwd %1, %2, %5
    SBUTTERFLYwd %3, %4, %2
    SBUTTERFLYdq %1, %3, %4
    SBUTTERFLYdq %5, %2, %3
%endmacro

%macro MMX_ABS 2        ; mma, tmp
    pxor    %2, %2
    psubw   %2, %1
    pmaxsw  %1, %2
%endmacro

%macro MMX_ABS_TWO 4    ; mma, mmb, tmp0, tmp1
    pxor    %3, %3
    pxor    %4, %4
    psubw   %3, %1
    psubw   %4, %2
    pmaxsw  %1, %3
    pmaxsw  %2, %4
%endmacro

%macro HADAMARD4x4_SUM 1    ; %1 = dest (row sum of one block)
    HADAMARD4x4 mm4, mm5, mm6, mm7
    TRANSPOSE4x4 mm4, mm5, mm6, mm7, %1
    HADAMARD4x4 mm4, mm7, %1, mm6
    MMX_ABS_TWO mm4, mm7, mm3, mm5
    MMX_ABS_TWO %1,  mm6, mm3, mm5
    paddw       %1,  mm4
    paddw       mm6, mm7
    pavgw       %1,  mm6
%endmacro

; in: r10=3*stride1, r11=3*stride2
; in: %2 = horizontal offset
; in: %3 = whether we need to increment pix1 and pix2
; clobber: mm3..mm7
; out: %1 = satd
%macro LOAD_DIFF_HADAMARD_SUM 3
    LOAD_DIFF_4P mm4, mm3, [parm1q+%2],          [parm3q+%2]
    LOAD_DIFF_4P mm5, mm3, [parm1q+parm2q+%2],   [parm3q+parm4q+%2]
    LOAD_DIFF_4P mm6, mm3, [parm1q+2*parm2q+%2], [parm3q+2*parm4q+%2]
    LOAD_DIFF_4P mm7, mm3, [parm1q+r10+%2],      [parm3q+r11+%2]
%if %3
    lea  parm1q, [parm1q+4*parm2q]
    lea  parm3q, [parm3q+4*parm4q]
%endif
    HADAMARD4x4_SUM %1
%endmacro

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal x264_pixel_sad_16x16_mmxext
cglobal x264_pixel_sad_16x8_mmxext
cglobal x264_pixel_sad_8x16_mmxext
cglobal x264_pixel_sad_8x8_mmxext
cglobal x264_pixel_sad_8x4_mmxext
cglobal x264_pixel_sad_4x8_mmxext
cglobal x264_pixel_sad_4x4_mmxext

cglobal x264_pixel_sad_x3_16x16_mmxext
cglobal x264_pixel_sad_x3_16x8_mmxext
cglobal x264_pixel_sad_x3_8x16_mmxext
cglobal x264_pixel_sad_x3_8x8_mmxext
cglobal x264_pixel_sad_x3_8x4_mmxext
cglobal x264_pixel_sad_x3_4x8_mmxext
cglobal x264_pixel_sad_x3_4x4_mmxext

cglobal x264_pixel_sad_x4_16x16_mmxext
cglobal x264_pixel_sad_x4_16x8_mmxext
cglobal x264_pixel_sad_x4_8x16_mmxext
cglobal x264_pixel_sad_x4_8x8_mmxext
cglobal x264_pixel_sad_x4_8x4_mmxext
cglobal x264_pixel_sad_x4_4x8_mmxext
cglobal x264_pixel_sad_x4_4x4_mmxext

cglobal x264_pixel_sad_pde_16x16_mmxext
cglobal x264_pixel_sad_pde_16x8_mmxext
cglobal x264_pixel_sad_pde_8x16_mmxext

cglobal x264_pixel_ssd_16x16_mmx
cglobal x264_pixel_ssd_16x8_mmx
cglobal x264_pixel_ssd_8x16_mmx
cglobal x264_pixel_ssd_8x8_mmx
cglobal x264_pixel_ssd_8x4_mmx
cglobal x264_pixel_ssd_4x8_mmx
cglobal x264_pixel_ssd_4x4_mmx

cglobal x264_pixel_satd_4x4_mmxext
cglobal x264_pixel_satd_4x8_mmxext
cglobal x264_pixel_satd_8x4_mmxext
cglobal x264_pixel_satd_8x8_mmxext
cglobal x264_pixel_satd_16x8_mmxext
cglobal x264_pixel_satd_8x16_mmxext
cglobal x264_pixel_satd_16x16_mmxext

cglobal x264_intra_satd_x3_4x4_mmxext
cglobal x264_intra_satd_x3_8x8c_mmxext
cglobal x264_intra_satd_x3_16x16_mmxext

%macro SAD_START 0
    pxor    mm0, mm0
%endmacro

%macro SAD_END 0
    movd    eax, mm0
    ret
%endmacro

;-----------------------------------------------------------------------------
;   int x264_pixel_sad_16x16_mmxext (uint8_t *, int, uint8_t *, int )
;-----------------------------------------------------------------------------
%macro SAD 2
ALIGN 16
x264_pixel_sad_%1x%2_mmxext:
    SAD_START
%rep %2/2
    SAD_INC_2x%1P
%endrep
    SAD_END
%endmacro

SAD 16, 16
SAD 16,  8
SAD  8, 16
SAD  8,  8
SAD  8,  4
SAD  4,  8
SAD  4,  4

;-----------------------------------------------------------------------------
;  void x264_pixel_sad_x3_16x16_mmxext( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
;                                       uint8_t *pix2, int i_stride, int scores[3] )
;-----------------------------------------------------------------------------
%macro SAD_X 3
ALIGN 16
x264_pixel_sad_x%1_%2x%3_mmxext:
    SAD_X%1_2x%2P 1
%rep %3/2-1
    SAD_X%1_2x%2P 0
%endrep
    SAD_X%1_END
%endmacro

SAD_X 3, 16, 16
SAD_X 3, 16,  8
SAD_X 3,  8, 16
SAD_X 3,  8,  8
SAD_X 3,  8,  4
SAD_X 3,  4,  8
SAD_X 3,  4,  4
SAD_X 4, 16, 16
SAD_X 4, 16,  8
SAD_X 4,  8, 16
SAD_X 4,  8,  8
SAD_X 4,  8,  4
SAD_X 4,  4,  8
SAD_X 4,  4,  4

%macro PDE_CHECK 0
    movd eax, mm0
    cmp  eax, parm5d ; prev_score
    jl   .continue
    ret
ALIGN 4
.continue:
%endmacro

;-----------------------------------------------------------------------------
;   int x264_pixel_sad_pde_16x16_mmxext (uint8_t *, int, uint8_t *, int, int )
;-----------------------------------------------------------------------------
%macro SAD_PDE 2
ALIGN 16
x264_pixel_sad_pde_%1x%2_mmxext:
    SAD_START
%rep %2/4
    SAD_INC_2x%1P
%endrep
    movd eax, mm0
    cmp  eax, parm5d ; prev_score
    jl   .continue
    ret
ALIGN 4
.continue:
%rep %2/4
    SAD_INC_2x%1P
%endrep
    SAD_END
%endmacro

SAD_PDE 16, 16
SAD_PDE 16,  8
SAD_PDE  8, 16
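
For readers following the MMX macros above, here is a scalar C sketch of what the three metrics compute. It is not part of pixel-a.asm; it is an illustrative reference in the spirit of x264's C fallbacks, and the helper names (sad_wxh, ssd_wxh, satd_4x4) are hypothetical. In the asm, parm1q/parm2q are the first pixel buffer and its stride and parm3q/parm4q the second: SAD sums absolute pixel differences (the psadbw path), SSD sums squared differences (the pmaddwd path), and SATD applies a 4x4 Hadamard transform to the difference block before summing absolute coefficients. Note the asm's HADAMARD4x4_SUM halves with pavgw (a rounding average), whereas this sketch uses the plain >>1.

#include <stdint.h>
#include <stdlib.h>

/* SAD: what the SAD_INC_* macros accumulate with psadbw. */
static int sad_wxh( const uint8_t *pix1, int stride1,
                    const uint8_t *pix2, int stride2, int w, int h )
{
    int sum = 0;
    for( int y = 0; y < h; y++, pix1 += stride1, pix2 += stride2 )
        for( int x = 0; x < w; x++ )
            sum += abs( pix1[x] - pix2[x] );
    return sum;
}

/* SSD: what the SSD_INC_* macros accumulate by squaring the
 * unpacked 16-bit absolute differences with pmaddwd. */
static int ssd_wxh( const uint8_t *pix1, int stride1,
                    const uint8_t *pix2, int stride2, int w, int h )
{
    int sum = 0;
    for( int y = 0; y < h; y++, pix1 += stride1, pix2 += stride2 )
        for( int x = 0; x < w; x++ )
        {
            int d = pix1[x] - pix2[x];
            sum += d * d;
        }
    return sum;
}

/* SATD: 2D Hadamard transform of the 4x4 difference block, then a halved
 * sum of absolute coefficients. LOAD_DIFF_HADAMARD_SUM does the same with
 * a row pass, a register transpose, and a column pass. */
static int satd_4x4( const uint8_t *pix1, int stride1,
                     const uint8_t *pix2, int stride2 )
{
    int d[4][4], m[4][4], sum = 0;
    for( int y = 0; y < 4; y++ )
        for( int x = 0; x < 4; x++ )
            d[y][x] = pix1[y*stride1 + x] - pix2[y*stride2 + x];
    for( int y = 0; y < 4; y++ )   /* horizontal butterflies */
    {
        int s01 = d[y][0] + d[y][1], d01 = d[y][0] - d[y][1];
        int s23 = d[y][2] + d[y][3], d23 = d[y][2] - d[y][3];
        m[y][0] = s01 + s23;  m[y][1] = s01 - s23;
        m[y][2] = d01 + d23;  m[y][3] = d01 - d23;
    }
    for( int x = 0; x < 4; x++ )   /* vertical butterflies + |.| sum */
    {
        int s01 = m[0][x] + m[1][x], d01 = m[0][x] - m[1][x];
        int s23 = m[2][x] + m[3][x], d23 = m[2][x] - m[3][x];
        sum += abs( s01 + s23 ) + abs( s01 - s23 )
             + abs( d01 + d23 ) + abs( d01 - d23 );
    }
    return sum >> 1;
}

The x3/x4 SAD variants evaluate the same sad_wxh-style sum against three or four candidate reference buffers sharing one stride and write the results to the scores array. The pde ("partial distortion elimination") variants compute the top half of the block, compare the partial SAD against prev_score, and return early if it is already no better, skipping the bottom half.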
