
qpel_mmx.asm

An H.264 decoder converted from FFMPEG; compiles under VC..
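; (The listing continues mid-macro from the previous page, inside the
; table-driven horizontal pass.) Each source byte fetched with movzx selects
; a 4-word entry (hence the *8 scaling of rax/rdx) in an xvid_FIR_* table,
; i.e. that byte value pre-multiplied by four neighbouring filter taps; the
; entries accumulate into mm0 (output pixels 0..3) and mm3 (pixels 4..7).
; The table definitions themselves are on the previous page and are assumed
; here, not shown.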
  paddw mm3, [r9 + rax*8]

  movzx rax, byte [rsi+3]
  lea r9, [xvid_FIR_7_20_20_6 wrt rip]
  paddw mm0, [r9 + rdx*8]
  lea r9, [xvid_FIR_3_1_0_0 wrt rip]
  paddw mm3, [r9 + rdx*8]

  movzx rdx, byte [rsi+4]
  lea r9, [xvid_FIR_3_6_20_20 wrt rip]
  paddw mm0, [r9 + rax*8]
  lea r9, [xvid_FIR_6_3_1_0 wrt rip]
  paddw mm3, [r9 + rax*8]

  movzx rax, byte [rsi+5]
  lea r9, [xvid_FIR_1_3_6_20 wrt rip]
  paddw mm0, [r9 + rdx*8]
  lea r9, [xvid_FIR_20_6_3_1 wrt rip]
  paddw mm3, [r9 + rdx*8]

  movzx rdx, byte [rsi+6]
  lea r9, [xvid_FIR_0_1_3_6 wrt rip]
  paddw mm0, [r9 + rax*8]
  lea r9, [xvid_FIR_20_20_6_3 wrt rip]
  paddw mm3, [r9 + rax*8]

  movzx rax, byte [rsi+7]
  lea r9, [xvid_FIR_0_0_1_3 wrt rip]
  paddw mm0, [r9 + rdx*8]
  lea r9, [xvid_FIR_6_20_20_7 wrt rip]
  paddw mm3, [r9 + rdx*8]

  lea r9, [xvid_FIR_0_0_0_1 wrt rip]
  paddw mm0, [r9 + rax*8]
  lea r9, [xvid_FIR_3_6_19_23 wrt rip]
  paddw mm3, [r9 + rax*8]

%endif

%endif    ; !USE_TABLES
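  ; The accumulated filter sums are now scaled back by 32 (the taps of one
  ; output pixel sum to 32) with an arithmetic shift right of 5 and clamped
  ; to unsigned bytes. The rounding bias is assumed to have been added to
  ; mm0/mm3 earlier, in code on the previous page.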

  psraw mm0, 5
  psraw mm3, 5
  packuswb mm0, mm3
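  ; Optional mixing stages follow. MIX (defined on the previous page)
  ; presumably averages mm0 with 8 bytes from the given pointer using the
  ; rounder passed as the last argument: %1==1 averages with the source,
  ; %1==2 with the source shifted one pixel to the right (the "Up" variants),
  ; and %2==1 averages the result into the existing destination block using
  ; Rounder1_MMX.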

%if (%1==1)
  MIX mm0, rsi, rbx
%elif (%1==2)
  MIX mm0, rsi+1, rbx
%endif
%if (%2==1)
  lea r9, [Rounder1_MMX wrt rip]
  MIX mm0, rdi, r9
%endif

  movq [rdi], mm0

  add rdi, rbp
  add rsi, rbp
  dec rcx
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro

;//////////////////////////////////////////////////////////////////////
;// 16x? copy Functions
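;// Naming convention for the entry points below (inferred from the macro
;// arguments): the first argument selects source mixing (0 = plain filter,
;// 1 = "Avrg" = average with the source pixel, 2 = "Avrg_Up" = average with
;// the next source pixel), the second selects destination mixing
;// (1 = "Add" = average the result into the existing destination).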

xvid_H_Pass_16_x86_64:
  H_PASS_16 0, 0
.endfunc
xvid_H_Pass_Avrg_16_x86_64:
  H_PASS_16 1, 0
.endfunc
xvid_H_Pass_Avrg_Up_16_x86_64:
  H_PASS_16 2, 0
.endfunc

;//////////////////////////////////////////////////////////////////////
;// 8x? copy Functions

xvid_H_Pass_8_x86_64:
  H_PASS_8 0, 0
.endfunc
xvid_H_Pass_Avrg_8_x86_64:
  H_PASS_8 1, 0
.endfunc
xvid_H_Pass_Avrg_Up_8_x86_64:
  H_PASS_8 2, 0
.endfunc

;//////////////////////////////////////////////////////////////////////
;// 16x? avrg Functions

xvid_H_Pass_Add_16_x86_64:
  H_PASS_16 0, 1
.endfunc
xvid_H_Pass_Avrg_Add_16_x86_64:
  H_PASS_16 1, 1
.endfunc
xvid_H_Pass_Avrg_Up_Add_16_x86_64:
  H_PASS_16 2, 1
.endfunc

;//////////////////////////////////////////////////////////////////////
;// 8x? avrg Functions

xvid_H_Pass_8_Add_x86_64:
  H_PASS_8 0, 1
.endfunc
xvid_H_Pass_Avrg_8_Add_x86_64:
  H_PASS_8 1, 1
.endfunc
xvid_H_Pass_Avrg_Up_8_Add_x86_64:
  H_PASS_8 2, 1
.endfunc


;//////////////////////////////////////////////////////////////////////
;//
;// All vertical passes
;//
;//////////////////////////////////////////////////////////////////////
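; Register conventions, as apparently set up by PROLOG_* on the previous page:
;   rdi = dst, rsi = src (kept around for source mixing), rbp = stride (BpS),
;   rcx = loop counter, rbx = pointer to the halfpel rounder used by MIX/V_MIX,
;   mm7 = FIR rounding constant pre-loaded into each accumulator.
; Inside each stripe, rdx walks down the current source column.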

%macro V_LOAD 1  ; %1=Last?
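  ; load 4 source pixels from [rdx] and zero-extend them to 16-bit words in
  ; mm4; unless this is the last row (%1==1), advance rdx by one stride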

  movd mm4, [rdx]
  pxor mm6, mm6
%if (%1==0)
  add rdx, rbp
%endif
  punpcklbw mm4, mm6

%endmacro

%macro V_ACC1 2   ; %1:reg, %2:tap
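  ; multiply the 4 pixels in mm4 by one 4-word tap entry and add them to %1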
  pmullw mm4, [%2]
  paddw %1, mm4
%endmacro

%macro V_ACC2 4   ; %1-%2: regs, %3-%4: taps
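  ; accumulate the same 4 pixels into two accumulators with two different
  ; tap sets, using mm5/mm6 as scratch so mm4 stays intact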
  movq mm5, mm4
  movq mm6, mm4
  pmullw mm5, [%3]
  pmullw mm6, [%4]
  paddw %1, mm5
  paddw %2, mm6
%endmacro

%macro V_ACC2l 4   ; %1-%2: regs, %3-%4: taps
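  ; like V_ACC2, but the second multiply clobbers mm4 (one movq less);
  ; only used when mm4 is not needed afterwards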
  movq mm5, mm4
  pmullw mm5, [%3]
  pmullw mm4, [%4]
  paddw %1, mm5
  paddw %2, mm4
%endmacro

%macro V_ACC4 8   ; %1-%4: regs, %5-%8: taps
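  ; accumulate into all four accumulators: V_ACC2 preserves mm4, then
  ; V_ACC2l consumes it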
  V_ACC2 %1,%2, %5,%6
  V_ACC2l %3,%4, %7,%8
%endmacro


%macro V_MIX 3  ; %1:dst-reg, %2:src, %3: rounder
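  ; average the 4 low bytes of %1 with 4 bytes from [%2]: both are unpacked
  ; to words, summed together with the rounder at [%3] (saturating adds),
  ; halved and repacked to bytes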
  pxor mm6, mm6
  movq mm4, [%2]
  punpcklbw %1, mm6
  punpcklbw mm4, mm6
  paddusw %1, mm4
  paddusw %1, [%3]
  psrlw %1, 1
  packuswb %1, %1
%endmacro

%macro V_STORE 4    ; %1-%2: mix ops, %3: reg, %4:last?
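  ; scale accumulator %3 down by 32 (>>5) and clamp to bytes; optionally
  ; average with the source (%1==1: current source row, %1==2: the next row
  ; down, advancing rsi by one stride either way) and/or with the destination
  ; (%2==1, via Rounder1_MMX); store 4 bytes to [rdi] and, unless this is the
  ; last store of the stripe (%4==1), advance rdi by one stride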

  psraw %3, 5
  packuswb %3, %3

%if (%1==1)
  V_MIX %3, rsi, rbx
  add rsi, rbp
%elif (%1==2)
  add rsi, rbp
  V_MIX %3, rsi, rbx
%endif
%if (%2==1)
  lea r9, [Rounder1_MMX wrt rip]
  V_MIX %3, rdi, r9
%endif

  movd eax, %3
  mov [rdi], eax

%if (%4==0)
  add rdi, rbp
%endif

%endmacro

;//////////////////////////////////////////////////////////////////////

%macro V_PASS_16  2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
  PROLOG_NO_AVRG
%else
  PROLOG_AVRG
%endif

    ; we process one stripe of 4x16 pixels each time.
    ; the size (3rd argument) is meant to be a multiple of 4
    ;  mm0..mm3 serve as a 4x4 delay line
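    ; The vertical filter is the 8-tap quarter-pel kernel
    ;   (-1, 3, -6, 20, 20, -6, 3, -1) / 32,
    ; as can be read off the interior taps below. The merged taps used on the
    ; first and last rows (FIR_C14, FIR_C23, FIR_Cm7, FIR_C19, FIR_Cm3, FIR_C2)
    ; are consistent with folding in the source rows mirrored at the block
    ; border.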

.Loop

  push rdi
  push rsi      ; rsi is preserved for src-mixing
  mov rdx, rsi	; rsi is Src

    ; output rows [0..3], from input rows [0..8]

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0      ; advances rdx by rbp (rbp is BpS, the stride)
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C14 wrt rip, FIR_Cm3 wrt rip, FIR_C2 wrt rip,  FIR_Cm1 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C23 wrt rip, FIR_C19 wrt rip, FIR_Cm6 wrt rip, FIR_C3 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm7 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip, FIR_Cm6 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1 wrt rip, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip
  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1 wrt rip,  FIR_C3 wrt rip
  V_ACC1 mm3, FIR_Cm6 wrt rip
  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1 wrt rip, FIR_C3 wrt rip
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1 wrt rip
  V_STORE %1, %2, mm3, 0

    ; output rows [4..7], from input rows [1..11] (!!)

;  mov esi, [esp]
  mov rsi, [rsp]	; rsi on stack...
  lea rdx, [rsi+rbp]

  lea rsi, [rsi+4*rbp]  ; for src-mixing
  push rsi              ; this will be the new value for next round

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm0, FIR_Cm1 wrt rip

  V_LOAD 0
  V_ACC2l mm0, mm1, FIR_C3 wrt rip,  FIR_Cm1 wrt rip

  V_LOAD 0
  V_ACC2 mm0, mm1, FIR_Cm6 wrt rip,  FIR_C3 wrt rip
  V_ACC1 mm2, FIR_Cm1 wrt rip

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C20 wrt rip, FIR_Cm6 wrt rip, FIR_C3 wrt rip, FIR_Cm1 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C20 wrt rip, FIR_C20 wrt rip, FIR_Cm6 wrt rip, FIR_C3 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm6 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip, FIR_Cm6 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1 wrt rip, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip
  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1 wrt rip,  FIR_C3 wrt rip
  V_ACC1 mm3, FIR_Cm6 wrt rip
  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1 wrt rip, FIR_C3 wrt rip
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1 wrt rip
  V_STORE %1, %2, mm3, 0

    ; output rows [8..11], from input rows [5..15]

  pop rsi
  lea rdx, [rsi+rbp]

  lea rsi, [rsi+4*rbp]  ; for src-mixing
  push rsi              ; this will be the new value for next round

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm0, FIR_Cm1 wrt rip

  V_LOAD 0
  V_ACC2l mm0, mm1, FIR_C3 wrt rip,  FIR_Cm1 wrt rip

  V_LOAD 0
  V_ACC2 mm0, mm1, FIR_Cm6 wrt rip,  FIR_C3 wrt rip
  V_ACC1 mm2, FIR_Cm1 wrt rip

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C20 wrt rip, FIR_Cm6 wrt rip, FIR_C3 wrt rip, FIR_Cm1 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C20 wrt rip, FIR_C20 wrt rip, FIR_Cm6 wrt rip, FIR_C3 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm6 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip, FIR_Cm6 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1 wrt rip, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip

  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1 wrt rip,  FIR_C3 wrt rip
  V_ACC1 mm3, FIR_Cm6 wrt rip
  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1 wrt rip, FIR_C3 wrt rip
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1 wrt rip
  V_STORE %1, %2, mm3, 0


    ; output rows [12..15], from input rows [9..16]

  pop rsi
  lea rdx, [rsi+rbp]

%if (%1!=0)
  lea rsi, [rsi+4*rbp]  ; for src-mixing
%endif

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm3, FIR_Cm1 wrt rip

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1 wrt rip,  FIR_C3 wrt rip

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1 wrt rip,  FIR_C3 wrt rip
  V_ACC1 mm3, FIR_Cm6 wrt rip

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1 wrt rip, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm7 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip, FIR_Cm6 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C23 wrt rip, FIR_C19 wrt rip, FIR_Cm6 wrt rip, FIR_C3 wrt rip
  V_LOAD 1
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C14 wrt rip, FIR_Cm3 wrt rip, FIR_C2 wrt rip, FIR_Cm1 wrt rip

  V_STORE %1, %2, mm3, 0
  V_STORE %1, %2, mm2, 0
  V_STORE %1, %2, mm1, 0
  V_STORE %1, %2, mm0, 1

    ; ... next 4 columns

  pop rsi
  pop rdi
  add rsi, 4
  add rdi, 4
  sub rcx, 4
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro

;//////////////////////////////////////////////////////////////////////

%macro V_PASS_8  2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
  PROLOG_NO_AVRG
%else
  PROLOG_AVRG
%endif

    ; we process one stripe of 4x8 pixels each time
    ; the size (3rd argument) is meant to be a multiple of 4
    ;  mm0..mm3 serve as a 4x4 delay line
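    ; Same scheme as V_PASS_16, but only two groups of 4 output rows; the
    ; second group already uses the merged bottom-border taps.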
.Loop

  push rdi
  push rsi      ; rsi is preserved for src-mixing
  mov rdx, rsi

    ; output rows [0..3], from input rows [0..8]

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C14 wrt rip, FIR_Cm3 wrt rip, FIR_C2 wrt rip,  FIR_Cm1 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C23 wrt rip, FIR_C19 wrt rip, FIR_Cm6 wrt rip, FIR_C3 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm7 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip, FIR_Cm6 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1 wrt rip, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip
  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1 wrt rip,  FIR_C3 wrt rip
  V_ACC1 mm3, FIR_Cm6 wrt rip

  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1 wrt rip,  FIR_C3 wrt rip
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1 wrt rip
  V_STORE %1, %2, mm3, 0

    ; output rows [4..7], from input rows [1..9]

  mov rsi, [rsp]	; rsi on stack...
  lea rdx, [rsi+rbp]

%if (%1!=0)
  lea rsi, [rsi+4*rbp]  ; for src-mixing
%endif

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm3, FIR_Cm1 wrt rip

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1 wrt rip,  FIR_C3 wrt rip

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1 wrt rip,  FIR_C3 wrt rip
  V_ACC1 mm3, FIR_Cm6 wrt rip

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1 wrt rip, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3 wrt rip,  FIR_Cm6 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm7 wrt rip, FIR_C20 wrt rip, FIR_C20 wrt rip, FIR_Cm6 wrt rip
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C23 wrt rip, FIR_C19 wrt rip, FIR_Cm6 wrt rip, FIR_C3 wrt rip
  V_LOAD 1
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C14 wrt rip, FIR_Cm3 wrt rip, FIR_C2 wrt rip, FIR_Cm1 wrt rip

  V_STORE %1, %2, mm3, 0
  V_STORE %1, %2, mm2, 0
  V_STORE %1, %2, mm1, 0
  V_STORE %1, %2, mm0, 1

    ; ... next 4 columns

  pop rsi
  pop rdi
  add rsi, 4
  add rdi, 4
  sub rcx, 4
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro


;//////////////////////////////////////////////////////////////////////
;// 16x? copy Functions

xvid_V_Pass_16_x86_64:
  V_PASS_16 0, 0
.endfunc
xvid_V_Pass_Avrg_16_x86_64:
  V_PASS_16 1, 0
.endfunc
xvid_V_Pass_Avrg_Up_16_x86_64:
  V_PASS_16 2, 0
.endfunc

;//////////////////////////////////////////////////////////////////////
;// 8x? copy Functions

xvid_V_Pass_8_x86_64:
  V_PASS_8 0, 0
.endfunc
xvid_V_Pass_Avrg_8_x86_64:
  V_PASS_8 1, 0
.endfunc
xvid_V_Pass_Avrg_Up_8_x86_64:
  V_PASS_8 2, 0
.endfunc

;//////////////////////////////////////////////////////////////////////
;// 16x? avrg Functions

xvid_V_Pass_Add_16_x86_64:
  V_PASS_16 0, 1
.endfunc
xvid_V_Pass_Avrg_Add_16_x86_64:
  V_PASS_16 1, 1
.endfunc
xvid_V_Pass_Avrg_Up_Add_16_x86_64:
  V_PASS_16 2, 1
.endfunc

;//////////////////////////////////////////////////////////////////////
;// 8x? avrg Functions

xvid_V_Pass_8_Add_x86_64:
  V_PASS_8 0, 1
.endfunc
xvid_V_Pass_Avrg_8_Add_x86_64:
  V_PASS_8 1, 1
.endfunc
xvid_V_Pass_Avrg_Up_8_Add_x86_64:
  V_PASS_8 2, 1
.endfunc

;//////////////////////////////////////////////////////////////////////
