
📄 fdct_mmx.asm

📁 MPEG-4 code, fairly detailed
💻 ASM
    paddd mm3, [round_frw_row]          ; +rounder (y2,y0)
    pmaddwd mm7, mm6                    ; x7*w23+x3*w21 x7*w19+x3*w17
    pmaddwd mm2, [TABLE_6+24]           ; x6*w15+x2*w13 x6*w11+x2*w09
    paddd mm3, mm4                      ; 4 ; a1=sum(even1) a0=sum(even0)
    pmaddwd mm5, [TABLE_6+48]           ; x5*w30+x1*w28 x5*w26+x1*w24
    pmaddwd mm6, [TABLE_6+56]           ; x7*w31+x3*w29 x7*w27+x3*w25
    paddd mm1, mm7                      ; 7 ; b1=sum(odd1) b0=sum(odd0)
    paddd mm0, [round_frw_row]          ; +rounder (y6,y4)
    psrad mm3, SHIFT_FRW_ROW_CLIP1      ; (y2, y0)
    paddd mm1, [round_frw_row]          ; +rounder (y3,y1)
    paddd mm0, mm2                      ; 2 ; a3=sum(even3) a2=sum(even2)
    paddd mm5, [round_frw_row]          ; +rounder (y7,y5)
    psrad mm1, SHIFT_FRW_ROW_CLIP1      ; y1=a1+b1 y0=a0+b0
    paddd mm5, mm6                      ; 6 ; b3=sum(odd3) b2=sum(odd2)
    psrad mm0, SHIFT_FRW_ROW_CLIP1      ; y3=a3+b3 y2=a2+b2
    psrad mm5, SHIFT_FRW_ROW_CLIP1      ; y4=a3-b3 y5=a2-b2
    packssdw mm3, mm0                   ; 0 ; y6 y4 y2 y0, saturate {-32768,+32767}
    packssdw mm1, mm5                   ; 3 ; y7 y5 y3 y1, saturate {-32768,+32767}
    movq mm6, mm3                       ; mm0 = y6 y4 y2 y0
    punpcklwd mm3, mm1                  ; y3 y2 y1 y0
    punpckhwd mm6, mm1                  ; y7 y6 y5 y4
    psraw mm3, SHIFT_FRW_ROW_CLIP2      ; descale [y3 y2 y1 y0] to {-2048,+2047}
    psraw mm6, SHIFT_FRW_ROW_CLIP2      ; descale [y7 y6 y5 y4] to {-2048,+2047}
    movq [OUT_7-16], mm3                ; 1 ; save y3 y2 y1 y0
    movq [OUT_7-8], mm6                 ; 7 ; save y7 y6 y5 y4

    ; process the final input row (INP_7 -> OUT_8)
    movd mm5, [INP_7+12]                ; mm5 = 7 6
    punpcklwd mm5, [INP_7+8]
    movq mm2, mm5                       ; mm2 = 5 7 4 6
    psrlq mm5, 32                       ; mm5 = _ _ 5 7
    movq mm0, [INP_7]                   ; mm0 = 3 2 1 0
    punpcklwd mm5, mm2                  ; mm5 = 4 5 6 7
    movq mm1, mm0                       ; mm1 = 3 2 1 0
    paddsw mm0, mm5                     ; mm0 = [3+4, 2+5, 1+6, 0+7] (xt3, xt2, xt1, xt0)
    psubsw mm1, mm5                     ; mm1 = [3-4, 2-5, 1-6, 0-7] (xt7, xt6, xt5, xt4)
    movq mm2, mm0                       ; mm2 = [ xt3 xt2 xt1 xt0 ]
    punpcklwd mm0, mm1                  ; mm0 = [ xt5 xt1 xt4 xt0 ]
    punpckhwd mm2, mm1                  ; mm2 = [ xt7 xt3 xt6 xt2 ]
    movq mm1, mm2                       ; mm1
    movq mm2, mm0                       ; 2 ; x3 x2 x1 x0
    movq mm3, [TABLE_7]                 ; 3 ; w06 w04 w02 w00
    punpcklwd mm0, mm1                  ; x5 x1 x4 x0
    movq mm5, mm0                       ; 5 ; x5 x1 x4 x0
    punpckldq mm0, mm0                  ; x4 x0 x4 x0  [ xt2 xt0 xt2 xt0 ]
    movq mm4, [TABLE_7+8]               ; 4 ; w07 w05 w03 w01
    punpckhwd mm2, mm1                  ; 1 ; x7 x3 x6 x2
    pmaddwd mm3, mm0                    ; x4*w06+x0*w04 x4*w02+x0*w00
    movq mm6, mm2                       ; 6 ; x7 x3 x6 x2
    movq mm1, [TABLE_7+32]              ; 1 ; w22 w20 w18 w16
    punpckldq mm2, mm2                  ; x6 x2 x6 x2  [ xt3 xt1 xt3 xt1 ]
    pmaddwd mm4, mm2                    ; x6*w07+x2*w05 x6*w03+x2*w01
    punpckhdq mm5, mm5                  ; x5 x1 x5 x1  [ xt6 xt4 xt6 xt4 ]
    pmaddwd mm0, [TABLE_7+16]           ; x4*w14+x0*w12 x4*w10+x0*w08
    punpckhdq mm6, mm6                  ; x7 x3 x7 x3  [ xt7 xt5 xt7 xt5 ]
    movq mm7, [TABLE_7+40]              ; 7 ; w23 w21 w19 w17
    pmaddwd mm1, mm5                    ; x5*w22+x1*w20 x5*w18+x1*w16
    paddd mm3, [round_frw_row]          ; +rounder (y2,y0)
    pmaddwd mm7, mm6                    ; x7*w23+x3*w21 x7*w19+x3*w17
    pmaddwd mm2, [TABLE_7+24]           ; x6*w15+x2*w13 x6*w11+x2*w09
    paddd mm3, mm4                      ; 4 ; a1=sum(even1) a0=sum(even0)
    pmaddwd mm5, [TABLE_7+48]           ; x5*w30+x1*w28 x5*w26+x1*w24
    pmaddwd mm6, [TABLE_7+56]           ; x7*w31+x3*w29 x7*w27+x3*w25
    paddd mm1, mm7                      ; 7 ; b1=sum(odd1) b0=sum(odd0)
    paddd mm0, [round_frw_row]          ; +rounder (y6,y4)
    psrad mm3, SHIFT_FRW_ROW_CLIP1      ; (y2, y0)
    paddd mm1, [round_frw_row]          ; +rounder (y3,y1)
    paddd mm0, mm2                      ; 2 ; a3=sum(even3) a2=sum(even2)
    paddd mm5, [round_frw_row]          ; +rounder (y7,y5)
    psrad mm1, SHIFT_FRW_ROW_CLIP1      ; y1=a1+b1 y0=a0+b0
    paddd mm5, mm6                      ; 6 ; b3=sum(odd3) b2=sum(odd2)
    psrad mm0, SHIFT_FRW_ROW_CLIP1      ; y3=a3+b3 y2=a2+b2
    psrad mm5, SHIFT_FRW_ROW_CLIP1      ; y4=a3-b3 y5=a2-b2
    packssdw mm3, mm0                   ; 0 ; y6 y4 y2 y0, saturate {-32768,+32767}
    packssdw mm1, mm5                   ; 3 ; y7 y5 y3 y1, saturate {-32768,+32767}
    movq mm6, mm3                       ; mm0 = y6 y4 y2 y0
    punpcklwd mm3, mm1                  ; y3 y2 y1 y0
    punpckhwd mm6, mm1                  ; y7 y6 y5 y4
    psraw mm3, SHIFT_FRW_ROW_CLIP2      ; descale [y3 y2 y1 y0] to {-2048,+2047}
    psraw mm6, SHIFT_FRW_ROW_CLIP2      ; descale [y7 y6 y5 y4] to {-2048,+2047}
    movq [OUT_8-16], mm3                ; 1 ; save y3 y2 y1 y0
    movq [OUT_8-8], mm6                 ; 7 ; save y7 y6 y5 y4

    ; restore registers, clear the MMX state and return
    pop ebx
    emms

    ret

;*******************************************************************
;       This SSE2 code of the FDCT algorithm coded by
;       Dmitry Rozhdestvensky and checked by Vladimir G. Ivanov
;*******************************************************************

%macro transpose8x8sse2 3       ; source,dest,{0=first part, 1=second part}
                                ; I found this smart transposing algo at www.x86.org
        movdqa          xmm0,[%1+0*16]  ; 07 06 05 04 03 02 01 00
        movdqa          xmm6,[%1+2*16]  ; 27 26 25 24 23 22 21 20
        movdqa          xmm4,[%1+4*16]  ; 47 46 45 44 43 42 41 40
        movdqa          xmm7,[%1+6*16]  ; 67 66 65 64 63 62 61 60
%if %3=0
        punpcklwd       xmm0,[%1+1*16]  ; 13 03 12 02 11 01 10 00
        movdqa          xmm2,xmm0
        punpcklwd       xmm6,[%1+3*16]  ; 33 23 32 22 31 21 30 20
        punpcklwd       xmm4,[%1+5*16]  ; 53 43 52 42 51 41 50 40
        movdqa          xmm5,xmm4
        punpcklwd       xmm7,[%1+7*16]  ; 73 63 72 62 71 61 70 60
%else
        punpckhwd       xmm0,[%1+1*16]  ;
        movdqa          xmm2,xmm0
        punpckhwd       xmm6,[%1+3*16]  ;
        punpckhwd       xmm4,[%1+5*16]  ;
        movdqa          xmm5,xmm4
        punpckhwd       xmm7,[%1+7*16]  ;
%endif
        punpckldq       xmm0,xmm6       ; 31 21 11 01 30 20 10 00
        movdqa          xmm1,xmm0
        punpckldq       xmm4,xmm7       ; 71 61 51 41 70 60 50 40
        punpckhdq       xmm2,xmm6       ; 33 23 13 03 32 22 12 02
        movdqa          xmm3,xmm2
        punpckhdq       xmm5,xmm7       ; 73 63 53 43 72 62 52 42
        punpcklqdq      xmm0,xmm4       ; 70 60 50 40 30 20 10 00
        punpcklqdq      xmm2,xmm5       ; 72 62 52 42 32 22 12 02
        punpckhqdq      xmm1,xmm4       ; 71 61 51 41 31 21 11 01
        punpckhqdq      xmm3,xmm5       ; 73 63 53 43 33 23 13 03
        movdqa          [%2+(0+%3*4)*16],xmm0
        movdqa          [%2+(1+%3*4)*16],xmm1
        movdqa          [%2+(2+%3*4)*16],xmm2
        movdqa          [%2+(3+%3*4)*16],xmm3
%endmacro

%macro  makeoddpart     3       ; output, table, shift
        movdqa  xmm1,[%2+0 ]    ;45l
        movdqa  xmm5,[%2+16]    ;67l
        movdqa  xmm3,xmm1       ;45h
        movdqa  xmm7,xmm5       ;67h
        pmaddwd xmm1,xmm2       ;[%2+0 ]
        pmaddwd xmm5,xmm0       ;[%2+16]
        paddd   xmm1,xmm5
%if %3=11
        movdqa  xmm5,[rounder_11]
%else
        movdqa  xmm5,[rounder_18]
%endif
        pmaddwd xmm3,xmm6       ;[%2+0 ]
        pmaddwd xmm7,xmm4       ;[%2+16]
        paddd   xmm3,xmm7
        paddd   xmm1,xmm5               ;rounder
        paddd   xmm3,xmm5               ;rounder
        psrad   xmm1,%3
        psrad   xmm3,%3
        packssdw        xmm1,xmm3
        movdqa  [%1],xmm1
%endmacro

%macro  makeoddpartlast     3       ; output, table, shift
        pmaddwd xmm2,[%2+0 ]
        pmaddwd xmm0,[%2+16]
        paddd   xmm2,xmm0
        pmaddwd xmm6,[%2+0 ]
        pmaddwd xmm4,[%2+16]
        paddd   xmm6,xmm4
        paddd   xmm2,xmm5               ;rounder
        paddd   xmm6,xmm5               ;rounder
        psrad   xmm2,%3
        psrad   xmm6,%3
        packssdw        xmm2,xmm6
        movdqa  [%1],xmm2
%endmacro

%macro  FDCT_1D         4       ; INP,OUTP,{0=first pass, 1=second pass}, shift={11,18}
        ;movdqa  xmm0,[%1+16*0] ;We do not load 0-3 values here for they
        ;movdqa  xmm1,[%1+16*1] ;stayed from transposition
        ;movdqa  xmm2,[%1+16*2]
        ;movdqa  xmm3,[%1+16*3]
%if %3<>0
        movdqa  xmm7,[rounder_5]
%endif
        paddsw  xmm0,[%1+16*7]  ;tmp0
        movdqa  xmm4,xmm0
        paddsw  xmm1,[%1+16*6]  ;tmp1
        movdqa  xmm5,xmm1
        paddsw  xmm2,[%1+16*5]  ;tmp2
        paddsw  xmm3,[%1+16*4]  ;tmp3
        paddsw  xmm0,xmm3       ;tmp10
%if %3<>0       ; In the second pass we must round and shift before
                ; the tmp10+tmp11 and tmp10-tmp11 calculation
                ; or the overflow will happen.
        paddsw  xmm0,xmm7               ;[rounder_5]
        psraw   xmm0,PASS1_BITS+3
%endif
        movdqa  xmm6,xmm0       ;tmp10
        paddsw  xmm1,xmm2       ;tmp11
        psubsw  xmm4,xmm3       ;tmp13
        psubsw  xmm5,xmm2       ;tmp12
%if %3=0
        paddsw  xmm0,xmm1
        psubsw  xmm6,xmm1
        psllw   xmm0,PASS1_BITS
        psllw   xmm6,PASS1_BITS
%else
        paddsw  xmm1,xmm7               ;[rounder_5]
        psraw   xmm1,PASS1_BITS+3
        paddsw  xmm0,xmm1
        psubsw  xmm6,xmm1
%endif
        movdqa          xmm1,xmm4
        movdqa          xmm2,xmm4
        movdqa  [%2+16*0],xmm0
        movdqa  [%2+16*4],xmm6
        movdqa  xmm7,[FIX_1]
        punpckhwd       xmm1,xmm5       ; 12 13 12 13 12 13 12 13 high part
        movdqa          xmm6,xmm1       ;high
        punpcklwd       xmm2,xmm5       ; 12 13 12 13 12 13 12 13 low part
        movdqa          xmm0,xmm2       ;low
        movdqa  xmm4,[FIX_2]
%if %4=11
        movdqa  xmm5,[rounder_11]
%else
        movdqa  xmm5,[rounder_18]
%endif
        pmaddwd xmm2,xmm7               ;[FIX_1]
        pmaddwd xmm1,xmm7               ;[FIX_1]
        pmaddwd xmm0,xmm4               ;[FIX_2]
        pmaddwd xmm6,xmm4               ;[FIX_2]
        paddd   xmm2,xmm5               ;rounder
        paddd   xmm1,xmm5               ;rounder
        psrad   xmm2,%4
        psrad   xmm1,%4
        packssdw        xmm2,xmm1
        movdqa          [%2+16*2],xmm2
        paddd   xmm0,xmm5               ;rounder
        paddd   xmm6,xmm5               ;rounder
        psrad   xmm0,%4
        psrad   xmm6,%4
        packssdw        xmm0,xmm6
        movdqa          [%2+16*6],xmm0

        movdqa  xmm0,[%1+16*0]
        movdqa  xmm1,[%1+16*1]
        movdqa  xmm2,[%1+16*2]
        movdqa  xmm3,[%1+16*3]

        psubsw  xmm0,[%1+16*7]  ;tmp7
        movdqa  xmm4,xmm0
        psubsw  xmm1,[%1+16*6]  ;tmp6
        psubsw  xmm2,[%1+16*5]  ;tmp5
        movdqa  xmm6,xmm2
        psubsw  xmm3,[%1+16*4]  ;tmp4
        punpckhwd       xmm4,xmm1       ; 6 7 6 7 6 7 6 7 high part
        punpcklwd       xmm0,xmm1       ; 6 7 6 7 6 7 6 7 low part
        punpckhwd       xmm6,xmm3       ; 4 5 4 5 4 5 4 5 high part
        punpcklwd       xmm2,xmm3       ; 4 5 4 5 4 5 4 5 low part

        makeoddpart     %2+16*1,FIX_3,%4
        makeoddpart     %2+16*3,FIX_4,%4
        makeoddpart     %2+16*5,FIX_5,%4
        makeoddpartlast %2+16*7,FIX_6,%4
%endmacro

cglobal fdct_sse2
;;void fdct_sse2(short *block);
fdct_sse2:
        push    eax
        push    ebx

        mov     eax,[esp+4+2*4]
        mov     ebx,buffer

        prefetchnta     [FIX_1]
        prefetchnta     [FIX_3]
        prefetchnta     [FIX_5]

        transpose8x8sse2        eax,ebx,1  ; First we transpose last 4 lines
        transpose8x8sse2        eax,ebx,0  ; Then the first 4 lines

        ;processing columns (became rows after transposition)
        FDCT_1D ebx,eax,0,CONST_BITS - PASS1_BITS

        transpose8x8sse2        eax,ebx,1
        transpose8x8sse2        eax,ebx,0

        ;now processing rows
        FDCT_1D ebx,eax,1,CONST_BITS + PASS1_BITS + 3

        pop     ebx
        pop     eax

        ret
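
As a reading aid, here is a minimal C driver sketch showing how the routines in this listing would typically be called; it is not part of fdct_mmx.asm. The fdct_sse2 prototype is taken from the ";;void fdct_sse2(short *block);" comment above; the matching fdct_mmx prototype, the test pattern, and the in-place behaviour (the second FDCT_1D pass writes its results back through the block pointer held in eax) are assumptions for illustration. The program must be linked against the object assembled from this file.

    #include <stdio.h>

    /* Prototypes for the routines exported by fdct_mmx.asm.  fdct_sse2 matches
     * the ";;void fdct_sse2(short *block);" comment in the listing; fdct_mmx is
     * assumed (hypothetically) to share the same signature. */
    extern void fdct_mmx(short *block);
    extern void fdct_sse2(short *block);

    int main(void)
    {
        /* One 8x8 block of 16-bit spatial samples, row-major, 16 bytes per row
         * (matching the [%1+16*n] row addressing used by the SSE2 macros). */
        short block[64];
        int i, row, col;

        for (i = 0; i < 64; i++)
            block[i] = (short)(((i % 8) - 4) * 16);   /* arbitrary test pattern */

        /* Forward DCT in place: block[] now holds the 8x8 coefficients,
         * with block[0] as the DC term. */
        fdct_sse2(block);            /* or fdct_mmx(block) on pre-SSE2 CPUs */

        for (row = 0; row < 8; row++) {
            for (col = 0; col < 8; col++)
                printf("%6d", block[row * 8 + col]);
            putchar('\n');
        }
        return 0;
    }

Whichever entry point is used, the data layout is the same: 64 contiguous 16-bit values forming one 8x8 block, transformed by two 1-D passes with a transpose before each, exactly as the fdct_sse2 body above shows.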
