📄 h264dsp_mmx.c
字号:
/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/***********************************/
/* IDCT */

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

/* two parallel butterflies: a=a+b, b=b-a and c=c+d, d=d-c
 * (same double-then-subtract trick as SUMSUB_BA, so no temp register) */
#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

/* in/out (old values on the right-hand sides):
 * b = a + (b>>1), a = (a>>1) - b; t is a scratch register */
#define SUMSUBD2_AB( a, b, t ) \
    "movq "#b", "#t" \n\t"\
    "psraw $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

/* one 1-D pass of the 4-point H.264 transform:
 * s02/d02 form the even butterfly, s13/d13 the odd part; t = scratch */
#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )

/* add (p>>6) to four pixels at (%0) with unsigned saturation;
 * t = scratch register, z must contain zero */
#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t"\
    "movd (%0), "#t" \n\t"\
    "punpcklbw "#z", "#t" \n\t"\
    "paddsw "#t", "#p" \n\t"\
    "packuswb "#z", "#p" \n\t"\
    "movd "#p", (%0) \n\t"

/* 4x4 inverse transform of block[], result added into dst (MMX).
 * Two IDCT4_1D passes separated by an in-register TRANSPOSE4; ff_pw_32
 * pre-biases the second pass for the >>6 rounding in STORE_DIFF_4P.
 * NOTE(review): no emms here - presumably the caller restores the FPU
 * state after a batch of MMX calls; confirm against the call sites. */
static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
    :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )

        "paddw %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7 \n\t"
    :: "m"(ff_pw_32));

    asm volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((long)stride)
    );
}

/* One 1-D pass of the 8-point H.264 IDCT over half (four columns) of an
 * 8x8 int16 coefficient block: each movq loads 4 coefficients, rows are
 * 16 bytes apart. Results are left in mm0..mm7 for the caller to
 * transpose or scale/store. */
static inline void h264_idct8_1d(int16_t *block)
{
    asm volatile(
        /* odd part, built from rows 7,5,3,1 (offsets 112,80,48,16) */
        "movq 112(%0), %%mm7 \n\t"
        "movq 80(%0), %%mm5 \n\t"
        "movq 48(%0), %%mm3 \n\t"
        "movq 16(%0), %%mm1 \n\t"
        "movq %%mm7, %%mm4 \n\t"
        "movq %%mm3, %%mm6 \n\t"
        "movq %%mm5, %%mm0 \n\t"
        "movq %%mm7, %%mm2 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "psubw %%mm6, %%mm2 \n\t"
        "psubw %%mm4, %%mm0 \n\t"
        "psubw %%mm3, %%mm2 \n\t"
        "psubw %%mm3, %%mm0 \n\t"
        "paddw %%mm1, %%mm2 \n\t"
        "movq %%mm5, %%mm4 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "paddw %%mm5, %%mm4 \n\t"
        "paddw %%mm1, %%mm6 \n\t"
        "paddw %%mm7, %%mm4 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "psubw %%mm1, %%mm4 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm4, %%mm3 \n\t"
        "movq %%mm2, %%mm5 \n\t"
        "movq %%mm6, %%mm7 \n\t"
        "psraw $2, %%mm6 \n\t"
        "psraw $2, %%mm3 \n\t"
        "psraw $2, %%mm5 \n\t"
        "psraw $2, %%mm0 \n\t"
        "paddw %%mm6, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "psubw %%mm4, %%mm5 \n\t"
        "psubw %%mm0, %%mm7 \n\t"
        /* even part, rows 2 and 6 ... */
        "movq 32(%0), %%mm2 \n\t"
        "movq 96(%0), %%mm6 \n\t"
        "movq %%mm2, %%mm4 \n\t"
        "movq %%mm6, %%mm0 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "psubw %%mm0, %%mm4 \n\t"
        "paddw %%mm2, %%mm6 \n\t"
        /* ... then rows 0 and 4, and combine even with odd */
        "movq (%0), %%mm2 \n\t"
        "movq 64(%0), %%mm0 \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}

static void
/* 8x8 inverse transform of block[], result added into dst (MMX).
 * Two 1-D passes: the first transposes its output into the aligned
 * temp b2[], the second scales by >>6 and stores back to b2, which is
 * then added to dst by add_pixels_clamped_mmx. */
ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t __attribute__ ((aligned(8))) b2[64];

    /* pre-bias the DC coefficient so the final >>6 rounds to nearest */
    block[0] += 32;

    /* first 1-D pass, one half (4 columns) per iteration */
    for(i=0; i<2; i++){
        DECLARE_ALIGNED_8(uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        asm volatile(
            /* spill mm7 to memory: TRANSPOSE4 needs a scratch register */
            "movq %%mm7, %0 \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0, 8(%1) \n\t"
            "movq %%mm6, 24(%1) \n\t"
            "movq %%mm7, 40(%1) \n\t"
            "movq %%mm4, 56(%1) \n\t"
            "movq %0, %%mm7 \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7, (%1) \n\t"
            "movq %%mm1, 16(%1) \n\t"
            "movq %%mm0, 32(%1) \n\t"
            "movq %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    /* second 1-D pass, then scale down by 64 and store back into b2 */
    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        asm volatile(
            "psraw $6, %%mm7 \n\t"
            "psraw $6, %%mm6 \n\t"
            "psraw $6, %%mm5 \n\t"
            "psraw $6, %%mm4 \n\t"
            "psraw $6, %%mm3 \n\t"
            "psraw $6, %%mm2 \n\t"
            "psraw $6, %%mm1 \n\t"
            "psraw $6, %%mm0 \n\t"
            "movq %%mm7, (%0) \n\t"
            "movq %%mm5, 16(%0) \n\t"
            "movq %%mm3, 32(%0) \n\t"
            "movq %%mm1, 48(%0) \n\t"
            "movq %%mm0, 64(%0) \n\t"
            "movq %%mm2, 80(%0) \n\t"
            "movq %%mm4, 96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);
}

/* DC-only 4x4 iDCT + add (MMX2): all AC coefficients are zero, so just
 * add the rounded DC to the 4x4 destination area.
 * mm0 = dc splatted to 8 bytes, mm1 = -dc; the paddusb/psubusb pair
 * applies the signed delta with unsigned-saturating clamps. */
static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    asm volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}

static void
/* DC-only 8x8 iDCT + add (MMX2): adds the rounded DC to an 8x8 area,
 * processed as two groups of four 8-pixel rows. */
ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    /* mm0 = dc splatted to 8 bytes, mm1 = -dc (same scheme as the 4x4
     * dc version: paddusb then psubusb clamps in both directions) */
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
        asm volatile(
            "movq %0, %%mm2 \n\t"
            "movq %1, %%mm3 \n\t"
            "movq %2, %%mm4 \n\t"
            "movq %3, %%mm5 \n\t"
            "paddusb %%mm0, %%mm2 \n\t"
            "paddusb %%mm0, %%mm3 \n\t"
            "paddusb %%mm0, %%mm4 \n\t"
            "paddusb %%mm0, %%mm5 \n\t"
            "psubusb %%mm1, %%mm2 \n\t"
            "psubusb %%mm1, %%mm3 \n\t"
            "psubusb %%mm1, %%mm4 \n\t"
            "psubusb %%mm1, %%mm5 \n\t"
            "movq %%mm2, %0 \n\t"
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %3 \n\t"
            :"+m"(*(uint64_t*)(dst+0*stride)),
             "+m"(*(uint64_t*)(dst+1*stride)),
             "+m"(*(uint64_t*)(dst+2*stride)),
             "+m"(*(uint64_t*)(dst+3*stride))
        );
    }
}

/***********************************/
/* deblocking */

// out: o = nonzero bytes where |x-y| > a (computed as saturating |x-y|-a)
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"

// out: o = per-byte mask comparing |x-y| against a
// NOTE(review): pcmpeqb yields 0xFF where both saturated differences are
// equal, i.e. where |x-y| <= a - opposite polarity to DIFF_GT_MMX's
// "nonzero means greater"; confirm against the call sites (not visible
// in this excerpt).
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "psubusb "#a", "#t" \n\t"\
    "psubusb "#a", "#o" \n\t"\
    "pcmpeqb "#t", "#o" \n\t"\

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask (0xFF where none of the three tests exceeds
//      its threshold, since the por-accumulated "fail" bytes are compared
//      against zero at the end)
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"

// in: mm0=p1 mm1=p0
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
// The delta is built with pavgb rounding tricks, clipped to +/-tc via
// pminub against mm7, then applied with saturating add/sub.
// NOTE(review): the pb_3f parameter is not referenced in this body.
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    "movq %%mm1 , %%mm5 \n\t"\
    "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\
    "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\
    "pcmpeqb %%mm4 , %%mm4 \n\t"\
    "pxor %%mm4 , %%mm3 \n\t"\
    "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
    "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
    "pxor %%mm1 , %%mm4 \n\t"\
    "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
    "pavgb %%mm5 , %%mm3 \n\t"\
    "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\
    "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
    "psubusb %%mm3 , %%mm6 \n\t"\
    "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
    "pminub %%mm7 , %%mm6 \n\t"\
    "pminub %%mm7 , %%mm3 \n\t"\
    "psubusb %%mm6 , %%mm1 \n\t"\
    "psubusb %%mm3 , %%mm2 \n\t"\
    "paddusb %%mm3 , %%mm1 \n\t"\
    "paddusb %%mm6 , %%mm2 \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq %%mm1, "#tmp" \n\t"\
    "pavgb %%mm2, "#tmp" \n\t"\
    "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor "q2addr", "#tmp" \n\t"\
    "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq "#p1", "#tmp" \n\t"\
    "psubusb "#tc0", "#tmp" \n\t"\
    "paddusb "#p1", "#tc0" \n\t"\
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -