h264dsp_mmx.c.svn-base
/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil_mmx.h"

/***********************************/
/* IDCT */

#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq  "#b", "#t" \n\t"\
    "psraw  $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw  $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA  ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )

#define STORE_DIFF_4P( p, t, z ) \
    "psraw      $6,   "#p" \n\t"\
    "movd       (%0), "#t" \n\t"\
    "punpcklbw "#z",  "#t" \n\t"\
    "paddsw    "#t",  "#p" \n\t"\
    "packuswb  "#z",  "#p" \n\t"\
    "movd      "#p",  (%0) \n\t"

static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
    :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13  mm2=s02-s13  mm4=d02+d13  mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2  out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )

        "paddw %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13  mm3=s02-s13  mm4=d02+d13  mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7 \n\t"
    :: "m"(ff_pw_32));

    asm volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((long)stride)
    );
}

static inline void h264_idct8_1d(int16_t *block)
{
    asm volatile(
        "movq 112(%0), %%mm7 \n\t"
        "movq  80(%0), %%mm0 \n\t"
        "movq  48(%0), %%mm3 \n\t"
        "movq  16(%0), %%mm5 \n\t"

        "movq  %%mm0, %%mm4 \n\t"
        "movq  %%mm5, %%mm1 \n\t"
        "psraw $1,    %%mm4 \n\t"
        "psraw $1,    %%mm1 \n\t"
        "paddw %%mm0, %%mm4 \n\t"
        "paddw %%mm5, %%mm1 \n\t"
        "paddw %%mm7, %%mm4 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psubw %%mm5, %%mm4 \n\t"
        "paddw %%mm3, %%mm1 \n\t"

        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm3, %%mm0 \n\t"
        "paddw %%mm7, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "psraw $1,    %%mm3 \n\t"
        "psraw $1,    %%mm7 \n\t"
        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"

        "movq  %%mm4, %%mm3 \n\t"
        "movq  %%mm1, %%mm7 \n\t"
        "psraw $2,    %%mm1 \n\t"
        "psraw $2,    %%mm3 \n\t"
        "paddw %%mm5, %%mm3 \n\t"
        "psraw $2,    %%mm5 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psraw $2,    %%mm0 \n\t"
        "psubw %%mm4, %%mm5 \n\t"
        "psubw %%mm0, %%mm7 \n\t"

        "movq  32(%0), %%mm2 \n\t"
        "movq  96(%0), %%mm6 \n\t"
        "movq  %%mm2, %%mm4 \n\t"
        "movq  %%mm6, %%mm0 \n\t"
        "psraw $1,    %%mm4 \n\t"
        "psraw $1,    %%mm6 \n\t"
        "psubw %%mm0, %%mm4 \n\t"
        "paddw %%mm2, %%mm6 \n\t"

        "movq    (%0), %%mm2 \n\t"
        "movq  64(%0), %%mm0 \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}
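/* 8x8 IDCT + add, MMX path: h264_idct8_1d() above performs one 1-D pass of
 * the H.264 8x8 inverse transform on a 4-column half of the block held in
 * mm0..mm7.  The wrapper below runs it on each half, transposes into the
 * temporary b2, runs the second pass there, shifts the result down by 6 and
 * adds it to dst with clamping via add_pixels_clamped_mmx(). */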
static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t __attribute__ ((aligned(8))) b2[64];

    block[0] += 32;

    for(i=0; i<2; i++){
        DECLARE_ALIGNED_8(uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        asm volatile(
            "movq %%mm7,    %0   \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0,  8(%1)  \n\t"
            "movq %%mm6, 24(%1)  \n\t"
            "movq %%mm7, 40(%1)  \n\t"
            "movq %%mm4, 56(%1)  \n\t"
            "movq    %0, %%mm7   \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7,   (%1)  \n\t"
            "movq %%mm1, 16(%1)  \n\t"
            "movq %%mm0, 32(%1)  \n\t"
            "movq %%mm3, 48(%1)  \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        asm volatile(
            "psraw $6, %%mm7 \n\t"
            "psraw $6, %%mm6 \n\t"
            "psraw $6, %%mm5 \n\t"
            "psraw $6, %%mm4 \n\t"
            "psraw $6, %%mm3 \n\t"
            "psraw $6, %%mm2 \n\t"
            "psraw $6, %%mm1 \n\t"
            "psraw $6, %%mm0 \n\t"

            "movq %%mm7,    (%0) \n\t"
            "movq %%mm5,  16(%0) \n\t"
            "movq %%mm3,  32(%0) \n\t"
            "movq %%mm1,  48(%0) \n\t"
            "movq %%mm0,  64(%0) \n\t"
            "movq %%mm2,  80(%0) \n\t"
            "movq %%mm4,  96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);
}

#define STORE_DIFF_8P( p, d, t, z )\
        "movq      "#d", "#t" \n"\
        "psraw      $6,  "#p" \n"\
        "punpcklbw "#z", "#t" \n"\
        "paddsw    "#t", "#p" \n"\
        "packuswb  "#p", "#p" \n"\
        "movq      "#p", "#d" \n"

#define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\
        "movdqa    "#c", "#a" \n"\
        "movdqa    "#g", "#e" \n"\
        "psraw      $1,  "#c" \n"\
        "psraw      $1,  "#g" \n"\
        "psubw     "#e", "#c" \n"\
        "paddw     "#a", "#g" \n"\
        "movdqa    "#b", "#e" \n"\
        "psraw      $1,  "#e" \n"\
        "paddw     "#b", "#e" \n"\
        "paddw     "#d", "#e" \n"\
        "paddw     "#f", "#e" \n"\
        "movdqa    "#f", "#a" \n"\
        "psraw      $1,  "#a" \n"\
        "paddw     "#f", "#a" \n"\
        "paddw     "#h", "#a" \n"\
        "psubw     "#b", "#a" \n"\
        "psubw     "#d", "#b" \n"\
        "psubw     "#d", "#f" \n"\
        "paddw     "#h", "#b" \n"\
        "psubw     "#h", "#f" \n"\
        "psraw      $1,  "#d" \n"\
        "psraw      $1,  "#h" \n"\
        "psubw     "#d", "#b" \n"\
        "psubw     "#h", "#f" \n"\
        "movdqa    "#e", "#d" \n"\
        "movdqa    "#a", "#h" \n"\
        "psraw      $2,  "#d" \n"\
        "psraw      $2,  "#h" \n"\
        "paddw     "#f", "#d" \n"\
        "paddw     "#b", "#h" \n"\
        "psraw      $2,  "#f" \n"\
        "psraw      $2,  "#b" \n"\
        "psubw     "#f", "#e" \n"\
        "psubw     "#a", "#b" \n"\
        "movdqa 0x00(%1), "#a" \n"\
        "movdqa 0x40(%1), "#f" \n"\
        SUMSUB_BA(f, a)\
        SUMSUB_BA(g, f)\
        SUMSUB_BA(c, a)\
        SUMSUB_BA(e, g)\
        SUMSUB_BA(b, c)\
        SUMSUB_BA(h, a)\
        SUMSUB_BA(d, f)

static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
{
    asm volatile(
        "movdqa 0x10(%1), %%xmm1 \n"
        "movdqa 0x20(%1), %%xmm2 \n"
        "movdqa 0x30(%1), %%xmm3 \n"
        "movdqa 0x50(%1), %%xmm5 \n"
        "movdqa 0x60(%1), %%xmm6 \n"
        "movdqa 0x70(%1), %%xmm7 \n"
        H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
        TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1))
        "paddw %4, %%xmm4 \n"
        "movdqa %%xmm4, 0x00(%1) \n"
        "movdqa %%xmm2, 0x40(%1) \n"
        H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1)
        "movdqa %%xmm6, 0x60(%1) \n"
        "movdqa %%xmm7, 0x70(%1) \n"
        "pxor %%xmm7, %%xmm7 \n"
        STORE_DIFF_8P(%%xmm2, (%0),      %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm0, (%0,%2),   %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm3, (%0,%3),   %%xmm6, %%xmm7)
        "lea (%0,%2,4), %0 \n"
        STORE_DIFF_8P(%%xmm5, (%0),      %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm4, (%0,%2),   %%xmm6, %%xmm7)
        "movdqa 0x60(%1), %%xmm0 \n"
        "movdqa 0x70(%1), %%xmm1 \n"
        STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%3),   %%xmm6, %%xmm7)
        :"+r"(dst)
        :"r"(block), "r"((long)stride), "r"(3L*stride), "m"(ff_pw_32)
    );
}
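/* DC-only 4x4 case, MMX2 path: only block[0] is significant, so the rounded
 * DC value ((block[0] + 32) >> 6) is broadcast with pshufw and applied to the
 * 4x4 destination block.  Splitting it into a non-negative part (mm0) and a
 * negated part (mm1) lets paddusb/psubusb perform a saturated signed add on
 * packed bytes. */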
static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    asm volatile(
        "movd %0, %%mm0          \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1       \n\t"
        "psubw %%mm0, %%mm1      \n\t"
        "packuswb %%mm0, %%mm0   \n\t"
        "packuswb %%mm1, %%mm1   \n\t"
        ::"r"(dc)
    );
    asm volatile(
        "movd %0, %%mm2          \n\t"
        "movd %1, %%mm3          \n\t"
        "movd %2, %%mm4          \n\t"
        "movd %3, %%mm5          \n\t"
        "paddusb %%mm0, %%mm2    \n\t"
        "paddusb %%mm0, %%mm3    \n\t"
        "paddusb %%mm0, %%mm4    \n\t"
        "paddusb %%mm0, %%mm5    \n\t"
        "psubusb %%mm1, %%mm2    \n\t"
        "psubusb %%mm1, %%mm3    \n\t"