⭐ 虫虫下载站

📄 cavsdsp_mmx.c

📁 Complete ffmpeg source code together with documentation written by the author; it also includes a project for building on Linux.
💻 C
📖 Page 1 of 2
/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006  Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX optimised DSP functions, based on H.264 optimisations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil.h"
#include "common.h"

DECLARE_ALIGNED_8(static const uint64_t,ff_pw_4 ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_8(static const uint64_t,ff_pw_5 ) = 0x0005000500050005ULL;
DECLARE_ALIGNED_8(static const uint64_t,ff_pw_7 ) = 0x0007000700070007ULL;
DECLARE_ALIGNED_8(static const uint64_t,ff_pw_42) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8(static const uint64_t,ff_pw_64) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8(static const uint64_t,ff_pw_96) = 0x0060006000600060ULL;

/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/

#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t "              \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a "     \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t "     \n\t" /* cgdh */

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
    asm volatile(
        "movq 112(%0), %%mm4  \n\t" /* mm4 = src7 */
        "movq  16(%0), %%mm5  \n\t" /* mm5 = src1 */
        "movq  80(%0), %%mm2  \n\t" /* mm2 = src5 */
        "movq  48(%0), %%mm7  \n\t" /* mm7 = src3 */
        "movq   %%mm4, %%mm0  \n\t"
        "movq   %%mm5, %%mm3  \n\t"
        "movq   %%mm2, %%mm6  \n\t"
        "movq   %%mm7, %%mm1  \n\t"
        "paddw  %%mm4, %%mm4  \n\t" /* mm4 = 2*src7 */
        "paddw  %%mm3, %%mm3  \n\t" /* mm3 = 2*src1 */
        "paddw  %%mm6, %%mm6  \n\t" /* mm6 = 2*src5 */
        "paddw  %%mm1, %%mm1  \n\t" /* mm1 = 2*src3 */
        "paddw  %%mm4, %%mm0  \n\t" /* mm0 = 3*src7 */
        "paddw  %%mm3, %%mm5  \n\t" /* mm5 = 3*src1 */
        "paddw  %%mm6, %%mm2  \n\t" /* mm2 = 3*src5 */
        "paddw  %%mm1, %%mm7  \n\t" /* mm7 = 3*src3 */
        "psubw  %%mm4, %%mm5  \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
        "paddw  %%mm6, %%mm7  \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
        "psubw  %%mm2, %%mm1  \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
        "paddw  %%mm0, %%mm3  \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */
        "movq   %%mm5, %%mm4  \n\t"
        "movq   %%mm7, %%mm6  \n\t"
        "movq   %%mm3, %%mm0  \n\t"
        "movq   %%mm1, %%mm2  \n\t"
        SUMSUB_BA( %%mm7, %%mm5 )   /* mm7 = a0 + a1  mm5 = a0 - a1 */
        "paddw  %%mm3, %%mm7  \n\t" /* mm7 = a0 + a1 + a3 */
"paddw  %%mm1, %%mm5  \n\t" /* mm5 = a0 - a1 + a2 */        "paddw  %%mm7, %%mm7  \n\t"        "paddw  %%mm5, %%mm5  \n\t"        "paddw  %%mm6, %%mm7  \n\t" /* mm7 = b4 */        "paddw  %%mm4, %%mm5  \n\t" /* mm5 = b5 */        SUMSUB_BA( %%mm1, %%mm3 )   /* mm1 = a3 + a2  mm3 = a3 - a2 */        "psubw  %%mm1, %%mm4  \n\t" /* mm4 = a0 - a2 - a3 */        "movq   %%mm4, %%mm1  \n\t" /* mm1 = a0 - a2 - a3 */        "psubw  %%mm6, %%mm3  \n\t" /* mm3 = a3 - a2 - a1 */        "paddw  %%mm1, %%mm1  \n\t"        "paddw  %%mm3, %%mm3  \n\t"        "psubw  %%mm2, %%mm1  \n\t" /* mm1 = b7 */        "paddw  %%mm0, %%mm3  \n\t" /* mm3 = b6 */        "movq  32(%0), %%mm2  \n\t" /* mm2 = src2 */        "movq  96(%0), %%mm6  \n\t" /* mm6 = src6 */        "movq   %%mm2, %%mm4  \n\t"        "movq   %%mm6, %%mm0  \n\t"        "psllw  $2,    %%mm4  \n\t" /* mm4 = 4*src2 */        "psllw  $2,    %%mm6  \n\t" /* mm6 = 4*src6 */        "paddw  %%mm4, %%mm2  \n\t" /* mm2 = 5*src2 */        "paddw  %%mm6, %%mm0  \n\t" /* mm0 = 5*src6 */        "paddw  %%mm2, %%mm2  \n\t"        "paddw  %%mm0, %%mm0  \n\t"        "psubw  %%mm0, %%mm4  \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */        "paddw  %%mm2, %%mm6  \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */        "movq    (%0), %%mm2  \n\t" /* mm2 = src0 */        "movq  64(%0), %%mm0  \n\t" /* mm0 = src4 */        SUMSUB_BA( %%mm0, %%mm2 )   /* mm0 = src0+src4  mm2 = src0-src4 */        "psllw  $3,    %%mm0  \n\t"        "psllw  $3,    %%mm2  \n\t"        "paddw  %1,    %%mm0  \n\t" /* add rounding bias */        "paddw  %1,    %%mm2  \n\t" /* add rounding bias */        SUMSUB_BA( %%mm6, %%mm0 )   /* mm6 = a4 + a6  mm0 = a4 - a6 */        SUMSUB_BA( %%mm4, %%mm2 )   /* mm4 = a5 + a7  mm2 = a5 - a7 */        SUMSUB_BA( %%mm7, %%mm6 )   /* mm7 = dst0  mm6 = dst7 */        SUMSUB_BA( %%mm5, %%mm4 )   /* mm5 = dst1  mm4 = dst6 */        SUMSUB_BA( %%mm3, %%mm2 )   /* mm3 = dst2  mm2 = dst5 */        SUMSUB_BA( %%mm1, %%mm0 )   /* mm1 = dst3  mm0 = dst4 */        :: "r"(block), "m"(bias)    );}static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride){    int i;    DECLARE_ALIGNED_8(int16_t, b2[64]);    for(i=0; i<2; i++){        DECLARE_ALIGNED_8(uint64_t, tmp);        cavs_idct8_1d(block+4*i, ff_pw_4);        asm volatile(            "psraw     $3, %%mm7  \n\t"            "psraw     $3, %%mm6  \n\t"            "psraw     $3, %%mm5  \n\t"            "psraw     $3, %%mm4  \n\t"            "psraw     $3, %%mm3  \n\t"            "psraw     $3, %%mm2  \n\t"            "psraw     $3, %%mm1  \n\t"            "psraw     $3, %%mm0  \n\t"            "movq   %%mm7,    %0   \n\t"            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )            "movq   %%mm0,  8(%1)  \n\t"            "movq   %%mm6, 24(%1)  \n\t"            "movq   %%mm7, 40(%1)  \n\t"            "movq   %%mm4, 56(%1)  \n\t"            "movq    %0,    %%mm7  \n\t"            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )            "movq   %%mm7,   (%1)  \n\t"            "movq   %%mm1, 16(%1)  \n\t"            "movq   %%mm0, 32(%1)  \n\t"            "movq   %%mm3, 48(%1)  \n\t"            : "=m"(tmp)            : "r"(b2+32*i)            : "memory"        );    }    for(i=0; i<2; i++){        cavs_idct8_1d(b2+4*i, ff_pw_64);        asm volatile(            "psraw     $7, %%mm7  \n\t"            "psraw     $7, %%mm6  \n\t"            "psraw     $7, %%mm5  \n\t"            "psraw     $7, %%mm4  \n\t"            "psraw     $7, %%mm3  \n\t"            "psraw     $7, %%mm2  \n\t"            "psraw     $7, 
%%mm1  \n\t"            "psraw     $7, %%mm0  \n\t"            "movq   %%mm7,    (%0)  \n\t"            "movq   %%mm5,  16(%0)  \n\t"            "movq   %%mm3,  32(%0)  \n\t"            "movq   %%mm1,  48(%0)  \n\t"            "movq   %%mm0,  64(%0)  \n\t"            "movq   %%mm2,  80(%0)  \n\t"            "movq   %%mm4,  96(%0)  \n\t"            "movq   %%mm6, 112(%0)  \n\t"            :: "r"(b2+4*i)            : "memory"        );    }    add_pixels_clamped_mmx(b2, dst, stride);    /* clear block */    asm volatile(            "pxor %%mm7, %%mm7   \n\t"            "movq %%mm7, (%0)    \n\t"            "movq %%mm7, 8(%0)   \n\t"            "movq %%mm7, 16(%0)  \n\t"            "movq %%mm7, 24(%0)  \n\t"            "movq %%mm7, 32(%0)  \n\t"            "movq %%mm7, 40(%0)  \n\t"            "movq %%mm7, 48(%0)  \n\t"            "movq %%mm7, 56(%0)  \n\t"            "movq %%mm7, 64(%0)  \n\t"            "movq %%mm7, 72(%0)  \n\t"            "movq %%mm7, 80(%0)  \n\t"            "movq %%mm7, 88(%0)  \n\t"            "movq %%mm7, 96(%0)  \n\t"            "movq %%mm7, 104(%0) \n\t"            "movq %%mm7, 112(%0) \n\t"            "movq %%mm7, 120(%0) \n\t"            :: "r" (block)    );}/***************************************************************************** * * motion compensation * ****************************************************************************//* vertical filter [-1 -2 96 42 -7  0]  */#define QPEL_CAVSV1(A,B,C,D,E,F,OP)      \        "movd (%0), "#F"            \n\t"\        "movq "#C", %%mm6           \n\t"\        "pmullw %5, %%mm6           \n\t"\        "movq "#D", %%mm7           \n\t"\        "pmullw %6, %%mm7           \n\t"\        "psllw $3, "#E"             \n\t"\        "psubw "#E", %%mm6          \n\t"\        "psraw $3, "#E"             \n\t"\        "paddw %%mm7, %%mm6         \n\t"\        "paddw "#E", %%mm6          \n\t"\        "paddw "#B", "#B"           \n\t"\        "pxor %%mm7, %%mm7          \n\t"\        "add %2, %0                 \n\t"\        "punpcklbw %%mm7, "#F"      \n\t"\        "psubw "#B", %%mm6          \n\t"\        "psraw $1, "#B"             \n\t"\        "psubw "#A", %%mm6          \n\t"\        "paddw %4, %%mm6            \n\t"\        "psraw $7, %%mm6            \n\t"\        "packuswb %%mm6, %%mm6      \n\t"\        OP(%%mm6, (%1), A, d)            \        "add %3, %1                 \n\t"/* vertical filter [ 0 -1  5  5 -1  0]  */#define QPEL_CAVSV2(A,B,C,D,E,F,OP)      \        "movd (%0), "#F"            \n\t"\        "movq "#C", %%mm6           \n\t"\        "paddw "#D", %%mm6          \n\t"\        "pmullw %5, %%mm6           \n\t"\        "add %2, %0                 \n\t"\        "punpcklbw %%mm7, "#F"      \n\t"\        "psubw "#B", %%mm6          \n\t"\
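For readers following the assembly, the plain-C sketch below spells out the 1-D butterfly that cavs_idct8_1d() computes, using the a0..a7 / b4..b7 names from the register comments. It is not part of cavsdsp_mmx.c and the helper name is made up for illustration; in the MMX version the bias/shift pair is ff_pw_4 with a later >>3 for the first pass and ff_pw_64 with >>7 for the second pass, applied in cavs_idct8_add_mmx().

#include <stdint.h>

/* Hypothetical scalar reference (not in the original file): one 1-D pass of
 * the CAVS 8x8 inverse transform over 8 coefficients spaced "stride" apart,
 * following the a0..a7 / b4..b7 names in the register comments above. */
static void cavs_idct8_1d_ref(int16_t *src, int stride, int bias, int shift)
{
    const int a0 = 3*src[1*stride] - 2*src[7*stride];
    const int a1 = 3*src[3*stride] + 2*src[5*stride];
    const int a2 = 2*src[3*stride] - 3*src[5*stride];
    const int a3 = 2*src[1*stride] + 3*src[7*stride];

    const int b4 = 2*(a0 + a1 + a3) + a1;
    const int b5 = 2*(a0 - a1 + a2) + a0;
    const int b6 = 2*(a3 - a2 - a1) + a3;
    const int b7 = 2*(a0 - a2 - a3) - a2;

    const int a7 = 4*src[2*stride] - 10*src[6*stride];
    const int a6 = 4*src[6*stride] + 10*src[2*stride];
    const int a4 = 8*(src[0*stride] + src[4*stride]) + bias;
    const int a5 = 8*(src[0*stride] - src[4*stride]) + bias;

    src[0*stride] = (a4 + a6 + b4) >> shift;   /* dst0 */
    src[1*stride] = (a5 + a7 + b5) >> shift;   /* dst1 */
    src[2*stride] = (a5 - a7 + b6) >> shift;   /* dst2 */
    src[3*stride] = (a4 - a6 + b7) >> shift;   /* dst3 */
    src[4*stride] = (a4 - a6 - b7) >> shift;   /* dst4 */
    src[5*stride] = (a5 - a7 - b6) >> shift;   /* dst5 */
    src[6*stride] = (a5 + a7 - b5) >> shift;   /* dst6 */
    src[7*stride] = (a4 + a6 - b4) >> shift;   /* dst7 */
}

The MMX routine computes this four columns at a time in packed 16-bit words, using TRANSPOSE4 between the two passes instead of strided addressing.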

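As a reading aid for the QPEL_CAVSV* macros above, here is a hypothetical scalar version of the two vertical 6-tap filters; it is not part of the original file. The taps are taken from the macro comments. For QPEL_CAVSV1 the >>7 shift is visible in the macro and the +64 rounding term (the %4 operand) is the natural choice for taps summing to 128; QPEL_CAVSV2's body is cut off by the page break, so its +4, >>3 normalisation is an assumption based on its taps summing to 8.

#include <stdint.h>

/* Saturate to the 0..255 range, standing in for the packuswb step. */
static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

/* QPEL_CAVSV1: taps [-1 -2 96 42 -7 0] over six vertical neighbours A..F. */
static uint8_t cavs_qpel_v1_ref(int A, int B, int C, int D, int E, int F)
{
    return clip_u8((-A - 2*B + 96*C + 42*D - 7*E + 0*F + 64) >> 7);
}

/* QPEL_CAVSV2: taps [0 -1 5 5 -1 0]; rounding assumed (+4, >>3). */
static uint8_t cavs_qpel_v2_ref(int A, int B, int C, int D, int E, int F)
{
    return clip_u8((0*A - B + 5*C + 5*D - E + 0*F + 4) >> 3);
}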