
📄 dsputilenc_mmx.c

📁 Complete source code of FFmpeg ported to Symbian
💻 C
📖 Page 1 of 3
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "dsputil_mmx.h"


static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "mov $-128, %%"REG_a"           \n\t"
        "pxor %%mm7, %%mm7              \n\t"
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%0), %%mm0               \n\t"
        "movq (%0, %2), %%mm2           \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "movq %%mm0, (%1, %%"REG_a")    \n\t"
        "movq %%mm1, 8(%1, %%"REG_a")   \n\t"
        "movq %%mm2, 16(%1, %%"REG_a")  \n\t"
        "movq %%mm3, 24(%1, %%"REG_a")  \n\t"
        "add %3, %0                     \n\t"
        "add $32, %%"REG_a"             \n\t"
        "js 1b                          \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*2)
        : "%"REG_a
    );
}

static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7              \n\t"
        "mov $-128, %%"REG_a"           \n\t"
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%0), %%mm0               \n\t"
        "movq (%1), %%mm2               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "psubw %%mm2, %%mm0             \n\t"
        "psubw %%mm3, %%mm1             \n\t"
        "movq %%mm0, (%2, %%"REG_a")    \n\t"
        "movq %%mm1, 8(%2, %%"REG_a")   \n\t"
        "add %3, %0                     \n\t"
        "add %3, %1                     \n\t"
        "add $16, %%"REG_a"             \n\t"
        "jnz 1b                         \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((x86_reg)stride)
        : "%"REG_a
    );
}

static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    x86_reg index= -line_size*h;

    asm volatile(
                "pxor %%mm7, %%mm7              \n\t"
                "pxor %%mm6, %%mm6              \n\t"
                "1:                             \n\t"
                "movq (%2, %1), %%mm0           \n\t"
                "movq (%2, %1), %%mm1           \n\t"
                "movq 8(%2, %1), %%mm2          \n\t"
                "movq 8(%2, %1), %%mm3          \n\t"
                "punpcklbw %%mm7, %%mm0         \n\t"
                "punpckhbw %%mm7, %%mm1         \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
                "punpckhbw %%mm7, %%mm3         \n\t"
                "paddw %%mm0, %%mm1             \n\t"
                "paddw %%mm2, %%mm3             \n\t"
                "paddw %%mm1, %%mm3             \n\t"
                "paddw %%mm3, %%mm6             \n\t"
                "add %3, %1                     \n\t"
                " js 1b                         \n\t"
                "movq %%mm6, %%mm5              \n\t"
                "psrlq $32, %%mm6               \n\t"
                "paddw %%mm5, %%mm6             \n\t"
                "movq %%mm6, %%mm5              \n\t"
                "psrlq $16, %%mm6               \n\t"
                "paddw %%mm5, %%mm6             \n\t"
                "movd %%mm6, %0                 \n\t"
                "andl $0xFFFF, %0               \n\t"
                : "=&r" (sum), "+r" (index)
                : "r" (pix - index), "r" ((x86_reg)line_size)
        );

        return sum;
}

static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
  asm volatile (
      "movl $16,%%ecx\n"
      "pxor %%mm0,%%mm0\n"
      "pxor %%mm7,%%mm7\n"
      "1:\n"
      "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
      "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */

      "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */

      "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
      "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */

      "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
      "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
      "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */

      "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
      "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */

      "pmaddwd %%mm3,%%mm3\n"
      "pmaddwd %%mm4,%%mm4\n"

      "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                          pix2^2+pix3^2+pix6^2+pix7^2) */
      "paddd %%mm3,%%mm4\n"
      "paddd %%mm2,%%mm7\n"

      "add %2, %0\n"
      "paddd %%mm4,%%mm7\n"
      "dec %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%1\n"
      : "+r" (pix), "=r"(tmp)
      : "r" ((x86_reg)line_size)
      : "%ecx" );
    return tmp;
}

static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
  asm volatile (
      "movl %4,%%ecx\n"
      "shr $1,%%ecx\n"
      "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
      "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
      "1:\n"
      "movq (%0),%%mm1\n"       /* mm1 = pix1[0][0-7] */
      "movq (%1),%%mm2\n"       /* mm2 = pix2[0][0-7] */
      "movq (%0,%3),%%mm3\n"    /* mm3 = pix1[1][0-7] */
      "movq (%1,%3),%%mm4\n"    /* mm4 = pix2[1][0-7] */

      /* todo: mm1-mm2, mm3-mm4 */
      /* algo: subtract mm1 from mm2 with saturation and vice versa */
      /*       OR the results to get absolute difference */
      "movq %%mm1,%%mm5\n"
      "movq %%mm3,%%mm6\n"
      "psubusb %%mm2,%%mm1\n"
      "psubusb %%mm4,%%mm3\n"
      "psubusb %%mm5,%%mm2\n"
      "psubusb %%mm6,%%mm4\n"

      "por %%mm1,%%mm2\n"
      "por %%mm3,%%mm4\n"

      /* now convert to 16-bit vectors so we can square them */
      "movq %%mm2,%%mm1\n"
      "movq %%mm4,%%mm3\n"

      "punpckhbw %%mm0,%%mm2\n"
      "punpckhbw %%mm0,%%mm4\n"
      "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
      "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

      "pmaddwd %%mm2,%%mm2\n"
      "pmaddwd %%mm4,%%mm4\n"
      "pmaddwd %%mm1,%%mm1\n"
      "pmaddwd %%mm3,%%mm3\n"

      "lea (%0,%3,2), %0\n"     /* pix1 += 2*line_size */
      "lea (%1,%3,2), %1\n"     /* pix2 += 2*line_size */

      "paddd %%mm2,%%mm1\n"
      "paddd %%mm4,%%mm3\n"
      "paddd %%mm1,%%mm7\n"
      "paddd %%mm3,%%mm7\n"

      "decl %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" ((x86_reg)line_size) , "m" (h)
      : "%ecx");
    return tmp;
}

static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
      "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
      "1:\n"
      "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
      "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
      "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
      "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */

      /* todo: mm1-mm2, mm3-mm4 */
      /* algo: subtract mm1 from mm2 with saturation and vice versa */
      /*       OR the results to get absolute difference */
      "movq %%mm1,%%mm5\n"
      "movq %%mm3,%%mm6\n"
      "psubusb %%mm2,%%mm1\n"
      "psubusb %%mm4,%%mm3\n"
      "psubusb %%mm5,%%mm2\n"
      "psubusb %%mm6,%%mm4\n"

      "por %%mm1,%%mm2\n"
      "por %%mm3,%%mm4\n"

      /* now convert to 16-bit vectors so we can square them */
      "movq %%mm2,%%mm1\n"
      "movq %%mm4,%%mm3\n"

      "punpckhbw %%mm0,%%mm2\n"
      "punpckhbw %%mm0,%%mm4\n"
      "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
      "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

      "pmaddwd %%mm2,%%mm2\n"
      "pmaddwd %%mm4,%%mm4\n"
      "pmaddwd %%mm1,%%mm1\n"
      "pmaddwd %%mm3,%%mm3\n"

      "add %3,%0\n"
      "add %3,%1\n"

      "paddd %%mm2,%%mm1\n"
      "paddd %%mm4,%%mm3\n"
      "paddd %%mm1,%%mm7\n"
      "paddd %%mm3,%%mm7\n"

      "decl %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" ((x86_reg)line_size) , "m" (h)
      : "%ecx");
    return tmp;
}

static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
  asm volatile (
      "shr $1,%2\n"
      "pxor %%xmm0,%%xmm0\n"    /* mm0 = 0 */
      "pxor %%xmm7,%%xmm7\n"    /* mm7 holds the sum */
      "1:\n"
      "movdqu (%0),%%xmm1\n"    /* mm1 = pix1[0][0-15] */
      "movdqu (%1),%%xmm2\n"    /* mm2 = pix2[0][0-15] */
      "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
      "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */

      /* todo: mm1-mm2, mm3-mm4 */
      /* algo: subtract mm1 from mm2 with saturation and vice versa */
      /*       OR the results to get absolute difference */
      "movdqa %%xmm1,%%xmm5\n"
      "movdqa %%xmm3,%%xmm6\n"
      "psubusb %%xmm2,%%xmm1\n"
      "psubusb %%xmm4,%%xmm3\n"
      "psubusb %%xmm5,%%xmm2\n"
      "psubusb %%xmm6,%%xmm4\n"

      "por %%xmm1,%%xmm2\n"
      "por %%xmm3,%%xmm4\n"

      /* now convert to 16-bit vectors so we can square them */
      "movdqa %%xmm2,%%xmm1\n"
      "movdqa %%xmm4,%%xmm3\n"

      "punpckhbw %%xmm0,%%xmm2\n"
      "punpckhbw %%xmm0,%%xmm4\n"
      "punpcklbw %%xmm0,%%xmm1\n"  /* mm1 now spread over (mm1,mm2) */
      "punpcklbw %%xmm0,%%xmm3\n"  /* mm4 now spread over (mm3,mm4) */

      "pmaddwd %%xmm2,%%xmm2\n"
      "pmaddwd %%xmm4,%%xmm4\n"
      "pmaddwd %%xmm1,%%xmm1\n"
      "pmaddwd %%xmm3,%%xmm3\n"

      "lea (%0,%4,2), %0\n"        /* pix1 += 2*line_size */
      "lea (%1,%4,2), %1\n"        /* pix2 += 2*line_size */

      "paddd %%xmm2,%%xmm1\n"
      "paddd %%xmm4,%%xmm3\n"
      "paddd %%xmm1,%%xmm7\n"
      "paddd %%xmm3,%%xmm7\n"

      "decl %2\n"
      "jnz 1b\n"

      "movdqa %%xmm7,%%xmm1\n"
      "psrldq $8, %%xmm7\n"        /* shift hi qword to lo */
      "paddd %%xmm1,%%xmm7\n"
      "movdqa %%xmm7,%%xmm1\n"
      "psrldq $4, %%xmm7\n"        /* shift hi dword to lo */
      "paddd %%xmm1,%%xmm7\n"
      "movd %%xmm7,%3\n"
      : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
      : "r" ((x86_reg)line_size));
    return tmp;
}

static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm7,%%mm7\n"
      "pxor %%mm6,%%mm6\n"

      "movq (%0),%%mm0\n"
      "movq %%mm0, %%mm1\n"
      "psllq $8, %%mm0\n"
      "psrlq $8, %%mm1\n"
      "psrlq $8, %%mm0\n"
      "movq %%mm0, %%mm2\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm0\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm2\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm0\n"
      "psubw %%mm3, %%mm2\n"

      "add %2,%0\n"

      "movq (%0),%%mm4\n"
      "movq %%mm4, %%mm1\n"
      "psllq $8, %%mm4\n"
      "psrlq $8, %%mm1\n"
      "psrlq $8, %%mm4\n"
      "movq %%mm4, %%mm5\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm4\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm5\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm4\n"
      "psubw %%mm3, %%mm5\n"
      "psubw %%mm4, %%mm0\n"
      "psubw %%mm5, %%mm2\n"
      "pxor %%mm3, %%mm3\n"
      "pxor %%mm1, %%mm1\n"
      "pcmpgtw %%mm0, %%mm3\n\t"
      "pcmpgtw %%mm2, %%mm1\n\t"
      "pxor %%mm3, %%mm0\n"
      "pxor %%mm1, %%mm2\n"
      "psubw %%mm3, %%mm0\n"
      "psubw %%mm1, %%mm2\n"
      "paddw %%mm0, %%mm2\n"
      "paddw %%mm2, %%mm6\n"

      "add %2,%0\n"
      "1:\n"

      "movq (%0),%%mm0\n"
      "movq %%mm0, %%mm1\n"
      "psllq $8, %%mm0\n"
      "psrlq $8, %%mm1\n"
      "psrlq $8, %%mm0\n"
      "movq %%mm0, %%mm2\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm0\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm2\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm0\n"
      "psubw %%mm3, %%mm2\n"
      "psubw %%mm0, %%mm4\n"
      "psubw %%mm2, %%mm5\n"
      "pxor %%mm3, %%mm3\n"
      "pxor %%mm1, %%mm1\n"
      "pcmpgtw %%mm4, %%mm3\n\t"
      "pcmpgtw %%mm5, %%mm1\n\t"
      "pxor %%mm3, %%mm4\n"
      "pxor %%mm1, %%mm5\n"
      "psubw %%mm3, %%mm4\n"
      "psubw %%mm1, %%mm5\n"
      "paddw %%mm4, %%mm5\n"
      "paddw %%mm5, %%mm6\n"

      "add %2,%0\n"

      "movq (%0),%%mm4\n"
      "movq %%mm4, %%mm1\n"
      "psllq $8, %%mm4\n"
      "psrlq $8, %%mm1\n"
      "psrlq $8, %%mm4\n"
      "movq %%mm4, %%mm5\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm4\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm5\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm4\n"
      "psubw %%mm3, %%mm5\n"
      "psubw %%mm4, %%mm0\n"
      "psubw %%mm5, %%mm2\n"
      "pxor %%mm3, %%mm3\n"
      "pxor %%mm1, %%mm1\n"
      "pcmpgtw %%mm0, %%mm3\n\t"
      "pcmpgtw %%mm2, %%mm1\n\t"
      "pxor %%mm3, %%mm0\n"
      "pxor %%mm1, %%mm2\n"
      "psubw %%mm3, %%mm0\n"
      "psubw %%mm1, %%mm2\n"
      "paddw %%mm0, %%mm2\n"
      "paddw %%mm2, %%mm6\n"

      "add %2,%0\n"
      "subl $2, %%ecx\n"
      " jnz 1b\n"

      "movq %%mm6, %%mm0\n"
      "punpcklwd %%mm7,%%mm0\n"
      "punpckhwd %%mm7,%%mm6\n"
      "paddd %%mm0, %%mm6\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddd %%mm6,%%mm0\n"
