
swscale_template.c

From: VLC player source code for uClinux
Language: C
Page 1 of 5
/*
    Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW
#undef EMMS
#undef SFENCE

#ifdef HAVE_3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS     "femms"
#else
#define EMMS     "emms"
#endif

#ifdef HAVE_3DNOW
#define PREFETCH  "prefetch"
#define PREFETCHW "prefetchw"
#elif defined ( HAVE_MMX2 )
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
#define PREFETCH "/nop"
#define PREFETCHW "/nop"
#endif

#ifdef HAVE_MMX2
#define SFENCE "sfence"
#else
#define SFENCE "/nop"
#endif

#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif

#ifdef HAVE_ALTIVEC
#include "swscale_altivec_template.c"
#endif

#define YSCALEYUV2YV12X(x, offset) \
			"xorl %%eax, %%eax		\n\t"\
			"movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
			"movq %%mm3, %%mm4		\n\t"\
			"leal " offset "(%0), %%edx	\n\t"\
			"movl (%%edx), %%esi		\n\t"\
			".balign 16			\n\t" /* FIXME Unroll? */\
			"1:				\n\t"\
			"movq 8(%%edx), %%mm0		\n\t" /* filterCoeff */\
			"movq " #x "(%%esi, %%eax, 2), %%mm2	\n\t" /* srcData */\
			"movq 8+" #x "(%%esi, %%eax, 2), %%mm5	\n\t" /* srcData */\
			"addl $16, %%edx		\n\t"\
			"movl (%%edx), %%esi		\n\t"\
			"testl %%esi, %%esi		\n\t"\
			"pmulhw %%mm0, %%mm2		\n\t"\
			"pmulhw %%mm0, %%mm5		\n\t"\
			"paddw %%mm2, %%mm3		\n\t"\
			"paddw %%mm5, %%mm4		\n\t"\
			" jnz 1b			\n\t"\
			"psraw $3, %%mm3		\n\t"\
			"psraw $3, %%mm4		\n\t"\
			"packuswb %%mm4, %%mm3		\n\t"\
			MOVNTQ(%%mm3, (%1, %%eax))\
			"addl $8, %%eax			\n\t"\
			"cmpl %2, %%eax			\n\t"\
			"movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
			"movq %%mm3, %%mm4		\n\t"\
			"leal " offset "(%0), %%edx	\n\t"\
			"movl (%%edx), %%esi		\n\t"\
			"jb 1b				\n\t"

#define YSCALEYUV2YV121 \
			"movl %2, %%eax			\n\t"\
			".balign 16			\n\t" /* FIXME Unroll? */\
			"1:				\n\t"\
			"movq (%0, %%eax, 2), %%mm0	\n\t"\
			"movq 8(%0, %%eax, 2), %%mm1	\n\t"\
			"psraw $7, %%mm0		\n\t"\
			"psraw $7, %%mm1		\n\t"\
			"packuswb %%mm1, %%mm0		\n\t"\
			MOVNTQ(%%mm0, (%1, %%eax))\
			"addl $8, %%eax			\n\t"\
			"jnc 1b				\n\t"

/*
			:: "m" (-lumFilterSize), "m" (-chrFilterSize),
			   "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
			   "r" (dest), "m" (dstW),
			   "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
			: "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
#define YSCALEYUV2PACKEDX \
		"xorl %%eax, %%eax		\n\t"\
		".balign 16			\n\t"\
		"nop				\n\t"\
		"1:				\n\t"\
		"leal "CHR_MMX_FILTER_OFFSET"(%0), %%edx	\n\t"\
		"movl (%%edx), %%esi		\n\t"\
		"movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
		"movq %%mm3, %%mm4		\n\t"\
		".balign 16			\n\t"\
		"2:				\n\t"\
		"movq 8(%%edx), %%mm0		\n\t" /* filterCoeff */\
		"movq (%%esi, %%eax), %%mm2	\n\t" /* UsrcData */\
		"movq 4096(%%esi, %%eax), %%mm5	\n\t" /* VsrcData */\
		"addl $16, %%edx		\n\t"\
		"movl (%%edx), %%esi		\n\t"\
		"pmulhw %%mm0, %%mm2		\n\t"\
		"pmulhw %%mm0, %%mm5		\n\t"\
		"paddw %%mm2, %%mm3		\n\t"\
		"paddw %%mm5, %%mm4		\n\t"\
		"testl %%esi, %%esi		\n\t"\
		" jnz 2b			\n\t"\
\
		"leal "LUM_MMX_FILTER_OFFSET"(%0), %%edx	\n\t"\
		"movl (%%edx), %%esi		\n\t"\
		"movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
		"movq %%mm1, %%mm7		\n\t"\
		".balign 16			\n\t"\
		"2:				\n\t"\
		"movq 8(%%edx), %%mm0		\n\t" /* filterCoeff */\
		"movq (%%esi, %%eax, 2), %%mm2	\n\t" /* Y1srcData */\
		"movq 8(%%esi, %%eax, 2), %%mm5	\n\t" /* Y2srcData */\
		"addl $16, %%edx		\n\t"\
		"movl (%%edx), %%esi		\n\t"\
		"pmulhw %%mm0, %%mm2		\n\t"\
		"pmulhw %%mm0, %%mm5		\n\t"\
		"paddw %%mm2, %%mm1		\n\t"\
		"paddw %%mm5, %%mm7		\n\t"\
		"testl %%esi, %%esi		\n\t"\
		" jnz 2b			\n\t"\

#define YSCALEYUV2RGBX \
		YSCALEYUV2PACKEDX\
		"psubw "U_OFFSET"(%0), %%mm3	\n\t" /* (U-128)8*/\
		"psubw "V_OFFSET"(%0), %%mm4	\n\t" /* (V-128)8*/\
		"movq %%mm3, %%mm2		\n\t" /* (U-128)8*/\
		"movq %%mm4, %%mm5		\n\t" /* (V-128)8*/\
		"pmulhw "UG_COEFF"(%0), %%mm3	\n\t"\
		"pmulhw "VG_COEFF"(%0), %%mm4	\n\t"\
	/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
		"pmulhw "UB_COEFF"(%0), %%mm2	\n\t"\
		"pmulhw "VR_COEFF"(%0), %%mm5	\n\t"\
		"psubw "Y_OFFSET"(%0), %%mm1	\n\t" /* 8(Y-16)*/\
		"psubw "Y_OFFSET"(%0), %%mm7	\n\t" /* 8(Y-16)*/\
		"pmulhw "Y_COEFF"(%0), %%mm1	\n\t"\
		"pmulhw "Y_COEFF"(%0), %%mm7	\n\t"\
	/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
		"paddw %%mm3, %%mm4		\n\t"\
		"movq %%mm2, %%mm0		\n\t"\
		"movq %%mm5, %%mm6		\n\t"\
		"movq %%mm4, %%mm3		\n\t"\
		"punpcklwd %%mm2, %%mm2		\n\t"\
		"punpcklwd %%mm5, %%mm5		\n\t"\
		"punpcklwd %%mm4, %%mm4		\n\t"\
		"paddw %%mm1, %%mm2		\n\t"\
		"paddw %%mm1, %%mm5		\n\t"\
		"paddw %%mm1, %%mm4		\n\t"\
		"punpckhwd %%mm0, %%mm0		\n\t"\
		"punpckhwd %%mm6, %%mm6		\n\t"\
		"punpckhwd %%mm3, %%mm3		\n\t"\
		"paddw %%mm7, %%mm0		\n\t"\
		"paddw %%mm7, %%mm6		\n\t"\
		"paddw %%mm7, %%mm3		\n\t"\
		/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
		"packuswb %%mm0, %%mm2		\n\t"\
		"packuswb %%mm6, %%mm5		\n\t"\
		"packuswb %%mm3, %%mm4		\n\t"\
		"pxor %%mm7, %%mm7		\n\t"

#if 0
#define FULL_YSCALEYUV2RGB \
		"pxor %%mm7, %%mm7		\n\t"\
		"movd %6, %%mm6			\n\t" /*yalpha1*/\
		"punpcklwd %%mm6, %%mm6		\n\t"\
		"punpcklwd %%mm6, %%mm6		\n\t"\
		"movd %7, %%mm5			\n\t" /*uvalpha1*/\
		"punpcklwd %%mm5, %%mm5		\n\t"\
		"punpcklwd %%mm5, %%mm5		\n\t"\
		"xorl %%eax, %%eax		\n\t"\
		".balign 16			\n\t"\
		"1:				\n\t"\
		"movq (%0, %%eax, 2), %%mm0	\n\t" /*buf0[eax]*/\
		"movq (%1, %%eax, 2), %%mm1	\n\t" /*buf1[eax]*/\
		"movq (%2, %%eax,2), %%mm2	\n\t" /* uvbuf0[eax]*/\
		"movq (%3, %%eax,2), %%mm3	\n\t" /* uvbuf1[eax]*/\
		"psubw %%mm1, %%mm0		\n\t" /* buf0[eax] - buf1[eax]*/\
		"psubw %%mm3, %%mm2		\n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
		"pmulhw %%mm6, %%mm0		\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
		"pmulhw %%mm5, %%mm2		\n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
		"psraw $4, %%mm1		\n\t" /* buf0[eax] - buf1[eax] >>4*/\
		"movq 4096(%2, %%eax,2), %%mm4	\n\t" /* uvbuf0[eax+2048]*/\
		"psraw $4, %%mm3		\n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
		"paddw %%mm0, %%mm1		\n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
		"movq 4096(%3, %%eax,2), %%mm0	\n\t" /* uvbuf1[eax+2048]*/\
		"paddw %%mm2, %%mm3		\n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
		"psubw %%mm0, %%mm4		\n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
		"psubw "MANGLE(w80)", %%mm1	\n\t" /* 8(Y-16)*/\
		"psubw "MANGLE(w400)", %%mm3	\n\t" /* 8(U-128)*/\
		"pmulhw "MANGLE(yCoeff)", %%mm1	\n\t"\
\
\
		"pmulhw %%mm5, %%mm4		\n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
		"movq %%mm3, %%mm2		\n\t" /* (U-128)8*/\
		"pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
		"psraw $4, %%mm0		\n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
		"pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
		"paddw %%mm4, %%mm0		\n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
		"psubw "MANGLE(w400)", %%mm0	\n\t" /* (V-128)8*/\
\
\
		"movq %%mm0, %%mm4		\n\t" /* (V-128)8*/\
		"pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
		"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
		"paddw %%mm1, %%mm3		\n\t" /* B*/\
		"paddw %%mm1, %%mm0		\n\t" /* R*/\
		"packuswb %%mm3, %%mm3		\n\t"\
\
		"packuswb %%mm0, %%mm0		\n\t"\
		"paddw %%mm4, %%mm2		\n\t"\
		"paddw %%mm2, %%mm1		\n\t" /* G*/\
\
		"packuswb %%mm1, %%mm1		\n\t"
#endif

#define YSCALEYUV2PACKED(index, c) \
		"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
		"movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
		"psraw $3, %%mm0		\n\t"\
		"psraw $3, %%mm1		\n\t"\
		"movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
		"movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
		"xorl "#index", "#index"		\n\t"\
		".balign 16			\n\t"\
		"1:				\n\t"\
		"movq (%2, "#index"), %%mm2	\n\t" /* uvbuf0[eax]*/\
		"movq (%3, "#index"), %%mm3	\n\t" /* uvbuf1[eax]*/\
		"movq 4096(%2, "#index"), %%mm5	\n\t" /* uvbuf0[eax+2048]*/\
		"movq 4096(%3, "#index"), %%mm4	\n\t" /* uvbuf1[eax+2048]*/\
		"psubw %%mm3, %%mm2		\n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
		"psubw %%mm4, %%mm5		\n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
		"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
		"pmulhw %%mm0, %%mm2		\n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
		"pmulhw %%mm0, %%mm5		\n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
		"psraw $7, %%mm3		\n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
		"psraw $7, %%mm4		\n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
		"paddw %%mm2, %%mm3		\n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
		"paddw %%mm5, %%mm4		\n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
		"movq (%0, "#index", 2), %%mm0	\n\t" /*buf0[eax]*/\
		"movq (%1, "#index", 2), %%mm1	\n\t" /*buf1[eax]*/\
		"movq 8(%0, "#index", 2), %%mm6	\n\t" /*buf0[eax]*/\
		"movq 8(%1, "#index", 2), %%mm7	\n\t" /*buf1[eax]*/\
		"psubw %%mm1, %%mm0		\n\t" /* buf0[eax] - buf1[eax]*/\
		"psubw %%mm7, %%mm6		\n\t" /* buf0[eax] - buf1[eax]*/\
		"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
		"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
		"psraw $7, %%mm1		\n\t" /* buf0[eax] - buf1[eax] >>4*/\
		"psraw $7, %%mm7		\n\t" /* buf0[eax] - buf1[eax] >>4*/\
		"paddw %%mm0, %%mm1		\n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
		"paddw %%mm6, %%mm7		\n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define YSCALEYUV2RGB(index, c) \
		"xorl "#index", "#index"	\n\t"\
		".balign 16			\n\t"\
		"1:				\n\t"\
		"movq (%2, "#index"), %%mm2	\n\t" /* uvbuf0[eax]*/\
		"movq (%3, "#index"), %%mm3	\n\t" /* uvbuf1[eax]*/\
		"movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
		"movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
		"psubw %%mm3, %%mm2		\n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
		"psubw %%mm4, %%mm5		\n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
		"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
		"pmulhw %%mm0, %%mm2		\n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
		"pmulhw %%mm0, %%mm5		\n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
		"psraw $4, %%mm3		\n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
		"psraw $4, %%mm4		\n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
		"paddw %%mm2, %%mm3		\n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
		"paddw %%mm5, %%mm4		\n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
		"psubw "U_OFFSET"("#c"), %%mm3	\n\t" /* (U-128)8*/\
		"psubw "V_OFFSET"("#c"), %%mm4	\n\t" /* (V-128)8*/\
		"movq %%mm3, %%mm2		\n\t" /* (U-128)8*/\
		"movq %%mm4, %%mm5		\n\t" /* (V-128)8*/\
		"pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
		"pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
	/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
		"movq (%0, "#index", 2), %%mm0	\n\t" /*buf0[eax]*/\
		"movq (%1, "#index", 2), %%mm1	\n\t" /*buf1[eax]*/\
		"movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
		"movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
		"psubw %%mm1, %%mm0		\n\t" /* buf0[eax] - buf1[eax]*/\
		"psubw %%mm7, %%mm6		\n\t" /* buf0[eax] - buf1[eax]*/\
		"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
		"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
		"psraw $4, %%mm1		\n\t" /* buf0[eax] - buf1[eax] >>4*/\
		"psraw $4, %%mm7		\n\t" /* buf0[eax] - buf1[eax] >>4*/\
		"paddw %%mm0, %%mm1		\n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
		"paddw %%mm6, %%mm7		\n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
		"pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
		"pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
		"psubw "Y_OFFSET"("#c"), %%mm1	\n\t" /* 8(Y-16)*/\
		"psubw "Y_OFFSET"("#c"), %%mm7	\n\t" /* 8(Y-16)*/\
		"pmulhw "Y_COEFF"("#c"), %%mm1	\n\t"\
		"pmulhw "Y_COEFF"("#c"), %%mm7	\n\t"\
	/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
		"paddw %%mm3, %%mm4		\n\t"\
		"movq %%mm2, %%mm0		\n\t"\
		"movq %%mm5, %%mm6		\n\t"\
		"movq %%mm4, %%mm3		\n\t"\
		"punpcklwd %%mm2, %%mm2		\n\t"\
		"punpcklwd %%mm5, %%mm5		\n\t"\
		"punpcklwd %%mm4, %%mm4		\n\t"\
		"paddw %%mm1, %%mm2		\n\t"\
		"paddw %%mm1, %%mm5		\n\t"\
		"paddw %%mm1, %%mm4		\n\t"\
		"punpckhwd %%mm0, %%mm0		\n\t"\
		"punpckhwd %%mm6, %%mm6		\n\t"\
		"punpckhwd %%mm3, %%mm3		\n\t"\
		"paddw %%mm7, %%mm0		\n\t"\
		"paddw %%mm7, %%mm6		\n\t"\
		"paddw %%mm7, %%mm3		\n\t"\
		/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
		"packuswb %%mm0, %%mm2		\n\t"\
		"packuswb %%mm6, %%mm5		\n\t"\
		"packuswb %%mm3, %%mm4		\n\t"\
		"pxor %%mm7, %%mm7		\n\t"

#define YSCALEYUV2PACKED1(index, c) \
		"xorl "#index", "#index"		\n\t"\
		".balign 16			\n\t"\
		"1:				\n\t"\
		"movq (%2, "#index"), %%mm3	\n\t" /* uvbuf0[eax]*/\
		"movq 4096(%2, "#index"), %%mm4	\n\t" /* uvbuf0[eax+2048]*/\
		"psraw $7, %%mm3		\n\t" \
		"psraw $7, %%mm4		\n\t" \
		"movq (%0, "#index", 2), %%mm1	\n\t" /*buf0[eax]*/\
		"movq 8(%0, "#index", 2), %%mm7	\n\t" /*buf0[eax]*/\
		"psraw $7, %%mm1		\n\t" \
		"psraw $7, %%mm7		\n\t" \

#define YSCALEYUV2RGB1(index, c) \
		"xorl "#index", "#index"	\n\t"\
		".balign 16			\n\t"\
		"1:				\n\t"\
		"movq (%2, "#index"), %%mm3	\n\t" /* uvbuf0[eax]*/\
		"movq 4096(%2, "#index"), %%mm4	\n\t" /* uvbuf0[eax+2048]*/\
		"psraw $4, %%mm3		\n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
		"psraw $4, %%mm4		\n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
		"psubw "U_OFFSET"("#c"), %%mm3	\n\t" /* (U-128)8*/\
		"psubw "V_OFFSET"("#c"), %%mm4	\n\t" /* (V-128)8*/\
		"movq %%mm3, %%mm2		\n\t" /* (U-128)8*/\
		"movq %%mm4, %%mm5		\n\t" /* (V-128)8*/\
		"pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
		"pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
	/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
		"movq (%0, "#index", 2), %%mm1	\n\t" /*buf0[eax]*/\
		"movq 8(%0, "#index", 2), %%mm7	\n\t" /*buf0[eax]*/\
		"psraw $4, %%mm1		\n\t" /* buf0[eax] - buf1[eax] >>4*/\
		"psraw $4, %%mm7		\n\t" /* buf0[eax] - buf1[eax] >>4*/\
		"pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
		"pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
		"psubw "Y_OFFSET"("#c"), %%mm1	\n\t" /* 8(Y-16)*/\
		"psubw "Y_OFFSET"("#c"), %%mm7	\n\t" /* 8(Y-16)*/\
		"pmulhw "Y_COEFF"("#c"), %%mm1	\n\t"\
		"pmulhw "Y_COEFF"("#c"), %%mm7	\n\t"\
	/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
		"paddw %%mm3, %%mm4		\n\t"\
		"movq %%mm2, %%mm0		\n\t"\
		"movq %%mm5, %%mm6		\n\t"\
		"movq %%mm4, %%mm3		\n\t"\
		"punpcklwd %%mm2, %%mm2		\n\t"\
		"punpcklwd %%mm5, %%mm5		\n\t"\
		"punpcklwd %%mm4, %%mm4		\n\t"\
		"paddw %%mm1, %%mm2		\n\t"\
		"paddw %%mm1, %%mm5		\n\t"\
		"paddw %%mm1, %%mm4		\n\t"\
		"punpckhwd %%mm0, %%mm0		\n\t"\
		"punpckhwd %%mm6, %%mm6		\n\t"\
		"punpckhwd %%mm3, %%mm3		\n\t"\
		"paddw %%mm7, %%mm0		\n\t"\
		"paddw %%mm7, %%mm6		\n\t"\
		"paddw %%mm7, %%mm3		\n\t"\
		/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
		"packuswb %%mm0, %%mm2		\n\t"\
		"packuswb %%mm6, %%mm5		\n\t"\
		"packuswb %%mm3, %%mm4		\n\t"\
		"pxor %%mm7, %%mm7		\n\t"

#define YSCALEYUV2PACKED1b(index, c) \
		"xorl "#index", "#index"		\n\t"\
		".balign 16			\n\t"\
		"1:				\n\t"\
		"movq (%2, "#index"), %%mm2	\n\t" /* uvbuf0[eax]*/\
		"movq (%3, "#index"), %%mm3	\n\t" /* uvbuf1[eax]*/\
		"movq 4096(%2, "#index"), %%mm5	\n\t" /* uvbuf0[eax+2048]*/\
		"movq 4096(%3, "#index"), %%mm4	\n\t" /* uvbuf1[eax+2048]*/\
		"paddw %%mm2, %%mm3		\n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
		"paddw %%mm5, %%mm4		\n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
		"psrlw $8, %%mm3		\n\t" \
		"psrlw $8, %%mm4		\n\t" \
		"movq (%0, "#index", 2), %%mm1	\n\t" /*buf0[eax]*/\
		"movq 8(%0, "#index", 2), %%mm7	\n\t" /*buf0[eax]*/\
		"psraw $7, %%mm1		\n\t" \
		"psraw $7, %%mm7		\n\t"

// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b(index, c) \
		"xorl "#index", "#index"		\n\t"\
		".balign 16			\n\t"\
		"1:				\n\t"\
		"movq (%2, "#index"), %%mm2	\n\t" /* uvbuf0[eax]*/\
		"movq (%3, "#index"), %%mm3	\n\t" /* uvbuf1[eax]*/\
		"movq 4096(%2, "#index"), %%mm5	\n\t" /* uvbuf0[eax+2048]*/\
		"movq 4096(%3, "#index"), %%mm4	\n\t" /* uvbuf1[eax+2048]*/\
