
xor.h

From: ARM Embedded System Design and Example Development, lab textbook source code (Part 2)
}

static void
xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 6;
	char fpu_save[108];

	FPU_SAVE;

	__asm__ __volatile__ (
	" .align 32,0x90             ;\n"
	" 1:                         ;\n"
	"       movq   (%1), %%mm0   ;\n"
	"       movq  8(%1), %%mm1   ;\n"
	"       pxor   (%2), %%mm0   ;\n"
	"       pxor  8(%2), %%mm1   ;\n"
	"       movq 16(%1), %%mm2   ;\n"
	"       pxor   (%3), %%mm0   ;\n"
	"       pxor  8(%3), %%mm1   ;\n"
	"       pxor 16(%2), %%mm2   ;\n"
	"       pxor   (%4), %%mm0   ;\n"
	"       pxor  8(%4), %%mm1   ;\n"
	"       pxor 16(%3), %%mm2   ;\n"
	"       movq 24(%1), %%mm3   ;\n"
	"       pxor   (%5), %%mm0   ;\n"
	"       pxor  8(%5), %%mm1   ;\n"
	"       movq %%mm0,   (%1)   ;\n"
	"       pxor 16(%4), %%mm2   ;\n"
	"       pxor 24(%2), %%mm3   ;\n"
	"       movq %%mm1,  8(%1)   ;\n"
	"       pxor 16(%5), %%mm2   ;\n"
	"       pxor 24(%3), %%mm3   ;\n"
	"       movq 32(%1), %%mm4   ;\n"
	"       movq %%mm2, 16(%1)   ;\n"
	"       pxor 24(%4), %%mm3   ;\n"
	"       pxor 32(%2), %%mm4   ;\n"
	"       movq 40(%1), %%mm5   ;\n"
	"       pxor 24(%5), %%mm3   ;\n"
	"       pxor 32(%3), %%mm4   ;\n"
	"       pxor 40(%2), %%mm5   ;\n"
	"       movq %%mm3, 24(%1)   ;\n"
	"       pxor 32(%4), %%mm4   ;\n"
	"       pxor 40(%3), %%mm5   ;\n"
	"       movq 48(%1), %%mm6   ;\n"
	"       movq 56(%1), %%mm7   ;\n"
	"       pxor 32(%5), %%mm4   ;\n"
	"       pxor 40(%4), %%mm5   ;\n"
	"       pxor 48(%2), %%mm6   ;\n"
	"       pxor 56(%2), %%mm7   ;\n"
	"       movq %%mm4, 32(%1)   ;\n"
	"       pxor 48(%3), %%mm6   ;\n"
	"       pxor 56(%3), %%mm7   ;\n"
	"       pxor 40(%5), %%mm5   ;\n"
	"       pxor 48(%4), %%mm6   ;\n"
	"       pxor 56(%4), %%mm7   ;\n"
	"       movq %%mm5, 40(%1)   ;\n"
	"       pxor 48(%5), %%mm6   ;\n"
	"       pxor 56(%5), %%mm7   ;\n"
	"       movq %%mm6, 48(%1)   ;\n"
	"       movq %%mm7, 56(%1)   ;\n"
	"       addl $64, %1         ;\n"
	"       addl $64, %2         ;\n"
	"       addl $64, %3         ;\n"
	"       addl $64, %4         ;\n"
	"       addl $64, %5         ;\n"
	"       decl %0              ;\n"
	"       jnz 1b               ;\n"
	:
	: "g" (lines),
	  "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
	: "memory");

	FPU_RESTORE;
}

static struct xor_block_template xor_block_pII_mmx = {
	name: "pII_mmx",
	do_2: xor_pII_mmx_2,
	do_3: xor_pII_mmx_3,
	do_4: xor_pII_mmx_4,
	do_5: xor_pII_mmx_5,
};

static struct xor_block_template xor_block_p5_mmx = {
	name: "p5_mmx",
	do_2: xor_p5_mmx_2,
	do_3: xor_p5_mmx_3,
	do_4: xor_p5_mmx_4,
	do_5: xor_p5_mmx_5,
};

#undef FPU_SAVE
#undef FPU_RESTORE

/*
 * Cache avoiding checksumming functions utilizing KNI instructions
 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
 */

#define XMMS_SAVE				\
	__asm__ __volatile__ (			\
		"movl %%cr0,%0		;\n\t"	\
		"clts			;\n\t"	\
		"movups %%xmm0,(%1)	;\n\t"	\
		"movups %%xmm1,0x10(%1)	;\n\t"	\
		"movups %%xmm2,0x20(%1)	;\n\t"	\
		"movups %%xmm3,0x30(%1)	;\n\t"	\
		: "=r" (cr0)			\
		: "r" (xmm_save)		\
		: "memory")

#define XMMS_RESTORE				\
	__asm__ __volatile__ (			\
		"sfence			;\n\t"	\
		"movups (%1),%%xmm0	;\n\t"	\
		"movups 0x10(%1),%%xmm1	;\n\t"	\
		"movups 0x20(%1),%%xmm2	;\n\t"	\
		"movups 0x30(%1),%%xmm3	;\n\t"	\
		"movl	%0,%%cr0	;\n\t"	\
		:				\
		: "r" (cr0), "r" (xmm_save)	\
		: "memory")

#define OFFS(x)		"16*("#x")"
#define PF_OFFS(x)	"256+16*("#x")"
#define	PF0(x)		"	prefetchnta "PF_OFFS(x)"(%1)		;\n"
#define LD(x,y)		"       movaps   "OFFS(x)"(%1), %%xmm"#y"	;\n"
#define ST(x,y)		"       movaps %%xmm"#y",   "OFFS(x)"(%1)	;\n"
#define PF1(x)		"	prefetchnta "PF_OFFS(x)"(%2)		;\n"
#define PF2(x)		"	prefetchnta "PF_OFFS(x)"(%3)		;\n"
#define PF3(x)		"	prefetchnta "PF_OFFS(x)"(%4)		;\n"
#define PF4(x)		"	prefetchnta "PF_OFFS(x)"(%5)		;\n"
#define PF5(x)		"	prefetchnta "PF_OFFS(x)"(%6)		;\n"
#define XO1(x,y)	"       xorps   "OFFS(x)"(%2), %%xmm"#y"	;\n"
#define XO2(x,y)	"       xorps   "OFFS(x)"(%3), %%xmm"#y"	;\n"
#define XO3(x,y)	"       xorps   "OFFS(x)"(%4), %%xmm"#y"	;\n"
#define XO4(x,y)	"       xorps   "OFFS(x)"(%5), %%xmm"#y"	;\n"
#define XO5(x,y)	"       xorps   "OFFS(x)"(%6), %%xmm"#y"	;\n"

static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4];
	int cr0;

	XMMS_SAVE;

	__asm__ __volatile__ (
#undef BLOCK
#define BLOCK(i) \
		LD(i,0)					\
			LD(i+1,1)			\
		PF1(i)					\
				PF1(i+2)		\
				LD(i+2,2)		\
					LD(i+3,3)	\
		PF0(i+4)				\
				PF0(i+6)		\
		XO1(i,0)				\
			XO1(i+1,1)			\
				XO1(i+2,2)		\
					XO1(i+3,3)	\
		ST(i,0)					\
			ST(i+1,1)			\
				ST(i+2,2)		\
					ST(i+3,3)


	PF0(0)
				PF0(2)

	" .align 32			;\n"
	" 1:                            ;\n"
		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"       addl $256, %1           ;\n"
	"       addl $256, %2           ;\n"
	"       decl %0                 ;\n"
	"       jnz 1b                  ;\n"
	:
	: "r" (lines),
	  "r" (p1), "r" (p2)
	: "memory");

	XMMS_RESTORE;
}

static void
xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4];
	int cr0;

	XMMS_SAVE;

	__asm__ __volatile__ (
#undef BLOCK
#define BLOCK(i) \
		PF1(i)					\
				PF1(i+2)		\
		LD(i,0)					\
			LD(i+1,1)			\
				LD(i+2,2)		\
					LD(i+3,3)	\
		PF2(i)					\
				PF2(i+2)		\
		PF0(i+4)				\
				PF0(i+6)		\
		XO1(i,0)				\
			XO1(i+1,1)			\
				XO1(i+2,2)		\
					XO1(i+3,3)	\
		XO2(i,0)				\
			XO2(i+1,1)			\
				XO2(i+2,2)		\
					XO2(i+3,3)	\
		ST(i,0)					\
			ST(i+1,1)			\
				ST(i+2,2)		\
					ST(i+3,3)


	PF0(0)
				PF0(2)

	" .align 32			;\n"
	" 1:                            ;\n"
		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"       addl $256, %1           ;\n"
	"       addl $256, %2           ;\n"
	"       addl $256, %3           ;\n"
	"       decl %0                 ;\n"
	"       jnz 1b                  ;\n"
	:
	: "r" (lines),
	  "r" (p1), "r"(p2), "r"(p3)
	: "memory" );

	XMMS_RESTORE;
}

static void
xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4];
	int cr0;

	XMMS_SAVE;

	__asm__ __volatile__ (
#undef BLOCK
#define BLOCK(i) \
		PF1(i)					\
				PF1(i+2)		\
		LD(i,0)					\
			LD(i+1,1)			\
				LD(i+2,2)		\
					LD(i+3,3)	\
		PF2(i)					\
				PF2(i+2)		\
		XO1(i,0)				\
			XO1(i+1,1)			\
				XO1(i+2,2)		\
					XO1(i+3,3)	\
		PF3(i)					\
				PF3(i+2)		\
		PF0(i+4)				\
				PF0(i+6)		\
		XO2(i,0)				\
			XO2(i+1,1)			\
				XO2(i+2,2)		\
					XO2(i+3,3)	\
		XO3(i,0)				\
			XO3(i+1,1)			\
				XO3(i+2,2)		\
					XO3(i+3,3)	\
		ST(i,0)					\
			ST(i+1,1)			\
				ST(i+2,2)		\
					ST(i+3,3)


	PF0(0)
				PF0(2)

	" .align 32			;\n"
	" 1:                            ;\n"
		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"       addl $256, %1           ;\n"
	"       addl $256, %2           ;\n"
	"       addl $256, %3           ;\n"
	"       addl $256, %4           ;\n"
	"       decl %0                 ;\n"
	"       jnz 1b                  ;\n"
	:
	: "r" (lines),
	  "r" (p1), "r" (p2), "r" (p3), "r" (p4)
	: "memory" );

	XMMS_RESTORE;
}

static void
xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4];
	int cr0;

	XMMS_SAVE;

	__asm__ __volatile__ (
#undef BLOCK
#define BLOCK(i) \
		PF1(i)					\
				PF1(i+2)		\
		LD(i,0)					\
			LD(i+1,1)			\
				LD(i+2,2)		\
					LD(i+3,3)	\
		PF2(i)					\
				PF2(i+2)		\
		XO1(i,0)				\
			XO1(i+1,1)			\
				XO1(i+2,2)		\
					XO1(i+3,3)	\
		PF3(i)					\
				PF3(i+2)		\
		XO2(i,0)				\
			XO2(i+1,1)			\
				XO2(i+2,2)		\
					XO2(i+3,3)	\
		PF4(i)					\
				PF4(i+2)		\
		PF0(i+4)				\
				PF0(i+6)		\
		XO3(i,0)				\
			XO3(i+1,1)			\
				XO3(i+2,2)		\
					XO3(i+3,3)	\
		XO4(i,0)				\
			XO4(i+1,1)			\
				XO4(i+2,2)		\
					XO4(i+3,3)	\
		ST(i,0)					\
			ST(i+1,1)			\
				ST(i+2,2)		\
					ST(i+3,3)


	PF0(0)
				PF0(2)

	" .align 32			;\n"
	" 1:                            ;\n"
		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"       addl $256, %1           ;\n"
	"       addl $256, %2           ;\n"
	"       addl $256, %3           ;\n"
	"       addl $256, %4           ;\n"
	"       addl $256, %5           ;\n"
	"       decl %0                 ;\n"
	"       jnz 1b                  ;\n"
	:
	: "r" (lines),
	  "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
	: "memory");

	XMMS_RESTORE;
}

static struct xor_block_template xor_block_pIII_sse = {
	name: "pIII_sse",
	do_2: xor_sse_2,
	do_3: xor_sse_3,
	do_4: xor_sse_4,
	do_5: xor_sse_5,
};

/* Also try the generic routines.  */
#include <asm-generic/xor.h>

#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES				\
	do {						\
		xor_speed(&xor_block_8regs);		\
		xor_speed(&xor_block_32regs);		\
		if (cpu_has_xmm)			\
			xor_speed(&xor_block_pIII_sse);	\
		if (md_cpu_has_mmx()) {			\
			xor_speed(&xor_block_pII_mmx);	\
			xor_speed(&xor_block_p5_mmx);	\
		}					\
	} while (0)

/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 only depending on how the cpu
   deals with a load to a line that is being prefetched.  */
#define XOR_SELECT_TEMPLATE(FASTEST) \
	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
