
zgemm_kernel_2x2_sse3.s

Optimized GotoBLAS libraries
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif

	addl	$4 * SIZE, %esi		# coffset += 4
	decl	%ebx			# i --
	jg	.L10
	ALIGN_4

.L30:
	movl	M,  %ebx
	andl	$1, %ebx
	jle	.L99
	ALIGN_4

.L40:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB	# boffset1 = boffset
#else
	leal	BUFFER, BB	# boffset1 = boffset
	movl	KK, %eax
	leal	(, %eax,    8), %eax
	leal	(AA, %eax,  1), AA
	leal	(BB,  %eax, 4), BB
#endif

	movddup	  0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movddup	  8 * SIZE(AA), %xmm1
	pxor	%xmm5, %xmm5
	movsd     0 * SIZE(BB), %xmm2
	movsd    16 * SIZE(BB), %xmm3

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$1, %eax
#else
	addl	$2, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L42
	ALIGN_4

.L41:
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	PREFETCH  (PREFETCHSIZE +  0) * SIZE(AA)
	addps	%xmm2, %xmm4
	movsd     4 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	movddup	  2 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movsd     8 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	movsd    12 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	movddup	  4 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movsd    32 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm0, %xmm3
	addps	%xmm3, %xmm4
	movsd    20 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm0, %xmm3
	movddup	  6 * SIZE(AA), %xmm0
	addps	%xmm3, %xmm5
	movsd    24 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm0, %xmm3
	addps	%xmm3, %xmm4
	movsd    28 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm0, %xmm3
	movddup	 16 * SIZE(AA), %xmm0
	addps	%xmm3, %xmm5
	movsd    48 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm1, %xmm2
	addps	%xmm2, %xmm4
	movsd    36 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm1, %xmm2
	movddup	 10 * SIZE(AA), %xmm1
	addps	%xmm2, %xmm5
	movsd    40 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm1, %xmm2
	addps	%xmm2, %xmm4
	movsd    44 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm1, %xmm2
	movddup	 12 * SIZE(AA), %xmm1
	addps	%xmm2, %xmm5
	movsd    64 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm4
	movsd    52 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm1, %xmm3
	movddup	 14 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	movsd    56 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm4
	movsd    60 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm1, %xmm3
	movddup	 24 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	movsd    80 * SIZE(BB), %xmm3

	addl	$16 * SIZE, AA
	addl	$64 * SIZE, BB
	decl	%eax
	jne	.L41
	ALIGN_4

.L42:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movaps	ALPHA_R, %xmm1
	movaps	ALPHA_I, %xmm3
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je	.L44
	ALIGN_4

.L43:
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	movsd     4 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	movddup	  2 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movsd     8 * SIZE(BB), %xmm2

	addl	$2 * SIZE, AA
	addl	$8 * SIZE, BB
	decl	%eax
	jg	.L43
	ALIGN_4

.L44:
	movaps	%xmm4, %xmm6
	movlhps	%xmm5, %xmm4
	movhlps	%xmm6, %xmm5

#if  defined(NR) || defined(NC) || defined(TR) || defined(TC) || \
     defined(RR) || defined(RC) || defined(CR) || defined(CC)
	cmpeqps	%xmm7, %xmm7
	pslld	$31,   %xmm7
	xorps	%xmm7, %xmm5
#endif

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(NR) || defined(NC) || defined(TR) || defined(TC)
	shufps	$0xb1, %xmm5, %xmm5
	addsubps	%xmm5, %xmm4
	movaps	%xmm4, %xmm5
	shufps	$0xb1, %xmm4, %xmm4
#else
	shufps	$0xb1, %xmm4, %xmm4
	addsubps	%xmm4, %xmm5
	movaps	%xmm5, %xmm4
	shufps	$0xb1, %xmm5, %xmm5
#endif

	mulps	%xmm1, %xmm5
	mulps	%xmm3, %xmm4
	addps	%xmm5, %xmm4

#ifndef TRMMKERNEL
	movsd	0 * SIZE(%esi), %xmm0
	movhps	0 * SIZE(%esi, LDC), %xmm0
	addps	%xmm0, %xmm4
#endif

	movsd	%xmm4, 0 * SIZE(%esi)
	movhps	%xmm4, 0 * SIZE(%esi, LDC)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, 8), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$1, KK
#endif
	ALIGN_4

.L99:
#if defined(TRMMKERNEL) && !defined(LEFT)
	addl	$2, KK
#endif

	leal	(LDC, LDC), %eax
	addl	%eax, C			# c += 2 * ldc
	decl	J			# j --
	jg	.L01
	ALIGN_4

.L100:
	movl	N, %eax
	andl	$1, %eax
	jle	.L999
	ALIGN_4

.L101:
#if defined(TRMMKERNEL) && defined(LEFT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

	/* Copying to Sub Buffer */
	leal	BUFFER, %ecx

	movl	K, %eax
	sarl	$3, %eax
	jle	.L103
	ALIGN_4

.L102:
	movddup	 0 * SIZE(B), %xmm0
	movddup	 2 * SIZE(B), %xmm1
	movddup	 4 * SIZE(B), %xmm2
	movddup	 6 * SIZE(B), %xmm3
	movddup	 8 * SIZE(B), %xmm4
	movddup	10 * SIZE(B), %xmm5
	movddup	12 * SIZE(B), %xmm6
	movddup	14 * SIZE(B), %xmm7

	movaps	%xmm0,  0 * SIZE(BB)
	movaps	%xmm1,  4 * SIZE(BB)
	movaps	%xmm2,  8 * SIZE(BB)
	movaps	%xmm3, 12 * SIZE(BB)
	movaps	%xmm4, 16 * SIZE(BB)
	movaps	%xmm5, 20 * SIZE(BB)
	movaps	%xmm6, 24 * SIZE(BB)
	movaps	%xmm7, 28 * SIZE(BB)

	prefetcht0	 104 * SIZE(B)

	addl	$16 * SIZE, B
	addl	$32 * SIZE, BB
	decl	%eax
	jne	.L102
	ALIGN_4

.L103:
	movl	K, %eax
	andl	$7, %eax
	BRANCH
	jle	.L105
	ALIGN_4

.L104:
	movddup	0 * SIZE(B), %xmm0
	movaps	%xmm0,  0 * SIZE(BB)
	addl	$ 2 * SIZE, %edi
	addl	$ 4 * SIZE, %ecx
	decl	%eax
	jne	.L104
	ALIGN_4

.L105:
	movl	C,  %esi
	movl	A,  AA
	movl	M,  %ebx
	sarl	$1, %ebx
	jle	.L130
	ALIGN_4

.L110:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB	# boffset1 = boffset
#else
	leal	BUFFER, BB	# boffset1 = boffset
	movl	KK, %eax
	leal	(, %eax,    8), %eax
	leal	(AA, %eax,  2), AA
	leal	(BB,  %eax, 2), BB
#endif

	movaps	  0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movaps	 16 * SIZE(AA), %xmm1
	pxor	%xmm5, %xmm5
	movsldup  0 * SIZE(BB), %xmm2
	pxor	%xmm6, %xmm6
	movsldup 16 * SIZE(BB), %xmm3
	pxor	%xmm7, %xmm7

#ifdef PENTIUM4
	prefetchnta 4 * SIZE(%esi)
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$2, %eax
#else
	addl	$1, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L112
	ALIGN_4

.L111:
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	PREFETCH  (PREFETCHSIZE +  0) * SIZE(AA)
	movshdup  0 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movaps	  4 * SIZE(AA), %xmm0
	ADDSUB	%xmm2, %xmm5
	movsldup  4 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	movshdup  4 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movaps	  8 * SIZE(AA), %xmm0
	ADDSUB	%xmm2, %xmm5
	movsldup  8 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	movshdup  8 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movaps	 12 * SIZE(AA), %xmm0
	ADDSUB	%xmm2, %xmm5
	movsldup 12 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	movshdup 12 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movaps	 32 * SIZE(AA), %xmm0
	ADDSUB	%xmm2, %xmm5
	movsldup 32 * SIZE(BB), %xmm2
	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm4
	movshdup 16 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movaps	 20 * SIZE(AA), %xmm1
	ADDSUB	%xmm3, %xmm5
	movsldup 20 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm4
	movshdup 20 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movaps	 24 * SIZE(AA), %xmm1
	ADDSUB	%xmm3, %xmm5
	movsldup 24 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm4
	movshdup 24 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movaps	 28 * SIZE(AA), %xmm1
	ADDSUB	%xmm3, %xmm5
	movsldup 28 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm4
	movshdup 28 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movaps	 48 * SIZE(AA), %xmm1
	ADDSUB	%xmm3, %xmm5
	movsldup 48 * SIZE(BB), %xmm3

	addl	$32 * SIZE, AA
	addl	$32 * SIZE, BB
	decl	%eax
	jne	.L111
	ALIGN_4

.L112:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movaps	ALPHA_R, %xmm1
	movaps	ALPHA_I, %xmm3
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je	.L114
	ALIGN_4

.L113:
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	movshdup  0 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movaps	  4 * SIZE(AA), %xmm0
	ADDSUB	%xmm2, %xmm5
	movsldup  4 * SIZE(BB), %xmm2

	addl	$ 4 * SIZE, AA
	addl	$ 4 * SIZE, BB
	decl	%eax
	jg	.L113
	ALIGN_4

.L114:
#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(NR) || defined(NC) || defined(TR) || defined(TC)
	shufps	$0xb1, %xmm5, %xmm5
	addsubps	%xmm5, %xmm4
	movaps	%xmm4, %xmm5
	shufps	$0xb1, %xmm4, %xmm4
#else
	shufps	$0xb1, %xmm4, %xmm4
	addsubps	%xmm4, %xmm5
	movaps	%xmm5, %xmm4
	shufps	$0xb1, %xmm5, %xmm5
#endif

	mulps	%xmm1, %xmm5
	mulps	%xmm3, %xmm4
	addps	%xmm5, %xmm4

#ifndef TRMMKERNEL
	movsd	0 * SIZE(%esi), %xmm0
	movhps	2 * SIZE(%esi), %xmm0
	addps	%xmm0, %xmm4
#endif

	movsd	%xmm4, 0 * SIZE(%esi)
	movhps	%xmm4, 2 * SIZE(%esi)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, 8), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 2), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif

	addl	$4 * SIZE, %esi		# coffset += 4
	decl	%ebx			# i --
	jg	.L110
	ALIGN_4

.L130:
	movl	M,  %ebx
	andl	$1, %ebx
	jle	.L999
	ALIGN_4

.L140:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB	# boffset1 = boffset
#else
	leal	BUFFER, BB	# boffset1 = boffset
	movl	KK, %eax
	leal	(, %eax,    8), %eax
	leal	(AA, %eax,  1), AA
	leal	(BB,  %eax, 2), BB
#endif

	movddup	  0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movddup	  8 * SIZE(AA), %xmm1
	pxor	%xmm5, %xmm5
	movsd     0 * SIZE(BB), %xmm2
	movsd    16 * SIZE(BB), %xmm3

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$1, %eax
#else
	addl	$1, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L142
	ALIGN_4

.L141:
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	PREFETCH  (PREFETCHSIZE +  0) * SIZE(AA)
	movddup	  2 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	movsd     4 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	movddup	  4 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movsd     8 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	movddup	  6 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	movsd    12 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	movddup	 16 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movsd    32 * SIZE(BB), %xmm2
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm1, %xmm3
	movddup	 10 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm4
	movsd    20 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm1, %xmm3
	movddup	 12 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	movsd    24 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm1, %xmm3
	movddup	 14 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm4
	movsd    28 * SIZE(BB), %xmm3
	shufps	$0x50, %xmm3, %xmm3
	mulps	%xmm1, %xmm3
	movddup	 24 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	movsd    48 * SIZE(BB), %xmm3

	addl	$ 16 * SIZE, AA
	addl	$ 32 * SIZE, BB
	decl	%eax
	jne	.L141
	ALIGN_4

.L142:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movaps	ALPHA_R, %xmm1
	movaps	ALPHA_I, %xmm3
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je	.L144
	ALIGN_4

.L143:
	shufps	$0x50, %xmm2, %xmm2
	mulps	%xmm0, %xmm2
	movddup	  2 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	movsd     4 * SIZE(BB), %xmm2

	addl	$2 * SIZE, AA
	addl	$4 * SIZE, BB
	decl	%eax
	jg	.L143
	ALIGN_4

.L144:
	addps	%xmm5, %xmm4
	movhlps	%xmm4, %xmm5

#if  defined(NR) || defined(NC) || defined(TR) || defined(TC) || \
     defined(RR) || defined(RC) || defined(CR) || defined(CC)
	cmpeqps	%xmm7, %xmm7
	pslld	$31,   %xmm7
	xorps	%xmm7, %xmm5
#endif

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(NR) || defined(NC) || defined(TR) || defined(TC)
	shufps	$0xb1, %xmm5, %xmm5
	addsubps	%xmm5, %xmm4
	movaps	%xmm4, %xmm5
	shufps	$0xb1, %xmm4, %xmm4
#else
	shufps	$0xb1, %xmm4, %xmm4
	addsubps	%xmm4, %xmm5
	movaps	%xmm5, %xmm4
	shufps	$0xb1, %xmm5, %xmm5
#endif

	mulps	%xmm1, %xmm5
	mulps	%xmm3, %xmm4
	addps	%xmm5, %xmm4

#ifndef TRMMKERNEL
	movsd	0 * SIZE(%esi), %xmm0
	addps	%xmm0, %xmm4
#endif

	movsd	%xmm4, 0 * SIZE(%esi)
	ALIGN_4

.L999:
	movl	OLD_STACK, %esp

	popl	%ebx
	popl	%esi
	popl	%edi
	popl	%ebp
	ret

	EPILOGUE
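The listing above is the tail half of a 2x2-unrolled complex GEMM/TRMM kernel: .L30/.L40 and .L130/.L140 handle the leftover single-row strips, .L101-.L105 repack B into BUFFER, and the movsldup/movshdup + addsubps sequences are the standard SSE3 complex-multiply idiom. With a complex pair a = [ar, ai] in a register, the real-broadcast product [ar*br, ai*br] and the imaginary-broadcast product [ar*bi, ai*bi] accumulate separately (xmm4/xmm5); after a shufps $0xb1 swap, addsubps recombines them into [ar*br - ai*bi, ai*br + ar*bi]. In the conjugated RR/RC/CR/CC paths, the cmpeqps/pslld pair first builds a 0x80000000 sign mask so xorps can flip the imaginary accumulator's sign.

As a reading aid, here is a minimal scalar C sketch of the arithmetic the kernel vectorizes, not part of the original file: the function name and signature are hypothetical, the real kernel works on packed AA/BB buffers and applies ALPHA_R/ALPHA_I once per tile (as the tail code after .L44/.L114/.L144 does), and float complex is assumed from the single-precision "ps" instructions used throughout the listing.

#include <complex.h>
#include <stddef.h>

/* Reference for the non-conjugated (NN-type) case:
 * c[i + j*ldc] += alpha * sum_p a[i + p*lda] * b[p + j*ldb]
 * over an m x n tile. */
static void gemm_tile_ref(size_t m, size_t n, size_t k, float complex alpha,
                          const float complex *a, size_t lda,
                          const float complex *b, size_t ldb,
                          float complex *c, size_t ldc)
{
	for (size_t j = 0; j < n; j++)
		for (size_t i = 0; i < m; i++) {
			float complex sum = 0.0f;	/* plays the role of xmm4/xmm5 */
			for (size_t p = 0; p < k; p++)
				sum += a[i + p * lda] * b[p + j * ldb];
			/* alpha scaling + C update, as in the tail code */
			c[i + j * ldc] += alpha * sum;
		}
}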
