
gemm_kernel_4x4_sse3.s

Optimized GotoBLAS libraries
	movsd	 4 * SIZE(BB), %xmm2

	addl	$2 * SIZE, AA
	addl	$4 * SIZE, BB
	decl	%eax
	jg	.L66
	ALIGN_4

.L68:
	addps	%xmm5, %xmm4
	mulps	%xmm3, %xmm4

#ifndef TRMMKERNEL
	movsd	0 * SIZE(%esi), %xmm0
	movhps	0 * SIZE(%esi, LDC, 1), %xmm0
	addps	%xmm0, %xmm4
#endif
	movsd	%xmm4, 0 * SIZE(%esi)
	movhps	%xmm4, 0 * SIZE(%esi, LDC, 1)
	addl	$2 * SIZE, %esi		# coffset += 2

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, 8), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 2), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif
	ALIGN_4

.L70:
	testl	$1, M
	je	.L79

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	leal	BUFFER, BB	# boffset1 = boffset
#else
	leal	BUFFER, BB	# boffset1 = boffset
	movl	KK, %eax
	leal	(, %eax,   4), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB
#endif

	movss	 0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movss	 4 * SIZE(AA), %xmm1
	pxor	%xmm5, %xmm5
	movsd	 0 * SIZE(BB), %xmm2
	movsd	16 * SIZE(BB), %xmm3

	leal	(LDC, LDC, 2), %eax

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$1, %eax
#else
	addl	$2, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L75
	ALIGN_4

.L72:
	shufps	$0, %xmm0, %xmm0
	mulps	%xmm0, %xmm2
	PREFETCH  (PREFETCHSIZE +  0) * SIZE(AA)
	movss	 1 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	shufps	$0, %xmm0, %xmm0
	movsd	 4 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movss	 2 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	shufps	$0, %xmm0, %xmm0
	movsd	 8 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movss	 3 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	shufps	$0, %xmm0, %xmm0
	movsd	12 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movss	 8 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movsd	32 * SIZE(BB), %xmm2
	shufps	$0, %xmm1, %xmm1
	mulps	%xmm1, %xmm3
	movss	 5 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm4
	shufps	$0, %xmm1, %xmm1
	movsd	20 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movss	 6 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	shufps	$0, %xmm1, %xmm1
	movsd	24 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movss	 7 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm4
	shufps	$0, %xmm1, %xmm1
	movsd	28 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movss	12 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	movsd	48 * SIZE(BB), %xmm3

	addl	$ 8 * SIZE, AA
	addl	$32 * SIZE, BB
	decl	%eax
	jne	.L72
	ALIGN_4

.L75:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movaps	ALPHA,  %xmm3
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je	.L78
	ALIGN_4

.L76:
	shufps	$0, %xmm0, %xmm0
	mulps	%xmm0, %xmm2
	movss	 1 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	movsd	 4 * SIZE(BB), %xmm2

	addl	$ 1 * SIZE, AA
	addl	$ 4 * SIZE, BB
	decl	%eax
	jg	.L76
	ALIGN_4

.L78:
	addps	%xmm5, %xmm4
	mulps	%xmm3, %xmm4

#ifndef TRMMKERNEL
	movss	0 * SIZE(%esi), %xmm0
	movss	0 * SIZE(%esi, LDC, 1), %xmm1
	addss	%xmm4, %xmm0
	psrlq	$32, %xmm4
	addss	%xmm4, %xmm1
	movss	%xmm0, 0 * SIZE(%esi)
	movss	%xmm1, 0 * SIZE(%esi, LDC, 1)
#else
	movss	%xmm4, 0 * SIZE(%esi)
	psrlq	$32, %xmm4
	movss	%xmm4, 0 * SIZE(%esi, LDC, 1)
#endif

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, 4), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$1, KK
#endif
	ALIGN_4

.L79:
#if defined(TRMMKERNEL) && !defined(LEFT)
	addl	$2, KK
#endif
	leal	(, LDC, 2), %eax
	addl	%eax, C
	ALIGN_4

.L80:
	testl	$1, N
	je	.L999

#if defined(TRMMKERNEL) && defined(LEFT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

	movl	K, %eax
	leal	BUFFER, %ecx
	sarl	$3, %eax
	jle	.L85
	ALIGN_4

.L82:
	movss	 0 * SIZE(%edi), %xmm0
	movss	 1 * SIZE(%edi), %xmm1
	movss	 2 * SIZE(%edi), %xmm2
	movss	 3 * SIZE(%edi), %xmm3
	movss	 4 * SIZE(%edi), %xmm4
	movss	 5 * SIZE(%edi), %xmm5
	movss	 6 * SIZE(%edi), %xmm6
	movss	 7 * SIZE(%edi), %xmm7

	movss	%xmm0,  0 * SIZE(%ecx)
	movss	%xmm0,  1 * SIZE(%ecx)
	movss	%xmm1,  2 * SIZE(%ecx)
	movss	%xmm1,  3 * SIZE(%ecx)
	movss	%xmm2,  4 * SIZE(%ecx)
	movss	%xmm2,  5 * SIZE(%ecx)
	movss	%xmm3,  6 * SIZE(%ecx)
	movss	%xmm3,  7 * SIZE(%ecx)
	movss	%xmm4,  8 * SIZE(%ecx)
	movss	%xmm4,  9 * SIZE(%ecx)
	movss	%xmm5, 10 * SIZE(%ecx)
	movss	%xmm5, 11 * SIZE(%ecx)
	movss	%xmm6, 12 * SIZE(%ecx)
	movss	%xmm6, 13 * SIZE(%ecx)
	movss	%xmm7, 14 * SIZE(%ecx)
	movss	%xmm7, 15 * SIZE(%ecx)

#	prefetcht1	128 * SIZE(%ecx)
	prefetcht0	112 * SIZE(%edi)

	addl	$ 8 * SIZE, %edi
	addl	$16 * SIZE, %ecx
	decl	%eax
	jne	.L82
	ALIGN_4

.L85:
	movl	K, %eax
	andl	$7, %eax
	BRANCH
	jle	.L90
	ALIGN_4

.L86:
	movss	 0 * SIZE(%edi), %xmm0
	movss	%xmm0,  0 * SIZE(%ecx)
	movss	%xmm0,  1 * SIZE(%ecx)
	addl	$1 * SIZE, %edi
	addl	$2 * SIZE, %ecx
	decl	%eax
	jne	.L86
	ALIGN_4

.L90:
	movl	C, %esi		# coffset = c
	movl	A, %edx		# aoffset = a
	movl	M,  %ebx
	sarl	$2, %ebx	# i = (m >> 2)
	jle	.L100
	ALIGN_4

.L91:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	leal	BUFFER, BB	# boffset1 = boffset
#else
	leal	BUFFER, BB	# boffset1 = boffset
	movl	KK, %eax
	leal	(, %eax,   8), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 1), BB
#endif

	movaps	 0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movddup	 0 * SIZE(BB), %xmm2
	pxor	%xmm5, %xmm5
	movaps	16 * SIZE(AA), %xmm1
	movddup	 8 * SIZE(BB), %xmm3

#ifdef HAVE_3DNOW
	prefetchw	4 * SIZE(%esi)
#elif defined(HAVE_SSE) || defined(HAVE_SSE2)
	prefetcht2	4 * SIZE(%esi)
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$4, %eax
#else
	addl	$1, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L95
	ALIGN_4

.L92:
	mulps	%xmm0, %xmm2
	movaps	 4 * SIZE(AA), %xmm0
	PREFETCH  (PREFETCHSIZE +  0) * SIZE(AA)
	addps	%xmm2, %xmm4
	movddup	 2 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movaps	 8 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movddup	 4 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movaps	12 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	movddup	 6 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movaps	32 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movddup	16 * SIZE(BB), %xmm2
	mulps	%xmm1, %xmm3
	movaps	20 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm4
	movddup	10 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movaps	24 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	movddup	12 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movaps	28 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm4
	movddup	14 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movaps	48 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	movddup	24 * SIZE(BB), %xmm3

	addl	$32 * SIZE, AA
	addl	$16 * SIZE, BB
	decl	%eax
	jne	.L92
	ALIGN_4

.L95:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movaps	ALPHA,  %xmm3
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je	.L98
	ALIGN_4

.L96:
	mulps	%xmm0, %xmm2
	movaps	 4 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	movddup	 2 * SIZE(BB), %xmm2

	addl	$4 * SIZE, AA
	addl	$2 * SIZE, BB
	decl	%eax
	jg	.L96
	ALIGN_4

.L98:
	addps	%xmm5, %xmm4
	mulps	%xmm3, %xmm4

#ifndef TRMMKERNEL
	movsd	0 * SIZE(%esi), %xmm0
	movhps	2 * SIZE(%esi), %xmm0
	addps	%xmm0, %xmm4
#endif
	movsd	%xmm4, 0 * SIZE(%esi)
	movhps	%xmm4, 2 * SIZE(%esi)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, 8), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 1), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$4, KK
#endif

	addl	$4 * SIZE, %esi		# coffset += 2
	decl	%ebx			# i --
	jg	.L91
	ALIGN_4

.L100:
	testl	$2, M
	je	.L110

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	leal	BUFFER, BB	# boffset1 = boffset
#else
	leal	BUFFER, BB	# boffset1 = boffset
	movl	KK, %eax
	leal	(, %eax,   8), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 1), BB
#endif

	pxor	%xmm4, %xmm4
	pxor	%xmm5, %xmm5
	pxor	%xmm6, %xmm6
	pxor	%xmm7, %xmm7

	movsd	 0 * SIZE(AA), %xmm0
	movsd	 0 * SIZE(BB), %xmm2
	movsd	 8 * SIZE(AA), %xmm1
	movsd	 8 * SIZE(BB), %xmm3

	leal	(LDC, LDC, 2), %eax

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$2, %eax
#else
	addl	$1, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L105
	ALIGN_4

.L102:
	mulps	%xmm0, %xmm2
	movsd	 2 * SIZE(AA), %xmm0
	PREFETCH  (PREFETCHSIZE +  0) * SIZE(AA)
	addps	%xmm2, %xmm4
	movsd	 2 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movsd	 4 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movsd	 4 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movsd	 6 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	movsd	 6 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	movsd	16 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	movsd	16 * SIZE(BB), %xmm2
	mulps	%xmm1, %xmm3
	movsd	10 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm4
	movsd	10 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movsd	12 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	movsd	12 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movsd	14 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm4
	movsd	14 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	movsd	24 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	movsd	24 * SIZE(BB), %xmm3

	addl	$16 * SIZE, AA
	addl	$16 * SIZE, BB
	decl	%eax
	jne	.L102
	ALIGN_4

.L105:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movaps	ALPHA,  %xmm3
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je	.L108
	ALIGN_4

.L106:
	mulps	%xmm0, %xmm2
	movsd	 2 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm4
	movsd	 2 * SIZE(BB), %xmm2

	addl	$2 * SIZE, AA
	addl	$2 * SIZE, BB
	decl	%eax
	jg	.L106
	ALIGN_4

.L108:
	addps	%xmm5, %xmm4
	movhlps	%xmm4, %xmm5
	addps	%xmm5, %xmm4
	mulps	%xmm3, %xmm4

#ifndef TRMMKERNEL
	movsd	0 * SIZE(%esi), %xmm0
	addps	%xmm0, %xmm4
#endif
	movsd	%xmm4, 0 * SIZE(%esi)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, 8), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 1), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif

	addl	$2 * SIZE, %esi		# coffset += 2
	ALIGN_4

.L110:
	testl	$1, M
	je	.L999

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	leal	BUFFER, BB	# boffset1 = boffset
#else
	leal	BUFFER, BB	# boffset1 = boffset
	movl	KK, %eax
	leal	(, %eax,   4), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 2), BB
#endif

	movss	 0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movss	 0 * SIZE(BB), %xmm2
	pxor	%xmm5, %xmm5
	movss	 4 * SIZE(AA), %xmm1
	movss	 8 * SIZE(BB), %xmm3

	leal	(LDC, LDC, 2), %eax

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$1, %eax
#else
	addl	$1, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L115
	ALIGN_4

.L112:
	mulss	%xmm0, %xmm2
	PREFETCH  (PREFETCHSIZE +  0) * SIZE(AA)
	movss	 1 * SIZE(AA), %xmm0
	addss	%xmm2, %xmm4
	movss	 2 * SIZE(BB), %xmm2
	mulss	%xmm0, %xmm2
	movss	 2 * SIZE(AA), %xmm0
	addss	%xmm2, %xmm5
	movss	 4 * SIZE(BB), %xmm2
	mulss	%xmm0, %xmm2
	movss	 3 * SIZE(AA), %xmm0
	addss	%xmm2, %xmm4
	movss	 6 * SIZE(BB), %xmm2
	mulss	%xmm0, %xmm2
	movss	 8 * SIZE(AA), %xmm0
	addss	%xmm2, %xmm5
	movss	16 * SIZE(BB), %xmm2
	mulss	%xmm1, %xmm3
	movss	 5 * SIZE(AA), %xmm1
	addss	%xmm3, %xmm4
	movss	10 * SIZE(BB), %xmm3
	mulss	%xmm1, %xmm3
	movss	 6 * SIZE(AA), %xmm1
	addss	%xmm3, %xmm5
	movss	12 * SIZE(BB), %xmm3
	mulss	%xmm1, %xmm3
	movss	 7 * SIZE(AA), %xmm1
	addss	%xmm3, %xmm4
	movss	14 * SIZE(BB), %xmm3
	mulss	%xmm1, %xmm3
	movss	12 * SIZE(AA), %xmm1
	addss	%xmm3, %xmm5
	movss	24 * SIZE(BB), %xmm3

	addl	$ 8 * SIZE, AA
	addl	$16 * SIZE, BB
	decl	%eax
	jne	.L112
	ALIGN_4

.L115:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movaps	ALPHA,  %xmm3
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je	.L118
	ALIGN_4

.L116:
	mulss	%xmm0, %xmm2
	movss	 1 * SIZE(AA), %xmm0
	addss	%xmm2, %xmm4
	movss	 2 * SIZE(BB), %xmm2

	addl	$1 * SIZE, AA
	addl	$2 * SIZE, BB
	decl	%eax
	jg	.L116
	ALIGN_4

.L118:
	addss	%xmm5, %xmm4
	mulss	%xmm3, %xmm4

#ifndef TRMMKERNEL
	movss	0 * SIZE(%esi), %xmm0
	addss	%xmm0, %xmm4
#else
	mulss	%xmm3, %xmm4
#endif
	movss	%xmm4, 0 * SIZE(%esi)
	ALIGN_4

.L999:
	movl	OLD_STACK, %esp

	popl	%ebx
	popl	%esi
	popl	%edi
	popl	%ebp
	ret

	EPILOGUE
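
The remainder paths in this listing (.L68 through .L118) all follow the same pattern: partial products of packed A and B entries are accumulated in %xmm4/%xmm5, the two accumulators are folded together, scaled by ALPHA, and, unless TRMMKERNEL is defined, added to the existing entries of C before being stored. The packing loop at .L82/.L86 copies the last column of B into BUFFER with every element written twice, so a single two-lane load broadcasts it. The following scalar C sketch illustrates the semantics of one edge-tile update; it is an illustration only, not GotoBLAS code, and the function name edge_tile_update and the column-major index arithmetic are assumptions made for the example.

#include <stddef.h>

/* Illustrative scalar reference for the remainder paths above:
 * accumulate sum_k A(i,k) * B(k,j), scale by alpha, then either
 * add into C (plain GEMM) or overwrite C (TRMM-style kernel). */
static void edge_tile_update(size_t m, size_t n, size_t k, float alpha,
                             const float *A, size_t lda,   /* A is m x k, column-major */
                             const float *B, size_t ldb,   /* B is k x n, column-major */
                             float *C, size_t ldc,         /* C is m x n, column-major */
                             int trmm_kernel)
{
    for (size_t j = 0; j < n; j++) {
        for (size_t i = 0; i < m; i++) {
            float sum = 0.0f;
            for (size_t p = 0; p < k; p++)
                sum += A[i + p * lda] * B[p + j * ldb];
            if (trmm_kernel)
                C[i + j * ldc] = alpha * sum;      /* store without read-back */
            else
                C[i + j * ldc] += alpha * sum;     /* C += alpha * A * B */
        }
    }
}

In these terms, the .L70 path corresponds to m = 1, n = 2, the .L100 path to m = 2, n = 1, and the .L110 path to m = 1, n = 1.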
