
gemm_kernel_4x2_sse2.s

Optimized GotoBLAS libraries
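This listing is an excerpt from the double-precision GEMM/TRMM micro-kernel for 32-bit x86 with SSE2 (4x2 register blocking). It picks up mid-routine, at the tail of the current two-column panel: the code below handles the edge cases left over after the main 4x2 blocks (the remaining M mod 4 rows of the two-column panel, then the final single column when N is odd), and ends with the function epilogue.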
	movsd	%xmm5, 0 * SIZE(%esi, LDC)
	movhpd	%xmm5, 1 * SIZE(%esi, LDC)

	addl	$2 * SIZE, %esi		# coffset += 2

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif
	ALIGN_2

.L50:
	movl	M,  %ebx
	testl	$1, %ebx
	jle	.L99

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, %ecx

	movapd	 0 * SIZE + BUFFER, %xmm2
	pxor	%xmm4, %xmm4
	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm5, %xmm5
	movapd	 8 * SIZE + BUFFER, %xmm3
	pxor	%xmm6, %xmm6
	movsd	 4 * SIZE(AA), %xmm1
	pxor	%xmm7, %xmm7
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB /* because it's doubled */

	movapd	 0 * SIZE(BB), %xmm2
	pxor	%xmm4, %xmm4
	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm5, %xmm5
	movapd	 8 * SIZE(BB), %xmm3
	pxor	%xmm6, %xmm6
	movsd	 4 * SIZE(AA), %xmm1
	pxor	%xmm7, %xmm7
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$1, %eax
#else
	addl	$2, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L52

.L51:
	mulsd	%xmm0, %xmm2
	mulsd	 2 * SIZE(BB), %xmm0
	addsd	%xmm2, %xmm4
	movsd	 4 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm5
	movsd	 1 * SIZE(AA), %xmm0
	mulsd	%xmm0, %xmm2
	mulsd	 6 * SIZE(BB), %xmm0
	addsd	%xmm2, %xmm4
	movsd	16 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm5
	movsd	 2 * SIZE(AA), %xmm0
	mulsd	%xmm0, %xmm3
	mulsd	10 * SIZE(BB), %xmm0
	addsd	%xmm3, %xmm4
	movsd	12 * SIZE(BB), %xmm3
	addsd	%xmm0, %xmm5
	movsd	 3 * SIZE(AA), %xmm0
	mulsd	%xmm0, %xmm3
	mulsd	14 * SIZE(BB), %xmm0
	addsd	%xmm3, %xmm4
	movsd	24 * SIZE(BB), %xmm3
	addsd	%xmm0, %xmm5
	movsd	 8 * SIZE(AA), %xmm0
	mulsd	%xmm1, %xmm2
	mulsd	18 * SIZE(BB), %xmm1
	addsd	%xmm2, %xmm4
	movsd	20 * SIZE(BB), %xmm2
	addsd	%xmm1, %xmm5
	movsd	 5 * SIZE(AA), %xmm1
	mulsd	%xmm1, %xmm2
	mulsd	22 * SIZE(BB), %xmm1
	addsd	%xmm2, %xmm4
	movsd	32 * SIZE(BB), %xmm2
	addsd	%xmm1, %xmm5
	movsd	 6 * SIZE(AA), %xmm1
	mulsd	%xmm1, %xmm3
	mulsd	26 * SIZE(BB), %xmm1
	addsd	%xmm3, %xmm4
	movsd	28 * SIZE(BB), %xmm3
	addsd	%xmm1, %xmm5
	movsd	 7 * SIZE(AA), %xmm1
	mulsd	%xmm1, %xmm3
	mulsd	30 * SIZE(BB), %xmm1
	addsd	%xmm3, %xmm4
	movsd	40 * SIZE(BB), %xmm3
	addsd	%xmm1, %xmm5
	movsd	12 * SIZE(AA), %xmm1

	addl   $ 8 * SIZE, AA
	addl   $32 * SIZE, BB
	BRANCH
	decl   %eax
	jne    .L51

.L52:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movsd	ALPHA,  %xmm3
	andl	$7, %eax		# k & 7
	BRANCH
	je .L54

.L53:
	mulsd	%xmm0, %xmm2
	mulsd	 2 * SIZE(BB), %xmm0
	addsd	%xmm2, %xmm4
	movsd	 4 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm5
	movsd	 1 * SIZE(AA), %xmm0

	addl	$1 * SIZE, AA		# aoffset  += 1
	addl	$4 * SIZE, BB		# boffset1 += 4

	decl	%eax
	BRANCH
	jg	.L53
	ALIGN_4

.L54:
	addsd	%xmm6, %xmm4
	addsd	%xmm7, %xmm5

	mulsd	%xmm3, %xmm4
	mulsd	%xmm3, %xmm5

#ifndef TRMMKERNEL
	addsd	0 * SIZE(%esi), %xmm4
	addsd	0 * SIZE(%esi, LDC), %xmm5
#endif

	movsd	%xmm4, 0 * SIZE(%esi)
	movsd	%xmm5, 0 * SIZE(%esi, LDC)

	addl	$1 * SIZE, %esi

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$1, KK
#endif
	ALIGN_2

.L99:
#if defined(TRMMKERNEL) && !defined(LEFT)
	addl	$2, KK
#endif

	leal	(, LDC, 2), %eax
	addl	%eax, C			# c += 2 * ldc
	BRANCH
	decl	J			# j --
	jg	.L01
	ALIGN_2

.L100:
	movl	N, %eax
	testl	$1, %eax
	jle	.L999
	ALIGN_2

.L101:
#if defined(TRMMKERNEL) && defined(LEFT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif
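/* The copy loop below packs the remaining single column of B into
   BUFFER, duplicating each element into both halves of an aligned
   16-byte pair (unpcklpd), so that the compute loops can feed it
   straight into movapd/mulpd without further shuffling. */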
/* Copying to Sub Buffer */
	leal	BUFFER, %ecx

	movl	K, %eax
	sarl	$3, %eax
	jle	.L103
	ALIGN_4

.L102:
	movsd	 0 * SIZE(B), %xmm0
	movsd	 1 * SIZE(B), %xmm1
	movsd	 2 * SIZE(B), %xmm2
	movsd	 3 * SIZE(B), %xmm3
	movsd	 4 * SIZE(B), %xmm4
	movsd	 5 * SIZE(B), %xmm5
	movsd	 6 * SIZE(B), %xmm6
	movsd	 7 * SIZE(B), %xmm7

	unpcklpd  %xmm0, %xmm0
	unpcklpd  %xmm1, %xmm1
	unpcklpd  %xmm2, %xmm2
	unpcklpd  %xmm3, %xmm3
	unpcklpd  %xmm4, %xmm4
	unpcklpd  %xmm5, %xmm5
	unpcklpd  %xmm6, %xmm6
	unpcklpd  %xmm7, %xmm7

	movapd	%xmm0,  0 * SIZE(%ecx)
	movapd	%xmm1,  2 * SIZE(%ecx)
	movapd	%xmm2,  4 * SIZE(%ecx)
	movapd	%xmm3,  6 * SIZE(%ecx)
	movapd	%xmm4,  8 * SIZE(%ecx)
	movapd	%xmm5, 10 * SIZE(%ecx)
	movapd	%xmm6, 12 * SIZE(%ecx)
	movapd	%xmm7, 14 * SIZE(%ecx)

	prefetcht0	104 * SIZE(B)

	addl	$ 8 * SIZE, B
	addl	$16 * SIZE, %ecx
	decl	%eax
	BRANCH
	jne	.L102
	ALIGN_2

.L103:
	movl	K, %eax
	andl	$7, %eax
	BRANCH
	jle	.L105
	ALIGN_2

.L104:
	movsd	 0 * SIZE(B), %xmm0
	unpcklpd  %xmm0, %xmm0
	movapd	%xmm0,  0 * SIZE(%ecx)
	addl	$1 * SIZE, B
	addl	$2 * SIZE, %ecx
	decl	%eax
	jne	.L104
	ALIGN_4

.L105:
	movl	C, %esi		# coffset = c
	movl	A, %edx		# aoffset = a
	movl	M,  %ebx
	sarl	$2, %ebx	# i = (m >> 2)
	jle	.L130
	ALIGN_4

.L110:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB

	movapd	 0 * SIZE + BUFFER, %xmm2
	pxor	%xmm4, %xmm4
	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm5, %xmm5
	movapd	 8 * SIZE + BUFFER, %xmm3
	pxor	%xmm6, %xmm6
	movapd	 8 * SIZE(AA), %xmm1
	pxor	%xmm7, %xmm7
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 4), AA
	leal	(BB, %eax, 2), BB

	movapd	 0 * SIZE(BB), %xmm2
	pxor	%xmm4, %xmm4
	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm5, %xmm5
	movapd	 8 * SIZE(BB), %xmm3
	pxor	%xmm6, %xmm6
	movapd	 8 * SIZE(AA), %xmm1
	pxor	%xmm7, %xmm7
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$4, %eax
#else
	addl	$1, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L112

.L111:
	mulpd	%xmm2, %xmm0
	mulpd	 2 * SIZE(AA), %xmm2
	addpd	%xmm0, %xmm4
	movapd	 4 * SIZE(AA), %xmm0
	addpd	%xmm2, %xmm6
	movapd	 2 * SIZE(BB), %xmm2
	mulpd	%xmm2, %xmm0
	mulpd	 6 * SIZE(AA), %xmm2
	addpd	%xmm0, %xmm5
	movapd	16 * SIZE(AA), %xmm0
	addpd	%xmm2, %xmm7
	movapd	 4 * SIZE(BB), %xmm2
	mulpd	%xmm2, %xmm1
	mulpd	10 * SIZE(AA), %xmm2
	addpd	%xmm1, %xmm4
	movapd	12 * SIZE(AA), %xmm1
	addpd	%xmm2, %xmm6
	movapd	 6 * SIZE(BB), %xmm2
	mulpd	%xmm2, %xmm1
	mulpd	14 * SIZE(AA), %xmm2
	addpd	%xmm1, %xmm5
	movapd	24 * SIZE(AA), %xmm1
	addpd	%xmm2, %xmm7
	movapd	16 * SIZE(BB), %xmm2
	mulpd	%xmm3, %xmm0
	mulpd	18 * SIZE(AA), %xmm3
	addpd	%xmm0, %xmm4
	movapd	20 * SIZE(AA), %xmm0
	addpd	%xmm3, %xmm6
	movapd	10 * SIZE(BB), %xmm3
	mulpd	%xmm3, %xmm0
	mulpd	22 * SIZE(AA), %xmm3
	addpd	%xmm0, %xmm5
	movapd	32 * SIZE(AA), %xmm0
	addpd	%xmm3, %xmm7
	movapd	12 * SIZE(BB), %xmm3
	mulpd	%xmm3, %xmm1
	mulpd	26 * SIZE(AA), %xmm3
	addpd	%xmm1, %xmm4
	movapd	28 * SIZE(AA), %xmm1
	addpd	%xmm3, %xmm6
	movapd	14 * SIZE(BB), %xmm3
	mulpd	%xmm3, %xmm1
	mulpd	30 * SIZE(AA), %xmm3
	addpd	%xmm1, %xmm5
	movapd	40 * SIZE(AA), %xmm1
	addpd	%xmm3, %xmm7
	movapd	24 * SIZE(BB), %xmm3

	addl   $32 * SIZE, AA
	addl   $16 * SIZE, BB
	decl   %eax
	jne    .L111

.L112:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movapd	ALPHA,  %xmm3
	andl	$7, %eax		# k & 7
	BRANCH
	je .L114
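/* Remainder loop: up to seven leftover k iterations for the 4x1
   block, one rank-1 update per pass. */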
.L113:
	mulpd	%xmm2, %xmm0
	mulpd	 2 * SIZE(AA), %xmm2
	addpd	%xmm0, %xmm4
	movapd	 4 * SIZE(AA), %xmm0
	addpd	%xmm2, %xmm6
	movapd	 2 * SIZE(BB), %xmm2

	addl	$4 * SIZE, AA		# aoffset  += 4
	addl	$2 * SIZE, BB		# boffset1 += 2
	subl	$1, %eax
	jg	.L113
	ALIGN_4

.L114:
	addpd	%xmm5, %xmm4
	addpd	%xmm7, %xmm6

	mulpd	%xmm3, %xmm4
	mulpd	%xmm3, %xmm6

#ifndef TRMMKERNEL
	movsd	0 * SIZE(%esi), %xmm0
	movhpd	1 * SIZE(%esi), %xmm0
	addpd	%xmm0, %xmm4
	movsd	2 * SIZE(%esi), %xmm1
	movhpd	3 * SIZE(%esi), %xmm1
	addpd	%xmm1, %xmm6
#endif

	movsd	%xmm4, 0 * SIZE(%esi)
	unpckhpd %xmm4, %xmm4
	movsd	%xmm4, 1 * SIZE(%esi)
	movsd	%xmm6, 2 * SIZE(%esi)
	unpckhpd %xmm6, %xmm6
	movsd	%xmm6, 3 * SIZE(%esi)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 4), AA
	leal	(BB, %eax, 2), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$4, KK
#endif

	addl	$4 * SIZE, %esi		# coffset += 4
	BRANCH
	decl	%ebx			# i --
	jg	.L110
	ALIGN_2

.L130:
	movl	M,  %ebx
	testl	$2, %ebx
	jle	.L150

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB

	movapd	 0 * SIZE + BUFFER, %xmm2
	pxor	%xmm4, %xmm4
	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm5, %xmm5
	movapd	 8 * SIZE + BUFFER, %xmm3
	pxor	%xmm6, %xmm6
	movapd	 8 * SIZE(AA), %xmm1
	pxor	%xmm7, %xmm7
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 2), BB

	movapd	 0 * SIZE(BB), %xmm2
	pxor	%xmm4, %xmm4
	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm5, %xmm5
	movapd	 8 * SIZE(BB), %xmm3
	pxor	%xmm6, %xmm6
	movapd	 8 * SIZE(AA), %xmm1
	pxor	%xmm7, %xmm7
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$2, %eax
#else
	addl	$1, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L132

.L131:
	mulpd	%xmm0, %xmm2
	movapd	 2 * SIZE(AA), %xmm0
	addpd	%xmm2, %xmm4
	mulpd	 2 * SIZE(BB), %xmm0
	movapd	16 * SIZE(BB), %xmm2
	addpd	%xmm0, %xmm5
	movapd	 4 * SIZE(AA), %xmm0
	mulpd	 4 * SIZE(BB), %xmm0
	addpd	%xmm0, %xmm6
	movapd	 6 * SIZE(AA), %xmm0
	mulpd	 6 * SIZE(BB), %xmm0
	addpd	%xmm0, %xmm7
	movapd	16 * SIZE(AA), %xmm0
	mulpd	%xmm1, %xmm3
	movapd	10 * SIZE(AA), %xmm1
	addpd	%xmm3, %xmm4
	mulpd	10 * SIZE(BB), %xmm1
	movapd	24 * SIZE(BB), %xmm3
	addpd	%xmm1, %xmm5
	movapd	12 * SIZE(AA), %xmm1
	mulpd	12 * SIZE(BB), %xmm1
	addpd	%xmm1, %xmm6
	movapd	14 * SIZE(AA), %xmm1
	mulpd	14 * SIZE(BB), %xmm1
	addpd	%xmm1, %xmm7
	movapd	24 * SIZE(AA), %xmm1

	addl   $16 * SIZE, AA
	addl   $16 * SIZE, BB
	BRANCH
	decl   %eax
	jne    .L131

.L132:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movapd	ALPHA,  %xmm3
	andl	$7, %eax		# k & 7
	BRANCH
	je .L134

.L133:
	movapd	 0 * SIZE(AA), %xmm0
	mulpd	 0 * SIZE(BB), %xmm0
	addpd	%xmm0, %xmm4

	addl	$2 * SIZE, AA		# aoffset  += 2
	addl	$2 * SIZE, BB		# boffset1 += 2
	decl	%eax
	BRANCH
	jg	.L133
	ALIGN_4

.L134:
	addpd	%xmm5, %xmm4
	addpd	%xmm7, %xmm6
	addpd	%xmm6, %xmm4

	mulpd	%xmm3, %xmm4

#ifndef TRMMKERNEL
	movsd	0 * SIZE(%esi), %xmm0
	movhpd	1 * SIZE(%esi), %xmm0
	addpd	%xmm0, %xmm4
#endif

	movsd	%xmm4, 0 * SIZE(%esi)
	unpckhpd %xmm4, %xmm4
	movsd	%xmm4, 1 * SIZE(%esi)

	addl	$2 * SIZE, %esi		# coffset += 2

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 2), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif
	ALIGN_2
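/* Final corner case: a single leftover row against the single
   leftover column, accumulated entirely in scalar (mulsd/addsd)
   arithmetic. */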
.L150:
	movl	M,  %ebx
	testl	$1, %ebx
	jle	.L999

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB

	movapd	 0 * SIZE + BUFFER, %xmm2
	pxor	%xmm4, %xmm4
	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm5, %xmm5
	movapd	 8 * SIZE + BUFFER, %xmm3
	pxor	%xmm6, %xmm6
	movapd	 4 * SIZE(AA), %xmm1
	pxor	%xmm7, %xmm7
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 2), BB

	movapd	 0 * SIZE(BB), %xmm2
	pxor	%xmm4, %xmm4
	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm5, %xmm5
	movapd	 8 * SIZE(BB), %xmm3
	pxor	%xmm6, %xmm6
	movapd	 4 * SIZE(AA), %xmm1
	pxor	%xmm7, %xmm7
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
	addl	$1, %eax
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L152

.L151:
	mulsd	%xmm0, %xmm2
	movsd	 1 * SIZE(AA), %xmm0
	addsd	%xmm2, %xmm4
	mulsd	 2 * SIZE(BB), %xmm0
	movsd	16 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm4
	movsd	 2 * SIZE(AA), %xmm0
	mulsd	 4 * SIZE(BB), %xmm0
	addsd	%xmm0, %xmm4
	movsd	 3 * SIZE(AA), %xmm0
	mulsd	 6 * SIZE(BB), %xmm0
	addsd	%xmm0, %xmm4
	movsd	 8 * SIZE(AA), %xmm0
	mulsd	%xmm1, %xmm3
	movsd	 5 * SIZE(AA), %xmm1
	addsd	%xmm3, %xmm4
	mulsd	10 * SIZE(BB), %xmm1
	movsd	24 * SIZE(BB), %xmm3
	addsd	%xmm1, %xmm4
	movsd	 6 * SIZE(AA), %xmm1
	mulsd	12 * SIZE(BB), %xmm1
	addsd	%xmm1, %xmm4
	movsd	 7 * SIZE(AA), %xmm1
	mulsd	14 * SIZE(BB), %xmm1
	addsd	%xmm1, %xmm4
	movsd	12 * SIZE(AA), %xmm1

	addl   $ 8 * SIZE, AA
	addl   $16 * SIZE, BB
	BRANCH
	decl   %eax
	jne    .L151

.L152:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movsd	ALPHA,  %xmm3
	andl	$7, %eax		# k & 7
	BRANCH
	je .L154

.L153:
	movsd	 0 * SIZE(AA), %xmm0
	mulsd	 0 * SIZE(BB), %xmm0
	addsd	%xmm0, %xmm4

	addl	$1 * SIZE, AA		# aoffset  += 1
	addl	$2 * SIZE, BB		# boffset1 += 2
	decl	%eax
	BRANCH
	jg	.L153
	ALIGN_4

.L154:
	addsd	%xmm6, %xmm4
	addsd	%xmm7, %xmm5

	mulsd	%xmm3, %xmm4

#ifndef TRMMKERNEL
	addsd	0 * SIZE(%esi), %xmm4
#endif
	movsd	%xmm4, 0 * SIZE(%esi)
	ALIGN_2

.L999:
	movl	OLD_STACK, %esp

	EMMS

	popl	%ebx
	popl	%esi
	popl	%edi
	popl	%ebp
	ret
	ALIGN_2

	EPILOGUE
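For orientation, here is a minimal C sketch of the update the kernel as a whole computes. It is a reference model, not the kernel's real interface: the name gemm_ref and its signature are assumptions, the packed panel layouts are simplified to dense column-major with lda == m and ldb == k, and the TRMM offset bookkeeping (KK/KKK above) is omitted.

#include <stddef.h>

/* Reference model (assumed layout, illustrative name): C(m x n) +=
 * alpha * A(m x k) * B(k x n), all double precision. */
static void gemm_ref(size_t m, size_t n, size_t k, double alpha,
                     const double *A, const double *B,
                     double *C, size_t ldc)
{
    for (size_t j = 0; j < n; j++)          /* columns of C (the J loop)  */
        for (size_t i = 0; i < m; i++) {    /* rows of C (the I loop)     */
            double acc = 0.0;
            for (size_t p = 0; p < k; p++)  /* the 8x-unrolled K loops    */
                acc += A[i + p * m] * B[p + j * k];
            C[i + j * ldc] += alpha * acc;  /* TRMMKERNEL skips the add   */
        }
}

The assembly earns its speed over this model through register blocking (a 4x2 tile of C held in xmm4..xmm7), eight-way unrolled k loops, and the duplicated-B buffer that keeps every mulpd operand 16-byte aligned.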
