
📄 zgemm_kernel_2x2_core2.s

📁 Optimized GotoBLAS libraries
📖 Page 1 of 2
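This file is the Core2 2x2 micro-kernel for double-precision complex matrix multiply (zgemm) from Kazushige Goto's optimized BLAS. For orientation, the sketch below shows how a kernel like this is normally reached from user code through the standard CBLAS interface; the header cblas.h, the routine cblas_zgemm, and the link flags are the usual BLAS conventions assumed here, not anything taken from this file. The assembly listing follows after the sketch.

/* Sketch: double-complex GEMM through the standard CBLAS interface.
 * Build against the compiled library, e.g.  cc demo.c -lgoto2  (or -lopenblas). */
#include <stdio.h>
#include <complex.h>
#include <cblas.h>

int main(void) {
    enum { M = 2, N = 2, K = 2 };
    double complex A[M * K] = { 1 + 1 * I, 2, 3, 4 - 1 * I };  /* column-major */
    double complex B[K * N] = { 1, 0, 0, 1 };                  /* identity     */
    double complex C[M * N] = { 0 };
    double complex alpha = 1.0, beta = 0.0;

    /* C := alpha * A * B + beta * C  (no transposition, no conjugation) */
    cblas_zgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
                M, N, K, &alpha, A, M, B, K, &beta, C, M);

    printf("C[0][0] = %g%+gi\n", creal(C[0]), cimag(C[0]));
    return 0;
}

With both transpose arguments set to CblasNoTrans and double-complex data, the library's zgemm driver would typically dispatch to the NN build of a micro-kernel like the one listed below; the NT/TN/NR/RN/... builds at the top of the file cover the transposed and conjugated cases.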
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define OLD_M	%rdi
#define OLD_N	%rsi
#define M	%r13
#define N	%r14
#define K	%rdx
#define A	%rcx
#define B	%r8
#define C	%r9
#define LDC	%r10

#define I	%r11
#define AO	%rdi
#define BO	%rsi
#define CO1	%rbx
#define CO2	%rbp
#define BB	%r12

#ifndef WINDOWS_ABI

#define STACKSIZE 64

#define OLD_LDC		 8 + STACKSIZE(%rsp)
#define OLD_OFFSET	16 + STACKSIZE(%rsp)

#else

#define STACKSIZE 256

#define OLD_ALPHA_I	40 + STACKSIZE(%rsp)
#define OLD_A		48 + STACKSIZE(%rsp)
#define OLD_B		56 + STACKSIZE(%rsp)
#define OLD_C		64 + STACKSIZE(%rsp)
#define OLD_LDC		72 + STACKSIZE(%rsp)
#define OLD_OFFSET	80 + STACKSIZE(%rsp)

#endif

#define ALPHA_R	  0(%rsp)
#define ALPHA_I	 16(%rsp)
#define J	 32(%rsp)
#define OFFSET	 40(%rsp)
#define KK	 48(%rsp)
#define KKK	 56(%rsp)
#define BUFFER	128(%rsp)

#define PREFETCH_R    (8 * 16 + 0)
#define PREFETCH_W    (PREFETCH_R * 2)

#define PREFETCHSIZE  (8 * 21 + 4)
#define PREFETCH      prefetcht0

#if   defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define ADD1	  addpd
#define ADD2	  addpd
#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
#define ADD1	  addpd
#define ADD2	  subpd
#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)
#define ADD1	  subpd
#define ADD2	  addpd
#else
#define ADD1	  subpd
#define ADD2	  subpd
#endif

#define ADDSUB	subpd

	PROLOGUE
	PROFCODE

	subq	$STACKSIZE, %rsp
	movq	%rbx,  0(%rsp)
	movq	%rbp,  8(%rsp)
	movq	%r12, 16(%rsp)
	movq	%r13, 24(%rsp)
	movq	%r14, 32(%rsp)
	movq	%r15, 40(%rsp)

#ifdef WINDOWS_ABI
	movq	%rdi,    48(%rsp)
	movq	%rsi,    56(%rsp)
	movups	%xmm6,   64(%rsp)
	movups	%xmm7,   80(%rsp)
	movups	%xmm8,   96(%rsp)
	movups	%xmm9,  112(%rsp)
	movups	%xmm10, 128(%rsp)
	movups	%xmm11, 144(%rsp)
	movups	%xmm12, 160(%rsp)
	movups	%xmm13, 176(%rsp)
	movups	%xmm14, 192(%rsp)
	movups	%xmm15, 208(%rsp)

	movq	ARG1,      OLD_M
	movq	ARG2,      OLD_N
	movq	ARG3,      K
	movq	OLD_A,     A
	movq	OLD_B,     B
	movq	OLD_C,     C
	movq	OLD_LDC,   LDC
#ifdef TRMMKERNEL
	movsd	OLD_OFFSET, %xmm12
#endif
	movaps	%xmm3, %xmm0
	movsd	OLD_ALPHA_I, %xmm1
#else
	movq	OLD_LDC,   LDC
#ifdef TRMMKERNEL
	movsd	OLD_OFFSET, %xmm12
#endif
#endif

	movq	%rsp, %r15	# save old stack

	subq	$128 + GEMM_Q * 64, %rsp
	andq	$-4096, %rsp	# align stack

#ifdef WINDOWS_ABI
#if GEMM_Q > 192
	movq	$0,  4096 * 3(%rsp)
#endif
#if GEMM_Q > 128
	movq	$0,  4096 * 2(%rsp)
#endif
#if GEMM_Q >  64
	movq	$0,  4096 * 1(%rsp)
#endif
	movq	$0,  4096 * 0(%rsp)
#endif

	movddup	%xmm0, %xmm0
	movddup	%xmm1, %xmm1

	movapd	%xmm0, ALPHA_R
	movapd	%xmm1, ALPHA_I

	subq	$-16 * SIZE, A
	subq	$-16 * SIZE, B

	movq	OLD_M, M
	movq	OLD_N, N

#ifdef TRMMKERNEL
	movsd	%xmm12, OFFSET
	movsd	%xmm12, KK
#ifndef LEFT
	negq	KK
#endif
#endif

	salq	$ZBASE_SHIFT, LDC

	movq	N,  J
	sarq	$1, J		# j = (n >> 1)
	jle,pn	.L100
	ALIGN_4

.L01:
#if defined(TRMMKERNEL) && defined(LEFT)
	movq	OFFSET, %rax
	movq	%rax, KK
#endif

/* Copying to Sub Buffer */
	leaq	16 * SIZE + BUFFER, BO

	movq	K, %rax
	sarq	$2, %rax
	jle	.L03
	ALIGN_3

.L02:
	prefetcht0	(PREFETCH_R + 0) * SIZE(B)

	movapd	-16 * SIZE(B), %xmm0
	movapd	-14 * SIZE(B), %xmm1
	movapd	-12 * SIZE(B), %xmm2
	movapd	-10 * SIZE(B), %xmm3

	prefetcht0	(PREFETCH_R + 8) * SIZE(B)

	movapd	 -8 * SIZE(B), %xmm4
	movapd	 -6 * SIZE(B), %xmm5
	movapd	 -4 * SIZE(B), %xmm6
	movapd	 -2 * SIZE(B), %xmm7

	movddup	  %xmm0, %xmm8
	unpckhpd  %xmm0, %xmm0
	movddup	  %xmm1, %xmm9
	unpckhpd  %xmm1, %xmm1
	movddup	  %xmm2, %xmm10
	unpckhpd  %xmm2, %xmm2
	movddup	  %xmm3, %xmm11
	unpckhpd  %xmm3, %xmm3
	movddup	  %xmm4, %xmm12
	unpckhpd  %xmm4, %xmm4
	movddup	  %xmm5, %xmm13
	unpckhpd  %xmm5, %xmm5
	movddup	  %xmm6, %xmm14
	unpckhpd  %xmm6, %xmm6
	movddup	  %xmm7, %xmm15
	unpckhpd  %xmm7, %xmm7

	prefetcht0	(PREFETCH_W +  0) * SIZE(BO)

	movapd	%xmm8,  -16 * SIZE(BO)
	movapd	%xmm0,  -14 * SIZE(BO)
	movapd	%xmm9,  -12 * SIZE(BO)
	movapd	%xmm1,  -10 * SIZE(BO)

	prefetcht0	(PREFETCH_W +  8) * SIZE(BO)

	movapd	%xmm10,  -8 * SIZE(BO)
	movapd	%xmm2,   -6 * SIZE(BO)
	movapd	%xmm11,  -4 * SIZE(BO)
	movapd	%xmm3,   -2 * SIZE(BO)

	prefetcht0	(PREFETCH_W + 16) * SIZE(BO)

	movapd	%xmm12,   0 * SIZE(BO)
	movapd	%xmm4,    2 * SIZE(BO)
	movapd	%xmm13,   4 * SIZE(BO)
	movapd	%xmm5,    6 * SIZE(BO)

	prefetcht0	(PREFETCH_W + 24) * SIZE(BO)

	movapd	%xmm14,   8 * SIZE(BO)
	movapd	%xmm6,   10 * SIZE(BO)
	movapd	%xmm15,  12 * SIZE(BO)
	movapd	%xmm7,   14 * SIZE(BO)

	subq	$-32 * SIZE, BO
	subq	$-16 * SIZE, B
	decq	%rax
	jne	.L02
	ALIGN_3

.L03:
	movq	K, %rax
	andq	$3, %rax
	BRANCH
	jle	.L05
	ALIGN_3

.L04:
	movapd	-16 * SIZE(B), %xmm0
	movapd	-14 * SIZE(B), %xmm1

	movddup	  %xmm0, %xmm8
	unpckhpd  %xmm0, %xmm0
	movddup	  %xmm1, %xmm9
	unpckhpd  %xmm1, %xmm1

	movapd	%xmm8,  -16 * SIZE(BO)
	movapd	%xmm0,  -14 * SIZE(BO)
	movapd	%xmm9,  -12 * SIZE(BO)
	movapd	%xmm1,  -10 * SIZE(BO)

	addq	$ 4 * SIZE, B
	addq	$ 8 * SIZE, BO
	decq	%rax
	jne	.L04
	ALIGN_3

.L05:
	movq	C, CO1			# coffset1 = c
	leaq	(C, LDC, 1), CO2	# coffset2 = c + ldc

	movq	A, AO			# aoffset = a

	movq	B, BB

	movq	M,  I
	sarq	$1, I			# i = (m >> 1)
	jle	.L30
	ALIGN_4

.L10:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leaq	19 * SIZE + BUFFER, BO
#else
	leaq	19 * SIZE + BUFFER, BO
	movq	KK, %rax
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
	leaq	(BO, %rax, 8), BO
#endif

	movaps	-16 * SIZE(AO), %xmm0
	movaps	-14 * SIZE(AO), %xmm1

	movaps	-19 * SIZE(BO), %xmm6
	movaps	-17 * SIZE(BO), %xmm7

	pxor	%xmm8, %xmm8
	prefetcht2	(PREFETCH_R +  0) * SIZE(BB)
	pxor	%xmm9, %xmm9
	pxor	%xmm10, %xmm10
	prefetcht2	(PREFETCH_R +  8) * SIZE(BB)
	pxor	%xmm11, %xmm11

	pxor	%xmm12, %xmm12
	prefetcht0	3 * SIZE(CO1)
	pxor	%xmm13, %xmm13
	pxor	%xmm14, %xmm14
	pxor	%xmm15, %xmm15

	pxor	%xmm2, %xmm2
	pxor	%xmm3, %xmm3
	pxor	%xmm4, %xmm4
	prefetcht0	3 * SIZE(CO2)
	pxor	%xmm5, %xmm5

	subq	$-8 * SIZE, BB

#ifndef TRMMKERNEL
	movq	K, %rax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movq	K, %rax
	subq	KK, %rax
	movq	%rax, KKK
#else
	movq	KK, %rax
#ifdef LEFT
	addq	$2, %rax
#else
	addq	$2, %rax
#endif
	movq	%rax, KKK
#endif
	sarq	$2, %rax
	jle,pn	.L15
	ALIGN_4

.L12:
	PADDING;
	ADD1	%xmm2, %xmm10
	movaps	-15 * SIZE(BO), %xmm2
	PADDING;
	ADD1	%xmm3, %xmm14
	PREFETCH (PREFETCHSIZE +  0) * SIZE(AO)
	movaps	%xmm6, %xmm3
	mulpd	%xmm0, %xmm6
	mulpd	%xmm1, %xmm3

	ADD2	%xmm4, %xmm11
	movaps	-13 * SIZE(BO), %xmm4
	ADD2	%xmm5, %xmm15
	movaps	%xmm7, %xmm5
	mulpd	%xmm0, %xmm7
	mulpd	%xmm1, %xmm5

	ADD1	%xmm6, %xmm8
	movaps	-11 * SIZE(BO), %xmm6
	ADD1	%xmm3, %xmm12
	movaps	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	ADD2	%xmm7, %xmm9
	movaps	 -9 * SIZE(BO), %xmm7
	ADD2	%xmm5, %xmm13
	movaps	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	movaps	-12 * SIZE(AO), %xmm0
	mulpd	%xmm1, %xmm5
	movaps	-10 * SIZE(AO), %xmm1

	ADD1	%xmm2, %xmm10
	movaps	 -7 * SIZE(BO), %xmm2
	ADD1	%xmm3, %xmm14
	movaps	%xmm6, %xmm3
	mulpd	%xmm0, %xmm6
	mulpd	%xmm1, %xmm3

	ADD2	%xmm4, %xmm11
	movaps	 -5 * SIZE(BO), %xmm4
	ADD2	%xmm5, %xmm15
	movaps	%xmm7, %xmm5
	mulpd	%xmm0, %xmm7
	mulpd	%xmm1, %xmm5

	ADD1	%xmm6, %xmm8
	movaps	 -3 * SIZE(BO), %xmm6
	ADD1	%xmm3, %xmm12
	movaps	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	ADD2	%xmm7, %xmm9
	movaps	 -1 * SIZE(BO), %xmm7
	ADD2	%xmm5, %xmm13
	movaps	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	movaps	 -8 * SIZE(AO), %xmm0
	mulpd	%xmm1, %xmm5
	movaps	 -6 * SIZE(AO), %xmm1

	ADD1	%xmm2, %xmm10
	movaps	  1 * SIZE(BO), %xmm2
	ADD1	%xmm3, %xmm14
	movaps	%xmm6, %xmm3
	mulpd	%xmm0, %xmm6
	mulpd	%xmm1, %xmm3

	ADD2	%xmm4, %xmm11
	movaps	  3 * SIZE(BO), %xmm4
	ADD2	%xmm5, %xmm15
	PADDING
	movaps	%xmm7, %xmm5
	mulpd	%xmm1, %xmm5
	PREFETCH (PREFETCHSIZE +  8) * SIZE(AO)
	mulpd	%xmm0, %xmm7

	ADD1	%xmm6, %xmm8
	movaps	  5 * SIZE(BO), %xmm6
	ADD1	%xmm3, %xmm12
	movaps	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	ADD2	%xmm7, %xmm9
	movaps	  7 * SIZE(BO), %xmm7
	ADD2	%xmm5, %xmm13
	movaps	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	movaps	 -4 * SIZE(AO), %xmm0
	mulpd	%xmm1, %xmm5
	movaps	 -2 * SIZE(AO), %xmm1

	ADD1	%xmm2, %xmm10
	movaps	  9 * SIZE(BO), %xmm2
	ADD1	%xmm3, %xmm14
	movaps	%xmm6, %xmm3
	mulpd	%xmm0, %xmm6
	mulpd	%xmm1, %xmm3

	ADD2	%xmm4, %xmm11
	movaps	 11 * SIZE(BO), %xmm4
	ADD2	%xmm5, %xmm15
	movaps	%xmm7, %xmm5
	mulpd	%xmm0, %xmm7
	mulpd	%xmm1, %xmm5

	ADD1	%xmm6, %xmm8
	movaps	 13 * SIZE(BO), %xmm6
	ADD1	%xmm3, %xmm12
	movaps	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3
	subq	$-16 * SIZE, AO

	ADD2	%xmm7, %xmm9
	movaps	 15 * SIZE(BO), %xmm7
	ADD2	%xmm5, %xmm13
	movaps	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	movaps	-16 * SIZE(AO), %xmm0
	mulpd	%xmm1, %xmm5
	movaps	-14 * SIZE(AO), %xmm1

	subq	$-32 * SIZE, BO
	subq	$1, %rax
	jg,pt	.L12
	ALIGN_3

.L15:
#ifndef TRMMKERNEL
	movq	K, %rax
#else
	movq	KKK, %rax
#endif
	andq	$3, %rax
	BRANCH
	je,pt	.L19
	ALIGN_4

.L16:
	ADD1	%xmm2, %xmm10
	movaps	-15 * SIZE(BO), %xmm2
	ADD1	%xmm3, %xmm14
	movaps	%xmm6, %xmm3
	mulpd	%xmm0, %xmm6
	mulpd	%xmm1, %xmm3

	ADD2	%xmm4, %xmm11
	movaps	-13 * SIZE(BO), %xmm4
	ADD2	%xmm5, %xmm15
	movaps	%xmm7, %xmm5
	mulpd	%xmm0, %xmm7
	mulpd	%xmm1, %xmm5

	ADD1	%xmm6, %xmm8
	movaps	-11 * SIZE(BO), %xmm6
	ADD1	%xmm3, %xmm12
	addq	$4 * SIZE, AO
	movaps	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	ADD2	%xmm7, %xmm9
	movaps	 -9 * SIZE(BO), %xmm7
	ADD2	%xmm5, %xmm13
	addq	$8 * SIZE, BO
	movaps	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	movaps	-16 * SIZE(AO), %xmm0
	mulpd	%xmm1, %xmm5
	movaps	-14 * SIZE(AO), %xmm1

	subq	$1, %rax
	jg,pt	.L16
	ALIGN_3

.L19:
	movapd	ALPHA_R, %xmm6
	movapd	ALPHA_I, %xmm7

	ADD1	%xmm2, %xmm10
	ADD1	%xmm3, %xmm14
	ADD2	%xmm4, %xmm11
	ADD2	%xmm5, %xmm15

	SHUFPD_1 %xmm9,  %xmm9
	SHUFPD_1 %xmm11, %xmm11
	SHUFPD_1 %xmm13, %xmm13
	SHUFPD_1 %xmm15, %xmm15

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(NR) || defined(NC) || defined(TR) || defined(TC)
	addsubpd	%xmm9,  %xmm8
	addsubpd	%xmm11, %xmm10
	addsubpd	%xmm13, %xmm12
	addsubpd	%xmm15, %xmm14

	movapd	%xmm8,  %xmm9
	movapd	%xmm10, %xmm11
	movapd	%xmm12, %xmm13
	movapd	%xmm14, %xmm15
#else
	addsubpd	%xmm8,  %xmm9
	addsubpd	%xmm10, %xmm11
	addsubpd	%xmm12, %xmm13
	addsubpd	%xmm14, %xmm15

	movapd	%xmm9,  %xmm8
	movapd	%xmm11, %xmm10
	movapd	%xmm13, %xmm12
	movapd	%xmm15, %xmm14
#endif

#if! defined(TRMMKERNEL) && !defined(BETAZERO)
	movsd	0 * SIZE(CO1), %xmm0
	movhpd	1 * SIZE(CO1), %xmm0
	movsd	2 * SIZE(CO1), %xmm2
	movhpd	3 * SIZE(CO1), %xmm2

	movsd	0 * SIZE(CO2), %xmm1
	movhpd	1 * SIZE(CO2), %xmm1
	movsd	2 * SIZE(CO2), %xmm3
	movhpd	3 * SIZE(CO2), %xmm3
#endif

	SHUFPD_1 %xmm9,  %xmm9
	SHUFPD_1 %xmm11, %xmm11
	SHUFPD_1 %xmm13, %xmm13
	SHUFPD_1 %xmm15, %xmm15

	mulpd	%xmm6, %xmm8
	mulpd	%xmm6, %xmm10
	mulpd	%xmm6, %xmm12
	mulpd	%xmm6, %xmm14

	mulpd	%xmm7, %xmm9
	mulpd	%xmm7, %xmm11
	mulpd	%xmm7, %xmm13
	mulpd	%xmm7, %xmm15

	addsubpd	%xmm9,  %xmm8
	addsubpd	%xmm11, %xmm10
	addsubpd	%xmm13, %xmm12
	addsubpd	%xmm15, %xmm14

#if! defined(TRMMKERNEL) && !defined(BETAZERO)
	addpd	%xmm0, %xmm8
	addpd	%xmm1, %xmm10
	addpd	%xmm2, %xmm12
	addpd	%xmm3, %xmm14
#endif

	movsd	%xmm8,  0 * SIZE(CO1)
	movhpd	%xmm8,  1 * SIZE(CO1)
	movsd	%xmm12, 2 * SIZE(CO1)
	movhpd	%xmm12, 3 * SIZE(CO1)

	movsd	%xmm10, 0 * SIZE(CO2)
	movhpd	%xmm10, 1 * SIZE(CO2)
	movsd	%xmm14, 2 * SIZE(CO2)
	movhpd	%xmm14, 3 * SIZE(CO2)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movq	K, %rax
	subq	KKK, %rax
	leaq	(,%rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
	leaq	(BO, %rax, 4), BO
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addq	$2, KK
#endif

	addq	$4 * SIZE, CO1		# coffset += 4
	addq	$4 * SIZE, CO2		# coffset += 4
	decq	I			# i --
	jg	.L10
	ALIGN_4

.L30:
	testq	$1, M
	jle	.L99

.L40:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leaq	16 * SIZE + BUFFER, BO
#else
	leaq	16 * SIZE + BUFFER, BO
	movq	KK, %rax
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 2), AO
	leaq	(BO, %rax, 8), BO
#endif

	pxor	%xmm8, %xmm8
	pxor	%xmm9, %xmm9
	pxor	%xmm10, %xmm10
	pxor	%xmm11, %xmm11

#ifndef TRMMKERNEL
	movq	K, %rax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movq	K, %rax
	subq	KK, %rax
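The listing breaks off here; the remainder of the kernel (from the M-remainder path at .L30/.L40 onward) is on page 2 of the original posting.

As the packing loops (.L02/.L04) show, each complex element of B is split with movddup/unpckhpd into one register holding two copies of its real part and one holding two copies of its imaginary part; the main loop (.L12) then accumulates the real-part and imaginary-part products against A in separate xmm accumulators, and the .L19 epilogue recombines them with SHUFPD_1/addsubpd before scaling by ALPHA_R/ALPHA_I. The following scalar model of a single complex multiply-accumulate is an illustrative sketch, not code from the file, written out for the non-conjugated (NN) build:

/* Scalar model of one complex multiply-accumulate, c += a * b, NN case:
 *   re(c) += re(a)*re(b) - im(a)*im(b)
 *   im(c) += re(a)*im(b) + im(a)*re(b)
 * Flipping the sign of im(a) or im(b) is what conjugating A or B does, which
 * is what the ADD1/ADD2 #if block at the top of the file selects between. */
static void zmac(double *cr, double *ci,
                 double ar, double ai, double br, double bi) {
    *cr += ar * br - ai * bi;
    *ci += ar * bi + ai * br;
}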
