
ztrsm_kernel_lt_1x2_sse2.s

Optimized GotoBLAS libraries
Assembly source · Page 1 of 2
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define STACK	16
#define ARGS	 0

#define STACK_M	 4 + STACK + ARGS(%esi)
#define STACK_N	 8 + STACK + ARGS(%esi)
#define STACK_K	12 + STACK + ARGS(%esi)
#define STACK_ALPHA_R	16 + STACK + ARGS(%esi)
#define STACK_ALPHA_I	24 + STACK + ARGS(%esi)
#define STACK_A	32 + STACK + ARGS(%esi)
#define STACK_B	36 + STACK + ARGS(%esi)
#define STACK_C	40 + STACK + ARGS(%esi)
#define STACK_LDC	44 + STACK + ARGS(%esi)
#define STACK_OFFT	48 + STACK + ARGS(%esi)

#define POSINV	 0(%esp)
#define K	16(%esp)
#define N	20(%esp)
#define M	24(%esp)
#define A	28(%esp)
#define C	32(%esp)
#define J	36(%esp)
#define OLD_STACK 40(%esp)
#define OFFSET  44(%esp)
#define KK	48(%esp)
#define KKK	52(%esp)
#define AORIG   56(%esp)
#define BORIG	60(%esp)
#define BUFFER 128(%esp)

#define STACK_ALIGN	4096
#define STACK_OFFSET	1024

#define LOCAL_BUFFER_SIZE  GEMM_Q * GEMM_UNROLL_N * COMPSIZE * 16

#define PREFETCHSIZE (8 * 10 + 4)

#define AA	%edx
#define BB	%ecx
#define LDC	%ebp
#define B	%edi
#define CO1	%esi

#ifndef CONJ
#define NN
#else
#if defined(LN) || defined(LT)
#define CN
#else
#define NC
#endif
#endif

#define KERNEL1(address) \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm4; \
	PREFETCH (PREFETCHSIZE +  0) * SIZE + (address) * 1 * SIZE(AA); \
	movapd	 2 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	 4 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	mulpd	 6 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm2, %xmm6; \
	movapd	16 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm0, %xmm7; \
	movapd	 2 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL2(address) \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	10 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	12 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	mulpd	14 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm3, %xmm6; \
	movapd	24 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm0, %xmm7; \
	movapd	 4 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL3(address) \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movapd	18 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	20 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	mulpd	22 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm2, %xmm6; \
	movapd	32 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm0, %xmm7; \
	movapd	 6 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL4(address) \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	26 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	28 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	mulpd	30 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm3, %xmm6; \
	movapd	40 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm0, %xmm7; \
	movapd	16 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL5(address) \
	PREFETCH (PREFETCHSIZE + 8) * SIZE + (address) * 1 * SIZE(AA); \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movapd	34 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	36 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	mulpd	38 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm2, %xmm6; \
	movapd	48 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm1, %xmm7; \
	movapd	10 * SIZE + (address) * 1 * SIZE(AA), %xmm1

#define KERNEL6(address) \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	42 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	44 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	mulpd	46 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm3, %xmm6; \
	movapd	56 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm1, %xmm7; \
	movapd	12 * SIZE + (address) * 1 * SIZE(AA), %xmm1

#define KERNEL7(address) \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movapd	50 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	52 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	mulpd	54 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm2, %xmm6; \
	movapd	64 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm1, %xmm7; \
	movapd	14 * SIZE + (address) * 1 * SIZE(AA), %xmm1

#define KERNEL8(address) \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	58 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	60 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	mulpd	62 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm3, %xmm6; \
	movapd	72 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm1, %xmm7; \
	movapd	24 * SIZE + (address) * 1 * SIZE(AA), %xmm1

	PROLOGUE

	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%ebx

	PROFCODE

	EMMS

	movl	%esp, %esi	# save old stack
	subl	$128 + LOCAL_BUFFER_SIZE + STACK_OFFSET, %esp
	andl	$-STACK_ALIGN, %esp	# align stack
	addl	$STACK_OFFSET, %esp

	movl	STACK_M, %ebx
	movl	STACK_N, %eax
	movl	STACK_K, %ecx
	movl	STACK_A, %edx

	movl	%ebx, M
	movl	%eax, N
	movl	%ecx, K
	movl	%edx, A
	movl	%esi, OLD_STACK

	movl	STACK_B, B
	movl	STACK_C, %ebx
	movss	STACK_OFFT, %xmm4

	pcmpeqb	%xmm7, %xmm7
	psllq	$63, %xmm7	# Generate mask
	pxor	%xmm2, %xmm2

	movlpd	  %xmm2,  0 + POSINV
	movlpd	  %xmm7,  8 + POSINV

	movl	%ebx, C
	movl	STACK_LDC, LDC

	movss	%xmm4, OFFSET
	movss	%xmm4, KK

	sall	$ZBASE_SHIFT, LDC

#ifdef LN
       movl	M, %eax
       sall	$ZBASE_SHIFT, %eax
       addl	%eax, C
       imull	K, %eax
       addl	%eax, A
#endif

#ifdef RT
       movl	N, %eax
       sall	$ZBASE_SHIFT, %eax
       imull	K, %eax
       addl	%eax, B

       movl	N, %eax
       imull	LDC, %eax
       addl	%eax, C
#endif

#ifdef RN
	negl	KK
#endif

#ifdef RT
       movl	N, %eax
       subl	OFFSET, %eax
       movl	%eax, KK
#endif

	movl	N, %eax
	sarl	$1, %eax
	movl	%eax, J			# j = n
	jle	.L100
	ALIGN_4

.L01:
#ifdef LN
	movl	OFFSET, %eax
	addl	M, %eax
	movl	%eax, KK
#endif

	leal	BUFFER, BB

#ifdef RT
       movl	K, %eax
       sall	$1 + ZBASE_SHIFT, %eax
       subl	%eax, B
#endif

#if defined(LN) || defined(RT)
	movl	KK, %eax
	movl	B, BORIG
	sall	$1 + ZBASE_SHIFT, %eax
	addl	%eax, B
	leal	(BB, %eax, 2), BB
#endif

#if defined(LT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K,  %eax
	subl	KK, %eax
#endif
	sarl	$1, %eax
	jle	.L03
	ALIGN_4

.L02:
	prefetchnta	 56 * SIZE(B)

	movlpd	 0 * SIZE(B), %xmm0
	movlpd	 1 * SIZE(B), %xmm1
	movlpd	 2 * SIZE(B), %xmm2
	movlpd	 3 * SIZE(B), %xmm3
	movlpd	 4 * SIZE(B), %xmm4
	movlpd	 5 * SIZE(B), %xmm5
	movlpd	 6 * SIZE(B), %xmm6
	movlpd	 7 * SIZE(B), %xmm7

	movlpd	%xmm0,  0 * SIZE(BB)
	movlpd	%xmm0,  1 * SIZE(BB)
	movlpd	%xmm1,  2 * SIZE(BB)
	movlpd	%xmm1,  3 * SIZE(BB)
	movlpd	%xmm2,  4 * SIZE(BB)
	movlpd	%xmm2,  5 * SIZE(BB)
	movlpd	%xmm3,  6 * SIZE(BB)
	movlpd	%xmm3,  7 * SIZE(BB)
	movlpd	%xmm4,  8 * SIZE(BB)
	movlpd	%xmm4,  9 * SIZE(BB)
	movlpd	%xmm5, 10 * SIZE(BB)
	movlpd	%xmm5, 11 * SIZE(BB)
	movlpd	%xmm6, 12 * SIZE(BB)
	movlpd	%xmm6, 13 * SIZE(BB)
	movlpd	%xmm7, 14 * SIZE(BB)
	movlpd	%xmm7, 15 * SIZE(BB)

	addl	$  8 * SIZE, B
	subl	$-16 * SIZE, BB
	decl	%eax
	jne	.L02
	ALIGN_4

.L03:
#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	andl	$1, %eax
	BRANCH
	jle	.L05

	movlpd	 0 * SIZE(B), %xmm0
	movlpd	 1 * SIZE(B), %xmm1
	movlpd	 2 * SIZE(B), %xmm2
	movlpd	 3 * SIZE(B), %xmm3

	movlpd	%xmm0,  0 * SIZE(BB)
	movlpd	%xmm0,  1 * SIZE(BB)
	movlpd	%xmm1,  2 * SIZE(BB)
	movlpd	%xmm1,  3 * SIZE(BB)
	movlpd	%xmm2,  4 * SIZE(BB)
	movlpd	%xmm2,  5 * SIZE(BB)
	movlpd	%xmm3,  6 * SIZE(BB)
	movlpd	%xmm3,  7 * SIZE(BB)

	addl	$4 * SIZE, B
	ALIGN_4

.L05:
#if defined(LT) || defined(RN)
	movl	A, %eax
	movl	%eax, AA
#else
	movl	A, %eax
	movl	%eax, AORIG
#endif

#ifdef RT
       leal	(, LDC, 2), %eax
       subl	%eax, C
#endif

	movl	C, CO1

#ifndef RT
	leal	(, LDC, 2), %eax
	addl	%eax, C
#endif

	movl	M,  %ebx
	testl	%ebx, %ebx
	jle	.L100
	ALIGN_4

.L10:
#ifdef LN
       movl	K, %eax
       sall	$ZBASE_SHIFT, %eax
       subl	%eax, AORIG
#endif

#if defined(LN) || defined(RT)
	movl	AORIG, %eax
	movl	%eax, AA

	movl	KK, %eax
	sall	$ZBASE_SHIFT, %eax
	addl	%eax, AA
#endif

	leal	BUFFER, BB

#if defined(LN) || defined(RT)
	movl	KK, %eax
	sall	$1 + ZBASE_SHIFT, %eax
	leal	(BB, %eax, 2), BB
#endif

	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movapd	 8 * SIZE(AA), %xmm1
	pxor	%xmm5, %xmm5
	movapd	 0 * SIZE(BB), %xmm2
	pxor	%xmm6, %xmm6
	movapd	 8 * SIZE(BB), %xmm3
	pxor	%xmm7, %xmm7

#ifdef LN
	prefetchw -2 * SIZE(CO1)
	prefetchw -2 * SIZE(CO1, LDC)
#else
	prefetchw  2 * SIZE(CO1)
	prefetchw  2 * SIZE(CO1, LDC)
#endif

#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif

#if 1
	andl	$-8, %eax
	sall	$4, %eax
	je	.L15

.L1X:
	KERNEL1(16  *  0)
	KERNEL2(16  *  0)
	KERNEL3(16  *  0)
	KERNEL4(16  *  0)
	KERNEL5(16  *  0)
	KERNEL6(16  *  0)
	KERNEL7(16  *  0)
	KERNEL8(16  *  0)
	cmpl	$128 *  1, %eax
	jle	.L12
	KERNEL1(16  *  1)
	KERNEL2(16  *  1)
	KERNEL3(16  *  1)
	KERNEL4(16  *  1)
	KERNEL5(16  *  1)
	KERNEL6(16  *  1)
	KERNEL7(16  *  1)
	KERNEL8(16  *  1)
	cmpl	$128 *  2, %eax
	jle	.L12
	KERNEL1(16  *  2)
	KERNEL2(16  *  2)
	KERNEL3(16  *  2)
	KERNEL4(16  *  2)
	KERNEL5(16  *  2)
	KERNEL6(16  *  2)
	KERNEL7(16  *  2)
	KERNEL8(16  *  2)
	cmpl	$128 *  3, %eax
	jle	.L12
	KERNEL1(16  *  3)
	KERNEL2(16  *  3)
	KERNEL3(16  *  3)
	KERNEL4(16  *  3)
	KERNEL5(16  *  3)
	KERNEL6(16  *  3)
	KERNEL7(16  *  3)
	KERNEL8(16  *  3)
	cmpl	$128 *  4, %eax
	jle	.L12
	KERNEL1(16  *  4)
	KERNEL2(16  *  4)
	KERNEL3(16  *  4)
	KERNEL4(16  *  4)
	KERNEL5(16  *  4)
	KERNEL6(16  *  4)
	KERNEL7(16  *  4)
	KERNEL8(16  *  4)
	cmpl	$128 *  5, %eax
	jle	.L12
	KERNEL1(16  *  5)
	KERNEL2(16  *  5)
	KERNEL3(16  *  5)
	KERNEL4(16  *  5)
	KERNEL5(16  *  5)
	KERNEL6(16  *  5)
	KERNEL7(16  *  5)
	KERNEL8(16  *  5)
	cmpl	$128 *  6, %eax
	jle	.L12
	KERNEL1(16  *  6)
	KERNEL2(16  *  6)
	KERNEL3(16  *  6)
	KERNEL4(16  *  6)
	KERNEL5(16  *  6)
	KERNEL6(16  *  6)
	KERNEL7(16  *  6)
	KERNEL8(16  *  6)
	cmpl	$128 *  7, %eax
	jle	.L12
	KERNEL1(16  *  7)
	KERNEL2(16  *  7)
	KERNEL3(16  *  7)
	KERNEL4(16  *  7)
	KERNEL5(16  *  7)
	KERNEL6(16  *  7)
	KERNEL7(16  *  7)
	KERNEL8(16  *  7)

	addl	$128 * 4  * SIZE, BB
	addl	$128 * 1  * SIZE, AA
	subl	$128 * 8, %eax
	jg	.L1X
	jmp	.L15

.L12:
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB
	ALIGN_4
#else
	sarl	$3, %eax
	je	.L15
	ALIGN_4

.L12:
	KERNEL1(16  *  0)
	KERNEL2(16  *  0)
	KERNEL3(16  *  0)
	KERNEL4(16  *  0)
	KERNEL5(16  *  0)
	KERNEL6(16  *  0)
	KERNEL7(16  *  0)
	KERNEL8(16  *  0)

	addl   $64 * SIZE, BB
	addl   $16 * SIZE, AA
	decl   %eax
	jne    .L12
	ALIGN_4
#endif

.L15:
#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K,  %eax
	subl	KK, %eax
#endif
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je .L14
	ALIGN_4

.L13:
	mulpd	 %xmm0, %xmm2
	addpd	 %xmm2, %xmm4
	movapd	 2 * SIZE(BB), %xmm2
	mulpd	 %xmm0, %xmm2
	addpd	 %xmm2, %xmm5
	movapd	 4 * SIZE(BB), %xmm2
	mulpd	 %xmm0, %xmm2
	mulpd	 6 * SIZE(BB), %xmm0
	addpd	 %xmm2, %xmm6
	movapd	 8 * SIZE(BB), %xmm2
	addpd	 %xmm0, %xmm7
	movapd	 2 * SIZE(AA), %xmm0

	addl	$2 * SIZE, AA
	addl	$8 * SIZE, BB
	decl	%eax
	jg	.L13
	ALIGN_4

.L14:
#if defined(LN) || defined(RT)
	movl	KK, %eax
#ifdef LN
	subl	$1, %eax
#else
	subl	$2, %eax
#endif

	movl	AORIG, AA
	movl	BORIG, B
	leal	BUFFER, BB

	sall	$ZBASE_SHIFT, %eax
	addl	%eax, AA
	leal	(B,  %eax, 2), B
	leal	(BB, %eax, 4), BB
#endif

	movapd	POSINV,  %xmm1

	SHUFPD_1 %xmm5, %xmm5
	SHUFPD_1 %xmm7, %xmm7

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(NR) || defined(NC) || defined(TR) || defined(TC)
	xorpd	%xmm1, %xmm5
	xorpd	%xmm1, %xmm7
#else
	xorpd	%xmm1, %xmm4
	xorpd	%xmm1, %xmm6
#endif

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(RR) || defined(RC) || defined(CR) || defined(CC)
	subpd	%xmm5, %xmm4
	subpd	%xmm7, %xmm6
#else
	addpd	%xmm5, %xmm4
	addpd	%xmm7, %xmm6
#endif

#if defined(LN) || defined(LT)
	movapd	 0 * SIZE(B), %xmm5
	movapd	 2 * SIZE(B), %xmm7

	subpd	%xmm4,  %xmm5
	subpd	%xmm6,  %xmm7
#else
	movapd	 0 * SIZE(AA), %xmm5
	movapd	 2 * SIZE(AA), %xmm7

	subpd	%xmm4,  %xmm5
	subpd	%xmm6,  %xmm7
#endif
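The listing breaks off here; the remainder of the kernel (the back-substitution and store code after .L14) is on the second page. For orientation, here is a hypothetical C-level view of the entry point, inferred purely from the STACK_* offset macros above. The real exported symbol is generated by the CNAME machinery in common.h, so the function name below is illustrative only, and the alpha arguments, while present in the argument layout, are never referenced on this page.

/* Hypothetical prototype reconstructed from the STACK_* macros.
 * On i386, after the four pushl instructions, %esi holds the caller's
 * %esp, so the first argument sits at 4 + STACK(%esi) = STACK_M.
 * Each double-complex element is a {real, imag} pair of doubles,
 * which is why indices and LDC are scaled by ZBASE_SHIFT. */
int ztrsm_kernel_lt_1x2(long m,          /* STACK_M */
                        long n,          /* STACK_N */
                        long k,          /* STACK_K */
                        double alpha_r,  /* STACK_ALPHA_R (unreferenced here) */
                        double alpha_i,  /* STACK_ALPHA_I (unreferenced here) */
                        double *a,       /* STACK_A: packed triangular panel */
                        double *b,       /* STACK_B: packed right-hand sides */
                        double *c,       /* STACK_C: output tile, column major */
                        long ldc,        /* STACK_LDC: shifted by ZBASE_SHIFT */
                        long offset);    /* STACK_OFFT: stored to OFFSET and KK */

The "1x2" in the file name matches the register use visible above: one complex element of A at a time (%xmm0/%xmm1) is multiplied against two columns of duplicated B values, with %xmm4-%xmm7 holding the four partial products that the POSINV sign mask, the SHUFPD_1 swaps, and the conditional add/sub at .L14 fold into real and imaginary parts.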
