
dot_sse2_core2.s

Optimized GotoBLAS libraries: SSE2 double-precision dot product (DDOT) kernel for x86-64, tuned for Intel Core 2.
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define N	ARG1	/* rdi */
#define X	ARG2	/* rsi */
#define INCX	ARG3	/* rdx */
#define Y	ARG4	/* rcx */

#ifndef WINDOWS_ABI
#define INCY	ARG5	/* r8  */
#else
#define INCY	%r10
#endif

	PROLOGUE
	PROFCODE

#ifdef WINDOWS_ABI
	movq	40(%rsp), INCY
#endif

	SAVEREGISTERS

#ifdef F_INTERFACE
#ifndef USE64BITINT
	movslq	(N), N			# N
	movslq	(INCX), INCX		# INCX
	movslq	(INCY), INCY		# INCY
#else
	movq	(N), N			# N
	movq	(INCX), INCX		# INCX
	movq	(INCY), INCY		# INCY
#endif
#endif

	leaq	(, INCX, SIZE), INCX
	leaq	(, INCY, SIZE), INCY

	pxor	%xmm0, %xmm0
	pxor	%xmm1, %xmm1
	pxor	%xmm2, %xmm2
	pxor	%xmm3, %xmm3

	cmpq	$0, N
	jle	.L999

	cmpq	$SIZE, INCX
	jne	.L50
	cmpq	$SIZE, INCY
	jne	.L50

	testq	$SIZE, Y
	je	.L10

	movsd	0 * SIZE(X), %xmm0
	mulsd	0 * SIZE(Y), %xmm0
	addq	$1 * SIZE, X
	addq	$1 * SIZE, Y
	decq	N
	ALIGN_2

.L10:
	subq	$-16 * SIZE, X
	subq	$-16 * SIZE, Y

	testq	$SIZE, X
	jne	.L20

	pxor	%xmm4, %xmm4
	pxor	%xmm5, %xmm5

	movq	N,  %rax
	sarq	$5, %rax
	jle	.L13
	ALIGN_4

.L11:
	addpd	%xmm4,  %xmm2
	movapd	-16 * SIZE(Y), %xmm6
	movapd	-16 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm3
	movapd	-14 * SIZE(X), %xmm5
	movapd	-14 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm0
	movapd	-12 * SIZE(Y), %xmm6
	movapd	-12 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm1
	movapd	-10 * SIZE(X), %xmm5
	movapd	-10 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm2
	movapd	 -8 * SIZE(Y), %xmm6
	movapd	 -8 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm3
	movapd	 -6 * SIZE(X), %xmm5
	movapd	 -6 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm0
	movapd	 -4 * SIZE(Y), %xmm6
	movapd	 -4 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm1
	movapd	 -2 * SIZE(X), %xmm5
	movapd	 -2 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm2
	movapd	  0 * SIZE(Y), %xmm6
	movapd	  0 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm3
	movapd	  2 * SIZE(X), %xmm5
	movapd	  2 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm0
	movapd	  4 * SIZE(Y), %xmm6
	movapd	  4 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm1
	movapd	  6 * SIZE(X), %xmm5
	movapd	  6 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm2
	movapd	  8 * SIZE(Y), %xmm6
	movapd	  8 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm3
	movapd	 10 * SIZE(X), %xmm5
	movapd	 10 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm0
	movapd	 12 * SIZE(Y), %xmm6
	movapd	 12 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm1
	movapd	 14 * SIZE(X), %xmm5
	subq	$-32 * SIZE, X
	movapd	 14 * SIZE(Y), %xmm7
	subq	$-32 * SIZE, Y
	mulpd	%xmm7,  %xmm5

	subq	$1, %rax
	jg,pt	.L11
	ALIGN_3

.L13:
	testq	$16, N
	jle	.L14

	addpd	%xmm4,  %xmm2
	movapd	-16 * SIZE(Y), %xmm6
	movapd	-16 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm3
	movapd	-14 * SIZE(X), %xmm5
	movapd	-14 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm0
	movapd	-12 * SIZE(Y), %xmm6
	movapd	-12 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm1
	movapd	-10 * SIZE(X), %xmm5
	movapd	-10 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm2
	movapd	 -8 * SIZE(Y), %xmm6
	movapd	 -8 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm3
	movapd	 -6 * SIZE(X), %xmm5
	movapd	 -6 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm0
	movapd	 -4 * SIZE(Y), %xmm6
	movapd	 -4 * SIZE(X), %xmm4
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm1
	movapd	 -2 * SIZE(X), %xmm5
	movapd	 -2 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	subq	$-16 * SIZE, Y
	subq	$-16 * SIZE, X
	ALIGN_3

.L14:
	testq	$8, N
	jle	.L15

	addpd	%xmm4,  %xmm2
	movapd	-16 * SIZE(X), %xmm4
	movapd	-16 * SIZE(Y), %xmm6
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm3
	movapd	-14 * SIZE(X), %xmm5
	movapd	-14 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addpd	%xmm4,  %xmm0
	movapd	-12 * SIZE(X), %xmm4
	movapd	-12 * SIZE(Y), %xmm6
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm1
	movapd	-10 * SIZE(X), %xmm5
	movapd	-10 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addq	$8 * SIZE, X
	addq	$8 * SIZE, Y
	ALIGN_3

.L15:
	testq	$4, N
	jle	.L16

	addpd	%xmm4,  %xmm2
	movapd	-16 * SIZE(X), %xmm4
	movapd	-16 * SIZE(Y), %xmm6
	mulpd	%xmm6,  %xmm4

	addpd	%xmm5,  %xmm3
	movapd	-14 * SIZE(X), %xmm5
	movapd	-14 * SIZE(Y), %xmm7
	mulpd	%xmm7,  %xmm5

	addq	$4 * SIZE, X
	addq	$4 * SIZE, Y
	ALIGN_3

.L16:
	addpd	%xmm4,  %xmm2
	addpd	%xmm5,  %xmm3

	testq	$2, N
	jle	.L17

	movapd	-16 * SIZE(X), %xmm4
	movapd	-16 * SIZE(Y), %xmm6
	mulpd	%xmm6,  %xmm4
	addpd	%xmm4,  %xmm2

	addq	$2 * SIZE, X
	addq	$2 * SIZE, Y
	ALIGN_3

.L17:
	testq	$1, N
	jle	.L999

	movsd	-16 * SIZE(X), %xmm4
	movsd	-16 * SIZE(Y), %xmm6
	mulsd	%xmm6,  %xmm4
	addsd	%xmm4,  %xmm3
	jmp	.L999
	ALIGN_3

.L20:
	movhpd	-16 * SIZE(X), %xmm4
	pxor	%xmm5, %xmm5
	pxor	%xmm7, %xmm7

	movq	N,  %rax
	sarq	$5, %rax
	jle	.L23
	ALIGN_3

.L21:
	addpd	%xmm5,  %xmm3
	movapd	-15 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm4
	movapd	-16 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm4

	addpd	%xmm7,  %xmm2
	movapd	-13 * SIZE(X), %xmm7
	SHUFPD_1 %xmm7, %xmm5
	movapd	-14 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm1
	movapd	-11 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm7
	movapd	-12 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm7

	addpd	%xmm4,  %xmm0
	movapd	 -9 * SIZE(X), %xmm4
	SHUFPD_1 %xmm4, %xmm5
	movapd	-10 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm3
	movapd	 -7 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm4
	movapd	 -8 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm4

	addpd	%xmm7,  %xmm2
	movapd	 -5 * SIZE(X), %xmm7
	SHUFPD_1 %xmm7, %xmm5
	movapd	 -6 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm1
	movapd	 -3 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm7
	movapd	 -4 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm7

	addpd	%xmm4,  %xmm0
	movapd	 -1 * SIZE(X), %xmm4
	SHUFPD_1 %xmm4, %xmm5
	movapd	 -2 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm3
	movapd	  1 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm4
	movapd	  0 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm4

	addpd	%xmm7,  %xmm2
	movapd	  3 * SIZE(X), %xmm7
	SHUFPD_1 %xmm7, %xmm5
	movapd	  2 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm1
	movapd	  5 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm7
	movapd	  4 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm7

	addpd	%xmm4,  %xmm0
	movapd	  7 * SIZE(X), %xmm4
	SHUFPD_1 %xmm4, %xmm5
	movapd	  6 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm3
	movapd	  9 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm4
	movapd	  8 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm4

	addpd	%xmm7,  %xmm2
	movapd	 11 * SIZE(X), %xmm7
	SHUFPD_1 %xmm7, %xmm5
	movapd	 10 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm1
	movapd	 13 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm7
	movapd	 12 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm7

	addpd	%xmm4,  %xmm0
	movapd	 15 * SIZE(X), %xmm4
	SHUFPD_1 %xmm4, %xmm5
	movapd	 14 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	subq	$-32 * SIZE, X
	subq	$-32 * SIZE, Y

	decq	%rax
	jg	.L21
	ALIGN_3

.L23:
	testq	$16, N
	jle	.L24

	addpd	%xmm5,  %xmm3
	movapd	-15 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm4
	movapd	-16 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm4

	addpd	%xmm7,  %xmm2
	movapd	-13 * SIZE(X), %xmm7
	SHUFPD_1 %xmm7, %xmm5
	movapd	-14 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm1
	movapd	-11 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm7
	movapd	-12 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm7

	addpd	%xmm4,  %xmm0
	movapd	 -9 * SIZE(X), %xmm4
	SHUFPD_1 %xmm4, %xmm5
	movapd	-10 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm3
	movapd	 -7 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm4
	movapd	 -8 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm4

	addpd	%xmm7,  %xmm2
	movapd	 -5 * SIZE(X), %xmm7
	SHUFPD_1 %xmm7, %xmm5
	movapd	 -6 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm1
	movapd	 -3 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm7
	movapd	 -4 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm7

	addpd	%xmm4,  %xmm0
	movapd	 -1 * SIZE(X), %xmm4
	SHUFPD_1 %xmm4, %xmm5
	movapd	 -2 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	subq	$-16 * SIZE, X
	subq	$-16 * SIZE, Y
	ALIGN_3

.L24:
	testq	$8, N
	jle	.L25

	addpd	%xmm5,  %xmm3
	movapd	-15 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm4
	movapd	-16 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm4

	addpd	%xmm7,  %xmm2
	movapd	-13 * SIZE(X), %xmm7
	SHUFPD_1 %xmm7, %xmm5
	movapd	-14 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addpd	%xmm5,  %xmm1
	movapd	-11 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm7
	movapd	-12 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm7

	addpd	%xmm4,  %xmm0
	movapd	 -9 * SIZE(X), %xmm4
	SHUFPD_1 %xmm4, %xmm5
	movapd	-10 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5

	addq	$8 * SIZE, X
	addq	$8 * SIZE, Y
	ALIGN_3

.L25:
	addpd	%xmm7,  %xmm2
	addpd	%xmm5,  %xmm3

	testq	$4, N
	jle	.L26

	movapd	-15 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm4
	movapd	-16 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm4
	addpd	%xmm4,  %xmm0

	movapd	-13 * SIZE(X), %xmm4
	SHUFPD_1 %xmm4, %xmm5
	movapd	-14 * SIZE(Y), %xmm6
	mulpd	%xmm6, %xmm5
	addpd	%xmm5,  %xmm1

	addq	$4 * SIZE, X
	addq	$4 * SIZE, Y
	ALIGN_3

.L26:
	testq	$2, N
	jle	.L27

	movapd	-15 * SIZE(X), %xmm5
	SHUFPD_1 %xmm5, %xmm4
	mulpd	-16 * SIZE(Y), %xmm4
	addpd	%xmm4,  %xmm2
	movapd	%xmm5, %xmm4

	addq	$2 * SIZE, X
	addq	$2 * SIZE, Y
	ALIGN_3

.L27:
	testq	$1, N
	jle	.L999

	movsd	-16 * SIZE(Y), %xmm6
	movhlps	%xmm4, %xmm4
	mulsd	%xmm6,  %xmm4
	addsd	%xmm4,  %xmm3
	jmp	.L999
	ALIGN_3

.L50:
#ifdef F_INTERFACE
	testq	INCX, INCX
	jge	.L51

	movq	N, %rax
	decq	%rax
	imulq	INCX, %rax
	subq	%rax, X
	ALIGN_3

.L51:
	testq	INCY, INCY
	jge	.L52

	movq	N, %rax
	decq	%rax
	imulq	INCY, %rax
	subq	%rax, Y
	ALIGN_3

.L52:
#endif
	movq	N,  %rax
	sarq	$2, %rax
	jle	.L55
	ALIGN_3

.L53:
	movsd	0 * SIZE(X), %xmm4
	addq	INCX, X
	mulsd	0 * SIZE(Y), %xmm4
	addq	INCY, Y

	movsd	0 * SIZE(X), %xmm5
	addq	INCX, X
	mulsd	0 * SIZE(Y), %xmm5
	addq	INCY, Y

	movsd	0 * SIZE(X), %xmm6
	addq	INCX, X
	mulsd	0 * SIZE(Y), %xmm6
	addq	INCY, Y

	movsd	0 * SIZE(X), %xmm7
	addq	INCX, X
	mulsd	0 * SIZE(Y), %xmm7
	addq	INCY, Y

	addsd	%xmm4, %xmm0
	addsd	%xmm5, %xmm1
	addsd	%xmm6, %xmm2
	addsd	%xmm7, %xmm3

	decq	%rax
	jg	.L53
	ALIGN_3

.L55:
	movq	N, %rax
	andq	$3,   %rax
	jle	.L999
	ALIGN_3

.L56:
	movsd	0 * SIZE(X), %xmm4
	addq	INCX, X
	mulsd	0 * SIZE(Y), %xmm4
	addq	INCY, Y
	addsd	%xmm4, %xmm0
	decq	%rax
	jg	.L56
	ALIGN_3

.L999:
	addpd	%xmm1, %xmm0
	addpd	%xmm3, %xmm2
	addpd	%xmm2, %xmm0

#ifndef HAVE_SSE3
	movapd	%xmm0, %xmm1
	unpckhpd	%xmm0, %xmm0
	addsd	%xmm1, %xmm0
#else
	haddpd	%xmm0, %xmm0
#endif

	RESTOREREGISTERS
	ret
	EPILOGUE
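For readers following the assembly, here is a minimal C sketch (not part of the original file; the name ddot_ref and its signature are illustrative) of what the kernel computes: the double-precision dot product sum of X[i*INCX] * Y[i*INCY] for i = 0 .. N-1. The kernel itself additionally peels one element to align Y on a 16-byte boundary, uses a SHUFPD-based path when X is misaligned relative to Y, unrolls the unit-stride loop 32 elements per iteration, and keeps four partial sums in xmm0..xmm3 that are folded together at .L999.

    #include <stddef.h>

    /* Reference sketch of the kernel above: dot product of two
     * strided double vectors.  Four accumulators loosely mirror the
     * kernel's partial sums in xmm0..xmm3. */
    double ddot_ref(ptrdiff_t n, const double *x, ptrdiff_t incx,
                    const double *y, ptrdiff_t incy)
    {
        double s0 = 0.0, s1 = 0.0, s2 = 0.0, s3 = 0.0;
        ptrdiff_t i = 0;

        if (n <= 0)
            return 0.0;

        /* BLAS convention, handled at .L50 under F_INTERFACE: a
         * negative increment starts at the last element and walks
         * backward through the vector. */
        if (incx < 0) x += (n - 1) * (-incx);
        if (incy < 0) y += (n - 1) * (-incy);

        if (incx == 1 && incy == 1) {
            /* contiguous fast path; the kernel does this with aligned
             * 128-bit loads, 32 elements per loop iteration */
            for (; i + 4 <= n; i += 4) {
                s0 += x[i + 0] * y[i + 0];
                s1 += x[i + 1] * y[i + 1];
                s2 += x[i + 2] * y[i + 2];
                s3 += x[i + 3] * y[i + 3];
            }
            for (; i < n; i++)
                s0 += x[i] * y[i];
        } else {
            /* general strided path, cf. .L53/.L56 */
            for (; i < n; i++)
                s0 += x[i * incx] * y[i * incy];
        }
        return (s0 + s1) + (s2 + s3);
    }

Because both the kernel and this sketch accumulate in independent partial sums, their results can differ from a strict left-to-right summation (and from each other) in the last few bits; this is the usual trade-off for breaking the addition dependency chain.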
