zscal_sse2.s
/*********************************************************************/
/*                                                                    */
/*  Optimized BLAS libraries                                          */
/*  By Kazushige Goto <kgoto@tacc.utexas.edu>                         */
/*                                                                    */
/*  Copyright (c) The University of Texas, 2005. All rights reserved. */
/*  UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/*  THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/*  MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/*  NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/*  THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/*  TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/*  THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/*  Under no circumstances shall University be liable for incidental, */
/*  special, indirect, direct or consequential damages or loss of     */
/*  profits, interruption of business, or related expenses which may  */
/*  arise from use of Software or Documentation, including but not    */
/*  limited to those resulting from defects in Software and/or        */
/*  Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#ifndef WINDOWS_ABI
#define M	ARG1
#define X	ARG4
#define INCX	ARG5
#else
#define M	ARG1
#define X	ARG2
#define INCX	ARG3
#endif

#define XX	%r10
#define FLAG	%r11
#define I	%rax

#define PREFETCHSIZE 112

#if (defined(HAVE_SSE3) && !defined(CORE_OPTERON)) || defined(BARCELONA)
#define MOVDDUP(a, b, c)	movddup a(b), c
#define MOVDDUP2(a, b, c)	movddup a##b, c
#else
#define MOVDDUP(a, b, c)	movlpd a(b), c; unpcklpd c, c
#define MOVDDUP2(a, b, c)	movlpd a##b, c; unpcklpd c, c
#endif

	PROLOGUE
	PROFCODE

#ifdef WINDOWS_ABI
	movaps %xmm3, %xmm0
	movsd 40(%rsp), %xmm1
	movq 48(%rsp), X
	movq 56(%rsp), INCX
#endif

	SAVEREGISTERS

	salq $ZBASE_SHIFT, INCX
	xor FLAG, FLAG

	testq M, M
	jle .L999

	pxor %xmm15, %xmm15
	comisd %xmm0, %xmm15
	jne .L100		# Alpha_r != ZERO

	comisd %xmm1, %xmm15
	jne .L100		# Alpha_i != ZERO

/* Alpha == ZERO */
	cmpq $2 * SIZE, INCX
	jne .L50

/* INCX == 1 */
	testq $SIZE, X
	je .L05

	movsd %xmm15, 0 * SIZE(X)
	addq $SIZE, X
	movq $1, FLAG
	decq M
	jle .L19
	ALIGN_3

.L05:
/* Aligned Mode */
	movq M, I		# rcx = n
	sarq $3, I
	jle .L12
	ALIGN_4

.L11:
#ifdef HAVE_3DNOW
	prefetchw 88 * SIZE(X)
#endif
	movapd %xmm15, 0 * SIZE(X)
	movapd %xmm15, 2 * SIZE(X)
	movapd %xmm15, 4 * SIZE(X)
	movapd %xmm15, 6 * SIZE(X)
#ifdef HAVE_3DNOW
	prefetchw 96 * SIZE(X)
#endif
	movapd %xmm15, 8 * SIZE(X)
	movapd %xmm15, 10 * SIZE(X)
	movapd %xmm15, 12 * SIZE(X)
	movapd %xmm15, 14 * SIZE(X)

	addq $16 * SIZE, X
	decq I
	jg .L11
	ALIGN_4

.L12:
	testq $4, M
	je .L13

	movapd %xmm15, 0 * SIZE(X)
	movapd %xmm15, 2 * SIZE(X)
	movapd %xmm15, 4 * SIZE(X)
	movapd %xmm15, 6 * SIZE(X)
	addq $8 * SIZE, X
	ALIGN_3

.L13:
	testq $2, M
	je .L14

	movapd %xmm15, 0 * SIZE(X)
	movapd %xmm15, 2 * SIZE(X)
	addq $4 * SIZE, X
	ALIGN_3

.L14:
	testq $1, M
	je .L19

	movapd %xmm15, 0 * SIZE(X)
	addq $2 * SIZE, X
	ALIGN_3

.L19:
	testq $1, FLAG
	je .L999

	movsd %xmm15, 0 * SIZE(X)
	jmp .L999
	ALIGN_4

/* incx != 1 */
.L50:
	testq $SIZE, X
	jne .L60

/* Aligned Mode */
	movq M, I		# rcx = n
	sarq $2, I
	jle .L52
	ALIGN_4

.L51:
#ifdef HAVE_3DNOW
	prefetchw 88 * SIZE(X)
#endif
	movapd %xmm15, 0 * SIZE(X)
	addq INCX, X
	movapd %xmm15, 0 * SIZE(X)
	addq INCX, X
	movapd %xmm15, 0 * SIZE(X)
	addq INCX, X
	movapd %xmm15, 0 * SIZE(X)
	addq INCX, X
	decq I
	jg .L51
	ALIGN_4

.L52:
	testq $3, M
	je .L999

	testq $2, M
	je .L53

	movapd %xmm15, 0 * SIZE(X)
	addq INCX, X
	movapd %xmm15, 0 * SIZE(X)
	addq INCX, X
	ALIGN_3

.L53:
	testq $1, M
	je .L999

	movapd %xmm15, 0 * SIZE(X)
	jmp .L999
	ALIGN_4

/* Unaligned Mode */
.L60:
	movq M, I		# rcx = n
	sarq $2, I
	jle .L62
	ALIGN_4

.L61:
#ifdef HAVE_3DNOW
	prefetchw 88 * SIZE(X)
#endif
	movsd %xmm15, 0 * SIZE(X)
	movsd %xmm15, 1 * SIZE(X)
	addq INCX, X
	movsd %xmm15, 0 * SIZE(X)
	movsd %xmm15, 1 * SIZE(X)
	addq INCX, X
	movsd %xmm15, 0 * SIZE(X)
	movsd %xmm15, 1 * SIZE(X)
	addq INCX, X
	movsd %xmm15, 0 * SIZE(X)
	movsd %xmm15, 1 * SIZE(X)
	addq INCX, X
	decq I
	jg .L61
	ALIGN_4

.L62:
	testq $3, M
	je .L999

	testq $2, M
	je .L63

	movsd %xmm15, 0 * SIZE(X)
	movsd %xmm15, 1 * SIZE(X)
	addq INCX, X
	movsd %xmm15, 0 * SIZE(X)
	movsd %xmm15, 1 * SIZE(X)
	addq INCX, X
	ALIGN_3

.L63:
	testq $1, M
	je .L999

	movsd %xmm15, 0 * SIZE(X)
	movsd %xmm15, 1 * SIZE(X)
	jmp .L999
	ALIGN_4

/* Alpha != ZERO */
.L100:
	movapd %xmm0, %xmm14		# 0 a
	movapd %xmm1, %xmm5		# 0 b

	pxor %xmm15, %xmm15		# 0 0
	subsd %xmm5, %xmm15		# 0 -b

	unpcklpd %xmm14, %xmm15		# a -b
	unpcklpd %xmm5, %xmm14		# b a

	testq $SIZE, X
	jne .L200

/* Aligned Mode */
	cmpq $2 * SIZE, INCX
	jne .L120

.L110:
	movq M, I		# rcx = n
	sarq $3, I
	jle .L115

	MOVDDUP(0 * SIZE, X, %xmm0)
	MOVDDUP(1 * SIZE, X, %xmm1)
	MOVDDUP(2 * SIZE, X, %xmm2)
	MOVDDUP(3 * SIZE, X, %xmm3)
	MOVDDUP(4 * SIZE, X, %xmm4)
	MOVDDUP(5 * SIZE, X, %xmm5)
	MOVDDUP(6 * SIZE, X, %xmm6)
	MOVDDUP(7 * SIZE, X, %xmm7)

	decq I
	jle .L112
	ALIGN_4

.L111:
	mulpd %xmm14, %xmm0
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm2
	mulpd %xmm15, %xmm3
	mulpd %xmm14, %xmm4
	mulpd %xmm15, %xmm5
	mulpd %xmm14, %xmm6
	mulpd %xmm15, %xmm7

	addpd %xmm1, %xmm0
	addpd %xmm3, %xmm2

	MOVDDUP( 8 * SIZE, X, %xmm8)
	MOVDDUP( 9 * SIZE, X, %xmm1)
	MOVDDUP(10 * SIZE, X, %xmm9)
	MOVDDUP(11 * SIZE, X, %xmm3)

	addpd %xmm5, %xmm4
	addpd %xmm7, %xmm6

	MOVDDUP(12 * SIZE, X, %xmm10)
	MOVDDUP(13 * SIZE, X, %xmm5)
	MOVDDUP(14 * SIZE, X, %xmm11)
	MOVDDUP(15 * SIZE, X, %xmm7)

	mulpd %xmm14, %xmm8
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm9
	mulpd %xmm15, %xmm3
	mulpd %xmm14, %xmm10
	mulpd %xmm15, %xmm5
	mulpd %xmm14, %xmm11
	mulpd %xmm15, %xmm7

	addpd %xmm1, %xmm8
	addpd %xmm3, %xmm9
	addpd %xmm5, %xmm10
	addpd %xmm7, %xmm11

	movapd %xmm0, 0 * SIZE(X)
	movapd %xmm2, 2 * SIZE(X)
	movapd %xmm4, 4 * SIZE(X)
	movapd %xmm6, 6 * SIZE(X)

	MOVDDUP(16 * SIZE, X, %xmm0)
	MOVDDUP(17 * SIZE, X, %xmm1)
	MOVDDUP(18 * SIZE, X, %xmm2)
	MOVDDUP(19 * SIZE, X, %xmm3)
	MOVDDUP(20 * SIZE, X, %xmm4)
	MOVDDUP(21 * SIZE, X, %xmm5)
	MOVDDUP(22 * SIZE, X, %xmm6)
	MOVDDUP(23 * SIZE, X, %xmm7)

	movapd %xmm8, 8 * SIZE(X)
	movapd %xmm9, 10 * SIZE(X)
	movapd %xmm10, 12 * SIZE(X)
	movapd %xmm11, 14 * SIZE(X)

	addq $16 * SIZE, X
	decq I
	jg .L111
	ALIGN_4

.L112:
	mulpd %xmm14, %xmm0
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm2
	mulpd %xmm15, %xmm3
	mulpd %xmm14, %xmm4
	mulpd %xmm15, %xmm5
	mulpd %xmm14, %xmm6
	mulpd %xmm15, %xmm7

	addpd %xmm1, %xmm0
	addpd %xmm3, %xmm2

	MOVDDUP( 8 * SIZE, X, %xmm8)
	MOVDDUP( 9 * SIZE, X, %xmm1)
	MOVDDUP(10 * SIZE, X, %xmm9)
	MOVDDUP(11 * SIZE, X, %xmm3)

	addpd %xmm5, %xmm4
	addpd %xmm7, %xmm6

	MOVDDUP(12 * SIZE, X, %xmm10)
	MOVDDUP(13 * SIZE, X, %xmm5)
	MOVDDUP(14 * SIZE, X, %xmm11)
	MOVDDUP(15 * SIZE, X, %xmm7)

	mulpd %xmm14, %xmm8
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm9
	mulpd %xmm15, %xmm3
	mulpd %xmm14, %xmm10
	mulpd %xmm15, %xmm5
	mulpd %xmm14, %xmm11
	mulpd %xmm15, %xmm7

	addpd %xmm1, %xmm8
	addpd %xmm3, %xmm9
	addpd %xmm5, %xmm10
	addpd %xmm7, %xmm11

	movapd %xmm0, 0 * SIZE(X)
	movapd %xmm2, 2 * SIZE(X)
	movapd %xmm4, 4 * SIZE(X)
	movapd %xmm6, 6 * SIZE(X)
	movapd %xmm8, 8 * SIZE(X)
	movapd %xmm9, 10 * SIZE(X)
	movapd %xmm10, 12 * SIZE(X)
	movapd %xmm11, 14 * SIZE(X)

	addq $16 * SIZE, X
	ALIGN_3

.L115:
	testq $7, M
	je .L999

	testq $4, M
	je .L116

	MOVDDUP(0 * SIZE, X, %xmm0)
	MOVDDUP(1 * SIZE, X, %xmm1)
	MOVDDUP(2 * SIZE, X, %xmm2)
	MOVDDUP(3 * SIZE, X, %xmm3)
	MOVDDUP(4 * SIZE, X, %xmm4)
	MOVDDUP(5 * SIZE, X, %xmm5)
	MOVDDUP(6 * SIZE, X, %xmm6)
	MOVDDUP(7 * SIZE, X, %xmm7)

	mulpd %xmm14, %xmm0
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm2
	mulpd %xmm15, %xmm3
	mulpd %xmm14, %xmm4
	mulpd %xmm15, %xmm5
	mulpd %xmm14, %xmm6
	mulpd %xmm15, %xmm7

	addpd %xmm1, %xmm0
	addpd %xmm3, %xmm2
	addpd %xmm5, %xmm4
	addpd %xmm7, %xmm6

	movapd %xmm0, 0 * SIZE(X)
	movapd %xmm2, 2 * SIZE(X)
	movapd %xmm4, 4 * SIZE(X)
	movapd %xmm6, 6 * SIZE(X)

	addq $8 * SIZE, X
	ALIGN_3

.L116:
	testq $2, M
	je .L117

	MOVDDUP(0 * SIZE, X, %xmm0)
	MOVDDUP(1 * SIZE, X, %xmm1)
	MOVDDUP(2 * SIZE, X, %xmm2)
	MOVDDUP(3 * SIZE, X, %xmm3)

	mulpd %xmm14, %xmm0
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm2
	mulpd %xmm15, %xmm3

	addpd %xmm1, %xmm0
	addpd %xmm3, %xmm2

	movapd %xmm0, 0 * SIZE(X)
	movapd %xmm2, 2 * SIZE(X)

	addq $4 * SIZE, X
	ALIGN_3

.L117:
	testq $1, M
	je .L999

	MOVDDUP(0 * SIZE, X, %xmm0)
	MOVDDUP(1 * SIZE, X, %xmm1)

	mulpd %xmm14, %xmm0
	mulpd %xmm15, %xmm1

	addpd %xmm1, %xmm0

	movapd %xmm0, 0 * SIZE(X)
	jmp .L999
	ALIGN_3

.L120:
	movq X, XX
	movq M, I		# rcx = n
	sarq $3, I
	jle .L125

	MOVDDUP(0 * SIZE, X, %xmm0)
	MOVDDUP(1 * SIZE, X, %xmm1)
	addq INCX, X
	MOVDDUP(0 * SIZE, X, %xmm2)
	MOVDDUP(1 * SIZE, X, %xmm3)
	addq INCX, X
	MOVDDUP(0 * SIZE, X, %xmm4)
	MOVDDUP(1 * SIZE, X, %xmm5)
	addq INCX, X
	MOVDDUP(0 * SIZE, X, %xmm6)
	MOVDDUP(1 * SIZE, X, %xmm7)
	addq INCX, X

	decq I
	jle .L122
	ALIGN_4

.L121:
	mulpd %xmm14, %xmm0
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm2
	mulpd %xmm15, %xmm3
	mulpd %xmm14, %xmm4
	mulpd %xmm15, %xmm5
	mulpd %xmm14, %xmm6
	mulpd %xmm15, %xmm7

	addpd %xmm1, %xmm0
	addpd %xmm3, %xmm2

	MOVDDUP( 0 * SIZE, X, %xmm8)
	MOVDDUP( 1 * SIZE, X, %xmm1)
	addq INCX, X
	MOVDDUP( 0 * SIZE, X, %xmm9)
	MOVDDUP( 1 * SIZE, X, %xmm3)
	addq INCX, X

	addpd %xmm5, %xmm4
	addpd %xmm7, %xmm6

	MOVDDUP( 0 * SIZE, X, %xmm10)
	MOVDDUP( 1 * SIZE, X, %xmm5)
	addq INCX, X
	MOVDDUP( 0 * SIZE, X, %xmm11)
	MOVDDUP( 1 * SIZE, X, %xmm7)
	addq INCX, X

	mulpd %xmm14, %xmm8
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm9
	mulpd %xmm15, %xmm3
	mulpd %xmm14, %xmm10
	mulpd %xmm15, %xmm5
	mulpd %xmm14, %xmm11
	mulpd %xmm15, %xmm7

	addpd %xmm1, %xmm8
	addpd %xmm3, %xmm9
	addpd %xmm5, %xmm10
	addpd %xmm7, %xmm11

	movapd %xmm0, 0 * SIZE(XX)
	addq INCX, XX
	movapd %xmm2, 0 * SIZE(XX)
	addq INCX, XX
	movapd %xmm4, 0 * SIZE(XX)
	addq INCX, XX
	movapd %xmm6, 0 * SIZE(XX)
	addq INCX, XX

	MOVDDUP( 0 * SIZE, X, %xmm0)
	MOVDDUP( 1 * SIZE, X, %xmm1)
	addq INCX, X
	MOVDDUP( 0 * SIZE, X, %xmm2)
	MOVDDUP( 1 * SIZE, X, %xmm3)
	addq INCX, X
	MOVDDUP( 0 * SIZE, X, %xmm4)
	MOVDDUP( 1 * SIZE, X, %xmm5)
	addq INCX, X
	MOVDDUP( 0 * SIZE, X, %xmm6)
	MOVDDUP( 1 * SIZE, X, %xmm7)
	addq INCX, X

	movapd %xmm8, 0 * SIZE(XX)
	addq INCX, XX
	movapd %xmm9, 0 * SIZE(XX)
	addq INCX, XX
	movapd %xmm10, 0 * SIZE(XX)
	addq INCX, XX
	movapd %xmm11, 0 * SIZE(XX)
	addq INCX, XX

	decq I
	jg .L121
	ALIGN_4

.L122:
	mulpd %xmm14, %xmm0
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm2
	mulpd %xmm15, %xmm3
	mulpd %xmm14, %xmm4
	mulpd %xmm15, %xmm5
	mulpd %xmm14, %xmm6
	mulpd %xmm15, %xmm7

	addpd %xmm1, %xmm0
	addpd %xmm3, %xmm2

	MOVDDUP( 0 * SIZE, X, %xmm8)
	MOVDDUP( 1 * SIZE, X, %xmm1)
	addq INCX, X
	MOVDDUP( 0 * SIZE, X, %xmm9)
	MOVDDUP( 1 * SIZE, X, %xmm3)
	addq INCX, X

	addpd %xmm5, %xmm4
	addpd %xmm7, %xmm6

	MOVDDUP( 0 * SIZE, X, %xmm10)
	MOVDDUP( 1 * SIZE, X, %xmm5)
	addq INCX, X
	MOVDDUP( 0 * SIZE, X, %xmm11)
	MOVDDUP( 1 * SIZE, X, %xmm7)
	addq INCX, X

	mulpd %xmm14, %xmm8
	mulpd %xmm15, %xmm1
	mulpd %xmm14, %xmm9
	mulpd %xmm15, %xmm3
	mulpd %xmm14, %xmm10
	mulpd %xmm15, %xmm5
	mulpd %xmm14, %xmm11
	mulpd %xmm15, %xmm7

	addpd %xmm1, %xmm8
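For orientation, the arithmetic this kernel performs is ordinary BLAS zscal: every element of a double-complex vector X (length M, stride INCX in complex elements) is multiplied in place by alpha = alpha_r + i*alpha_i, with a separate zero-fill fast path when alpha == 0. Below is a minimal C sketch of that update; the routine name zscal_ref and its signature are illustrative only and are not part of this file or of the BLAS interface.

#include <stddef.h>

/* Hypothetical reference routine (not from the original source):
 * x points to n double-complex values stored as (re, im) pairs,
 * incx is the stride counted in complex elements.                 */
static void zscal_ref(size_t n, double alpha_r, double alpha_i,
                      double *x, size_t incx)
{
	for (size_t i = 0; i < n; i++, x += 2 * incx) {
		double re = x[0], im = x[1];
		/* (re + i*im) * (alpha_r + i*alpha_i) */
		x[0] = alpha_r * re - alpha_i * im;
		x[1] = alpha_i * re + alpha_r * im;
	}
}

The SSE2 code above computes the same thing: MOVDDUP splats re and im of each element into full registers, which are multiplied by the constant pairs (alpha_r, alpha_i) in %xmm14 and (-alpha_i, alpha_r) in %xmm15 and then summed, yielding the new (re, im) pair in one packed register.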