gemv_n_sse2.s
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                      By Kazushige Goto <kgoto@tacc.utexas.edu>    */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#ifdef CORE2
#define PREFETCH	prefetcht0
#define PREFETCHW	prefetcht0
#define PREFETCHSIZE	32
#endif

#ifdef PENRYN
#define PREFETCH	prefetcht0
#define PREFETCHW	prefetcht0
#define PREFETCHSIZE	32
#endif

#ifdef PENTIUM4
#define PREFETCH	prefetcht2
#define PREFETCHW	prefetchnta
#define PREFETCHSIZE	32
#endif

#ifdef GENERIC
#define PREFETCH	prefetcht0
#define PREFETCHW	prefetcht0
#define PREFETCHSIZE	(8 * 5)
#endif

#ifndef WINDOWS_ABI

#define STACKSIZE	64

#define OLD_INCX	 8 + STACKSIZE(%rsp)
#define OLD_Y		16 + STACKSIZE(%rsp)
#define OLD_INCY	24 + STACKSIZE(%rsp)
#define OLD_BUFFER	32 + STACKSIZE(%rsp)
#define STACK_ALPHA	48 (%rsp)

#define M	%rdi
#define N	%rsi
#define A	%rcx
#define LDA	%r8
#define X	%r9
#define INCX	%rdx
#define Y	%rbp
#define INCY	%r10

#else

#define STACKSIZE	256

#define OLD_A		40 + STACKSIZE(%rsp)
#define OLD_LDA		48 + STACKSIZE(%rsp)
#define OLD_X		56 + STACKSIZE(%rsp)
#define OLD_INCX	64 + STACKSIZE(%rsp)
#define OLD_Y		72 + STACKSIZE(%rsp)
#define OLD_INCY	80 + STACKSIZE(%rsp)
#define OLD_BUFFER	88 + STACKSIZE(%rsp)
#define STACK_ALPHA	224 (%rsp)

#define M	%rcx
#define N	%rdx
#define A	%r8
#define LDA	%r9
#define X	%rdi
#define INCX	%rsi
#define Y	%rbp
#define INCY	%r10

#endif

#define TEMP	%rax
#define I	%rax
#define J	%r11
#define A1	%r12
#define A2	%r13
#define Y1	%r14
#define BUFFER	%r15
#define MM	%rbx

#define ALPHA	%xmm15

	PROLOGUE
	PROFCODE

	subq	$STACKSIZE, %rsp
	movq	%rbx,  0(%rsp)
	movq	%rbp,  8(%rsp)
	movq	%r12, 16(%rsp)
	movq	%r13, 24(%rsp)
	movq	%r14, 32(%rsp)
	movq	%r15, 40(%rsp)

#ifdef WINDOWS_ABI
	movq	%rdi,    48(%rsp)
	movq	%rsi,    56(%rsp)
	movups	%xmm6,   64(%rsp)
	movups	%xmm7,   80(%rsp)
	movups	%xmm8,   96(%rsp)
	movups	%xmm9,  112(%rsp)
	movups	%xmm10, 128(%rsp)
	movups	%xmm11, 144(%rsp)
	movups	%xmm12, 160(%rsp)
	movups	%xmm13, 176(%rsp)
	movups	%xmm14, 192(%rsp)
	movups	%xmm15, 208(%rsp)

	movq	OLD_A,   A
	movq	OLD_LDA, LDA
	movq	OLD_X,   X
#endif

	movq	OLD_INCX,   INCX
	movq	OLD_Y,      Y
	movq	OLD_INCY,   INCY
	movq	OLD_BUFFER, BUFFER

#ifndef WINDOWS_ABI
	movsd	%xmm0, STACK_ALPHA
#else
	movsd	%xmm3, STACK_ALPHA
#endif

	leaq	(,INCX, SIZE), INCX
	leaq	(,INCY, SIZE), INCY
	leaq	(,LDA,  SIZE), LDA

	testq	N, N		# if n <= 0 goto END
	jle	.L999
	testq	M, M		# if m <= 0 goto END
	jle	.L999
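
/* The y accumulator is built in BUFFER rather than updated in y directly.
   The block below zeroes the buffer and then, when the column start A is
   not 16-byte aligned, advances BUFFER by one element and sets MM = M - 1
   so that the buffer shares A's alignment: after a single scalar head
   element per column group, both pointers sit on 16-byte boundaries and
   the main loops can use aligned movapd loads and stores throughout. */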
	movq	BUFFER, Y1

	movq	M, MM
	movq	BUFFER, %rax
	decq	MM
	addq	$1 * SIZE, BUFFER
	testq	$SIZE, A
	cmoveq	M, MM
	cmoveq	%rax, BUFFER

	pxor	%xmm4, %xmm4

	movq	M, %rax
	addq	$8, %rax
	sarq	$3, %rax
	ALIGN_3

.L01:
	movapd	%xmm4, 0 * SIZE(Y1)
	movapd	%xmm4, 2 * SIZE(Y1)
	movapd	%xmm4, 4 * SIZE(Y1)
	movapd	%xmm4, 6 * SIZE(Y1)
	addq	$8 * SIZE, Y1
	decq	%rax
	jg	.L01
	ALIGN_3

.L10:
	testq	$SIZE, LDA
	jne	.L40

	movq	N, J
	sarq	$2, J
	jle	.L20
	ALIGN_3

.L11:
	movq	BUFFER, Y1

	movq	A, A1
	leaq	(A, LDA, 1), A2
	leaq	(A, LDA, 4), A

	movsd	(X), %xmm0
	addq	INCX, X
	movsd	(X), %xmm1
	addq	INCX, X
	movsd	(X), %xmm2
	addq	INCX, X
	movsd	(X), %xmm3
	addq	INCX, X

	mulsd	STACK_ALPHA, %xmm0
	mulsd	STACK_ALPHA, %xmm1
	mulsd	STACK_ALPHA, %xmm2
	mulsd	STACK_ALPHA, %xmm3

	unpcklpd %xmm0, %xmm0
	unpcklpd %xmm1, %xmm1
	unpcklpd %xmm2, %xmm2
	unpcklpd %xmm3, %xmm3
	ALIGN_3

	testq	$SIZE, A
	je	.L12

	movsd	0 * SIZE(Y1), %xmm4
	movsd	0 * SIZE(A1), %xmm8
	movsd	0 * SIZE(A2), %xmm9
	movsd	0 * SIZE(A1, LDA, 2), %xmm10
	movsd	0 * SIZE(A2, LDA, 2), %xmm11

	mulsd	%xmm0, %xmm8
	mulsd	%xmm1, %xmm9
	mulsd	%xmm2, %xmm10
	mulsd	%xmm3, %xmm11

	addsd	%xmm8,  %xmm4
	addsd	%xmm9,  %xmm4
	addsd	%xmm10, %xmm4
	addsd	%xmm11, %xmm4

	movsd	%xmm4, 0 * SIZE(Y1)

	addq	$1 * SIZE, A1
	addq	$1 * SIZE, A2
	addq	$1 * SIZE, Y1
	ALIGN_3

.L12:
	movq	MM, I
	sarq	$4, I
	jle	.L15

	movapd	0 * SIZE(A1), %xmm8
	movapd	2 * SIZE(A1), %xmm9
	movapd	4 * SIZE(A1), %xmm10
	movapd	6 * SIZE(A1), %xmm11

	movapd	0 * SIZE(A2), %xmm12
	movapd	2 * SIZE(A2), %xmm13
	movapd	4 * SIZE(A2), %xmm14
	movapd	6 * SIZE(A2), %xmm15

	movapd	0 * SIZE(Y1), %xmm4
	movapd	2 * SIZE(Y1), %xmm5
	movapd	4 * SIZE(Y1), %xmm6
	movapd	6 * SIZE(Y1), %xmm7

	mulpd	%xmm0, %xmm8
	mulpd	%xmm0, %xmm9
	mulpd	%xmm0, %xmm10
	mulpd	%xmm0, %xmm11

	decq	I
	jle	.L14
	ALIGN_3
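
/* .L13 is the software-pipelined main loop.  Each iteration advances 16
   rows of the four current columns (A1, A2, A1 + 2*LDA, A2 + 2*LDA):
   aligned column loads are multiplied by the broadcast alpha*x[j] values
   held in %xmm0-%xmm3 and accumulated into the buffered y values, with the
   loads for the next block interleaved ahead of the stores. */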
.L13:
#ifdef PENTIUM4
	PREFETCH PREFETCHSIZE * SIZE(A1)
#endif

	addpd	%xmm8,  %xmm4
	movapd	0 * SIZE(A1, LDA, 2), %xmm8
	mulpd	%xmm1, %xmm12

#ifdef PENTIUM4
	PREFETCH PREFETCHSIZE * SIZE(Y1)
#endif

	addpd	%xmm9,  %xmm5
	movapd	2 * SIZE(A1, LDA, 2), %xmm9
	mulpd	%xmm1, %xmm13
	addpd	%xmm10, %xmm6
	movapd	4 * SIZE(A1, LDA, 2), %xmm10
	mulpd	%xmm1, %xmm14
	addpd	%xmm11, %xmm7
	movapd	6 * SIZE(A1, LDA, 2), %xmm11
	mulpd	%xmm1, %xmm15

	addpd	%xmm12, %xmm4
	movapd	0 * SIZE(A2, LDA, 2), %xmm12
	mulpd	%xmm2, %xmm8
	addpd	%xmm13, %xmm5
	movapd	2 * SIZE(A2, LDA, 2), %xmm13
	mulpd	%xmm2, %xmm9
	addpd	%xmm14, %xmm6
	movapd	4 * SIZE(A2, LDA, 2), %xmm14
	mulpd	%xmm2, %xmm10
	addpd	%xmm15, %xmm7
	movapd	6 * SIZE(A2, LDA, 2), %xmm15
	mulpd	%xmm2, %xmm11

#ifdef PENTIUM4
	PREFETCH PREFETCHSIZE * SIZE(A2)
#endif

	addpd	%xmm8,  %xmm4
	movapd	8 * SIZE(A1), %xmm8
	mulpd	%xmm3, %xmm12
	addpd	%xmm9,  %xmm5
	movapd	10 * SIZE(A1), %xmm9
	mulpd	%xmm3, %xmm13
	addpd	%xmm10, %xmm6
	movapd	12 * SIZE(A1), %xmm10
	mulpd	%xmm3, %xmm14
	addpd	%xmm11, %xmm7
	movapd	14 * SIZE(A1), %xmm11
	mulpd	%xmm3, %xmm15

	addpd	%xmm12, %xmm4
	movapd	8 * SIZE(A2), %xmm12
	mulpd	%xmm0, %xmm8
	addpd	%xmm13, %xmm5
	movapd	10 * SIZE(A2), %xmm13
	mulpd	%xmm0, %xmm9
	addpd	%xmm14, %xmm6
	movapd	12 * SIZE(A2), %xmm14
	mulpd	%xmm0, %xmm10
	addpd	%xmm15, %xmm7
	movapd	14 * SIZE(A2), %xmm15
	mulpd	%xmm0, %xmm11

	movapd	%xmm4, 0 * SIZE(Y1)
	movapd	8 * SIZE(Y1), %xmm4
	movapd	%xmm5, 2 * SIZE(Y1)
	movapd	10 * SIZE(Y1), %xmm5
	movapd	%xmm6, 4 * SIZE(Y1)
	movapd	12 * SIZE(Y1), %xmm6
	movapd	%xmm7, 6 * SIZE(Y1)
	movapd	14 * SIZE(Y1), %xmm7

#ifdef PENTIUM4
	PREFETCH PREFETCHSIZE * SIZE(A1, LDA, 2)
#endif

	addpd	%xmm8,  %xmm4
	movapd	8 * SIZE(A1, LDA, 2), %xmm8
	mulpd	%xmm1, %xmm12
	addpd	%xmm9,  %xmm5
	movapd	10 * SIZE(A1, LDA, 2), %xmm9
	mulpd	%xmm1, %xmm13
	addpd	%xmm10, %xmm6
	movapd	12 * SIZE(A1, LDA, 2), %xmm10
	mulpd	%xmm1, %xmm14
	addpd	%xmm11, %xmm7
	movapd	14 * SIZE(A1, LDA, 2), %xmm11
	mulpd	%xmm1, %xmm15

	addpd	%xmm12, %xmm4
	movapd	8 * SIZE(A2, LDA, 2), %xmm12
	mulpd	%xmm2, %xmm8
	addpd	%xmm13, %xmm5
	movapd	10 * SIZE(A2, LDA, 2), %xmm13
	mulpd	%xmm2, %xmm9
	addpd	%xmm14, %xmm6
	movapd	12 * SIZE(A2, LDA, 2), %xmm14
	mulpd	%xmm2, %xmm10
	addpd	%xmm15, %xmm7
	movapd	14 * SIZE(A2, LDA, 2), %xmm15
	mulpd	%xmm2, %xmm11

#ifdef PENTIUM4
	PREFETCH PREFETCHSIZE * SIZE(A2, LDA, 2)
#endif

	addpd	%xmm8,  %xmm4
	movapd	16 * SIZE(A1), %xmm8
	mulpd	%xmm3, %xmm12
	addpd	%xmm9,  %xmm5
	movapd	18 * SIZE(A1), %xmm9
	mulpd	%xmm3, %xmm13
	addpd	%xmm10, %xmm6
	movapd	20 * SIZE(A1), %xmm10
	mulpd	%xmm3, %xmm14
	addpd	%xmm11, %xmm7
	movapd	22 * SIZE(A1), %xmm11
	mulpd	%xmm3, %xmm15

	addpd	%xmm12, %xmm4
	movapd	16 * SIZE(A2), %xmm12
	mulpd	%xmm0, %xmm8
	addpd	%xmm13, %xmm5
	movapd	18 * SIZE(A2), %xmm13
	mulpd	%xmm0, %xmm9
	addpd	%xmm14, %xmm6
	movapd	20 * SIZE(A2), %xmm14
	mulpd	%xmm0, %xmm10
	addpd	%xmm15, %xmm7
	movapd	22 * SIZE(A2), %xmm15
	mulpd	%xmm0, %xmm11

	movapd	%xmm4,  8 * SIZE(Y1)
	movapd	16 * SIZE(Y1), %xmm4
	movapd	%xmm5, 10 * SIZE(Y1)
	movapd	18 * SIZE(Y1), %xmm5
	movapd	%xmm6, 12 * SIZE(Y1)
	movapd	20 * SIZE(Y1), %xmm6
	movapd	%xmm7, 14 * SIZE(Y1)
	movapd	22 * SIZE(Y1), %xmm7

	subq	$-16 * SIZE, A1
	subq	$-16 * SIZE, A2
	subq	$-16 * SIZE, Y1

	decq	I
	jg	.L13
	ALIGN_3

.L14:
	addpd	%xmm8,  %xmm4
	movapd	0 * SIZE(A1, LDA, 2), %xmm8
	mulpd	%xmm1, %xmm12
	addpd	%xmm9,  %xmm5
	movapd	2 * SIZE(A1, LDA, 2), %xmm9
	mulpd	%xmm1, %xmm13
	addpd	%xmm10, %xmm6
	movapd	4 * SIZE(A1, LDA, 2), %xmm10
	mulpd	%xmm1, %xmm14
	addpd	%xmm11, %xmm7
	movapd	6 * SIZE(A1, LDA, 2), %xmm11
	mulpd	%xmm1, %xmm15

	addpd	%xmm12, %xmm4
	movapd	0 * SIZE(A2, LDA, 2), %xmm12
	mulpd	%xmm2, %xmm8
	addpd	%xmm13, %xmm5
	movapd	2 * SIZE(A2, LDA, 2), %xmm13
	mulpd	%xmm2, %xmm9
	addpd	%xmm14, %xmm6
	movapd	4 * SIZE(A2, LDA, 2), %xmm14
	mulpd	%xmm2, %xmm10
	addpd	%xmm15, %xmm7
	movapd	6 * SIZE(A2, LDA, 2), %xmm15
	mulpd	%xmm2, %xmm11

	addpd	%xmm8,  %xmm4
	movapd	8 * SIZE(A1), %xmm8
	mulpd	%xmm3, %xmm12
	addpd	%xmm9,  %xmm5
	movapd	10 * SIZE(A1), %xmm9
	mulpd	%xmm3, %xmm13
	addpd	%xmm10, %xmm6
	movapd	12 * SIZE(A1), %xmm10
	mulpd	%xmm3, %xmm14
	addpd	%xmm11, %xmm7
	movapd	14 * SIZE(A1), %xmm11
	mulpd	%xmm3, %xmm15

	addpd	%xmm12, %xmm4
	movapd	8 * SIZE(A2), %xmm12
	mulpd	%xmm0, %xmm8
	addpd	%xmm13, %xmm5
	movapd	10 * SIZE(A2), %xmm13
	mulpd	%xmm0, %xmm9
	addpd	%xmm14, %xmm6
	movapd	12 * SIZE(A2), %xmm14
	mulpd	%xmm0, %xmm10
	addpd	%xmm15, %xmm7
	movapd	14 * SIZE(A2), %xmm15
	mulpd	%xmm0, %xmm11

	movapd	%xmm4, 0 * SIZE(Y1)
	movapd	8 * SIZE(Y1), %xmm4
	movapd	%xmm5, 2 * SIZE(Y1)
	movapd	10 * SIZE(Y1), %xmm5
	movapd	%xmm6, 4 * SIZE(Y1)
	movapd	12 * SIZE(Y1), %xmm6
	movapd	%xmm7, 6 * SIZE(Y1)
	movapd	14 * SIZE(Y1), %xmm7

	addpd	%xmm8,  %xmm4
	movapd	8 * SIZE(A1, LDA, 2), %xmm8
	mulpd	%xmm1, %xmm12
	addpd	%xmm9,  %xmm5
	movapd	10 * SIZE(A1, LDA, 2), %xmm9
	mulpd	%xmm1, %xmm13
	addpd	%xmm10, %xmm6
	movapd	12 * SIZE(A1, LDA, 2), %xmm10
	mulpd	%xmm1, %xmm14
	addpd	%xmm11, %xmm7
	movapd	14 * SIZE(A1, LDA, 2), %xmm11
	mulpd	%xmm1, %xmm15

	addpd	%xmm12, %xmm4
	movapd	8 * SIZE(A2, LDA, 2), %xmm12
	mulpd	%xmm2, %xmm8
	addpd	%xmm13, %xmm5
	movapd	10 * SIZE(A2, LDA, 2), %xmm13
	mulpd	%xmm2, %xmm9
	addpd	%xmm14, %xmm6
	movapd	12 * SIZE(A2, LDA, 2), %xmm14
	mulpd	%xmm2, %xmm10
	addpd	%xmm15, %xmm7
	movapd	14 * SIZE(A2, LDA, 2), %xmm15
	mulpd	%xmm2, %xmm11

	addpd	%xmm8,  %xmm4
	mulpd	%xmm3, %xmm12
	addpd	%xmm9,  %xmm5
	mulpd	%xmm3, %xmm13
	addpd	%xmm10, %xmm6
	mulpd	%xmm3, %xmm14
	addpd	%xmm11, %xmm7
	mulpd	%xmm3, %xmm15

	addpd	%xmm12, %xmm4
	addpd	%xmm13, %xmm5
	addpd	%xmm14, %xmm6
	addpd	%xmm15, %xmm7

	movapd	%xmm4,  8 * SIZE(Y1)
	movapd	%xmm5, 10 * SIZE(Y1)
	movapd	%xmm6, 12 * SIZE(Y1)
	movapd	%xmm7, 14 * SIZE(Y1)

	addq	$16 * SIZE, A1
	addq	$16 * SIZE, A2
	addq	$16 * SIZE, Y1
	ALIGN_3
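
/* .L15 and .L16 handle the rows left over from the 16-row main loop:
   first a block of 8 rows, then a block of 4, using the same
   column-times-scalar accumulation into the buffered y values. */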
.L15:
	testq	$8, MM
	je	.L16

	movapd	0 * SIZE(Y1), %xmm4
	movapd	2 * SIZE(Y1), %xmm5
	movapd	4 * SIZE(Y1), %xmm6
	movapd	6 * SIZE(Y1), %xmm7

	movapd	0 * SIZE(A1), %xmm8
	movapd	2 * SIZE(A1), %xmm9
	movapd	4 * SIZE(A1), %xmm10
	movapd	6 * SIZE(A1), %xmm11

	movapd	0 * SIZE(A2), %xmm12
	movapd	2 * SIZE(A2), %xmm13
	movapd	4 * SIZE(A2), %xmm14
	movapd	6 * SIZE(A2), %xmm15

	mulpd	%xmm0, %xmm8
	mulpd	%xmm0, %xmm9
	mulpd	%xmm0, %xmm10
	mulpd	%xmm0, %xmm11

	addpd	%xmm8,  %xmm4
	movapd	0 * SIZE(A1, LDA, 2), %xmm8
	mulpd	%xmm1, %xmm12
	addpd	%xmm9,  %xmm5
	movapd	2 * SIZE(A1, LDA, 2), %xmm9
	mulpd	%xmm1, %xmm13
	addpd	%xmm10, %xmm6
	movapd	4 * SIZE(A1, LDA, 2), %xmm10
	mulpd	%xmm1, %xmm14
	addpd	%xmm11, %xmm7
	movapd	6 * SIZE(A1, LDA, 2), %xmm11
	mulpd	%xmm1, %xmm15

	addpd	%xmm12, %xmm4
	movapd	0 * SIZE(A2, LDA, 2), %xmm12
	mulpd	%xmm2, %xmm8
	addpd	%xmm13, %xmm5
	movapd	2 * SIZE(A2, LDA, 2), %xmm13
	mulpd	%xmm2, %xmm9
	addpd	%xmm14, %xmm6
	movapd	4 * SIZE(A2, LDA, 2), %xmm14
	mulpd	%xmm2, %xmm10
	addpd	%xmm15, %xmm7
	movapd	6 * SIZE(A2, LDA, 2), %xmm15
	mulpd	%xmm2, %xmm11

	addpd	%xmm8,  %xmm4
	mulpd	%xmm3, %xmm12
	addpd	%xmm9,  %xmm5
	mulpd	%xmm3, %xmm13
	addpd	%xmm10, %xmm6
	mulpd	%xmm3, %xmm14
	addpd	%xmm11, %xmm7
	mulpd	%xmm3, %xmm15

	addpd	%xmm12, %xmm4
	mulpd	%xmm0, %xmm8
	addpd	%xmm13, %xmm5
	mulpd	%xmm0, %xmm9
	addpd	%xmm14, %xmm6
	mulpd	%xmm0, %xmm10
	addpd	%xmm15, %xmm7
	mulpd	%xmm0, %xmm11

	movapd	%xmm4, 0 * SIZE(Y1)
	movapd	%xmm5, 2 * SIZE(Y1)
	movapd	%xmm6, 4 * SIZE(Y1)
	movapd	%xmm7, 6 * SIZE(Y1)

	addq	$8 * SIZE, A1
	addq	$8 * SIZE, A2
	addq	$8 * SIZE, Y1
	ALIGN_3

.L16:
	testq	$4, MM
	je	.L17

	movapd	0 * SIZE(Y1), %xmm4
	movapd	2 * SIZE(Y1), %xmm5

	movapd	0 * SIZE(A1), %xmm8
	movapd	2 * SIZE(A1), %xmm9
	movapd	0 * SIZE(A2), %xmm10
	movapd	2 * SIZE(A2), %xmm11
	movapd	0 * SIZE(A1, LDA, 2), %xmm12
	movapd	2 * SIZE(A1, LDA, 2), %xmm13
	movapd	0 * SIZE(A2, LDA, 2), %xmm14
	movapd	2 * SIZE(A2, LDA, 2), %xmm15

	mulpd	%xmm0, %xmm8
	mulpd	%xmm0, %xmm9
	mulpd	%xmm1, %xmm10
	mulpd	%xmm1, %xmm11
	mulpd	%xmm2, %xmm12
	mulpd	%xmm2, %xmm13
	mulpd	%xmm3, %xmm14
	mulpd	%xmm3, %xmm15

	addpd	%xmm8,  %xmm4
	addpd	%xmm9,  %xmm5
	addpd	%xmm10, %xmm4
	addpd	%xmm11, %xmm5
	addpd	%xmm12, %xmm4
	addpd	%xmm13, %xmm5
	addpd	%xmm14, %xmm4
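
For reference, the computation this kernel accelerates is the column-major, non-transposed double-precision GEMV update y := alpha*A*x + y: each column of A is scaled by alpha*x[j] and accumulated into y, which is what the unrolled SSE2 loops above do four columns and up to sixteen rows at a time, through a zeroed work buffer rather than through y directly. The following is a minimal C sketch of that arithmetic, assuming positive strides; the function name and plain-C interface are illustrative only and are not the library's actual kernel entry point.

#include <stdio.h>
#include <stddef.h>

/* Reference column-major dgemv, no transpose: y := alpha * A * x + y.
 * Illustrative only; positive incx/incy assumed. */
static void dgemv_n_ref(size_t m, size_t n, double alpha,
                        const double *a, size_t lda,
                        const double *x, size_t incx,
                        double *y, size_t incy)
{
    for (size_t j = 0; j < n; j++) {
        double t = alpha * x[j * incx];   /* alpha * x[j], computed once per column */
        const double *col = a + j * lda;  /* column j of A */
        for (size_t i = 0; i < m; i++)
            y[i * incy] += t * col[i];    /* accumulate this column's contribution */
    }
}

int main(void)
{
    /* 2x2 example in column-major storage: A = [[1, 3], [2, 4]]. */
    double a[] = { 1.0, 2.0, 3.0, 4.0 };
    double x[] = { 1.0, 1.0 };
    double y[] = { 0.0, 0.0 };

    dgemv_n_ref(2, 2, 1.0, a, 2, x, 1, y, 1);
    printf("%g %g\n", y[0], y[1]);  /* prints 4 6 */
    return 0;
}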