zgemm_kernel_2x2_barcelona.s
/*********************************************************************/
/* Optimized BLAS libraries                                          */
/* By Kazushige Goto <kgoto@tacc.utexas.edu>                         */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define OLD_M	%rdi
#define OLD_N	%rsi
#define M	%r13
#define N	%r14
#define K	%rdx
#define A	%rcx
#define B	%r8
#define C	%r9
#define LDC	%r10

#define I	%r11
#define AO	%rdi
#define BO	%rsi
#define CO1	%rbp
#define CO2	%rbx
#define BB	%r12
#define J	%r15

#ifndef WINDOWS_ABI

#define STACKSIZE	96

#define ALPHA_R		48(%rsp)
#define ALPHA_I		56(%rsp)
#define OFFSET		64(%rsp)
#define KK		72(%rsp)
#define KKK		80(%rsp)

#else

#define STACKSIZE	320

#define OLD_ALPHA_I	40 + STACKSIZE(%rsp)
#define OLD_A		48 + STACKSIZE(%rsp)
#define OLD_B		56 + STACKSIZE(%rsp)
#define OLD_C		64 + STACKSIZE(%rsp)
#define OLD_LDC		72 + STACKSIZE(%rsp)
#define OLD_OFFSET	80 + STACKSIZE(%rsp)

#define ALPHA_R		224(%rsp)
#define ALPHA_I		232(%rsp)
#define OFFSET		240(%rsp)
#define KK		248(%rsp)
#define KKK		256(%rsp)

#endif

#define PREFETCH	prefetch
#define PREFETCHSIZE	(8 * 7 + 0)
#define SPREFETCHSIZE	(8 * 3 + 0)

#define movlpd	movsd
#define movapd	movups
#define movupd	movups

#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define ADD1	addpd
#define ADD2	addpd
#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)
#define ADD1	subpd
#define ADD2	addpd
#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
#define ADD1	addpd
#define ADD2	subpd
#else
#define ADD1	subpd
#define ADD2	subpd
#endif

#define KERNEL1(xx) \
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm0, %xmm8 ;\
	movapd	%xmm2, %xmm0 ;\
	PREFETCH (PREFETCHSIZE + 0) * SIZE(AO, %rax, 4) ;\
	ADD1	%xmm1, %xmm12 ;\
	movddup	-14 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm0, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	-13 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm0, %xmm10 ;\
	movapd	-12 * SIZE(AO, %rax, 4), %xmm0 ;\
	ADD1	%xmm1, %xmm14 ;\
	movddup	-12 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	-11 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm0, %xmm2

#define KERNEL2(xx) \
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm0, %xmm8 ;\
	movapd	%xmm2, %xmm0 ;\
	ADD1	%xmm1, %xmm12 ;\
	movddup	-10 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm0, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	 -9 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm0, %xmm10 ;\
	ADD1	%xmm1, %xmm14 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	 -7 * SIZE(BO, %rax, 4), %xmm3 ;\
/**/	movddup	     (BO, %rax, 4), %xmm1 ;\
	movapd	%xmm4, %xmm2

#define KERNEL3(xx) \
	mulpd	%xmm5, %xmm4 ;\
	mulpd	-6 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm4, %xmm8 ;\
	movapd	%xmm2, %xmm4 ;\
	ADD1	%xmm5, %xmm12 ;\
	movddup	-6 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-6 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm4, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	-5 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm4 ;\
	mulpd	-6 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm4, %xmm10 ;\
	movapd	-4 * SIZE(AO, %rax, 4), %xmm4 ;\
	ADD1	%xmm5, %xmm14 ;\
	movddup	-4 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-6 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	-3 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm4, %xmm2

#define KERNEL4(xx) \
	mulpd	%xmm5, %xmm4 ;\
	mulpd	-2 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm4, %xmm8 ;\
	movapd	%xmm2, %xmm4 ;\
	ADD1	%xmm5, %xmm12 ;\
	movddup	-2 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-2 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm4, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	-1 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm4 ;\
	mulpd	-2 * SIZE(AO, %rax, 4), %xmm5 ;\
/***/	movapd	     (AO, %rax, 4), %xmm6 ;\
	ADD1	%xmm4, %xmm10 ;\
	ADD1	%xmm5, %xmm14 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-2 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	 1 * SIZE(BO, %rax, 4), %xmm3 ;\
	movddup	 8 * SIZE(BO, %rax, 4), %xmm5 ;\
	movapd	%xmm6, %xmm2

#define KERNEL5(xx) \
	mulpd	%xmm1, %xmm6 ;\
	mulpd	 2 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm6, %xmm8 ;\
	movapd	%xmm2, %xmm6 ;\
	ADD1	%xmm1, %xmm12 ;\
	movddup	 2 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 2 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
/**/	movapd	 8 * SIZE(AO, %rax, 4), %xmm7 ;\
	movapd	%xmm6, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	 3 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm6 ;\
	mulpd	 2 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm6, %xmm10 ;\
	movapd	 4 * SIZE(AO, %rax, 4), %xmm6 ;\
	ADD1	%xmm1, %xmm14 ;\
	movddup	 4 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 2 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	 5 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm6, %xmm2

#define KERNEL6(xx) \
	mulpd	%xmm1, %xmm6 ;\
	mulpd	 6 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm6, %xmm8 ;\
	movapd	%xmm2, %xmm6 ;\
	ADD1	%xmm1, %xmm12 ;\
	movddup	 6 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 6 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm6, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	 7 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm6 ;\
	mulpd	 6 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm6, %xmm10 ;\
/***/	movapd	16 * SIZE(AO, %rax, 4), %xmm0 ;\
	ADD1	%xmm1, %xmm14 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 6 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	 9 * SIZE(BO, %rax, 4), %xmm3 ;\
	movddup	16 * SIZE(BO, %rax, 4), %xmm1 ;\
	movapd	%xmm7, %xmm2

#define KERNEL7(xx) \
	mulpd	%xmm5, %xmm7 ;\
	mulpd	10 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm7, %xmm8 ;\
	movapd	%xmm2, %xmm7 ;\
	ADD1	%xmm5, %xmm12 ;\
	movddup	10 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	10 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm7, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	11 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm7 ;\
	mulpd	10 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm7, %xmm10 ;\
	movapd	12 * SIZE(AO, %rax, 4), %xmm7 ;\
	ADD1	%xmm5, %xmm14 ;\
	movddup	12 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	10 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	13 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm7, %xmm2

#define KERNEL8(xx) \
	mulpd	%xmm5, %xmm7 ;\
	mulpd	14 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm7, %xmm8 ;\
	movapd	%xmm2, %xmm7 ;\
	ADD1	%xmm5, %xmm12 ;\
	movddup	14 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	14 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm7, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	15 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm7 ;\
	mulpd	14 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm7, %xmm10 ;\
	ADD1	%xmm5, %xmm14 ;\
/**/	movapd	24 * SIZE(AO, %rax, 4), %xmm4 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	14 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	17 * SIZE(BO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	movddup	24 * SIZE(BO, %rax, 4), %xmm5 ;\
	movapd	%xmm0, %xmm2 ;\
	addq	$8 * SIZE, %rax

#define KERNEL_SUB1(xx) \
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm0, %xmm8 ;\
	movapd	%xmm2, %xmm0 ;\
	ADD1	%xmm1, %xmm12 ;\
	movddup	-14 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm0, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	-13 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm0, %xmm10 ;\
	movapd	-12 * SIZE(AO, %rax, 4), %xmm0 ;\
	ADD1	%xmm1, %xmm14 ;\
	movddup	-12 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	-11 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm0, %xmm2

#define KERNEL_SUB2(xx) \
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm0, %xmm8 ;\
	movapd	%xmm2, %xmm0 ;\
	ADD1	%xmm1, %xmm12 ;\
	movddup	-10 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm0, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	 -9 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm1 ;\
	ADD1	%xmm0, %xmm10 ;\
	movapd	     (AO, %rax, 4), %xmm0 ;\
	ADD1	%xmm1, %xmm14 ;\
	movddup	     (BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	 -7 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm4, %xmm2

#define KERNEL_SUB3(xx) \
	mulpd	%xmm5, %xmm4 ;\
	mulpd	-6 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm4, %xmm8 ;\
	movapd	%xmm2, %xmm4 ;\
	ADD1	%xmm5, %xmm12 ;\
	movddup	-6 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-6 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm4, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	-5 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm4 ;\
	mulpd	-6 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm4, %xmm10 ;\
	movapd	-4 * SIZE(AO, %rax, 4), %xmm4 ;\
	ADD1	%xmm5, %xmm14 ;\
	movddup	-4 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-6 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	-3 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm4, %xmm2

#define KERNEL_SUB4(xx) \
	mulpd	%xmm5, %xmm4 ;\
	mulpd	-2 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm4, %xmm8 ;\
	movapd	%xmm2, %xmm4 ;\
	ADD1	%xmm5, %xmm12 ;\
	movddup	-2 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-2 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm9 ;\
	movapd	%xmm4, %xmm2 ;\
	ADD2	%xmm3, %xmm13 ;\
	movddup	-1 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm4 ;\
	mulpd	-2 * SIZE(AO, %rax, 4), %xmm5 ;\
	ADD1	%xmm4, %xmm10 ;\
	ADD1	%xmm5, %xmm14 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-2 * SIZE(AO, %rax, 4), %xmm3 ;\
	ADD2	%xmm2, %xmm11 ;\
	ADD2	%xmm3, %xmm15 ;\
	movddup	 1 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm0, %xmm2

	PROLOGUE
	PROFCODE

	subq	$STACKSIZE, %rsp
	movq	%rbx,   (%rsp)
	movq	%rbp,  8(%rsp)
	movq	%r12, 16(%rsp)
	movq	%r13, 24(%rsp)
	movq	%r14, 32(%rsp)
	movq	%r15, 40(%rsp)

#ifdef WINDOWS_ABI
	movq	%rdi,    48(%rsp)
	movq	%rsi,    56(%rsp)
	movups	%xmm6,   64(%rsp)
	movups	%xmm7,   80(%rsp)
	movups	%xmm8,   96(%rsp)
	movups	%xmm9,  112(%rsp)
	movups	%xmm10, 128(%rsp)
	movups	%xmm11, 144(%rsp)
	movups	%xmm12, 160(%rsp)
	movups	%xmm13, 176(%rsp)
	movups	%xmm14, 192(%rsp)
	movups	%xmm15, 208(%rsp)

	movq	ARG1, OLD_M
	movq	ARG2, OLD_N
	movq	ARG3, K
	movq	OLD_A, A
	movq	OLD_B, B
	movq	OLD_C, C
	movq	OLD_LDC, LDC
#ifdef TRMMKERNEL
	movsd	OLD_OFFSET, %xmm12
#endif
	movaps	%xmm3, %xmm0
	movsd	OLD_ALPHA_I, %xmm1
#else
	movq	STACKSIZE +  8(%rsp), LDC
#ifdef TRMMKERNEL
	movsd	STACKSIZE + 16(%rsp), %xmm12
#endif
#endif

	movq	OLD_M, M
	movq	OLD_N, N

	movlpd	%xmm0, ALPHA_R
	movlpd	%xmm1, ALPHA_I

#ifdef TRMMKERNEL
	movlpd	%xmm12, OFFSET
	movlpd	%xmm12, KK
#ifndef LEFT
	negq	KK
#endif
#endif
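A note on the ADD1/ADD2 selection near the top of the file: each KERNELn macro multiplies the packed [real, imag] pairs of A (loaded with movapd) by the real-broadcast and imaginary-broadcast halves of B (loaded with movddup), keeping the two product streams in separate accumulators (%xmm8..%xmm15). The #if block maps the conjugation variants onto addpd/subpd so the signs come out right when the two halves are combined in the write-back, which lies outside this excerpt: the RN/RT/CN/CT group flips the ADD1 stream, the NR/NC/TR/TC group flips the ADD2 stream. A minimal scalar sketch of the arithmetic being implemented, in C rather than assembly (zmac and its conj_a/conj_b flags are hypothetical names for illustration, not part of the original file):

#include <stdio.h>

/* One complex multiply-accumulate step, c += a * b, with optional
 * conjugation of either operand -- the scalar analogue of what the
 * KERNELn macros compute with the ADD1/ADD2 sign selection. */
static void zmac(double *cr, double *ci,
                 double ar, double ai,
                 double br, double bi,
                 int conj_a, int conj_b)
{
    if (conj_a) ai = -ai;     /* A conjugated: the RN/RT/CN/CT group */
    if (conj_b) bi = -bi;     /* B conjugated: the NR/NC/TR/TC group */
    *cr += ar * br - ai * bi; /* real accumulator */
    *ci += ar * bi + ai * br; /* imaginary accumulator */
}

int main(void)
{
    double cr = 0.0, ci = 0.0;
    zmac(&cr, &ci, 1.0, 2.0, 3.0, 4.0, 0, 0);
    printf("(1+2i)(3+4i) = %g%+gi\n", cr, ci); /* prints -5+10i */
    return 0;
}

The assembly gets the same result without any negation instruction: deferring the sign to the accumulate step lets every variant reuse the identical mulpd sequence.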