/* zgemm_kernel_4x2_barcelona.s */
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define OLD_M   %rdi
#define OLD_N   %rsi
#define M       %r13
#define N       %r14
#define K       %rdx

#define A       %rcx
#define B       %r8
#define C       %r9
#define LDC     %r10

#define I       %r11
#define AO      %rdi
#define BO      %rsi
#define CO1     %r15
#define CO2     %rbp
#define BB      %r12

#ifndef WINDOWS_ABI

#define STACKSIZE 64

#else

#define STACKSIZE 256

#define OLD_ALPHA_I     40 + STACKSIZE(%rsp)
#define OLD_A           48 + STACKSIZE(%rsp)
#define OLD_B           56 + STACKSIZE(%rsp)
#define OLD_C           64 + STACKSIZE(%rsp)
#define OLD_LDC         72 + STACKSIZE(%rsp)
#define OLD_OFFSET      80 + STACKSIZE(%rsp)

#endif

#define POSINV   0(%rsp)
#define ALPHA_R 16(%rsp)
#define ALPHA_I 32(%rsp)
#define J       48(%rsp)
#define OFFSET  56(%rsp)
#define KK      64(%rsp)
#define KKK     72(%rsp)
#define BUFFER 128(%rsp)

#ifdef OPTERON
#define movsd   movlps
#endif

#define PREFETCH      prefetch
#define PREFETCHSIZE  (16 * 17 + 0)
#define RPREFETCHSIZE (16 *  9 + 0)
#define WPREFETCHSIZE (16 *  9 + 0)
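/* NOTE (added commentary, not part of the original source): KERNEL1
   through KERNEL8 below form one 8-way unrolled pass over the K loop.
   Each macro appears to multiply four packed single-precision values
   of the packed A panel (indexed through AO + %rax*4) by broadcast
   values of the packed B panel (BO + %rax*8), accumulating into the
   eight accumulators %xmm8-%xmm15; %xmm0-%xmm7 rotate as load and
   copy temporaries so that loads for the next step overlap the
   multiplies of the current one.  Only KERNEL8 advances %rax. */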
#define KERNEL1(xx) \
        mulps   %xmm1, %xmm0 ;\
        mulps   -28 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm0, %xmm8 ;\
        PREFETCH (PREFETCHSIZE + 0) * SIZE(AO, %rax, 4) ;\
        movaps  %xmm2, %xmm0 ;\
        addps   %xmm1, %xmm12 ;\
        movaps  -24 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -28 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm0, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  -20 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm1, %xmm0 ;\
        mulps   -28 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm0, %xmm10 ;\
        movaps  -24 * SIZE(AO, %rax, 4), %xmm0 ;\
        addps   %xmm1, %xmm14 ;\
        movaps  -16 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -28 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  -12 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm0, %xmm2

#define KERNEL2(xx) \
        mulps   %xmm1, %xmm0 ;\
        mulps   -20 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm0, %xmm8 ;\
        movaps  %xmm2, %xmm0 ;\
        addps   %xmm1, %xmm12 ;\
        movaps  -8 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -20 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm0, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  -4 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm1, %xmm0 ;\
        mulps   -20 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm0, %xmm10 ;\
        addps   %xmm1, %xmm14 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -20 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  4 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm4, %xmm2

#define KERNEL3(xx) \
        mulps   %xmm5, %xmm4 ;\
        mulps   -12 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm4, %xmm8 ;\
        movaps  32 * SIZE(BO, %rax, 8), %xmm1 ;\
        movaps  %xmm2, %xmm4 ;\
        addps   %xmm5, %xmm12 ;\
        movaps  8 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -12 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm4, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  12 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm5, %xmm4 ;\
        mulps   -12 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm4, %xmm10 ;\
        movaps  -8 * SIZE(AO, %rax, 4), %xmm4 ;\
        addps   %xmm5, %xmm14 ;\
        movaps  16 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -12 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  20 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm4, %xmm2

#define KERNEL4(xx) \
        mulps   %xmm5, %xmm4 ;\
        mulps   -4 * SIZE(AO, %rax, 4), %xmm5 ;\
        movaps  (AO, %rax, 4), %xmm6 ;\
        addps   %xmm4, %xmm8 ;\
        movaps  %xmm2, %xmm4 ;\
        addps   %xmm5, %xmm12 ;\
        movaps  24 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -4 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm4, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  28 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm5, %xmm4 ;\
        mulps   -4 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm4, %xmm10 ;\
        addps   %xmm5, %xmm14 ;\
        movaps  64 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -4 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  36 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm6, %xmm2

#define KERNEL5(xx) \
        mulps   %xmm1, %xmm6 ;\
        mulps   4 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm6, %xmm8 ;\
        movaps  %xmm2, %xmm6 ;\
        addps   %xmm1, %xmm12 ;\
        movaps  40 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   4 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  16 * SIZE(AO, %rax, 4), %xmm7 ;\
        movaps  %xmm6, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  44 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm1, %xmm6 ;\
        mulps   4 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm6, %xmm10 ;\
        movaps  8 * SIZE(AO, %rax, 4), %xmm6 ;\
        addps   %xmm1, %xmm14 ;\
        movaps  48 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   4 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  52 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm6, %xmm2

#define KERNEL6(xx) \
        mulps   %xmm1, %xmm6 ;\
        mulps   12 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm6, %xmm8 ;\
        movaps  %xmm2, %xmm6 ;\
        addps   %xmm1, %xmm12 ;\
        movaps  56 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   12 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm6, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  60 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm1, %xmm6 ;\
        mulps   12 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm6, %xmm10 ;\
        movaps  32 * SIZE(AO, %rax, 4), %xmm0 ;\
        addps   %xmm1, %xmm14 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   12 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  68 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm7, %xmm2

#define KERNEL7(xx) \
        mulps   %xmm5, %xmm7 ;\
        mulps   20 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm7, %xmm8 ;\
        movaps  96 * SIZE(BO, %rax, 8), %xmm1 ;\
        movaps  %xmm2, %xmm7 ;\
        addps   %xmm5, %xmm12 ;\
        movaps  72 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   20 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm7, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  76 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm5, %xmm7 ;\
        mulps   20 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm7, %xmm10 ;\
        movaps  24 * SIZE(AO, %rax, 4), %xmm7 ;\
        addps   %xmm5, %xmm14 ;\
        movaps  80 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   20 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  84 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm7, %xmm2

#define KERNEL8(xx) \
        mulps   %xmm5, %xmm7 ;\
        mulps   28 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm7, %xmm8 ;\
        movaps  %xmm2, %xmm7 ;\
        addps   %xmm5, %xmm12 ;\
        movaps  88 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   28 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm7, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  92 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm5, %xmm7 ;\
        mulps   28 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm7, %xmm10 ;\
        movaps  48 * SIZE(AO, %rax, 4), %xmm4 ;\
        addps   %xmm5, %xmm14 ;\
        movaps  128 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   28 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  100 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm0, %xmm2 ;\
        addq    $16 * SIZE, %rax
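/* NOTE (added commentary, not part of the original source): the
   KERNEL_SUBn macros repeat the arithmetic of KERNEL1-KERNEL4 without
   the software prefetch and without advancing %rax; presumably they
   serve the shorter loop path taken when fewer than eight unrolled
   iterations of K remain. */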
#define KERNEL_SUB1(xx) \
        mulps   %xmm1, %xmm0 ;\
        mulps   -28 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm0, %xmm8 ;\
        movaps  %xmm2, %xmm0 ;\
        addps   %xmm1, %xmm12 ;\
        movaps  -24 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -28 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm0, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  -20 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm1, %xmm0 ;\
        mulps   -28 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm0, %xmm10 ;\
        movaps  -24 * SIZE(AO, %rax, 4), %xmm0 ;\
        addps   %xmm1, %xmm14 ;\
        movaps  -16 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -28 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  -12 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm0, %xmm2

#define KERNEL_SUB2(xx) \
        mulps   %xmm1, %xmm0 ;\
        mulps   -20 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm0, %xmm8 ;\
        movaps  %xmm2, %xmm0 ;\
        addps   %xmm1, %xmm12 ;\
        movaps  -8 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -20 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm0, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  -4 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm1, %xmm0 ;\
        mulps   -20 * SIZE(AO, %rax, 4), %xmm1 ;\
        addps   %xmm0, %xmm10 ;\
        movaps  (AO, %rax, 4), %xmm0 ;\
        addps   %xmm1, %xmm14 ;\
        movaps  32 * SIZE(BO, %rax, 8), %xmm1 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -20 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  4 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm4, %xmm2

#define KERNEL_SUB3(xx) \
        mulps   %xmm5, %xmm4 ;\
        mulps   -12 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm4, %xmm8 ;\
        movaps  %xmm2, %xmm4 ;\
        addps   %xmm5, %xmm12 ;\
        movaps  8 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -12 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm4, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  12 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm5, %xmm4 ;\
        mulps   -12 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm4, %xmm10 ;\
        movaps  -8 * SIZE(AO, %rax, 4), %xmm4 ;\
        addps   %xmm5, %xmm14 ;\
        movaps  16 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -12 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  20 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm4, %xmm2

#define KERNEL_SUB4(xx) \
        mulps   %xmm5, %xmm4 ;\
        mulps   -4 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm4, %xmm8 ;\
        movaps  %xmm2, %xmm4 ;\
        addps   %xmm5, %xmm12 ;\
        movaps  24 * SIZE(BO, %rax, 8), %xmm5 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -4 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm9 ;\
        movaps  %xmm4, %xmm2 ;\
        addps   %xmm3, %xmm13 ;\
        movaps  28 * SIZE(BO, %rax, 8), %xmm3 ;\
        mulps   %xmm5, %xmm4 ;\
        mulps   -4 * SIZE(AO, %rax, 4), %xmm5 ;\
        addps   %xmm4, %xmm10 ;\
        addps   %xmm5, %xmm14 ;\
        mulps   %xmm3, %xmm2 ;\
        mulps   -4 * SIZE(AO, %rax, 4), %xmm3 ;\
        addps   %xmm2, %xmm11 ;\
        addps   %xmm3, %xmm15 ;\
        movaps  36 * SIZE(BO, %rax, 8), %xmm3 ;\
        movaps  %xmm0, %xmm2
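/* NOTE (added commentary, not part of the original source): entry
   point.  The prologue saves the callee-saved registers (plus
   %rdi/%rsi and %xmm6-%xmm15 under the Windows ABI), carves a
   page-aligned scratch area out of the stack for the packed copy of B
   (BUFFER), broadcasts alpha into ALPHA_R/ALPHA_I, and builds POSINV,
   a mask of 0x80000000 in alternating lanes that is XORed into packed
   operands to produce the sign pattern the selected conjugation
   variant (NN/NT/.../CC) requires. */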
        PROLOGUE
        PROFCODE

        subq    $STACKSIZE, %rsp

        movq    %rbx,  0(%rsp)
        movq    %rbp,  8(%rsp)
        movq    %r12, 16(%rsp)
        movq    %r13, 24(%rsp)
        movq    %r14, 32(%rsp)
        movq    %r15, 40(%rsp)

#ifdef WINDOWS_ABI
        movq    %rdi,    48(%rsp)
        movq    %rsi,    56(%rsp)
        movups  %xmm6,   64(%rsp)
        movups  %xmm7,   80(%rsp)
        movups  %xmm8,   96(%rsp)
        movups  %xmm9,  112(%rsp)
        movups  %xmm10, 128(%rsp)
        movups  %xmm11, 144(%rsp)
        movups  %xmm12, 160(%rsp)
        movups  %xmm13, 176(%rsp)
        movups  %xmm14, 192(%rsp)
        movups  %xmm15, 208(%rsp)

        movq    ARG1, OLD_M
        movq    ARG2, OLD_N
        movq    ARG3, K
        movq    OLD_A, A
        movq    OLD_B, B
        movq    OLD_C, C
        movq    OLD_LDC, LDC
#ifdef TRMMKERNEL
        movsd   OLD_OFFSET, %xmm12
#endif
        movaps  %xmm3, %xmm0
        movsd   OLD_ALPHA_I, %xmm1
#else
        movq    72(%rsp), LDC
#ifdef TRMMKERNEL
        movsd   80(%rsp), %xmm12
#endif
#endif

        EMMS

        movq    %rsp, %rbx              # save old stack
        subq    $128 + GEMM_Q * 64, %rsp
        andq    $-4096, %rsp            # align stack

#ifdef WINDOWS_ABI
#if GEMM_Q > 192
        movq    $0, 4096 * 3(%rsp)
#endif
#if GEMM_Q > 128
        movq    $0, 4096 * 2(%rsp)
#endif
#if GEMM_Q > 64
        movq    $0, 4096 * 1(%rsp)
#endif
        movq    $0, 4096 * 0(%rsp)
#endif

        movq    OLD_M, M
        movq    OLD_N, N

        pxor    %xmm7, %xmm7
        cmpeqps %xmm7, %xmm7
        pslld   $31, %xmm7              # generate the sign-bit mask
        pxor    %xmm10, %xmm10

        shufps  $0, %xmm0, %xmm0
        movaps  %xmm0,  0 + ALPHA_R

        movss   %xmm1,  4 + ALPHA_I
        movss   %xmm1, 12 + ALPHA_I
        xorps   %xmm7, %xmm1
        movss   %xmm1,  0 + ALPHA_I
        movss   %xmm1,  8 + ALPHA_I

#if defined(NN) || defined(NT) || defined(NR) || defined(NC) || \
    defined(TN) || defined(TT) || defined(TR) || defined(TC)
        movss   %xmm7,   0 + POSINV
        movss   %xmm10,  4 + POSINV
        movss   %xmm7,   8 + POSINV
        movss   %xmm10, 12 + POSINV
#else
        movss   %xmm10,  0 + POSINV
        movss   %xmm7,   4 + POSINV
        movss   %xmm10,  8 + POSINV
        movss   %xmm7,  12 + POSINV
#endif

        addq    $32 * SIZE, A

#ifdef TRMMKERNEL
        movsd   %xmm12, OFFSET
        movsd   %xmm12, KK
#ifndef LEFT
        negq    KK
#endif
#endif

        salq    $ZBASE_SHIFT, LDC

        movq    N, J
        sarq    $1, J                   # j = (n >> 1)
        jle     .L40
        ALIGN_4

.L01:
#if defined(TRMMKERNEL) && defined(LEFT)
        movq    OFFSET, %rax
        movq    %rax, KK
#endif

/* Copying to Sub Buffer */
        leaq    BUFFER, BO

        movaps  POSINV, %xmm15

        movq    K, %rax
        sarq    $2, %rax
        jle     .L03

        addq    %rax, %rax
        ALIGN_4

.L02:
        prefetch        (RPREFETCHSIZE + 0) * SIZE(B)

        movaps  0 * SIZE(B), %xmm3
        movaps  4 * SIZE(B), %xmm7

        prefetchw       (WPREFETCHSIZE +  0) * SIZE(BO)

        pshufd  $0x00, %xmm3, %xmm0
        pshufd  $0x55, %xmm3, %xmm1
        pshufd  $0xaa, %xmm3, %xmm2
        pshufd  $0xff, %xmm3, %xmm3

        prefetchw       (WPREFETCHSIZE + 16) * SIZE(BO)

        pshufd  $0x00, %xmm7, %xmm4
        pshufd  $0x55, %xmm7, %xmm5
        pshufd  $0xaa, %xmm7, %xmm6
        pshufd  $0xff, %xmm7, %xmm7

#if defined(NN) || defined(NT) || defined(NR) || defined(NC) || \
    defined(TN) || defined(TT) || defined(TR) || defined(TC)
        xorps   %xmm15, %xmm1
        xorps   %xmm15, %xmm3
        xorps   %xmm15, %xmm5
        xorps   %xmm15, %xmm7
#else
        xorps   %xmm15, %xmm0
        xorps   %xmm15, %xmm2
        xorps   %xmm15, %xmm4
        xorps   %xmm15, %xmm6
#endif

        movaps  %xmm0,  0 * SIZE(BO)
        movaps  %xmm1,  4 * SIZE(BO)
        movaps  %xmm2,  8 * SIZE(BO)
        movaps  %xmm3, 12 * SIZE(BO)
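/* NOTE (added commentary, not part of the original source): a rough C
   sketch of the packing performed by the first half of one .L02
   iteration above, assuming single-precision complex data and the
   NN/NT/.../TC branch (the #else branch flips the real parts
   instead).  Names are hypothetical, for illustration only:

   static void pack_b_pair(const float *b, float *bo) {
       // b:  two complex values {re0, im0, re1, im1}, read with movaps
       // bo: 16 floats written to the sub buffer
       for (int k = 0; k < 2; k++) {
           float re = b[2 * k], im = b[2 * k + 1];
           for (int j = 0; j < 4; j++) {
               bo[8 * k + j]     = re;                       // pshufd broadcast
               bo[8 * k + 4 + j] = (j % 2 == 0) ? -im : im;  // xorps with POSINV
           }
       }
   }

   Expanding each complex entry of B into a broadcast of its real part
   plus a sign-alternated broadcast of its imaginary part lets the
   kernel macros carry out the complex multiply with plain
   mulps/addps. */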