📄 trsm_kernel_ln_4x4_sse2.s
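This page is a capture of trsm_kernel_ln_4x4_sse2.s, a double-precision triangular-solve (TRSM) micro-kernel from Kazushige Goto's optimized BLAS for x86-64 with SSE2, unrolled over 4x4 tiles. The LN/LT/RN/RT preprocessor branches inside select which side/transpose variant of the solve the file is built as; the capture breaks off inside the RN solve branch. A hedged C reference model of the operation follows the listing.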
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define M	%rdi
#define N	%rsi
#define K	%rdx
#define A	%rcx
#define B	%r8
#define C	%r9
#define LDC	%r10

#define I	%r11
#define J	%r12
#define AO	%r13
#define BO	%r14
#define CO1	%r15
#define CO2	%rbp

#ifndef WINDOWS_ABI

#define STACKSIZE	64

#define OLD_LDC		 8 + STACKSIZE(%rsp)
#define OLD_OFFSET	16 + STACKSIZE(%rsp)

#else

#define STACKSIZE	256

#define OLD_A		40 + STACKSIZE(%rsp)
#define OLD_B		48 + STACKSIZE(%rsp)
#define OLD_C		56 + STACKSIZE(%rsp)
#define OLD_LDC		64 + STACKSIZE(%rsp)
#define OLD_OFFSET	72 + STACKSIZE(%rsp)

#endif

#define ALPHA	  0(%rsp)
#define OFFSET	 16(%rsp)
#define KK	 24(%rsp)
#define KKK	 32(%rsp)
#define AORIG	 40(%rsp)
#define BORIG	 48(%rsp)
#define BUFFER	128(%rsp)

#if defined(OPTERON) || defined(BARCELONA)
#define PREFETCH	prefetch
#define PREFETCHW	prefetchw
#define PREFETCHNTA	prefetchnta

#ifndef ALLOC_HUGETLB
#define PREFETCHSIZE (8 * 4 + 4)
#else
#define PREFETCHSIZE (8 * 2 + 4)
#endif
#endif

#ifdef GENERIC
#define PREFETCH	prefetcht0
#define PREFETCHW	prefetcht0
#define PREFETCHNTA	prefetchnta
#define PREFETCHSIZE (8 * 4 + 4)
#endif

#define KERNEL1(xx) \
	mulpd	%xmm8, %xmm9 ;\
	addpd	%xmm9, %xmm0 ;\
	movapd	 0 * SIZE + 2 * (xx) * SIZE(BO), %xmm9 ;\
	mulpd	%xmm8, %xmm11 ;\
	PREFETCH (PREFETCHSIZE + 0) * SIZE + 1 * (xx) * SIZE(AO) ;\
	addpd	%xmm11, %xmm1 ;\
	movapd	 2 * SIZE + 2 * (xx) * SIZE(BO), %xmm11 ;\
	mulpd	%xmm8, %xmm13 ;\
	mulpd	 6 * SIZE + 2 * (xx) * SIZE(BO), %xmm8 ;\
	addpd	%xmm13, %xmm2 ;\
	movapd	 4 * SIZE + 2 * (xx) * SIZE(BO), %xmm13 ;\
	addpd	%xmm8, %xmm3 ;\
	movapd	 8 * SIZE + 1 * (xx) * SIZE(AO), %xmm8

#define KERNEL2(xx) \
	mulpd	%xmm10, %xmm9 ;\
	addpd	%xmm9, %xmm4 ;\
	movapd	16 * SIZE + 2 * (xx) * SIZE(BO), %xmm9 ;\
	mulpd	%xmm10, %xmm11 ;\
	addpd	%xmm11, %xmm5 ;\
	movapd	10 * SIZE + 2 * (xx) * SIZE(BO), %xmm11 ;\
	mulpd	%xmm10, %xmm13 ;\
	mulpd	 6 * SIZE + 2 * (xx) * SIZE(BO), %xmm10 ;\
	addpd	%xmm13, %xmm6 ;\
	movapd	12 * SIZE + 2 * (xx) * SIZE(BO), %xmm13 ;\
	addpd	%xmm10, %xmm7 ;\
	movapd	10 * SIZE + 1 * (xx) * SIZE(AO), %xmm10

#define KERNEL3(xx) \
	mulpd	%xmm12, %xmm15 ;\
	addpd	%xmm15, %xmm0 ;\
	movapd	 8 * SIZE + 2 * (xx) * SIZE(BO), %xmm15 ;\
	mulpd	%xmm12, %xmm11 ;\
	addpd	%xmm11, %xmm1 ;\
	movapd	10 * SIZE + 2 * (xx) * SIZE(BO), %xmm11 ;\
	mulpd	%xmm12, %xmm13 ;\
	mulpd	14 * SIZE + 2 * (xx) * SIZE(BO), %xmm12 ;\
	addpd	%xmm13, %xmm2 ;\
	movapd	12 * SIZE + 2 * (xx) * SIZE(BO), %xmm13 ;\
	addpd	%xmm12, %xmm3 ;\
	movapd	12 * SIZE + 1 * (xx) * SIZE(AO), %xmm12

#define KERNEL4(xx) \
	mulpd	%xmm14, %xmm15 ;\
	addpd	%xmm15, %xmm4 ;\
	movapd	24 * SIZE + 2 * (xx) * SIZE(BO), %xmm15 ;\
	mulpd	%xmm14, %xmm11 ;\
	addpd	%xmm11, %xmm5 ;\
	movapd	18 * SIZE + 2 * (xx) * SIZE(BO), %xmm11 ;\
	mulpd	%xmm14, %xmm13 ;\
	mulpd	14 * SIZE + 2 * (xx) * SIZE(BO), %xmm14 ;\
	addpd	%xmm13, %xmm6 ;\
	movapd	20 * SIZE + 2 * (xx) * SIZE(BO), %xmm13 ;\
	addpd	%xmm14, %xmm7 ;\
	movapd	14 * SIZE + 1 * (xx) * SIZE(AO), %xmm14

#define KERNEL5(xx) \
	mulpd	%xmm8, %xmm9 ;\
	addpd	%xmm9, %xmm0 ;\
	movapd	16 * SIZE + 2 * (xx) * SIZE(BO), %xmm9 ;\
	mulpd	%xmm8, %xmm11 ;\
	PREFETCH (PREFETCHSIZE + 8) * SIZE + 1 * (xx) * SIZE(AO) ;\
	addpd	%xmm11, %xmm1 ;\
	movapd	18 * SIZE + 2 * (xx) * SIZE(BO), %xmm11 ;\
	mulpd	%xmm8, %xmm13 ;\
	mulpd	22 * SIZE + 2 * (xx) * SIZE(BO), %xmm8 ;\
	addpd	%xmm13, %xmm2 ;\
	movapd	20 * SIZE + 2 * (xx) * SIZE(BO), %xmm13 ;\
	addpd	%xmm8, %xmm3 ;\
	movapd	16 * SIZE + 1 * (xx) * SIZE(AO), %xmm8

#define KERNEL6(xx) \
	mulpd	%xmm10, %xmm9 ;\
	addpd	%xmm9, %xmm4 ;\
	movapd	32 * SIZE + 2 * (xx) * SIZE(BO), %xmm9 ;\
	mulpd	%xmm10, %xmm11 ;\
	addpd	%xmm11, %xmm5 ;\
	movapd	26 * SIZE + 2 * (xx) * SIZE(BO), %xmm11 ;\
	mulpd	%xmm10, %xmm13 ;\
	mulpd	22 * SIZE + 2 * (xx) * SIZE(BO), %xmm10 ;\
	addpd	%xmm13, %xmm6 ;\
	movapd	28 * SIZE + 2 * (xx) * SIZE(BO), %xmm13 ;\
	addpd	%xmm10, %xmm7 ;\
	movapd	18 * SIZE + 1 * (xx) * SIZE(AO), %xmm10

#define KERNEL7(xx) \
	mulpd	%xmm12, %xmm15 ;\
	addpd	%xmm15, %xmm0 ;\
	movapd	24 * SIZE + 2 * (xx) * SIZE(BO), %xmm15 ;\
	mulpd	%xmm12, %xmm11 ;\
	addpd	%xmm11, %xmm1 ;\
	movapd	26 * SIZE + 2 * (xx) * SIZE(BO), %xmm11 ;\
	mulpd	%xmm12, %xmm13 ;\
	mulpd	30 * SIZE + 2 * (xx) * SIZE(BO), %xmm12 ;\
	addpd	%xmm13, %xmm2 ;\
	movapd	28 * SIZE + 2 * (xx) * SIZE(BO), %xmm13 ;\
	addpd	%xmm12, %xmm3 ;\
	movapd	20 * SIZE + 1 * (xx) * SIZE(AO), %xmm12

#define KERNEL8(xx) \
	mulpd	%xmm14, %xmm15 ;\
	addpd	%xmm15, %xmm4 ;\
	movapd	40 * SIZE + 2 * (xx) * SIZE(BO), %xmm15 ;\
	mulpd	%xmm14, %xmm11 ;\
	addpd	%xmm11, %xmm5 ;\
	movapd	34 * SIZE + 2 * (xx) * SIZE(BO), %xmm11 ;\
	mulpd	%xmm14, %xmm13 ;\
	mulpd	30 * SIZE + 2 * (xx) * SIZE(BO), %xmm14 ;\
	addpd	%xmm13, %xmm6 ;\
	movapd	36 * SIZE + 2 * (xx) * SIZE(BO), %xmm13 ;\
	addpd	%xmm14, %xmm7 ;\
	movapd	22 * SIZE + 1 * (xx) * SIZE(AO), %xmm14

	PROLOGUE
	PROFCODE

	subq	$STACKSIZE, %rsp

	movq	%rbx,  0(%rsp)
	movq	%rbp,  8(%rsp)
	movq	%r12, 16(%rsp)
	movq	%r13, 24(%rsp)
	movq	%r14, 32(%rsp)
	movq	%r15, 40(%rsp)

#ifdef WINDOWS_ABI
	movq	%rdi,    48(%rsp)
	movq	%rsi,    56(%rsp)
	movups	%xmm6,   64(%rsp)
	movups	%xmm7,   80(%rsp)
	movups	%xmm8,   96(%rsp)
	movups	%xmm9,  112(%rsp)
	movups	%xmm10, 128(%rsp)
	movups	%xmm11, 144(%rsp)
	movups	%xmm12, 160(%rsp)
	movups	%xmm13, 176(%rsp)
	movups	%xmm14, 192(%rsp)
	movups	%xmm15, 208(%rsp)

	movq	ARG1, M
	movq	ARG2, N
	movq	ARG3, K
	movq	OLD_A, A
	movq	OLD_B, B
	movq	OLD_C, C
	movq	OLD_LDC, LDC
	movsd	OLD_OFFSET, %xmm4

	movaps	%xmm3, %xmm0

#else
	movq	OLD_LDC, LDC
	movsd	OLD_OFFSET, %xmm4

#endif

	movq	%rsp, %rbx	# save old stack
	subq	$128 + GEMM_Q * 64, %rsp
	andq	$-4096, %rsp	# align stack

#ifdef WINDOWS_ABI
#if GEMM_Q > 192
	movq	$0, 4096 * 3(%rsp)
#endif
#if GEMM_Q > 128
	movq	$0, 4096 * 2(%rsp)
#endif
#if GEMM_Q > 64
	movq	$0, 4096 * 1(%rsp)
#endif
	movq	$0, 4096 * 0(%rsp)
#endif

	movsd	%xmm4, OFFSET
	movsd	%xmm4, KK

	leaq	(, LDC, SIZE), LDC	# ldc *= SIZE (bytes per element)

#ifdef LN
	leaq	(, M, SIZE), %rax
	addq	%rax, C
	imulq	K, %rax
	addq	%rax, A
#endif

#ifdef RT
	leaq	(, N, SIZE), %rax
	imulq	K, %rax
	addq	%rax, B

	movq	N, %rax
	imulq	LDC, %rax
	addq	%rax, C
#endif

#ifdef RN
	negq	KK
#endif

#ifdef RT
	movq	N, %rax
	subq	OFFSET, %rax
	movq	%rax, KK
#endif

	movq	N, J
	sarq	$2, J		# j = (n >> 2)
	jle	.L40

.L01:
/* Copying to Sub Buffer */

#ifdef LN
	movq	OFFSET, %rax
	addq	M, %rax
	movq	%rax, KK
#endif

	leaq	BUFFER, BO

#ifdef RT
	movq	K, %rax
	salq	$2 + BASE_SHIFT, %rax
	subq	%rax, B
#endif

#if defined(LN) || defined(RT)
	movq	KK, %rax
	movq	B, BORIG
	leaq	(, %rax, SIZE), %rax
	leaq	(B,  %rax, 4), B
	leaq	(BO, %rax, 8), BO
#endif

#ifdef LT
	movq	OFFSET, %rax
	movq	%rax, KK
#endif

#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	sarq	$2, %rax
	jle	.L03

	addq	%rax, %rax
	ALIGN_4

.L02:
	# pack B into BUFFER, storing each value twice so that an aligned
	# movapd later yields the pair {b, b} in both SSE2 lanes
	PREFETCHNTA	40 * SIZE(B)

	movsd	 0 * SIZE(B), %xmm0
	movsd	 1 * SIZE(B), %xmm1
	movsd	 2 * SIZE(B), %xmm2
	movsd	 3 * SIZE(B), %xmm3
	movsd	 4 * SIZE(B), %xmm4
	movsd	 5 * SIZE(B), %xmm5
	movsd	 6 * SIZE(B), %xmm6
	movsd	 7 * SIZE(B), %xmm7

	addq	$16 * SIZE, BO
	addq	$ 8 * SIZE, B

	movsd	%xmm0, -16 * SIZE(BO)
	movsd	%xmm0, -15 * SIZE(BO)
	movsd	%xmm1, -14 * SIZE(BO)
	movsd	%xmm1, -13 * SIZE(BO)
	movsd	%xmm2, -12 * SIZE(BO)
	movsd	%xmm2, -11 * SIZE(BO)
	movsd	%xmm3, -10 * SIZE(BO)
	movsd	%xmm3,  -9 * SIZE(BO)
	movsd	%xmm4,  -8 * SIZE(BO)
	movsd	%xmm4,  -7 * SIZE(BO)
	movsd	%xmm5,  -6 * SIZE(BO)
	movsd	%xmm5,  -5 * SIZE(BO)
	movsd	%xmm6,  -4 * SIZE(BO)
	movsd	%xmm6,  -3 * SIZE(BO)
	movsd	%xmm7,  -2 * SIZE(BO)
	movsd	%xmm7,  -1 * SIZE(BO)

	decq	%rax
	jne	.L02
	ALIGN_4

.L03:
#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	andq	$3, %rax
	BRANCH
	jle	.L10
	ALIGN_4

.L04:
	movsd	 0 * SIZE(B), %xmm0
	movsd	 1 * SIZE(B), %xmm1
	movsd	 2 * SIZE(B), %xmm2
	movsd	 3 * SIZE(B), %xmm3

	movsd	%xmm0,  0 * SIZE(BO)
	movsd	%xmm0,  1 * SIZE(BO)
	movsd	%xmm1,  2 * SIZE(BO)
	movsd	%xmm1,  3 * SIZE(BO)
	movsd	%xmm2,  4 * SIZE(BO)
	movsd	%xmm2,  5 * SIZE(BO)
	movsd	%xmm3,  6 * SIZE(BO)
	movsd	%xmm3,  7 * SIZE(BO)

	addq	$4 * SIZE, B
	addq	$8 * SIZE, BO
	decq	%rax
	jne	.L04
	ALIGN_4

.L10:
#if defined(LT) || defined(RN)
	movq	A, AO
#else
	movq	A, AORIG
#endif

#ifdef RT
	leaq	(, LDC, 4), %rax
	subq	%rax, C
#endif

	movq	C, CO1			# coffset1 = c
	leaq	(C, LDC, 1), CO2	# coffset2 = c + ldc

#ifndef RT
	leaq	(C, LDC, 4), C
#endif

	testq	$1, M
	je	.L20
	ALIGN_4

.L31:
	# M is odd: handle the single leftover row of A against four columns of B
#ifdef LN
	movq	K, %rax
	salq	$0 + BASE_SHIFT, %rax
	subq	%rax, AORIG
#endif

#if defined(LN) || defined(RT)
	movq	KK, %rax
	movq	AORIG, AO
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 1), AO
#endif

	leaq	BUFFER, BO

#if defined(LN) || defined(RT)
	movq	KK, %rax
	salq	$2 + BASE_SHIFT, %rax
	leaq	(BO, %rax, 2), BO
#endif

	movsd	 0 * SIZE(AO), %xmm8
	pxor	%xmm0, %xmm0
	movsd	 0 * SIZE(BO), %xmm9
	pxor	%xmm1, %xmm1
	movsd	 8 * SIZE(AO), %xmm10
	pxor	%xmm2, %xmm2
	movsd	 8 * SIZE(BO), %xmm11
	pxor	%xmm3, %xmm3

	movsd	16 * SIZE(BO), %xmm13
	movsd	24 * SIZE(BO), %xmm15

#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	sarq	$3, %rax
	je	.L35
	ALIGN_4

.L32:
	# K loop unrolled 8x: xmm0-xmm3 accumulate the four per-column products
	mulsd	%xmm8, %xmm9
	addsd	%xmm9, %xmm0
	PREFETCH (PREFETCHSIZE + 0) * SIZE(AO)
	movsd	 2 * SIZE(BO), %xmm9
	mulsd	%xmm8, %xmm9
	addsd	%xmm9, %xmm1
	movsd	 4 * SIZE(BO), %xmm9
	mulsd	%xmm8, %xmm9
	mulsd	 6 * SIZE(BO), %xmm8
	addsd	%xmm9, %xmm2
	movsd	32 * SIZE(BO), %xmm9
	addsd	%xmm8, %xmm3
	movsd	 1 * SIZE(AO), %xmm8

	mulsd	%xmm8, %xmm11
	addsd	%xmm11, %xmm0
	movsd	10 * SIZE(BO), %xmm11
	mulsd	%xmm8, %xmm11
	addsd	%xmm11, %xmm1
	movsd	12 * SIZE(BO), %xmm11
	mulsd	%xmm8, %xmm11
	mulsd	14 * SIZE(BO), %xmm8
	addsd	%xmm11, %xmm2
	movsd	40 * SIZE(BO), %xmm11
	addsd	%xmm8, %xmm3
	movsd	 2 * SIZE(AO), %xmm8

	mulsd	%xmm8, %xmm13
	addsd	%xmm13, %xmm0
	movsd	18 * SIZE(BO), %xmm13
	mulsd	%xmm8, %xmm13
	addsd	%xmm13, %xmm1
	movsd	20 * SIZE(BO), %xmm13
	mulsd	%xmm8, %xmm13
	mulsd	22 * SIZE(BO), %xmm8
	addsd	%xmm13, %xmm2
	movsd	48 * SIZE(BO), %xmm13
	addsd	%xmm8, %xmm3
	movsd	 3 * SIZE(AO), %xmm8

	mulsd	%xmm8, %xmm15
	addsd	%xmm15, %xmm0
	movsd	26 * SIZE(BO), %xmm15
	mulsd	%xmm8, %xmm15
	addsd	%xmm15, %xmm1
	movsd	28 * SIZE(BO), %xmm15
	mulsd	%xmm8, %xmm15
	mulsd	30 * SIZE(BO), %xmm8
	addsd	%xmm15, %xmm2
	movsd	56 * SIZE(BO), %xmm15
	addsd	%xmm8, %xmm3
	movsd	 4 * SIZE(AO), %xmm8

	mulsd	%xmm8, %xmm9
	addsd	%xmm9, %xmm0
	movsd	34 * SIZE(BO), %xmm9
	mulsd	%xmm8, %xmm9
	addsd	%xmm9, %xmm1
	movsd	36 * SIZE(BO), %xmm9
	mulsd	%xmm8, %xmm9
	mulsd	38 * SIZE(BO), %xmm8
	addsd	%xmm9, %xmm2
	movsd	64 * SIZE(BO), %xmm9
	addsd	%xmm8, %xmm3
	movsd	 5 * SIZE(AO), %xmm8

	mulsd	%xmm8, %xmm11
	addsd	%xmm11, %xmm0
	movsd	42 * SIZE(BO), %xmm11
	mulsd	%xmm8, %xmm11
	addsd	%xmm11, %xmm1
	movsd	44 * SIZE(BO), %xmm11
	mulsd	%xmm8, %xmm11
	mulsd	46 * SIZE(BO), %xmm8
	addsd	%xmm11, %xmm2
	movsd	72 * SIZE(BO), %xmm11
	addsd	%xmm8, %xmm3
	movsd	 6 * SIZE(AO), %xmm8

	mulsd	%xmm8, %xmm13
	addsd	%xmm13, %xmm0
	movsd	50 * SIZE(BO), %xmm13
	mulsd	%xmm8, %xmm13
	addsd	%xmm13, %xmm1
	movsd	52 * SIZE(BO), %xmm13
	mulsd	%xmm8, %xmm13
	mulsd	54 * SIZE(BO), %xmm8
	addsd	%xmm13, %xmm2
	movsd	80 * SIZE(BO), %xmm13
	addsd	%xmm8, %xmm3
	movsd	 7 * SIZE(AO), %xmm8

	mulsd	%xmm8, %xmm15
	addsd	%xmm15, %xmm0
	movsd	58 * SIZE(BO), %xmm15
	mulsd	%xmm8, %xmm15
	addsd	%xmm15, %xmm1
	movsd	60 * SIZE(BO), %xmm15
	mulsd	%xmm8, %xmm15
	mulsd	62 * SIZE(BO), %xmm8
	addsd	%xmm15, %xmm2
	movsd	88 * SIZE(BO), %xmm15
	addsd	%xmm8, %xmm3
	movsd	 8 * SIZE(AO), %xmm8

	addq	$ 8 * SIZE, AO
	addq	$64 * SIZE, BO
	decq	%rax
	jne	.L32
	ALIGN_4

.L35:
#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	andq	$7, %rax		# if (k & 7)
	BRANCH
	je	.L38
	ALIGN_4

.L36:
	mulsd	%xmm8, %xmm9
	addsd	%xmm9, %xmm0
	movsd	 2 * SIZE(BO), %xmm9
	mulsd	%xmm8, %xmm9
	addsd	%xmm9, %xmm1
	movsd	 4 * SIZE(BO), %xmm9
	mulsd	%xmm8, %xmm9
	mulsd	 6 * SIZE(BO), %xmm8
	addsd	%xmm9, %xmm2
	movsd	 8 * SIZE(BO), %xmm9
	addsd	%xmm8, %xmm3
	movsd	 1 * SIZE(AO), %xmm8

	addq	$1 * SIZE, AO		# aoffset  += 1
	addq	$8 * SIZE, BO		# boffset1 += 8
	decq	%rax
	jg	.L36
	ALIGN_4

.L38:
	# accumulation done: begin the triangular solve for this 1x4 tile
#if defined(LN) || defined(RT)
	movq	KK, %rax
#ifdef LN
	subq	$1, %rax
#else
	subq	$4, %rax
#endif

	movq	AORIG, AO
	movq	BORIG, B
	leaq	BUFFER, BO

	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 1), AO
	leaq	(B,  %rax, 4), B
	leaq	(BO, %rax, 8), BO
#endif

#if defined(LN) || defined(LT)
	movsd	 0 * SIZE(B), %xmm4
	movsd	 1 * SIZE(B), %xmm5
	movsd	 2 * SIZE(B), %xmm6
	movsd	 3 * SIZE(B), %xmm7
#else
	movsd	 0 * SIZE(AO), %xmm4
	movsd	 1 * SIZE(AO), %xmm5
	movsd	 2 * SIZE(AO), %xmm6
	movsd	 3 * SIZE(AO), %xmm7
#endif

	subsd	%xmm0, %xmm4
	subsd	%xmm1, %xmm5
	subsd	%xmm2, %xmm6
	subsd	%xmm3, %xmm7

#ifdef LN
	movsd	 0 * SIZE(AO), %xmm0
	mulsd	%xmm0, %xmm4
	mulsd	%xmm0, %xmm5
	mulsd	%xmm0, %xmm6
	mulsd	%xmm0, %xmm7
#endif

#ifdef LT
	movsd	 0 * SIZE(AO), %xmm0
	mulsd	%xmm0, %xmm4
	mulsd	%xmm0, %xmm5
	mulsd	%xmm0, %xmm6
	mulsd	%xmm0, %xmm7
#endif

#ifdef RN
	mulsd	 0 * SIZE(B), %xmm4

	movlpd	 1 * SIZE(B), %xmm1
	mulsd	%xmm4, %xmm1
	subsd	%xmm1, %xmm5
	movlpd	 2 * SIZE(B), %xmm2
	mulsd	%xmm4, %xmm2
	subsd	%xmm2, %xmm6
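The capture breaks off here, partway through the RN solve branch. For orientation, here is a minimal C model of the operation such a left-side triangular-solve kernel computes. This is a sketch under stated assumptions, not the kernel's real interface: the function name trsm_l_lower_ref, the plain column-major layout, and the explicit division are all illustrative. The real kernel works on packed buffers (note the .L02 loop above, which stores every B value twice so an aligned movapd yields the pair {b, b}, SSE2 having no broadcast load), and Goto's packing stage typically supplies reciprocal diagonal entries so the solve can use mulsd instead of a division; treat that detail as an assumption here.

#include <stddef.h>

/* Hypothetical reference model: solve A * X = B in place, where A is an
 * m-by-m lower-triangular matrix and B an m-by-n right-hand side, both
 * column-major.  X overwrites B.  The assembly kernel computes the same
 * recurrence on packed 4x4 tiles. */
void trsm_l_lower_ref(size_t m, size_t n,
                      const double *A, size_t lda,
                      double *B, size_t ldb)
{
    for (size_t j = 0; j < n; j++) {        /* each column of B        */
        double *b = B + j * ldb;
        for (size_t i = 0; i < m; i++) {    /* forward substitution    */
            double x = b[i];
            for (size_t k = 0; k < i; k++)  /* subtract solved entries */
                x -= A[i + k * lda] * b[k];
            b[i] = x / A[i + i * lda];      /* the kernel instead multiplies
                                               by a pre-inverted diagonal */
        }
    }
}

In this model, the inner subtraction corresponds to the GEMM-style accumulation in .L32 (xmm0 through xmm3 hold the four per-column dot products for one row of A), and the final scaling corresponds to the mulsd against 0 * SIZE(AO) in the LN/LT branches of .L38.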