trsm_kernel_ln_2x4_sse2.s
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define STACK	16
#define ARGS	0

#define OLD_M		 4 + STACK + ARGS(%esi)
#define OLD_N		 8 + STACK + ARGS(%esi)
#define OLD_K		12 + STACK + ARGS(%esi)
#define OLD_ALPHA	16 + STACK + ARGS(%esi)
#define OLD_A		24 + STACK + ARGS(%esi)
#define OLD_B		28 + STACK + ARGS(%esi)
#define OLD_C		32 + STACK + ARGS(%esi)
#define OLD_LDC		36 + STACK + ARGS(%esi)
#define OLD_OFFT	40 + STACK + ARGS(%esi)

#define K	16(%esp)
#define N	20(%esp)
#define M	24(%esp)
#define A	28(%esp)
#define C	32(%esp)
#define J	36(%esp)
#define OLD_STACK	40(%esp)
#define OFFSET	44(%esp)
#define KK	48(%esp)
#define KKK	52(%esp)
#define AORIG	56(%esp)
#define BORIG	60(%esp)
#define BUFFER	128(%esp)

#define STACK_ALIGN	4096
#define STACK_OFFSET	1024

#define LOCAL_BUFFER_SIZE	GEMM_Q * GEMM_UNROLL_N * COMPSIZE * 16

#if defined(OPTERON) || defined(BARCELONA)
#define PREFETCH	prefetch
#define PREFETCHSIZE	(8 * 10 + 4)
#endif

#define B	%edi
#define AA	%edx
#define BB	%ecx
#define LDC	%ebp
#define CO1	%esi

#define KERNEL1(address) \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm4; \
	PREFETCH (PREFETCHSIZE + 0) * SIZE + (address) * 1 * SIZE(AA); \
	movapd	 2 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	 4 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	mulpd	 6 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm2, %xmm6; \
	movapd	16 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm0, %xmm7; \
	movapd	 2 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL2(address) \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	10 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	12 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	mulpd	14 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm3, %xmm6; \
	movapd	24 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm0, %xmm7; \
	movapd	 4 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL3(address) \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movapd	18 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	20 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	mulpd	22 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm2, %xmm6; \
	movapd	32 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm0, %xmm7; \
	movapd	 6 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL4(address) \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	26 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	28 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	mulpd	30 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm3, %xmm6; \
	movapd	40 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm0, %xmm7; \
	movapd	16 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL5(address) \
	PREFETCH (PREFETCHSIZE + 8) * SIZE + (address) * 1 * SIZE(AA); \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movapd	34 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	36 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	mulpd	38 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm2, %xmm6; \
	movapd	48 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm1, %xmm7; \
	movapd	10 * SIZE + (address) * 1 * SIZE(AA), %xmm1

#define KERNEL6(address) \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	42 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	44 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	mulpd	46 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm3, %xmm6; \
	movapd	56 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm1, %xmm7; \
	movapd	12 * SIZE + (address) * 1 * SIZE(AA), %xmm1

#define KERNEL7(address) \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movapd	50 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	52 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	mulpd	54 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm2, %xmm6; \
	movapd	64 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm1, %xmm7; \
	movapd	14 * SIZE + (address) * 1 * SIZE(AA), %xmm1

#define KERNEL8(address) \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	58 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	60 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	mulpd	62 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm3, %xmm6; \
	movapd	72 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm1, %xmm7; \
	movapd	24 * SIZE + (address) * 1 * SIZE(AA), %xmm1

	PROLOGUE

	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%ebx

	PROFCODE

	EMMS

	movl	%esp, %esi	# save old stack
	subl	$128 + LOCAL_BUFFER_SIZE + STACK_OFFSET, %esp
	andl	$-STACK_ALIGN, %esp
	addl	$STACK_OFFSET, %esp

	movl	OLD_M, %ebx
	movl	OLD_N, %eax
	movl	OLD_K, %ecx
	movl	OLD_A, %edx

	movl	%ebx, M
	movl	%eax, N
	movl	%ecx, K
	movl	%edx, A
	movl	%esi, OLD_STACK
	movd	OLD_OFFT, %mm4

	movl	OLD_B, B
	movl	OLD_C, %ebx
	movl	%ebx, C
	movl	OLD_LDC, LDC
	movd	%mm4, OFFSET
	movd	%mm4, KK

	leal	(, LDC, SIZE), LDC

#ifdef LN
	movl	M, %eax
	leal	(, %eax, SIZE), %eax
	addl	%eax, C
	imull	K, %eax
	addl	%eax, A
#endif

#ifdef RT
	movl	N, %eax
	leal	(, %eax, SIZE), %eax
	imull	K, %eax
	addl	%eax, B

	movl	N, %eax
	imull	LDC, %eax
	addl	%eax, C
#endif

#ifdef RN
	negl	KK
#endif

#ifdef RT
	movl	N, %eax
	subl	OFFSET, %eax
	movl	%eax, KK
#endif

	movl	N, %eax
	sarl	$2, %eax
	movl	%eax, J
	jle	.L30
	ALIGN_2

.L01:
#ifdef LN
	movl	OFFSET, %eax
	addl	M, %eax
	movl	%eax, KK
#endif

	leal	BUFFER, BB

#ifdef RT
	movl	K, %eax
	sall	$2 + BASE_SHIFT, %eax
	subl	%eax, B
#endif

#if defined(LN) || defined(RT)
	movl	KK, %eax
	movl	B, BORIG
	leal	(, %eax, SIZE), %eax
	leal	(B,  %eax, 4), B
	leal	(BB, %eax, 8), BB
#endif

#ifdef LT
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	sarl	$1, %eax
	jle	.L05
	ALIGN_4

.L02:
#define COPYPREFETCH 40

	prefetchnta	(COPYPREFETCH) * SIZE(B)

	movq	0 * SIZE(B), %mm0
	movq	1 * SIZE(B), %mm1
	movq	2 * SIZE(B), %mm2
	movq	3 * SIZE(B), %mm3
	movq	4 * SIZE(B), %mm4
	movq	5 * SIZE(B), %mm5
	movq	6 * SIZE(B), %mm6
	movq	7 * SIZE(B), %mm7

	movq	%mm0,  0 * SIZE(BB)
	movq	%mm0,  1 * SIZE(BB)
	movq	%mm1,  2 * SIZE(BB)
	movq	%mm1,  3 * SIZE(BB)
	movq	%mm2,  4 * SIZE(BB)
	movq	%mm2,  5 * SIZE(BB)
	movq	%mm3,  6 * SIZE(BB)
	movq	%mm3,  7 * SIZE(BB)
	movq	%mm4,  8 * SIZE(BB)
	movq	%mm4,  9 * SIZE(BB)
	movq	%mm5, 10 * SIZE(BB)
	movq	%mm5, 11 * SIZE(BB)
	movq	%mm6, 12 * SIZE(BB)
	movq	%mm6, 13 * SIZE(BB)
	movq	%mm7, 14 * SIZE(BB)
	movq	%mm7, 15 * SIZE(BB)

	addl	$ 8 * SIZE, B
	addl	$16 * SIZE, BB
	decl	%eax
	jne	.L02
	ALIGN_2

.L05:
#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	andl	$1, %eax
	BRANCH
	jle	.L10

	movq	0 * SIZE(B), %mm0
	movq	1 * SIZE(B), %mm1
	movq	2 * SIZE(B), %mm2
	movq	3 * SIZE(B), %mm3

	movq	%mm0,  0 * SIZE(BB)
	movq	%mm0,  1 * SIZE(BB)
	movq	%mm1,  2 * SIZE(BB)
	movq	%mm1,  3 * SIZE(BB)
	movq	%mm2,  4 * SIZE(BB)
	movq	%mm2,  5 * SIZE(BB)
	movq	%mm3,  6 * SIZE(BB)
	movq	%mm3,  7 * SIZE(BB)

	addl	$4 * SIZE, B
	ALIGN_4

.L10:
#if defined(LT) || defined(RN)
	movl	A, AA
#else
	movl	A, %eax
	movl	%eax, AORIG
#endif

	leal	(, LDC, 4), %eax

#ifdef RT
	subl	%eax, C
#endif
	movl	C, CO1
#ifndef RT
	addl	%eax, C
#endif

	movl	M, %ebx
	testl	$1, %ebx	# i = (m >> 2)
	jle	.L20

#ifdef LN
	movl	K, %eax
	sall	$BASE_SHIFT, %eax
	subl	%eax, AORIG
#endif

#if defined(LN) || defined(RT)
	movl	KK, %eax
	movl	AORIG, AA
	leal	(AA, %eax, SIZE), AA
#endif

	leal	BUFFER, BB

#if defined(LN) || defined(RT)
	movl	KK, %eax
	sall	$3 + BASE_SHIFT, %eax
	addl	%eax, BB
#endif

	pxor	%xmm4, %xmm4
	pxor	%xmm5, %xmm5
	pxor	%xmm6, %xmm6
	pxor	%xmm7, %xmm7

	movlpd	0 * SIZE(AA), %xmm0
	movlpd	4 * SIZE(AA), %xmm1
	movlpd	0 * SIZE(BB), %xmm2
	movlpd	8 * SIZE(BB), %xmm3

#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	sarl	$3, %eax
	je	.L25
	ALIGN_4

.L22:
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm4
#if defined(OPTERON) || defined(BARCELONA)
	PREFETCH (PREFETCHSIZE + 0) * SIZE(AA)
#endif
	movlpd	 2 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm5
	movlpd	 4 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	mulsd	 6 * SIZE(BB), %xmm0
	addsd	%xmm2, %xmm6
	movlpd	16 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm7
	movlpd	 1 * SIZE(AA), %xmm0
	mulsd	%xmm0, %xmm3
	addsd	%xmm3, %xmm4
	movlpd	10 * SIZE(BB), %xmm3
	mulsd	%xmm0, %xmm3
	addsd	%xmm3, %xmm5
	movlpd	12 * SIZE(BB), %xmm3
	mulsd	%xmm0, %xmm3
	mulsd	14 * SIZE(BB), %xmm0
	addsd	%xmm3, %xmm6
	movlpd	24 * SIZE(BB), %xmm3
	addsd	%xmm0, %xmm7
	movlpd	 2 * SIZE(AA), %xmm0
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm4
	movlpd	18 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm5
	movlpd	20 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	mulsd	22 * SIZE(BB), %xmm0
	addsd	%xmm2, %xmm6
	movlpd	32 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm7
	movlpd	 3 * SIZE(AA), %xmm0
	mulsd	%xmm0, %xmm3
	addsd	%xmm3, %xmm4
	movlpd	26 * SIZE(BB), %xmm3
	mulsd	%xmm0, %xmm3
	addsd	%xmm3, %xmm5
	movlpd	28 * SIZE(BB), %xmm3
	mulsd	%xmm0, %xmm3
	mulsd	30 * SIZE(BB), %xmm0
	addsd	%xmm3, %xmm6
	movlpd	40 * SIZE(BB), %xmm3
	addsd	%xmm0, %xmm7
	movlpd	 8 * SIZE(AA), %xmm0
#if defined(OPTERON) || defined(BARCELONA)
	PREFETCH (PREFETCHSIZE + 8) * SIZE(AA)
#endif
	mulsd	%xmm1, %xmm2
	addsd	%xmm2, %xmm4
	movlpd	34 * SIZE(BB), %xmm2
	mulsd	%xmm1, %xmm2
	addsd	%xmm2, %xmm5
	movlpd	36 * SIZE(BB), %xmm2
	mulsd	%xmm1, %xmm2
	mulsd	38 * SIZE(BB), %xmm1
	addsd	%xmm2, %xmm6
	movlpd	48 * SIZE(BB), %xmm2
	addsd	%xmm1, %xmm7
	movlpd	 5 * SIZE(AA), %xmm1
	mulsd	%xmm1, %xmm3
	addsd	%xmm3, %xmm4
	movlpd	42 * SIZE(BB), %xmm3
	mulsd	%xmm1, %xmm3
	addsd	%xmm3, %xmm5
	movlpd	44 * SIZE(BB), %xmm3
	mulsd	%xmm1, %xmm3
	mulsd	46 * SIZE(BB), %xmm1
	addsd	%xmm3, %xmm6
	movlpd	56 * SIZE(BB), %xmm3
	addsd	%xmm1, %xmm7
	movlpd	 6 * SIZE(AA), %xmm1
	mulsd	%xmm1, %xmm2
	addsd	%xmm2, %xmm4
	movlpd	50 * SIZE(BB), %xmm2
	mulsd	%xmm1, %xmm2
	addsd	%xmm2, %xmm5
	movlpd	52 * SIZE(BB), %xmm2
	mulsd	%xmm1, %xmm2
	mulsd	54 * SIZE(BB), %xmm1
	addsd	%xmm2, %xmm6
	movlpd	64 * SIZE(BB), %xmm2
	addsd	%xmm1, %xmm7
	movlpd	 7 * SIZE(AA), %xmm1
	mulsd	%xmm1, %xmm3
	addsd	%xmm3, %xmm4
	movlpd	58 * SIZE(BB), %xmm3
	mulsd	%xmm1, %xmm3
	addsd	%xmm3, %xmm5
	movlpd	60 * SIZE(BB), %xmm3
	mulsd	%xmm1, %xmm3
	mulsd	62 * SIZE(BB), %xmm1
	addsd	%xmm3, %xmm6
	movlpd	72 * SIZE(BB), %xmm3
	addl	$64 * SIZE, BB
	addsd	%xmm1, %xmm7
	movlpd	12 * SIZE(AA), %xmm1
	addl	$8 * SIZE, AA
	decl	%eax
	jne	.L22
	ALIGN_4

.L25:
#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	andl	$7, %eax	# if (k & 1)
	BRANCH
	je	.L28

.L26:
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm4
	movlpd	 2 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm5
	movlpd	 4 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	mulsd	 6 * SIZE(BB), %xmm0
	addsd	%xmm2, %xmm6
	movlpd	 8 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm7
	movlpd	 1 * SIZE(AA), %xmm0

	addl	$1 * SIZE, AA
	addl	$8 * SIZE, BB
	decl	%eax
	jg	.L26
	ALIGN_4

.L28:
#if defined(LN) || defined(RT)
	movl	KK, %eax
#ifdef LN
	subl	$1, %eax
#else
	subl	$4, %eax
#endif

	movl	AORIG, AA
	movl	BORIG, B
	leal	BUFFER, BB

	leal	(, %eax, SIZE), %eax
	addl	%eax, AA
	leal	(B,  %eax, 4), B
	leal	(BB, %eax, 8), BB
#endif

#if defined(LN) || defined(LT)
	unpcklpd %xmm5, %xmm4
	unpcklpd %xmm7, %xmm6

	movapd	0 * SIZE(B), %xmm2
	movapd	2 * SIZE(B), %xmm5

	subpd	%xmm4, %xmm2
	subpd	%xmm6, %xmm5
#else
	movlpd	0 * SIZE(AA), %xmm0
	movlpd	1 * SIZE(AA), %xmm1
	movlpd	2 * SIZE(AA), %xmm2
	movlpd	3 * SIZE(AA), %xmm3

	subsd	%xmm4, %xmm0
	subsd	%xmm5, %xmm1
	subsd	%xmm6, %xmm2
	subsd	%xmm7, %xmm3
#endif

#ifdef LN
	movlpd	0 * SIZE(AA), %xmm4
	movhpd	0 * SIZE(AA), %xmm4
	mulpd	%xmm4, %xmm2
	mulpd	%xmm4, %xmm5
#endif

#ifdef LT
	movlpd	0 * SIZE(AA), %xmm4
	movhpd	0 * SIZE(AA), %xmm4
	mulpd	%xmm4, %xmm2
	mulpd	%xmm4, %xmm5
#endif

#ifdef RN
	movlpd	 0 * SIZE(B), %xmm4
	mulsd	%xmm4, %xmm0

	movlpd	 1 * SIZE(B), %xmm4
	mulsd	%xmm0, %xmm4
	subsd	%xmm4, %xmm1
	movlpd	 2 * SIZE(B), %xmm4
	mulsd	%xmm0, %xmm4
	subsd	%xmm4, %xmm2
	movlpd	 3 * SIZE(B), %xmm4
	mulsd	%xmm0, %xmm4
	subsd	%xmm4, %xmm3

	movlpd	 5 * SIZE(B), %xmm4
	mulsd	%xmm4, %xmm1
	movlpd	 6 * SIZE(B), %xmm4
	mulsd	%xmm1, %xmm4
	subsd	%xmm4, %xmm2
	movlpd	 7 * SIZE(B), %xmm4
	mulsd	%xmm1, %xmm4
	subsd	%xmm4, %xmm3

	movlpd	10 * SIZE(B), %xmm4
	mulsd	%xmm4, %xmm2
	movlpd	11 * SIZE(B), %xmm4
	mulsd	%xmm2, %xmm4
	subsd	%xmm4, %xmm3
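
Notes (not part of the original file): this is the bottom-level double-precision TRSM kernel from Goto's optimized BLAS, specialized for SSE2 with a 2x4 register tile. The copy loops at .L02/.L05 store every element of B twice in BUFFER, so that one aligned movapd later yields a (b, b) pair that can be multiplied against two packed A values at once; SSE2 has no broadcast load, so the duplication is paid once during packing rather than on every multiply. The .L22/.L26 loops then accumulate the four dot products of one row of the tile in %xmm4..%xmm7, and the .L28 tail converts that accumulated update into the substitution step of the triangular solve.

As a reading aid, here is a minimal C sketch of what the 1x4 micro-tile path above appears to compute. Everything in it is illustrative: the function name trsm_micro_1x4_ref is invented, and the inv_diag parameter encodes an assumption (suggested by the kernel multiplying against 0 * SIZE(AA) rather than dividing) that the packed A panel stores pre-inverted diagonal entries.

	#include <stddef.h>

	/* Hypothetical scalar reference for the 1x4 path (not from the source).
	 * a: packed column of A (k doubles); b: packed B panel, 4 doubles per
	 * row (the element duplication in BB is an SSE2 artifact, omitted here);
	 * c: the 1x4 strip of the right-hand side being solved in place;
	 * inv_diag: assumed reciprocal of the diagonal entry of A. */
	static void trsm_micro_1x4_ref(size_t k, const double *a, const double *b,
	                               double *c, double inv_diag)
	{
	    double acc[4] = {0.0, 0.0, 0.0, 0.0};

	    /* .L22/.L26 loops: accumulate the rank-k update into acc[0..3],
	     * mirroring %xmm4..%xmm7. */
	    for (size_t l = 0; l < k; l++)
	        for (int j = 0; j < 4; j++)
	            acc[j] += a[l] * b[4 * l + j];

	    /* .L28 tail: subtract the update from the stored strip and apply
	     * the diagonal step of the substitution. */
	    for (int j = 0; j < 4; j++)
	        c[j] = (c[j] - acc[j]) * inv_diag;
	}

The remaining 2-row paths and the n&2/n&1 cleanup loops follow the same pattern with pairs of values processed by mulpd/addpd instead of mulsd/addsd; the listing above is truncated before those sections.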