📄 trsm_kernel_lt_2x4_sse3.s
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define STACK	16
#define ARGS	16

/* Incoming stack arguments (m, n, k, alpha, a, b, c, ldc, offset) */
#define M	 4 + STACK + ARGS(%esp)
#define N	 8 + STACK + ARGS(%esp)
#define K	12 + STACK + ARGS(%esp)
#define ALPHA	16 + STACK + ARGS(%esp)
#define A	24 + STACK + ARGS(%esp)
#define ARG_B	28 + STACK + ARGS(%esp)
#define C	32 + STACK + ARGS(%esp)
#define ARG_LDC	36 + STACK + ARGS(%esp)
#define OFFSET	40 + STACK + ARGS(%esp)

/* Local stack variables */
#define J	 0 + STACK(%esp)
#define KK	 4 + STACK(%esp)
#define KKK	 8 + STACK(%esp)
#define AORIG	12 + STACK(%esp)

#ifdef PENTIUM4
#define PREFETCH     prefetcht1
#define PREFETCHSIZE 84
#endif

#ifdef PENRYN
#define PREFETCH     prefetcht1
#define PREFETCHSIZE 84
#endif

#ifdef PENTIUMM
#define PREFETCH     prefetcht1
#define PREFETCHSIZE 84
#endif

#define AA	%edx
#define BB	%ecx
#define LDC	%ebp
#define B	%edi
#define CO1	%esi

	PROLOGUE

	subl	$ARGS, %esp

	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%ebx

	PROFCODE

	movl	ARG_B,   B
	movl	ARG_LDC, LDC
	movl	OFFSET, %eax
#ifdef RN
	negl	%eax
#endif
	movl	%eax, KK

	leal	(, LDC, SIZE), LDC

#ifdef LN
	movl	M, %eax
	leal	(, %eax, SIZE), %eax
	addl	%eax, C
	imull	K, %eax
	addl	%eax, A
#endif

#ifdef RT
	movl	N, %eax
	leal	(, %eax, SIZE), %eax
	imull	K, %eax
	addl	%eax, B

	movl	N, %eax
	imull	LDC, %eax
	addl	%eax, C
#endif

#ifdef RT
	movl	N, %eax
	subl	OFFSET, %eax
	movl	%eax, KK
#endif

	movl	N, %eax
	sarl	$2, %eax	# j = (n >> 2)
	movl	%eax, J
	jle	.L30
	ALIGN_2

.L10:	# loop over 4-column panels of B and C
#if defined(LT) || defined(RN)
	movl	A, AA
#else
	movl	A, %eax
	movl	%eax, AORIG
#endif

#ifdef RT
	movl	K, %eax
	sall	$2 + BASE_SHIFT, %eax
	subl	%eax, B
#endif

	leal	(, LDC, 4), %eax
#ifdef RT
	subl	%eax, C
#endif
	movl	C, CO1
#ifndef RT
	addl	%eax, C
#endif

#ifdef LN
	movl	OFFSET, %eax
	addl	M, %eax
	movl	%eax, KK
#endif

#ifdef LT
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

	movl	M,  %ebx
	sarl	$1, %ebx	# i = (m >> 1)
	jle	.L20
	ALIGN_4

.L11:	# 2x4 sub-kernel
#ifdef LN
	movl	K, %eax
	sall	$1 + BASE_SHIFT, %eax
	subl	%eax, AORIG
#endif

#if defined(LN) || defined(RT)
	movl	KK, %eax
	movl	AORIG, AA
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
#endif

	movl	B, BB

#if defined(LN) || defined(RT)
	movl	KK, %eax
	sall	$2 + BASE_SHIFT, %eax
	addl	%eax, BB
#endif

	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movapd	 8 * SIZE(AA), %xmm1
	pxor	%xmm5, %xmm5
	movddup	 0 * SIZE(BB), %xmm2
	pxor	%xmm6, %xmm6
	movddup	 8 * SIZE(BB), %xmm3
	pxor	%xmm7, %xmm7

	leal	(LDC, LDC, 2), %eax

#ifdef LN
	prefetchnta	-2 * SIZE(CO1)
	prefetchnta	-2 * SIZE(CO1, LDC, 1)
	prefetchnta	-2 * SIZE(CO1, LDC, 2)
	prefetchnta	-2 * SIZE(CO1, %eax, 1)
#else
	prefetchnta	 2 * SIZE(CO1)
	prefetchnta	 2 * SIZE(CO1, LDC, 1)
	prefetchnta	 2 * SIZE(CO1, LDC, 2)
	prefetchnta	 2 * SIZE(CO1, %eax, 1)
#endif

#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	sarl	$3, %eax
	je	.L15
	ALIGN_4

.L12:	# main GEMM loop, unrolled 8x over k:
	# accumulate the 2x4 products of A and B into %xmm4..%xmm7
	mulpd	%xmm0, %xmm2
	PREFETCH (PREFETCHSIZE + 0) * SIZE(AA)
	addpd	%xmm2, %xmm4
	movddup	 1 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm5
	movddup	 2 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm6
	movddup	 3 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	movapd	 2 * SIZE(AA), %xmm0
	addpd	%xmm2, %xmm7
	movddup	 4 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm4
	movddup	 5 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm5
	movddup	 6 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm6
	movddup	 7 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	movapd	 4 * SIZE(AA), %xmm0
	addpd	%xmm2, %xmm7
	movddup	16 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm4
	movddup	 9 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm5
	movddup	10 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm6
	movddup	11 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	movapd	 6 * SIZE(AA), %xmm0
	addpd	%xmm3, %xmm7
	movddup	12 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm4
	movddup	13 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm5
	movddup	14 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm6
	movddup	15 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	movapd	16 * SIZE(AA), %xmm0
	addpd	%xmm3, %xmm7
	movddup	24 * SIZE(BB), %xmm3
	mulpd	%xmm1, %xmm2
	addpd	%xmm2, %xmm4
	movddup	17 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm2
	addpd	%xmm2, %xmm5
	movddup	18 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm2
	addpd	%xmm2, %xmm6
	movddup	19 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm2
	movapd	10 * SIZE(AA), %xmm1
	addpd	%xmm2, %xmm7
	movddup	20 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm2
	addpd	%xmm2, %xmm4
	movddup	21 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm2
	addpd	%xmm2, %xmm5
	movddup	22 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm2
	addpd	%xmm2, %xmm6
	movddup	23 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm2
	movapd	12 * SIZE(AA), %xmm1
	addpd	%xmm2, %xmm7
	movddup	32 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm3, %xmm4
	movddup	25 * SIZE(BB), %xmm3
	mulpd	%xmm1, %xmm3
	addpd	%xmm3, %xmm5
	movddup	26 * SIZE(BB), %xmm3
	mulpd	%xmm1, %xmm3
	addpd	%xmm3, %xmm6
	movddup	27 * SIZE(BB), %xmm3
	mulpd	%xmm1, %xmm3
	movapd	14 * SIZE(AA), %xmm1
	addpd	%xmm3, %xmm7
	movddup	28 * SIZE(BB), %xmm3
	mulpd	%xmm1, %xmm3
	addpd	%xmm3, %xmm4
	movddup	29 * SIZE(BB), %xmm3
	mulpd	%xmm1, %xmm3
	addpd	%xmm3, %xmm5
	movddup	30 * SIZE(BB), %xmm3
	mulpd	%xmm1, %xmm3
	addpd	%xmm3, %xmm6
	movddup	31 * SIZE(BB), %xmm3
	mulpd	%xmm1, %xmm3
	movapd	24 * SIZE(AA), %xmm1
	addpd	%xmm3, %xmm7
	movddup	40 * SIZE(BB), %xmm3

	addl	$32 * SIZE, BB
	addl	$16 * SIZE, AA
	decl	%eax
	jne	.L12
	ALIGN_4

.L15:
#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	andl	$7, %eax	# k remainder (k & 7)
	BRANCH
	je	.L18
	ALIGN_3

.L16:	# k-remainder loop
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm4
	movddup	 1 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm5
	movddup	 2 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm6
	movddup	 3 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	movapd	 2 * SIZE(AA), %xmm0
	addpd	%xmm2, %xmm7
	movddup	 4 * SIZE(BB), %xmm2

	addl	$2 * SIZE, AA
	addl	$4 * SIZE, BB
	decl	%eax
	jg	.L16
	ALIGN_4

.L18:
#if defined(LN) || defined(RT)
	movl	KK, %eax
#ifdef LN
	subl	$2, %eax
#else
	subl	$4, %eax
#endif
	movl	AORIG, AA
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(B,  %eax, 4), BB
#endif

	# subtract the accumulated products from the packed right-hand side
#if defined(LN) || defined(LT)
	movapd	%xmm4, %xmm0
	unpcklpd %xmm5, %xmm4
	unpckhpd %xmm5, %xmm0

	movapd	%xmm6, %xmm1
	unpcklpd %xmm7, %xmm6
	unpckhpd %xmm7, %xmm1

	movapd	 0 * SIZE(BB), %xmm2
	movapd	 2 * SIZE(BB), %xmm5
	movapd	 4 * SIZE(BB), %xmm3
	movapd	 6 * SIZE(BB), %xmm7

	subpd	%xmm4, %xmm2
	subpd	%xmm6, %xmm5
	subpd	%xmm0, %xmm3
	subpd	%xmm1, %xmm7
#else
	movapd	 0 * SIZE(AA), %xmm0
	movapd	 2 * SIZE(AA), %xmm1
	movapd	 4 * SIZE(AA), %xmm2
	movapd	 6 * SIZE(AA), %xmm3

	subpd	%xmm4, %xmm0
	subpd	%xmm5, %xmm1
	subpd	%xmm6, %xmm2
	subpd	%xmm7, %xmm3
#endif

	# triangular solve against the diagonal block; multiplying by the
	# diagonal entries works because the pack routine stores them inverted
#ifdef LN
	movddup	 3 * SIZE(AA), %xmm4
	mulpd	%xmm4, %xmm3
	mulpd	%xmm4, %xmm7

	movddup	 2 * SIZE(AA), %xmm4
	movapd	%xmm4, %xmm6
	mulpd	%xmm3, %xmm4
	subpd	%xmm4, %xmm2
	mulpd	%xmm7, %xmm6
	subpd	%xmm6, %xmm5

	movddup	 0 * SIZE(AA), %xmm4
	mulpd	%xmm4, %xmm2
	mulpd	%xmm4, %xmm5
#endif

#ifdef LT
	movddup	 0 * SIZE(AA), %xmm4
	mulpd	%xmm4, %xmm2
	mulpd	%xmm4, %xmm5

	movddup	 1 * SIZE(AA), %xmm4
	movapd	%xmm4, %xmm6
	mulpd	%xmm2, %xmm4
	subpd	%xmm4, %xmm3
	mulpd	%xmm5, %xmm6
	subpd	%xmm6, %xmm7

	movddup	 3 * SIZE(AA), %xmm4
	mulpd	%xmm4, %xmm3
	mulpd	%xmm4, %xmm7
#endif

#ifdef RN
	movddup	 0 * SIZE(BB), %xmm4
	mulpd	%xmm4, %xmm0

	movddup	 1 * SIZE(BB), %xmm4
	mulpd	%xmm0, %xmm4
	subpd	%xmm4, %xmm1
	movddup	 2 * SIZE(BB), %xmm4
	mulpd	%xmm0, %xmm4
	subpd	%xmm4, %xmm2
	movddup	 3 * SIZE(BB), %xmm4
	mulpd	%xmm0, %xmm4
	subpd	%xmm4, %xmm3

	movddup	 5 * SIZE(BB), %xmm4
	mulpd	%xmm4, %xmm1

	movddup	 6 * SIZE(BB), %xmm4
	mulpd	%xmm1, %xmm4
	subpd	%xmm4, %xmm2
	movddup	 7 * SIZE(BB), %xmm4
	mulpd	%xmm1, %xmm4
	subpd	%xmm4, %xmm3

	movddup	10 * SIZE(BB), %xmm4
	mulpd	%xmm4, %xmm2

	movddup	11 * SIZE(BB), %xmm4
	mulpd	%xmm2, %xmm4
	subpd	%xmm4, %xmm3

	movddup	15 * SIZE(BB), %xmm4
	mulpd	%xmm4, %xmm3
#endif

#ifdef RT
	movddup	15 * SIZE(BB), %xmm4
	mulpd	%xmm4, %xmm3

	movddup	14 * SIZE(BB), %xmm4
	mulpd	%xmm3, %xmm4
	subpd	%xmm4, %xmm2
	movddup	13 * SIZE(BB), %xmm4
	mulpd	%xmm3, %xmm4
	subpd	%xmm4, %xmm1
	movddup	12 * SIZE(BB), %xmm4
	mulpd	%xmm3, %xmm4
	subpd	%xmm4, %xmm0

	movddup	10 * SIZE(BB), %xmm4
	mulpd	%xmm4, %xmm2

	movddup	 9 * SIZE(BB), %xmm4
	mulpd	%xmm2, %xmm4
	subpd	%xmm4, %xmm1
	movddup	 8 * SIZE(BB), %xmm4
	mulpd	%xmm2, %xmm4
	subpd	%xmm4, %xmm0

	movddup	 5 * SIZE(BB), %xmm4
	mulpd	%xmm4, %xmm1

	movddup	 4 * SIZE(BB), %xmm4
	mulpd	%xmm1, %xmm4
	subpd	%xmm4, %xmm0

	movddup	 0 * SIZE(BB), %xmm4
	mulpd	%xmm4, %xmm0
#endif

	# store the solved tile back into the packed panel ...
#if defined(LN) || defined(LT)
	movapd	%xmm2, 0 * SIZE(BB)
	movapd	%xmm5, 2 * SIZE(BB)
	movapd	%xmm3, 4 * SIZE(BB)
	movapd	%xmm7, 6 * SIZE(BB)
#else
	movapd	%xmm0, 0 * SIZE(AA)
	movapd	%xmm1, 2 * SIZE(AA)
	movapd	%xmm2, 4 * SIZE(AA)
	movapd	%xmm3, 6 * SIZE(AA)
#endif

#ifdef LN
	subl	$2 * SIZE, CO1
#endif

	leal	(LDC, LDC, 2), %eax

	# ... and into C
#if defined(LN) || defined(LT)
	movsd	%xmm2, 0 * SIZE(CO1)
	movsd	%xmm3, 1 * SIZE(CO1)
	movhpd	%xmm2, 0 * SIZE(CO1, LDC, 1)
	movhpd	%xmm3, 1 * SIZE(CO1, LDC, 1)
	movsd	%xmm5, 0 * SIZE(CO1, LDC, 2)
	movsd	%xmm7, 1 * SIZE(CO1, LDC, 2)
	movhpd	%xmm5, 0 * SIZE(CO1, %eax, 1)
	movhpd	%xmm7, 1 * SIZE(CO1, %eax, 1)
#else
	movsd	%xmm0, 0 * SIZE(CO1)
	movhpd	%xmm0, 1 * SIZE(CO1)
	movsd	%xmm1, 0 * SIZE(CO1, LDC, 1)
	movhpd	%xmm1, 1 * SIZE(CO1, LDC, 1)
	movsd	%xmm2, 0 * SIZE(CO1, LDC, 2)
	movhpd	%xmm2, 1 * SIZE(CO1, LDC, 2)
	movsd	%xmm3, 0 * SIZE(CO1, %eax, 1)
	movhpd	%xmm3, 1 * SIZE(CO1, %eax, 1)
#endif

#ifndef LN
	addl	$2 * SIZE, CO1
#endif

#if defined(LT) || defined(RN)
	movl	K, %eax
	subl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB
#endif

#ifdef LN
	subl	$2, KK
#endif

#ifdef LT
	addl	$2, KK
#endif

#ifdef RT
	movl	K, %eax
	sall	$1 + BASE_SHIFT, %eax
	addl	%eax, AORIG
#endif

	decl	%ebx	# i--
	jg	.L11
	ALIGN_4

.L20:	# 1x4 tail sub-kernel for odd m
	movl	M, %ebx
	testl	$1, %ebx	# m & 1
	jle	.L29

#ifdef LN
	movl	K, %eax
	sall	$BASE_SHIFT, %eax
	subl	%eax, AORIG
#endif

#if defined(LN) || defined(RT)
	movl	KK, %eax
	movl	AORIG, AA
	leal	(AA, %eax, SIZE), AA
#endif

	movl	B, BB

#if defined(LN) || defined(RT)
	movl	KK, %eax
	sall	$2 + BASE_SHIFT, %eax
	addl	%eax, BB
#endif

	movddup	 0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movddup	 8 * SIZE(AA), %xmm1
	pxor	%xmm5, %xmm5
	movapd	 0 * SIZE(BB), %xmm2
	pxor	%xmm6, %xmm6
	movapd	 8 * SIZE(BB), %xmm3
	pxor	%xmm7, %xmm7

#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	sarl	$4, %eax
	je	.L25
	ALIGN_4

.L22:	# main GEMM loop of the 1x4 sub-kernel, unrolled 16x over k
	PREFETCH (PREFETCHSIZE + 0) * SIZE(AA)
	mulpd	%xmm0, %xmm2
	mulpd	 2 * SIZE(BB), %xmm0
	addpd	%xmm2, %xmm4
	movapd	 4 * SIZE(BB), %xmm2
	addpd	%xmm0, %xmm5
	movddup	 1 * SIZE(AA), %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	 6 * SIZE(BB), %xmm0
	addpd	%xmm2, %xmm6
	movapd	16 * SIZE(BB), %xmm2
	addpd	%xmm0, %xmm7
	movddup	 2 * SIZE(AA), %xmm0
	mulpd	%xmm0, %xmm3
	mulpd	10 * SIZE(BB), %xmm0
	addpd	%xmm3, %xmm4
	movapd	12 * SIZE(BB), %xmm3
	addpd	%xmm0, %xmm5
	movddup	 3 * SIZE(AA), %xmm0
	mulpd	%xmm0, %xmm3
	mulpd	14 * SIZE(BB), %xmm0
	addpd	%xmm3, %xmm6
	movapd	24 * SIZE(BB), %xmm3
	addpd	%xmm0, %xmm7
	movddup	 4 * SIZE(AA), %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	18 * SIZE(BB), %xmm0
	addpd	%xmm2, %xmm4
	movapd	20 * SIZE(BB), %xmm2
	addpd	%xmm0, %xmm5
	movddup	 5 * SIZE(AA), %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	22 * SIZE(BB), %xmm0
	addpd	%xmm2, %xmm6
	movapd	32 * SIZE(BB), %xmm2
	addpd	%xmm0, %xmm7
	movddup	 6 * SIZE(AA), %xmm0
	mulpd	%xmm0, %xmm3
	mulpd	26 * SIZE(BB), %xmm0
	addpd	%xmm3, %xmm4
	movapd	28 * SIZE(BB), %xmm3
	addpd	%xmm0, %xmm5
	movddup	 7 * SIZE(AA), %xmm0
	mulpd	%xmm0, %xmm3
	mulpd	30 * SIZE(BB), %xmm0
	addpd	%xmm3, %xmm6
	movapd	40 * SIZE(BB), %xmm3
	addpd	%xmm0, %xmm7
	movddup	16 * SIZE(AA), %xmm0
	mulpd	%xmm1, %xmm2
	mulpd	34 * SIZE(BB), %xmm1
	addpd	%xmm2, %xmm4
	movapd	36 * SIZE(BB), %xmm2
	addpd	%xmm1, %xmm5
	movddup	 9 * SIZE(AA), %xmm1
	mulpd	%xmm1, %xmm2
	mulpd	38 * SIZE(BB), %xmm1
	addpd	%xmm2, %xmm6
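
For readers following the tile algebra: the macros at the top of the file spell out the kernel's stack arguments (m, n, k, alpha, a, b, c, ldc, offset), and the #ifdef LT block performs a small forward substitution on each 2x4 tile once the GEMM loops have reduced the right-hand side. The C sketch below models just that LT solve step. It is an illustration, not part of the library: the function name and the row-major tile layout are invented here, and it assumes the pack routine stores the diagonal entries of A pre-inverted, which is what lets the assembly multiply by 0 * SIZE(AA) and 3 * SIZE(AA) instead of dividing.

/* Hypothetical scalar model of the LT solve on one 2x4 tile (C99).
 * a: packed 2x2 triangular block; assumed a[0] = 1/A(0,0),
 *    a[1] = the off-diagonal coupling term, a[3] = 1/A(1,1).
 * b: 2x4 right-hand-side tile, row-major; on entry it already holds
 *    the rhs minus the products accumulated by the .L12/.L16 loops. */
static void trsm_lt_2x4_ref(const double *a, double b[8])
{
    for (int j = 0; j < 4; j++) {
        b[0*4 + j] *= a[0];               /* row 0: multiply by inverted diagonal     */
        b[1*4 + j] -= a[1] * b[0*4 + j];  /* eliminate row 0's contribution to row 1  */
        b[1*4 + j] *= a[3];               /* row 1: multiply by inverted diagonal     */
    }
}

In the kernel itself the solved tile is then written twice: back into the packed panel (the movapd stores to BB), so that later tiles see the updated values, and into the output matrix (the movsd/movhpd stores through CO1).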