zgemm_kernel_2x2_penryn.s
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define STACK   16
#define ARGS    16

#define M        4 + STACK + ARGS(%esp)
#define N        8 + STACK + ARGS(%esp)
#define K       12 + STACK + ARGS(%esp)
#define ALPHA_R 16 + STACK + ARGS(%esp)
#define ALPHA_I 20 + STACK + ARGS(%esp)
#define A       24 + STACK + ARGS(%esp)
#define ARG_B   28 + STACK + ARGS(%esp)
#define C       32 + STACK + ARGS(%esp)
#define ARG_LDC 36 + STACK + ARGS(%esp)
#define OFFSET  40 + STACK + ARGS(%esp)

#define J        0 + STACK(%esp)
#define BX       4 + STACK(%esp)
#define KK       8 + STACK(%esp)
#define KKK     12 + STACK(%esp)

#define PREFETCH_R   (8 * 4)
#define PREFETCHSIZE (8 * 17 + 4)
#define PREFETCH     prefetcht0

#define AA  %edx
#define BB  %ecx
#define LDC %ebp
#define B   %edi
#define C1  %esi
#define I   %ebx

#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define ADD1    addps
#define ADD2    addps
#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
#define ADD1    addps
#define ADD2    addps
#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)
#define ADD1    addps
#define ADD2    addps
#else
#define ADD1    addps
#define ADD2    subps
#endif

        PROLOGUE

        subl    $ARGS, %esp     # Generate Stack Frame

        pushl   %ebp
        pushl   %edi
        pushl   %esi
        pushl   %ebx

        PROFCODE

        movl    ARG_B,   B
        movl    ARG_LDC, LDC

#ifdef TRMMKERNEL
        movl    OFFSET, %eax
#ifndef LEFT
        negl    %eax
#endif
        movl    %eax, KK
#endif

        subl    $-32 * SIZE, A
        subl    $-32 * SIZE, B

        sall    $ZBASE_SHIFT, LDC

        movl    N, %eax
        sarl    $1, %eax
        movl    %eax, J
        jle     .L30
        ALIGN_4

.L01:
#if defined(TRMMKERNEL) && defined(LEFT)
        movl    OFFSET, %eax
        movl    %eax, KK
#endif

        movl    B, BX

        movl    C, C1
        movl    A, AA

        movl    M, %ebx
        sarl    $1, %ebx
        jle     .L20
        ALIGN_4

.L10:
#if !defined(TRMMKERNEL) || \
        (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
        (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        movl    B, BB
#else
        movl    B, BB
        movl    KK, %eax
        leal    (, %eax, SIZE), %eax
        leal    (AA, %eax, 4), AA
        leal    (BB, %eax, 4), BB
#endif

        movl    BX, %eax
        prefetcht2 (PREFETCH_R +  0) * SIZE(%eax)
        prefetcht2 (PREFETCH_R + 16) * SIZE(%eax)
        subl    $-16 * SIZE, BX

        movaps  -32 * SIZE(AA), %xmm0
        pxor    %xmm2, %xmm2
        movaps  -32 * SIZE(BB), %xmm1
        pxor    %xmm3, %xmm3

        pxor    %xmm4, %xmm4
        prefetcht0 3 * SIZE(C1)
        pxor    %xmm5, %xmm5
        prefetcht0 3 * SIZE(C1, LDC)
        pxor    %xmm6, %xmm6
        pxor    %xmm7, %xmm7

#ifndef TRMMKERNEL
        movl    K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl    K, %eax
        subl    KK, %eax
        movl    %eax, KKK
#else
        movl    KK, %eax
#ifdef LEFT
        addl    $2, %eax
#else
        addl    $2, %eax
#endif
        movl    %eax, KKK
#endif
        sarl    $3, %eax
        je      .L15
        ALIGN_4
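/* 2x2 block, main loop: unrolled 8x over K and software-pipelined (the
   loads for the next k step are issued while the previous products are
   still being accumulated).  xmm0 carries two complex elements of A and
   xmm1 two of B; pshufd $0xb1 swaps the two floats within each 64-bit
   pair (real/imaginary swap) and pshufd $0x1b reverses all four floats,
   producing the operand orderings needed so that xmm4-xmm7 accumulate
   the four real/imaginary cross products.  ADD1/ADD2 expand to addps or
   subps according to the conjugation variant selected by the #if block
   at the top of the file. */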
.L12:
        PREFETCH (PREFETCHSIZE +  0) * SIZE(AA)

        ADD2    %xmm2, %xmm7
        pshufd  $0xb1, %xmm1, %xmm2
        mulps   %xmm0, %xmm1
        ADD1    %xmm3, %xmm6
        pshufd  $0x1b, %xmm2, %xmm3
        mulps   %xmm0, %xmm2

        ADD2    %xmm2, %xmm5
        pshufd  $0xb1, %xmm3, %xmm2
        mulps   %xmm0, %xmm3
        ADD1    %xmm1, %xmm4
        movaps  -28 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm2
        movaps  -28 * SIZE(AA), %xmm0

        ADD2    %xmm2, %xmm7
        pshufd  $0xb1, %xmm1, %xmm2
        mulps   %xmm0, %xmm1
        ADD1    %xmm3, %xmm6
        pshufd  $0x1b, %xmm2, %xmm3
        mulps   %xmm0, %xmm2

        ADD2    %xmm2, %xmm5
        pshufd  $0xb1, %xmm3, %xmm2
        mulps   %xmm0, %xmm3
        ADD1    %xmm1, %xmm4
        movaps  -24 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm2
        movaps  -24 * SIZE(AA), %xmm0

        ADD2    %xmm2, %xmm7
        pshufd  $0xb1, %xmm1, %xmm2
        mulps   %xmm0, %xmm1
        ADD1    %xmm3, %xmm6
        pshufd  $0x1b, %xmm2, %xmm3
        mulps   %xmm0, %xmm2

        ADD2    %xmm2, %xmm5
        pshufd  $0xb1, %xmm3, %xmm2
        mulps   %xmm0, %xmm3
        ADD1    %xmm1, %xmm4
        movaps  -20 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm2
        movaps  -20 * SIZE(AA), %xmm0

        ADD2    %xmm2, %xmm7
        pshufd  $0xb1, %xmm1, %xmm2
        mulps   %xmm0, %xmm1
        ADD1    %xmm3, %xmm6
        pshufd  $0x1b, %xmm2, %xmm3
        mulps   %xmm0, %xmm2

        ADD2    %xmm2, %xmm5
        pshufd  $0xb1, %xmm3, %xmm2
        mulps   %xmm0, %xmm3
        ADD1    %xmm1, %xmm4
        movaps  -16 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm2
        movaps  -16 * SIZE(AA), %xmm0

        PREFETCH (PREFETCHSIZE + 16) * SIZE(AA)

        ADD2    %xmm2, %xmm7
        pshufd  $0xb1, %xmm1, %xmm2
        mulps   %xmm0, %xmm1
        ADD1    %xmm3, %xmm6
        pshufd  $0x1b, %xmm2, %xmm3
        mulps   %xmm0, %xmm2

        ADD2    %xmm2, %xmm5
        pshufd  $0xb1, %xmm3, %xmm2
        mulps   %xmm0, %xmm3
        ADD1    %xmm1, %xmm4
        movaps  -12 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm2
        movaps  -12 * SIZE(AA), %xmm0

        ADD2    %xmm2, %xmm7
        pshufd  $0xb1, %xmm1, %xmm2
        mulps   %xmm0, %xmm1
        ADD1    %xmm3, %xmm6
        pshufd  $0x1b, %xmm2, %xmm3
        mulps   %xmm0, %xmm2

        ADD2    %xmm2, %xmm5
        pshufd  $0xb1, %xmm3, %xmm2
        mulps   %xmm0, %xmm3
        ADD1    %xmm1, %xmm4
        movaps  -8 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm2
        movaps  -8 * SIZE(AA), %xmm0

        ADD2    %xmm2, %xmm7
        pshufd  $0xb1, %xmm1, %xmm2
        mulps   %xmm0, %xmm1
        ADD1    %xmm3, %xmm6
        pshufd  $0x1b, %xmm2, %xmm3
        mulps   %xmm0, %xmm2

        ADD2    %xmm2, %xmm5
        pshufd  $0xb1, %xmm3, %xmm2
        mulps   %xmm0, %xmm3
        ADD1    %xmm1, %xmm4
        movaps  -4 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm2
        movaps  -4 * SIZE(AA), %xmm0

        ADD2    %xmm2, %xmm7
        subl    $-32 * SIZE, BB
        pshufd  $0xb1, %xmm1, %xmm2
        mulps   %xmm0, %xmm1
        ADD1    %xmm3, %xmm6
        pshufd  $0x1b, %xmm2, %xmm3
        mulps   %xmm0, %xmm2

        ADD2    %xmm2, %xmm5
        subl    $-32 * SIZE, AA
        pshufd  $0xb1, %xmm3, %xmm2
        mulps   %xmm0, %xmm3
        ADD1    %xmm1, %xmm4
        movaps  -32 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm2
        movaps  -32 * SIZE(AA), %xmm0

        decl    %eax
        jne     .L12
        ALIGN_4

.L15:
#ifndef TRMMKERNEL
        movl    K, %eax
#else
        movl    KKK, %eax
#endif
        andl    $7, %eax
        BRANCH
        je      .L18
        ALIGN_4

.L16:
        ADD2    %xmm2, %xmm7
        pshufd  $0xb1, %xmm1, %xmm2
        mulps   %xmm0, %xmm1
        ADD1    %xmm3, %xmm6
        pshufd  $0x1b, %xmm2, %xmm3
        mulps   %xmm0, %xmm2

        ADD2    %xmm2, %xmm5
        pshufd  $0xb1, %xmm3, %xmm2
        mulps   %xmm0, %xmm3
        ADD1    %xmm1, %xmm4
        movaps  -28 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm2
        movaps  -28 * SIZE(AA), %xmm0

        addl    $4 * SIZE, AA
        addl    $4 * SIZE, BB
        decl    %eax
        jg      .L16
        ALIGN_4
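/* Tail of the 2x2 block: xmm4-xmm7 still hold interleaved partial sums.
   pcmpeqb/psllq builds a mask with only the sign bit of the upper float
   of each 64-bit pair set; pxor with it negates the imaginary (or, after
   the pshufd $0xb1, the real) components as the conjugation case
   requires.  haddps then folds the partial products pairwise, the shufps
   sequence regroups the two result columns, and the pshufd/mulps/addsubps
   block applies the complex scalar ALPHA_R/ALPHA_I before the results
   are merged (unless TRMMKERNEL or BETAZERO) into C1 and C1 + LDC. */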
.L18:
        ADD2    %xmm2, %xmm7
        pcmpeqb %xmm0, %xmm0
        ADD1    %xmm3, %xmm6
        psllq   $63, %xmm0

        movsd   ALPHA_R, %xmm3

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(RR) || defined(RC) || defined(CR) || defined(CC)
        pxor    %xmm0, %xmm4
        pxor    %xmm0, %xmm6
#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
        pshufd  $0xb1, %xmm0, %xmm0
        pxor    %xmm0, %xmm5
        pxor    %xmm0, %xmm7
#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)
        pxor    %xmm0, %xmm5
        pxor    %xmm0, %xmm7
#endif

        haddps  %xmm5, %xmm4
        haddps  %xmm7, %xmm6

        shufps  $0xd8, %xmm4, %xmm4
        shufps  $0xd8, %xmm6, %xmm6

        movaps  %xmm4, %xmm5
        shufps  $0xe4, %xmm6, %xmm4
        shufps  $0xe4, %xmm5, %xmm6

        pshufd  $0x00, %xmm3, %xmm2
        pshufd  $0x55, %xmm3, %xmm3

        pshufd  $0xb1, %xmm4, %xmm5
        pshufd  $0xb1, %xmm6, %xmm7

        mulps   %xmm2, %xmm4
        mulps   %xmm3, %xmm5
        mulps   %xmm2, %xmm6
        mulps   %xmm3, %xmm7

        addsubps %xmm5, %xmm4
        addsubps %xmm7, %xmm6

#if !defined(TRMMKERNEL) && !defined(BETAZERO)
        movsd   0 * SIZE(C1), %xmm2
        movhps  2 * SIZE(C1), %xmm2
        movsd   0 * SIZE(C1, LDC), %xmm3
        movhps  2 * SIZE(C1, LDC), %xmm3

        addps   %xmm2, %xmm4
        addps   %xmm3, %xmm6
#endif

        movsd   %xmm4, 0 * SIZE(C1)
        movhps  %xmm4, 2 * SIZE(C1)
        movsd   %xmm6, 0 * SIZE(C1, LDC)
        movhps  %xmm6, 2 * SIZE(C1, LDC)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        movl    K, %eax
        subl    KKK, %eax
        leal    (,%eax, SIZE), %eax
        leal    (AA, %eax, 4), AA
        leal    (BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
        addl    $2, KK
#endif

        addl    $4 * SIZE, C1
        decl    %ebx
        jg      .L10
        ALIGN_4

.L20:
        movl    M, %ebx
        testl   $1, %ebx
        jle     .L29

#if !defined(TRMMKERNEL) || \
        (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
        (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        movl    B, BB
#else
        movl    B, BB
        movl    KK, %eax
        leal    (, %eax, SIZE), %eax
        leal    (AA, %eax, 2), AA
        leal    (BB, %eax, 4), BB
#endif

        movsd   -32 * SIZE(AA), %xmm0
        pxor    %xmm2, %xmm2
        movaps  -32 * SIZE(BB), %xmm1
        pxor    %xmm3, %xmm3

        pxor    %xmm4, %xmm4
        pxor    %xmm5, %xmm5
        pxor    %xmm6, %xmm6
        pxor    %xmm7, %xmm7

#ifndef TRMMKERNEL
        movl    K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl    K, %eax
        subl    KK, %eax
        movl    %eax, KKK
#else
        movl    KK, %eax
#ifdef LEFT
        addl    $1, %eax
#else
        addl    $2, %eax
#endif
        movl    %eax, KKK
#endif
        sarl    $3, %eax
        je      .L25
        ALIGN_4

.L22:
        addps   %xmm2, %xmm6
        pshufd  $0x00, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm7
        pshufd  $0x55, %xmm1, %xmm3
        mulps   %xmm0, %xmm3

        PREFETCH (PREFETCHSIZE +  0) * SIZE(AA)

        addps   %xmm2, %xmm4
        pshufd  $0xaa, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm5
        pshufd  $0xff, %xmm1, %xmm3
        movaps  -28 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm3
        movsd   -30 * SIZE(AA), %xmm0

        addps   %xmm2, %xmm6
        pshufd  $0x00, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm7
        pshufd  $0x55, %xmm1, %xmm3
        mulps   %xmm0, %xmm3

        addps   %xmm2, %xmm4
        pshufd  $0xaa, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm5
        pshufd  $0xff, %xmm1, %xmm3
        movaps  -24 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm3
        movsd   -28 * SIZE(AA), %xmm0

        addps   %xmm2, %xmm6
        pshufd  $0x00, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm7
        pshufd  $0x55, %xmm1, %xmm3
        mulps   %xmm0, %xmm3

        addps   %xmm2, %xmm4
        pshufd  $0xaa, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm5
        pshufd  $0xff, %xmm1, %xmm3
        movaps  -20 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm3
        movsd   -26 * SIZE(AA), %xmm0

        addps   %xmm2, %xmm6
        pshufd  $0x00, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm7
        pshufd  $0x55, %xmm1, %xmm3
        mulps   %xmm0, %xmm3

        addps   %xmm2, %xmm4
        pshufd  $0xaa, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm5
        pshufd  $0xff, %xmm1, %xmm3
        movaps  -16 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm3
        movsd   -24 * SIZE(AA), %xmm0

        addps   %xmm2, %xmm6
        pshufd  $0x00, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm7
        pshufd  $0x55, %xmm1, %xmm3
        mulps   %xmm0, %xmm3

        addps   %xmm2, %xmm4
        pshufd  $0xaa, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm5
        pshufd  $0xff, %xmm1, %xmm3
        movaps  -12 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm3
        movsd   -22 * SIZE(AA), %xmm0

        addps   %xmm2, %xmm6
        pshufd  $0x00, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm7
        pshufd  $0x55, %xmm1, %xmm3
        mulps   %xmm0, %xmm3

        addps   %xmm2, %xmm4
        pshufd  $0xaa, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm5
        pshufd  $0xff, %xmm1, %xmm3
        movaps  -8 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm3
        movsd   -20 * SIZE(AA), %xmm0

        addps   %xmm2, %xmm6
        pshufd  $0x00, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm7
        pshufd  $0x55, %xmm1, %xmm3
        mulps   %xmm0, %xmm3

        addps   %xmm2, %xmm4
        pshufd  $0xaa, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm5
        pshufd  $0xff, %xmm1, %xmm3
        movaps  -4 * SIZE(BB), %xmm1
        mulps   %xmm0, %xmm3
        movsd   -18 * SIZE(AA), %xmm0

        addps   %xmm2, %xmm6
        pshufd  $0x00, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
        addps   %xmm3, %xmm7
        pshufd  $0x55, %xmm1, %xmm3
        mulps   %xmm0, %xmm3

        addps   %xmm2, %xmm4
        pshufd  $0xaa, %xmm1, %xmm2
        mulps   %xmm0, %xmm2
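The copy of the file reproduced here breaks off at this point, partway through the unrolled M-remainder loop (.L22); the code that the .L25, .L29 and .L30 branches target is not present.

For orientation, the following C sketch models what the 2x2 micro-kernel above computes in its non-conjugated case. It is illustrative only, not the author's code: the function and parameter names and the packing layout are assumptions, and float complex is inferred from the packed single-precision (...ps) arithmetic in the listing rather than from the "z" prefix of the file name.

#include <complex.h>
#include <stddef.h>

/* Reference model of one 2x2 block: C[0..1][0..1] += alpha * A(2 x k) * B(k x 2).
   a and b stand for the packed panels the kernel walks with AA/BB, two
   complex values per k step each; ldc is the column stride of C in complex
   elements.  All names here are hypothetical, not symbols from the kernel. */
static void gemm_kernel_2x2_ref(size_t k, float complex alpha,
                                const float complex *a,
                                const float complex *b,
                                float complex *c, size_t ldc)
{
    float complex c00 = 0, c10 = 0, c01 = 0, c11 = 0;

    /* Corresponds to the .L12/.L16 loops: xmm4..xmm7 carry these four
       accumulators as interleaved partial products. */
    for (size_t l = 0; l < k; l++) {
        c00 += a[2 * l + 0] * b[2 * l + 0];
        c10 += a[2 * l + 1] * b[2 * l + 0];
        c01 += a[2 * l + 0] * b[2 * l + 1];
        c11 += a[2 * l + 1] * b[2 * l + 1];
    }

    /* Corresponds to .L18: scale by the complex alpha (ALPHA_R/ALPHA_I
       plus addsubps) and accumulate into the two columns of C that the
       kernel addresses through C1 and C1 + LDC. */
    c[0]       += alpha * c00;
    c[1]       += alpha * c10;
    c[ldc]     += alpha * c01;
    c[ldc + 1] += alpha * c11;
}

The conjugated variants (the NR/RN/CC/... defines that drive ADD1/ADD2 and the pxor sign masks) differ only in which operand's imaginary parts get negated; roughly, the R/C letters select conjugation of the A or B panel. Rather than broadcasting scalars, the kernel keeps two complex values per register and builds the cross-product orderings with pshufd, deferring the real/imaginary combination to one haddps/addsubps pass per block, a pattern that suits the fast 128-bit shuffle unit of the Penryn core this kernel is named for.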