gemm_kernel_2x4_core2.s
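/*
 * Reading aid: a rough C-level sketch of the 2 x 4 register-block update
 * that the main path below (.L11/.L12) performs for one block of C in the
 * plain GEMM build (#ifndef TRMMKERNEL).  It assumes double precision,
 * column-major C with leading dimension ldc, A packed two rows per K step
 * and B packed four columns per K step.  The function name and parameter
 * names are illustrative assumptions only, not part of this file or of the
 * BLAS kernel interface.
 *
 *   // illustrative reference, not the actual kernel entry point
 *   static void dgemm_block_2x4_ref(long k, double alpha,
 *                                   const double *a,   // 2 * k packed values
 *                                   const double *b,   // 4 * k packed values
 *                                   double *c, long ldc)
 *   {
 *       double acc[2][4] = {{0.0}};
 *
 *       for (long l = 0; l < k; l++)            // reduction over K
 *           for (long j = 0; j < 4; j++)        // four columns from b
 *               for (long i = 0; i < 2; i++)    // two rows from a
 *                   acc[i][j] += a[2 * l + i] * b[4 * l + j];
 *
 *       for (long j = 0; j < 4; j++)            // C(i,j) += alpha * acc[i][j]
 *           for (long i = 0; i < 2; i++)
 *               c[i + j * ldc] += alpha * acc[i][j];
 *   }
 *
 * The assembly below accumulates those eight values in %xmm4-%xmm7 (two
 * doubles each), unrolls the K loop eight times, and swaps the two A values
 * with pshufd/SHUFPD_1 so that one 128-bit load of A feeds both rows.
 */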
/*********************************************************************/
/*                                                                   */
/*  Optimized BLAS libraries                                         */
/*  By Kazushige Goto <kgoto@tacc.utexas.edu>                        */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define STACK	16
#define ARGS	16

#define M	 4 + STACK + ARGS(%esp)
#define N	 8 + STACK + ARGS(%esp)
#define K	12 + STACK + ARGS(%esp)
#define ALPHA	16 + STACK + ARGS(%esp)
#define A	24 + STACK + ARGS(%esp)
#define ARG_B	28 + STACK + ARGS(%esp)
#define C	32 + STACK + ARGS(%esp)
#define ARG_LDC	36 + STACK + ARGS(%esp)
#define OFFSET	40 + STACK + ARGS(%esp)

#define J	 0 + STACK(%esp)
#define BX	 4 + STACK(%esp)
#define KK	 8 + STACK(%esp)
#define KKK	12 + STACK(%esp)

#define PREFETCH_R   (8 * 4)
#define PREFETCHSIZE (8 * 21 + 4)
#define PREFETCH     prefetcht0

#define AA	%edx
#define BB	%ecx
#define LDC	%ebp
#define B	%edi
#define C1	%esi
#define I	%ebx

	PROLOGUE

	subl	$ARGS, %esp	# Generate Stack Frame

	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%ebx

	PROFCODE

	movl	ARG_B,   B
	movl	ARG_LDC, LDC

#ifdef TRMMKERNEL
	movl	OFFSET, %eax
#ifndef LEFT
	negl	%eax
#endif
	movl	%eax, KK
#endif

	subl	$-16 * SIZE, A
	subl	$-16 * SIZE, B

	leal	(, LDC, SIZE), LDC

	movl	N,  %eax
	sarl	$2, %eax
	movl	%eax, J
	jle	.L30
	ALIGN_4

/* J loop: four columns of C per iteration (N / 4) */
.L01:
#if defined(TRMMKERNEL) && defined(LEFT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

	movl	B, BX

	movl	C, C1
	movl	A, AA

	movl	M,  I
	sarl	$1, I
	jle	.L20
	ALIGN_4

/* I loop: one 2x4 block of C per iteration (M / 2) */
.L11:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	movl	B, BB
#else
	movl	B, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB
#endif

	movl	BX, %eax
	prefetcht2  (PREFETCH_R + 0) * SIZE(%eax)
	prefetcht2  (PREFETCH_R + 8) * SIZE(%eax)
	subl	$-8 * SIZE, BX

	leal	(C1, LDC, 2), %eax

	movaps	-16 * SIZE(AA), %xmm0
	pxor	%xmm2, %xmm2
	movaps	-16 * SIZE(BB), %xmm1
	pxor	%xmm3, %xmm3

	pxor	%xmm4, %xmm4
	prefetcht0	1 * SIZE(C1)
	pxor	%xmm5, %xmm5
	prefetcht0	1 * SIZE(C1, LDC)
	pxor	%xmm6, %xmm6
	prefetcht0	1 * SIZE(%eax)
	pxor	%xmm7, %xmm7
	prefetcht0	1 * SIZE(%eax, LDC)

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$2, %eax
#else
	addl	$4, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L15
	ALIGN_4

/* main K loop, unrolled eight times */
.L12:
	PREFETCH (PREFETCHSIZE +  0) * SIZE(AA)

	addpd	%xmm2, %xmm6
	movapd	%xmm1, %xmm2
	mulpd	%xmm0, %xmm1
	addpd	%xmm1, %xmm4
	movaps	-14 * SIZE(BB), %xmm1

	addpd	%xmm3, %xmm7
	movapd	%xmm1, %xmm3
	mulpd	%xmm0, %xmm1
//	SHUFPD_1 %xmm0, %xmm0
	pshufd	$0x4e, %xmm0, %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	movaps	-14 * SIZE(AA), %xmm0
	addpd	%xmm1, %xmm5
	movaps	-12 * SIZE(BB), %xmm1

	addpd	%xmm2, %xmm6
	movapd	%xmm1, %xmm2
	mulpd	%xmm0, %xmm1
	addpd	%xmm1, %xmm4
	movaps	-10 * SIZE(BB), %xmm1

	addpd	%xmm3, %xmm7
	movapd	%xmm1, %xmm3
	mulpd	%xmm0, %xmm1
//	SHUFPD_1 %xmm0, %xmm0
	pshufd	$0x4e, %xmm0, %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	movaps	-12 * SIZE(AA), %xmm0
	addpd	%xmm1, %xmm5
	movaps	 -8 * SIZE(BB), %xmm1

	addpd	%xmm2, %xmm6
	movapd	%xmm1, %xmm2
	mulpd	%xmm0, %xmm1
	addpd	%xmm1, %xmm4
	movaps	 -6 * SIZE(BB), %xmm1

	addpd	%xmm3, %xmm7
	movapd	%xmm1, %xmm3
	mulpd	%xmm0, %xmm1
//	SHUFPD_1 %xmm0, %xmm0
	pshufd	$0x4e, %xmm0, %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	movaps	-10 * SIZE(AA), %xmm0
	addpd	%xmm1, %xmm5
	movaps	 -4 * SIZE(BB), %xmm1

	addpd	%xmm2, %xmm6
	movapd	%xmm1, %xmm2
	mulpd	%xmm0, %xmm1
	addpd	%xmm1, %xmm4
	movaps	 -2 * SIZE(BB), %xmm1

	addpd	%xmm3, %xmm7
	movapd	%xmm1, %xmm3
	mulpd	%xmm0, %xmm1
//	SHUFPD_1 %xmm0, %xmm0
	pshufd	$0x4e, %xmm0, %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	movaps	 -8 * SIZE(AA), %xmm0
	addpd	%xmm1, %xmm5
	movaps	  0 * SIZE(BB), %xmm1

	PREFETCH (PREFETCHSIZE +  8) * SIZE(AA)

	addpd	%xmm2, %xmm6
	movapd	%xmm1, %xmm2
	mulpd	%xmm0, %xmm1
	addpd	%xmm1, %xmm4
	movaps	  2 * SIZE(BB), %xmm1

	addpd	%xmm3, %xmm7
	movapd	%xmm1, %xmm3
	mulpd	%xmm0, %xmm1
//	SHUFPD_1 %xmm0, %xmm0
	pshufd	$0x4e, %xmm0, %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	movaps	 -6 * SIZE(AA), %xmm0
	addpd	%xmm1, %xmm5
	movaps	  4 * SIZE(BB), %xmm1

	addpd	%xmm2, %xmm6
	movapd	%xmm1, %xmm2
	mulpd	%xmm0, %xmm1
	addpd	%xmm1, %xmm4
	movaps	  6 * SIZE(BB), %xmm1

	addpd	%xmm3, %xmm7
	movapd	%xmm1, %xmm3
	mulpd	%xmm0, %xmm1
//	SHUFPD_1 %xmm0, %xmm0
	pshufd	$0x4e, %xmm0, %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	movaps	 -4 * SIZE(AA), %xmm0
	addpd	%xmm1, %xmm5
	movaps	  8 * SIZE(BB), %xmm1

	addpd	%xmm2, %xmm6
	movapd	%xmm1, %xmm2
	mulpd	%xmm0, %xmm1
	addpd	%xmm1, %xmm4
	movaps	 10 * SIZE(BB), %xmm1

	addpd	%xmm3, %xmm7
	movapd	%xmm1, %xmm3
	mulpd	%xmm0, %xmm1
//	SHUFPD_1 %xmm0, %xmm0
	pshufd	$0x4e, %xmm0, %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	movaps	 -2 * SIZE(AA), %xmm0
	addpd	%xmm1, %xmm5
	movaps	 12 * SIZE(BB), %xmm1

	addpd	%xmm2, %xmm6
	movapd	%xmm1, %xmm2
	mulpd	%xmm0, %xmm1
	addpd	%xmm1, %xmm4
	movaps	 14 * SIZE(BB), %xmm1

	addpd	%xmm3, %xmm7
	movapd	%xmm1, %xmm3
	mulpd	%xmm0, %xmm1
//	SHUFPD_1 %xmm0, %xmm0
	pshufd	$0x4e, %xmm0, %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	movaps	  0 * SIZE(AA), %xmm0
	addpd	%xmm1, %xmm5
	movaps	 16 * SIZE(BB), %xmm1

	subl	$-32 * SIZE, BB
	subl	$-16 * SIZE, AA
	subl	$1, %eax
	jne,pt	.L12
	ALIGN_4

.L15:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	andl	$7, %eax
	BRANCH
	je	.L18
	ALIGN_4

/* K remainder loop (K % 8) */
.L16:
	addpd	%xmm2, %xmm6
	movapd	%xmm1, %xmm2
	mulpd	%xmm0, %xmm1
	addpd	%xmm1, %xmm4
	movaps	-14 * SIZE(BB), %xmm1

	addpd	%xmm3, %xmm7
	movapd	%xmm1, %xmm3
	mulpd	%xmm0, %xmm1
	SHUFPD_1 %xmm0, %xmm0
	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	movaps	-14 * SIZE(AA), %xmm0
	addpd	%xmm1, %xmm5
	movaps	-12 * SIZE(BB), %xmm1

	addl	$2 * SIZE, AA
	addl	$4 * SIZE, BB
	decl	%eax
	jg	.L16
	ALIGN_4

/* scale by alpha and update the 2x4 block of C */
.L18:
	addpd	%xmm2, %xmm6
	addpd	%xmm3, %xmm7

	movddup	ALPHA, %xmm3

	movaps	%xmm4, %xmm0
	unpcklpd %xmm6, %xmm4
	unpckhpd %xmm0, %xmm6

	movaps	%xmm5, %xmm1
	unpcklpd %xmm7, %xmm5
	unpckhpd %xmm1, %xmm7

	mulpd	%xmm3, %xmm4
	mulpd	%xmm3, %xmm5
	mulpd	%xmm3, %xmm6
	mulpd	%xmm3, %xmm7

	leal	(C1, LDC, 2), %eax

#ifndef TRMMKERNEL
	movsd	0 * SIZE(C1), %xmm0
	movhpd	1 * SIZE(C1), %xmm0
	movsd	0 * SIZE(C1, LDC), %xmm1
	movhpd	1 * SIZE(C1, LDC), %xmm1
	movsd	0 * SIZE(%eax), %xmm2
	movhpd	1 * SIZE(%eax), %xmm2
	movsd	0 * SIZE(%eax, LDC), %xmm3
	movhpd	1 * SIZE(%eax, LDC), %xmm3

	addpd	%xmm0, %xmm4
	addpd	%xmm1, %xmm6
	addpd	%xmm2, %xmm5
	addpd	%xmm3, %xmm7
#endif

	movsd	%xmm4, 0 * SIZE(C1)
	movhpd	%xmm4, 1 * SIZE(C1)
	movsd	%xmm6, 0 * SIZE(C1, LDC)
	movhpd	%xmm6, 1 * SIZE(C1, LDC)
	movsd	%xmm5, 0 * SIZE(%eax)
	movhpd	%xmm5, 1 * SIZE(%eax)
	movsd	%xmm7, 0 * SIZE(%eax, LDC)
	movhpd	%xmm7, 1 * SIZE(%eax, LDC)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif

	addl	$2 * SIZE, C1
	decl	I
	jg	.L11
	ALIGN_4

/* M remainder: last single row (1x4 block) */
.L20:
	movl	M, I
	testl	$1, I
	jle	.L29

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	movl	B, BB
#else
	movl	B, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	addl	%eax, AA
	leal	(BB, %eax, 4), BB
#endif

	movaps	-16 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movaps	-16 * SIZE(BB), %xmm2
	pxor	%xmm5, %xmm5
	movaps	-14 * SIZE(BB), %xmm3
	pxor	%xmm6, %xmm6
	pxor	%xmm7, %xmm7

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$1, %eax
#else
	addl	$4, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L25
	ALIGN_4

.L22:
	PREFETCH (PREFETCHSIZE +  0) * SIZE(AA)

	pshufd	$0x44, %xmm0, %xmm1
	mulpd	%xmm1, %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm2, %xmm4
	movaps	-12 * SIZE(BB), %xmm2
	addpd	%xmm3, %xmm5
	movaps	-10 * SIZE(BB), %xmm3

	pshufd	$0xee, %xmm0, %xmm1
	movaps	-14 * SIZE(AA), %xmm0
	mulpd	%xmm1, %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm2, %xmm6
	movaps	 -8 * SIZE(BB), %xmm2
	addpd	%xmm3, %xmm7
	movaps	 -6 * SIZE(BB), %xmm3

	pshufd	$0x44, %xmm0, %xmm1
	mulpd	%xmm1, %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm2, %xmm4
	movaps	 -4 * SIZE(BB), %xmm2
	addpd	%xmm3, %xmm5
	movaps	 -2 * SIZE(BB), %xmm3

	pshufd	$0xee, %xmm0, %xmm1
	movaps	-12 * SIZE(AA), %xmm0
	mulpd	%xmm1, %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm2, %xmm6
	movaps	  0 * SIZE(BB), %xmm2
	addpd	%xmm3, %xmm7
	movaps	  2 * SIZE(BB), %xmm3

	pshufd	$0x44, %xmm0, %xmm1
	mulpd	%xmm1, %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm2, %xmm4
	movaps	  4 * SIZE(BB), %xmm2
	addpd	%xmm3, %xmm5
	movaps	  6 * SIZE(BB), %xmm3

	pshufd	$0xee, %xmm0, %xmm1
	movaps	-10 * SIZE(AA), %xmm0
	mulpd	%xmm1, %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm2, %xmm6
	movaps	  8 * SIZE(BB), %xmm2
	addpd	%xmm3, %xmm7
	movaps	 10 * SIZE(BB), %xmm3

	pshufd	$0x44, %xmm0, %xmm1
	mulpd	%xmm1, %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm2, %xmm4
	movaps	 12 * SIZE(BB), %xmm2
	addpd	%xmm3, %xmm5
	movaps	 14 * SIZE(BB), %xmm3

	pshufd	$0xee, %xmm0, %xmm1
	movaps	 -8 * SIZE(AA), %xmm0
	mulpd	%xmm1, %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm2, %xmm6
	movaps	 16 * SIZE(BB), %xmm2
	addpd	%xmm3, %xmm7
	movaps	 18 * SIZE(BB), %xmm3

	subl	$ -8 * SIZE, AA
	subl	$-32 * SIZE, BB
	subl	$1, %eax
	jne	.L22
	ALIGN_4

.L25:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	andl	$7, %eax
	BRANCH
	je	.L28
	ALIGN_4

.L26:
	pshufd	$0x44, %xmm0, %xmm1
	movsd	-15 * SIZE(AA), %xmm0
	mulpd	%xmm1, %xmm2
	mulpd	%xmm1, %xmm3
	addpd	%xmm2, %xmm4
	movaps	-12 * SIZE(BB), %xmm2
	addpd	%xmm3, %xmm5
	movaps	-10 * SIZE(BB), %xmm3

	addl	$1 * SIZE, AA
	addl	$4 * SIZE, BB
	decl	%eax
	jg	.L26
	ALIGN_4

/* scale by alpha and update the 1x4 block of C */
.L28:
	movddup	ALPHA, %xmm3

	addpd	%xmm6, %xmm4
	addpd	%xmm7, %xmm5

	leal	(C1, LDC, 2), %eax

#ifndef TRMMKERNEL
	movsd	0 * SIZE(C1), %xmm0
	movhpd	0 * SIZE(C1, LDC), %xmm0
	movsd	0 * SIZE(%eax), %xmm1
	movhpd	0 * SIZE(%eax, LDC), %xmm1
#endif

	mulpd	%xmm3, %xmm4
	mulpd	%xmm3, %xmm5

#ifndef TRMMKERNEL
	addpd	%xmm0, %xmm4
	addpd	%xmm1, %xmm5
#endif

	movsd	%xmm4, 0 * SIZE(C1)
	movhpd	%xmm4, 0 * SIZE(C1, LDC)
	movsd	%xmm5, 0 * SIZE(%eax)
	movhpd	%xmm5, 0 * SIZE(%eax, LDC)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, SIZE), %eax
	addl	%eax, AA
	leal	(BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$1, KK
#endif
	ALIGN_4

.L29:
#if defined(TRMMKERNEL) && !defined(LEFT)
	addl	$4, KK
#endif

	movl	BB, B

	leal	(, LDC, 4), %eax
	addl	%eax, C
	decl	J
	jg	.L01
	ALIGN_4

/* N remainder: two-column case (N & 2) */
.L30:
	movl	N, %eax
	testl	$2, %eax
	jle	.L50

#if defined(TRMMKERNEL) && defined(LEFT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

	movl	C, C1
	movl	A, AA

	movl	M,  I
	sarl	$1, I
	jle	.L40
	ALIGN_4

.L31:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	movl	B, BB
#else
	movl	B, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 2), BB
#endif

	movaps	-16 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movaps	-16 * SIZE(BB), %xmm1
	pxor	%xmm5, %xmm5