gemm_kernel_2x4_sse2.s
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define STACK	16
#define ARGS	0

#define OLD_M		 4 + STACK + ARGS(%esi)
#define OLD_N		 8 + STACK + ARGS(%esi)
#define OLD_K		12 + STACK + ARGS(%esi)
#define OLD_ALPHA	16 + STACK + ARGS(%esi)
#define OLD_A		24 + STACK + ARGS(%esi)
#define OLD_B		28 + STACK + ARGS(%esi)
#define OLD_C		32 + STACK + ARGS(%esi)
#define OLD_LDC		36 + STACK + ARGS(%esi)
#define OLD_OFFT	40 + STACK + ARGS(%esi)

#define ALPHA		  0(%esp)
#define K		 16(%esp)
#define N		 20(%esp)
#define M		 24(%esp)
#define A		 28(%esp)
#define C		 32(%esp)
#define J		 36(%esp)
#define OLD_STACK	 40(%esp)
#define OFFSET		 44(%esp)
#define KK		 48(%esp)
#define KKK		 52(%esp)
#define BUFFER		128(%esp)

#if defined(OPTERON) || defined(BARCELONA)
#define movsd	movlpd
#endif

#define LOCAL_BUFFER_SIZE	GEMM_Q * GEMM_UNROLL_N * COMPSIZE * 16

#if defined(OPTERON) || defined(BARCELONA)
#define PREFETCH	prefetch
#define PREFETCHSIZE	(8 * 10 + 4)
#endif

#define AA	%edx
#define BB	%ecx
#define LDC	%ebp

/* KERNEL1..KERNEL8 each perform one k-iteration of the 2x4 update:
   two packed A values (%xmm0 or %xmm1) are multiplied by four B
   values (stored as duplicated pairs in BUFFER) and accumulated into
   %xmm4..%xmm7, one register per column of the C tile.              */

#define KERNEL1(address) \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm4; \
	PREFETCH (PREFETCHSIZE + 0) * SIZE + (address) * 1 * SIZE(AA); \
	movapd	 2 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	 4 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	mulpd	 6 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm2, %xmm6; \
	movapd	16 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm0, %xmm7; \
	movapd	 2 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL2(address) \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	10 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	12 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	mulpd	14 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm3, %xmm6; \
	movapd	24 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm0, %xmm7; \
	movapd	 4 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL3(address) \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movapd	18 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	20 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	mulpd	22 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm2, %xmm6; \
	movapd	32 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm0, %xmm7; \
	movapd	 6 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL4(address) \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	26 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	28 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	mulpd	30 * SIZE + (address) * 4 * SIZE(BB), %xmm0; \
	addpd	%xmm3, %xmm6; \
	movapd	40 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm0, %xmm7; \
	movapd	16 * SIZE + (address) * 1 * SIZE(AA), %xmm0

#define KERNEL5(address) \
	PREFETCH (PREFETCHSIZE + 8) * SIZE + (address) * 1 * SIZE(AA); \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movapd	34 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	36 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	mulpd	38 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm2, %xmm6; \
	movapd	48 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm1, %xmm7; \
	movapd	10 * SIZE + (address) * 1 * SIZE(AA), %xmm1

#define KERNEL6(address) \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	42 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	44 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	mulpd	46 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm3, %xmm6; \
	movapd	56 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm1, %xmm7; \
	movapd	12 * SIZE + (address) * 1 * SIZE(AA), %xmm1

#define KERNEL7(address) \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movapd	50 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movapd	52 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	mulpd	54 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm2, %xmm6; \
	movapd	64 * SIZE + (address) * 4 * SIZE(BB), %xmm2; \
	addpd	%xmm1, %xmm7; \
	movapd	14 * SIZE + (address) * 1 * SIZE(AA), %xmm1

#define KERNEL8(address) \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movapd	58 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movapd	60 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	mulpd	62 * SIZE + (address) * 4 * SIZE(BB), %xmm1; \
	addpd	%xmm3, %xmm6; \
	movapd	72 * SIZE + (address) * 4 * SIZE(BB), %xmm3; \
	addpd	%xmm1, %xmm7; \
	movapd	24 * SIZE + (address) * 1 * SIZE(AA), %xmm1

	PROLOGUE

	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%ebx

	PROFCODE

	EMMS

	movl	%esp, %esi	# save old stack
	subl	$128 + LOCAL_BUFFER_SIZE, %esp
	andl	$-1024, %esp	# align stack

#ifdef WINDOWS_ABI
#if LOCAL_BUFFER_SIZE > 12288
	movl	$0, 4096 * 3(%esp)
#endif
#if LOCAL_BUFFER_SIZE > 8192
	movl	$0, 4096 * 2(%esp)
#endif
#if LOCAL_BUFFER_SIZE > 4096
	movl	$0, 4096 * 1(%esp)
#endif
	movl	$0, 4096 * 0(%esp)
#endif

	movl	OLD_M, %ebx
	movl	OLD_N, %eax
	movl	OLD_K, %ecx
	movl	OLD_A, %edx
	movsd	OLD_ALPHA, %xmm3

	movl	%ebx, M
	movl	%eax, N
	movl	%ecx, K
	movl	%edx, A
	movl	%esi, OLD_STACK

#ifdef TRMMKERNEL
	movss	OLD_OFFT, %xmm4
#endif

	unpcklpd %xmm3, %xmm3

	movl	OLD_B, %edi
	movl	OLD_C, %ebx
	movapd	%xmm3, ALPHA

	movl	%ebx, C
	movl	OLD_LDC, LDC

#ifdef TRMMKERNEL
	movss	%xmm4, OFFSET
	movss	%xmm4, KK
#ifndef LEFT
	negl	KK
#endif
#endif

	leal	(, LDC, SIZE), LDC

	sarl	$2, %eax	# j = (n >> 2)
	movl	%eax, J
	jle	.L30
	ALIGN_2

.L01:
#if defined(TRMMKERNEL) && defined(LEFT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

/* Copying to Sub Buffer: each B element is stored twice, so that a
   single movapd later yields an aligned [b, b] pair.                */
	movl	K, %eax
	leal	BUFFER, %ecx
	sarl	$1, %eax
	jle	.L05
	ALIGN_4

.L02:
#define COPYPREFETCH 40

	prefetchnta	(COPYPREFETCH) * SIZE(%edi)

	movq	0 * SIZE(%edi), %mm0
	movq	1 * SIZE(%edi), %mm1
	movq	2 * SIZE(%edi), %mm2
	movq	3 * SIZE(%edi), %mm3
	movq	4 * SIZE(%edi), %mm4
	movq	5 * SIZE(%edi), %mm5
	movq	6 * SIZE(%edi), %mm6
	movq	7 * SIZE(%edi), %mm7
	movq	%mm0, 0 * SIZE(%ecx)
	movq	%mm0, 1 * SIZE(%ecx)
	movq	%mm1, 2 * SIZE(%ecx)
	movq	%mm1, 3 * SIZE(%ecx)
	movq	%mm2, 4 * SIZE(%ecx)
	movq	%mm2, 5 * SIZE(%ecx)
	movq	%mm3, 6 * SIZE(%ecx)
	movq	%mm3, 7 * SIZE(%ecx)
	movq	%mm4, 8 * SIZE(%ecx)
	movq	%mm4, 9 * SIZE(%ecx)
	movq	%mm5, 10 * SIZE(%ecx)
	movq	%mm5, 11 * SIZE(%ecx)
	movq	%mm6, 12 * SIZE(%ecx)
	movq	%mm6, 13 * SIZE(%ecx)
	movq	%mm7, 14 * SIZE(%ecx)
	movq	%mm7, 15 * SIZE(%ecx)

	addl	$ 8 * SIZE, %edi
	addl	$16 * SIZE, %ecx
	decl	%eax
	jne	.L02
	ALIGN_2

.L05:
	movl	K, %eax
	andl	$1, %eax
	BRANCH
	jle	.L10

	movq	0 * SIZE(%edi), %mm0
	movq	1 * SIZE(%edi), %mm1
	movq	2 * SIZE(%edi), %mm2
	movq	3 * SIZE(%edi), %mm3

	movq	%mm0, 0 * SIZE(%ecx)
	movq	%mm0, 1 * SIZE(%ecx)
	movq	%mm1, 2 * SIZE(%ecx)
	movq	%mm1, 3 * SIZE(%ecx)
	movq	%mm2, 4 * SIZE(%ecx)
	movq	%mm2, 5 * SIZE(%ecx)
	movq	%mm3, 6 * SIZE(%ecx)
	movq	%mm3, 7 * SIZE(%ecx)

	addl	$4 * SIZE, %edi
	ALIGN_4

.L10:
	movl	C, %esi		# coffset = c
	movl	A, AA		# aoffset = a
	movl	M, %ebx
	sarl	$1, %ebx	# i = (m >> 1)
	jle	.L20
	ALIGN_4

.L11:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 8), BB
#endif

	pxor	%xmm4, %xmm4
	pxor	%xmm5, %xmm5
	pxor	%xmm6, %xmm6
	pxor	%xmm7, %xmm7

	movapd	0 * SIZE(AA), %xmm0
	movapd	8 * SIZE(AA), %xmm1
	movapd	0 * SIZE(BB), %xmm2
	movapd	8 * SIZE(BB), %xmm3

	leal	(LDC, LDC, 2), %eax

	prefetchw	1 * SIZE(%esi)
	prefetchw	1 * SIZE(%esi, LDC)
	prefetchw	1 * SIZE(%esi, LDC, 2)
	prefetchw	1 * SIZE(%esi, %eax)

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$2, %eax
#else
	addl	$4, %eax
#endif
	movl	%eax, KKK
#endif

#if 1
	andl	$-8, %eax
	sall	$4, %eax	# eax = 16 * (k & ~7): bytes of A for the unrolled loop
	je	.L15

.L1X:
	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	cmpl	$128 * 1, %eax
	jle	.L12
	KERNEL1(16 * 1)
	KERNEL2(16 * 1)
	KERNEL3(16 * 1)
	KERNEL4(16 * 1)
	KERNEL5(16 * 1)
	KERNEL6(16 * 1)
	KERNEL7(16 * 1)
	KERNEL8(16 * 1)
	cmpl	$128 * 2, %eax
	jle	.L12
	KERNEL1(16 * 2)
	KERNEL2(16 * 2)
	KERNEL3(16 * 2)
	KERNEL4(16 * 2)
	KERNEL5(16 * 2)
	KERNEL6(16 * 2)
	KERNEL7(16 * 2)
	KERNEL8(16 * 2)
	cmpl	$128 * 3, %eax
	jle	.L12
	KERNEL1(16 * 3)
	KERNEL2(16 * 3)
	KERNEL3(16 * 3)
	KERNEL4(16 * 3)
	KERNEL5(16 * 3)
	KERNEL6(16 * 3)
	KERNEL7(16 * 3)
	KERNEL8(16 * 3)
	cmpl	$128 * 4, %eax
	jle	.L12
	KERNEL1(16 * 4)
	KERNEL2(16 * 4)
	KERNEL3(16 * 4)
	KERNEL4(16 * 4)
	KERNEL5(16 * 4)
	KERNEL6(16 * 4)
	KERNEL7(16 * 4)
	KERNEL8(16 * 4)
	cmpl	$128 * 5, %eax
	jle	.L12
	KERNEL1(16 * 5)
	KERNEL2(16 * 5)
	KERNEL3(16 * 5)
	KERNEL4(16 * 5)
	KERNEL5(16 * 5)
	KERNEL6(16 * 5)
	KERNEL7(16 * 5)
	KERNEL8(16 * 5)
	cmpl	$128 * 6, %eax
	jle	.L12
	KERNEL1(16 * 6)
	KERNEL2(16 * 6)
	KERNEL3(16 * 6)
	KERNEL4(16 * 6)
	KERNEL5(16 * 6)
	KERNEL6(16 * 6)
	KERNEL7(16 * 6)
	KERNEL8(16 * 6)
	cmpl	$128 * 7, %eax
	jle	.L12
	KERNEL1(16 * 7)
	KERNEL2(16 * 7)
	KERNEL3(16 * 7)
	KERNEL4(16 * 7)
	KERNEL5(16 * 7)
	KERNEL6(16 * 7)
	KERNEL7(16 * 7)
	KERNEL8(16 * 7)

	addl	$128 * 4 * SIZE, BB
	addl	$128 * 1 * SIZE, AA
	subl	$128 * 8, %eax
	jg	.L1X
	jmp	.L15

.L12:
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB
	ALIGN_4
#else
	sarl	$3, %eax
	je	.L15
	ALIGN_4

.L12:
	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)

	addl	$64 * SIZE, BB
	addl	$16 * SIZE, AA
	decl	%eax
	jne	.L12
	ALIGN_4
#endif

.L15:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movapd	ALPHA, %xmm3
	andl	$7, %eax	# k remainder (k & 7)
	BRANCH
	je	.L18
	ALIGN_3

.L16:
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm4
	movapd	2 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm5
	movapd	4 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	mulpd	6 * SIZE(BB), %xmm0
	addpd	%xmm2, %xmm6
	movapd	8 * SIZE(BB), %xmm2
	addpd	%xmm0, %xmm7
	movapd	2 * SIZE(AA), %xmm0

	addl	$2 * SIZE, AA
	addl	$8 * SIZE, BB
	decl	%eax
	jg	.L16
	ALIGN_4

.L18:
/* Scale the accumulators by alpha and write the 2x4 tile back; the
   non-TRMM path first loads and adds the existing C values.         */
	leal	(LDC, LDC, 2), %eax

#ifndef TRMMKERNEL
	mulpd	%xmm3, %xmm4
	movsd	0 * SIZE(%esi), %xmm0
	movhpd	1 * SIZE(%esi), %xmm0
	mulpd	%xmm3, %xmm5
	movsd	0 * SIZE(%esi, LDC, 1), %xmm1
	movhpd	1 * SIZE(%esi, LDC, 1), %xmm1
	mulpd	%xmm3, %xmm6
	movsd	0 * SIZE(%esi, LDC, 2), %xmm2
	movhpd	1 * SIZE(%esi, LDC, 2), %xmm2
	mulpd	%xmm3, %xmm7
	movsd	0 * SIZE(%esi, %eax, 1), %xmm3
	movhpd	1 * SIZE(%esi, %eax, 1), %xmm3

	addpd	%xmm0, %xmm4
	addpd	%xmm1, %xmm5
	addpd	%xmm2, %xmm6
	addpd	%xmm3, %xmm7
#else
	mulpd	%xmm3, %xmm4
	mulpd	%xmm3, %xmm5
	mulpd	%xmm3, %xmm6
	mulpd	%xmm3, %xmm7
#endif

	movsd	%xmm4, 0 * SIZE(%esi)
	movhpd	%xmm4, 1 * SIZE(%esi)
	movsd	%xmm5, 0 * SIZE(%esi, LDC, 1)
	movhpd	%xmm5, 1 * SIZE(%esi, LDC, 1)
	movsd	%xmm6, 0 * SIZE(%esi, LDC, 2)
	movhpd	%xmm6, 1 * SIZE(%esi, LDC, 2)
	movsd	%xmm7, 0 * SIZE(%esi, %eax, 1)
	movhpd	%xmm7, 1 * SIZE(%esi, %eax, 1)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 8), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif

	addl	$2 * SIZE, %esi	# coffset += 2
	decl	%ebx		# i --
	jg	.L11
	ALIGN_4

.L20:
	movl	M, %ebx
	testl	$1, %ebx	# m & 1: one row remaining?
	jle	.L29

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 1), AA
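
/* For reference only: a minimal C model (kept under "#if 0" so it
   never reaches the assembler) of the update the 2x4 microkernel
   above computes on the non-TRMM path,
   C(0..1, 0..3) += alpha * A(2 x k) * B(k x 4).
   The function name gemm_ref_2x4 and the packed a[]/b[] layouts are
   illustrative assumptions, not part of the original source.        */
#if 0
static void gemm_ref_2x4(long k, double alpha,
                         const double *a,   /* packed: 2 values per k-step */
                         const double *b,   /* packed: 4 values per k-step */
                         double *c, long ldc)
{
    /* acc[i][j] mirrors %xmm4..%xmm7: one register per C column,
       holding two C rows each. */
    double acc[2][4] = {{0.0}};
    long l, i, j;

    for (l = 0; l < k; l++)                 /* the KERNEL1..KERNEL8 body */
        for (j = 0; j < 4; j++)
            for (i = 0; i < 2; i++)
                acc[i][j] += a[2 * l + i] * b[4 * l + j];

    for (j = 0; j < 4; j++)                 /* the .L18 store sequence   */
        for (i = 0; i < 2; i++)
            c[i + j * ldc] += alpha * acc[i][j];
}
#endif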