gemm_kernel_2x4_sse3.s
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define STACK	16
#define ARGS	16

#define M	 4 + STACK + ARGS(%esp)
#define N	 8 + STACK + ARGS(%esp)
#define K	12 + STACK + ARGS(%esp)
#define ALPHA	16 + STACK + ARGS(%esp)
#define A	24 + STACK + ARGS(%esp)
#define ARG_B	28 + STACK + ARGS(%esp)
#define C	32 + STACK + ARGS(%esp)
#define ARG_LDC	36 + STACK + ARGS(%esp)
#define OFFSET	40 + STACK + ARGS(%esp)

#define J	 0 + STACK(%esp)
#define BX	 4 + STACK(%esp)
#define KK	 8 + STACK(%esp)
#define KKK	12 + STACK(%esp)

#ifdef PENTIUM4
#define PREFETCH_R   (8 * 4)
#define PREFETCH     prefetcht1
#define PREFETCHSIZE 84
#endif

#ifdef PENTIUMM
#define PREFETCH_R   (8 * 4)
#define PREFETCH     prefetcht1
#define PREFETCHSIZE 84
#endif

#define AA	%edx
#define BB	%ecx
#define LDC	%ebp
#define B	%edi

#define KERNEL1(address) \
	mulpd	%xmm0, %xmm2; \
	PREFETCH (PREFETCHSIZE + 0) * SIZE + (address) * 1 * SIZE(AA); \
	addpd	%xmm2, %xmm4; \
	movddup	 1 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movddup	 2 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm6; \
	movddup	 3 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	movapd	 2 * SIZE + (address) * 1 * SIZE(AA), %xmm0; \
	addpd	%xmm2, %xmm7; \
	movddup	 4 * SIZE + (address) * 2 * SIZE(BB), %xmm2

#define KERNEL2(address) \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movddup	 5 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movddup	 6 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	addpd	%xmm2, %xmm6; \
	movddup	 7 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm0, %xmm2; \
	movapd	 4 * SIZE + (address) * 1 * SIZE(AA), %xmm0; \
	addpd	%xmm2, %xmm7; \
	movddup	16 * SIZE + (address) * 2 * SIZE(BB), %xmm2

#define KERNEL3(address) \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movddup	 9 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movddup	10 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm6; \
	movddup	11 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	movapd	 6 * SIZE + (address) * 1 * SIZE(AA), %xmm0; \
	addpd	%xmm3, %xmm7; \
	movddup	12 * SIZE + (address) * 2 * SIZE(BB), %xmm3

#define KERNEL4(address) \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movddup	13 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movddup	14 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	addpd	%xmm3, %xmm6; \
	movddup	15 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm0, %xmm3; \
	movapd	16 * SIZE + (address) * 1 * SIZE(AA), %xmm0; \
	addpd	%xmm3, %xmm7; \
	movddup	24 * SIZE + (address) * 2 * SIZE(BB), %xmm3

#define KERNEL5(address) \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movddup	17 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movddup	18 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm6; \
	movddup	19 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	movapd	10 * SIZE + (address) * 1 * SIZE(AA), %xmm1; \
	addpd	%xmm2, %xmm7

#define KERNEL6(address) \
	movddup	20 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm4; \
	movddup	21 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm5; \
	movddup	22 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	addpd	%xmm2, %xmm6; \
	movddup	23 * SIZE + (address) * 2 * SIZE(BB), %xmm2; \
	mulpd	%xmm1, %xmm2; \
	movapd	12 * SIZE + (address) * 1 * SIZE(AA), %xmm1; \
	addpd	%xmm2, %xmm7; \
	movddup	32 * SIZE + (address) * 2 * SIZE(BB), %xmm2

#define KERNEL7(address) \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movddup	25 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movddup	26 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm6; \
	movddup	27 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	movapd	14 * SIZE + (address) * 1 * SIZE(AA), %xmm1; \
	addpd	%xmm3, %xmm7; \
	movddup	28 * SIZE + (address) * 2 * SIZE(BB), %xmm3

#define KERNEL8(address) \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm4; \
	movddup	29 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm5; \
	movddup	30 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	addpd	%xmm3, %xmm6; \
	movddup	31 * SIZE + (address) * 2 * SIZE(BB), %xmm3; \
	mulpd	%xmm1, %xmm3; \
	movapd	24 * SIZE + (address) * 1 * SIZE(AA), %xmm1; \
	addpd	%xmm3, %xmm7; \
	movddup	40 * SIZE + (address) * 2 * SIZE(BB), %xmm3

	PROLOGUE

	subl	$ARGS, %esp

	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%ebx

	PROFCODE

	movl	ARG_B,   B
	movl	ARG_LDC, LDC

#ifdef TRMMKERNEL
	movl	OFFSET, %eax
#ifndef LEFT
	negl	%eax
#endif
	movl	%eax, KK
#endif

	leal	(, LDC, SIZE), LDC

	movl	N,  %eax
	sarl	$2, %eax
	movl	%eax, J
	jle	.L30
	ALIGN_2

.L10:
#if defined(TRMMKERNEL) && defined(LEFT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

	movl	K, %eax
	sall	$BASE_SHIFT + 2, %eax
	leal	(B, %eax), %eax
	movl	%eax, BX

	movl	C, %esi		# coffset = c
	movl	A, AA		# aoffset = a

	movl	M,  %ebx
	sarl	$1, %ebx	# i = (m >> 1)
	jle	.L20
	ALIGN_4

.L11:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	movl	B, BB
#else
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(B,  %eax, 4), BB
#endif

	movl	BX, %eax
	prefetcht2   0 * SIZE(%eax)
	subl	$-8 * SIZE, BX

	movapd	 0 * SIZE(AA), %xmm0
	pxor	%xmm4, %xmm4
	movapd	 8 * SIZE(AA), %xmm1
	pxor	%xmm5, %xmm5
	movddup	 0 * SIZE(BB), %xmm2
	pxor	%xmm6, %xmm6
	movddup	 8 * SIZE(BB), %xmm3
	pxor	%xmm7, %xmm7

	leal	(LDC, LDC, 2), %eax

#ifdef PENTIUM4
	prefetchnta	3 * SIZE(%esi)
	prefetchnta	3 * SIZE(%esi, LDC, 1)
	prefetchnta	3 * SIZE(%esi, LDC, 2)
	prefetchnta	3 * SIZE(%esi, %eax, 1)
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$2, %eax
#else
	addl	$4, %eax
#endif
	movl	%eax, KKK
#endif

#ifdef CORE_PRESCOTT
	andl	$-8, %eax
	sall	$4, %eax
	je	.L15

.L1X:
	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	cmpl	$128 * 1, %eax
	jle	.L12
	KERNEL1(16 * 1)
	KERNEL2(16 * 1)
	KERNEL3(16 * 1)
	KERNEL4(16 * 1)
	KERNEL5(16 * 1)
	KERNEL6(16 * 1)
	KERNEL7(16 * 1)
	KERNEL8(16 * 1)
	cmpl	$128 * 2, %eax
	jle	.L12
	KERNEL1(16 * 2)
	KERNEL2(16 * 2)
	KERNEL3(16 * 2)
	KERNEL4(16 * 2)
	KERNEL5(16 * 2)
	KERNEL6(16 * 2)
	KERNEL7(16 * 2)
	KERNEL8(16 * 2)
	cmpl	$128 * 3, %eax
	jle	.L12
	KERNEL1(16 * 3)
	KERNEL2(16 * 3)
	KERNEL3(16 * 3)
	KERNEL4(16 * 3)
	KERNEL5(16 * 3)
	KERNEL6(16 * 3)
	KERNEL7(16 * 3)
	KERNEL8(16 * 3)
	cmpl	$128 * 4, %eax
	jle	.L12
	KERNEL1(16 * 4)
	KERNEL2(16 * 4)
	KERNEL3(16 * 4)
	KERNEL4(16 * 4)
	KERNEL5(16 * 4)
	KERNEL6(16 * 4)
	KERNEL7(16 * 4)
	KERNEL8(16 * 4)
	cmpl	$128 * 5, %eax
	jle	.L12
	KERNEL1(16 * 5)
	KERNEL2(16 * 5)
	KERNEL3(16 * 5)
	KERNEL4(16 * 5)
	KERNEL5(16 * 5)
	KERNEL6(16 * 5)
	KERNEL7(16 * 5)
	KERNEL8(16 * 5)
	cmpl	$128 * 6, %eax
	jle	.L12
	KERNEL1(16 * 6)
	KERNEL2(16 * 6)
	KERNEL3(16 * 6)
	KERNEL4(16 * 6)
	KERNEL5(16 * 6)
	KERNEL6(16 * 6)
	KERNEL7(16 * 6)
	KERNEL8(16 * 6)
	cmpl	$128 * 7, %eax
	jle	.L12
	KERNEL1(16 * 7)
	KERNEL2(16 * 7)
	KERNEL3(16 * 7)
	KERNEL4(16 * 7)
	KERNEL5(16 * 7)
	KERNEL6(16 * 7)
	KERNEL7(16 * 7)
	KERNEL8(16 * 7)
#if 1
	cmpl	$128 * 8, %eax
	jle	.L12
	KERNEL1(16 * 8)
	KERNEL2(16 * 8)
	KERNEL3(16 * 8)
	KERNEL4(16 * 8)
	KERNEL5(16 * 8)
	KERNEL6(16 * 8)
	KERNEL7(16 * 8)
	KERNEL8(16 * 8)
	cmpl	$128 * 9, %eax
	jle	.L12
	KERNEL1(16 * 9)
	KERNEL2(16 * 9)
	KERNEL3(16 * 9)
	KERNEL4(16 * 9)
	KERNEL5(16 * 9)
	KERNEL6(16 * 9)
	KERNEL7(16 * 9)
	KERNEL8(16 * 9)
	cmpl	$128 * 10, %eax
	jle	.L12
	KERNEL1(16 * 10)
	KERNEL2(16 * 10)
	KERNEL3(16 * 10)
	KERNEL4(16 * 10)
	KERNEL5(16 * 10)
	KERNEL6(16 * 10)
	KERNEL7(16 * 10)
	KERNEL8(16 * 10)
	cmpl	$128 * 11, %eax
	jle	.L12
	KERNEL1(16 * 11)
	KERNEL2(16 * 11)
	KERNEL3(16 * 11)
	KERNEL4(16 * 11)
	KERNEL5(16 * 11)
	KERNEL6(16 * 11)
	KERNEL7(16 * 11)
	KERNEL8(16 * 11)
	cmpl	$128 * 12, %eax
	jle	.L12
	KERNEL1(16 * 12)
	KERNEL2(16 * 12)
	KERNEL3(16 * 12)
	KERNEL4(16 * 12)
	KERNEL5(16 * 12)
	KERNEL6(16 * 12)
	KERNEL7(16 * 12)
	KERNEL8(16 * 12)
	cmpl	$128 * 13, %eax
	jle	.L12
	KERNEL1(16 * 13)
	KERNEL2(16 * 13)
	KERNEL3(16 * 13)
	KERNEL4(16 * 13)
	KERNEL5(16 * 13)
	KERNEL6(16 * 13)
	KERNEL7(16 * 13)
	KERNEL8(16 * 13)
	cmpl	$128 * 14, %eax
	jle	.L12
	KERNEL1(16 * 14)
	KERNEL2(16 * 14)
	KERNEL3(16 * 14)
	KERNEL4(16 * 14)
	KERNEL5(16 * 14)
	KERNEL6(16 * 14)
	KERNEL7(16 * 14)
	KERNEL8(16 * 14)
	cmpl	$128 * 15, %eax
	jle	.L12
	KERNEL1(16 * 15)
	KERNEL2(16 * 15)
	KERNEL3(16 * 15)
	KERNEL4(16 * 15)
	KERNEL5(16 * 15)
	KERNEL6(16 * 15)
	KERNEL7(16 * 15)
	KERNEL8(16 * 15)
#else
	addl	$32 * 4 * SIZE, AA
	addl	$32 * 8 * SIZE, BB
	subl	$128 * 8, %eax
	jg	.L1X
#endif

.L12:
	leal	(AA, %eax, 1), AA	# * 16
	leal	(BB, %eax, 2), BB	# * 64
#else
	sarl	$3, %eax
	je	.L15
	ALIGN_4

.L12:
	mulpd	%xmm0, %xmm2
	PREFETCH (PREFETCHSIZE + 0) * SIZE(AA)
	addpd	%xmm2, %xmm4
	movddup	 1 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm5
	movddup	 2 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm6
	movddup	 3 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	movapd	 2 * SIZE(AA), %xmm0
	addpd	%xmm2, %xmm7
	movddup	 4 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm4
	movddup	 5 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm5
	movddup	 6 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	addpd	%xmm2, %xmm6
	movddup	 7 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm2
	movapd	 4 * SIZE(AA), %xmm0
	addpd	%xmm2, %xmm7
	movddup	16 * SIZE(BB), %xmm2
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm4
	movddup	 9 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm5
	movddup	10 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm6
	movddup	11 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	movapd	 6 * SIZE(AA), %xmm0
	addpd	%xmm3, %xmm7
	movddup	12 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm4
	movddup	13 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm5
	movddup	14 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	addpd	%xmm3, %xmm6
	movddup	15 * SIZE(BB), %xmm3
	mulpd	%xmm0, %xmm3
	movapd	16 * SIZE(AA), %xmm0
	addpd	%xmm3, %xmm7
	movddup	24 * SIZE(BB), %xmm3
	mulpd	%xmm1, %xmm2
	addpd	%xmm2, %xmm4
	movddup	17 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm2
	addpd	%xmm2, %xmm5
	movddup	18 * SIZE(BB), %xmm2
	mulpd	%xmm1, %xmm2
	addpd	%xmm2, %xmm6
	movddup	19 * SIZE(BB), %xmm2
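
/* The listing above breaks off inside the plain (non-Prescott) inner loop;
   the alpha writeback to C and the edge-case paths (.L15, .L20, .L30) are
   missing from this page.  For orientation, the following is a minimal C
   sketch of what one call of this 2x4 micro-kernel computes.  The function
   name, argument names, and the final C += alpha * A * B update are
   assumptions for illustration (the writeback falls in the truncated part);
   the accumulator layout mirrors the assembly, where xmm4..xmm7 each hold
   one two-double column of the 2x4 block. */

#if 0	/* reference sketch only, not assembled */
#include <stddef.h>

static void dgemm_kernel_2x4_ref(size_t k, double alpha,
                                 const double *a,   /* packed 2 x k panel of A */
                                 const double *b,   /* packed k x 4 panel of B */
                                 double *c,         /* column-major C block    */
                                 size_t ldc)
{
    /* Eight scalar accumulators model xmm4..xmm7 (two doubles each). */
    double c00 = 0, c10 = 0, c01 = 0, c11 = 0;
    double c02 = 0, c12 = 0, c03 = 0, c13 = 0;

    for (size_t l = 0; l < k; l++) {
        double a0 = a[0], a1 = a[1];          /* movapd: two rows of A        */
        c00 += a0 * b[0]; c10 += a1 * b[0];   /* movddup b0; mulpd; addpd -> xmm4 */
        c01 += a0 * b[1]; c11 += a1 * b[1];   /* -> xmm5 */
        c02 += a0 * b[2]; c12 += a1 * b[2];   /* -> xmm6 */
        c03 += a0 * b[3]; c13 += a1 * b[3];   /* -> xmm7 */
        a += 2;                               /* 2 doubles of A per k step    */
        b += 4;                               /* 4 doubles of B per k step    */
    }

    /* Assumed writeback: C += alpha * (A * B), one 2-element column at a time. */
    c[0 * ldc + 0] += alpha * c00;  c[0 * ldc + 1] += alpha * c10;
    c[1 * ldc + 0] += alpha * c01;  c[1 * ldc + 1] += alpha * c11;
    c[2 * ldc + 0] += alpha * c02;  c[2 * ldc + 1] += alpha * c12;
    c[3 * ldc + 0] += alpha * c03;  c[3 * ldc + 1] += alpha * c13;
}
#endif

/* Each KERNEL1..KERNEL8 macro is one k step of this loop, so a macro group
   covers eight k iterations (hence the sarl $3 on the trip count in the
   non-Prescott path).  The next movddup/movapd loads are hoisted ahead of the
   addpd that consumes the previous product to hide load latency. */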