gemm_kernel_4x4_barcelona.s
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

/* Incoming arguments (System V ABI) and working-register roles. */
#define OLD_M	%rdi
#define OLD_N	%rsi
#define M	%r13
#define N	%r14
#define K	%rdx
#define A	%rcx
#define B	%r8
#define C	%r9
#define LDC	%r10

#define I	%r11
#define AO	%rdi
#define BO	%rsi
#define CO1	%r15
#define CO2	%r12
#define BB	%rbp
#define J	%rbx

#ifndef WINDOWS_ABI
#define STACKSIZE	96
#define ALPHA		48(%rsp)
#define OFFSET		56(%rsp)
#define KK		64(%rsp)
#define KKK		72(%rsp)
#else
#define STACKSIZE	256
#define OLD_A		40 + STACKSIZE(%rsp)
#define OLD_B		48 + STACKSIZE(%rsp)
#define OLD_C		56 + STACKSIZE(%rsp)
#define OLD_LDC		64 + STACKSIZE(%rsp)
#define OLD_OFFSET	72 + STACKSIZE(%rsp)
#define ALPHA		224(%rsp)
#define OFFSET		232(%rsp)
#define KK		240(%rsp)
#define KKK		248(%rsp)
#endif

#define PREFETCH     prefetch
#define PREFETCHSIZE (8 * 7 + 3)

/* Map aligned/scalar moves onto their unaligned equivalents. */
#define movlpd	movsd
#define movapd	movups
#define movupd	movups

/*
 * KERNEL1..KERNEL8: one k-iteration each of the 4x4 rank-1 update,
 * unrolled 8x.  xmm8..xmm11 accumulate rows 0-1 of C columns 0..3,
 * xmm12..xmm15 rows 2-3.  A values travel in xmm0/xmm4/xmm6/xmm7,
 * B values are broadcast with movddup into xmm1/xmm3/xmm5, and loads
 * are software-pipelined ahead of the multiplies that consume them.
 */
#define KERNEL1(xx) \
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm1 ;\
	addpd	%xmm0, %xmm8 ;\
	PREFETCH (PREFETCHSIZE +  0) * SIZE(AO, %rax, 4) ;\
	movapd	%xmm2, %xmm0 ;\
	addpd	%xmm1, %xmm12 ;\
	movddup	-14 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm0, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	-13 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm1 ;\
	addpd	%xmm0, %xmm10 ;\
	movapd	-12 * SIZE(AO, %rax, 4), %xmm0 ;\
	addpd	%xmm1, %xmm14 ;\
	movddup	-12 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	-11 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm0, %xmm2

#define KERNEL2(xx) \
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm1 ;\
	addpd	%xmm0, %xmm8 ;\
	movapd	%xmm2, %xmm0 ;\
	addpd	%xmm1, %xmm12 ;\
	movddup	-10 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm0, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	 -9 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm1 ;\
	addpd	%xmm0, %xmm10 ;\
	addpd	%xmm1, %xmm14 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-10 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	 -7 * SIZE(BO, %rax, 4), %xmm3 ;\
/**/	movddup	     (BO, %rax, 4), %xmm1 ;\
	movapd	%xmm4, %xmm2

#define KERNEL3(xx) \
	mulpd	%xmm5, %xmm4 ;\
	mulpd	 -6 * SIZE(AO, %rax, 4), %xmm5 ;\
	addpd	%xmm4, %xmm8 ;\
	movapd	%xmm2, %xmm4 ;\
	addpd	%xmm5, %xmm12 ;\
	movddup	 -6 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 -6 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm4, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	 -5 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm4 ;\
	mulpd	 -6 * SIZE(AO, %rax, 4), %xmm5 ;\
	addpd	%xmm4, %xmm10 ;\
	movapd	 -4 * SIZE(AO, %rax, 4), %xmm4 ;\
	addpd	%xmm5, %xmm14 ;\
	movddup	 -4 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 -6 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	 -3 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm4, %xmm2

#define KERNEL4(xx) \
	mulpd	%xmm5, %xmm4 ;\
	mulpd	 -2 * SIZE(AO, %rax, 4), %xmm5 ;\
	addpd	%xmm4, %xmm8 ;\
	movapd	%xmm2, %xmm4 ;\
	addpd	%xmm5, %xmm12 ;\
	movddup	 -2 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 -2 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm4, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	 -1 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm4 ;\
	mulpd	 -2 * SIZE(AO, %rax, 4), %xmm5 ;\
/***/	movapd	     (AO, %rax, 4), %xmm6 ;\
	addpd	%xmm4, %xmm10 ;\
	addpd	%xmm5, %xmm14 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 -2 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	  1 * SIZE(BO, %rax, 4), %xmm3 ;\
	movddup	  8 * SIZE(BO, %rax, 4), %xmm5 ;\
	movapd	%xmm6, %xmm2

#define KERNEL5(xx) \
	mulpd	%xmm1, %xmm6 ;\
	mulpd	  2 * SIZE(AO, %rax, 4), %xmm1 ;\
	addpd	%xmm6, %xmm8 ;\
	movapd	%xmm2, %xmm6 ;\
	addpd	%xmm1, %xmm12 ;\
	movddup	  2 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	  2 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
/**/	movapd	  8 * SIZE(AO, %rax, 4), %xmm7 ;\
	movapd	%xmm6, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	  3 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm6 ;\
	mulpd	  2 * SIZE(AO, %rax, 4), %xmm1 ;\
	addpd	%xmm6, %xmm10 ;\
	movapd	  4 * SIZE(AO, %rax, 4), %xmm6 ;\
	addpd	%xmm1, %xmm14 ;\
	movddup	  4 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	  2 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	  5 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm6, %xmm2

#define KERNEL6(xx) \
	mulpd	%xmm1, %xmm6 ;\
	mulpd	  6 * SIZE(AO, %rax, 4), %xmm1 ;\
	addpd	%xmm6, %xmm8 ;\
	movapd	%xmm2, %xmm6 ;\
	addpd	%xmm1, %xmm12 ;\
	movddup	  6 * SIZE(BO, %rax, 4), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	  6 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm6, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	  7 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm1, %xmm6 ;\
	mulpd	  6 * SIZE(AO, %rax, 4), %xmm1 ;\
	addpd	%xmm6, %xmm10 ;\
/***/	movapd	 16 * SIZE(AO, %rax, 4), %xmm0 ;\
	addpd	%xmm1, %xmm14 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	  6 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	  9 * SIZE(BO, %rax, 4), %xmm3 ;\
	movddup	 16 * SIZE(BO, %rax, 4), %xmm1 ;\
	movapd	%xmm7, %xmm2

#define KERNEL7(xx) \
	mulpd	%xmm5, %xmm7 ;\
	mulpd	 10 * SIZE(AO, %rax, 4), %xmm5 ;\
	addpd	%xmm7, %xmm8 ;\
	movapd	%xmm2, %xmm7 ;\
	addpd	%xmm5, %xmm12 ;\
	movddup	 10 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 10 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm7, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	 11 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm7 ;\
	mulpd	 10 * SIZE(AO, %rax, 4), %xmm5 ;\
	addpd	%xmm7, %xmm10 ;\
	movapd	 12 * SIZE(AO, %rax, 4), %xmm7 ;\
	addpd	%xmm5, %xmm14 ;\
	movddup	 12 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 10 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
/**/	movapd	 24 * SIZE(AO, %rax, 4), %xmm4 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	 13 * SIZE(BO, %rax, 4), %xmm3 ;\
	movapd	%xmm7, %xmm2

#define KERNEL8(xx) \
	mulpd	%xmm5, %xmm7 ;\
	mulpd	 14 * SIZE(AO, %rax, 4), %xmm5 ;\
	addpd	%xmm7, %xmm8 ;\
	movapd	%xmm2, %xmm7 ;\
	addpd	%xmm5, %xmm12 ;\
	movddup	 14 * SIZE(BO, %rax, 4), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 14 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm7, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	 15 * SIZE(BO, %rax, 4), %xmm3 ;\
	mulpd	%xmm5, %xmm7 ;\
	mulpd	 14 * SIZE(AO, %rax, 4), %xmm5 ;\
	addpd	%xmm7, %xmm10 ;\
	addpd	%xmm5, %xmm14 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 14 * SIZE(AO, %rax, 4), %xmm3 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	 17 * SIZE(BO, %rax, 4), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	movddup	 24 * SIZE(BO, %rax, 4), %xmm5 ;\
	movapd	%xmm0, %xmm2 ;\
	addq	$8 * SIZE, %rax

/*
 * KERNEL_SUB1..KERNEL_SUB4 handle a four-iteration chunk of the
 * k remainder; addressing is relative to AO/BO without %rax scaling.
 */
#define KERNEL_SUB1(xx) \
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-14 * SIZE(AO), %xmm1 ;\
	addpd	%xmm0, %xmm8 ;\
	movapd	%xmm2, %xmm0 ;\
	addpd	%xmm1, %xmm12 ;\
	movddup	-14 * SIZE(BO), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-14 * SIZE(AO), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm0, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	-13 * SIZE(BO), %xmm3 ;\
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-14 * SIZE(AO), %xmm1 ;\
	addpd	%xmm0, %xmm10 ;\
	movapd	-12 * SIZE(AO), %xmm0 ;\
	addpd	%xmm1, %xmm14 ;\
	movddup	-12 * SIZE(BO), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-14 * SIZE(AO), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	-11 * SIZE(BO), %xmm3 ;\
	movapd	%xmm0, %xmm2

#define KERNEL_SUB2(xx) \
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-10 * SIZE(AO), %xmm1 ;\
	addpd	%xmm0, %xmm8 ;\
	movapd	%xmm2, %xmm0 ;\
	addpd	%xmm1, %xmm12 ;\
	movddup	-10 * SIZE(BO), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-10 * SIZE(AO), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm0, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	 -9 * SIZE(BO), %xmm3 ;\
	mulpd	%xmm1, %xmm0 ;\
	mulpd	-10 * SIZE(AO), %xmm1 ;\
	addpd	%xmm0, %xmm10 ;\
	movapd	     (AO), %xmm0 ;\
	addpd	%xmm1, %xmm14 ;\
	movddup	     (BO), %xmm1 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	-10 * SIZE(AO), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	 -7 * SIZE(BO), %xmm3 ;\
	movapd	%xmm4, %xmm2

#define KERNEL_SUB3(xx) \
	mulpd	%xmm5, %xmm4 ;\
	mulpd	 -6 * SIZE(AO), %xmm5 ;\
	addpd	%xmm4, %xmm8 ;\
	movapd	%xmm2, %xmm4 ;\
	addpd	%xmm5, %xmm12 ;\
	movddup	 -6 * SIZE(BO), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 -6 * SIZE(AO), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm4, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	 -5 * SIZE(BO), %xmm3 ;\
	mulpd	%xmm5, %xmm4 ;\
	mulpd	 -6 * SIZE(AO), %xmm5 ;\
	addpd	%xmm4, %xmm10 ;\
	movapd	 -4 * SIZE(AO), %xmm4 ;\
	addpd	%xmm5, %xmm14 ;\
	movddup	 -4 * SIZE(BO), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 -6 * SIZE(AO), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	 -3 * SIZE(BO), %xmm3 ;\
	movapd	%xmm4, %xmm2

#define KERNEL_SUB4(xx) \
	mulpd	%xmm5, %xmm4 ;\
	mulpd	 -2 * SIZE(AO), %xmm5 ;\
	addpd	%xmm4, %xmm8 ;\
	movapd	%xmm2, %xmm4 ;\
	addpd	%xmm5, %xmm12 ;\
	movddup	 -2 * SIZE(BO), %xmm5 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 -2 * SIZE(AO), %xmm3 ;\
	addpd	%xmm2, %xmm9 ;\
	movapd	%xmm4, %xmm2 ;\
	addpd	%xmm3, %xmm13 ;\
	movddup	 -1 * SIZE(BO), %xmm3 ;\
	mulpd	%xmm5, %xmm4 ;\
	mulpd	 -2 * SIZE(AO), %xmm5 ;\
	addpd	%xmm4, %xmm10 ;\
	addpd	%xmm5, %xmm14 ;\
	mulpd	%xmm3, %xmm2 ;\
	mulpd	 -2 * SIZE(AO), %xmm3 ;\
	addpd	%xmm2, %xmm11 ;\
	addpd	%xmm3, %xmm15 ;\
	movddup	  1 * SIZE(BO), %xmm3 ;\
	movapd	%xmm0, %xmm2

	PROLOGUE
	PROFCODE

	/* Save callee-saved registers and fetch the arguments. */
	subq	$STACKSIZE, %rsp
	movq	%rbx,   (%rsp)
	movq	%rbp,  8(%rsp)
	movq	%r12, 16(%rsp)
	movq	%r13, 24(%rsp)
	movq	%r14, 32(%rsp)
	movq	%r15, 40(%rsp)

#ifdef WINDOWS_ABI
	movq	%rdi,    48(%rsp)
	movq	%rsi,    56(%rsp)
	movups	%xmm6,   64(%rsp)
	movups	%xmm7,   80(%rsp)
	movups	%xmm8,   96(%rsp)
	movups	%xmm9,  112(%rsp)
	movups	%xmm10, 128(%rsp)
	movups	%xmm11, 144(%rsp)
	movups	%xmm12, 160(%rsp)
	movups	%xmm13, 176(%rsp)
	movups	%xmm14, 192(%rsp)
	movups	%xmm15, 208(%rsp)

	movq	ARG1,      OLD_M
	movq	ARG2,      OLD_N
	movq	ARG3,      K
	movq	OLD_A,     A
	movq	OLD_B,     B
	movq	OLD_C,     C
	movq	OLD_LDC,   LDC
#ifdef TRMMKERNEL
	movsd	OLD_OFFSET, %xmm12
#endif
	movaps	%xmm3, %xmm0
#else
	movq	STACKSIZE +  8(%rsp), LDC
#ifdef TRMMKERNEL
	movsd	STACKSIZE + 16(%rsp), %xmm12
#endif
#endif

	movq	OLD_M, M
	movq	OLD_N, N

	/* Bias A and B by +16*SIZE so the kernels can use signed offsets. */
	subq	$-16 * SIZE, A
	subq	$-16 * SIZE, B

	movlpd	%xmm0, ALPHA

	salq	$BASE_SHIFT, LDC

#ifdef TRMMKERNEL
	movsd	%xmm12, OFFSET
	movsd	%xmm12, KK
#ifndef LEFT
	negq	KK
#endif
#endif

	movq	N,  J
	sarq	$2, J			# j = (n >> 2)
	jle	.L40
	ALIGN_4

.L01:
	movq	C, CO1			# coffset1 = c
	leaq	(C, LDC, 1), CO2	# coffset2 = c + ldc

#if defined(TRMMKERNEL) && defined(LEFT)
	movq	OFFSET, %rax
	movq	%rax, KK
#endif

	movq	A, AO			# aoffset = a

	movq	K, %rax
	salq	$BASE_SHIFT + 2, %rax
	leaq	(B, %rax), BB

	movq	M,  I
	sarq	$2, I			# i = (m >> 2)
	jle	.L20
	ALIGN_4

.L11:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movq	B, BO
#else
	movq	KK, %rax
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
	leaq	(B,  %rax, 4), BO
#endif

	/* Preload A/B, clear the 16 accumulators, prefetch C and B. */
	movapd	-16 * SIZE(AO), %xmm0
	movddup	-16 * SIZE(BO), %xmm1
	pxor	%xmm8, %xmm8
	movddup	-15 * SIZE(BO), %xmm3
	pxor	%xmm9, %xmm9
	movapd	 -8 * SIZE(AO), %xmm4
	pxor	%xmm10, %xmm10
	movddup	 -8 * SIZE(BO), %xmm5
	pxor	%xmm11, %xmm11

	prefetchw	3 * SIZE(CO1)
	pxor	%xmm12, %xmm12
	prefetchw	3 * SIZE(CO2)
	pxor	%xmm13, %xmm13
	prefetchw	3 * SIZE(CO1, LDC, 2)
	pxor	%xmm14, %xmm14
	prefetchw	3 * SIZE(CO2, LDC, 2)
	pxor	%xmm15, %xmm15
	movapd	%xmm0, %xmm2

	prefetch	-16 * SIZE(BB)
	prefetch	 -8 * SIZE(BB)
	subq	$-16 * SIZE, BB

#ifndef TRMMKERNEL
	movq	K, %rax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movq	K, %rax
	subq	KK, %rax
	movq	%rax, KKK
#else
	movq	KK, %rax
#ifdef LEFT
	addq	$4, %rax
#else
	addq	$4, %rax
#endif
	movq	%rax, KKK
#endif

	/* %rax counts up from -(k & ~7); loop exits when it reaches zero. */
	andq	$-8, %rax
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
	leaq	(BO, %rax, 4), BO
	negq	%rax
	je,pn	.L15
	ALIGN_4

.L12:
	/* Main loop: 8 k-steps per group, exit test only every 8 steps. */
	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	je,pn	.L15

	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	je,pn	.L15

	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	je,pn	.L15

	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	je,pn	.L15

	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	je,pn	.L15

	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	je,pn	.L15

	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	je,pn	.L15

	KERNEL1(16 * 0)
	KERNEL2(16 * 0)
	KERNEL3(16 * 0)
	KERNEL4(16 * 0)
	KERNEL5(16 * 0)
	KERNEL6(16 * 0)
	KERNEL7(16 * 0)
	KERNEL8(16 * 0)
	jl,pt	.L12
	ALIGN_4

.L15:
	/* Remainder: handle k & 4 with the SUB kernels. */
	movddup	ALPHA, %xmm7
#ifndef TRMMKERNEL
	movq	K, %rax
#else
	movq	KKK, %rax
#endif
	testq	$4, %rax
	je	.L16
	ALIGN_4

	KERNEL_SUB1(16 * 0)
	KERNEL_SUB2(16 * 0)
	KERNEL_SUB3(16 * 0)
	KERNEL_SUB4(16 * 0)

	subq	$-16 * SIZE, BO
	subq	$-16 * SIZE, AO
	ALIGN_4

.L16:
#ifndef TRMMKERNEL
	movq	K, %rax
#else
	movq	KKK, %rax
#endif
	andq	$3, %rax		# if (k & 3)
	je	.L19

	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
	leaq	(BO, %rax, 4), BO
	negq	%rax
	ALIGN_4

.L17:
	/* Remainder: one k-step per pass for the last k & 3 iterations. */
	mulpd	%xmm1, %xmm0
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm1
	addpd	%xmm0, %xmm8
	movapd	%xmm2, %xmm0
	addpd	%xmm1, %xmm12
	movddup	-14 * SIZE(BO, %rax, 4), %xmm1
	mulpd	%xmm3, %xmm2
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm3
	addpd	%xmm2, %xmm9
	movapd	%xmm0, %xmm2
	addpd	%xmm3, %xmm13
	movddup	-13 * SIZE(BO, %rax, 4), %xmm3
	mulpd	%xmm1, %xmm0
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm1
	addpd	%xmm0, %xmm10
	movapd	-12 * SIZE(AO, %rax, 4), %xmm0
	addpd	%xmm1, %xmm14
	movddup	-12 * SIZE(BO, %rax, 4), %xmm1
	mulpd	%xmm3, %xmm2
	mulpd	-14 * SIZE(AO, %rax, 4), %xmm3
	addpd	%xmm2, %xmm11
	addpd	%xmm3, %xmm15
	movddup	-11 * SIZE(BO, %rax, 4), %xmm3
	movapd	%xmm0, %xmm2

	addq	$SIZE, %rax
	jl	.L17
	ALIGN_4

.L19:
	/* Writeback: load C, scale the accumulators by alpha. */
#ifndef TRMMKERNEL
	movupd	         (CO1), %xmm0
	movupd	2 * SIZE(CO1), %xmm1
#endif

	mulpd	%xmm7, %xmm8
	mulpd	%xmm7, %xmm12

#ifndef TRMMKERNEL
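
For reference, the arithmetic the listing implements (the source breaks off mid-way through the .L19 alpha-scale/writeback path, so the tail is missing above) is the standard 4x4 double-precision GEMM micro-kernel: k rank-1 updates accumulated in xmm8..xmm15, then C += alpha * acc. Below is a minimal C sketch under the usual GotoBLAS packed-panel assumptions; the function name, argument order, and layout details are illustrative, not the library's API.

#include <stddef.h>

/* Reference model of the 4x4 micro-kernel: A is a packed 4 x k panel
 * (4 consecutive doubles per k-step), B a packed k x 4 panel (likewise),
 * C a 4x4 block of a column-major matrix with leading dimension ldc.
 * acc[j][i] plays the role of the xmm8..xmm15 accumulators. */
static void micro_kernel_4x4(size_t k, double alpha,
                             const double *A, const double *B,
                             double *C, size_t ldc)
{
    double acc[4][4] = {{0.0}};

    for (size_t p = 0; p < k; p++)          /* one KERNELn invocation    */
        for (size_t j = 0; j < 4; j++)      /* b[j] broadcast (movddup)  */
            for (size_t i = 0; i < 4; i++)  /* a[i] pairs (mulpd/addpd)  */
                acc[j][i] += A[4 * p + i] * B[4 * p + j];

    for (size_t j = 0; j < 4; j++)          /* .L19 writeback            */
        for (size_t i = 0; i < 4; i++)
            C[j * ldc + i] += alpha * acc[j][i];
}

The assembly earns its speed around this triple loop rather than inside it: the k-loop is unrolled eightfold (KERNEL1..KERNEL8) with an exit test only every eight steps, A and B loads run ahead of the multiplies that consume them, and prefetch/prefetchw touch the next A panel, the B panel (via BB), and the output C block before they are needed.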