zgemm_kernel_2x2_sse2.s
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define OLD_M	%rdi
#define OLD_N	%rsi
#define M	%r13
#define N	%r14
#define K	%rdx

#define A	%rcx
#define B	%r8
#define C	%r9
#define LDC	%r10

#define I	%r11
#define AO	%rdi
#define BO	%rsi
#define CO1	%r15
#define CO2	%rbp
#define BB	%r12

#ifndef WINDOWS_ABI

#define STACKSIZE 64

#else

#define STACKSIZE 256

#define OLD_ALPHA_I	40 + STACKSIZE(%rsp)
#define OLD_A		48 + STACKSIZE(%rsp)
#define OLD_B		56 + STACKSIZE(%rsp)
#define OLD_C		64 + STACKSIZE(%rsp)
#define OLD_LDC		72 + STACKSIZE(%rsp)
#define OLD_OFFSET	80 + STACKSIZE(%rsp)

#endif

#define POSINV	  0(%rsp)
#define ALPHA_R	 16(%rsp)
#define ALPHA_I	 32(%rsp)
#define J	 48(%rsp)
#define OFFSET	 56(%rsp)
#define KK	 64(%rsp)
#define KKK	 72(%rsp)
#define BUFFER	128(%rsp)

#ifdef OPTERON
#define PREFETCH	prefetch
#define PREFETCHW	prefetchw
#define PREFETCHSIZE	(8 * 5 + 4)
#define RPREFETCHSIZE	(8 * 7 + 4)
#define WPREFETCHSIZE	(8 * 8 + 4)
#endif

#ifdef GENERIC
#define PREFETCH	prefetcht0
#define PREFETCHW	prefetcht0
#define PREFETCHSIZE	(8 * 5 + 4)
#define RPREFETCHSIZE	(8 * 7 + 4)
#define WPREFETCHSIZE	(8 * 8 + 4)
#endif

#ifndef GENERIC
#define KERNEL1(xx) \
	mulpd	%xmm0, %xmm1 ;\
	addpd	%xmm1, %xmm8 ;\
	movapd	-16 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm1 ;\
	mulpd	%xmm0, %xmm3 ;\
	addpd	%xmm3, %xmm9 ;\
	movapd	-14 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm3 ;\
	mulpd	%xmm0, %xmm5 ;\
	PREFETCH  (PREFETCHSIZE + 0) * SIZE + 1 * (xx) * SIZE(AO, %rax, 4) ;\
	mulpd	-10 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm0 ;\
	addpd	%xmm5, %xmm10 ;\
	movapd	-12 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm5 ;\
	addpd	%xmm0, %xmm11 ;\
	movapd	 -8 * SIZE + 1 * (xx) * SIZE(AO, %rax, 4), %xmm0

#define KERNEL2(xx) \
	mulpd	%xmm2, %xmm1 ;\
	addpd	%xmm1, %xmm12 ;\
	movapd	  0 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm1 ;\
	mulpd	%xmm2, %xmm3 ;\
	addpd	%xmm3, %xmm13 ;\
	movapd	 -6 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm3 ;\
	mulpd	%xmm2, %xmm5 ;\
	mulpd	-10 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm2 ;\
	addpd	%xmm5, %xmm14 ;\
	movapd	 -4 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm5 ;\
	addpd	%xmm2, %xmm15 ;\
	movapd	 -6 * SIZE + 1 * (xx) * SIZE(AO, %rax, 4), %xmm2

#define KERNEL3(xx) \
	mulpd	%xmm4, %xmm7 ;\
	addpd	%xmm7, %xmm8 ;\
	movapd	 -8 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm7 ;\
	mulpd	%xmm4, %xmm3 ;\
	addpd	%xmm3, %xmm9 ;\
	movapd	 -6 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm3 ;\
	mulpd	%xmm4, %xmm5 ;\
	mulpd	 -2 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm4 ;\
	addpd	%xmm5, %xmm10 ;\
	movapd	 -4 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm5 ;\
	addpd	%xmm4, %xmm11 ;\
	movapd	 -4 * SIZE + 1 * (xx) * SIZE(AO, %rax, 4), %xmm4

#define KERNEL4(xx) \
	mulpd	%xmm6, %xmm7 ;\
	addpd	%xmm7, %xmm12 ;\
	movapd	  8 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm7 ;\
	mulpd	%xmm6, %xmm3 ;\
	addpd	%xmm3, %xmm13 ;\
	movapd	  2 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm3 ;\
	mulpd	%xmm6, %xmm5 ;\
	mulpd	 -2 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm6 ;\
	addpd	%xmm5, %xmm14 ;\
	movapd	  4 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm5 ;\
	PREFETCH  (PREFETCHSIZE + 8) * SIZE + 1 * (xx) * SIZE(AO, %rax, 4) ;\
	addpd	%xmm6, %xmm15 ;\
	movapd	 -2 * SIZE + 1 * (xx) * SIZE(AO, %rax, 4), %xmm6

#define KERNEL5(xx) \
	mulpd	%xmm0, %xmm1 ;\
	addpd	%xmm1, %xmm8 ;\
	movapd	  0 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm1 ;\
	mulpd	%xmm0, %xmm3 ;\
	addpd	%xmm3, %xmm9 ;\
	movapd	  2 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm3 ;\
	mulpd	%xmm0, %xmm5 ;\
	mulpd	  6 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm0 ;\
	addpd	%xmm5, %xmm10 ;\
	movapd	  4 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm5 ;\
	addpd	%xmm0, %xmm11 ;\
	movapd	  0 * SIZE + 1 * (xx) * SIZE(AO, %rax, 4), %xmm0

#define KERNEL6(xx) \
	mulpd	%xmm2, %xmm1 ;\
	addpd	%xmm1, %xmm12 ;\
	movapd	 16 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm1 ;\
	mulpd	%xmm2, %xmm3 ;\
	addpd	%xmm3, %xmm13 ;\
	movapd	 10 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm3 ;\
	mulpd	%xmm2, %xmm5 ;\
	mulpd	  6 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm2 ;\
	addpd	%xmm5, %xmm14 ;\
	movapd	 12 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm5 ;\
	addpd	%xmm2, %xmm15 ;\
	movapd	  2 * SIZE + 1 * (xx) * SIZE(AO, %rax, 4), %xmm2

#define KERNEL7(xx) \
	mulpd	%xmm4, %xmm7 ;\
	addpd	%xmm7, %xmm8 ;\
	movapd	  8 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm7 ;\
	mulpd	%xmm4, %xmm3 ;\
	addpd	%xmm3, %xmm9 ;\
	movapd	 10 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm3 ;\
	mulpd	%xmm4, %xmm5 ;\
	mulpd	 14 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm4 ;\
	addpd	%xmm5, %xmm10 ;\
	movapd	 12 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm5 ;\
	addpd	%xmm4, %xmm11 ;\
	movapd	  4 * SIZE + 1 * (xx) * SIZE(AO, %rax, 4), %xmm4

#define KERNEL8(xx) \
	mulpd	%xmm6, %xmm7 ;\
	addpd	%xmm7, %xmm12 ;\
	movapd	 24 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm7 ;\
	mulpd	%xmm6, %xmm3 ;\
	addpd	%xmm3, %xmm13 ;\
	movapd	 18 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm3 ;\
	mulpd	%xmm6, %xmm5 ;\
	mulpd	 14 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm6 ;\
	addpd	%xmm5, %xmm14 ;\
	movapd	 20 * SIZE + 2 * (xx) * SIZE(BO, %rax, 8), %xmm5 ;\
	addpd	%xmm6, %xmm15 ;\
	movapd	  6 * SIZE + 1 * (xx) * SIZE(AO, %rax, 4), %xmm6

#else

#define KERNEL1(xx) \
	mulpd	%xmm0, %xmm1 ;\
	addpd	%xmm1, %xmm8 ;\
	movapd	-16 * SIZE + 2 * (xx) * SIZE(BO), %xmm1 ;\
	mulpd	%xmm0, %xmm3 ;\
	addpd	%xmm3, %xmm9 ;\
	movapd	-14 * SIZE + 2 * (xx) * SIZE(BO), %xmm3 ;\
	mulpd	%xmm0, %xmm5 ;\
	PREFETCH  (PREFETCHSIZE + 0) * SIZE + 1 * (xx) * SIZE(AO) ;\
	mulpd	-10 * SIZE + 2 * (xx) * SIZE(BO), %xmm0 ;\
	addpd	%xmm5, %xmm10 ;\
	movapd	-12 * SIZE + 2 * (xx) * SIZE(BO), %xmm5 ;\
	addpd	%xmm0, %xmm11 ;\
	movapd	 -8 * SIZE + 1 * (xx) * SIZE(AO), %xmm0

#define KERNEL2(xx) \
	mulpd	%xmm2, %xmm1 ;\
	addpd	%xmm1, %xmm12 ;\
	movapd	  0 * SIZE + 2 * (xx) * SIZE(BO), %xmm1 ;\
	mulpd	%xmm2, %xmm3 ;\
	addpd	%xmm3, %xmm13 ;\
	movapd	 -6 * SIZE + 2 * (xx) * SIZE(BO), %xmm3 ;\
	mulpd	%xmm2, %xmm5 ;\
	mulpd	-10 * SIZE + 2 * (xx) * SIZE(BO), %xmm2 ;\
	addpd	%xmm5, %xmm14 ;\
	movapd	 -4 * SIZE + 2 * (xx) * SIZE(BO), %xmm5 ;\
	addpd	%xmm2, %xmm15 ;\
	movapd	 -6 * SIZE + 1 * (xx) * SIZE(AO), %xmm2

#define KERNEL3(xx) \
	mulpd	%xmm4, %xmm7 ;\
	addpd	%xmm7, %xmm8 ;\
	movapd	 -8 * SIZE + 2 * (xx) * SIZE(BO), %xmm7 ;\
	mulpd	%xmm4, %xmm3 ;\
	addpd	%xmm3, %xmm9 ;\
	movapd	 -6 * SIZE + 2 * (xx) * SIZE(BO), %xmm3 ;\
	mulpd	%xmm4, %xmm5 ;\
	mulpd	 -2 * SIZE + 2 * (xx) * SIZE(BO), %xmm4 ;\
	addpd	%xmm5, %xmm10 ;\
	movapd	 -4 * SIZE + 2 * (xx) * SIZE(BO), %xmm5 ;\
	addpd	%xmm4, %xmm11 ;\
	movapd	 -4 * SIZE + 1 * (xx) * SIZE(AO), %xmm4

#define KERNEL4(xx) \
	mulpd	%xmm6, %xmm7 ;\
	addpd	%xmm7, %xmm12 ;\
	movapd	  8 * SIZE + 2 * (xx) * SIZE(BO), %xmm7 ;\
	mulpd	%xmm6, %xmm3 ;\
	addpd	%xmm3, %xmm13 ;\
	movapd	  2 * SIZE + 2 * (xx) * SIZE(BO), %xmm3 ;\
	mulpd	%xmm6, %xmm5 ;\
	mulpd	 -2 * SIZE + 2 * (xx) * SIZE(BO), %xmm6 ;\
	addpd	%xmm5, %xmm14 ;\
	movapd	  4 * SIZE + 2 * (xx) * SIZE(BO), %xmm5 ;\
	PREFETCH  (PREFETCHSIZE + 8) * SIZE + 1 * (xx) * SIZE(AO) ;\
	addpd	%xmm6, %xmm15 ;\
	movapd	 -2 * SIZE + 1 * (xx) * SIZE(AO), %xmm6

#define KERNEL5(xx) \
	mulpd	%xmm0, %xmm1 ;\
	addpd	%xmm1, %xmm8 ;\
	movapd	  0 * SIZE + 2 * (xx) * SIZE(BO), %xmm1 ;\
	mulpd	%xmm0, %xmm3 ;\
	addpd	%xmm3, %xmm9 ;\
	movapd	  2 * SIZE + 2 * (xx) * SIZE(BO), %xmm3 ;\
	mulpd	%xmm0, %xmm5 ;\
	mulpd	  6 * SIZE + 2 * (xx) * SIZE(BO), %xmm0 ;\
	addpd	%xmm5, %xmm10 ;\
	movapd	  4 * SIZE + 2 * (xx) * SIZE(BO), %xmm5 ;\
	addpd	%xmm0, %xmm11 ;\
	movapd	  0 * SIZE + 1 * (xx) * SIZE(AO), %xmm0

#define KERNEL6(xx) \
	mulpd	%xmm2, %xmm1 ;\
	addpd	%xmm1, %xmm12 ;\
	movapd	 16 * SIZE + 2 * (xx) * SIZE(BO), %xmm1 ;\
	mulpd	%xmm2, %xmm3 ;\
	addpd	%xmm3, %xmm13 ;\
	movapd	 10 * SIZE + 2 * (xx) * SIZE(BO), %xmm3 ;\
	mulpd	%xmm2, %xmm5 ;\
	mulpd	  6 * SIZE + 2 * (xx) * SIZE(BO), %xmm2 ;\
	addpd	%xmm5, %xmm14 ;\
	movapd	 12 * SIZE + 2 * (xx) * SIZE(BO), %xmm5 ;\
	addpd	%xmm2, %xmm15 ;\
	movapd	  2 * SIZE + 1 * (xx) * SIZE(AO), %xmm2

#define KERNEL7(xx) \
	mulpd	%xmm4, %xmm7 ;\
	addpd	%xmm7, %xmm8 ;\
	movapd	  8 * SIZE + 2 * (xx) * SIZE(BO), %xmm7 ;\
	mulpd	%xmm4, %xmm3 ;\
	addpd	%xmm3, %xmm9 ;\
	movapd	 10 * SIZE + 2 * (xx) * SIZE(BO), %xmm3 ;\
	mulpd	%xmm4, %xmm5 ;\
	mulpd	 14 * SIZE + 2 * (xx) * SIZE(BO), %xmm4 ;\
	addpd	%xmm5, %xmm10 ;\
	movapd	 12 * SIZE + 2 * (xx) * SIZE(BO), %xmm5 ;\
	addpd	%xmm4, %xmm11 ;\
	movapd	  4 * SIZE + 1 * (xx) * SIZE(AO), %xmm4

#define KERNEL8(xx) \
	mulpd	%xmm6, %xmm7 ;\
	addpd	%xmm7, %xmm12 ;\
	movapd	 24 * SIZE + 2 * (xx) * SIZE(BO), %xmm7 ;\
	mulpd	%xmm6, %xmm3 ;\
	addpd	%xmm3, %xmm13 ;\
	movapd	 18 * SIZE + 2 * (xx) * SIZE(BO), %xmm3 ;\
	mulpd	%xmm6, %xmm5 ;\
	mulpd	 14 * SIZE + 2 * (xx) * SIZE(BO), %xmm6 ;\
	addpd	%xmm5, %xmm14 ;\
	movapd	 20 * SIZE + 2 * (xx) * SIZE(BO), %xmm5 ;\
	addpd	%xmm6, %xmm15 ;\
	movapd	  6 * SIZE + 1 * (xx) * SIZE(AO), %xmm6

#endif

	PROLOGUE
	PROFCODE

	subq	$STACKSIZE, %rsp
	movq	%rbx,  0(%rsp)
	movq	%rbp,  8(%rsp)
	movq	%r12, 16(%rsp)
	movq	%r13, 24(%rsp)
	movq	%r14, 32(%rsp)
	movq	%r15, 40(%rsp)

#ifdef WINDOWS_ABI
	movq	%rdi,    48(%rsp)
	movq	%rsi,    56(%rsp)
	movups	%xmm6,   64(%rsp)
	movups	%xmm7,   80(%rsp)
	movups	%xmm8,   96(%rsp)
	movups	%xmm9,  112(%rsp)
	movups	%xmm10, 128(%rsp)
	movups	%xmm11, 144(%rsp)
	movups	%xmm12, 160(%rsp)
	movups	%xmm13, 176(%rsp)
	movups	%xmm14, 192(%rsp)
	movups	%xmm15, 208(%rsp)

	movq	ARG1,    OLD_M
	movq	ARG2,    OLD_N
	movq	ARG3,    K
	movq	OLD_A,   A
	movq	OLD_B,   B
	movq	OLD_C,   C
	movq	OLD_LDC, LDC
#ifdef TRMMKERNEL
	movsd	OLD_OFFSET, %xmm12
#endif
	movaps	%xmm3, %xmm0
	movsd	OLD_ALPHA_I, %xmm1
#else
	movq	72(%rsp), LDC
#ifdef TRMMKERNEL
	movsd	80(%rsp), %xmm12
#endif
#endif

	movq	%rsp, %rbx	# save old stack
	subq	$128 + GEMM_Q * 64, %rsp
	andq	$-4096, %rsp	# align stack

#ifdef WINDOWS_ABI
#if GEMM_Q > 192
	movq	$0, 4096 * 3(%rsp)
#endif
#if GEMM_Q > 128
	movq	$0, 4096 * 2(%rsp)
#endif
#if GEMM_Q > 64
	movq	$0, 4096 * 1(%rsp)
#endif
	movq	$0, 4096 * 0(%rsp)
#endif

	EMMS

	movq	OLD_M, M
	movq	OLD_N, N

	pcmpeqb	%xmm7, %xmm7
	psllq	$63, %xmm7	# Generate mask
	pxor	%xmm10, %xmm10

	movlpd	%xmm0,  0 + ALPHA_R
	movlpd	%xmm0,  8 + ALPHA_R

	movlpd	%xmm1,  8 + ALPHA_I
	xorpd	%xmm7, %xmm1
	movlpd	%xmm1,  0 + ALPHA_I

	movlpd	%xmm10, 0 + POSINV
	movlpd	%xmm7,  8 + POSINV

#ifdef TRMMKERNEL
	movlpd	%xmm12, OFFSET
	movlpd	%xmm12, KK
#ifndef LEFT
	negq	KK
#endif
#endif

	subq	$-16 * SIZE, A

	salq	$ZBASE_SHIFT, LDC

	movq	N, J
	sarq	$1, J		# j = (n >> 1)
	jle	.L100
	ALIGN_4

.L01:
#if defined(TRMMKERNEL) && defined(LEFT)
	movq	OFFSET, %rax
	movq	%rax, KK
#endif

/* Copying to Sub Buffer */
	leaq	BUFFER, BO

	movq	K, %rax
	sarq	$2, %rax
	jle	.L03

	addq	%rax, %rax
	ALIGN_4

.L02:
	PREFETCH	(RPREFETCHSIZE +  0) * SIZE(B)

	movlpd	0 * SIZE(B), %xmm8
	movlpd	1 * SIZE(B), %xmm9
	movlpd	2 * SIZE(B), %xmm10
	movlpd	3 * SIZE(B), %xmm11

	PREFETCHW	(WPREFETCHSIZE +  0) * SIZE(BO)

	movlpd	4 * SIZE(B), %xmm12
	movlpd	5 * SIZE(B), %xmm13
	movlpd	6 * SIZE(B), %xmm14
	movlpd	7 * SIZE(B), %xmm15

	PREFETCHW	(WPREFETCHSIZE +  8) * SIZE(BO)

	movlpd	%xmm8,   0 * SIZE(BO)
	movlpd	%xmm8,   1 * SIZE(BO)
	movlpd	%xmm9,   2 * SIZE(BO)
	movlpd	%xmm9,   3 * SIZE(BO)

	PREFETCH	(RPREFETCHSIZE +  8) * SIZE(B)

	movlpd	%xmm10,  4 * SIZE(BO)
	movlpd	%xmm10,  5 * SIZE(BO)
	movlpd	%xmm11,  6 * SIZE(BO)
	movlpd	%xmm11,  7 * SIZE(BO)

	PREFETCHW	(WPREFETCHSIZE + 16) * SIZE(BO)

	movlpd	%xmm12,  8 * SIZE(BO)
	movlpd	%xmm12,  9 * SIZE(BO)
	movlpd	%xmm13, 10 * SIZE(BO)
	movlpd	%xmm13, 11 * SIZE(BO)

	PREFETCHW	(WPREFETCHSIZE + 24) * SIZE(BO)

	movlpd	%xmm14, 12 * SIZE(BO)
	movlpd	%xmm14, 13 * SIZE(BO)
	movlpd	%xmm15, 14 * SIZE(BO)
	movlpd	%xmm15, 15 * SIZE(BO)

	subq	$-16 * SIZE, BO
	addq	$  8 * SIZE, B
	decq	%rax
	jne	.L02
	ALIGN_4

.L03:
	movq	K, %rax
	andq	$3, %rax
	BRANCH
	jle	.L05
	ALIGN_4

.L04:
	movlpd	0 * SIZE(B), %xmm8
	movlpd	1 * SIZE(B), %xmm9
	movlpd	2 * SIZE(B), %xmm10
	movlpd	3 * SIZE(B), %xmm11

	movlpd	%xmm8,  0 * SIZE(BO)
	movlpd	%xmm8,  1 * SIZE(BO)
	movlpd	%xmm9,  2 * SIZE(BO)
	movlpd	%xmm9,  3 * SIZE(BO)
	movlpd	%xmm10, 4 * SIZE(BO)
	movlpd	%xmm10, 5 * SIZE(BO)
	movlpd	%xmm11, 6 * SIZE(BO)
	movlpd	%xmm11, 7 * SIZE(BO)

	addq	$ 4 * SIZE, B
	addq	$ 8 * SIZE, BO
	decq	%rax
	jne	.L04
	ALIGN_4

.L05:
	movq	C, CO1			# coffset1 = c
	leaq	(C, LDC, 1), CO2	# coffset2 = c + ldc
	movq	A, AO			# aoffset = a

	leaq	(RPREFETCHSIZE +  0) * SIZE(B), BB

	movq	M, I
	sarq	$1, I			# i = (m >> 1)
	jle	.L30
	ALIGN_4

.L10:
	PREFETCH	 0 * SIZE(BB)
	PREFETCH	 8 * SIZE(BB)
	subq	$-16 * SIZE, BB

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	leaq	16 * SIZE + BUFFER, BO
#else
	leaq	16 * SIZE + BUFFER, BO
	movq	KK, %rax
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
	leaq	(BO, %rax, 8), BO
#endif

	movapd	-16 * SIZE(AO), %xmm0
	movapd	-16 * SIZE(BO), %xmm1
	pxor	%xmm8, %xmm8
	movapd	-14 * SIZE(AO), %xmm2
	movapd	-14 * SIZE(BO), %xmm3
	pxor	%xmm9, %xmm9
	movapd	-12 * SIZE(AO), %xmm4
	movapd	-12 * SIZE(BO), %xmm5
	pxor	%xmm10, %xmm10
	movapd	-10 * SIZE(AO), %xmm6
	movapd	 -8 * SIZE(BO), %xmm7
	pxor	%xmm11, %xmm11

	PREFETCHW	3 * SIZE(CO1)
	pxor	%xmm12, %xmm12
	PREFETCHW	3 * SIZE(CO2)
	pxor	%xmm13, %xmm13
	pxor	%xmm14, %xmm14
	pxor	%xmm15, %xmm15

#ifndef TRMMKERNEL
	movq	K, %rax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movq	K, %rax
	subq	KK, %rax
	movq	%rax, KKK
#else
	movq	KK, %rax
#ifdef LEFT
	addq	$2, %rax
#else
	addq	$2, %rax
#endif
	movq	%rax, KKK
#endif

#ifndef GENERIC
	andq	$-8, %rax
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
	leaq	(BO, %rax, 8), BO
	negq	%rax
	je,pn	.L15
	ALIGN_3

.L12:
	KERNEL1(16 *  0)
	KERNEL2(16 *  0)
	KERNEL3(16 *  0)
	KERNEL4(16 *  0)
	KERNEL5(16 *  0)
	KERNEL6(16 *  0)
	KERNEL7(16 *  0)
	KERNEL8(16 *  0)
	KERNEL1(16 *  1)
	KERNEL2(16 *  1)
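For orientation, here is a minimal plain-C sketch of the computation the listing implements: one call of the 2x2 ZGEMM micro-kernel, which accumulates a 2x2 block of C += alpha * A * B over packed panels of A and B. The function name, panel layout, and parameters below are illustrative assumptions, not the library's actual interface; the assembly additionally packs B with each scalar duplicated (the .L02/.L04 loops) so that an aligned movapd yields {x, x} for mulpd against an {re, im} pair of A, uses the POSINV sign mask to fix up the signs needed for complex multiplication, and handles the TRMMKERNEL/offset variants, all of which this sketch omits.

#include <complex.h>

// Reference sketch of the 2x2 ZGEMM micro-kernel (illustrative only):
// C (2x2, column stride ldc) += alpha * A_panel * B_panel, where the
// A panel is a packed 2-row strip (a[2*l + i]) and the B panel a
// packed 2-column strip (b[2*l + j]).
static void zgemm_kernel_2x2_ref(long k, double complex alpha,
                                 const double complex *a,
                                 const double complex *b,
                                 double complex *c, long ldc)
{
    double complex c00 = 0, c10 = 0, c01 = 0, c11 = 0;

    // One rank-1 update per k step; the assembly unrolls this loop 8x
    // (KERNEL1..KERNEL8) and carries the partial products in xmm8-xmm15.
    for (long l = 0; l < k; l++) {
        c00 += a[2 * l + 0] * b[2 * l + 0];
        c10 += a[2 * l + 1] * b[2 * l + 0];
        c01 += a[2 * l + 0] * b[2 * l + 1];
        c11 += a[2 * l + 1] * b[2 * l + 1];
    }

    c[0 * ldc + 0] += alpha * c00;
    c[0 * ldc + 1] += alpha * c10;
    c[1 * ldc + 0] += alpha * c01;
    c[1 * ldc + 1] += alpha * c11;
}

The duplicated-B packing is the key design choice visible in the prologue: storing each B value twice in BUFFER trades 2x the buffer bandwidth for shuffle-free mulpd reuse inside the inner loop, which is why the copy loops prefetch aggressively (RPREFETCHSIZE/WPREFETCHSIZE) while the kernel itself only prefetches the A stream.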