zgemm_kernel_4x2_barcelona.s
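A Goto-style complex GEMM micro-kernel for AMD Barcelona (Family 10h), built around a 4x2 register tile and the packed single-precision SSE instructions. The excerpt below covers the tail of the B-packing loop, the 4x2 main path, and the 2-row tail path; the conjugation symbols (NN/NT/.../CC) and TRMMKERNEL select the variant being compiled. As a reading aid, here is a minimal C sketch of what one 4x2 tile update computes; the function and variable names are illustrative, not from this file, and it follows the non-TRMMKERNEL path (for TRMMKERNEL builds the load-and-add of the old C tile is skipped):

#include <complex.h>

/* One 4x2 tile update, C[0..3][0..1] += alpha * A_panel * B_panel:
 * a is a packed 4 x k panel (k-major), b a packed k x 2 panel,
 * ldc the leading dimension of C in complex elements. */
static void ref_kernel_4x2(long k, float complex alpha,
                           const float complex *a, const float complex *b,
                           float complex *c, long ldc)
{
    float complex acc[4][2] = {{ 0 }};

    for (long l = 0; l < k; l++)          /* the unrolled .L12 / .L17 loops */
        for (int i = 0; i < 4; i++)
            for (int j = 0; j < 2; j++)
                acc[i][j] += a[4 * l + i] * b[2 * l + j];

    for (int j = 0; j < 2; j++)           /* alpha applied at writeback (.L18) */
        for (int i = 0; i < 4; i++)
            c[j * ldc + i] += alpha * acc[i][j];
}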
        movaps  %xmm4, 16 * SIZE(BO)
        movaps  %xmm5, 20 * SIZE(BO)
        movaps  %xmm6, 24 * SIZE(BO)
        movaps  %xmm7, 28 * SIZE(BO)

        addq    $ 8 * SIZE, B
        addq    $32 * SIZE, BO
        decq    %rax
        jne     .L02
        ALIGN_4

.L03:
        movq    K, %rax
        andq    $3, %rax
        BRANCH
        jle     .L10
        ALIGN_4

# pack the k & 3 remainder of B: broadcast the real and imaginary part of
# each complex entry, pre-flipping signs for the conjugation variants
.L04:
        movaps   0 * SIZE(B), %xmm3

        pshufd  $0x00, %xmm3, %xmm0
        pshufd  $0x55, %xmm3, %xmm1
        pshufd  $0xaa, %xmm3, %xmm2
        pshufd  $0xff, %xmm3, %xmm3

#if defined(NN) || defined(NT) || defined(NR) || defined(NC) || \
    defined(TN) || defined(TT) || defined(TR) || defined(TC)
        xorps   %xmm15, %xmm1
        xorps   %xmm15, %xmm3
#else
        xorps   %xmm15, %xmm0
        xorps   %xmm15, %xmm2
#endif

        movaps  %xmm0,  0 * SIZE(BO)
        movaps  %xmm1,  4 * SIZE(BO)
        movaps  %xmm2,  8 * SIZE(BO)
        movaps  %xmm3, 12 * SIZE(BO)

        addq    $ 4 * SIZE, B
        addq    $16 * SIZE, BO
        decq    %rax
        jne     .L04
        ALIGN_4

.L10:
        movq    C, CO1                  # coffset1 = c
        leaq    (C, LDC, 1), CO2        # coffset2 = c + ldc
        movq    A, AO                   # aoffset = a

        movq    B, BB

        movq    M, I
        sarq    $2, I                   # i = (m >> 2)
        jle     .L20
        ALIGN_4

.L11:
#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        leaq    32 * SIZE + BUFFER, BO
#else
        leaq    32 * SIZE + BUFFER, BO
        movq    KK, %rax
        leaq    (, %rax, 8), %rax
        leaq    (AO, %rax, 4), AO
        leaq    (BO, %rax, 8), BO
#endif

        prefetch (RPREFETCHSIZE +  0) * SIZE(BB)
        prefetch (RPREFETCHSIZE +  8) * SIZE(BB)
        prefetch (RPREFETCHSIZE + 16) * SIZE(BB)
        subq    $-16 * SIZE, BB

        movaps  -32 * SIZE(AO), %xmm0
        movaps  -32 * SIZE(BO), %xmm1
        pxor    %xmm8, %xmm8
        movaps  -28 * SIZE(BO), %xmm3
        pxor    %xmm9, %xmm9
        movaps  -16 * SIZE(AO), %xmm4
        pxor    %xmm10, %xmm10
        movaps    0 * SIZE(BO), %xmm5
        pxor    %xmm11, %xmm11

        prefetchw 7 * SIZE(CO1)
        pxor    %xmm12, %xmm12
        prefetchw 7 * SIZE(CO2)
        pxor    %xmm13, %xmm13
        pxor    %xmm14, %xmm14
        pxor    %xmm15, %xmm15
        movaps  %xmm0, %xmm2

#ifndef TRMMKERNEL
        movq    K, %rax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movq    K, %rax
        subq    KK, %rax
        movq    %rax, KKK
#else
        movq    KK, %rax
#ifdef LEFT
        addq    $4, %rax
#else
        addq    $2, %rax
#endif
        movq    %rax, KKK
#endif
        andq    $-8, %rax
        leaq    (, %rax, 8), %rax
        leaq    (AO, %rax, 4), AO
        leaq    (BO, %rax, 8), BO
        negq    %rax
        je,pn   .L15
        ALIGN_3

# main k loop, unrolled through the KERNEL1..KERNEL8 macros with an exit
# test after every group of eight
.L12:
        KERNEL1(32 * 0)
        KERNEL2(32 * 0)
        KERNEL3(32 * 0)
        KERNEL4(32 * 0)
        KERNEL5(32 * 0)
        KERNEL6(32 * 0)
        KERNEL7(32 * 0)
        KERNEL8(32 * 0)
        je,pn   .L15

        KERNEL1(32 * 0)
        KERNEL2(32 * 0)
        KERNEL3(32 * 0)
        KERNEL4(32 * 0)
        KERNEL5(32 * 0)
        KERNEL6(32 * 0)
        KERNEL7(32 * 0)
        KERNEL8(32 * 0)
        je,pn   .L15

        KERNEL1(32 * 0)
        KERNEL2(32 * 0)
        KERNEL3(32 * 0)
        KERNEL4(32 * 0)
        KERNEL5(32 * 0)
        KERNEL6(32 * 0)
        KERNEL7(32 * 0)
        KERNEL8(32 * 0)
        je,pn   .L15

        KERNEL1(32 * 0)
        KERNEL2(32 * 0)
        KERNEL3(32 * 0)
        KERNEL4(32 * 0)
        KERNEL5(32 * 0)
        KERNEL6(32 * 0)
        KERNEL7(32 * 0)
        KERNEL8(32 * 0)
        je,pn   .L15

        KERNEL1(32 * 0)
        KERNEL2(32 * 0)
        KERNEL3(32 * 0)
        KERNEL4(32 * 0)
        KERNEL5(32 * 0)
        KERNEL6(32 * 0)
        KERNEL7(32 * 0)
        KERNEL8(32 * 0)
        je,pn   .L15

        KERNEL1(32 * 0)
        KERNEL2(32 * 0)
        KERNEL3(32 * 0)
        KERNEL4(32 * 0)
        KERNEL5(32 * 0)
        KERNEL6(32 * 0)
        KERNEL7(32 * 0)
        KERNEL8(32 * 0)
        je,pn   .L15

        KERNEL1(32 * 0)
        KERNEL2(32 * 0)
        KERNEL3(32 * 0)
        KERNEL4(32 * 0)
        KERNEL5(32 * 0)
        KERNEL6(32 * 0)
        KERNEL7(32 * 0)
        KERNEL8(32 * 0)
        je,pn   .L15

        KERNEL1(32 * 0)
        KERNEL2(32 * 0)
        KERNEL3(32 * 0)
        KERNEL4(32 * 0)
        KERNEL5(32 * 0)
        KERNEL6(32 * 0)
        KERNEL7(32 * 0)
        KERNEL8(32 * 0)
        jl,pt   .L12
        ALIGN_4

# k & 4 tail, handled by the KERNEL_SUB macros
.L15:
#ifndef TRMMKERNEL
        movq    K, %rax
#else
        movq    KKK, %rax
#endif
        testq   $4, %rax
        je      .L16
        xorq    %rax, %rax
        ALIGN_3

        KERNEL_SUB1(32 * 0)
        KERNEL_SUB2(32 * 0)
        KERNEL_SUB3(32 * 0)
        KERNEL_SUB4(32 * 0)

        addq    $64 * SIZE, BO
        addq    $32 * SIZE, AO
        ALIGN_3
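# .L16-.L18: walk the k & 3 remainder one step at a time, then fold the
# real/imaginary product accumulators together, scale by complex alpha and
# store the 4x2 tile of C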
.L16:
#ifndef TRMMKERNEL
        movq    K, %rax
#else
        movq    KKK, %rax
#endif
        movaps  ALPHA_R, %xmm6
        movaps  ALPHA_I, %xmm7
        andq    $3, %rax                # if (k & 3)
        BRANCH
        je      .L18

        leaq    (, %rax, 8), %rax
        leaq    (AO, %rax, 4), AO
        leaq    (BO, %rax, 8), BO
        negq    %rax
        ALIGN_4

# remainder loop
.L17:
        mulps   %xmm1, %xmm0
        mulps   -28 * SIZE(AO, %rax, 4), %xmm1
        addps   %xmm0, %xmm8
        movaps  %xmm2, %xmm0
        addps   %xmm1, %xmm12
        movaps  -24 * SIZE(BO, %rax, 8), %xmm1

        mulps   %xmm3, %xmm2
        mulps   -28 * SIZE(AO, %rax, 4), %xmm3
        addps   %xmm2, %xmm9
        movaps  %xmm0, %xmm2
        addps   %xmm3, %xmm13
        movaps  -20 * SIZE(BO, %rax, 8), %xmm3

        mulps   %xmm1, %xmm0
        mulps   -28 * SIZE(AO, %rax, 4), %xmm1
        addps   %xmm0, %xmm10
        movaps  -24 * SIZE(AO, %rax, 4), %xmm0
        addps   %xmm1, %xmm14
        movaps  -16 * SIZE(BO, %rax, 8), %xmm1

        mulps   %xmm3, %xmm2
        mulps   -28 * SIZE(AO, %rax, 4), %xmm3
        addps   %xmm2, %xmm11
        addps   %xmm3, %xmm15
        movaps  -12 * SIZE(BO, %rax, 8), %xmm3
        movaps  %xmm0, %xmm2

        addq    $SIZE * 2, %rax
        jl      .L17
        ALIGN_4

.L18:
#ifndef TRMMKERNEL
        movsd   0 * SIZE(CO1), %xmm0
        movhps  2 * SIZE(CO1), %xmm0
        movsd   4 * SIZE(CO1), %xmm2
        movhps  6 * SIZE(CO1), %xmm2

        movsd   0 * SIZE(CO2), %xmm1
        movhps  2 * SIZE(CO2), %xmm1
        movsd   4 * SIZE(CO2), %xmm3
        movhps  6 * SIZE(CO2), %xmm3
#endif

        shufps  $0xb1, %xmm9, %xmm9     # swap the (re, im) slots pairwise
        shufps  $0xb1, %xmm11, %xmm11
        shufps  $0xb1, %xmm13, %xmm13
        shufps  $0xb1, %xmm15, %xmm15

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(RR) || defined(RC) || defined(CR) || defined(CC)
        subps   %xmm9, %xmm8
        subps   %xmm11, %xmm10
        subps   %xmm13, %xmm12
        subps   %xmm15, %xmm14
#else
        addps   %xmm9, %xmm8
        addps   %xmm11, %xmm10
        addps   %xmm13, %xmm12
        addps   %xmm15, %xmm14
#endif

        movaps  %xmm8, %xmm9            # apply complex alpha: swizzle,
        movaps  %xmm10, %xmm11          # multiply by ALPHA_R / ALPHA_I,
        movaps  %xmm12, %xmm13          # then recombine
        movaps  %xmm14, %xmm15

        shufps  $0xb1, %xmm8, %xmm8
        shufps  $0xb1, %xmm10, %xmm10
        shufps  $0xb1, %xmm12, %xmm12
        shufps  $0xb1, %xmm14, %xmm14

        mulps   %xmm6, %xmm9
        mulps   %xmm7, %xmm8
        mulps   %xmm6, %xmm11
        mulps   %xmm7, %xmm10

        mulps   %xmm6, %xmm13
        mulps   %xmm7, %xmm12
        mulps   %xmm6, %xmm15
        mulps   %xmm7, %xmm14

        addps   %xmm9, %xmm8
        addps   %xmm11, %xmm10
        addps   %xmm13, %xmm12
        addps   %xmm15, %xmm14

#ifndef TRMMKERNEL
        addps   %xmm0, %xmm8
        addps   %xmm1, %xmm10
        addps   %xmm2, %xmm12
        addps   %xmm3, %xmm14
#endif

        movsd   %xmm8,  0 * SIZE(CO1)
        movhps  %xmm8,  2 * SIZE(CO1)
        movsd   %xmm12, 4 * SIZE(CO1)
        movhps  %xmm12, 6 * SIZE(CO1)

        movsd   %xmm10, 0 * SIZE(CO2)
        movhps  %xmm10, 2 * SIZE(CO2)
        movsd   %xmm14, 4 * SIZE(CO2)
        movhps  %xmm14, 6 * SIZE(CO2)

#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        movq    K, %rax
        subq    KKK, %rax
        leaq    (, %rax, 8), %rax
        leaq    (AO, %rax, 4), AO
        leaq    (BO, %rax, 8), BO
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
        addq    $4, KK
#endif

        addq    $8 * SIZE, CO1          # coffset += 4
        addq    $8 * SIZE, CO2          # coffset += 4
        decq    I                       # i --
        jg      .L11
        ALIGN_4
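# .L20: tail path for M & 2, the same pack-and-accumulate scheme applied to
# a 2-row slice of A against both columns of B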
.L20:
        testq   $2, M
        je      .L30

#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        leaq    32 * SIZE + BUFFER, BO
#else
        leaq    32 * SIZE + BUFFER, BO
        movq    KK, %rax
        leaq    (, %rax, 8), %rax
        leaq    (AO, %rax, 2), AO
        leaq    (BO, %rax, 8), BO
#endif

        movaps  -32 * SIZE(AO), %xmm0
        movaps  -16 * SIZE(AO), %xmm2
        movaps    0 * SIZE(AO), %xmm4
        movaps   16 * SIZE(AO), %xmm6

        movaps  -32 * SIZE(BO), %xmm1
        movaps  -16 * SIZE(BO), %xmm3
        movaps    0 * SIZE(BO), %xmm5
        movaps   16 * SIZE(BO), %xmm7

        pxor    %xmm8, %xmm8
        pxor    %xmm9, %xmm9
        pxor    %xmm10, %xmm10
        pxor    %xmm11, %xmm11

#ifndef TRMMKERNEL
        movq    K, %rax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movq    K, %rax
        subq    KK, %rax
        movq    %rax, KKK
#else
        movq    KK, %rax
#ifdef LEFT
        addq    $2, %rax
#else
        addq    $2, %rax
#endif
        movq    %rax, KKK
#endif
        sarq    $3, %rax
        je      .L25
        ALIGN_4

# k loop for the 2-row tile, unrolled 8x
.L22:
        mulps   %xmm0, %xmm1
        PREFETCH (PREFETCHSIZE + 0) * SIZE(AO)
        addps   %xmm1, %xmm8
        movaps  -28 * SIZE(BO), %xmm1
        mulps   %xmm0, %xmm1
        addps   %xmm1, %xmm9
        movaps  -24 * SIZE(BO), %xmm1
        mulps   %xmm0, %xmm1
        mulps   -20 * SIZE(BO), %xmm0
        addps   %xmm1, %xmm10
        movaps   32 * SIZE(BO), %xmm1
        addps   %xmm0, %xmm11
        movaps  -28 * SIZE(AO), %xmm0

        mulps   %xmm0, %xmm3
        addps   %xmm3, %xmm8
        movaps  -12 * SIZE(BO), %xmm3
        mulps   %xmm0, %xmm3
        addps   %xmm3, %xmm9
        movaps   -8 * SIZE(BO), %xmm3
        mulps   %xmm0, %xmm3
        mulps    -4 * SIZE(BO), %xmm0
        addps   %xmm3, %xmm10
        movaps   48 * SIZE(BO), %xmm3
        addps   %xmm0, %xmm11
        movaps  -24 * SIZE(AO), %xmm0

        mulps   %xmm0, %xmm5
        addps   %xmm5, %xmm8
        movaps    4 * SIZE(BO), %xmm5
        mulps   %xmm0, %xmm5
        addps   %xmm5, %xmm9
        movaps    8 * SIZE(BO), %xmm5
        mulps   %xmm0, %xmm5
        mulps    12 * SIZE(BO), %xmm0
        addps   %xmm5, %xmm10
        movaps   64 * SIZE(BO), %xmm5
        addps   %xmm0, %xmm11
        movaps  -20 * SIZE(AO), %xmm0

        mulps   %xmm0, %xmm7
        addps   %xmm7, %xmm8
        movaps   20 * SIZE(BO), %xmm7
        mulps   %xmm0, %xmm7
        addps   %xmm7, %xmm9
        movaps   24 * SIZE(BO), %xmm7
        mulps   %xmm0, %xmm7
        mulps    28 * SIZE(BO), %xmm0
        addps   %xmm7, %xmm10
        movaps   80 * SIZE(BO), %xmm7
        addps   %xmm0, %xmm11
        movaps    0 * SIZE(AO), %xmm0

        mulps   %xmm2, %xmm1
        PREFETCH (PREFETCHSIZE + 16) * SIZE(AO)
        addps   %xmm1, %xmm8
        movaps   36 * SIZE(BO), %xmm1
        mulps   %xmm2, %xmm1
        addps   %xmm1, %xmm9
        movaps   40 * SIZE(BO), %xmm1
        mulps   %xmm2, %xmm1
        mulps    44 * SIZE(BO), %xmm2
        addps   %xmm1, %xmm10
        movaps   96 * SIZE(BO), %xmm1
        addps   %xmm2, %xmm11
        movaps  -12 * SIZE(AO), %xmm2

        mulps   %xmm2, %xmm3
        addps   %xmm3, %xmm8
        movaps   52 * SIZE(BO), %xmm3
        mulps   %xmm2, %xmm3
        addps   %xmm3, %xmm9
        movaps   56 * SIZE(BO), %xmm3
        mulps   %xmm2, %xmm3
        mulps    60 * SIZE(BO), %xmm2
        addps   %xmm3, %xmm10
        movaps  112 * SIZE(BO), %xmm3
        addps   %xmm2, %xmm11
        movaps   -8 * SIZE(AO), %xmm2

        mulps   %xmm2, %xmm5
        addps   %xmm5, %xmm8
        movaps   68 * SIZE(BO), %xmm5
        mulps   %xmm2, %xmm5
        addps   %xmm5, %xmm9
        movaps   72 * SIZE(BO), %xmm5
        mulps   %xmm2, %xmm5
        mulps    76 * SIZE(BO), %xmm2
        addps   %xmm5, %xmm10
        movaps  128 * SIZE(BO), %xmm5
        addps   %xmm2, %xmm11
        movaps   -4 * SIZE(AO), %xmm2

        mulps   %xmm2, %xmm7
        addps   %xmm7, %xmm8
        movaps   84 * SIZE(BO), %xmm7
        mulps   %xmm2, %xmm7
        addps   %xmm7, %xmm9
        movaps   88 * SIZE(BO), %xmm7
        mulps   %xmm2, %xmm7
        mulps    92 * SIZE(BO), %xmm2
        addps   %xmm7, %xmm10
        movaps  144 * SIZE(BO), %xmm7
        addps   %xmm2, %xmm11
        movaps   16 * SIZE(AO), %xmm2

        subq    $ -32 * SIZE, AO
        subq    $-128 * SIZE, BO
        decq    %rax
        jne     .L22
        ALIGN_4

.L25:
#ifndef TRMMKERNEL
        movq    K, %rax
#else
        movq    KKK, %rax
#endif
        movaps  ALPHA_R, %xmm6
        movaps  ALPHA_I, %xmm7
        andq    $7, %rax                # if (k & 7)
        BRANCH
        je      .L28
        ALIGN_4

# remainder loop for the 2-row tile
.L26:
        mulps   %xmm0, %xmm1
        addps   %xmm1, %xmm8
        movaps  -28 * SIZE(BO), %xmm1
        mulps   %xmm0, %xmm1
        addps   %xmm1, %xmm9
        movaps  -24 * SIZE(BO), %xmm1
        mulps   %xmm0, %xmm1
        mulps   -20 * SIZE(BO), %xmm0
        addps   %xmm1, %xmm10
        movaps  -16 * SIZE(BO), %xmm1
        addps   %xmm0, %xmm11
        movaps  -28 * SIZE(AO), %xmm0

        subq    $ -4 * SIZE, AO
        subq    $-16 * SIZE, BO
        decq    %rax
        jg      .L26
        ALIGN_4
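The loops above keep products with the broadcast real part of B and products with its broadcast imaginary part in separate accumulators; shufps $0xb1 then swaps the (re, im) slots pairwise so a single add or subtract reassembles full complex products. Part of the sign handling is baked into the packed B buffer (the xorps %xmm15 in the .L04 packing loop), which is why each variant needs only one of addps or subps at writeback. A scalar C sketch of the same scheme, with illustrative names, making both signs explicit instead of pre-flipping the packed data:

#include <stdio.h>

/* four float lanes = two interleaved complex values [re0, im0, re1, im1] */
static void cmadd_pair(const float a[4], float br, float bi,
                       float acc_r[4], float acc_i[4])
{
    for (int i = 0; i < 4; i++) {
        acc_r[i] += a[i] * br;  /* mulps by the broadcast real part of b */
        acc_i[i] += a[i] * bi;  /* mulps by the broadcast imaginary part */
    }
}

static void combine(const float acc_r[4], const float acc_i[4], float out[4])
{
    /* shufps $0xb1 swaps lanes pairwise: [x1, x0, x3, x2] */
    const float sw[4] = { acc_i[1], acc_i[0], acc_i[3], acc_i[2] };
    for (int i = 0; i < 4; i += 2) {
        out[i]     = acc_r[i]     - sw[i];      /* re = ar*br - ai*bi */
        out[i + 1] = acc_r[i + 1] + sw[i + 1];  /* im = ai*br + ar*bi */
    }
}

int main(void)
{
    /* (1+2i)*(3+4i) = -5+10i, computed twice in one 4-lane vector */
    float a[4] = { 1, 2, 1, 2 }, r[4] = { 0 }, im[4] = { 0 }, out[4];
    cmadd_pair(a, 3.0f, 4.0f, r, im);
    combine(r, im, out);
    printf("%g%+gi  %g%+gi\n", out[0], out[1], out[2], out[3]);
    return 0;
}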