📄 zgemm_kernel_4x2_sse3.s
        movddup  4 * SIZE(B), %xmm2
        movddup  6 * SIZE(B), %xmm3
        movddup  8 * SIZE(B), %xmm4
        movddup 10 * SIZE(B), %xmm5
        movddup 12 * SIZE(B), %xmm6
        movddup 14 * SIZE(B), %xmm7

        movaps %xmm0,  0 * SIZE(BO)
        movaps %xmm1,  4 * SIZE(BO)
        movaps %xmm2,  8 * SIZE(BO)
        movaps %xmm3, 12 * SIZE(BO)
        movaps %xmm4, 16 * SIZE(BO)
        movaps %xmm5, 20 * SIZE(BO)
        movaps %xmm6, 24 * SIZE(BO)
        movaps %xmm7, 28 * SIZE(BO)

        prefetcht1 128 * SIZE(BO)
        prefetcht0 112 * SIZE(B)

        addq $16 * SIZE, B
        addq $32 * SIZE, BO
        decq %rax
        jne .L42
        ALIGN_4

.L43:
        movq K, %rax
        andq $7, %rax
        BRANCH
        jle .L50
        ALIGN_4

.L44:
        movddup 0 * SIZE(B), %xmm0
        movaps %xmm0, 0 * SIZE(BO)

        addq $2 * SIZE, B
        addq $4 * SIZE, BO
        decq %rax
        jne .L44
        ALIGN_4

.L50:
        movq C, CO1        # coffset1 = c
        movq A, AO         # aoffset = a

        movq M, I
        sarq $2, I         # i = (m >> 2)
        jle .L60
        ALIGN_4

.L51:
#if !defined(TRMMKERNEL) || \
        (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
        (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

        leaq BUFFER, BO
#else
        leaq BUFFER, BO
        movq KK, %rax
        leaq (, %rax, 8), %rax
        leaq (AO, %rax, 4), AO
        leaq (BO, %rax, 2), BO
#endif

        movaps  0 * SIZE(AO), %xmm8
        pxor %xmm0, %xmm0
        movaps 16 * SIZE(AO), %xmm10
        pxor %xmm1, %xmm1
        movaps 32 * SIZE(AO), %xmm12
        pxor %xmm4, %xmm4
        movaps 48 * SIZE(AO), %xmm14
        pxor %xmm5, %xmm5

        movsldup  0 * SIZE(BO), %xmm9
        movsldup 16 * SIZE(BO), %xmm11

        prefetchnta 4 * SIZE(CO1)

#ifndef TRMMKERNEL
        movq K, %rax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movq K, %rax
        subq KK, %rax
        movq %rax, KKK
#else
        movq KK, %rax
#ifdef LEFT
        addq $4, %rax
#else
        addq $1, %rax
#endif
        movq %rax, KKK
#endif
        sarq $3, %rax
        je .L55
        ALIGN_4

.L52:
        mulps %xmm8, %xmm9
        PREFETCH (PREFETCHSIZE + 0) * SIZE(AO)
        addps %xmm9, %xmm0
        movshdup 0 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 4 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm1
        movsldup 0 * SIZE(BO), %xmm9

        mulps %xmm8, %xmm9
        addps %xmm9, %xmm4
        movshdup 0 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 8 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm5
        movsldup 4 * SIZE(BO), %xmm9

        mulps %xmm8, %xmm9
        addps %xmm9, %xmm0
        movshdup 4 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 12 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm1
        movsldup 4 * SIZE(BO), %xmm9

        mulps %xmm8, %xmm9
        addps %xmm9, %xmm4
        movshdup 4 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 64 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm5
        movsldup 8 * SIZE(BO), %xmm9

        mulps %xmm10, %xmm9
        addps %xmm9, %xmm0
        movshdup 8 * SIZE(BO), %xmm9
        mulps %xmm10, %xmm9
        movaps 20 * SIZE(AO), %xmm10
        ADDSUB %xmm9, %xmm1
        movsldup 8 * SIZE(BO), %xmm9

        mulps %xmm10, %xmm9
        addps %xmm9, %xmm4
        movshdup 8 * SIZE(BO), %xmm9
        mulps %xmm10, %xmm9
        movaps 24 * SIZE(AO), %xmm10
        ADDSUB %xmm9, %xmm5
        movsldup 12 * SIZE(BO), %xmm9

        mulps %xmm10, %xmm9
        addps %xmm9, %xmm0
        movshdup 12 * SIZE(BO), %xmm9
        mulps %xmm10, %xmm9
        movaps 28 * SIZE(AO), %xmm10
        ADDSUB %xmm9, %xmm1
        movsldup 12 * SIZE(BO), %xmm9

        mulps %xmm10, %xmm9
        addps %xmm9, %xmm4
        movshdup 12 * SIZE(BO), %xmm9
        mulps %xmm10, %xmm9
        movaps 80 * SIZE(AO), %xmm10
        ADDSUB %xmm9, %xmm5
        movsldup 32 * SIZE(BO), %xmm9

        mulps %xmm12, %xmm11
        PREFETCH (PREFETCHSIZE + 32) * SIZE(AO)
        addps %xmm11, %xmm0
        movshdup 16 * SIZE(BO), %xmm11
        mulps %xmm12, %xmm11
        movaps 36 * SIZE(AO), %xmm12
        ADDSUB %xmm11, %xmm1
        movsldup 16 * SIZE(BO), %xmm11

        mulps %xmm12, %xmm11
        addps %xmm11, %xmm4
        movshdup 16 * SIZE(BO), %xmm11
        mulps %xmm12, %xmm11
        movaps 40 * SIZE(AO), %xmm12
        ADDSUB %xmm11, %xmm5
        movsldup 20 * SIZE(BO), %xmm11

        mulps %xmm12, %xmm11
        addps %xmm11, %xmm0
        movshdup 20 * SIZE(BO), %xmm11
        mulps %xmm12, %xmm11
        movaps 44 * SIZE(AO), %xmm12
        ADDSUB %xmm11, %xmm1
        movsldup 20 * SIZE(BO), %xmm11

        mulps %xmm12, %xmm11
        addps %xmm11, %xmm4
        movshdup 20 * SIZE(BO), %xmm11
        mulps %xmm12, %xmm11
        movaps 96 * SIZE(AO), %xmm12
        ADDSUB %xmm11, %xmm5
        movsldup 24 * SIZE(BO), %xmm11

        mulps %xmm14, %xmm11
        addps %xmm11, %xmm0
        movshdup 24 * SIZE(BO), %xmm11
        mulps %xmm14, %xmm11
        movaps 52 * SIZE(AO), %xmm14
        ADDSUB %xmm11, %xmm1
        movsldup 24 * SIZE(BO), %xmm11

        mulps %xmm14, %xmm11
        addps %xmm11, %xmm4
        movshdup 24 * SIZE(BO), %xmm11
        mulps %xmm14, %xmm11
        movaps 56 * SIZE(AO), %xmm14
        ADDSUB %xmm11, %xmm5
        movsldup 28 * SIZE(BO), %xmm11

        mulps %xmm14, %xmm11
        addps %xmm11, %xmm0
        movshdup 28 * SIZE(BO), %xmm11
        mulps %xmm14, %xmm11
        movaps 60 * SIZE(AO), %xmm14
        ADDSUB %xmm11, %xmm1
        movsldup 28 * SIZE(BO), %xmm11

        mulps %xmm14, %xmm11
        addps %xmm11, %xmm4
        movshdup 28 * SIZE(BO), %xmm11
        mulps %xmm14, %xmm11
        movaps 112 * SIZE(AO), %xmm14
        ADDSUB %xmm11, %xmm5
        movsldup 48 * SIZE(BO), %xmm11

        addq $64 * SIZE, AO
        addq $32 * SIZE, BO
        decq %rax
        jne .L52
        ALIGN_4

.L55:
#ifndef TRMMKERNEL
        movq K, %rax
#else
        movq KKK, %rax
#endif
        movaps ALPHA_R, %xmm14
        movaps ALPHA_I, %xmm15

        andq $7, %rax          # if (k & 1)
        BRANCH
        je .L58
        ALIGN_4

.L56:
        mulps %xmm8, %xmm9
        addps %xmm9, %xmm0
        movshdup 0 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 4 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm1
        movsldup 0 * SIZE(BO), %xmm9

        mulps %xmm8, %xmm9
        addps %xmm9, %xmm4
        movshdup 0 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 8 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm5
        movsldup 4 * SIZE(BO), %xmm9

        addq $ 8 * SIZE, AO
        addq $ 4 * SIZE, BO
        decq %rax
        jg .L56
        ALIGN_4

.L58:
#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
        defined(NR) || defined(NC) || defined(TR) || defined(TC)
        shufps $0xb1, %xmm1, %xmm1
        shufps $0xb1, %xmm5, %xmm5

        addsubps %xmm1, %xmm0
        addsubps %xmm5, %xmm4

        movaps %xmm0, %xmm1
        movaps %xmm4, %xmm5

        shufps $0xb1, %xmm0, %xmm0
        shufps $0xb1, %xmm4, %xmm4
#else
        shufps $0xb1, %xmm0, %xmm0
        shufps $0xb1, %xmm4, %xmm4

        addsubps %xmm0, %xmm1
        addsubps %xmm4, %xmm5

        movaps %xmm1, %xmm0
        movaps %xmm5, %xmm4

        shufps $0xb1, %xmm1, %xmm1
        shufps $0xb1, %xmm5, %xmm5
#endif

        mulps %xmm14, %xmm1
        mulps %xmm15, %xmm0
        mulps %xmm14, %xmm5
        mulps %xmm15, %xmm4

        addps %xmm1, %xmm0
        addps %xmm5, %xmm4

#if !defined(TRMMKERNEL) && !defined(BETAZERO)
        movsd  0 * SIZE(CO1), %xmm8
        movhps 2 * SIZE(CO1), %xmm8
        movsd  4 * SIZE(CO1), %xmm9
        movhps 6 * SIZE(CO1), %xmm9

        addps %xmm8, %xmm0
        addps %xmm9, %xmm4
#endif

        movsd  %xmm0, 0 * SIZE(CO1)
        movhps %xmm0, 2 * SIZE(CO1)
        movsd  %xmm4, 4 * SIZE(CO1)
        movhps %xmm4, 6 * SIZE(CO1)

#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
        (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        movq K, %rax
        subq KKK, %rax
        leaq (,%rax, 8), %rax
        leaq (AO, %rax, 4), AO
        leaq (BO, %rax, 2), BO
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
        addq $4, KK
#endif

        addq $8 * SIZE, CO1    # coffset += 4
        decq I                 # i --
        jg .L51
        ALIGN_4

.L60:
        testq $2, M
        je .L70

#if !defined(TRMMKERNEL) || \
        (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
        (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

        leaq BUFFER, BO
#else
        leaq BUFFER, BO
        movq KK, %rax
        leaq (, %rax, 8), %rax
        leaq (AO, %rax, 2), AO
        leaq (BO, %rax, 2), BO
#endif

        movaps  0 * SIZE(AO), %xmm8
        pxor %xmm0, %xmm0
        movsldup 0 * SIZE(BO), %xmm9
        pxor %xmm1, %xmm1
        movaps 16 * SIZE(AO), %xmm10
        movsldup 16 * SIZE(BO), %xmm11

#ifndef TRMMKERNEL
        movq K, %rax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movq K, %rax
        subq KK, %rax
        movq %rax, KKK
#else
        movq KK, %rax
#ifdef LEFT
        addq $2, %rax
#else
        addq $1, %rax
#endif
        movq %rax, KKK
#endif
        sarq $3, %rax
        je .L65
        ALIGN_4

.L62:
        mulps %xmm8, %xmm9
        PREFETCH (PREFETCHSIZE + 0) * SIZE(AO)
        addps %xmm9, %xmm0
        movshdup 0 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 4 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm1
        movsldup 4 * SIZE(BO), %xmm9

        mulps %xmm8, %xmm9
        addps %xmm9, %xmm0
        movshdup 4 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 8 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm1
        movsldup 8 * SIZE(BO), %xmm9

        mulps %xmm8, %xmm9
        addps %xmm9, %xmm0
        movshdup 8 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 12 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm1
        movsldup 12 * SIZE(BO), %xmm9

        mulps %xmm8, %xmm9
        addps %xmm9, %xmm0
        movshdup 12 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 32 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm1
        movsldup 32 * SIZE(BO), %xmm9

        mulps %xmm10, %xmm11
        addps %xmm11, %xmm0
        movshdup 16 * SIZE(BO), %xmm11
        mulps %xmm10, %xmm11
        movaps 20 * SIZE(AO), %xmm10
        ADDSUB %xmm11, %xmm1
        movsldup 20 * SIZE(BO), %xmm11

        mulps %xmm10, %xmm11
        addps %xmm11, %xmm0
        movshdup 20 * SIZE(BO), %xmm11
        mulps %xmm10, %xmm11
        movaps 24 * SIZE(AO), %xmm10
        ADDSUB %xmm11, %xmm1
        movsldup 24 * SIZE(BO), %xmm11

        mulps %xmm10, %xmm11
        addps %xmm11, %xmm0
        movshdup 24 * SIZE(BO), %xmm11
        mulps %xmm10, %xmm11
        movaps 28 * SIZE(AO), %xmm10
        ADDSUB %xmm11, %xmm1
        movsldup 28 * SIZE(BO), %xmm11

        mulps %xmm10, %xmm11
        addps %xmm11, %xmm0
        movshdup 28 * SIZE(BO), %xmm11
        mulps %xmm10, %xmm11
        movaps 48 * SIZE(AO), %xmm10
        ADDSUB %xmm11, %xmm1
        movsldup 48 * SIZE(BO), %xmm11

        addq $32 * SIZE, AO
        addq $32 * SIZE, BO
        decq %rax
        jne .L62
        ALIGN_4

.L65:
#ifndef TRMMKERNEL
        movq K, %rax
#else
        movq KKK, %rax
#endif
        movaps ALPHA_R, %xmm14
        movaps ALPHA_I, %xmm15

        andq $7, %rax          # if (k & 1)
        BRANCH
        je .L68
        ALIGN_4

.L66:
        mulps %xmm8, %xmm9
        addps %xmm9, %xmm0
        movshdup 0 * SIZE(BO), %xmm9
        mulps %xmm8, %xmm9
        movaps 4 * SIZE(AO), %xmm8
        ADDSUB %xmm9, %xmm1
        movsldup 4 * SIZE(BO), %xmm9

        addq $4 * SIZE, AO     # aoffset  += 4
        addq $4 * SIZE, BO     # boffset1 += 8
        decq %rax
        jg .L66
        ALIGN_4

.L68:
#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
        defined(NR) || defined(NC) || defined(TR) || defined(TC)
        shufps $0xb1, %xmm1, %xmm1
        addsubps %xmm1, %xmm0
        movaps %xmm0, %xmm1
        shufps $0xb1, %xmm0, %xmm0
#else
        shufps $0xb1, %xmm0, %xmm0
        addsubps %xmm0, %xmm1
        movaps %xmm1, %xmm0
        shufps $0xb1, %xmm1, %xmm1
#endif

        mulps %xmm14, %xmm1
        mulps %xmm15, %xmm0
        addps %xmm1, %xmm0

#if !defined(TRMMKERNEL) && !defined(BETAZERO)
        movsd  0 * SIZE(CO1), %xmm8
        movhps 2 * SIZE(CO1), %xmm8

        addps %xmm8, %xmm0
#endif

        movsd  %xmm0, 0 * SIZE(CO1)
        movhps %xmm0, 2 * SIZE(CO1)

#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
        (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        movq K, %rax
        subq KKK, %rax
        leaq (,%rax, 8), %rax
        leaq (AO, %rax, 2), AO
        leaq (BO, %rax, 2), BO
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
        addq $2, KK
#endif

        addq $4 * SIZE, CO1    # coffset += 4
        ALIGN_4

.L70:
        testq $1, M
        je .L999

#if !defined(TRMMKERNEL) || \
        (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
        (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

        leaq BUFFER, BO
#else
        leaq BUFFER, BO
        movq KK, %rax
        leaq (, %rax, 8), %rax
        leaq (AO, %rax, 1), AO
        leaq (BO, %rax, 2), BO
#endif

        movddup 0 * SIZE(AO), %xmm8
        pxor %xmm0, %xmm0
        movsd   0 * SIZE(BO), %xmm9
        pxor %xmm1, %xmm1
        movddup 8 * SIZE(AO), %xmm10
        movsd  16 * SIZE(BO), %xmm11

#ifndef TRMMKERNEL
        movq K, %rax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movq K, %rax
        subq KK, %rax
        movq %rax, KKK
#else
        movq KK, %rax
#ifdef LEFT
        addq $1, %rax
#else
        addq $1, %rax
#endif
        movq %rax, KKK
#endif
        sarq $3, %rax
        je .L75
        ALIGN_4

.L72:
        shufps $0x50, %xmm9, %xmm9
        PREFETCH (PREFETCHSIZE + 0) * SIZE(AO)
        mulps %xmm8, %xmm9
        movddup 2 * SIZE(AO), %xmm8
        addps %xmm9, %xmm0
        movsd 4 * SIZE(BO), %xmm9

        shufps $0x50, %xmm9, %xmm9
        mulps %xmm8, %xmm9
        movddup 4 * SIZE(AO), %xmm8
        addps %xmm9, %xmm1
        movsd 8 * SIZE(BO), %xmm9

        shufps $0x50, %xmm9, %xmm9
        mulps %xmm8, %xmm9
        movddup 6 * SIZE(AO), %xmm8
        addps %xmm9, %xmm0
        movsd 12 * SIZE(BO), %xmm9

        shufps $0x50, %xmm9, %xmm9
        mulps %xmm8, %xmm9
        movddup 16 * SIZE(AO), %xmm8
        addps %xmm9, %xmm1
        movsd 32 * SIZE(BO), %xmm9

        shufps $0x50, %xmm11, %xmm11
        mulps %xmm10, %xmm11
        movddup 10 * SIZE(AO), %xmm10
        addps %xmm11, %xmm0
        movsd 20 * SIZE(BO), %xmm11

        shufps $0x50, %xmm11, %xmm11
        mulps %xmm10, %xmm11
        movddup 12 * SIZE(AO), %xmm10
        addps %xmm11, %xmm1
        movsd 24 * SIZE(BO), %xmm11

        shufps $0x50, %xmm11, %xmm11
        mulps %xmm10, %xmm11
        movddup 14 * SIZE(AO), %xmm10
        addps %xmm11, %xmm0
        movsd 28 * SIZE(BO), %xmm11

        shufps $0x50, %xmm11, %xmm11
        mulps %xmm10, %xmm11
        movddup 24 * SIZE(AO), %xmm10
        addps %xmm11, %xmm1
        movsd 48 * SIZE(BO), %xmm11

        addq $16 * SIZE, AO
        addq $32 * SIZE, BO
        decq %rax
        jne .L72
        ALIGN_4

.L75:
#ifndef TRMMKERNEL
        movq K, %rax
#else
        movq KKK, %rax
#endif
        movaps ALPHA_R, %xmm14
        movaps ALPHA_I, %xmm15

        andq $7, %rax          # if (k & 1)
        BRANCH
        je .L78
        ALIGN_4

.L76:
        shufps $0x50, %xmm9, %xmm9
        mulps %xmm8, %xmm9
        movddup 2 * SIZE(AO), %xmm8
        addps %xmm9, %xmm0
        movsd 4 * SIZE(BO), %xmm9

        addq $2 * SIZE, AO
        addq $4 * SIZE, BO
        decq %rax
        jg .L76
        ALIGN_4

.L78:
        addps %xmm1, %xmm0
        movhlps %xmm0, %xmm1

#if defined(NR) || defined(NC) || defined(TR) || defined(TC) || \
        defined(RR) || defined(RC) || defined(CR) || defined(CC)
        cmpeqps %xmm7, %xmm7
        pslld $31, %xmm7
        xorps %xmm7, %xmm1
#endif

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
        defined(NR) || defined(NC) || defined(TR) || defined(TC)
        shufps $0xb1, %xmm1, %xmm1
        addsubps %xmm1, %xmm0
        movaps %xmm0, %xmm1
        shufps $0xb1, %xmm0, %xmm0
#else
        shufps $0xb1, %xmm0, %xmm0
        addsubps %xmm0, %xmm1
        movaps %xmm1, %xmm0
        shufps $0xb1, %xmm1, %xmm1
#endif

        mulps %xmm14, %xmm1
        mulps %xmm15, %xmm0
        addps %xmm1, %xmm0

#if !defined(TRMMKERNEL) && !defined(BETAZERO)
        movsd 0 * SIZE(CO1), %xmm8
        addps %xmm8, %xmm0
#endif

        movsd %xmm0, 0 * SIZE(CO1)
        ALIGN_4

.L999:
        movq %rbx, %rsp
        movq  0(%rsp), %rbx
        movq  8(%rsp), %rbp
        movq 16(%rsp), %r12
        movq 24(%rsp), %r13
        movq 32(%rsp), %r14
        movq 40(%rsp), %r15

#ifdef WINDOWS_ABI
        movq 48(%rsp), %rdi
        movq 56(%rsp), %rsi
        movups  64(%rsp), %xmm6
        movups  80(%rsp), %xmm7
        movups  96(%rsp), %xmm8
        movups 112(%rsp), %xmm9
        movups 128(%rsp), %xmm10
        movups 144(%rsp), %xmm11
        movups 160(%rsp), %xmm12
        movups 176(%rsp), %xmm13
        movups 192(%rsp), %xmm14
        movups 208(%rsp), %xmm15
#endif

        addq $STACKSIZE, %rsp
        ret

        EPILOGUE
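For reference, a minimal C sketch of the SSE3 complex multiply-accumulate idiom the kernel relies on (movsldup/movshdup to broadcast Re(b)/Im(b), shufps $0xb1 to swap the real/imaginary halves of each pair, addsubps to form re*re - im*im and im*re + re*im). This is an illustrative sketch, not code from this file: the function name, test values, and the fused combine step are assumptions of the example; the kernel itself keeps the two partial products in separate accumulators (%xmm0/%xmm1, %xmm4/%xmm5) and applies the shufps/addsubps combine once at .L58/.L68/.L78. Build with -msse3.

#include <stdio.h>
#include <pmmintrin.h>   /* SSE3: _mm_moveldup_ps, _mm_movehdup_ps, _mm_addsub_ps */

/* c += a * b for two packed single-precision complex numbers.
 * Layout of each __m128: [re0, im0, re1, im1]. */
static inline __m128 cmadd_sse3(__m128 c, __m128 a, __m128 b)
{
    __m128 br = _mm_moveldup_ps(b);              /* [br0 br0 br1 br1]  (movsldup)  */
    __m128 bi = _mm_movehdup_ps(b);              /* [bi0 bi0 bi1 bi1]  (movshdup)  */
    __m128 t0 = _mm_mul_ps(a, br);               /* a * Re(b)                      */
    __m128 t1 = _mm_mul_ps(a, bi);               /* a * Im(b)                      */
    t1 = _mm_shuffle_ps(t1, t1, 0xb1);           /* swap re/im within each pair    */
    return _mm_add_ps(c, _mm_addsub_ps(t0, t1)); /* c += a * b  (addsubps)         */
}

int main(void)
{
    __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);  /* 1+2i, 3+4i */
    __m128 b = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);  /* 5+6i, 7+8i */
    float out[4];

    _mm_storeu_ps(out, cmadd_sse3(_mm_setzero_ps(), a, b));
    /* Expected: (1+2i)(5+6i) = -7+16i, (3+4i)(7+8i) = -11+52i */
    printf("%g%+gi  %g%+gi\n", out[0], out[1], out[2], out[3]);
    return 0;
}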