zgemm_kernel_4x1_sse.s
	leal	(, %eax, 8), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB	/* because it's doubled */

	movaps	 0 * SIZE(BB), %xmm2
	XORPS	%xmm4, %xmm4
	movaps	 0 * SIZE(AA), %xmm0
	XORPS	%xmm5, %xmm5
	movaps	 8 * SIZE(BB), %xmm3
	XORPS	%xmm6, %xmm6
	movaps	 8 * SIZE(AA), %xmm1
	XORPS	%xmm7, %xmm7
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$2, %eax
#else
	addl	$1, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L52
	ALIGN_4

.L51:
	mulps	%xmm0, %xmm2
	mulps	 4 * SIZE(BB), %xmm0
	addps	%xmm2, %xmm4
	movaps	16 * SIZE(BB), %xmm2
	addps	%xmm0, %xmm5
	movaps	 4 * SIZE(AA), %xmm0

	mulps	%xmm0, %xmm3
	mulps	12 * SIZE(BB), %xmm0
	addps	%xmm3, %xmm6
	movaps	24 * SIZE(BB), %xmm3
	addps	%xmm0, %xmm7
	movaps	16 * SIZE(AA), %xmm0

	mulps	%xmm1, %xmm2
	mulps	20 * SIZE(BB), %xmm1
	addps	%xmm2, %xmm4
	movaps	32 * SIZE(BB), %xmm2
	addps	%xmm1, %xmm5
	movaps	12 * SIZE(AA), %xmm1

	mulps	%xmm1, %xmm3
	mulps	28 * SIZE(BB), %xmm1
	addps	%xmm3, %xmm6
	movaps	40 * SIZE(BB), %xmm3
	addps	%xmm1, %xmm7
	movaps	24 * SIZE(AA), %xmm1

	mulps	%xmm0, %xmm2
	mulps	36 * SIZE(BB), %xmm0
	addps	%xmm2, %xmm4
	movaps	48 * SIZE(BB), %xmm2
	addps	%xmm0, %xmm5
	movaps	20 * SIZE(AA), %xmm0

	mulps	%xmm0, %xmm3
	mulps	44 * SIZE(BB), %xmm0
	addps	%xmm3, %xmm6
	movaps	56 * SIZE(BB), %xmm3
	addps	%xmm0, %xmm7
	movaps	32 * SIZE(AA), %xmm0

	mulps	%xmm1, %xmm2
	mulps	52 * SIZE(BB), %xmm1
	addps	%xmm2, %xmm4
	movaps	64 * SIZE(BB), %xmm2
	addps	%xmm1, %xmm5
	movaps	28 * SIZE(AA), %xmm1

	mulps	%xmm1, %xmm3
	mulps	60 * SIZE(BB), %xmm1
	addps	%xmm3, %xmm6
	movaps	72 * SIZE(BB), %xmm3
	addps	%xmm1, %xmm7
	movaps	40 * SIZE(AA), %xmm1

	addl	$32 * SIZE, AA
	addl	$64 * SIZE, BB
	decl	%eax
	jne	.L51
#endif

.L52:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movaps	ALPHA_R, %xmm1
	movaps	ALPHA_I, %xmm3
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je	.L54

.L53:
	mulps	%xmm0, %xmm2
	mulps	 4 * SIZE(BB), %xmm0
	addps	%xmm2, %xmm4
	movaps	 8 * SIZE(BB), %xmm2
	addps	%xmm0, %xmm5
	movaps	 4 * SIZE(AA), %xmm0

	addl	$4 * SIZE, AA		# aoffset  += 8
	addl	$8 * SIZE, BB		# boffset1 += 8
	decl	%eax
	jg	.L53

.L54:
	addps	%xmm6, %xmm4
	addps	%xmm7, %xmm5

	shufps	$0xb1, %xmm5, %xmm5

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(RR) || defined(RC) || defined(CR) || defined(CC)
	subps	%xmm5, %xmm4
#else
	addps	%xmm5, %xmm4
#endif

	movaps	%xmm4, %xmm5
	shufps	$0xb1, %xmm4, %xmm4

	mulps	%xmm1, %xmm5
	mulps	%xmm3, %xmm4
	addps	%xmm5, %xmm4

#ifndef TRMMKERNEL
	MOVSD	0 * SIZE(%esi), %xmm0
	movhps	2 * SIZE(%esi), %xmm0
	addps	%xmm0, %xmm4
#endif

	MOVSD	%xmm4, 0 * SIZE(%esi)
	movhps	%xmm4, 2 * SIZE(%esi)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, 8), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif

	addl	$4 * SIZE, %esi		# coffset += 4
	ALIGN_2

.L70:
	testl	$1, %ebx
	jle	.L99

#if (L1_DATA_LINESIZE == 64)
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB

	MOVSD	 0 * SIZE + BUFFER, %xmm2
	XORPS	%xmm4, %xmm4
	MOVSD	 0 * SIZE(AA), %xmm0
	XORPS	%xmm5, %xmm5
	MOVSD	16 * SIZE + BUFFER, %xmm3
	XORPS	%xmm6, %xmm6
	MOVSD	 8 * SIZE(AA), %xmm1
	XORPS	%xmm7, %xmm7
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, 8), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB	/* because it's doubled */

	MOVSD	 0 * SIZE(BB), %xmm2
	XORPS	%xmm4, %xmm4
	MOVSD	 0 * SIZE(AA), %xmm0
	XORPS	%xmm5, %xmm5
	MOVSD	16 * SIZE(BB), %xmm3
	XORPS	%xmm6, %xmm6
	MOVSD	 8 * SIZE(AA), %xmm1
	XORPS	%xmm7, %xmm7
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
	addl	$1, %eax
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L72
	ALIGN_4

.L71:
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	MOVSD	 4 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	MOVSD	 2 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	MOVSD	 8 * SIZE(BB), %xmm2

	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm6
	MOVSD	12 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	MOVSD	 4 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm7
	MOVSD	32 * SIZE(BB), %xmm2

	mulps	%xmm0, %xmm3
	addps	%xmm3, %xmm4
	MOVSD	20 * SIZE(BB), %xmm3
	mulps	%xmm0, %xmm3
	MOVSD	 6 * SIZE(AA), %xmm0
	addps	%xmm3, %xmm5
	MOVSD	24 * SIZE(BB), %xmm3

	mulps	%xmm0, %xmm3
	addps	%xmm3, %xmm6
	MOVSD	28 * SIZE(BB), %xmm3
	mulps	%xmm0, %xmm3
	MOVSD	16 * SIZE(AA), %xmm0
	addps	%xmm3, %xmm7
	MOVSD	48 * SIZE(BB), %xmm3

	mulps	%xmm1, %xmm2
	addps	%xmm2, %xmm4
	MOVSD	36 * SIZE(BB), %xmm2
	mulps	%xmm1, %xmm2
	MOVSD	10 * SIZE(AA), %xmm1
	addps	%xmm2, %xmm5
	MOVSD	40 * SIZE(BB), %xmm2

	mulps	%xmm1, %xmm2
	addps	%xmm2, %xmm6
	MOVSD	44 * SIZE(BB), %xmm2
	mulps	%xmm1, %xmm2
	MOVSD	12 * SIZE(AA), %xmm1
	addps	%xmm2, %xmm7
	MOVSD	64 * SIZE(BB), %xmm2

	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm4
	MOVSD	52 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	MOVSD	14 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	MOVSD	56 * SIZE(BB), %xmm3

	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm6
	MOVSD	60 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	MOVSD	24 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm7
	MOVSD	80 * SIZE(BB), %xmm3

	addl	$16 * SIZE, AA
	addl	$64 * SIZE, BB
	decl	%eax
	jne	.L71
	ALIGN_2

#else

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB

	MOVSD	 0 * SIZE + BUFFER, %xmm2
	XORPS	%xmm4, %xmm4
	MOVSD	 0 * SIZE(AA), %xmm0
	XORPS	%xmm5, %xmm5
	MOVSD	 8 * SIZE + BUFFER, %xmm3
	XORPS	%xmm6, %xmm6
	MOVSD	 8 * SIZE(AA), %xmm1
	XORPS	%xmm7, %xmm7
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, 8), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB	/* because it's doubled */

	MOVSD	 0 * SIZE(BB), %xmm2
	XORPS	%xmm4, %xmm4
	MOVSD	 0 * SIZE(AA), %xmm0
	XORPS	%xmm5, %xmm5
	MOVSD	 8 * SIZE(BB), %xmm3
	XORPS	%xmm6, %xmm6
	MOVSD	 8 * SIZE(AA), %xmm1
	XORPS	%xmm7, %xmm7
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
	addl	$1, %eax
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L72
	ALIGN_4

.L71:
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	MOVSD	 4 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	MOVSD	 2 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	MOVSD	16 * SIZE(BB), %xmm2

	mulps	%xmm0, %xmm3
	addps	%xmm3, %xmm4
	MOVSD	12 * SIZE(BB), %xmm3
	mulps	%xmm0, %xmm3
	MOVSD	 4 * SIZE(AA), %xmm0
	addps	%xmm3, %xmm5
	MOVSD	24 * SIZE(BB), %xmm3

	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	MOVSD	20 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	MOVSD	 6 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	MOVSD	32 * SIZE(BB), %xmm2

	mulps	%xmm0, %xmm3
	addps	%xmm3, %xmm4
	MOVSD	28 * SIZE(BB), %xmm3
	mulps	%xmm0, %xmm3
	MOVSD	16 * SIZE(AA), %xmm0
	addps	%xmm3, %xmm5
	MOVSD	40 * SIZE(BB), %xmm3

	mulps	%xmm1, %xmm2
	addps	%xmm2, %xmm4
	MOVSD	36 * SIZE(BB), %xmm2
	mulps	%xmm1, %xmm2
	MOVSD	10 * SIZE(AA), %xmm1
	addps	%xmm2, %xmm5
	MOVSD	48 * SIZE(BB), %xmm2

	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm4
	MOVSD	44 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	MOVSD	12 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	MOVSD	56 * SIZE(BB), %xmm3

	mulps	%xmm1, %xmm2
	addps	%xmm2, %xmm4
	MOVSD	52 * SIZE(BB), %xmm2
	mulps	%xmm1, %xmm2
	MOVSD	14 * SIZE(AA), %xmm1
	addps	%xmm2, %xmm5
	MOVSD	64 * SIZE(BB), %xmm2

	mulps	%xmm1, %xmm3
	addps	%xmm3, %xmm4
	MOVSD	60 * SIZE(BB), %xmm3
	mulps	%xmm1, %xmm3
	MOVSD	24 * SIZE(AA), %xmm1
	addps	%xmm3, %xmm5
	MOVSD	72 * SIZE(BB), %xmm3

	addl	$16 * SIZE, AA
	addl	$64 * SIZE, BB
	decl	%eax
	jne	.L71
	ALIGN_2
#endif

.L72:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movaps	ALPHA_R, %xmm1
	movaps	ALPHA_I, %xmm3
	andl	$7, %eax		# if (k & 1)
	BRANCH
	je	.L74

.L73:
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm4
	MOVSD	 4 * SIZE(BB), %xmm2
	mulps	%xmm0, %xmm2
	MOVSD	 2 * SIZE(AA), %xmm0
	addps	%xmm2, %xmm5
	MOVSD	 8 * SIZE(BB), %xmm2

	addl	$2 * SIZE, AA		# aoffset  += 8
	addl	$8 * SIZE, BB		# boffset1 += 8
	decl	%eax
	jg	.L73

.L74:
	addps	%xmm6, %xmm4
	addps	%xmm7, %xmm5

	shufps	$0xb1, %xmm5, %xmm5

#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \
    defined(RR) || defined(RC) || defined(CR) || defined(CC)
	subps	%xmm5, %xmm4
#else
	addps	%xmm5, %xmm4
#endif

	movaps	%xmm4, %xmm5
	shufps	$0xb1, %xmm4, %xmm4

	mulps	%xmm1, %xmm5
	mulps	%xmm3, %xmm4
	addps	%xmm5, %xmm4

#ifndef TRMMKERNEL
	MOVSD	0 * SIZE(%esi), %xmm0
	addps	%xmm0, %xmm4
#endif

	MOVSD	%xmm4, 0 * SIZE(%esi)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, 8), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$1, KK
#endif
	ALIGN_2

.L99:
#if defined(TRMMKERNEL) && !defined(LEFT)
	addl	$1, KK
#endif

	addl	LDC, C			# c += ldc
	decl	J			# j --
	jg	.L01
	ALIGN_2

.L999:
	movl	OLD_STACK, %esp

	EMMS

	popl	%ebx
	popl	%esi
	popl	%edi
	popl	%ebp
	ret

	EPILOGUE
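For reference, the epilogues at .L54 and .L74 combine the two accumulator sets into a complex result (shufps $0xb1 swaps the real/imaginary lanes so the cross products line up), add or subtract the swapped half according to the NN/NT/.../CC conjugation macros, scale by alpha = ALPHA_R + i*ALPHA_I, and, unless TRMMKERNEL is defined, add the result into C. Below is a rough scalar C sketch of that arithmetic for one output element; the function name, the single conj_b flag, and the packed-array layout are illustrative assumptions rather than part of this source, and only one of the eight conjugation cases is modeled. float complex is used to match the single-precision data the mulps instructions operate on.

#include <complex.h>

/* Illustrative scalar model of one output element of this kernel:
 * an inner product over k packed complex entries, optionally
 * conjugating the B operand (a stand-in for the conjugation macro
 * cases), scaled by alpha = ALPHA_R + i*ALPHA_I and added into C. */
static void gemm_micro_ref(int k,
                           const float complex *a,  /* packed AA panel (assumed layout) */
                           const float complex *b,  /* packed BB panel (assumed layout) */
                           float complex alpha,
                           float complex *c,
                           int conj_b)               /* hypothetical flag */
{
    float complex acc = 0.0f;
    for (int l = 0; l < k; l++)
        acc += a[l] * (conj_b ? conjf(b[l]) : b[l]);
    *c += alpha * acc;   /* non-TRMMKERNEL path: C is read, updated, stored */
}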