gemm_kernel_2x4_sse2.s
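
This listing is a fragment of a 32-bit x86 SSE2 double-precision GEMM/TRMM kernel in the GotoBLAS style. It picks up inside the 4-column panel at the 1 x 4 M-remainder tile, then continues with the 2-column panel: the B copy into BUFFER, the 2 x 2 main tile, and the start of the 1 x 2 M-remainder tile. As a reading aid, here is a minimal C sketch of what the 1 x 4 micro-tile computes on the non-TRMM path; it is not part of this file, and the function name, the explicit K/alpha/ldc parameters, and the flat-pointer view of the packed panels are illustrative assumptions. The packed B panel stores every value twice, so one k step spans 8 doubles and the assembly reads BB at offsets 0, 2, 4, 6:

    #include <stddef.h>

    /* Hypothetical reference model for the .L22/.L26 loops: a 1 x 4 tile,
       C[0..3 columns] += alpha * dot(row of A, four columns of B), K terms. */
    static void micro_1x4(size_t K, double alpha,
                          const double *AA,   /* packed row of A, stride 1     */
                          const double *BB,   /* packed B, each value doubled  */
                          double *C, size_t ldc)
    {
        double c0 = 0.0, c1 = 0.0, c2 = 0.0, c3 = 0.0;  /* xmm4..xmm7 */
        for (size_t k = 0; k < K; k++) {
            double a = AA[k];
            c0 += a * BB[8 * k + 0];   /* column 0 (first copy of the pair) */
            c1 += a * BB[8 * k + 2];   /* column 1 */
            c2 += a * BB[8 * k + 4];   /* column 2 */
            c3 += a * BB[8 * k + 6];   /* column 3 */
        }
        C[0 * ldc] += alpha * c0;      /* .L28: scale by ALPHA, add into C */
        C[1 * ldc] += alpha * c1;
        C[2 * ldc] += alpha * c2;
        C[3 * ldc] += alpha * c3;
    }

The .L22 loop below is this computation unrolled eight k steps deep, with the next loads hoisted above the multiplies they feed; .L26 handles the K & 7 remainder one step at a time.
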
	leal	(BB, %eax, 8), BB
#endif

/* 1 x 4 M-remainder tile of the 4-column panel */
	pxor	%xmm4, %xmm4
	pxor	%xmm5, %xmm5
	pxor	%xmm6, %xmm6
	pxor	%xmm7, %xmm7

	leal	(LDC, LDC, 2), %eax

	movsd	 0 * SIZE(AA), %xmm0
	movsd	 4 * SIZE(AA), %xmm1
	movsd	 0 * SIZE(BB), %xmm2
	movsd	 8 * SIZE(BB), %xmm3

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$1, %eax
#else
	addl	$4, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L25
	ALIGN_4

/* K loop, unrolled by 8 */
.L22:
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm4
#if defined(OPTERON) || defined(BARCELONA)
	PREFETCH (PREFETCHSIZE + 0) * SIZE(AA)
#endif
	movsd	 2 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm5
	movsd	 4 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	mulsd	 6 * SIZE(BB), %xmm0
	addsd	%xmm2, %xmm6
	movsd	16 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm7
	movsd	 1 * SIZE(AA), %xmm0

	mulsd	%xmm0, %xmm3
	addsd	%xmm3, %xmm4
	movsd	10 * SIZE(BB), %xmm3
	mulsd	%xmm0, %xmm3
	addsd	%xmm3, %xmm5
	movsd	12 * SIZE(BB), %xmm3
	mulsd	%xmm0, %xmm3
	mulsd	14 * SIZE(BB), %xmm0
	addsd	%xmm3, %xmm6
	movsd	24 * SIZE(BB), %xmm3
	addsd	%xmm0, %xmm7
	movsd	 2 * SIZE(AA), %xmm0

	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm4
	movsd	18 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm5
	movsd	20 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	mulsd	22 * SIZE(BB), %xmm0
	addsd	%xmm2, %xmm6
	movsd	32 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm7
	movsd	 3 * SIZE(AA), %xmm0

	mulsd	%xmm0, %xmm3
	addsd	%xmm3, %xmm4
	movsd	26 * SIZE(BB), %xmm3
	mulsd	%xmm0, %xmm3
	addsd	%xmm3, %xmm5
	movsd	28 * SIZE(BB), %xmm3
	mulsd	%xmm0, %xmm3
	mulsd	30 * SIZE(BB), %xmm0
	addsd	%xmm3, %xmm6
	movsd	40 * SIZE(BB), %xmm3
	addsd	%xmm0, %xmm7
	movsd	 8 * SIZE(AA), %xmm0

#if defined(OPTERON) || defined(BARCELONA)
	PREFETCH (PREFETCHSIZE + 8) * SIZE(AA)
#endif
	mulsd	%xmm1, %xmm2
	addsd	%xmm2, %xmm4
	movsd	34 * SIZE(BB), %xmm2
	mulsd	%xmm1, %xmm2
	addsd	%xmm2, %xmm5
	movsd	36 * SIZE(BB), %xmm2
	mulsd	%xmm1, %xmm2
	mulsd	38 * SIZE(BB), %xmm1
	addsd	%xmm2, %xmm6
	movsd	48 * SIZE(BB), %xmm2
	addsd	%xmm1, %xmm7
	movsd	 5 * SIZE(AA), %xmm1

	mulsd	%xmm1, %xmm3
	addsd	%xmm3, %xmm4
	movsd	42 * SIZE(BB), %xmm3
	mulsd	%xmm1, %xmm3
	addsd	%xmm3, %xmm5
	movsd	44 * SIZE(BB), %xmm3
	mulsd	%xmm1, %xmm3
	mulsd	46 * SIZE(BB), %xmm1
	addsd	%xmm3, %xmm6
	movsd	56 * SIZE(BB), %xmm3
	addsd	%xmm1, %xmm7
	movsd	 6 * SIZE(AA), %xmm1

	mulsd	%xmm1, %xmm2
	addsd	%xmm2, %xmm4
	movsd	50 * SIZE(BB), %xmm2
	mulsd	%xmm1, %xmm2
	addsd	%xmm2, %xmm5
	movsd	52 * SIZE(BB), %xmm2
	mulsd	%xmm1, %xmm2
	mulsd	54 * SIZE(BB), %xmm1
	addsd	%xmm2, %xmm6
	movsd	64 * SIZE(BB), %xmm2
	addsd	%xmm1, %xmm7
	movsd	 7 * SIZE(AA), %xmm1

	mulsd	%xmm1, %xmm3
	addsd	%xmm3, %xmm4
	movsd	58 * SIZE(BB), %xmm3
	mulsd	%xmm1, %xmm3
	addsd	%xmm3, %xmm5
	movsd	60 * SIZE(BB), %xmm3
	mulsd	%xmm1, %xmm3
	mulsd	62 * SIZE(BB), %xmm1
	addsd	%xmm3, %xmm6
	movsd	72 * SIZE(BB), %xmm3
	addl	$64 * SIZE, BB
	addsd	%xmm1, %xmm7
	movsd	12 * SIZE(AA), %xmm1
	addl	$ 8 * SIZE, AA
	decl	%eax
	jne	.L22
	ALIGN_4

.L25:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movsd	ALPHA, %xmm3
	andl	$7, %eax		# k remainder (k & 7)
	BRANCH
	je	.L28

.L26:
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm4
	movsd	 2 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	addsd	%xmm2, %xmm5
	movsd	 4 * SIZE(BB), %xmm2
	mulsd	%xmm0, %xmm2
	mulsd	 6 * SIZE(BB), %xmm0
	addsd	%xmm2, %xmm6
	movsd	 8 * SIZE(BB), %xmm2
	addsd	%xmm0, %xmm7
	movsd	 1 * SIZE(AA), %xmm0

	addl	$1 * SIZE, AA
	addl	$8 * SIZE, BB
	decl	%eax
	jg	.L26
	ALIGN_4

/* scale by alpha and write the 1 x 4 result back to C */
.L28:
	leal	(LDC, LDC, 2), %eax

#ifndef TRMMKERNEL
	mulsd	%xmm3, %xmm4
	movsd	0 * SIZE(%esi), %xmm0
	mulsd	%xmm3, %xmm5
	movsd	0 * SIZE(%esi, LDC, 1), %xmm1
	mulsd	%xmm3, %xmm6
	movsd	0 * SIZE(%esi, LDC, 2), %xmm2
	mulsd	%xmm3, %xmm7
	movsd	0 * SIZE(%esi, %eax, 1), %xmm3

	addsd	%xmm0, %xmm4
	addsd	%xmm1, %xmm5
	addsd	%xmm2, %xmm6
	addsd	%xmm3, %xmm7
#else
	mulsd	%xmm3, %xmm4
	mulsd	%xmm3, %xmm5
	mulsd	%xmm3, %xmm6
	mulsd	%xmm3, %xmm7
#endif

	movsd	%xmm4, 0 * SIZE(%esi)
	movsd	%xmm5, 0 * SIZE(%esi, LDC, 1)
	movsd	%xmm6, 0 * SIZE(%esi, LDC, 2)
	movsd	%xmm7, 0 * SIZE(%esi, %eax, 1)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 8), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$1, KK
#endif
	ALIGN_4

.L29:
#if defined(TRMMKERNEL) && !defined(LEFT)
	addl	$4, KK
#endif

	leal	(, LDC, 4), %eax
	addl	%eax, C			# c += 4 * ldc
	decl	J			# j --
	jg	.L01
	ALIGN_4

/* 2-column panel, entered if (n & 2) */
.L30:
	testl	$2, N
	je	.L60
	ALIGN_2

.L31:
#if defined(TRMMKERNEL) && defined(LEFT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

/* Copying to Sub Buffer */
	movl	K, %eax
	leal	BUFFER, %ecx
	sarl	$2, %eax
	jle	.L35
	ALIGN_4

.L32:
#ifdef PENTIUM4
#ifdef HAVE_SSE3
	movddup	 0 * SIZE(%edi), %xmm0
	movddup	 1 * SIZE(%edi), %xmm1
	movddup	 2 * SIZE(%edi), %xmm2
	movddup	 3 * SIZE(%edi), %xmm3
	movddup	 4 * SIZE(%edi), %xmm4
	movddup	 5 * SIZE(%edi), %xmm5
	movddup	 6 * SIZE(%edi), %xmm6
	movddup	 7 * SIZE(%edi), %xmm7

	movapd	%xmm0,  0 * SIZE(%ecx)
	movapd	%xmm1,  2 * SIZE(%ecx)
	movapd	%xmm2,  4 * SIZE(%ecx)
	movapd	%xmm3,  6 * SIZE(%ecx)
	movapd	%xmm4,  8 * SIZE(%ecx)
	movapd	%xmm5, 10 * SIZE(%ecx)
	movapd	%xmm6, 12 * SIZE(%ecx)
	movapd	%xmm7, 14 * SIZE(%ecx)
#else
	movsd	 0 * SIZE(%edi), %xmm0
	movsd	 1 * SIZE(%edi), %xmm1
	movsd	 2 * SIZE(%edi), %xmm2
	movsd	 3 * SIZE(%edi), %xmm3
	movsd	 4 * SIZE(%edi), %xmm4
	movsd	 5 * SIZE(%edi), %xmm5
	movsd	 6 * SIZE(%edi), %xmm6
	movsd	 7 * SIZE(%edi), %xmm7

	unpcklpd %xmm0, %xmm0
	unpckhpd %xmm1, %xmm1
	unpcklpd %xmm2, %xmm2
	unpckhpd %xmm3, %xmm3
	unpcklpd %xmm4, %xmm4
	unpckhpd %xmm5, %xmm5
	unpcklpd %xmm6, %xmm6
	unpckhpd %xmm7, %xmm7

	movapd	%xmm0,  0 * SIZE(%ecx)
	movapd	%xmm1,  2 * SIZE(%ecx)
	movapd	%xmm2,  4 * SIZE(%ecx)
	movapd	%xmm3,  6 * SIZE(%ecx)
	movapd	%xmm4,  8 * SIZE(%ecx)
	movapd	%xmm5, 10 * SIZE(%ecx)
	movapd	%xmm6, 12 * SIZE(%ecx)
	movapd	%xmm7, 14 * SIZE(%ecx)
#endif
	prefetcht0	 80 * SIZE(%edi)
	prefetcht1	112 * SIZE(%ecx)
#endif

#if defined(OPTERON) || defined(BARCELONA)
#define COPYPREFETCH 40
	prefetchnta	(COPYPREFETCH) * SIZE(%edi)

	movq	0 * SIZE(%edi), %mm0
	movq	1 * SIZE(%edi), %mm1
	movq	2 * SIZE(%edi), %mm2
	movq	3 * SIZE(%edi), %mm3
	movq	4 * SIZE(%edi), %mm4
	movq	5 * SIZE(%edi), %mm5
	movq	6 * SIZE(%edi), %mm6
	movq	7 * SIZE(%edi), %mm7

	movq	%mm0,  0 * SIZE(%ecx)
	movq	%mm0,  1 * SIZE(%ecx)
	movq	%mm1,  2 * SIZE(%ecx)
	movq	%mm1,  3 * SIZE(%ecx)
	movq	%mm2,  4 * SIZE(%ecx)
	movq	%mm2,  5 * SIZE(%ecx)
	movq	%mm3,  6 * SIZE(%ecx)
	movq	%mm3,  7 * SIZE(%ecx)
	movq	%mm4,  8 * SIZE(%ecx)
	movq	%mm4,  9 * SIZE(%ecx)
	movq	%mm5, 10 * SIZE(%ecx)
	movq	%mm5, 11 * SIZE(%ecx)
	movq	%mm6, 12 * SIZE(%ecx)
	movq	%mm6, 13 * SIZE(%ecx)
	movq	%mm7, 14 * SIZE(%ecx)
	movq	%mm7, 15 * SIZE(%ecx)
#endif

	addl	$ 8 * SIZE, %edi
	addl	$16 * SIZE, %ecx
	decl	%eax
	jne	.L32
	ALIGN_2

.L35:
	movl	K, %eax
	andl	$3, %eax
	BRANCH
	jle	.L40
	ALIGN_2

.L36:
#ifdef PENTIUM4
#ifdef HAVE_SSE3
	movddup	 0 * SIZE(%edi), %xmm0
	movddup	 1 * SIZE(%edi), %xmm1

	movapd	%xmm0,  0 * SIZE(%ecx)
	movapd	%xmm1,  2 * SIZE(%ecx)
#else
	movsd	 0 * SIZE(%edi), %xmm0
	movsd	 1 * SIZE(%edi), %xmm1

	unpcklpd %xmm0, %xmm0
	unpckhpd %xmm1, %xmm1

	movapd	%xmm0,  0 * SIZE(%ecx)
	movapd	%xmm1,  2 * SIZE(%ecx)
#endif
#endif

#if defined(OPTERON) || defined(BARCELONA)
	movq	0 * SIZE(%edi), %mm0
	movq	1 * SIZE(%edi), %mm1

	movq	%mm0,  0 * SIZE(%ecx)
	movq	%mm0,  1 * SIZE(%ecx)
	movq	%mm1,  2 * SIZE(%ecx)
	movq	%mm1,  3 * SIZE(%ecx)
#endif
	addl	$2 * SIZE, %edi
	addl	$4 * SIZE, %ecx
	decl	%eax
	jne	.L36
	ALIGN_4

.L40:
	movl	C, %esi		# coffset = c
	movl	A, AA		# aoffset = a

	movl	M, %ebx
	sarl	$1, %ebx	# i = (m >> 1)
	jle	.L50
	ALIGN_4

/* 2 x 2 tile of the 2-column panel */
.L41:
#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB
#endif

	pxor	%xmm4, %xmm4
	pxor	%xmm5, %xmm5
	pxor	%xmm6, %xmm6
	pxor	%xmm7, %xmm7

	movapd	 0 * SIZE(AA), %xmm0
	movapd	 8 * SIZE(AA), %xmm1
	movapd	 0 * SIZE(BB), %xmm2
	movapd	 8 * SIZE(BB), %xmm3

#ifdef HAVE_3DNOW
	prefetchw	2 * SIZE(%esi)
	prefetchw	2 * SIZE(%esi, LDC)
#endif
#ifdef PENTIUM4
	prefetchnta	4 * SIZE(%esi)
	prefetchnta	4 * SIZE(%esi, LDC)
#endif

#ifndef TRMMKERNEL
	movl	K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$2, %eax
#else
	addl	$2, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$3, %eax
	je	.L45
	ALIGN_4

/* 2 x 2 tile: K loop, unrolled by 8 */
.L42:
	mulpd	%xmm0, %xmm2
#if defined(OPTERON) || defined(BARCELONA)
	prefetcht0 (PREFETCHSIZE + 0) * SIZE(AA)
#endif
	mulpd	 2 * SIZE(BB), %xmm0
	addpd	%xmm2, %xmm4
	movapd	 4 * SIZE(BB), %xmm2
	addpd	%xmm0, %xmm5
	movapd	 2 * SIZE(AA), %xmm0

	mulpd	%xmm0, %xmm2
	mulpd	 6 * SIZE(BB), %xmm0
	addpd	%xmm2, %xmm6
	movapd	16 * SIZE(BB), %xmm2
	addpd	%xmm0, %xmm7
	movapd	 4 * SIZE(AA), %xmm0

	mulpd	%xmm0, %xmm3
	mulpd	10 * SIZE(BB), %xmm0
	addpd	%xmm3, %xmm4
	movapd	12 * SIZE(BB), %xmm3
	addpd	%xmm0, %xmm5
	movapd	 6 * SIZE(AA), %xmm0

	mulpd	%xmm0, %xmm3
	mulpd	14 * SIZE(BB), %xmm0
	addpd	%xmm3, %xmm6
	movapd	24 * SIZE(BB), %xmm3
	addpd	%xmm0, %xmm7
	movapd	16 * SIZE(AA), %xmm0

#if defined(OPTERON) || defined(BARCELONA)
	prefetcht0 (PREFETCHSIZE + 8) * SIZE(AA)
#endif
	mulpd	%xmm1, %xmm2
	mulpd	18 * SIZE(BB), %xmm1
	addpd	%xmm2, %xmm4
	movapd	20 * SIZE(BB), %xmm2
	addpd	%xmm1, %xmm5
	movapd	10 * SIZE(AA), %xmm1

	mulpd	%xmm1, %xmm2
	mulpd	22 * SIZE(BB), %xmm1
	addpd	%xmm2, %xmm6
	movapd	32 * SIZE(BB), %xmm2
	addpd	%xmm1, %xmm7
	movapd	12 * SIZE(AA), %xmm1

	mulpd	%xmm1, %xmm3
	mulpd	26 * SIZE(BB), %xmm1
	addpd	%xmm3, %xmm4
	movapd	28 * SIZE(BB), %xmm3
	addpd	%xmm1, %xmm5
	movapd	14 * SIZE(AA), %xmm1

	mulpd	%xmm1, %xmm3
	mulpd	30 * SIZE(BB), %xmm1
	addpd	%xmm3, %xmm6
	movapd	40 * SIZE(BB), %xmm3
	addpd	%xmm1, %xmm7
	movapd	24 * SIZE(AA), %xmm1

	addl	$16 * SIZE, AA
	addl	$32 * SIZE, BB
	decl	%eax
	jne	.L42
	ALIGN_4

.L45:
#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	movapd	ALPHA, %xmm3
	andl	$7, %eax		# k remainder (k & 7)
	BRANCH
	je	.L48
	ALIGN_3

.L46:
	mulpd	%xmm0, %xmm2
	mulpd	 2 * SIZE(BB), %xmm0
	addpd	%xmm2, %xmm4
	movapd	 4 * SIZE(BB), %xmm2
	addpd	%xmm0, %xmm5
	movapd	 2 * SIZE(AA), %xmm0

	addl	$2 * SIZE, AA
	addl	$4 * SIZE, BB
	decl	%eax
	jg	.L46
	ALIGN_4

/* scale by alpha and write the 2 x 2 result back to C */
.L48:
#ifndef TRMMKERNEL
	movsd	0 * SIZE(%esi), %xmm0
	movhpd	1 * SIZE(%esi), %xmm0
	movsd	0 * SIZE(%esi, LDC, 1), %xmm1
	movhpd	1 * SIZE(%esi, LDC, 1), %xmm1
#endif

	addpd	%xmm6, %xmm4
	addpd	%xmm7, %xmm5

	mulpd	%xmm3, %xmm4
	mulpd	%xmm3, %xmm5

#ifndef TRMMKERNEL
	addpd	%xmm0, %xmm4
	addpd	%xmm1, %xmm5
#endif

	movsd	%xmm4, 0 * SIZE(%esi)
	movhpd	%xmm4, 1 * SIZE(%esi)
	movsd	%xmm5, 0 * SIZE(%esi, LDC, 1)
	movhpd	%xmm5, 1 * SIZE(%esi, LDC, 1)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif

	addl	$2 * SIZE, %esi		# coffset += 2
	decl	%ebx			# i --
	jg	.L41
	ALIGN_4

/* 1 x 2 M-remainder tile of the 2-column panel */
.L50:
	movl	M, %ebx
	testl	$1, %ebx	# if (m & 1)
	jle	.L59

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

	leal	BUFFER, BB
#else
	leal	BUFFER, BB
	movl	KK, %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 4), BB
#endif
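
The /* Copying to Sub Buffer */ phase above produces the duplicated layout that both tiles of this panel consume: each scalar of the 2-column B panel is written twice, so the kernel can load an aligned {b, b} pair with a single movapd. A minimal sketch of that copy, assuming a hypothetical pack_b_duplicated helper over the already-interleaved column-pair stream (not part of this file):

    #include <stddef.h>

    /* Hypothetical model of .L32/.L36: duplicate each B value in place,
       turning 2*K input doubles into 4*K output doubles in BUFFER. */
    static void pack_b_duplicated(size_t K, const double *B, double *buffer)
    {
        for (size_t k = 0; k < 2 * K; k++) {
            buffer[2 * k + 0] = B[k];
            buffer[2 * k + 1] = B[k];
        }
    }

Duplicating at pack time trades twice the buffer space for cheaper inner-loop loads on pre-SSE3 cores, where building the pair in registers would otherwise cost a movsd plus an unpcklpd per value; that is exactly the fallback the #ifdef HAVE_SSE3 branches above show.
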