gemm_kernel_2x4_3dnow.s
        movl %eax, KKK
#else
        movl KK, %eax
#ifdef LEFT
        addl $1, %eax
#else
        addl $2, %eax
#endif
        movl %eax, KKK
#endif
        sarl $4, %eax
        je .L55
        ALIGN_4

.L52: # 1x2 tail: unrolled k-loop (16 iterations per pass)
        pfmul %mm0, %mm2
        pfadd %mm2, %mm4
        PADDING movd ( 4 + BOFFSET) * SIZE(BB), %mm2
        pfmul ( 2 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm5
        movd ( 1 + AOFFSET) * SIZE(AA), %mm0
        PADDING prefetch (PREFETCHSIZE + 0) * SIZE(AA)

        pfmul %mm0, %mm2
        pfadd %mm2, %mm6
        PADDING movd ( 8 + BOFFSET) * SIZE(BB), %mm2
        pfmul ( 6 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm7
        movd ( 2 + AOFFSET) * SIZE(AA), %mm0

        pfmul %mm0, %mm2
        pfadd %mm2, %mm4
        PADDING movd ( 12 + BOFFSET) * SIZE(BB), %mm2
        pfmul ( 10 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm5
        movd ( 3 + AOFFSET) * SIZE(AA), %mm0

        pfmul %mm0, %mm2
        pfadd %mm2, %mm6
        PADDING movd ( 32 + BOFFSET) * SIZE(BB), %mm2
        pfmul ( 14 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm7
        movd ( 4 + AOFFSET) * SIZE(AA), %mm0

        pfmul %mm0, %mm3
        pfadd %mm3, %mm4
        PADDING movd ( 20 + BOFFSET) * SIZE(BB), %mm3
        pfmul ( 18 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm5
        movd ( 5 + AOFFSET) * SIZE(AA), %mm0

        pfmul %mm0, %mm3
        pfadd %mm3, %mm6
        PADDING movd ( 24 + BOFFSET) * SIZE(BB), %mm3
        pfmul ( 22 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm7
        movd ( 6 + AOFFSET) * SIZE(AA), %mm0

        pfmul %mm0, %mm3
        pfadd %mm3, %mm4
        PADDING movd ( 28 + BOFFSET) * SIZE(BB), %mm3
        pfmul ( 26 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm5
        movd ( 7 + AOFFSET) * SIZE(AA), %mm0

        pfmul %mm0, %mm3
        pfadd %mm3, %mm6
        PADDING movd ( 48 + BOFFSET) * SIZE(BB), %mm3
        pfmul ( 30 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm7
        movd ( 16 + AOFFSET) * SIZE(AA), %mm0

        pfmul %mm1, %mm2
        pfadd %mm2, %mm4
        PADDING movd ( 36 + BOFFSET) * SIZE(BB), %mm2
        pfmul ( 34 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm5
        movd ( 9 + AOFFSET) * SIZE(AA), %mm1

        pfmul %mm1, %mm2
        pfadd %mm2, %mm6
        PADDING movd ( 40 + BOFFSET) * SIZE(BB), %mm2
        pfmul ( 38 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm7
        movd ( 10 + AOFFSET) * SIZE(AA), %mm1

        pfmul %mm1, %mm2
        pfadd %mm2, %mm4
        PADDING movd ( 44 + BOFFSET) * SIZE(BB), %mm2
        pfmul ( 42 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm5
        movd ( 11 + AOFFSET) * SIZE(AA), %mm1

        pfmul %mm1, %mm2
        pfadd %mm2, %mm6
        PADDING movd ( 64 + BOFFSET) * SIZE(BB), %mm2
        pfmul ( 46 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm7
        movd ( 12 + AOFFSET) * SIZE(AA), %mm1

        pfmul %mm1, %mm3
        pfadd %mm3, %mm4
        PADDING movd ( 52 + BOFFSET) * SIZE(BB), %mm3
        pfmul ( 50 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm5
        movd ( 13 + AOFFSET) * SIZE(AA), %mm1

        pfmul %mm1, %mm3
        pfadd %mm3, %mm6
        PADDING movd ( 56 + BOFFSET) * SIZE(BB), %mm3
        pfmul ( 54 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm7
        movd ( 14 + AOFFSET) * SIZE(AA), %mm1

        pfmul %mm1, %mm3
        pfadd %mm3, %mm4
        PADDING movd ( 60 + BOFFSET) * SIZE(BB), %mm3
        pfmul ( 58 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm5
        movd ( 15 + AOFFSET) * SIZE(AA), %mm1

        pfmul %mm1, %mm3
        pfadd %mm3, %mm6
        PADDING movd ( 80 + BOFFSET) * SIZE(BB), %mm3
        pfmul ( 62 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm7
        movd ( 24 + AOFFSET) * SIZE(AA), %mm1

        subl $-16 * SIZE, AA
        addl $ 64 * SIZE, BB
        decl %eax
        jne .L52
        ALIGN_3

.L55: # k remainder (k & 15)
        movd ALPHA, %mm3
#ifndef TRMMKERNEL
        movl K, %eax
#else
        movl KKK, %eax
#endif
        andl $15, %eax
        BRANCH
        je .L58
        ALIGN_3

.L56:
        pfmul %mm0, %mm2
        pfadd %mm2, %mm4
        PADDING movd ( 4 + BOFFSET) * SIZE(BB), %mm2
        pfmul ( 2 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm5
        movd ( 1 + AOFFSET) * SIZE(AA), %mm0

        addl $1 * SIZE, AA
        addl $4 * SIZE, BB
        decl %eax
        jg .L56
        ALIGN_3

.L58: # fold accumulators, scale by ALPHA, store the 1x2 result
        pfadd %mm6, %mm4
        pfadd %mm7, %mm5

        pfmul %mm3, %mm4
        pfmul %mm3, %mm5

#ifndef TRMMKERNEL
        movd 0 * SIZE(%esi), %mm0
        movd 0 * SIZE(%esi, %ebp, 1), %mm1
        pfadd %mm0, %mm4
        pfadd %mm1, %mm5
#endif

        movd %mm4, 0 * SIZE(%esi)
        movd %mm5, 0 * SIZE(%esi, %ebp, 1)

#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        movl K, %eax
        subl KKK, %eax
        leal (,%eax, SIZE), %eax
        leal (AA, %eax, 1), AA
        leal (BB, %eax, 4), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
        addl $1, KK
#endif
        ALIGN_4

.L59:
#if defined(TRMMKERNEL) && !defined(LEFT)
        addl $2, KK
#endif

        leal (, %ebp, 2), %eax
        addl %eax, C # c += 2 * ldc
        ALIGN_4

.L60: # last column block (N & 1)
        movl N, %eax
        testl $1, %eax
        jle .L999
        ALIGN_3

.L61:
/* Copying to Sub Buffer */
        leal BUFFER, %ecx

#if defined(TRMMKERNEL) && defined(LEFT)
        movl OFFSET, %eax
        movl %eax, KK
#endif

        movl K, %eax
        sarl $3, %eax
        jle .L63
        ALIGN_3

.L62: # duplicate eight B elements per pass into the buffer
        movd 0 * SIZE(%edi), %mm0
        movd 1 * SIZE(%edi), %mm1
        movd 2 * SIZE(%edi), %mm2
        movd 3 * SIZE(%edi), %mm3
        movd 4 * SIZE(%edi), %mm4
        movd 5 * SIZE(%edi), %mm5
        movd 6 * SIZE(%edi), %mm6
        movd 7 * SIZE(%edi), %mm7

        prefetchnta 72 * SIZE(%edi)

        punpckldq %mm0, %mm0
        punpckldq %mm1, %mm1
        punpckldq %mm2, %mm2
        punpckldq %mm3, %mm3
        punpckldq %mm4, %mm4
        punpckldq %mm5, %mm5
        punpckldq %mm6, %mm6
        punpckldq %mm7, %mm7

        movq %mm0, 0 * SIZE(%ecx)
        movq %mm1, 2 * SIZE(%ecx)
        movq %mm2, 4 * SIZE(%ecx)
        movq %mm3, 6 * SIZE(%ecx)
        movq %mm4, 8 * SIZE(%ecx)
        movq %mm5, 10 * SIZE(%ecx)
        movq %mm6, 12 * SIZE(%ecx)
        movq %mm7, 14 * SIZE(%ecx)

        addl $ 8 * SIZE, %edi
        addl $16 * SIZE, %ecx
        decl %eax
        jne .L62

.L63: # copy remainder (K & 7)
        movl K, %eax
        andl $7, %eax
        BRANCH
        jle .L70
        ALIGN_2

.L64:
        movd 0 * SIZE(%edi), %mm0
        punpckldq %mm0, %mm0
        movq %mm0, 0 * SIZE(%ecx)
        addl $1 * SIZE, %edi
        addl $2 * SIZE, %ecx
        decl %eax
        jne .L64
        ALIGN_4

.L70: # 2x1 blocks: two rows of C per iteration
        movl C, %esi # coffset = c
        movl A, %edx # aoffset = a

        movl M, %ebx
        sarl $1, %ebx # i = (m >> 1)
        jle .L90
        ALIGN_4

.L71:
        leal - BOFFSET * SIZE + BUFFER, BB

#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
#else
        movl KK, %eax
        leal (, %eax, SIZE), %eax
        leal (AA, %eax, 2), AA
        leal (BB, %eax, 2), BB
#endif

        movq ( 0 + AOFFSET) * SIZE(AA), %mm0
        pxor %mm4, %mm4
        movq ( 16 + AOFFSET) * SIZE(AA), %mm1
        pxor %mm5, %mm5
        PADDING movq ( 0 + BOFFSET) * SIZE(BB), %mm2
        pxor %mm6, %mm6
        pxor %mm7, %mm7

        prefetchw 2 * SIZE(%esi)
        prefetchw 2 * SIZE(%esi, %ebp)

#ifndef TRMMKERNEL
        movl K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl K, %eax
        subl KK, %eax
        movl %eax, KKK
#else
        movl KK, %eax
#ifdef LEFT
        addl $2, %eax
#else
        addl $1, %eax
#endif
        movl %eax, KKK
#endif
        sarl $4, %eax
        je .L75
        ALIGN_4

.L72: # unrolled k-loop (16 iterations per pass)
        pfmul ( 0 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm4
        movq ( 2 + AOFFSET) * SIZE(AA), %mm0

        PADDING prefetch (PREFETCHSIZE + 0) * SIZE(AA)

        pfmul ( 2 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm5
        movq ( 4 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 4 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm6
        movq ( 6 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 6 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm7
        movq ( 8 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 8 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm4
        movq ( 10 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 10 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm5
        movq ( 12 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 12 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm6
        movq ( 14 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 14 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm7
        movq ( 32 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 16 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm4
        movq ( 18 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 18 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm5
        movq ( 20 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 20 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm6
        movq ( 22 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 22 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm7
        movq ( 24 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 24 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm4
        movq ( 26 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 26 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm5
        movq ( 28 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 28 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm6
        movq ( 30 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 30 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm7
        movq ( 48 + AOFFSET) * SIZE(AA), %mm1

        subl $-32 * SIZE, AA
        addl $ 32 * SIZE, BB
        decl %eax
        jne .L72
        ALIGN_3

.L75: # k remainder (k & 15)
        movq ALPHA, %mm3
#ifndef TRMMKERNEL
        movl K, %eax
#else
        movl KKK, %eax
#endif
        andl $15, %eax
        BRANCH
        je .L78
        ALIGN_3

.L76:
        pfmul ( 0 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm4
        movq ( 2 + AOFFSET) * SIZE(AA), %mm0

        addl $2 * SIZE, AA
        addl $2 * SIZE, BB
        decl %eax
        jg .L76
        ALIGN_3

.L78: # fold accumulators, scale by ALPHA, store the 2x1 result
        pfadd %mm5, %mm4
        pfadd %mm7, %mm6
        pfadd %mm6, %mm4

        pfmul %mm3, %mm4
#ifndef TRMMKERNEL
        pfadd 0 * SIZE(%esi), %mm4
#endif
        movq %mm4, 0 * SIZE(%esi)

#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        movl K, %eax
        subl KKK, %eax
        leal (,%eax, SIZE), %eax
        leal (AA, %eax, 2), AA
        leal (BB, %eax, 2), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
        addl $2, KK
#endif

        addl $2 * SIZE, %esi # coffset += 2
        decl %ebx # i --
        jg .L71
        ALIGN_4

.L90: # 1x1 tail
        movl M, %ebx
        testl $1, %ebx # m & 1
        jle .L999
        ALIGN_4

.L91:
        leal - BOFFSET * SIZE + BUFFER, BB

#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
#else
        movl KK, %eax
        leal (, %eax, SIZE), %eax
        leal (AA, %eax, 1), AA
        leal (BB, %eax, 2), BB
#endif

        movq ( 0 + AOFFSET) * SIZE(AA), %mm0
        pxor %mm4, %mm4
        movq ( 8 + AOFFSET) * SIZE(AA), %mm1
        pxor %mm5, %mm5
        PADDING movq ( 0 + BOFFSET) * SIZE(BB), %mm2
        pxor %mm6, %mm6
        pxor %mm7, %mm7

#ifndef TRMMKERNEL
        movl K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl K, %eax
        subl KK, %eax
        movl %eax, KKK
#else
        movl KK, %eax
#ifdef LEFT
        addl $1, %eax
#else
        addl $1, %eax
#endif
        movl %eax, KKK
#endif
        sarl $4, %eax
        je .L95
        ALIGN_4

.L92: # unrolled k-loop (16 iterations per pass)
        PADDING prefetch (PREFETCHSIZE + 0) * SIZE(AA)

        pfmul ( 0 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm4
        movd ( 1 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 2 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm5
        movd ( 2 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 4 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm6
        movd ( 3 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 6 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm7
        movd ( 4 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 8 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm4
        movd ( 5 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 10 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm5
        movd ( 6 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 12 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm6
        movd ( 7 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 14 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm7
        movd ( 16 + AOFFSET) * SIZE(AA), %mm0

        pfmul ( 16 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm4
        movd ( 9 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 18 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm5
        movd ( 10 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 20 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm6
        movd ( 11 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 22 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm7
        movd ( 12 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 24 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm4
        movd ( 13 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 26 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm5
        movd ( 14 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 28 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm6
        movd ( 15 + AOFFSET) * SIZE(AA), %mm1

        pfmul ( 30 + BOFFSET) * SIZE(BB), %mm1
        pfadd %mm1, %mm7
        movd ( 24 + AOFFSET) * SIZE(AA), %mm1

        subl $-16 * SIZE, AA
        addl $ 32 * SIZE, BB
        decl %eax
        jne .L92
        ALIGN_3

.L95: # k remainder (k & 15)
        movd ALPHA, %mm3
#ifndef TRMMKERNEL
        movl K, %eax
#else
        movl KKK, %eax
#endif
        andl $15, %eax
        BRANCH
        je .L98
        ALIGN_3

.L96:
        pfmul ( 0 + BOFFSET) * SIZE(BB), %mm0
        pfadd %mm0, %mm4
        movd ( 1 + AOFFSET) * SIZE(AA), %mm0

        addl $1 * SIZE, AA
        addl $2 * SIZE, BB
        decl %eax
        jg .L96
        ALIGN_3

.L98: # fold accumulators, scale by ALPHA, store the 1x1 result
#ifndef TRMMKERNEL
        movd 0 * SIZE(%esi), %mm0
#endif
        pfadd %mm5, %mm4
        pfadd %mm7, %mm6
        pfadd %mm6, %mm4

        pfmul %mm3, %mm4
        pfmul %mm3, %mm5
#ifndef TRMMKERNEL
        pfadd %mm0, %mm4
#endif
        movd %mm4, 0 * SIZE(%esi)

#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
        movl K, %eax
        subl KKK, %eax
        leal (,%eax, SIZE), %eax
        leal (AA, %eax, 1), AA
        leal (BB, %eax, 2), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
        addl $1, KK
#endif
        ALIGN_4

.L999: # restore state and return
        EMMS

        movl OLD_STACK, %esp
        popl %ebx
        popl %esi
        popl %edi
        popl %ebp
        ret
        EPILOGUE
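Note on the packing step: the copy loops at .L62/.L64 pack the single remaining column of B into BUFFER, duplicating each element into both lanes of a quadword (punpckldq %mm0, %mm0) so the 2x1 micro-kernel can multiply two rows of A against one B value with a single movq load and pfmul. A minimal C sketch of that step is below; it is an illustration only, and the name pack_b_dup is hypothetical.

#include <stddef.h>

/* Hypothetical sketch of the .L62/.L64 packing loops: each B element
 * is stored twice, so one 64-bit movq load in the micro-kernel yields
 * the same b value in both 3DNow! lanes. */
static void pack_b_dup(size_t k, const float *b, float *buffer)
{
    for (size_t p = 0; p < k; p++) {
        buffer[2 * p + 0] = b[p];   /* low lane (punpckldq)  */
        buffer[2 * p + 1] = b[p];   /* high lane             */
    }
}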
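For readers following the register-level code, the three tail paths above (.L52/.L56/.L58 for the 1x2 edge, .L71 through .L78 for the 2x1 edge, and .L91 through .L98 for the 1x1 edge) all compute what a scalar reference kernel would. The sketch below is a minimal C illustration, assuming column-major storage with leading dimensions lda/ldb/ldc as in the BLAS convention; the name ref_gemm_tail is hypothetical and the code is not part of this file.

/* Hypothetical scalar reference for the tail paths above:
 * C[m x n] += alpha * A[m x k] * B[k x n], column-major,
 * with m, n in {1, 2}. The unrolled 3DNow! loops accumulate
 * into four registers (mm4..mm7) and fold them together at
 * .L58/.L78/.L98; 'sum' below plays the role of that folded
 * total, and the final '+=' corresponds to the
 * non-TRMMKERNEL path that reads C back with pfadd. */
static void ref_gemm_tail(size_t m, size_t n, size_t k, float alpha,
                          const float *a, size_t lda,
                          const float *b, size_t ldb,
                          float *c, size_t ldc)
{
    for (size_t j = 0; j < n; j++) {        /* columns: %esi, %esi + ldc */
        for (size_t i = 0; i < m; i++) {    /* rows of the edge block    */
            float sum = 0.0f;               /* mm4..mm7 folded           */
            for (size_t p = 0; p < k; p++)  /* the .L52/.L72/.L92 loops  */
                sum += a[i + p * lda] * b[p + j * ldb];
            c[i + j * ldc] += alpha * sum;
        }
    }
}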