📄 trsm_kernel_ln_4x4_penryn.s
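/*
 *  Fragment of a 4x4-blocked, single-precision TRSM kernel for 32-bit x86
 *  (Penryn/SSE target).  The LN / LT / RN / RT preprocessor conditionals
 *  appear to select which side/transpose variant of the triangular solve is
 *  assembled; AA and BB appear to walk the packed A and B panels, CO1/LDC
 *  address the C tile being updated, and KK/OFFSET track the diagonal
 *  position.  (Descriptive header only; register roles inferred from the
 *  code below.)
 */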
	unpcklps %xmm7, %xmm3
	unpckhps %xmm7, %xmm4

	movaps	%xmm1, %xmm2
	unpcklps %xmm3, %xmm1
	unpckhps %xmm3, %xmm2

	movaps	%xmm0, %xmm6
	unpcklps %xmm4, %xmm0
	unpckhps %xmm4, %xmm6

	movlps	%xmm1, 0 * SIZE(CO1)
	movlps	%xmm2, 0 * SIZE(CO1, LDC, 1)
	movlps	%xmm0, 0 * SIZE(CO1, LDC, 2)
	movlps	%xmm6, 0 * SIZE(CO1, %eax, 1)
#else
	movlps	%xmm0, 0 * SIZE(CO1)
	movlps	%xmm1, 0 * SIZE(CO1, LDC, 1)
	movlps	%xmm2, 0 * SIZE(CO1, LDC, 2)
	movlps	%xmm3, 0 * SIZE(CO1, %eax, 1)
#endif

#ifndef LN
	addl	$2 * SIZE, CO1
#endif

#if defined(LT) || defined(RN)
	movl	K, %eax
	subl	KK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 4), BB
#endif

#ifdef LN
	subl	$2, KK
#endif

#ifdef LT
	addl	$2, KK
#endif

#ifdef RT
	movl	K, %eax
	sall	$1 + BASE_SHIFT, %eax
	addl	%eax, AORIG
#endif
	ALIGN_4

.L30:
	movl	M, %ebx
	sarl	$2, %ebx	# i = (m >> 2)
	jle	.L39
	ALIGN_4

.L11:
#ifdef LN
	movl	K, %eax
	sall	$2 + BASE_SHIFT, %eax
	subl	%eax, AORIG
#endif

#if defined(LN) || defined(RT)
	movl	KK, %eax
	movl	AORIG, AA
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 4), AA
#endif

	movl	B, BB

#if defined(LN) || defined(RT)
	movl	KK, %eax
	sall	$2 + BASE_SHIFT, %eax
	addl	%eax, BB
#endif

	leal	(CO1, LDC, 2), %eax

	movaps	-32 * SIZE(AA), %xmm0
	pxor	%xmm2, %xmm2
	movaps	-32 * SIZE(BB), %xmm1
	pxor	%xmm3, %xmm3

#ifdef LN
	pxor	%xmm4, %xmm4
	prefetcht0 -4 * SIZE(CO1)
	pxor	%xmm5, %xmm5
	prefetcht0 -4 * SIZE(CO1, LDC)
	pxor	%xmm6, %xmm6
	prefetcht0 -4 * SIZE(%eax)
	pxor	%xmm7, %xmm7
	prefetcht0 -4 * SIZE(%eax, LDC)
#else
	pxor	%xmm4, %xmm4
	prefetcht0  3 * SIZE(CO1)
	pxor	%xmm5, %xmm5
	prefetcht0  3 * SIZE(CO1, LDC)
	pxor	%xmm6, %xmm6
	prefetcht0  3 * SIZE(%eax)
	pxor	%xmm7, %xmm7
	prefetcht0  3 * SIZE(%eax, LDC)
#endif

#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	sarl	$3, %eax
	je	.L15
	ALIGN_4

.L12:
	PREFETCH (PREFETCHSIZE +  0) * SIZE(AA)

	addps	%xmm2, %xmm7
	pshufd	$0x93, %xmm1, %xmm2
	mulps	%xmm0, %xmm1
	addps	%xmm3, %xmm6
	pshufd	$0x93, %xmm2, %xmm3
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm5
	pshufd	$0x93, %xmm3, %xmm2
	mulps	%xmm0, %xmm3
	addps	%xmm1, %xmm4
	movaps	-28 * SIZE(BB), %xmm1
	mulps	%xmm0, %xmm2
	movaps	-28 * SIZE(AA), %xmm0

	addps	%xmm2, %xmm7
	pshufd	$0x93, %xmm1, %xmm2
	mulps	%xmm0, %xmm1
	addps	%xmm3, %xmm6
	pshufd	$0x93, %xmm2, %xmm3
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm5
	pshufd	$0x93, %xmm3, %xmm2
	mulps	%xmm0, %xmm3
	addps	%xmm1, %xmm4
	movaps	-24 * SIZE(BB), %xmm1
	mulps	%xmm0, %xmm2
	movaps	-24 * SIZE(AA), %xmm0

	addps	%xmm2, %xmm7
	pshufd	$0x93, %xmm1, %xmm2
	mulps	%xmm0, %xmm1
	addps	%xmm3, %xmm6
	pshufd	$0x93, %xmm2, %xmm3
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm5
	pshufd	$0x93, %xmm3, %xmm2
	mulps	%xmm0, %xmm3
	addps	%xmm1, %xmm4
	movaps	-20 * SIZE(BB), %xmm1
	mulps	%xmm0, %xmm2
	movaps	-20 * SIZE(AA), %xmm0

	addps	%xmm2, %xmm7
	pshufd	$0x93, %xmm1, %xmm2
	mulps	%xmm0, %xmm1
	addps	%xmm3, %xmm6
	pshufd	$0x93, %xmm2, %xmm3
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm5
	pshufd	$0x93, %xmm3, %xmm2
	mulps	%xmm0, %xmm3
	addps	%xmm1, %xmm4
	movaps	-16 * SIZE(BB), %xmm1
	mulps	%xmm0, %xmm2
	movaps	-16 * SIZE(AA), %xmm0

	PREFETCH (PREFETCHSIZE + 16) * SIZE(AA)

	addps	%xmm2, %xmm7
	pshufd	$0x93, %xmm1, %xmm2
	mulps	%xmm0, %xmm1
	addps	%xmm3, %xmm6
	pshufd	$0x93, %xmm2, %xmm3
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm5
	pshufd	$0x93, %xmm3, %xmm2
	mulps	%xmm0, %xmm3
	addps	%xmm1, %xmm4
	movaps	-12 * SIZE(BB), %xmm1
	mulps	%xmm0, %xmm2
	movaps	-12 * SIZE(AA), %xmm0

	addps	%xmm2, %xmm7
	pshufd	$0x93, %xmm1, %xmm2
	mulps	%xmm0, %xmm1
	addps	%xmm3, %xmm6
	pshufd	$0x93, %xmm2, %xmm3
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm5
	pshufd	$0x93, %xmm3, %xmm2
	mulps	%xmm0, %xmm3
	addps	%xmm1, %xmm4
	movaps	-8 * SIZE(BB), %xmm1
	mulps	%xmm0, %xmm2
	movaps	-8 * SIZE(AA), %xmm0

	addps	%xmm2, %xmm7
	pshufd	$0x93, %xmm1, %xmm2
	mulps	%xmm0, %xmm1
	addps	%xmm3, %xmm6
	pshufd	$0x93, %xmm2, %xmm3
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm5
	pshufd	$0x93, %xmm3, %xmm2
	mulps	%xmm0, %xmm3
	addps	%xmm1, %xmm4
	movaps	-4 * SIZE(BB), %xmm1
	mulps	%xmm0, %xmm2
	movaps	-4 * SIZE(AA), %xmm0

	addps	%xmm2, %xmm7
	subl	$-32 * SIZE, BB
	pshufd	$0x93, %xmm1, %xmm2
	mulps	%xmm0, %xmm1
	addps	%xmm3, %xmm6
	pshufd	$0x93, %xmm2, %xmm3
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm5
	subl	$-32 * SIZE, AA
	pshufd	$0x93, %xmm3, %xmm2
	mulps	%xmm0, %xmm3
	addps	%xmm1, %xmm4
	movaps	-32 * SIZE(BB), %xmm1
	mulps	%xmm0, %xmm2
	movaps	-32 * SIZE(AA), %xmm0

	subl	$1, %eax
	jne	.L12
	ALIGN_4

.L15:
#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	andl	$7, %eax	# if (k & 1)
	BRANCH
	je	.L18
	ALIGN_4

.L16:
	addps	%xmm2, %xmm7
	pshufd	$0x93, %xmm1, %xmm2
	mulps	%xmm0, %xmm1
	addps	%xmm3, %xmm6
	pshufd	$0x93, %xmm2, %xmm3
	mulps	%xmm0, %xmm2
	addps	%xmm2, %xmm5
	pshufd	$0x93, %xmm3, %xmm2
	mulps	%xmm0, %xmm3
	addps	%xmm1, %xmm4
	movaps	-28 * SIZE(BB), %xmm1
	mulps	%xmm0, %xmm2
	movaps	-28 * SIZE(AA), %xmm0

	addl	$4 * SIZE, AA
	addl	$4 * SIZE, BB
	decl	%eax
	jg	.L16
	ALIGN_4

.L18:
#if defined(LN) || defined(RT)
	movl	KK, %eax
#ifdef LN
	subl	$4, %eax
#else
	subl	$4, %eax
#endif
	movl	AORIG, AA
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 4), AA
	leal	(B,  %eax, 4), BB
#endif

	addps	%xmm3, %xmm6
	addps	%xmm2, %xmm7

#if defined(LN) || defined(LT)
	movaps	%xmm4, %xmm0
	unpcklps %xmm7, %xmm0
	unpckhps %xmm7, %xmm4

	movaps	%xmm6, %xmm2
	unpcklps %xmm5, %xmm2
	unpckhps %xmm5, %xmm6

	movaps	%xmm0, %xmm1
	movlhps	%xmm2, %xmm0
	movhlps	%xmm2, %xmm1

	movaps	%xmm6, %xmm7
	movlhps	%xmm4, %xmm6
	movhlps	%xmm4, %xmm7

	pshufd	$0x39, %xmm1, %xmm2
	pshufd	$0x39, %xmm7, %xmm4

	movaps	-32 * SIZE(BB), %xmm1
	movaps	-28 * SIZE(BB), %xmm3
	movaps	-24 * SIZE(BB), %xmm5
	movaps	-20 * SIZE(BB), %xmm7

	subps	%xmm0, %xmm1
	subps	%xmm2, %xmm3
	subps	%xmm6, %xmm5
	subps	%xmm4, %xmm7
#else
	pshufd	$0x39, %xmm5, %xmm2
	pshufd	$0x4e, %xmm6, %xmm0
	pshufd	$0x93, %xmm7, %xmm7

	movaps	%xmm4, %xmm6
	unpcklps %xmm0, %xmm4
	unpckhps %xmm0, %xmm6

	movaps	%xmm2, %xmm1
	unpcklps %xmm7, %xmm2
	unpckhps %xmm7, %xmm1

	movaps	%xmm4, %xmm5
	unpcklps %xmm2, %xmm4
	unpckhps %xmm2, %xmm5

	movaps	%xmm6, %xmm7
	unpcklps %xmm1, %xmm6
	unpckhps %xmm1, %xmm7

	pshufd	$0x93, %xmm5, %xmm5
	pshufd	$0x4e, %xmm6, %xmm6
	pshufd	$0x39, %xmm7, %xmm7

	movaps	-32 * SIZE(AA), %xmm0
	movaps	-28 * SIZE(AA), %xmm1
	movaps	-24 * SIZE(AA), %xmm2
	movaps	-20 * SIZE(AA), %xmm3

	subps	%xmm4, %xmm0
	subps	%xmm5, %xmm1
	subps	%xmm6, %xmm2
	subps	%xmm7, %xmm3
#endif

#ifdef LN
	movaps	-20 * SIZE(AA), %xmm4
	pshufd	$0xff, %xmm4, %xmm6
	mulps	%xmm6, %xmm7
	pshufd	$0xaa, %xmm4, %xmm6
	mulps	%xmm7, %xmm6
	subps	%xmm6, %xmm5
	pshufd	$0x55, %xmm4, %xmm6
	mulps	%xmm7, %xmm6
	subps	%xmm6, %xmm3
	pshufd	$0x00, %xmm4, %xmm6
	mulps	%xmm7, %xmm6
	subps	%xmm6, %xmm1

	movaps	-24 * SIZE(AA), %xmm4
	pshufd	$0xaa, %xmm4, %xmm6
	mulps	%xmm6, %xmm5
	pshufd	$0x55, %xmm4, %xmm6
	mulps	%xmm5, %xmm6
	subps	%xmm6, %xmm3
	pshufd	$0x00, %xmm4, %xmm6
	mulps	%xmm5, %xmm6
	subps	%xmm6, %xmm1

	movaps	-28 * SIZE(AA), %xmm4
	pshufd	$0x55, %xmm4, %xmm6
	mulps	%xmm6, %xmm3
	pshufd	$0x00, %xmm4, %xmm6
	mulps	%xmm3, %xmm6
	subps	%xmm6, %xmm1

	movaps	-32 * SIZE(AA), %xmm4
	pshufd	$0x00, %xmm4, %xmm6
	mulps	%xmm6, %xmm1
#endif

#ifdef LT
	movaps	-32 * SIZE(AA), %xmm4
	pshufd	$0x00, %xmm4, %xmm6
	mulps	%xmm6, %xmm1
	pshufd	$0x55, %xmm4, %xmm6
	mulps	%xmm1, %xmm6
	subps	%xmm6, %xmm3
	pshufd	$0xaa, %xmm4, %xmm6
	mulps	%xmm1, %xmm6
	subps	%xmm6, %xmm5
	pshufd	$0xff, %xmm4, %xmm6
	mulps	%xmm1, %xmm6
	subps	%xmm6, %xmm7

	movaps	-28 * SIZE(AA), %xmm4
	pshufd	$0x55, %xmm4, %xmm6
	mulps	%xmm6, %xmm3
	pshufd	$0xaa, %xmm4, %xmm6
	mulps	%xmm3, %xmm6
	subps	%xmm6, %xmm5
	pshufd	$0xff, %xmm4, %xmm6
	mulps	%xmm3, %xmm6
	subps	%xmm6, %xmm7

	movaps	-24 * SIZE(AA), %xmm4
	pshufd	$0xaa, %xmm4, %xmm6
	mulps	%xmm6, %xmm5
	pshufd	$0xff, %xmm4, %xmm6
	mulps	%xmm5, %xmm6
	subps	%xmm6, %xmm7

	movaps	-20 * SIZE(AA), %xmm4
	pshufd	$0xff, %xmm4, %xmm6
	mulps	%xmm6, %xmm7
#endif

#ifdef RN
	movaps	-32 * SIZE(BB), %xmm6
	pshufd	$0x00, %xmm6, %xmm7
	mulps	%xmm7, %xmm0
	pshufd	$0x55, %xmm6, %xmm7
	mulps	%xmm0, %xmm7
	subps	%xmm7, %xmm1
	pshufd	$0xaa, %xmm6, %xmm7
	mulps	%xmm0, %xmm7
	subps	%xmm7, %xmm2
	pshufd	$0xff, %xmm6, %xmm7
	mulps	%xmm0, %xmm7
	subps	%xmm7, %xmm3

	movaps	-28 * SIZE(BB), %xmm6
	pshufd	$0x55, %xmm6, %xmm7
	mulps	%xmm7, %xmm1
	pshufd	$0xaa, %xmm6, %xmm7
	mulps	%xmm1, %xmm7
	subps	%xmm7, %xmm2
	pshufd	$0xff, %xmm6, %xmm7
	mulps	%xmm1, %xmm7
	subps	%xmm7, %xmm3

	movaps	-24 * SIZE(BB), %xmm6
	pshufd	$0xaa, %xmm6, %xmm7
	mulps	%xmm7, %xmm2
	pshufd	$0xff, %xmm6, %xmm7
	mulps	%xmm2, %xmm7
	subps	%xmm7, %xmm3

	movaps	-20 * SIZE(BB), %xmm6
	pshufd	$0xff, %xmm6, %xmm7
	mulps	%xmm7, %xmm3
#endif

#ifdef RT
	movaps	-20 * SIZE(BB), %xmm6
	pshufd	$0xff, %xmm6, %xmm7
	mulps	%xmm7, %xmm3
	pshufd	$0xaa, %xmm6, %xmm7
	mulps	%xmm3, %xmm7
	subps	%xmm7, %xmm2
	pshufd	$0x55, %xmm6, %xmm7
	mulps	%xmm3, %xmm7
	subps	%xmm7, %xmm1
	pshufd	$0x00, %xmm6, %xmm7
	mulps	%xmm3, %xmm7
	subps	%xmm7, %xmm0

	movaps	-24 * SIZE(BB), %xmm6
	pshufd	$0xaa, %xmm6, %xmm7
	mulps	%xmm7, %xmm2
	pshufd	$0x55, %xmm6, %xmm7
	mulps	%xmm2, %xmm7
	subps	%xmm7, %xmm1
	pshufd	$0x00, %xmm6, %xmm7
	mulps	%xmm2, %xmm7
	subps	%xmm7, %xmm0

	movaps	-28 * SIZE(BB), %xmm6
	pshufd	$0x55, %xmm6, %xmm7
	mulps	%xmm7, %xmm1
	pshufd	$0x00, %xmm6, %xmm7
	mulps	%xmm1, %xmm7
	subps	%xmm7, %xmm0

	movaps	-32 * SIZE(BB), %xmm6
	pshufd	$0x00, %xmm6, %xmm7
	mulps	%xmm7, %xmm0
#endif

#if defined(LN) || defined(LT)
	movaps	%xmm1, -32 * SIZE(BB)
	movaps	%xmm3, -28 * SIZE(BB)
	movaps	%xmm5, -24 * SIZE(BB)
	movaps	%xmm7, -20 * SIZE(BB)
#else
	movaps	%xmm0, -32 * SIZE(AA)
	movaps	%xmm1, -28 * SIZE(AA)
	movaps	%xmm2, -24 * SIZE(AA)
	movaps	%xmm3, -20 * SIZE(AA)
#endif

#ifdef LN
	subl	$4 * SIZE, CO1
#endif

	leal	(LDC, LDC, 2), %eax

#if defined(LN) || defined(LT)
	movaps	%xmm1, %xmm0
	unpcklps %xmm5, %xmm1
	unpckhps %xmm5, %xmm0

	movaps	%xmm3, %xmm4
	unpcklps %xmm7, %xmm3
	unpckhps %xmm7, %xmm4

	movaps	%xmm1, %xmm2
	unpcklps %xmm3, %xmm1
	unpckhps %xmm3, %xmm2

	movaps	%xmm0, %xmm6
	unpcklps %xmm4, %xmm0
	unpckhps %xmm4, %xmm6

	movlps	%xmm1, 0 * SIZE(CO1)
	movhps	%xmm1, 2 * SIZE(CO1)
	movlps	%xmm2, 0 * SIZE(CO1, LDC, 1)
	movhps	%xmm2, 2 * SIZE(CO1, LDC, 1)
	movlps	%xmm0, 0 * SIZE(CO1, LDC, 2)
	movhps	%xmm0, 2 * SIZE(CO1, LDC, 2)
	movlps	%xmm6, 0 * SIZE(CO1, %eax, 1)
	movhps	%xmm6, 2 * SIZE(CO1, %eax, 1)
#else
	movlps	%xmm0, 0 * SIZE(CO1)
	movhps	%xmm0, 2 * SIZE(CO1)
	movlps	%xmm1, 0 * SIZE(CO1, LDC, 1)
	movhps	%xmm1, 2 * SIZE(CO1, LDC, 1)
	movlps	%xmm2, 0 * SIZE(CO1, LDC, 2)
	movhps	%xmm2, 2 * SIZE(CO1, LDC, 2)
	movlps	%xmm3, 0 * SIZE(CO1, %eax, 1)
	movhps	%xmm3, 2 * SIZE(CO1, %eax, 1)
#endif

#ifndef LN
	addl	$4 * SIZE, CO1
#endif

#if defined(LT) || defined(RN)
	movl	K, %eax
	subl	KK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 4), AA
	leal	(BB, %eax, 4), BB
#endif

#ifdef LN
	subl	$4, KK
#endif

#ifdef LT
	addl	$4, KK
#endif

#ifdef RT
	movl	K, %eax
	sall	$2 + BASE_SHIFT, %eax
	addl	%eax, AORIG
#endif

	decl	%ebx			# i --
	jg	.L11
	ALIGN_4

.L39:
#ifdef LN
	movl	K, %eax
	leal	(, %eax, SIZE), %eax
	leal	(B, %eax, 4), B
#endif

#if defined(LT) || defined(RN)
	movl	BB, B
#endif

#ifdef RN
	addl	$4, KK
#endif

#ifdef RT
	subl	$4, KK
#endif

	decl	J			# j --
	jg	.L10
	ALIGN_4

.L40:
	testl	$2, N
	je	.L80

#if defined(LT) || defined(RN)
	movl	A, AA
#else
	movl	A, %eax
	movl	%eax, AORIG
#endif

#ifdef RT
	movl	K, %eax
	sall	$1 + BASE_SHIFT, %eax
	subl	%eax, B
#endif

	leal	(, LDC, 2), %eax

#ifdef RT
	subl	%eax, C
#endif
	movl	C, CO1
#ifndef RT
	addl	%eax, C
#endif

#ifdef LN
	movl	OFFSET, %eax
	addl	M, %eax
	movl	%eax, KK
#endif

#ifdef LT
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

	testl	$1, M
	je	.L60

#ifdef LN
	movl	K, %eax
	sall	$BASE_SHIFT, %eax
	subl	%eax, AORIG
#endif

#if defined(LN) || defined(RT)
	movl	KK, %eax
	movl	AORIG, AA
	leal	(AA, %eax, SIZE), AA
#endif

	movl	B, BB

#if defined(LN) || defined(RT)
	movl	KK, %eax
	sall	$1 + BASE_SHIFT, %eax
	addl	%eax, BB
#endif

	pxor	%xmm4, %xmm4
	movsd	-32 * SIZE(AA), %xmm0
	pxor	%xmm5, %xmm5
	movsd	-32 * SIZE(BB), %xmm1

#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	sarl	$3, %eax
	je	.L75
	ALIGN_4

.L72:
	PREFETCH (PREFETCHSIZE +  0) * SIZE(AA)

	pshufd	$0x00, %xmm0, %xmm2
	mulps	%xmm2, %xmm1
	addps	%xmm1, %xmm4
	movsd	-30 * SIZE(BB), %xmm1

	pshufd	$0x55, %xmm0, %xmm2
	movsd	-30 * SIZE(AA), %xmm0
	mulps	%xmm2, %xmm1
	addps	%xmm1, %xmm5
	movsd	-28 * SIZE(BB), %xmm1

	pshufd	$0x00, %xmm0, %xmm2
	mulps	%xmm2, %xmm1
	addps	%xmm1, %xmm4
	movsd	-26 * SIZE(BB), %xmm1

	pshufd	$0x55, %xmm0, %xmm2
	movsd	-28 * SIZE(AA), %xmm0
	mulps	%xmm2, %xmm1
	addps	%xmm1, %xmm5
	movsd	-24 * SIZE(BB), %xmm1

	pshufd	$0x00, %xmm0, %xmm2
	mulps	%xmm2, %xmm1
	addps	%xmm1, %xmm4
	movsd	-22 * SIZE(BB), %xmm1

	pshufd	$0x55, %xmm0, %xmm2
	movsd	-26 * SIZE(AA), %xmm0
	mulps	%xmm2, %xmm1
	addps	%xmm1, %xmm5
	movsd	-20 * SIZE(BB), %xmm1

	pshufd	$0x00, %xmm0, %xmm2
	mulps	%xmm2, %xmm1
	addps	%xmm1, %xmm4
	movsd	-18 * SIZE(BB), %xmm1

	pshufd	$0x55, %xmm0, %xmm2
	movsd	-24 * SIZE(AA), %xmm0
	mulps	%xmm2, %xmm1
	addps	%xmm1, %xmm5
	movsd	-16 * SIZE(BB), %xmm1

	subl	$ -8 * SIZE, AA
	subl	$-16 * SIZE, BB
	subl	$1, %eax
	jne	.L72
	ALIGN_4

.L75:
#if defined(LT) || defined(RN)
	movl	KK, %eax
#else
	movl	K, %eax
	subl	KK, %eax
#endif
	andl	$7, %eax	# if (k & 1)
	BRANCH
	je	.L78
	ALIGN_4

.L76:
	pshufd	$0x00, %xmm0, %xmm2
	movss	-31 * SIZE(AA), %xmm0
	mulps	%xmm2, %xmm1
	addps	%xmm1, %xmm4
	movsd	-30 * SIZE(BB), %xmm1

	addl	$1 * SIZE, AA
	addl	$2 * SIZE, BB
	decl	%eax
	jg	.L76
	ALIGN_4

.L78:
#if defined(LN) || defined(RT)
	movl	KK, %eax
#ifdef LN
	subl	$1, %eax