trsm_kernel_ln_4x4_core2.s
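/* Editor's note: this appears to be a GotoBLAS/OpenBLAS-style TRSM
   micro-kernel for Intel Core2 (double precision, SSE2/SSE3), dumped
   mid-file.  One source serves the LN/LT/RN/RT solve variants via
   preprocessor macros.  AO and BO walk packed panels of A and B, CO1/CO2
   point into the C tile, LDC is the column stride of C, and SIZE is the
   element size.  The excerpt opens inside the k-loop of an M=2 column
   block and continues into the main M=4 block loop. */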
/* Tail of the unrolled k-loop for the M=2 block: flush the last partial
   products, advance the packed-panel pointers, loop back to .L22. */
	addpd	%xmm2, %xmm8
	addpd	%xmm3, %xmm9
	addpd	%xmm4, %xmm10
	addpd	%xmm5, %xmm11

	subq	$-8 * SIZE, AO
	subq	$-32 * SIZE, BO
	subq	$1, %rax
	jne	.L22
	ALIGN_4

.L25:
#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	andq	$3, %rax		# k & 3 remainder
	BRANCH
	je	.L29
	ALIGN_4

/* k-loop remainder for the M=2 block: one 2x4 rank-1 update per pass. */
.L26:
	movapd	-16 * SIZE(AO), %xmm0
	movapd	-16 * SIZE(BO), %xmm2
	movapd	-14 * SIZE(BO), %xmm3
	movapd	-12 * SIZE(BO), %xmm4
	movapd	-10 * SIZE(BO), %xmm5

	mulpd	%xmm0, %xmm2
	mulpd	%xmm0, %xmm3
	mulpd	%xmm0, %xmm4
	mulpd	%xmm0, %xmm5

	addpd	%xmm2, %xmm8
	addpd	%xmm3, %xmm9
	addpd	%xmm4, %xmm10
	addpd	%xmm5, %xmm11

	addq	$2 * SIZE, AO
	addq	$8 * SIZE, BO
	subq	$1, %rax
	jne	.L26
	ALIGN_4

/* Rewind AO, B and BO to the start of this block's packed data:
   the LN/RT solves walk the block from its far end. */
.L29:
#if defined(LN) || defined(RT)
	movq	KK, %rax
#ifdef LN
	subq	$2, %rax
#else
	subq	$4, %rax
#endif
	movq	AORIG, AO
	movq	BORIG, B

	leaq	16 * SIZE + BUFFER, BO

	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 2), AO
	leaq	(B,  %rax, 4), B
	leaq	(BO, %rax, 8), BO
#endif

/* Transpose the accumulators and form (packed panel) - (accumulated sum). */
#if defined(LN) || defined(LT)
	movapd	%xmm8, %xmm0
	unpcklpd %xmm9, %xmm8
	unpckhpd %xmm9, %xmm0

	movapd	%xmm10, %xmm2
	unpcklpd %xmm11, %xmm10
	unpckhpd %xmm11, %xmm2

	movapd	-16 * SIZE(B), %xmm9
	movapd	-14 * SIZE(B), %xmm11
	movapd	-12 * SIZE(B), %xmm13
	movapd	-10 * SIZE(B), %xmm15

	subpd	%xmm8,  %xmm9
	subpd	%xmm10, %xmm11
	subpd	%xmm0,  %xmm13
	subpd	%xmm2,  %xmm15
#else
	movapd	-16 * SIZE(AO), %xmm0
	movapd	-14 * SIZE(AO), %xmm2
	movapd	-12 * SIZE(AO), %xmm4
	movapd	-10 * SIZE(AO), %xmm6

	subpd	%xmm8,  %xmm0
	subpd	%xmm9,  %xmm2
	subpd	%xmm10, %xmm4
	subpd	%xmm11, %xmm6
#endif

/* 2x2 solve against A (LN: backward from the last diagonal entry,
   LT: forward from the first).  The packed diagonal is pre-inverted,
   so each divide is a multiply. */
#ifdef LN
	movddup	-13 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm13
	mulpd	%xmm8, %xmm15

	movddup	-14 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm13, %xmm10
	subpd	%xmm10, %xmm9
	mulpd	%xmm15, %xmm12
	subpd	%xmm12, %xmm11

	movddup	-16 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm9
	mulpd	%xmm8, %xmm11
#endif

#ifdef LT
	movddup	-16 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm9
	mulpd	%xmm8, %xmm11

	movddup	-15 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm9,  %xmm10
	subpd	%xmm10, %xmm13
	mulpd	%xmm11, %xmm12
	subpd	%xmm12, %xmm15

	movddup	-13 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm13
	mulpd	%xmm8, %xmm15
#endif

/* 4x4 solve against the packed B panel (RN: forward, RT: backward). */
#ifdef RN
	movddup	-16 * SIZE(B), %xmm8
	mulpd	%xmm8, %xmm0

	movddup	-15 * SIZE(B), %xmm9
	mulpd	%xmm0, %xmm9
	subpd	%xmm9, %xmm2
	movddup	-14 * SIZE(B), %xmm10
	mulpd	%xmm0, %xmm10
	subpd	%xmm10, %xmm4
	movddup	-13 * SIZE(B), %xmm11
	mulpd	%xmm0, %xmm11
	subpd	%xmm11, %xmm6

	movddup	-11 * SIZE(B), %xmm8
	mulpd	%xmm8, %xmm2
	movddup	-10 * SIZE(B), %xmm9
	mulpd	%xmm2, %xmm9
	subpd	%xmm9, %xmm4
	movddup	 -9 * SIZE(B), %xmm10
	mulpd	%xmm2, %xmm10
	subpd	%xmm10, %xmm6

	movddup	 -6 * SIZE(B), %xmm8
	mulpd	%xmm8, %xmm4
	movddup	 -5 * SIZE(B), %xmm9
	mulpd	%xmm4, %xmm9
	subpd	%xmm9, %xmm6

	movddup	 -1 * SIZE(B), %xmm8
	mulpd	%xmm8, %xmm6
#endif

#ifdef RT
	movddup	 -1 * SIZE(B), %xmm8
	mulpd	%xmm8, %xmm6
	movddup	 -2 * SIZE(B), %xmm9
	mulpd	%xmm6, %xmm9
	subpd	%xmm9, %xmm4
	movddup	 -3 * SIZE(B), %xmm10
	mulpd	%xmm6, %xmm10
	subpd	%xmm10, %xmm2
	movddup	 -4 * SIZE(B), %xmm11
	mulpd	%xmm6, %xmm11
	subpd	%xmm11, %xmm0

	movddup	 -6 * SIZE(B), %xmm8
	mulpd	%xmm8, %xmm4
	movddup	 -7 * SIZE(B), %xmm9
	mulpd	%xmm4, %xmm9
	subpd	%xmm9, %xmm2
	movddup	 -8 * SIZE(B), %xmm10
	mulpd	%xmm4, %xmm10
	subpd	%xmm10, %xmm0

	movddup	-11 * SIZE(B), %xmm8
	mulpd	%xmm8, %xmm2
	movddup	-12 * SIZE(B), %xmm9
	mulpd	%xmm2, %xmm9
	subpd	%xmm9, %xmm0

	movddup	-16 * SIZE(B), %xmm8
	mulpd	%xmm8, %xmm0
#endif

/* Write the solved 2x4 tile back to C (LN stores walk C backward). */
#ifdef LN
	subq	$2 * SIZE, CO1
	subq	$2 * SIZE, CO2
#endif

#if defined(LN) || defined(LT)
	movsd	%xmm9,  0 * SIZE(CO1)
	movsd	%xmm13, 1 * SIZE(CO1)
	movhpd	%xmm9,  0 * SIZE(CO2)
	movhpd	%xmm13, 1 * SIZE(CO2)

	movsd	%xmm11, 0 * SIZE(CO1, LDC, 2)
	movsd	%xmm15, 1 * SIZE(CO1, LDC, 2)
	movhpd	%xmm11, 0 * SIZE(CO2, LDC, 2)
	movhpd	%xmm15, 1 * SIZE(CO2, LDC, 2)
#else
	movsd	%xmm0, 0 * SIZE(CO1)
	movhpd	%xmm0, 1 * SIZE(CO1)
	movsd	%xmm2, 0 * SIZE(CO2)
	movhpd	%xmm2, 1 * SIZE(CO2)
	movsd	%xmm4, 0 * SIZE(CO1, LDC, 2)
	movhpd	%xmm4, 1 * SIZE(CO1, LDC, 2)
	movsd	%xmm6, 0 * SIZE(CO2, LDC, 2)
	movhpd	%xmm6, 1 * SIZE(CO2, LDC, 2)
#endif

/* Also write the result back into the packed panels so later blocks see
   the solved values; BO keeps each value duplicated in both lanes. */
#if defined(LN) || defined(LT)
	movapd	%xmm9,  -16 * SIZE(B)
	movapd	%xmm11, -14 * SIZE(B)
	movapd	%xmm13, -12 * SIZE(B)
	movapd	%xmm15, -10 * SIZE(B)

	movddup	%xmm9,  %xmm8
	SHUFPD_3 %xmm9,  %xmm9
	movddup	%xmm11, %xmm10
	SHUFPD_3 %xmm11, %xmm11
	movddup	%xmm13, %xmm12
	SHUFPD_3 %xmm13, %xmm13
	movddup	%xmm15, %xmm14
	SHUFPD_3 %xmm15, %xmm15

	movapd	%xmm8,  -16 * SIZE(BO)
	movapd	%xmm9,  -14 * SIZE(BO)
	movapd	%xmm10, -12 * SIZE(BO)
	movapd	%xmm11, -10 * SIZE(BO)
	movapd	%xmm12,  -8 * SIZE(BO)
	movapd	%xmm13,  -6 * SIZE(BO)
	movapd	%xmm14,  -4 * SIZE(BO)
	movapd	%xmm15,  -2 * SIZE(BO)
#else
	movapd	%xmm0, -16 * SIZE(AO)
	movapd	%xmm2, -14 * SIZE(AO)
	movapd	%xmm4, -12 * SIZE(AO)
	movapd	%xmm6, -10 * SIZE(AO)
#endif

#ifndef LN
	addq	$2 * SIZE, CO1
	addq	$2 * SIZE, CO2
#endif

#if defined(LT) || defined(RN)
	movq	K,  %rax
	subq	KK, %rax
	leaq	(,%rax, SIZE), %rax
	leaq	(AO, %rax, 2), AO
#ifdef LT
	addq	$8 * SIZE, B
#endif
#endif

#ifdef LN
	subq	$2, KK
	movq	BORIG, B
#endif
#ifdef LT
	addq	$2, KK
#endif
#ifdef RT
	movq	K, %rax
	movq	BORIG, B
	salq	$1 + BASE_SHIFT, %rax
	addq	%rax, AORIG
#endif
	ALIGN_4

/* Main loop over the M=4 blocks: i = m >> 2 panels of four rows. */
.L30:
	movq	M, I
	sarq	$2, I			# i = (m >> 2)
	jle	.L39
	ALIGN_4

.L11:
#ifdef LN
	movq	K, %rax
	salq	$2 + BASE_SHIFT, %rax
	subq	%rax, AORIG
#endif

#if defined(LN) || defined(RT)
	movq	KK, %rax
	movq	AORIG, AO
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
#endif

	leaq	16 * SIZE + BUFFER, BO

#if defined(LN) || defined(RT)
	movq	KK, %rax
	salq	$2 + BASE_SHIFT, %rax
	leaq	(BO, %rax, 2), BO
#endif

/* Zero the accumulators, prefetching the C tile on the way. */
	pxor	%xmm8, %xmm8
	pxor	%xmm9, %xmm9
	pxor	%xmm10, %xmm10
	pxor	%xmm11, %xmm11

#ifdef LN
	prefetcht2	-3 * SIZE(CO1)
	pxor	%xmm12, %xmm12
	prefetcht2	-3 * SIZE(CO2)
	pxor	%xmm13, %xmm13
	prefetcht2	-3 * SIZE(CO1, LDC, 2)
	pxor	%xmm14, %xmm14
	prefetcht2	-3 * SIZE(CO2, LDC, 2)
	pxor	%xmm15, %xmm15
#else
	prefetcht2	 3 * SIZE(CO1)
	pxor	%xmm12, %xmm12
	prefetcht2	 3 * SIZE(CO2)
	pxor	%xmm13, %xmm13
	prefetcht2	 3 * SIZE(CO1, LDC, 2)
	pxor	%xmm14, %xmm14
	prefetcht2	 3 * SIZE(CO2, LDC, 2)
	pxor	%xmm15, %xmm15
#endif

	pxor	%xmm2, %xmm2
	pxor	%xmm3, %xmm3
	pxor	%xmm4, %xmm4
	pxor	%xmm5, %xmm5

#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	sarq	$2, %rax
	jle	.L15
	ALIGN_4

/* Main k-loop, unrolled four deep and software-pipelined: products from
   the previous step are accumulated while the next operands load. */
.L12:
	PREFETCH (PREFETCHSIZE + 0) * SIZE(AO)

	addpd	%xmm2, %xmm10
	movapd	-16 * SIZE(AO), %xmm0
	addpd	%xmm3, %xmm14
	movapd	-16 * SIZE(BO), %xmm2
	movapd	%xmm2, %xmm3
	movapd	-14 * SIZE(AO), %xmm1
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm11
	movapd	-14 * SIZE(BO), %xmm4
	addpd	%xmm5, %xmm15
	movapd	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	mulpd	%xmm1, %xmm5

	addpd	%xmm2, %xmm8
	addpd	%xmm3, %xmm12
	movapd	-12 * SIZE(BO), %xmm2
	movapd	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm9
	movapd	-10 * SIZE(BO), %xmm4
	addpd	%xmm5, %xmm13
	movapd	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	movapd	-12 * SIZE(AO), %xmm0
	mulpd	%xmm1, %xmm5

	addpd	%xmm2, %xmm10
	addpd	%xmm3, %xmm14
	movapd	-8 * SIZE(BO), %xmm2
	movapd	%xmm2, %xmm3
	movapd	-10 * SIZE(AO), %xmm1
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm11
	addpd	%xmm5, %xmm15
	movapd	-6 * SIZE(BO), %xmm4
	movapd	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	mulpd	%xmm1, %xmm5

	addpd	%xmm2, %xmm8
	addpd	%xmm3, %xmm12
	movapd	-4 * SIZE(BO), %xmm2
	movapd	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm9
	addpd	%xmm5, %xmm13
	movapd	-2 * SIZE(BO), %xmm4
	movapd	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4

	PREFETCH (PREFETCHSIZE + 8) * SIZE(AO)

	movapd	-8 * SIZE(AO), %xmm0
	mulpd	%xmm1, %xmm5

	addpd	%xmm2, %xmm10
	addpd	%xmm3, %xmm14
	movapd	 0 * SIZE(BO), %xmm2
	movapd	%xmm2, %xmm3
	movapd	-6 * SIZE(AO), %xmm1
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm11
	addpd	%xmm5, %xmm15
	movapd	 2 * SIZE(BO), %xmm4
	movapd	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	mulpd	%xmm1, %xmm5

	addpd	%xmm2, %xmm8
	addpd	%xmm3, %xmm12
	movapd	 4 * SIZE(BO), %xmm2
	movapd	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm9
	addpd	%xmm5, %xmm13
	movapd	 6 * SIZE(BO), %xmm4
	movapd	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	movapd	-4 * SIZE(AO), %xmm0
	mulpd	%xmm1, %xmm5

	addpd	%xmm2, %xmm10
	addpd	%xmm3, %xmm14
	movapd	 8 * SIZE(BO), %xmm2
	movapd	%xmm2, %xmm3
	movapd	-2 * SIZE(AO), %xmm1
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm11
	addpd	%xmm5, %xmm15
	movapd	10 * SIZE(BO), %xmm4
	movapd	%xmm4, %xmm5
	addq	$32 * SIZE, BO
	mulpd	%xmm0, %xmm4
	mulpd	%xmm1, %xmm5

	addpd	%xmm2, %xmm8
	addpd	%xmm3, %xmm12
	movapd	-20 * SIZE(BO), %xmm2
	movapd	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	subq	$-16 * SIZE, AO
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm9
	addpd	%xmm5, %xmm13
	movapd	-18 * SIZE(BO), %xmm4
	movapd	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	subq	$1, %rax
	mulpd	%xmm1, %xmm5
	jg,pt	.L12
	ALIGN_4

.L15:
#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	andq	$3, %rax		# k & 3 remainder
	BRANCH
	je	.L19
	ALIGN_4

/* k-loop remainder: one 4x4 rank-1 update per pass. */
.L16:
	movapd	-16 * SIZE(AO), %xmm0
	addpd	%xmm2, %xmm10
	movapd	-16 * SIZE(BO), %xmm2
	addpd	%xmm3, %xmm14
	movapd	%xmm2, %xmm3
	movapd	-14 * SIZE(AO), %xmm1
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm11
	movapd	-14 * SIZE(BO), %xmm4
	addpd	%xmm5, %xmm15
	movapd	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	mulpd	%xmm1, %xmm5

	addpd	%xmm2, %xmm8
	movapd	-12 * SIZE(BO), %xmm2
	addpd	%xmm3, %xmm12
	movapd	%xmm2, %xmm3
	mulpd	%xmm0, %xmm2
	mulpd	%xmm1, %xmm3

	addpd	%xmm4, %xmm9
	movapd	-10 * SIZE(BO), %xmm4
	addpd	%xmm5, %xmm13
	movapd	%xmm4, %xmm5
	mulpd	%xmm0, %xmm4
	mulpd	%xmm1, %xmm5

	addq	$4 * SIZE, AO		# aoffset += 4
	addq	$8 * SIZE, BO		# boffset1 += 8
	subq	$1, %rax
	jg,pt	.L16
	ALIGN_4

/* Flush the pipelined partial products, then rewind the pointers for the
   solve.  Both arms of the inner #ifdef subtract 4 because the M- and
   N-block sizes are both 4 here. */
.L19:
	addpd	%xmm2, %xmm10
	addpd	%xmm3, %xmm14
	addpd	%xmm4, %xmm11
	addpd	%xmm5, %xmm15

#if defined(LN) || defined(RT)
	movq	KK, %rax
#ifdef LN
	subq	$4, %rax
#else
	subq	$4, %rax
#endif
	movq	AORIG, AO
	movq	BORIG, B

	leaq	16 * SIZE + BUFFER, BO

	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
	leaq	(B,  %rax, 4), B
	leaq	(BO, %rax, 8), BO
#endif

/* Transpose the accumulators and form (packed panel) - (accumulated sum). */
#if defined(LN) || defined(LT)
	movapd	%xmm8, %xmm0
	unpcklpd %xmm9, %xmm8
	unpckhpd %xmm9, %xmm0

	movapd	%xmm10, %xmm2
	unpcklpd %xmm11, %xmm10
	unpckhpd %xmm11, %xmm2

	movapd	%xmm12, %xmm4
	unpcklpd %xmm13, %xmm12
	unpckhpd %xmm13, %xmm4

	movapd	%xmm14, %xmm6
	unpcklpd %xmm15, %xmm14
	unpckhpd %xmm15, %xmm6

	movapd	-16 * SIZE(B), %xmm9
	movapd	-14 * SIZE(B), %xmm11
	movapd	-12 * SIZE(B), %xmm13
	movapd	-10 * SIZE(B), %xmm15
	movapd	 -8 * SIZE(B), %xmm1
	movapd	 -6 * SIZE(B), %xmm3
	movapd	 -4 * SIZE(B), %xmm5
	movapd	 -2 * SIZE(B), %xmm7

	subpd	%xmm8,  %xmm9
	subpd	%xmm10, %xmm11
	subpd	%xmm0,  %xmm13
	subpd	%xmm2,  %xmm15
	subpd	%xmm12, %xmm1
	subpd	%xmm14, %xmm3
	subpd	%xmm4,  %xmm5
	subpd	%xmm6,  %xmm7
#else
	movapd	-16 * SIZE(AO), %xmm0
	movapd	-14 * SIZE(AO), %xmm1
	movapd	-12 * SIZE(AO), %xmm2
	movapd	-10 * SIZE(AO), %xmm3
	movapd	 -8 * SIZE(AO), %xmm4
	movapd	 -6 * SIZE(AO), %xmm5
	movapd	 -4 * SIZE(AO), %xmm6
	movapd	 -2 * SIZE(AO), %xmm7

	subpd	%xmm8,  %xmm0
	subpd	%xmm12, %xmm1
	subpd	%xmm9,  %xmm2
	subpd	%xmm13, %xmm3
	subpd	%xmm10, %xmm4
	subpd	%xmm14, %xmm5
	subpd	%xmm11, %xmm6
	subpd	%xmm15, %xmm7
#endif
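/* Editor's note: a minimal C sketch (not from the original file) of the
   substitution pattern the LN branch below implements.  Names are
   illustrative: inv_diag[] holds the pre-inverted diagonal entries the
   packing routine is assumed to store, t[k][i] is the packed triangular
   factor entry coupling solved row i into pending row k, and r[][] is
   the right-hand-side tile.

   static void solve_tile_ln(int nrhs,
                             const double inv_diag[4],
                             const double t[4][4],
                             double r[4][4])
   {
       // Rows are solved from the last one upward, mirroring the
       // movddup/mulpd/subpd chain: scale by the inverted diagonal,
       // then eliminate the solved row from every row still pending.
       for (int i = 3; i >= 0; i--) {
           for (int j = 0; j < nrhs; j++)
               r[i][j] *= inv_diag[i];
           for (int k = i - 1; k >= 0; k--)
               for (int j = 0; j < nrhs; j++)
                   r[k][j] -= t[k][i] * r[i][j];
       }
   }

   In the assembly, each register pair ({xmm5,xmm7} = row 3, {xmm1,xmm3} =
   row 2, {xmm13,xmm15} = row 1, {xmm9,xmm11} = row 0) carries nrhs = 4
   columns, two per register. */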
/* 4x4 solve against A, backward from the last diagonal entry (LN). */
#ifdef LN
	movddup	-1 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm5
	mulpd	%xmm8, %xmm7

	movddup	-2 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm5,  %xmm10
	subpd	%xmm10, %xmm1
	mulpd	%xmm7,  %xmm12
	subpd	%xmm12, %xmm3

	movddup	-3 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm5,  %xmm10
	subpd	%xmm10, %xmm13
	mulpd	%xmm7,  %xmm12
	subpd	%xmm12, %xmm15

	movddup	-4 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm5,  %xmm10
	subpd	%xmm10, %xmm9
	mulpd	%xmm7,  %xmm12
	subpd	%xmm12, %xmm11

	movddup	-6 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm1
	mulpd	%xmm8, %xmm3

	movddup	-7 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm1,  %xmm10
	subpd	%xmm10, %xmm13
	mulpd	%xmm3,  %xmm12
	subpd	%xmm12, %xmm15

	movddup	-8 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm1,  %xmm10
	subpd	%xmm10, %xmm9
	mulpd	%xmm3,  %xmm12
	subpd	%xmm12, %xmm11

	movddup	-11 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm13
	mulpd	%xmm8, %xmm15

	movddup	-12 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm13, %xmm10
	subpd	%xmm10, %xmm9
	mulpd	%xmm15, %xmm12
	subpd	%xmm12, %xmm11

	movddup	-16 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm9
	mulpd	%xmm8, %xmm11
#endif

/* 4x4 solve against A, forward from the first diagonal entry (LT). */
#ifdef LT
	movddup	-16 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm9
	mulpd	%xmm8, %xmm11

	movddup	-15 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm9,  %xmm10
	subpd	%xmm10, %xmm13
	mulpd	%xmm11, %xmm12
	subpd	%xmm12, %xmm15

	movddup	-14 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm9,  %xmm10
	subpd	%xmm10, %xmm1
	mulpd	%xmm11, %xmm12
	subpd	%xmm12, %xmm3

	movddup	-13 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm9,  %xmm10
	subpd	%xmm10, %xmm5
	mulpd	%xmm11, %xmm12
	subpd	%xmm12, %xmm7

	movddup	-11 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm13
	mulpd	%xmm8, %xmm15

	movddup	-10 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm13, %xmm10
	subpd	%xmm10, %xmm1
	mulpd	%xmm15, %xmm12
	subpd	%xmm12, %xmm3

	movddup	-9 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm13, %xmm10
	subpd	%xmm10, %xmm5
	mulpd	%xmm15, %xmm12
	subpd	%xmm12, %xmm7

	movddup	-6 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm1
	mulpd	%xmm8, %xmm3

	movddup	-5 * SIZE(AO), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm1,  %xmm10
	subpd	%xmm10, %xmm5
	mulpd	%xmm3,  %xmm12
	subpd	%xmm12, %xmm7

	movddup	-1 * SIZE(AO), %xmm8
	mulpd	%xmm8, %xmm5
	mulpd	%xmm8, %xmm7
#endif

/* 4x4 solve against the packed B panel, forward order (RN). */
#ifdef RN
	movddup	-16 * SIZE(B), %xmm8
	mulpd	%xmm8, %xmm0
	mulpd	%xmm8, %xmm1

	movddup	-15 * SIZE(B), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm0,  %xmm10
	subpd	%xmm10, %xmm2
	mulpd	%xmm1,  %xmm12
	subpd	%xmm12, %xmm3

	movddup	-14 * SIZE(B), %xmm10
	movapd	%xmm10, %xmm12
	mulpd	%xmm0,  %xmm10
	subpd	%xmm10, %xmm4
	mulpd	%xmm1,  %xmm12
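/* Editor's note: the excerpt ends here, partway through the RN solve of
   the M=4 block.  Two design points visible above: the BO buffer keeps
   every B element duplicated in both SSE lanes (see the movddup/SHUFPD_3
   write-back), so the hot k-loop can feed mulpd from plain movapd loads;
   and the solves never divide -- the packing routines are assumed to
   store reciprocals of the diagonal, turning each division into a
   multiply. */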