📄 trsm_kernel_rt_4x4_core2.s
        movsd   %xmm9,   0 * SIZE(CO1)
        movsd   %xmm13,  1 * SIZE(CO1)
        movsd   %xmm1,   2 * SIZE(CO1)
        movsd   %xmm5,   3 * SIZE(CO1)

        movhpd  %xmm9,   0 * SIZE(CO2)
        movhpd  %xmm13,  1 * SIZE(CO2)
        movhpd  %xmm1,   2 * SIZE(CO2)
        movhpd  %xmm5,   3 * SIZE(CO2)
#else
        movsd   %xmm0,   0 * SIZE(CO1)
        movhpd  %xmm0,   1 * SIZE(CO1)
        movsd   %xmm1,   2 * SIZE(CO1)
        movhpd  %xmm1,   3 * SIZE(CO1)

        movsd   %xmm2,   0 * SIZE(CO2)
        movhpd  %xmm2,   1 * SIZE(CO2)
        movsd   %xmm3,   2 * SIZE(CO2)
        movhpd  %xmm3,   3 * SIZE(CO2)
#endif

#if defined(LN) || defined(LT)
        movapd  %xmm9,  -16 * SIZE(B)
        movapd  %xmm13, -14 * SIZE(B)
        movapd  %xmm1,  -12 * SIZE(B)
        movapd  %xmm5,  -10 * SIZE(B)

        movddup %xmm9,  %xmm8
        SHUFPD_3 %xmm9,  %xmm9
        movddup %xmm13, %xmm12
        SHUFPD_3 %xmm13, %xmm13
        movddup %xmm1,  %xmm0
        SHUFPD_3 %xmm1,  %xmm1
        movddup %xmm5,  %xmm4
        SHUFPD_3 %xmm5,  %xmm5

        movapd  %xmm8,  -16 * SIZE(BO)
        movapd  %xmm9,  -14 * SIZE(BO)
        movapd  %xmm12, -12 * SIZE(BO)
        movapd  %xmm13, -10 * SIZE(BO)
        movapd  %xmm0,   -8 * SIZE(BO)
        movapd  %xmm1,   -6 * SIZE(BO)
        movapd  %xmm4,   -4 * SIZE(BO)
        movapd  %xmm5,   -2 * SIZE(BO)
#else
        movapd  %xmm0,  -16 * SIZE(AO)
        movapd  %xmm1,  -14 * SIZE(AO)
        movapd  %xmm2,  -12 * SIZE(AO)
        movapd  %xmm3,  -10 * SIZE(AO)
#endif

#ifndef LN
        addq    $4 * SIZE, CO1
        addq    $4 * SIZE, CO2
#endif

#if defined(LT) || defined(RN)
        movq    K, %rax
        subq    KK, %rax
        leaq    (,%rax, SIZE), %rax
        leaq    (AO, %rax, 4), AO
#ifdef LT
        addq    $8 * SIZE, B
#endif
#endif

#ifdef LN
        subq    $4, KK
        movq    BORIG, B
#endif
#ifdef LT
        addq    $4, KK
#endif
#ifdef RT
        movq    K, %rax
        movq    BORIG, B
        salq    $2 + BASE_SHIFT, %rax
        addq    %rax, AORIG
#endif

        decq    I                       # i --
        jg      .L51
        ALIGN_4

.L60:
        testq   $2, M                   # M & 2: two remaining rows
        je      .L70
        ALIGN_4

.L61:
#ifdef LN
        movq    K, %rax
        salq    $1 + BASE_SHIFT, %rax
        subq    %rax, AORIG
#endif

#if defined(LN) || defined(RT)
        movq    KK, %rax
        movq    AORIG, AO
        leaq    (, %rax, SIZE), %rax
        leaq    (AO, %rax, 2), AO
#endif

        leaq    16 * SIZE + BUFFER, BO

#if defined(LN) || defined(RT)
        movq    KK, %rax
        salq    $1 + BASE_SHIFT, %rax
        leaq    (BO, %rax, 2), BO
#endif

        pxor    %xmm8, %xmm8
        pxor    %xmm9, %xmm9
        pxor    %xmm10, %xmm10
        pxor    %xmm11, %xmm11

#if defined(LT) || defined(RN)
        movq    KK, %rax
#else
        movq    K, %rax
        subq    KK, %rax
#endif
        sarq    $2, %rax
        je      .L65
        ALIGN_4

.L62:   # 2x2 GEMM update, unrolled 4x over k
        PREFETCH (PREFETCHSIZE + 0) * SIZE(AO)

        movapd  -16 * SIZE(AO), %xmm0
        movapd  -14 * SIZE(AO), %xmm1

        movapd  -16 * SIZE(BO), %xmm2
        movapd  -14 * SIZE(BO), %xmm3
        movapd  -12 * SIZE(BO), %xmm4
        movapd  -10 * SIZE(BO), %xmm5

        mulpd   %xmm0, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm1, %xmm4
        mulpd   %xmm1, %xmm5

        addpd   %xmm2, %xmm8
        addpd   %xmm3, %xmm9
        addpd   %xmm4, %xmm10
        addpd   %xmm5, %xmm11

        movapd  -12 * SIZE(AO), %xmm0
        movapd  -10 * SIZE(AO), %xmm1

        movapd   -8 * SIZE(BO), %xmm2
        movapd   -6 * SIZE(BO), %xmm3
        movapd   -4 * SIZE(BO), %xmm4
        movapd   -2 * SIZE(BO), %xmm5

        mulpd   %xmm0, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm1, %xmm4
        mulpd   %xmm1, %xmm5

        addpd   %xmm2, %xmm8
        addpd   %xmm3, %xmm9
        addpd   %xmm4, %xmm10
        addpd   %xmm5, %xmm11

        subq    $-8 * SIZE, AO
        subq    $-16 * SIZE, BO
        subq    $1, %rax
        jne     .L62
        ALIGN_4

.L65:
#if defined(LT) || defined(RN)
        movq    KK, %rax
#else
        movq    K, %rax
        subq    KK, %rax
#endif
        andq    $3, %rax                # if (k & 3)
        BRANCH
        je      .L69
        ALIGN_4

.L66:   # k-loop tail
        movapd  -16 * SIZE(AO), %xmm0

        movapd  -16 * SIZE(BO), %xmm2
        movapd  -14 * SIZE(BO), %xmm3

        mulpd   %xmm0, %xmm2
        mulpd   %xmm0, %xmm3
        addpd   %xmm2, %xmm8
        addpd   %xmm3, %xmm9

        addq    $2 * SIZE, AO
        addq    $4 * SIZE, BO
        subq    $1, %rax
        jg      .L66
        ALIGN_4

.L69:
        addpd   %xmm10, %xmm8           # combine partial sums
        addpd   %xmm11, %xmm9

#if defined(LN) || defined(RT)
        movq    KK, %rax
#ifdef LN
        subq    $2, %rax
#else
        subq    $2, %rax
#endif

        movq    AORIG, AO
        movq    BORIG, B
        leaq    16 * SIZE + BUFFER, BO

        leaq    (, %rax, SIZE), %rax
        leaq    (AO, %rax, 2), AO
        leaq    (B,  %rax, 2), B
        leaq    (BO, %rax, 4), BO
#endif
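/* The block below performs the 2x2 triangular solve for this strip.
   The movddup loads fetch entries of the packed triangular block; the
   diagonal entries appear to be stored pre-inverted (the kernel only
   multiplies, never divides).  As an illustrative C sketch of the RT
   path (names hypothetical; offsets as in the code: -16 = a00,
   -14 = a01, -13 = a11, diagonal pre-inverted):

       x1 = x1 * inv_a11;          // movddup -13 * SIZE(B); mulpd
       x0 = x0 - a01 * x1;         // movddup -14 * SIZE(B); mulpd; subpd
       x0 = x0 * inv_a00;          // movddup -16 * SIZE(B); mulpd

   i.e. back-substitution solving the last unknown first, applied to both
   rows at once: each 128-bit register carries one column of the 2x2
   block. */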
#if defined(LN) || defined(LT)
        movapd  %xmm8, %xmm0
        unpcklpd %xmm9, %xmm8
        unpckhpd %xmm9, %xmm0

        movapd  -16 * SIZE(B), %xmm9
        movapd  -14 * SIZE(B), %xmm13

        subpd   %xmm8, %xmm9
        subpd   %xmm0, %xmm13
#else
        movapd  -16 * SIZE(AO), %xmm0
        movapd  -14 * SIZE(AO), %xmm2

        subpd   %xmm8, %xmm0
        subpd   %xmm9, %xmm2
#endif

#ifdef LN
        movddup -13 * SIZE(AO), %xmm8
        mulpd   %xmm8, %xmm13
        movddup -14 * SIZE(AO), %xmm10
        mulpd   %xmm13, %xmm10
        subpd   %xmm10, %xmm9
        movddup -16 * SIZE(AO), %xmm8
        mulpd   %xmm8, %xmm9
#endif

#ifdef LT
        movddup -16 * SIZE(AO), %xmm8
        mulpd   %xmm8, %xmm9
        movddup -15 * SIZE(AO), %xmm10
        mulpd   %xmm9, %xmm10
        subpd   %xmm10, %xmm13
        movddup -13 * SIZE(AO), %xmm8
        mulpd   %xmm8, %xmm13
#endif

#ifdef RN
        movddup -16 * SIZE(B), %xmm8
        mulpd   %xmm8, %xmm0
        movddup -15 * SIZE(B), %xmm9
        mulpd   %xmm0, %xmm9
        subpd   %xmm9, %xmm2
        movddup -13 * SIZE(B), %xmm8
        mulpd   %xmm8, %xmm2
#endif

#ifdef RT
        movddup -13 * SIZE(B), %xmm8
        mulpd   %xmm8, %xmm2
        movddup -14 * SIZE(B), %xmm9
        mulpd   %xmm2, %xmm9
        subpd   %xmm9, %xmm0
        movddup -16 * SIZE(B), %xmm8
        mulpd   %xmm8, %xmm0
#endif

#ifdef LN
        subq    $2 * SIZE, CO1
        subq    $2 * SIZE, CO2
#endif

        # write the solved 2x2 block back to C
#if defined(LN) || defined(LT)
        movsd   %xmm9,  0 * SIZE(CO1)
        movsd   %xmm13, 1 * SIZE(CO1)

        movhpd  %xmm9,  0 * SIZE(CO2)
        movhpd  %xmm13, 1 * SIZE(CO2)
#else
        movsd   %xmm0,  0 * SIZE(CO1)
        movhpd  %xmm0,  1 * SIZE(CO1)

        movsd   %xmm2,  0 * SIZE(CO2)
        movhpd  %xmm2,  1 * SIZE(CO2)
#endif

        # ... and back to the packed panels (entries duplicated for BO)
#if defined(LN) || defined(LT)
        movapd  %xmm9,  -16 * SIZE(B)
        movapd  %xmm13, -14 * SIZE(B)

        movddup %xmm9,  %xmm8
        SHUFPD_3 %xmm9,  %xmm9
        movddup %xmm13, %xmm12
        SHUFPD_3 %xmm13, %xmm13

        movapd  %xmm8,  -16 * SIZE(BO)
        movapd  %xmm9,  -14 * SIZE(BO)
        movapd  %xmm12, -12 * SIZE(BO)
        movapd  %xmm13, -10 * SIZE(BO)
#else
        movapd  %xmm0, -16 * SIZE(AO)
        movapd  %xmm2, -14 * SIZE(AO)
#endif

#ifndef LN
        addq    $2 * SIZE, CO1
        addq    $2 * SIZE, CO2
#endif

#if defined(LT) || defined(RN)
        movq    K, %rax
        subq    KK, %rax
        leaq    (,%rax, SIZE), %rax
        leaq    (AO, %rax, 2), AO
#ifdef LT
        addq    $4 * SIZE, B
#endif
#endif

#ifdef LN
        subq    $2, KK
        movq    BORIG, B
#endif
#ifdef LT
        addq    $2, KK
#endif
#ifdef RT
        movq    K, %rax
        movq    BORIG, B
        salq    $1 + BASE_SHIFT, %rax
        addq    %rax, AORIG
#endif
        ALIGN_4

.L70:
        testq   $1, M                   # M & 1: last row
        je      .L79
        ALIGN_4

.L71:
#ifdef LN
        movq    K, %rax
        salq    $0 + BASE_SHIFT, %rax
        subq    %rax, AORIG
#endif

#if defined(LN) || defined(RT)
        movq    KK, %rax
        movq    AORIG, AO
        leaq    (, %rax, SIZE), %rax
        leaq    (AO, %rax, 1), AO
#endif

        leaq    16 * SIZE + BUFFER, BO

#if defined(LN) || defined(RT)
        movq    KK, %rax
        salq    $1 + BASE_SHIFT, %rax
        leaq    (BO, %rax, 2), BO
#endif

        pxor    %xmm8, %xmm8
        pxor    %xmm9, %xmm9
        pxor    %xmm10, %xmm10
        pxor    %xmm11, %xmm11

#if defined(LT) || defined(RN)
        movq    KK, %rax
#else
        movq    K, %rax
        subq    KK, %rax
#endif
        sarq    $2, %rax
        je      .L75
        ALIGN_4

.L72:   # 1x2 GEMM update, unrolled 4x over k
        PREFETCH (PREFETCHSIZE + 0) * SIZE(AO)

        movsd   -16 * SIZE(AO), %xmm0
        movsd   -15 * SIZE(AO), %xmm1

        movsd   -16 * SIZE(BO), %xmm2
        movsd   -14 * SIZE(BO), %xmm3
        movsd   -12 * SIZE(BO), %xmm4
        movsd   -10 * SIZE(BO), %xmm5

        mulsd   %xmm0, %xmm2
        mulsd   %xmm0, %xmm3
        mulsd   %xmm1, %xmm4
        mulsd   %xmm1, %xmm5

        addsd   %xmm2, %xmm8
        addsd   %xmm3, %xmm9
        addsd   %xmm4, %xmm10
        addsd   %xmm5, %xmm11

        movsd   -14 * SIZE(AO), %xmm0
        movsd   -13 * SIZE(AO), %xmm1

        movsd    -8 * SIZE(BO), %xmm2
        movsd    -6 * SIZE(BO), %xmm3
        movsd    -4 * SIZE(BO), %xmm4
        movsd    -2 * SIZE(BO), %xmm5

        mulsd   %xmm0, %xmm2
        mulsd   %xmm0, %xmm3
        mulsd   %xmm1, %xmm4
        mulsd   %xmm1, %xmm5

        addsd   %xmm2, %xmm8
        addsd   %xmm3, %xmm9
        addsd   %xmm4, %xmm10
        addsd   %xmm5, %xmm11

        subq    $-4 * SIZE, AO
        subq    $-16 * SIZE, BO
        subq    $1, %rax
        jne     .L72
        ALIGN_4

.L75:
#if defined(LT) || defined(RN)
        movq    KK, %rax
#else
        movq    K, %rax
        subq    KK, %rax
#endif
        andq    $3, %rax                # if (k & 3)
        BRANCH
        je      .L78
        ALIGN_4

.L76:   # k-loop tail
        movsd   -16 * SIZE(AO), %xmm0

        movsd   -16 * SIZE(BO), %xmm2
        movsd   -14 * SIZE(BO), %xmm3

        mulsd   %xmm0, %xmm2
        mulsd   %xmm0, %xmm3
        addsd   %xmm2, %xmm8
        addsd   %xmm3, %xmm9

        addq    $1 * SIZE, AO
        addq    $4 * SIZE, BO
        subq    $1, %rax
        jg      .L76
        ALIGN_4
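/* .L78 below is the scalar (M & 1) counterpart of the block above: the
   same accumulate-then-solve pattern on a single row.  With a 1x1
   diagonal block the LN/LT solve reduces to one mulsd by the (again,
   seemingly pre-inverted) diagonal entry, applied to the right-hand
   sides of both columns. */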
.L78:
        addsd   %xmm10, %xmm8           # combine partial sums
        addsd   %xmm11, %xmm9

#if defined(LN) || defined(RT)
        movq    KK, %rax
#ifdef LN
        subq    $1, %rax
#else
        subq    $2, %rax
#endif

        movq    AORIG, AO
        movq    BORIG, B
        leaq    16 * SIZE + BUFFER, BO

        leaq    (, %rax, SIZE), %rax
        leaq    (AO, %rax, 1), AO
        leaq    (B,  %rax, 2), B
        leaq    (BO, %rax, 4), BO
#endif

#if defined(LN) || defined(LT)
        movsd   -16 * SIZE(B), %xmm12
        movsd   -15 * SIZE(B), %xmm13
#else
        movsd   -16 * SIZE(AO), %xmm12
        movsd   -15 * SIZE(AO), %xmm13
#endif

        subsd   %xmm8, %xmm12
        subsd   %xmm9, %xmm13

#ifdef LN
        movsd   -16 * SIZE(AO), %xmm8
        mulsd   %xmm8, %xmm12
        mulsd   %xmm8, %xmm13
#endif

#ifdef LT
        movsd   -16 * SIZE(AO), %xmm8
        mulsd   %xmm8, %xmm12
        mulsd   %xmm8, %xmm13
#endif

#ifdef RN
        mulsd   -16 * SIZE(B), %xmm12
        movsd   -15 * SIZE(B), %xmm9
        mulsd   %xmm12, %xmm9
        subsd   %xmm9, %xmm13
        mulsd   -13 * SIZE(B), %xmm13
#endif

#ifdef RT
        mulsd   -13 * SIZE(B), %xmm13
        movlpd  -14 * SIZE(B), %xmm9
        mulsd   %xmm13, %xmm9
        subsd   %xmm9, %xmm12
        mulsd   -16 * SIZE(B), %xmm12
#endif

#ifdef LN
        subq    $1 * SIZE, CO1
        subq    $1 * SIZE, CO2
#endif

        movsd   %xmm12, 0 * SIZE(CO1)
        movsd   %xmm13, 0 * SIZE(CO2)

#if defined(LN) || defined(LT)
        movsd   %xmm12, -16 * SIZE(B)
        movsd   %xmm13, -15 * SIZE(B)

        movsd   %xmm12, -16 * SIZE(BO)
        movsd   %xmm12, -15 * SIZE(BO)
        movsd   %xmm13, -14 * SIZE(BO)
        movsd   %xmm13, -13 * SIZE(BO)
#else
        movsd   %xmm12, -16 * SIZE(AO)
        movsd   %xmm13, -15 * SIZE(AO)
#endif

#ifndef LN
        addq    $1 * SIZE, CO1
        addq    $1 * SIZE, CO2
#endif

#if defined(LT) || defined(RN)
        movq    K, %rax
        subq    KK, %rax
        leaq    (,%rax, SIZE), %rax
        leaq    (AO, %rax, 1), AO
#ifdef LT
        addq    $2 * SIZE, B
#endif
#endif

#ifdef LN
        subq    $1, KK
        movq    BORIG, B
#endif
#ifdef LT
        addq    $1, KK
#endif
#ifdef RT
        movq    K, %rax
        movq    BORIG, B
        salq    $0 + BASE_SHIFT, %rax
        addq    %rax, AORIG
#endif
        ALIGN_4

.L79:   # end of the 2-column panel: advance B and KK
#ifdef LN
        leaq    (, K, SIZE), %rax
        leaq    (B, %rax, 2), B
#endif

#if defined(LT) || defined(RN)
        movq    K, %rax
        subq    KK, %rax
        leaq    (,%rax, SIZE), %rax
        leaq    (B, %rax, 2), B
#endif

#ifdef RN
        addq    $2, KK
#endif
#ifdef RT
        subq    $2, KK
#endif
        ALIGN_4

.L80:   # main loop over 4-column panels
        movq    N, J
        sarq    $2, J                   # j = (n >> 2)
        jle     .L999

.L01:
/* Copying to Sub Buffer */
#ifdef LN
        movq    OFFSET, %rax
        addq    M, %rax
        movq    %rax, KK
#endif

        leaq    16 * SIZE + BUFFER, BO

#ifdef RT
        movq    K, %rax
        salq    $2 + BASE_SHIFT, %rax
        subq    %rax, B
#endif

#if defined(LN) || defined(RT)
        movq    KK, %rax
        movq    B, BORIG
        leaq    (, %rax, SIZE), %rax
        leaq    (B,  %rax, 4), B
        leaq    (BO, %rax, 8), BO
#endif

#if defined(LT)
        movq    OFFSET, %rax
        movq    %rax, KK
#endif

#if defined(LT) || defined(RN)
        movq    KK, %rax
#else
        movq    K, %rax
        subq    KK, %rax
#endif
        sarq    $2, %rax
        jle     .L03
        ALIGN_4
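/* .L02 below copies the packed B panel into BUFFER, writing every scalar
   twice in adjacent slots: movddup duplicates the low half of each pair,
   unpckhpd the high half.  Presumably this trades 2x buffer space for a
   simpler hot loop, which can then use plain aligned movapd loads of
   pre-broadcast values instead of duplicating each b on every iteration. */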
.L02:
        prefetcht0      (PREFETCH_R +  0) * SIZE(B)

        movapd  -16 * SIZE(B), %xmm0
        movapd  -14 * SIZE(B), %xmm1
        movapd  -12 * SIZE(B), %xmm2
        movapd  -10 * SIZE(B), %xmm3
        movapd   -8 * SIZE(B), %xmm4
        movapd   -6 * SIZE(B), %xmm5
        movapd   -4 * SIZE(B), %xmm6
        movapd   -2 * SIZE(B), %xmm7

        prefetcht0      (PREFETCH_R +  8) * SIZE(B)

        movddup %xmm0, %xmm8            # duplicate the low element ...
        unpckhpd %xmm0, %xmm0           # ... and the high element
        movddup %xmm1, %xmm9
        unpckhpd %xmm1, %xmm1
        movddup %xmm2, %xmm10
        unpckhpd %xmm2, %xmm2
        movddup %xmm3, %xmm11
        unpckhpd %xmm3, %xmm3
        movddup %xmm4, %xmm12
        unpckhpd %xmm4, %xmm4
        movddup %xmm5, %xmm13
        unpckhpd %xmm5, %xmm5
        movddup %xmm6, %xmm14
        unpckhpd %xmm6, %xmm6
        movddup %xmm7, %xmm15
        unpckhpd %xmm7, %xmm7

        prefetcht0      (PREFETCH_W +  0) * SIZE(BO)

        movapd  %xmm8,  -16 * SIZE(BO)
        movapd  %xmm0,  -14 * SIZE(BO)
        movapd  %xmm9,  -12 * SIZE(BO)
        movapd  %xmm1,  -10 * SIZE(BO)

        prefetcht0      (PREFETCH_W +  8) * SIZE(BO)

        movapd  %xmm10,  -8 * SIZE(BO)
        movapd  %xmm2,   -6 * SIZE(BO)
        movapd  %xmm11,  -4 * SIZE(BO)
        movapd  %xmm3,   -2 * SIZE(BO)

        prefetcht0      (PREFETCH_W + 16) * SIZE(BO)

        movapd  %xmm12,   0 * SIZE(BO)
        movapd  %xmm4,    2 * SIZE(BO)
        movapd  %xmm13,   4 * SIZE(BO)
        movapd  %xmm5,    6 * SIZE(BO)

        prefetcht0      (PREFETCH_W + 24) * SIZE(BO)

        movapd  %xmm14,   8 * SIZE(BO)
        movapd  %xmm6,   10 * SIZE(BO)
        movapd  %xmm15,  12 * SIZE(BO)
        movapd  %xmm7,   14 * SIZE(BO)

        subq    $-16 * SIZE, B
        subq    $-32 * SIZE, BO
        subq    $1, %rax
        jne     .L02
        ALIGN_4

.L03:   # copy tail (k & 3)
#if defined(LT) || defined(RN)
        movq    KK, %rax
#else
        movq    K, %rax
        subq    KK, %rax
#endif
        andq    $3, %rax
        BRANCH
        jle     .L10
        ALIGN_4

.L04:
        movapd  -16 * SIZE(B), %xmm0
        movapd  -14 * SIZE(B), %xmm1

        movddup %xmm0, %xmm8
        unpckhpd %xmm0, %xmm0
        movddup %xmm1, %xmm9
        unpckhpd %xmm1, %xmm1

        movapd  %xmm8, -16 * SIZE(BO)
        movapd  %xmm0, -14 * SIZE(BO)
        movapd  %xmm9, -12 * SIZE(BO)
        movapd  %xmm1, -10 * SIZE(BO)
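/* For reference, the net effect of the .L02 / .L04 copy loops on the
   4-wide B panel is equivalent to the following C sketch (illustrative
   only; the name is not from this source, and SIZE is assumed to be 8,
   i.e. double precision):

       static void pack_b_duplicated(const double *b, double *bo, long k)
       {
           for (long i = 0; i < 4 * k; i++) {   // 4 B values per k step
               bo[2 * i + 0] = b[i];            // low lane
               bo[2 * i + 1] = b[i];            // high lane (duplicate)
           }
       }
*/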