📄 gemv_t_sse2_core2.s
字号:
/* --------------------------------------------------------------------------
 * Fragment of an SSE2 double-precision transposed-GEMV kernel
 * (Core2/Penryn-tuned, per the #if defined(CORE2) || defined(PENRYN) blocks).
 * The chunk was recovered from collapsed single-line text; it is re-flowed
 * here one instruction per line with the instruction stream left untouched.
 *
 * Register / symbol roles, as evidenced by the code below:
 *   xmm0..xmm3   dot-product accumulators (one per column being processed)
 *   xmm12,xmm13  packed x-vector operands streamed from BO
 *   xmm4..xmm11  A-column data, multi-buffered for software pipelining
 *   AO1, AO2     pointers into consecutive columns of A (strided by LDA)
 *   BO           pointer into the BUFFER copy of the x vector
 *   MIN_M        row count of the current stripe; I is the loop counter
 *   N, J         column count / outer column-group counter
 *   CO, INCY     output y pointer and its stride; ALPHA scales each result
 *   SIZE         element size in "elements" scaling (8 bytes for double,
 *                presumably — TODO confirm against the file header)
 *   SHUFPD_1     macro, presumably shufpd $1: merges the high half of one
 *                register with the low half of another.  The AO2 columns are
 *                read with movapd at odd element offsets (1*SIZE, 3*SIZE, ...),
 *                which only works if those columns are deliberately offset so
 *                the loads stay 16-byte aligned; the shuffles then realign the
 *                odd-aligned column data — NOTE(review): confirm against the
 *                macro definition earlier in this file.
 *
 * NOTE(review): the fragment begins in the middle of the software-pipeline
 * drain of the 4-columns-at-a-time inner loop: xmm4/xmm6/xmm8 (and the
 * carried xmm5/xmm7) were loaded before the first visible instruction.
 * -------------------------------------------------------------------------- */

/* ---- tail of the unroll-by-8 drain for the 4-column block:
 *      second half (elements 4..7) of the multiply/accumulate chain ---- */
        addpd   %xmm9, %xmm1
        movapd  5 * SIZE(AO2), %xmm9
        mulpd   %xmm13, %xmm10
        addpd   %xmm10, %xmm2
        movapd  6 * SIZE(AO1, LDA, 2), %xmm10
        SHUFPD_1 %xmm7, %xmm11
        mulpd   %xmm13, %xmm11
        movapd  6 * SIZE(BO), %xmm13
        addpd   %xmm11, %xmm3
        movapd  5 * SIZE(AO2, LDA, 2), %xmm11
        mulpd   %xmm12, %xmm4
        addpd   %xmm4, %xmm0
        SHUFPD_1 %xmm9, %xmm5
        mulpd   %xmm12, %xmm5
        addpd   %xmm5, %xmm1
        movapd  7 * SIZE(AO2), %xmm5
        mulpd   %xmm12, %xmm6
        addpd   %xmm6, %xmm2
        SHUFPD_1 %xmm11, %xmm7
        mulpd   %xmm12, %xmm7
        movapd  8 * SIZE(BO), %xmm12
        addpd   %xmm7, %xmm3
        movapd  7 * SIZE(AO2, LDA, 2), %xmm7
        mulpd   %xmm13, %xmm8
        addpd   %xmm8, %xmm0
        SHUFPD_1 %xmm5, %xmm9
        mulpd   %xmm13, %xmm9
        addpd   %xmm9, %xmm1
        mulpd   %xmm13, %xmm10
        addpd   %xmm10, %xmm2
        SHUFPD_1 %xmm7, %xmm11
        mulpd   %xmm13, %xmm11
        addpd   %xmm11, %xmm3
        movapd  10 * SIZE(BO), %xmm13
        /* advance past the 8 rows just consumed (subq $-k == addq $k,
         * a smaller encoding for large immediates) */
        subq    $-8 * SIZE, AO1
        subq    $-8 * SIZE, AO2
        subq    $-8 * SIZE, BO

/* ---- remainder: 4 rows (MIN_M & 4), still 4 columns wide ---- */
.L125:
        movq    MIN_M, I
        andq    $4, I
        je      .L126
        movapd  0 * SIZE(AO1), %xmm4
        movapd  0 * SIZE(AO1, LDA, 2), %xmm6
        movapd  2 * SIZE(AO1), %xmm8
        movapd  1 * SIZE(AO2), %xmm9
        movapd  2 * SIZE(AO1, LDA, 2), %xmm10
        movapd  1 * SIZE(AO2, LDA, 2), %xmm11
        /* xmm5/xmm7 carry the previous AO2 / AO2+2*LDA vectors in from the
         * preceding section for the SHUFPD realignment */
        mulpd   %xmm12, %xmm4
        addpd   %xmm4, %xmm0
        SHUFPD_1 %xmm9, %xmm5
        mulpd   %xmm12, %xmm5
        addpd   %xmm5, %xmm1
        movapd  3 * SIZE(AO2), %xmm5
        mulpd   %xmm12, %xmm6
        addpd   %xmm6, %xmm2
        SHUFPD_1 %xmm11, %xmm7
        mulpd   %xmm12, %xmm7
        movapd  4 * SIZE(BO), %xmm12
        addpd   %xmm7, %xmm3
        movapd  3 * SIZE(AO2, LDA, 2), %xmm7
        mulpd   %xmm13, %xmm8
        addpd   %xmm8, %xmm0
        SHUFPD_1 %xmm5, %xmm9
        mulpd   %xmm13, %xmm9
        addpd   %xmm9, %xmm1
        mulpd   %xmm13, %xmm10
        addpd   %xmm10, %xmm2
        SHUFPD_1 %xmm7, %xmm11
        mulpd   %xmm13, %xmm11
        movapd  6 * SIZE(BO), %xmm13
        addpd   %xmm11, %xmm3
        subq    $-4 * SIZE, AO1
        subq    $-4 * SIZE, AO2
        subq    $-4 * SIZE, BO

/* ---- remainder: 2 rows (MIN_M & 2), 4 columns wide ---- */
.L126:
        movq    MIN_M, I
        andq    $2, I
        je      .L127
        movapd  0 * SIZE(AO1), %xmm4
        movapd  1 * SIZE(AO2), %xmm9
        movapd  0 * SIZE(AO1, LDA, 2), %xmm6
        movapd  1 * SIZE(AO2, LDA, 2), %xmm11
        mulpd   %xmm12, %xmm4
        addpd   %xmm4, %xmm0
        SHUFPD_1 %xmm9, %xmm5
        mulpd   %xmm12, %xmm5
        addpd   %xmm5, %xmm1
        mulpd   %xmm12, %xmm6
        addpd   %xmm6, %xmm2
        SHUFPD_1 %xmm11, %xmm7
        mulpd   %xmm12, %xmm7
        movapd  2 * SIZE(BO), %xmm12
        addpd   %xmm7, %xmm3
        /* refresh the carried vectors for the scalar tail below
         * NOTE(review): the second copy reads as xmm7 -> xmm11 here, the
         * opposite direction of the xmm9 -> xmm5 copy; verify against the
         * upstream source, the scrape may have swapped operands */
        movapd  %xmm9, %xmm5
        movapd  %xmm7, %xmm11
        subq    $-2 * SIZE, AO1
        subq    $-2 * SIZE, AO2
        subq    $-2 * SIZE, BO

/* ---- remainder: final odd row (MIN_M & 1), scalar ops, 4 columns ---- */
.L127:
        movq    MIN_M, I
        andq    $1, I
        je      .L129
        movsd   0 * SIZE(AO1), %xmm8
        mulsd   %xmm12, %xmm8
        addsd   %xmm8, %xmm0
        movsd   0 * SIZE(AO2), %xmm8
        mulsd   %xmm12, %xmm8
        addsd   %xmm8, %xmm1
        movsd   0 * SIZE(AO1, LDA, 2), %xmm8
        mulsd   %xmm12, %xmm8
        addsd   %xmm8, %xmm2
        movsd   0 * SIZE(AO2, LDA, 2), %xmm8
        mulsd   %xmm12, %xmm8
        addsd   %xmm8, %xmm3

/* ---- reduce the four 2-wide accumulators to scalars, scale by ALPHA,
 *      and accumulate into y at stride INCY ---- */
.L129:
#ifdef HAVE_SSE3
        haddpd  %xmm0, %xmm0
        haddpd  %xmm1, %xmm1
        haddpd  %xmm2, %xmm2
        haddpd  %xmm3, %xmm3
#else
        /* SSE2 fallback: high half + low half via unpckhpd/addsd */
        movapd  %xmm0, %xmm9
        movapd  %xmm1, %xmm10
        movapd  %xmm2, %xmm11
        movapd  %xmm3, %xmm12
        unpckhpd %xmm0, %xmm0
        unpckhpd %xmm1, %xmm1
        unpckhpd %xmm2, %xmm2
        unpckhpd %xmm3, %xmm3
        addsd   %xmm9, %xmm0
        addsd   %xmm10, %xmm1
        addsd   %xmm11, %xmm2
        addsd   %xmm12, %xmm3
#endif
        mulsd   ALPHA, %xmm0
        mulsd   ALPHA, %xmm1
        mulsd   ALPHA, %xmm2
        mulsd   ALPHA, %xmm3
        /* read y[0..3*INCY] through TEMP, add, then write back through CO */
        movq    CO, TEMP
        addsd   (TEMP), %xmm0
        addq    INCY, TEMP
        addsd   (TEMP), %xmm1
        addq    INCY, TEMP
        addsd   (TEMP), %xmm2
        addq    INCY, TEMP
        addsd   (TEMP), %xmm3
        movsd   %xmm0, (CO)
        addq    INCY, CO
        movsd   %xmm1, (CO)
        addq    INCY, CO
        movsd   %xmm2, (CO)
        addq    INCY, CO
        movsd   %xmm3, (CO)
        addq    INCY, CO
        /* next group of 4 columns (loop head .L121 is above this fragment) */
        decq    J
        jg      .L121
        ALIGN_3

/* ==== N & 2: process two columns at a time ==== */
.L130:
        movq    N, J
        andq    $2, J
        jle     .L140
        ALIGN_3

.L131:
        movq    A, AO1
        leaq    (A, LDA, 1), AO2
        leaq    (A, LDA, 2), A          /* A advances two columns */
        movq    BUFFER, BO
        movapd  0 * SIZE(BO), %xmm12
        pxor    %xmm0, %xmm0            /* clear accumulators */
        movapd  2 * SIZE(BO), %xmm13
        pxor    %xmm1, %xmm1
        pxor    %xmm2, %xmm2
        pxor    %xmm3, %xmm3
#if defined(CORE2) || defined(PENRYN)
        PREFETCHW 4 * SIZE(CO)
#endif
        /* prime the carried AO2 vector for SHUFPD realignment */
        movapd  -1 * SIZE(AO2), %xmm5
        movq    MIN_M, I
        sarq    $4, I                   /* I = MIN_M / 16 */
        jle     .L134
        movapd  0 * SIZE(AO1), %xmm4
        movapd  2 * SIZE(AO1), %xmm8
        movapd  1 * SIZE(AO2), %xmm9
        decq    I                       /* last iteration drained at .L133 */
        jle     .L133
        ALIGN_3

/* ---- 2-column main loop, unrolled by 16 rows ---- */
.L132:
        PREFETCH PREFETCHSIZE * SIZE(AO1)
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        movapd  4 * SIZE(AO1), %xmm4
        mulpd   %xmm12, %xmm5
        movapd  4 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  3 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        movapd  6 * SIZE(AO1), %xmm8
        mulpd   %xmm13, %xmm9
        movapd  6 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        movapd  5 * SIZE(AO2), %xmm9
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        movapd  8 * SIZE(AO1), %xmm4
        mulpd   %xmm12, %xmm5
        movapd  8 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  7 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        mulpd   %xmm13, %xmm9
        movapd  10 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        movapd  10 * SIZE(AO1), %xmm8
        movapd  9 * SIZE(AO2), %xmm9
        PREFETCH PREFETCHSIZE * SIZE(AO2)
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        movapd  12 * SIZE(AO1), %xmm4
        mulpd   %xmm12, %xmm5
        movapd  12 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  11 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        movapd  14 * SIZE(AO1), %xmm8
        mulpd   %xmm13, %xmm9
        movapd  14 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        movapd  13 * SIZE(AO2), %xmm9
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        movapd  16 * SIZE(AO1), %xmm4
        mulpd   %xmm12, %xmm5
        movapd  16 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  15 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        movapd  18 * SIZE(AO1), %xmm8
        mulpd   %xmm13, %xmm9
        movapd  18 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        movapd  17 * SIZE(AO2), %xmm9
        subq    $-16 * SIZE, AO1
        subq    $-16 * SIZE, AO2
        subq    $-16 * SIZE, BO
        decq    I
        jg      .L132
        ALIGN_3

/* ---- drain of the 2-column unroll-by-16 loop (no prefetch, no reloads
 *      past the end of the stripe) ---- */
.L133:
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        movapd  4 * SIZE(AO1), %xmm4
        mulpd   %xmm12, %xmm5
        movapd  4 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  3 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        movapd  6 * SIZE(AO1), %xmm8
        mulpd   %xmm13, %xmm9
        movapd  6 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        movapd  5 * SIZE(AO2), %xmm9
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        movapd  8 * SIZE(AO1), %xmm4
        mulpd   %xmm12, %xmm5
        movapd  8 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  7 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        mulpd   %xmm13, %xmm9
        movapd  10 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        movapd  10 * SIZE(AO1), %xmm8
        movapd  9 * SIZE(AO2), %xmm9
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        movapd  12 * SIZE(AO1), %xmm4
        mulpd   %xmm12, %xmm5
        movapd  12 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  11 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        movapd  14 * SIZE(AO1), %xmm8
        mulpd   %xmm13, %xmm9
        movapd  14 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        movapd  13 * SIZE(AO2), %xmm9
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        mulpd   %xmm12, %xmm5
        movapd  16 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  15 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        mulpd   %xmm13, %xmm9
        movapd  18 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        subq    $-16 * SIZE, AO1
        subq    $-16 * SIZE, AO2
        subq    $-16 * SIZE, BO

/* ---- 2-column remainder: 8 rows (MIN_M & 8) ---- */
.L134:
        movq    MIN_M, I
        andq    $8, I
        je      .L135
        movapd  0 * SIZE(AO1), %xmm4
        movapd  1 * SIZE(AO2), %xmm9
        movapd  2 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        movapd  4 * SIZE(AO1), %xmm4
        mulpd   %xmm12, %xmm5
        movapd  4 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  3 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        movapd  6 * SIZE(AO1), %xmm8
        mulpd   %xmm13, %xmm9
        movapd  6 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        movapd  5 * SIZE(AO2), %xmm9
        SHUFPD_1 %xmm9, %xmm5
        mulpd   %xmm12, %xmm4
        addpd   %xmm4, %xmm0
        mulpd   %xmm12, %xmm5
        movapd  8 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  7 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        mulpd   %xmm13, %xmm9
        movapd  10 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        subq    $-8 * SIZE, AO1
        subq    $-8 * SIZE, AO2
        subq    $-8 * SIZE, BO

/* ---- 2-column remainder: 4 rows (MIN_M & 4) ---- */
.L135:
        movq    MIN_M, I
        andq    $4, I
        je      .L136
        movapd  0 * SIZE(AO1), %xmm4
        movapd  1 * SIZE(AO2), %xmm9
        movapd  2 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        mulpd   %xmm12, %xmm5
        movapd  4 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        movapd  3 * SIZE(AO2), %xmm5
        mulpd   %xmm13, %xmm8
        SHUFPD_1 %xmm5, %xmm9
        addpd   %xmm8, %xmm2
        mulpd   %xmm13, %xmm9
        movapd  6 * SIZE(BO), %xmm13
        addpd   %xmm9, %xmm3
        subq    $-4 * SIZE, AO1
        subq    $-4 * SIZE, AO2
        subq    $-4 * SIZE, BO

/* ---- 2-column remainder: 2 rows (MIN_M & 2) ---- */
.L136:
        movq    MIN_M, I
        andq    $2, I
        je      .L137
        movapd  0 * SIZE(AO1), %xmm4
        movapd  1 * SIZE(AO2), %xmm9
        mulpd   %xmm12, %xmm4
        SHUFPD_1 %xmm9, %xmm5
        addpd   %xmm4, %xmm0
        mulpd   %xmm12, %xmm5
        movapd  2 * SIZE(BO), %xmm12
        addpd   %xmm5, %xmm1
        subq    $-2 * SIZE, AO1
        subq    $-2 * SIZE, AO2
        subq    $-2 * SIZE, BO

/* ---- 2-column remainder: final odd row (MIN_M & 1), scalar ---- */
.L137:
        movq    MIN_M, I
        andq    $1, I
        je      .L139
        movsd   0 * SIZE(AO1), %xmm8
        mulsd   %xmm12, %xmm8
        addsd   %xmm8, %xmm0
        movsd   0 * SIZE(AO2), %xmm8
        mulsd   %xmm12, %xmm8
        addsd   %xmm8, %xmm1

/* ---- fold secondary accumulators, reduce, scale, store 2 results ---- */
.L139:
        addpd   %xmm2, %xmm0
        addpd   %xmm3, %xmm1
#ifdef HAVE_SSE3
        haddpd  %xmm0, %xmm0
        haddpd  %xmm1, %xmm1
#else
        movapd  %xmm0, %xmm9
        movapd  %xmm1, %xmm10
        unpckhpd %xmm0, %xmm0
        unpckhpd %xmm1, %xmm1
        addsd   %xmm9, %xmm0
        addsd   %xmm10, %xmm1
#endif
        mulsd   ALPHA, %xmm0
        mulsd   ALPHA, %xmm1
        movq    CO, TEMP
        addsd   (TEMP), %xmm0
        addq    INCY, TEMP
        addsd   (TEMP), %xmm1
        addq    INCY, TEMP
        movsd   %xmm0, (CO)
        addq    INCY, CO
        movsd   %xmm1, (CO)
        addq    INCY, CO
        ALIGN_3

/* ==== N & 1: final single column ==== */
.L140:
        movq    N, J
        andq    $1, J
        jle     .L149
        ALIGN_3

.L141:
        movq    A, AO1
        addq    LDA, A
        movq    BUFFER, BO
        movapd  0 * SIZE(BO), %xmm12
        pxor    %xmm0, %xmm0
        movapd  2 * SIZE(BO), %xmm13
        pxor    %xmm1, %xmm1
        pxor    %xmm2, %xmm2
        pxor    %xmm3, %xmm3
#if defined(CORE2) || defined(PENRYN)
        PREFETCHW 4 * SIZE(CO)
#endif
        movq    MIN_M, I
        sarq    $4, I
        jle     .L144
        movapd  0 * SIZE(AO1), %xmm4
        movapd  2 * SIZE(AO1), %xmm8
        decq    I
        jle     .L143
        ALIGN_3

/* ---- single-column main loop, unrolled by 16 rows.
 * NOTE(review): this loop accumulates everything into xmm0, while the
 * drain at .L143 rotates across xmm0..xmm3; the reduction for this path
 * is below the visible fragment. ---- */
.L142:
        PREFETCH PREFETCHSIZE * SIZE(AO1)
        mulpd   %xmm12, %xmm4
        movapd  4 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm0
        movapd  4 * SIZE(AO1), %xmm4
        mulpd   %xmm13, %xmm8
        movapd  6 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm0
        movapd  6 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        movapd  8 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm0
        movapd  8 * SIZE(AO1), %xmm4
        mulpd   %xmm13, %xmm8
        movapd  10 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm0
        movapd  10 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        movapd  12 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm0
        movapd  12 * SIZE(AO1), %xmm4
        mulpd   %xmm13, %xmm8
        movapd  14 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm0
        movapd  14 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        movapd  16 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm0
        movapd  16 * SIZE(AO1), %xmm4
        mulpd   %xmm13, %xmm8
        movapd  18 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm0
        movapd  18 * SIZE(AO1), %xmm8
        subq    $-16 * SIZE, AO1
        subq    $-16 * SIZE, BO
        decq    I
        jg      .L142
        ALIGN_3

/* ---- drain of the single-column unroll-by-16 loop ---- */
.L143:
        mulpd   %xmm12, %xmm4
        movapd  4 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm0
        movapd  4 * SIZE(AO1), %xmm4
        mulpd   %xmm13, %xmm8
        movapd  6 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm1
        movapd  6 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        movapd  8 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm2
        movapd  8 * SIZE(AO1), %xmm4
        mulpd   %xmm13, %xmm8
        movapd  10 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm3
        movapd  10 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        movapd  12 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm0
        movapd  12 * SIZE(AO1), %xmm4
        mulpd   %xmm13, %xmm8
        movapd  14 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm1
        movapd  14 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        movapd  16 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm2
        subq    $-16 * SIZE, AO1
        mulpd   %xmm13, %xmm8
        movapd  18 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm3
        subq    $-16 * SIZE, BO

/* ---- single-column remainder: 8 rows (MIN_M & 8) ---- */
.L144:
        movq    MIN_M, I
        andq    $8, I
        je      .L145
        movapd  0 * SIZE(AO1), %xmm4
        movapd  2 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        movapd  4 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm0
        movapd  4 * SIZE(AO1), %xmm4
        mulpd   %xmm13, %xmm8
        movapd  6 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm1
        movapd  6 * SIZE(AO1), %xmm8
        mulpd   %xmm12, %xmm4
        movapd  8 * SIZE(BO), %xmm12
        addpd   %xmm4, %xmm2
        subq    $-8 * SIZE, AO1
        mulpd   %xmm13, %xmm8
        movapd  10 * SIZE(BO), %xmm13
        addpd   %xmm8, %xmm3
        subq    $-8 * SIZE, BO

/* ---- single-column remainder: 4 rows (MIN_M & 4) ----
 * NOTE(review): the fragment is cut off mid-section below; the rest of
 * .L145 (and .L146/.L149) lies outside this chunk. */
.L145:
        movq    MIN_M, I
        andq    $4, I
        je      .L146
        movapd  0 * SIZE(AO1), %xmm4
        movapd  2 * SIZE(AO1), %xmm8
        subq    $-4 * SIZE, AO1
        mulpd   %xmm12, %xmm4
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -