gemv_t_sse2_core2.s
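/*
 * Fragment of a double-precision transposed-GEMV kernel
 * (y := y + alpha * A^T * x) written with SSE2 packed arithmetic in the
 * GotoBLAS/OpenBLAS style and tuned for Core2/Penryn.
 *
 * Reading aid -- the meanings below are inferred from how this fragment
 * uses the surrounding macros, which are defined elsewhere in the file:
 * AO1/AO2 (and AO1/AO2 offset by 2*LDA) walk four columns of A, BO points
 * at an aligned packed copy of x, CO walks y with stride INCY, MIN_M is
 * the row count of the current block, and SIZE is sizeof(double).
 * The listing begins partway through .L62, the 16x-unrolled main loop of
 * the four-columns-at-a-time path. In rough C terms, that path computes:
 *
 *   for (j = 0; j + 3 < n; j += 4) {        // columns, 4 at a time (.L62)
 *       double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
 *       for (i = 0; i < m; i++) {           // rows, unrolled 16x
 *           s0 += A[i + (j + 0) * lda] * x[i];
 *           s1 += A[i + (j + 1) * lda] * x[i];
 *           s2 += A[i + (j + 2) * lda] * x[i];
 *           s3 += A[i + (j + 3) * lda] * x[i];
 *       }
 *       y[(j + 0) * incy] += alpha * s0;    // reduction + update (.L69)
 *       // ... likewise for s1..s3 ...
 *   }
 *
 * The scalars s0..s3 actually live as packed pairs in %xmm0-%xmm3 and are
 * summed horizontally at .L69.
 */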
	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-6 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	movapd	-6 * SIZE(AO2), %xmm9
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	movapd	-6 * SIZE(AO1, LDA, 2), %xmm10
	mulpd	%xmm13, %xmm11
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3
	movapd	-6 * SIZE(AO2, LDA, 2), %xmm11

	PREFETCH	PREFETCHSIZE * SIZE(AO1, LDA, 2)

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-4 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	-4 * SIZE(AO2), %xmm5
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	movapd	-4 * SIZE(AO1, LDA, 2), %xmm6
	mulpd	%xmm12, %xmm7
	movapd	12 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3
	movapd	-4 * SIZE(AO2, LDA, 2), %xmm7

#if defined(CORE2) || defined(PENRYN)
	PREFETCH	(PREFETCHSIZE + 8) * SIZE(AO1, LDA, 2)
#endif

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-2 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	movapd	-2 * SIZE(AO2), %xmm9
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	movapd	-2 * SIZE(AO1, LDA, 2), %xmm10
	mulpd	%xmm13, %xmm11
	movapd	14 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3
	movapd	-2 * SIZE(AO2, LDA, 2), %xmm11

	PREFETCH	PREFETCHSIZE * SIZE(AO2, LDA, 2)

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	0 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	0 * SIZE(AO2), %xmm5
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	movapd	0 * SIZE(AO1, LDA, 2), %xmm6
	mulpd	%xmm12, %xmm7
	movapd	16 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3
	movapd	0 * SIZE(AO2, LDA, 2), %xmm7

#if defined(CORE2) || defined(PENRYN)
	PREFETCH	(PREFETCHSIZE + 8) * SIZE(AO2, LDA, 2)
#endif

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	2 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	movapd	2 * SIZE(AO2), %xmm9
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	movapd	2 * SIZE(AO1, LDA, 2), %xmm10
	mulpd	%xmm13, %xmm11
	movapd	18 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3
	movapd	2 * SIZE(AO2, LDA, 2), %xmm11

	subq	$-16 * SIZE, AO1
	subq	$-16 * SIZE, AO2
	subq	$-16 * SIZE, BO

	decq	I
	jg	.L62
	ALIGN_3

/* .L63: drain the last software-pipelined iteration of .L62 */
.L63:
	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	-12 * SIZE(AO2), %xmm5
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	movapd	-12 * SIZE(AO1, LDA, 2), %xmm6
	mulpd	%xmm12, %xmm7
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3
	movapd	-12 * SIZE(AO2, LDA, 2), %xmm7

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-10 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	movapd	-10 * SIZE(AO2), %xmm9
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	movapd	-10 * SIZE(AO1, LDA, 2), %xmm10
	mulpd	%xmm13, %xmm11
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3
	movapd	-10 * SIZE(AO2, LDA, 2), %xmm11

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-8 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	-8 * SIZE(AO2), %xmm5
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	movapd	-8 * SIZE(AO1, LDA, 2), %xmm6
	mulpd	%xmm12, %xmm7
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3
	movapd	-8 * SIZE(AO2, LDA, 2), %xmm7

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-6 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	movapd	-6 * SIZE(AO2), %xmm9
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	movapd	-6 * SIZE(AO1, LDA, 2), %xmm10
	mulpd	%xmm13, %xmm11
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3
	movapd	-6 * SIZE(AO2, LDA, 2), %xmm11

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-4 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	-4 * SIZE(AO2), %xmm5
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	movapd	-4 * SIZE(AO1, LDA, 2), %xmm6
	mulpd	%xmm12, %xmm7
	movapd	12 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3
	movapd	-4 * SIZE(AO2, LDA, 2), %xmm7

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-2 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	movapd	-2 * SIZE(AO2), %xmm9
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	movapd	-2 * SIZE(AO1, LDA, 2), %xmm10
	mulpd	%xmm13, %xmm11
	movapd	14 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3
	movapd	-2 * SIZE(AO2, LDA, 2), %xmm11

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	mulpd	%xmm12, %xmm7
	movapd	16 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	mulpd	%xmm13, %xmm11
	movapd	18 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3

	subq	$-16 * SIZE, AO1
	subq	$-16 * SIZE, AO2
	subq	$-16 * SIZE, BO

/* .L64-.L67: row remainders of 8, 4, 2, then 1 for the four-column path */
.L64:
	leaq	-1(MIN_M), I
	andq	$8, I
	je	.L65

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-16 * SIZE(AO1, LDA, 2), %xmm6
	movapd	-16 * SIZE(AO2, LDA, 2), %xmm7
	movapd	-14 * SIZE(AO1), %xmm8
	movapd	-14 * SIZE(AO2), %xmm9
	movapd	-14 * SIZE(AO1, LDA, 2), %xmm10
	movapd	-14 * SIZE(AO2, LDA, 2), %xmm11

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	-12 * SIZE(AO2), %xmm5
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	movapd	-12 * SIZE(AO1, LDA, 2), %xmm6
	mulpd	%xmm12, %xmm7
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3
	movapd	-12 * SIZE(AO2, LDA, 2), %xmm7

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-10 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	movapd	-10 * SIZE(AO2), %xmm9
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	movapd	-10 * SIZE(AO1, LDA, 2), %xmm10
	mulpd	%xmm13, %xmm11
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3
	movapd	-10 * SIZE(AO2, LDA, 2), %xmm11

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	mulpd	%xmm12, %xmm7
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	mulpd	%xmm13, %xmm11
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3

	subq	$-8 * SIZE, AO1
	subq	$-8 * SIZE, AO2
	subq	$-8 * SIZE, BO

.L65:
	leaq	-1(MIN_M), I
	andq	$4, I
	je	.L66

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-16 * SIZE(AO1, LDA, 2), %xmm6
	movapd	-16 * SIZE(AO2, LDA, 2), %xmm7
	movapd	-14 * SIZE(AO1), %xmm8
	movapd	-14 * SIZE(AO2), %xmm9
	movapd	-14 * SIZE(AO1, LDA, 2), %xmm10
	movapd	-14 * SIZE(AO2, LDA, 2), %xmm11

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	mulpd	%xmm12, %xmm7
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	mulpd	%xmm13, %xmm11
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3

	subq	$-4 * SIZE, AO1
	subq	$-4 * SIZE, AO2
	subq	$-4 * SIZE, BO

.L66:
	leaq	-1(MIN_M), I
	andq	$2, I
	je	.L67

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-16 * SIZE(AO1, LDA, 2), %xmm6
	movapd	-16 * SIZE(AO2, LDA, 2), %xmm7

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	mulpd	%xmm12, %xmm7
	movapd	2 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3

	subq	$-2 * SIZE, AO1
	subq	$-2 * SIZE, AO2
	subq	$-2 * SIZE, BO

.L67:
	leaq	-1(MIN_M), I
	andq	$1, I
	je	.L69

	movsd	-16 * SIZE(AO1), %xmm8
	mulsd	%xmm12, %xmm8
	addsd	%xmm8, %xmm0
	movsd	-16 * SIZE(AO2), %xmm8
	mulsd	%xmm12, %xmm8
	addsd	%xmm8, %xmm1
	movsd	-16 * SIZE(AO1, LDA, 2), %xmm8
	mulsd	%xmm12, %xmm8
	addsd	%xmm8, %xmm2
	movsd	-16 * SIZE(AO2, LDA, 2), %xmm8
	mulsd	%xmm12, %xmm8
	addsd	%xmm8, %xmm3

/*
 * .L69: reduce each packed accumulator to a scalar (haddpd with SSE3,
 * unpckhpd + addsd otherwise), scale by alpha, then read-modify-write
 * four elements of y spaced INCY apart.
 */
.L69:
#ifdef HAVE_SSE3
	haddpd	%xmm0, %xmm0
	haddpd	%xmm1, %xmm1
	haddpd	%xmm2, %xmm2
	haddpd	%xmm3, %xmm3
#else
	movapd	%xmm0, %xmm9
	movapd	%xmm1, %xmm10
	movapd	%xmm2, %xmm11
	movapd	%xmm3, %xmm12

	unpckhpd	%xmm0, %xmm0
	unpckhpd	%xmm1, %xmm1
	unpckhpd	%xmm2, %xmm2
	unpckhpd	%xmm3, %xmm3

	addsd	%xmm9, %xmm0
	addsd	%xmm10, %xmm1
	addsd	%xmm11, %xmm2
	addsd	%xmm12, %xmm3
#endif

	mulsd	ALPHA, %xmm0
	mulsd	ALPHA, %xmm1
	mulsd	ALPHA, %xmm2
	mulsd	ALPHA, %xmm3

	movq	CO, TEMP
	addsd	(TEMP), %xmm0
	addq	INCY, TEMP
	addsd	(TEMP), %xmm1
	addq	INCY, TEMP
	addsd	(TEMP), %xmm2
	addq	INCY, TEMP
	addsd	(TEMP), %xmm3

	movsd	%xmm0, (CO)
	addq	INCY, CO
	movsd	%xmm1, (CO)
	addq	INCY, CO
	movsd	%xmm2, (CO)
	addq	INCY, CO
	movsd	%xmm3, (CO)
	addq	INCY, CO

	decq	J
	jg	.L61
	ALIGN_3

/* .L70/.L71: same scheme for a remaining pair of columns (N & 2) */
.L70:
	movq	N, J
	andq	$2, J
	jle	.L80
	ALIGN_3

.L71:
	movq	A, AO1
	leaq	(A, LDA, 1), AO2
	leaq	(A, LDA, 2), A

	movq	BUFFER, BO

	movsd	0 * SIZE(BO), %xmm12
	movsd	-16 * SIZE(AO1), %xmm0
	mulsd	%xmm12, %xmm0
	movsd	-16 * SIZE(AO2), %xmm1
	mulsd	%xmm12, %xmm1

	movapd	2 * SIZE(BO), %xmm12
	movapd	4 * SIZE(BO), %xmm13

	pxor	%xmm2, %xmm2
	pxor	%xmm3, %xmm3

	addq	$1 * SIZE, AO1
	addq	$1 * SIZE, AO2
	addq	$2 * SIZE, BO

#if defined(CORE2) || defined(PENRYN)
	PREFETCHW	4 * SIZE(CO)
#endif

	leaq	-1(MIN_M), I
	sarq	$4, I
	jle	.L74

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-14 * SIZE(AO1), %xmm8
	movapd	-14 * SIZE(AO2), %xmm9

	decq	I
	jle	.L73
	ALIGN_3

/* .L72: 16x-unrolled two-column main loop */
.L72:
	PREFETCH	PREFETCHSIZE * SIZE(AO1)

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-12 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-10 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm1
	movapd	-10 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-8 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-8 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-6 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm1
	movapd	-6 * SIZE(AO2), %xmm9

	PREFETCH	PREFETCHSIZE * SIZE(AO2)

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-4 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	12 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-4 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-2 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	14 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm1
	movapd	-2 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	0 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	16 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	0 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	2 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	18 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm1
	movapd	2 * SIZE(AO2), %xmm9

	subq	$-16 * SIZE, AO1
	subq	$-16 * SIZE, AO2
	subq	$-16 * SIZE, BO

	decq	I
	jg	.L72
	ALIGN_3

/* .L73: pipeline drain for the two-column path */
.L73:
	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-12 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	movapd	-10 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3
	movapd	-10 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-8 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-8 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	movapd	-6 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3
	movapd	-6 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-4 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	12 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-4 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	movapd	-2 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	14 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3
	movapd	-2 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	16 * SIZE(BO), %xmm12

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	mulpd	%xmm13, %xmm9
	movapd	18 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3

	subq	$-16 * SIZE, AO1
	subq	$-16 * SIZE, AO2
	subq	$-16 * SIZE, BO

/* .L74-.L77: two-column row remainders of 8, 4, 2, then 1 */
.L74:
	leaq	-1(MIN_M), I
	andq	$8, I
	je	.L75

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-14 * SIZE(AO1), %xmm8
	movapd	-14 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-12 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	movapd	-10 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3
	movapd	-10 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	8 * SIZE(BO), %xmm12

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	mulpd	%xmm13, %xmm9
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3

	subq	$-8 * SIZE, AO1
	subq	$-8 * SIZE, AO2
	subq	$-8 * SIZE, BO

.L75:
	leaq	-1(MIN_M), I
	andq	$4, I
	je	.L76

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-14 * SIZE(AO1), %xmm8
	movapd	-14 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	4 * SIZE(BO), %xmm12

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	mulpd	%xmm13, %xmm9
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3

	subq	$-4 * SIZE, AO1
	subq	$-4 * SIZE, AO2
	subq	$-4 * SIZE, BO

.L76:
	leaq	-1(MIN_M), I
	andq	$2, I
	je	.L77

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	movapd	2 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1

	subq	$-2 * SIZE, AO1
	subq	$-2 * SIZE, AO2
	subq	$-2 * SIZE, BO

.L77:
	leaq	-1(MIN_M), I
	andq	$1, I
	je	.L79

	movsd	-16 * SIZE(AO1), %xmm8
	mulsd	%xmm12, %xmm8
	addsd	%xmm8, %xmm0
	movsd	-16 * SIZE(AO2), %xmm8
	mulsd	%xmm12, %xmm8
	addsd	%xmm8, %xmm1

/* .L79: fold the secondary accumulators into %xmm0/%xmm1 */
.L79:
	addpd	%xmm2, %xmm0
	addpd	%xmm3, %xmm1