📄 gemv_t_sse2_core2.s
	movapd	0 * SIZE(BO), %xmm12
	pxor	%xmm0, %xmm0
	movapd	2 * SIZE(BO), %xmm13
	pxor	%xmm1, %xmm1
	pxor	%xmm2, %xmm2
	pxor	%xmm3, %xmm3

#if defined(CORE2) || defined(PENRYN)
	PREFETCHW	4 * SIZE(CO)
#endif

	movq	MIN_M, I
	sarq	$4, I
	jle	.L34

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-14 * SIZE(AO1), %xmm8
	movapd	-14 * SIZE(AO2), %xmm9

	decq	I
	jle	.L33
	ALIGN_3

.L32:
	PREFETCH	PREFETCHSIZE * SIZE(AO1)

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-12 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-10 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm1
	movapd	-10 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-8 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-8 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-6 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm1
	movapd	-6 * SIZE(AO2), %xmm9

	PREFETCH	PREFETCHSIZE * SIZE(AO2)

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-4 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	12 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-4 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-2 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	14 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm1
	movapd	-2 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	0 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	16 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	0 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	2 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	18 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm1
	movapd	2 * SIZE(AO2), %xmm9

	subq	$-16 * SIZE, AO1
	subq	$-16 * SIZE, AO2
	subq	$-16 * SIZE, BO

	decq	I
	jg	.L32
	ALIGN_3

.L33:
	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-12 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	movapd	-10 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3
	movapd	-10 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-8 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-8 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	movapd	-6 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3
	movapd	-6 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-4 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	12 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-4 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	movapd	-2 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	14 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3
	movapd	-2 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	16 * SIZE(BO), %xmm12

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	mulpd	%xmm13, %xmm9
	movapd	18 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3

	subq	$-16 * SIZE, BO
	subq	$-16 * SIZE, AO1
	subq	$-16 * SIZE, AO2

.L34:
	movq	MIN_M, I
	andq	$8, I
	je	.L35

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-14 * SIZE(AO1), %xmm8
	movapd	-14 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1
	movapd	-12 * SIZE(AO2), %xmm5

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	movapd	-10 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3
	movapd	-10 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	8 * SIZE(BO), %xmm12

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	mulpd	%xmm13, %xmm9
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3

	subq	$-8 * SIZE, AO1
	subq	$-8 * SIZE, AO2
	subq	$-8 * SIZE, BO

.L35:
	movq	MIN_M, I
	andq	$4, I
	je	.L36

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-14 * SIZE(AO1), %xmm8
	movapd	-14 * SIZE(AO2), %xmm9

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	4 * SIZE(BO), %xmm12

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm2
	mulpd	%xmm13, %xmm9
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm9, %xmm3

	subq	$-4 * SIZE, AO1
	subq	$-4 * SIZE, AO2
	subq	$-4 * SIZE, BO

.L36:
	movq	MIN_M, I
	andq	$2, I
	je	.L37

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	mulpd	%xmm12, %xmm5
	movapd	2 * SIZE(BO), %xmm12
	addpd	%xmm5, %xmm1

	subq	$-2 * SIZE, AO1
	subq	$-2 * SIZE, AO2
	subq	$-2 * SIZE, BO

.L37:
	movq	MIN_M, I
	andq	$1, I
	je	.L39

	movsd	-16 * SIZE(AO1), %xmm8
	mulsd	%xmm12, %xmm8
	addsd	%xmm8, %xmm0

	movsd	-16 * SIZE(AO2), %xmm8
	mulsd	%xmm12, %xmm8
	addsd	%xmm8, %xmm1

.L39:
	addpd	%xmm2, %xmm0
	addpd	%xmm3, %xmm1

#ifdef HAVE_SSE3
	haddpd	%xmm0, %xmm0
	haddpd	%xmm1, %xmm1
#else
	movapd	%xmm0, %xmm9
	movapd	%xmm1, %xmm10
	unpckhpd	%xmm0, %xmm0
	unpckhpd	%xmm1, %xmm1
	addsd	%xmm9, %xmm0
	addsd	%xmm10, %xmm1
#endif

	mulsd	ALPHA, %xmm0
	mulsd	ALPHA, %xmm1

	movq	CO, TEMP
	addsd	(TEMP), %xmm0
	addq	INCY, TEMP
	addsd	(TEMP), %xmm1
	addq	INCY, TEMP

	movsd	%xmm0, (CO)
	addq	INCY, CO
	movsd	%xmm1, (CO)
	addq	INCY, CO
	ALIGN_3

.L40:
	movq	N, J
	andq	$1, J
	jle	.L49
	ALIGN_3

.L41:
	movq	A, AO1
	addq	LDA, A

	movq	BUFFER, BO

	movapd	0 * SIZE(BO), %xmm12
	pxor	%xmm0, %xmm0
	movapd	2 * SIZE(BO), %xmm13
	pxor	%xmm1, %xmm1
	pxor	%xmm2, %xmm2
	pxor	%xmm3, %xmm3

#if defined(CORE2) || defined(PENRYN)
	PREFETCHW	4 * SIZE(CO)
#endif

	movq	MIN_M, I
	sarq	$4, I
	jle	.L44

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-14 * SIZE(AO1), %xmm8

	decq	I
	jle	.L43
	ALIGN_3

.L42:
	PREFETCH	PREFETCHSIZE * SIZE(AO1)

	mulpd	%xmm12, %xmm4
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4

	mulpd	%xmm13, %xmm8
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm0
	movapd	-10 * SIZE(AO1), %xmm8

	mulpd	%xmm12, %xmm4
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm0
	movapd	-8 * SIZE(AO1), %xmm4

	mulpd	%xmm13, %xmm8
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm0
	movapd	-6 * SIZE(AO1), %xmm8

	mulpd	%xmm12, %xmm4
	movapd	12 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm0
	movapd	-4 * SIZE(AO1), %xmm4

	mulpd	%xmm13, %xmm8
	movapd	14 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm0
	movapd	-2 * SIZE(AO1), %xmm8

	mulpd	%xmm12, %xmm4
	movapd	16 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm0
	movapd	0 * SIZE(AO1), %xmm4

	mulpd	%xmm13, %xmm8
	movapd	18 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm0
	movapd	2 * SIZE(AO1), %xmm8

	subq	$-16 * SIZE, AO1
	subq	$-16 * SIZE, BO

	decq	I
	jg	.L42
	ALIGN_3

.L43:
	mulpd	%xmm12, %xmm4
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4

	mulpd	%xmm13, %xmm8
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm1
	movapd	-10 * SIZE(AO1), %xmm8

	mulpd	%xmm12, %xmm4
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm2
	movapd	-8 * SIZE(AO1), %xmm4

	mulpd	%xmm13, %xmm8
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm3
	movapd	-6 * SIZE(AO1), %xmm8

	mulpd	%xmm12, %xmm4
	movapd	12 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm0
	movapd	-4 * SIZE(AO1), %xmm4

	mulpd	%xmm13, %xmm8
	movapd	14 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm1
	movapd	-2 * SIZE(AO1), %xmm8

	mulpd	%xmm12, %xmm4
	movapd	16 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm2

	mulpd	%xmm13, %xmm8
	movapd	18 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm3

	subq	$-16 * SIZE, AO1
	subq	$-16 * SIZE, BO

.L44:
	movq	MIN_M, I
	andq	$8, I
	je	.L45

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-14 * SIZE(AO1), %xmm8

	mulpd	%xmm12, %xmm4
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4

	mulpd	%xmm13, %xmm8
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm1
	movapd	-10 * SIZE(AO1), %xmm8

	mulpd	%xmm12, %xmm4
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm2

	mulpd	%xmm13, %xmm8
	movapd	10 * SIZE(BO), %xmm13
	addpd	%xmm8, %xmm3

	subq	$-8 * SIZE, AO1
	subq	$-8 * SIZE, BO

.L45:
	movq	MIN_M, I
	andq	$4, I
	je	.L46

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-14 * SIZE(AO1), %xmm8

	mulpd	%xmm12, %xmm4
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm0

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm1

	subq	$-4 * SIZE, AO1
	subq	$-4 * SIZE, BO

.L46:
	movq	MIN_M, I
	andq	$2, I
	je	.L47

	movapd	-16 * SIZE(AO1), %xmm4

	mulpd	%xmm12, %xmm4
	movapd	2 * SIZE(BO), %xmm12
	addpd	%xmm4, %xmm0

	subq	$-2 * SIZE, AO1
	subq	$-2 * SIZE, BO

.L47:
	movq	MIN_M, I
	andq	$1, I
	je	.L48

	movsd	-16 * SIZE(AO1), %xmm8
	mulsd	%xmm12, %xmm8
	addsd	%xmm8, %xmm0

.L48:
	addpd	%xmm2, %xmm0
	addpd	%xmm3, %xmm1
	addpd	%xmm1, %xmm0

#ifdef HAVE_SSE3
	haddpd	%xmm0, %xmm0
#else
	movapd	%xmm0, %xmm9
	unpckhpd	%xmm0, %xmm0
	addsd	%xmm9, %xmm0
#endif

	mulsd	ALPHA, %xmm0
	addsd	(CO), %xmm0
	movsd	%xmm0, (CO)
	ALIGN_3

.L49:
	addq	NLDA, A

	addq	$P, IS
	cmpq	M, IS
	jl	.L10
	jmp	.L999
	ALIGN_3

.L50:
	movq	$P, TEMP
	movq	M, MIN_M
	subq	IS, MIN_M
	cmpq	TEMP, MIN_M
	cmovg	TEMP, MIN_M

	movq	BUFFER, BO

	movsd	(X), %xmm0
	movsd	%xmm0, 0 * SIZE(BO)
	addq	INCX, X
	addq	$2 * SIZE, BO

	leaq	-1(MIN_M), I
	sarq	$3, I
	jle	.L55
	ALIGN_3

.L52:
	movsd	(X), %xmm0
	addq	INCX, X
	movhpd	(X), %xmm0
	addq	INCX, X
	movsd	(X), %xmm2
	addq	INCX, X
	movhpd	(X), %xmm2
	addq	INCX, X
	movsd	(X), %xmm4
	addq	INCX, X
	movhpd	(X), %xmm4
	addq	INCX, X
	movsd	(X), %xmm6
	addq	INCX, X
	movhpd	(X), %xmm6
	addq	INCX, X

	movapd	%xmm0, 0 * SIZE(BO)
	movapd	%xmm2, 2 * SIZE(BO)
	movapd	%xmm4, 4 * SIZE(BO)
	movapd	%xmm6, 6 * SIZE(BO)

	addq	$8 * SIZE, BO
	decq	I
	jg	.L52
	ALIGN_3

.L55:
	leaq	-1(MIN_M), I
	andq	$7, I
	jle	.L60
	ALIGN_2

.L56:
	movsd	(X), %xmm0
	addq	INCX, X
	movsd	%xmm0, 0 * SIZE(BO)
	addq	$SIZE, BO
	decq	I
	jg	.L56
	ALIGN_3

.L60:
	movq	Y, CO

	movq	N, J
	sarq	$2, J
	jle	.L70
	ALIGN_3

.L61:
	movq	A, AO1
	leaq	(A, LDA, 1), AO2
	leaq	(A, LDA, 4), A

	movq	BUFFER, BO

	movapd	(BO), %xmm12

	movsd	-16 * SIZE(AO1), %xmm0
	mulsd	%xmm12, %xmm0
	movsd	-16 * SIZE(AO2), %xmm1
	mulsd	%xmm12, %xmm1
	movsd	-16 * SIZE(AO1, LDA, 2), %xmm2
	mulsd	%xmm12, %xmm2
	movsd	-16 * SIZE(AO2, LDA, 2), %xmm3
	mulsd	%xmm12, %xmm3

	movapd	2 * SIZE(BO), %xmm12
	movapd	4 * SIZE(BO), %xmm13

	addq	$SIZE, AO1
	addq	$SIZE, AO2
	addq	$2 * SIZE, BO

#if defined(CORE2) || defined(PENRYN)
	PREFETCHW	4 * SIZE(CO)
#endif

	leaq	-1(MIN_M), I
	sarq	$4, I
	jle	.L64

	movapd	-16 * SIZE(AO1), %xmm4
	movapd	-16 * SIZE(AO2), %xmm5
	movapd	-16 * SIZE(AO1, LDA, 2), %xmm6
	movapd	-16 * SIZE(AO2, LDA, 2), %xmm7

	movapd	-14 * SIZE(AO1), %xmm8
	movapd	-14 * SIZE(AO2), %xmm9
	movapd	-14 * SIZE(AO1, LDA, 2), %xmm10
	movapd	-14 * SIZE(AO2, LDA, 2), %xmm11

	decq	I
	jle	.L63
	ALIGN_3

.L62:
	PREFETCH	PREFETCHSIZE * SIZE(AO1)

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-12 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	-12 * SIZE(AO2), %xmm5
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	movapd	-12 * SIZE(AO1, LDA, 2), %xmm6
	mulpd	%xmm12, %xmm7
	movapd	4 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3
	movapd	-12 * SIZE(AO2, LDA, 2), %xmm7

#if defined(CORE2) || defined(PENRYN)
	PREFETCH	(PREFETCHSIZE + 8) * SIZE(AO1)
#endif

	mulpd	%xmm13, %xmm8
	addpd	%xmm8, %xmm0
	movapd	-10 * SIZE(AO1), %xmm8
	mulpd	%xmm13, %xmm9
	addpd	%xmm9, %xmm1
	movapd	-10 * SIZE(AO2), %xmm9
	mulpd	%xmm13, %xmm10
	addpd	%xmm10, %xmm2
	movapd	-10 * SIZE(AO1, LDA, 2), %xmm10
	mulpd	%xmm13, %xmm11
	movapd	6 * SIZE(BO), %xmm13
	addpd	%xmm11, %xmm3
	movapd	-10 * SIZE(AO2, LDA, 2), %xmm11

	PREFETCH	PREFETCHSIZE * SIZE(AO2)

	mulpd	%xmm12, %xmm4
	addpd	%xmm4, %xmm0
	movapd	-8 * SIZE(AO1), %xmm4
	mulpd	%xmm12, %xmm5
	addpd	%xmm5, %xmm1
	movapd	-8 * SIZE(AO2), %xmm5
	mulpd	%xmm12, %xmm6
	addpd	%xmm6, %xmm2
	movapd	-8 * SIZE(AO1, LDA, 2), %xmm6
	mulpd	%xmm12, %xmm7
	movapd	8 * SIZE(BO), %xmm12
	addpd	%xmm7, %xmm3
	movapd	-8 * SIZE(AO2, LDA, 2), %xmm7

#if defined(CORE2) || defined(PENRYN)
	PREFETCH	(PREFETCHSIZE + 8) * SIZE(AO2)
#endif
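The fragment above is a GotoBLAS/OpenBLAS-style SSE2 kernel for the transposed double-precision GEMV case, tuned for Core2/Penryn: the x vector is packed into an aligned BUFFER (.L50–.L56), columns of A are then dotted against that buffer four at a time (.L61/.L62), two at a time (.L32–.L39), or one at a time (.L41–.L48), and the packed partial sums in %xmm0–%xmm3 are reduced horizontally (haddpd under HAVE_SSE3, otherwise unpckhpd/addsd), scaled by ALPHA, and accumulated into y at stride INCY. As a point of reference only, the C sketch below spells out the scalar operation this computes, y := alpha * A^T * x + y, assuming a column-major A with leading dimension lda and positive strides for x and y; the function name and signature are illustrative and do not appear in the original source.

    #include <stddef.h>

    /* Reference sketch, not part of the original file: the scalar operation
     * the kernel above implements.  Assumes column-major A and positive
     * strides; dgemv_t_ref is a hypothetical name. */
    static void dgemv_t_ref(size_t m, size_t n, double alpha,
                            const double *a, size_t lda,
                            const double *x, size_t incx,
                            double *y, size_t incy)
    {
        for (size_t j = 0; j < n; j++) {            /* one dot product per column of A */
            double sum = 0.0;
            for (size_t i = 0; i < m; i++)
                sum += a[i + j * lda] * x[i * incx];
            y[j * incy] += alpha * sum;             /* scale by alpha, accumulate into y */
        }
    }

The assembly additionally blocks the row dimension: each pass works on MIN_M = min(P, M - IS) rows (.L50), which keeps the packed slice of x 16-byte aligned for the movapd loads in the inner loops and, presumably, small enough to stay cache-resident while several columns are streamed past it.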