
trsm_kernel_ln_8x4_sse.s

Collection: Optimized GotoBLAS libraries
Page 1 of 5
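This page holds part of the single-precision TRSM micro-kernel (SSE, `mulps`/`addps` on packed floats) from the GotoBLAS level-3 kernels; the `LN`/`LT`/`RN`/`RT` macros in the listing select the four side/transpose variants of the triangular solve at build time. As a reference for the operation this kernel family implements, below is a minimal scalar C sketch of one variant, a left-sided forward-substitution solve of A*X = B with A lower triangular; the function name and column-major layout are illustrative assumptions, not GotoBLAS API. One visible difference: the solve stage in the assembly multiplies by broadcast diagonal entries (`pshufd` then `mulps`), which suggests the packed matrix stores reciprocals of the diagonal, whereas the sketch divides.

    /* Illustrative scalar reference (not GotoBLAS code): solve A * X = B
     * in place. A is m x m lower triangular, column-major, leading dim lda;
     * B is m x n, column-major, leading dim ldb, overwritten by X. */
    static void strsm_left_lower_ref(int m, int n,
                                     const float *A, int lda,
                                     float *B, int ldb)
    {
        for (int j = 0; j < n; j++) {           /* each right-hand side  */
            for (int i = 0; i < m; i++) {       /* forward substitution  */
                float x = B[i + j * ldb];
                for (int k = 0; k < i; k++)     /* subtract solved terms */
                    x -= A[i + k * lda] * B[k + j * ldb];
                B[i + j * ldb] = x / A[i + i * lda];
            }
        }
    }

The assembly does the same arithmetic on register-blocked tiles of up to 8 rows by 4 columns (the 8x4 in the file name), accumulating dot products with GEMM-style loops and then applying the substitution to each tile.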
	unpckhps %xmm11, %xmm7
	movaps	 %xmm1,  %xmm10
	unpcklps %xmm5,  %xmm1
	unpckhps %xmm5,  %xmm10
	movaps	 %xmm0,  %xmm11
	unpcklps %xmm7,  %xmm0
	unpckhps %xmm7,  %xmm11

	movss	%xmm1,  0 * SIZE(CO1)
	movss	%xmm10, 0 * SIZE(CO2)
	movss	%xmm0,  0 * SIZE(CO1, LDC, 2)
	movss	%xmm11, 0 * SIZE(CO2, LDC, 2)
#else
	movss	%xmm8,   0 * SIZE(CO1)
	movss	%xmm10,  0 * SIZE(CO2)
	movss	%xmm12,  0 * SIZE(CO1, LDC, 2)
	movss	%xmm14,  0 * SIZE(CO2, LDC, 2)
#endif

#ifndef LN
	addq	$1 * SIZE, CO1
	addq	$1 * SIZE, CO2
#endif

#if defined(LT) || defined(RN)
	movq	K,  %rax
	subq	KK, %rax
	leaq	(,%rax, SIZE), %rax
	leaq	(AO, %rax, 1), AO
#ifdef LT
	addq	$4 * SIZE, B
#endif
#endif

#ifdef LN
	subq	$1, KK
	movq	BORIG, B
#endif
#ifdef LT
	addq	$1, KK
#endif
#ifdef RT
	movq	K, %rax
	movq	BORIG, B
	salq	$BASE_SHIFT, %rax
	addq	%rax, AORIG
#endif
	ALIGN_4

.L20:
	testq	$2, M		# two-row tail of the M dimension
	je	.L30

#ifdef LN
	movq	K, %rax
	salq	$1 + BASE_SHIFT, %rax
	subq	%rax, AORIG
#endif

#if defined(LN) || defined(RT)
	movq	KK, %rax
	movq	AORIG, AO
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 2), AO
#endif

	leaq	BUFFER, BO

#if defined(LN) || defined(RT)
	movq	KK, %rax
	salq	$2 + BASE_SHIFT, %rax
	leaq	(BO, %rax, 4), BO
#endif

	movaps	 0 * SIZE(AO), %xmm8
	movaps	 8 * SIZE(AO), %xmm10

	movaps	 0 * SIZE(BO), %xmm9
	movaps	16 * SIZE(BO), %xmm11
	movaps	32 * SIZE(BO), %xmm13
	movaps	48 * SIZE(BO), %xmm15

	pxor	%xmm0, %xmm0
	pxor	%xmm1, %xmm1
	pxor	%xmm2, %xmm2
	pxor	%xmm3, %xmm3

#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	sarq	$3, %rax
	je	.L35
	ALIGN_4

.L32:	# main k loop for the 2-row tile, unrolled 8x
	mulps	%xmm8, %xmm9
	addps	%xmm9, %xmm0
#if defined(OPTERON) && defined(HAVE_PREFETCH)
	PREFETCH	(PREFETCHSIZE +  0) * SIZE(AO)
#endif
	movlps	 4 * SIZE(BO), %xmm9
	mulps	%xmm8, %xmm9
	addps	%xmm9, %xmm1
	movlps	 8 * SIZE(BO), %xmm9
	mulps	%xmm8, %xmm9
	addps	%xmm9, %xmm2
	movlps	12 * SIZE(BO), %xmm9
	mulps	%xmm8, %xmm9
	movlps	 2 * SIZE(AO), %xmm8
	addps	%xmm9, %xmm3
	movlps	64 * SIZE(BO), %xmm9
	mulps	%xmm8, %xmm11
	addps	%xmm11, %xmm0
	movlps	20 * SIZE(BO), %xmm11
	mulps	%xmm8, %xmm11
	addps	%xmm11, %xmm1
	movlps	24 * SIZE(BO), %xmm11
	mulps	%xmm8, %xmm11
	addps	%xmm11, %xmm2
	movlps	28 * SIZE(BO), %xmm11
	mulps	%xmm8, %xmm11
	movlps	 4 * SIZE(AO), %xmm8
	addps	%xmm11, %xmm3
	movlps	80 * SIZE(BO), %xmm11
	mulps	%xmm8, %xmm13
	addps	%xmm13, %xmm0
	movlps	36 * SIZE(BO), %xmm13
	mulps	%xmm8, %xmm13
	addps	%xmm13, %xmm1
	movlps	40 * SIZE(BO), %xmm13
	mulps	%xmm8, %xmm13
	addps	%xmm13, %xmm2
	movlps	44 * SIZE(BO), %xmm13
	mulps	%xmm8, %xmm13
	movlps	 6 * SIZE(AO), %xmm8
	addps	%xmm13, %xmm3
	movlps	96 * SIZE(BO), %xmm13
	mulps	%xmm8, %xmm15
	addps	%xmm15, %xmm0
	movlps	52 * SIZE(BO), %xmm15
	mulps	%xmm8, %xmm15
	addps	%xmm15, %xmm1
	movlps	56 * SIZE(BO), %xmm15
	mulps	%xmm8, %xmm15
	addps	%xmm15, %xmm2
	movlps	60 * SIZE(BO), %xmm15
	mulps	%xmm8, %xmm15
	movlps	16 * SIZE(AO), %xmm8
	addps	%xmm15, %xmm3
	movlps	112 * SIZE(BO), %xmm15
	mulps	%xmm10, %xmm9
	addps	%xmm9, %xmm0
	movlps	68 * SIZE(BO), %xmm9
	mulps	%xmm10, %xmm9
	addps	%xmm9, %xmm1
	movlps	72 * SIZE(BO), %xmm9
	mulps	%xmm10, %xmm9
	addps	%xmm9, %xmm2
	movlps	76 * SIZE(BO), %xmm9
	mulps	%xmm10, %xmm9
	movlps	10 * SIZE(AO), %xmm10
	addps	%xmm9, %xmm3
	movlps	128 * SIZE(BO), %xmm9
	mulps	%xmm10, %xmm11
	addps	%xmm11, %xmm0
	movlps	84 * SIZE(BO), %xmm11
	mulps	%xmm10, %xmm11
	addps	%xmm11, %xmm1
	movlps	88 * SIZE(BO), %xmm11
	mulps	%xmm10, %xmm11
	addps	%xmm11, %xmm2
	movlps	92 * SIZE(BO), %xmm11
	mulps	%xmm10, %xmm11
	movlps	12 * SIZE(AO), %xmm10
	addps	%xmm11, %xmm3
	movlps	144 * SIZE(BO), %xmm11
	mulps	%xmm10, %xmm13
	addps	%xmm13, %xmm0
	movlps	100 * SIZE(BO), %xmm13
	mulps	%xmm10, %xmm13
	addps	%xmm13, %xmm1
	movlps	104 * SIZE(BO), %xmm13
	mulps	%xmm10, %xmm13
	addps	%xmm13, %xmm2
	movlps	108 * SIZE(BO), %xmm13
	mulps	%xmm10, %xmm13
	movlps	14 * SIZE(AO), %xmm10
	addps	%xmm13, %xmm3
	movlps	160 * SIZE(BO), %xmm13
	mulps	%xmm10, %xmm15
	addps	%xmm15, %xmm0
	movlps	116 * SIZE(BO), %xmm15
	mulps	%xmm10, %xmm15
	addps	%xmm15, %xmm1
	movlps	120 * SIZE(BO), %xmm15
	mulps	%xmm10, %xmm15
	addps	%xmm15, %xmm2
	movlps	124 * SIZE(BO), %xmm15
	mulps	%xmm10, %xmm15
	movlps	24 * SIZE(AO), %xmm10
	addps	%xmm15, %xmm3
	movlps	176 * SIZE(BO), %xmm15

	addq   $ 16 * SIZE, AO
	addq   $128 * SIZE, BO
	decq   %rax
	jne    .L32
	ALIGN_4

.L35:
#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	andq	$7, %rax		# if (k & 7)
	BRANCH
	je .L38
	ALIGN_4

.L36:	# k-loop remainder for the 2-row tile
	mulps	%xmm8, %xmm9
	addps	%xmm9, %xmm0
	movlps	 4 * SIZE(BO), %xmm9
	mulps	%xmm8, %xmm9
	addps	%xmm9, %xmm1
	movlps	 8 * SIZE(BO), %xmm9
	mulps	%xmm8, %xmm9
	addps	%xmm9, %xmm2
	movlps	12 * SIZE(BO), %xmm9
	mulps	%xmm8, %xmm9
	movlps	 2 * SIZE(AO), %xmm8
	addps	%xmm9, %xmm3
	movlps	16 * SIZE(BO), %xmm9

	addq	$ 2 * SIZE, AO		# aoffset  += 2
	addq	$16 * SIZE, BO		# boffset1 += 16
	decq	%rax
	jg	.L36
	ALIGN_4

.L38:	# triangular solve on the accumulated 2x4 tile
#if defined(LN) || defined(RT)
	movq	KK, %rax
#ifdef LN
	subq	$2, %rax
#else
	subq	$4, %rax
#endif
	movq	AORIG, AO
	movq	BORIG, B
	leaq	BUFFER, BO

	salq	$1 + BASE_SHIFT, %rax
	leaq	(AO, %rax, 1), AO
	leaq	(B,  %rax, 2), B
	leaq	(BO, %rax, 8), BO
#endif

#if defined(LN) || defined(LT)
	unpcklps %xmm2,  %xmm0
	unpcklps %xmm3,  %xmm1

	movaps	 %xmm0,  %xmm2
	unpcklps %xmm1,  %xmm0
	unpckhps %xmm1,  %xmm2

	movapd	 0 * SIZE(B), %xmm1
	movapd	 4 * SIZE(B), %xmm5

	subps	%xmm0,  %xmm1
	subps	%xmm2,  %xmm5
#else
	movsd	 0 * SIZE(AO), %xmm8
	movsd	 2 * SIZE(AO), %xmm10
	movsd	 4 * SIZE(AO), %xmm12
	movsd	 6 * SIZE(AO), %xmm14

	subps	%xmm0, %xmm8
	subps	%xmm1, %xmm10
	subps	%xmm2, %xmm12
	subps	%xmm3, %xmm14
#endif

#ifdef LN
	movaps	  0 * SIZE(AO), %xmm6

	pshufd	 $0xff, %xmm6, %xmm8
	mulps	 %xmm8, %xmm5

	pshufd	 $0xaa, %xmm6, %xmm8
	mulps	 %xmm5, %xmm8
	subps	 %xmm8, %xmm1

	pshufd	 $0x00, %xmm6, %xmm8
	mulps	 %xmm8, %xmm1
#endif

#ifdef LT
	movaps	 0 * SIZE(AO), %xmm6

	pshufd	 $0x00, %xmm6, %xmm8
	mulps	 %xmm8, %xmm1

	pshufd	 $0x55, %xmm6, %xmm8
	mulps	 %xmm1, %xmm8
	subps	 %xmm8, %xmm5

	pshufd	 $0xff, %xmm6, %xmm8
	mulps	 %xmm8, %xmm5
#endif

#ifdef RN
	movaps	 0 * SIZE(B), %xmm0

	pshufd	 $0x00, %xmm0, %xmm2
	mulps	 %xmm2, %xmm8
	pshufd	 $0x55, %xmm0, %xmm2
	mulps	 %xmm8, %xmm2
	subps	 %xmm2, %xmm10
	pshufd	 $0xaa, %xmm0, %xmm2
	mulps	 %xmm8, %xmm2
	subps	 %xmm2, %xmm12
	pshufd	 $0xff, %xmm0, %xmm2
	mulps	 %xmm8, %xmm2
	subps	 %xmm2, %xmm14

	movaps	 4 * SIZE(B), %xmm0

	pshufd	 $0x55, %xmm0, %xmm2
	mulps	 %xmm2, %xmm10
	pshufd	 $0xaa, %xmm0, %xmm2
	mulps	 %xmm10, %xmm2
	subps	 %xmm2, %xmm12
	pshufd	 $0xff, %xmm0, %xmm2
	mulps	 %xmm10, %xmm2
	subps	 %xmm2, %xmm14

	movaps	 8 * SIZE(B), %xmm0

	pshufd	 $0xaa, %xmm0, %xmm2
	mulps	 %xmm2, %xmm12
	pshufd	 $0xff, %xmm0, %xmm2
	mulps	 %xmm12, %xmm2
	subps	 %xmm2, %xmm14

	movaps	 12 * SIZE(B), %xmm0

	pshufd	 $0xff, %xmm0, %xmm2
	mulps	 %xmm2, %xmm14
#endif

#ifdef RT
	movaps	 12 * SIZE(B), %xmm0

	pshufd	 $0xff, %xmm0, %xmm2
	mulps	 %xmm2, %xmm14
	pshufd	 $0xaa, %xmm0, %xmm2
	mulps	 %xmm14, %xmm2
	subps	 %xmm2, %xmm12
	pshufd	 $0x55, %xmm0, %xmm2
	mulps	 %xmm14, %xmm2
	subps	 %xmm2, %xmm10
	pshufd	 $0x00, %xmm0, %xmm2
	mulps	 %xmm14, %xmm2
	subps	 %xmm2, %xmm8

	movaps	  8 * SIZE(B), %xmm0

	pshufd	 $0xaa, %xmm0, %xmm2
	mulps	 %xmm2, %xmm12
	pshufd	 $0x55, %xmm0, %xmm2
	mulps	 %xmm12, %xmm2
	subps	 %xmm2, %xmm10
	pshufd	 $0x00, %xmm0, %xmm2
	mulps	 %xmm12, %xmm2
	subps	 %xmm2, %xmm8

	movaps	  4 * SIZE(B), %xmm0

	pshufd	 $0x55, %xmm0, %xmm2
	mulps	 %xmm2, %xmm10
	pshufd	 $0x00, %xmm0, %xmm2
	mulps	 %xmm10, %xmm2
	subps	 %xmm2, %xmm8

	movaps	  0 * SIZE(B), %xmm0

	pshufd	 $0x00, %xmm0, %xmm2
	mulps	 %xmm2, %xmm8
#endif

#ifdef LN
	subq	$2 * SIZE, CO1
	subq	$2 * SIZE, CO2
#endif

#if defined(LN) || defined(LT)
	movaps	%xmm1,   0 * SIZE(B)
	movaps	%xmm5,   4 * SIZE(B)

	pshufd	$0x00, %xmm1, %xmm2
	pshufd	$0x55, %xmm1, %xmm3
	pshufd	$0xaa, %xmm1, %xmm4
	pshufd	$0xff, %xmm1, %xmm6

	movaps	%xmm2,   0 * SIZE(BO)
	movaps	%xmm3,   4 * SIZE(BO)
	movaps	%xmm4,   8 * SIZE(BO)
	movaps	%xmm6,  12 * SIZE(BO)

	pshufd	$0x00, %xmm5, %xmm2
	pshufd	$0x55, %xmm5, %xmm3
	pshufd	$0xaa, %xmm5, %xmm4
	pshufd	$0xff, %xmm5, %xmm6

	movaps	%xmm2,  16 * SIZE(BO)
	movaps	%xmm3,  20 * SIZE(BO)
	movaps	%xmm4,  24 * SIZE(BO)
	movaps	%xmm6,  28 * SIZE(BO)
#else
	movlps	%xmm8,   0 * SIZE(AO)
	movlps	%xmm10,  2 * SIZE(AO)
	movlps	%xmm12,  4 * SIZE(AO)
	movlps	%xmm14,  6 * SIZE(AO)
#endif

#if defined(LN) || defined(LT)
	movaps	 %xmm1,  %xmm0
	unpcklps %xmm10, %xmm1
	unpckhps %xmm10, %xmm0

	movaps	 %xmm5,  %xmm7
	unpcklps %xmm11, %xmm5
	unpckhps %xmm11, %xmm7

	movaps	 %xmm1,  %xmm10
	unpcklps %xmm5,  %xmm1
	unpckhps %xmm5,  %xmm10

	movaps	 %xmm0,  %xmm11
	unpcklps %xmm7,  %xmm0
	unpckhps %xmm7,  %xmm11

	movlps	%xmm1,  0 * SIZE(CO1)
	movlps	%xmm10, 0 * SIZE(CO2)
	movlps	%xmm0,  0 * SIZE(CO1, LDC, 2)
	movlps	%xmm11, 0 * SIZE(CO2, LDC, 2)
#else
	movlps	%xmm8,   0 * SIZE(CO1)
	movlps	%xmm10,  0 * SIZE(CO2)
	movlps	%xmm12,  0 * SIZE(CO1, LDC, 2)
	movlps	%xmm14,  0 * SIZE(CO2, LDC, 2)
#endif

#ifndef LN
	addq	$2 * SIZE, CO1
	addq	$2 * SIZE, CO2
#endif

#if defined(LT) || defined(RN)
	movq	K,  %rax
	subq	KK, %rax
	leaq	(,%rax, SIZE), %rax
	leaq	(AO, %rax, 2), AO
#ifdef LT
	addq	$8 * SIZE, B
#endif
#endif

#ifdef LN
	subq	$2, KK
	movq	BORIG, B
#endif
#ifdef LT
	addq	$2, KK
#endif
#ifdef RT
	movq	K, %rax
	movq	BORIG, B
	salq	$1 + BASE_SHIFT, %rax
	addq	%rax, AORIG
#endif
	ALIGN_4

.L30:
	testq	$4, M		# four-row block of the M dimension
	je	.L40

#ifdef LN
	movq	K, %rax
	salq	$2 + BASE_SHIFT, %rax
	subq	%rax, AORIG
#endif

#if defined(LN) || defined(RT)
	movq	KK, %rax
	movq	AORIG, AO
	leaq	(, %rax, SIZE), %rax
	leaq	(AO, %rax, 4), AO
#endif

	leaq	BUFFER, BO

#if defined(LN) || defined(RT)
	movq	KK, %rax
	salq	$2 + BASE_SHIFT, %rax
	leaq	(BO, %rax, 4), BO
#endif

	movaps	 0 * SIZE(AO), %xmm8
	movaps	16 * SIZE(AO), %xmm10

	movaps	 0 * SIZE(BO), %xmm9
	movaps	16 * SIZE(BO), %xmm11
	movaps	32 * SIZE(BO), %xmm13
	movaps	48 * SIZE(BO), %xmm15

	pxor	%xmm0, %xmm0
	pxor	%xmm1, %xmm1
	pxor	%xmm2, %xmm2
	pxor	%xmm3, %xmm3

#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	sarq	$3, %rax
	je	.L25
	ALIGN_4

.L22:	# main k loop for the 4-row tile, unrolled 8x
	mulps	%xmm8, %xmm9
	addps	%xmm9, %xmm0
#if defined(OPTERON) && defined(HAVE_PREFETCH)
	PREFETCH	(PREFETCHSIZE +  0) * SIZE(AO)
#endif
	movaps	 4 * SIZE(BO), %xmm9
	mulps	%xmm8, %xmm9
	addps	%xmm9, %xmm1
	movaps	 8 * SIZE(BO), %xmm9
	mulps	%xmm8, %xmm9
	mulps	12 * SIZE(BO), %xmm8
	addps	%xmm9, %xmm2
	movaps	64 * SIZE(BO), %xmm9
	addps	%xmm8, %xmm3
	movaps	 4 * SIZE(AO), %xmm8
	mulps	%xmm8, %xmm11
	addps	%xmm11, %xmm0
	movaps	20 * SIZE(BO), %xmm11
	mulps	%xmm8, %xmm11
	addps	%xmm11, %xmm1
	movaps	24 * SIZE(BO), %xmm11
	mulps	%xmm8, %xmm11
	mulps	28 * SIZE(BO), %xmm8
	addps	%xmm11, %xmm2
	movaps	80 * SIZE(BO), %xmm11
	addps	%xmm8, %xmm3
	movaps	 8 * SIZE(AO), %xmm8
	mulps	%xmm8, %xmm13
	addps	%xmm13, %xmm0
	movaps	36 * SIZE(BO), %xmm13
	mulps	%xmm8, %xmm13
	addps	%xmm13, %xmm1
	movaps	40 * SIZE(BO), %xmm13
	mulps	%xmm8, %xmm13
	mulps	44 * SIZE(BO), %xmm8
	addps	%xmm13, %xmm2
	movaps	96 * SIZE(BO), %xmm13
	addps	%xmm8, %xmm3
	movaps	12 * SIZE(AO), %xmm8
	mulps	%xmm8, %xmm15
	addps	%xmm15, %xmm0
	movaps	52 * SIZE(BO), %xmm15
	mulps	%xmm8, %xmm15
	addps	%xmm15, %xmm1
	movaps	56 * SIZE(BO), %xmm15
	mulps	%xmm8, %xmm15
	mulps	60 * SIZE(BO), %xmm8
	addps	%xmm15, %xmm2
	movaps	112 * SIZE(BO), %xmm15
	addps	%xmm8, %xmm3
	movaps	32 * SIZE(AO), %xmm8

#if defined(OPTERON) && defined(HAVE_PREFETCH)
	PREFETCH	(PREFETCHSIZE + 16) * SIZE(AO)
#endif
	mulps	%xmm10, %xmm9
	addps	%xmm9, %xmm0
	movaps	68 * SIZE(BO), %xmm9
	mulps	%xmm10, %xmm9
	addps	%xmm9, %xmm1
	movaps	72 * SIZE(BO), %xmm9
	mulps	%xmm10, %xmm9
	mulps	76 * SIZE(BO), %xmm10
	addps	%xmm9, %xmm2
	movaps	128 * SIZE(BO), %xmm9
	addps	%xmm10, %xmm3
	movaps	20 * SIZE(AO), %xmm10
	mulps	%xmm10, %xmm11
	addps	%xmm11, %xmm0
	movaps	84 * SIZE(BO), %xmm11
	mulps	%xmm10, %xmm11
	addps	%xmm11, %xmm1
	movaps	88 * SIZE(BO), %xmm11
	mulps	%xmm10, %xmm11
	mulps	92 * SIZE(BO), %xmm10
	addps	%xmm11, %xmm2
	movaps	144 * SIZE(BO), %xmm11
	addps	%xmm10, %xmm3
	movaps	24 * SIZE(AO), %xmm10
	mulps	%xmm10, %xmm13
	addps	%xmm13, %xmm0
	movaps	100 * SIZE(BO), %xmm13
	mulps	%xmm10, %xmm13
	addps	%xmm13, %xmm1
	movaps	104 * SIZE(BO), %xmm13
	mulps	%xmm10, %xmm13
	mulps	108 * SIZE(BO), %xmm10
	addps	%xmm13, %xmm2
	movaps	160 * SIZE(BO), %xmm13
	addps	%xmm10, %xmm3
	movaps	28 * SIZE(AO), %xmm10
	mulps	%xmm10, %xmm15
	addps	%xmm15, %xmm0
	movaps	116 * SIZE(BO), %xmm15
	mulps	%xmm10, %xmm15
	addps	%xmm15, %xmm1
	movaps	120 * SIZE(BO), %xmm15
	mulps	%xmm10, %xmm15
	mulps	124 * SIZE(BO), %xmm10
	addps	%xmm15, %xmm2
	movaps	176 * SIZE(BO), %xmm15
	addps	%xmm10, %xmm3
	movaps	48 * SIZE(AO), %xmm10

	addq   $ 32 * SIZE, AO
	addq   $128 * SIZE, BO
	decq   %rax
	jne    .L22
	ALIGN_4

.L25:
#if defined(LT) || defined(RN)
	movq	KK, %rax
#else
	movq	K, %rax
	subq	KK, %rax
#endif
	andq	$7, %rax		# if (k & 7)
	BRANCH
	je .L28
	ALIGN_4

.L26:	# k-loop remainder for the 4-row tile
	mulps	%xmm8, %xmm9
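The page shows the 1-, 2-, and 4-row tail blocks of the M loop, and each follows the same shape: an unrolled-by-8 accumulation loop (`.L32`, `.L22`), a remainder loop for the leftover k & 7 iterations (`.L36`, `.L26`), then the per-tile substitution (`.L38`). The accumulation itself is ordinary GEMM: each k step multiplies the packed A values by four broadcast B values and adds into the four accumulators %xmm0-%xmm3. A C analogue of the 2-row path, with hypothetical names, assuming the BUFFER packing stores each B value broadcast into four adjacent slots:

    /* C analogue of the .L32/.L36 loops (illustrative, not GotoBLAS API).
     * acc[j] holds column j of a 2x4 tile. Per k step, the A pointer
     * advances 2 floats and the B pointer 16 floats (4 values x 4 copies),
     * matching the addq increments in the remainder loop. */
    static void tile_2x4_accumulate(long kc,
                                    const float *ao,  /* packed A: 2 floats/k  */
                                    const float *bo,  /* packed B: 16 floats/k */
                                    float acc[4][2])
    {
        for (long k = 0; k < kc; k++) {
            for (int j = 0; j < 4; j++)        /* %xmm0..%xmm3 columns   */
                for (int i = 0; i < 2; i++)    /* two A rows in lockstep */
                    acc[j][i] += ao[i] * bo[4 * j + i];
            ao += 2;
            bo += 16;
        }
    }

The eightfold unrolling and the rotation through %xmm9/%xmm11/%xmm13/%xmm15 in the main loop exist to hide load latency; arithmetically, each k step is exactly the loop body above.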
