⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 lib1funcs.asm

📁 linux下的gcc编译器
💻 ASM
📖 第 1 页 / 共 4 页
字号:
! Fall-through chain of block-move entry points (tail of the chain; the
! movstrSI64..movstrSI44 entries precede this chunk).  Each GLOBAL(movstrSInn)
! entry copies one 4-byte word at offset (nn-4) from @r5 to @r4, then falls
! through into the next entry, so entering at movstrSInn copies nn bytes total.
! In:  r4 = dest, r5 = src (word-aligned).  Clobbers: r0.
	mov.l	@(40,r5),r0
	mov.l	r0,@(40,r4)
	.global	GLOBAL(movstrSI40)
GLOBAL(movstrSI40):
	mov.l	@(36,r5),r0
	mov.l	r0,@(36,r4)
	.global	GLOBAL(movstrSI36)
GLOBAL(movstrSI36):
	mov.l	@(32,r5),r0
	mov.l	r0,@(32,r4)
	.global	GLOBAL(movstrSI32)
GLOBAL(movstrSI32):
	mov.l	@(28,r5),r0
	mov.l	r0,@(28,r4)
	.global	GLOBAL(movstrSI28)
GLOBAL(movstrSI28):
	mov.l	@(24,r5),r0
	mov.l	r0,@(24,r4)
	.global	GLOBAL(movstrSI24)
GLOBAL(movstrSI24):
	mov.l	@(20,r5),r0
	mov.l	r0,@(20,r4)
	.global	GLOBAL(movstrSI20)
GLOBAL(movstrSI20):
	mov.l	@(16,r5),r0
	mov.l	r0,@(16,r4)
	.global	GLOBAL(movstrSI16)
GLOBAL(movstrSI16):
	mov.l	@(12,r5),r0
	mov.l	r0,@(12,r4)
	.global	GLOBAL(movstrSI12)
GLOBAL(movstrSI12):
	mov.l	@(8,r5),r0
	mov.l	r0,@(8,r4)
	.global	GLOBAL(movstrSI8)
GLOBAL(movstrSI8):
	mov.l	@(4,r5),r0
	mov.l	r0,@(4,r4)
	.global	GLOBAL(movstrSI4)
GLOBAL(movstrSI4):
	mov.l	@(0,r5),r0
	mov.l	r0,@(0,r4)
GLOBAL(movstrSI0):
	rts			! zero-byte entry: nothing left to copy
	nop

	.align	4

! movstr: copy 64 bytes per iteration from @r5 to @r4.
! r6 is the iteration control, decremented by 16 per pass; loops while r6 > 0.
! Falls out through "done" (defined earlier in the file, outside this chunk)
! when r6 goes non-positive.  Clobbers: r0, r4, r5, r6.
	.global	GLOBAL(movstr)
GLOBAL(movstr):
	mov.l	@(60,r5),r0
	mov.l	r0,@(60,r4)
	mov.l	@(56,r5),r0
	mov.l	r0,@(56,r4)
	mov.l	@(52,r5),r0
	mov.l	r0,@(52,r4)
	mov.l	@(48,r5),r0
	mov.l	r0,@(48,r4)
	mov.l	@(44,r5),r0
	mov.l	r0,@(44,r4)
	mov.l	@(40,r5),r0
	mov.l	r0,@(40,r4)
	mov.l	@(36,r5),r0
	mov.l	r0,@(36,r4)
	mov.l	@(32,r5),r0
	mov.l	r0,@(32,r4)
	mov.l	@(28,r5),r0
	mov.l	r0,@(28,r4)
	mov.l	@(24,r5),r0
	mov.l	r0,@(24,r4)
	mov.l	@(20,r5),r0
	mov.l	r0,@(20,r4)
	mov.l	@(16,r5),r0
	mov.l	r0,@(16,r4)
	mov.l	@(12,r5),r0
	mov.l	r0,@(12,r4)
	mov.l	@(8,r5),r0
	mov.l	r0,@(8,r4)
	mov.l	@(4,r5),r0
	mov.l	r0,@(4,r4)
	mov.l	@(0,r5),r0
	mov.l	r0,@(0,r4)
	add	#-16,r6		! one 64-byte chunk done; count down
	cmp/pl	r6
	bf	done		! "done" label defined before this chunk
	add	#64,r5		! advance source
	bra	GLOBAL(movstr)
	add	#64,r4		! advance dest (delay slot)
#endif

#ifdef L_movstr_i4
! SH4-tuned block move: copies 16 bytes per loop iteration using post-increment
! loads and delayed branches.  r4 = dest, r5 = src, r6 = iteration count (dt).
	.text
	.global	GLOBAL(movstr_i4_even)
	.global	GLOBAL(movstr_i4_odd)
	.global	GLOBAL(movstrSI12_i4)

	.p2align	5
L_movstr_2mod4_end:		! final two words when count ran out mid-block
	mov.l	r0,@(16,r4)
	rts
	mov.l	r1,@(20,r4)	! delay slot: last store

	.p2align	2

GLOBAL(movstr_i4_odd):
	mov.l	@r5+,r1
	add	#-4,r4		! bias dest so the @(4..12,r4) stores line up
	mov.l	@r5+,r2
	mov.l	@r5+,r3
	mov.l	r1,@(4,r4)
	mov.l	r2,@(8,r4)

L_movstr_loop:
	mov.l	r3,@(12,r4)
	dt	r6		! decrement-and-test loop counter
	mov.l	@r5+,r0
	bt/s	L_movstr_2mod4_end
	mov.l	@r5+,r1		! delay slot: load continues either way
	add	#16,r4
L_movstr_start_even:
	mov.l	@r5+,r2
	mov.l	@r5+,r3
	mov.l	r0,@r4
	dt	r6
	mov.l	r1,@(4,r4)
	bf/s	L_movstr_loop
	mov.l	r2,@(8,r4)	! delay slot
	rts
	mov.l	r3,@(12,r4)	! delay slot: final store

GLOBAL(movstr_i4_even):
	mov.l	@r5+,r0
	bra	L_movstr_start_even
	mov.l	@r5+,r1		! delay slot

	.p2align	4
! movstrSI12_i4: straight-line copy of exactly 12 bytes, @r5 -> @r4.
GLOBAL(movstrSI12_i4):
	mov.l	@r5,r0
	mov.l	@(4,r5),r1
	mov.l	@(8,r5),r2
	mov.l	r0,@r4
	mov.l	r1,@(4,r4)
	rts
	mov.l	r2,@(8,r4)	! delay slot
#endif

#ifdef L_mulsi3
	.global	GLOBAL(mulsi3)

! 32x32 -> 32 multiply built from 16-bit partial products (mulu.w).
! r4 =       aabb
! r5 =       ccdd
! r0 = aabb*ccdd  via partial products
!
! if aa == 0 and cc = 0
! r0 = bb*dd
!
! else
! aa = bb*dd + (aa*dd*65536) + (cc*bb*65536)
!
GLOBAL(mulsi3):
	mulu.w  r4,r5		! multiply the lsws  macl=bb*dd
	mov     r5,r3		! r3 = ccdd
	swap.w  r4,r2		! r2 = bbaa
	xtrct   r2,r3		! r3 = aacc
	tst  	r3,r3		! msws zero ?
	bf      hiset
	rts			! yes - then we have the answer
	sts     macl,r0

hiset:	sts	macl,r0		! r0 = bb*dd
	mulu.w	r2,r5		! brewing macl = aa*dd
	sts	macl,r1
	mulu.w	r3,r4		! brewing macl = cc*bb
	sts	macl,r2
	add	r1,r2
	shll16	r2		! cross products contribute at bit 16
	rts
	add	r2,r0		! delay slot: combine with low product
#endif
#endif /* ! __SH5__ */
#ifdef L_sdivsi3_i4
	.title "SH DIVIDE"
!! 4 byte integer Divide code for the Hitachi SH
#ifdef __SH4__
! Signed divide via double-precision FPU: convert, fdiv, truncate back.
!! args in r4 and r5, result in fpul, clobber dr0, dr2
	.global	GLOBAL(sdivsi3_i4)
GLOBAL(sdivsi3_i4):
	lds r4,fpul
	float fpul,dr0		! dr0 = (double) dividend
	lds r5,fpul
	float fpul,dr2		! dr2 = (double) divisor
	fdiv dr2,dr0
	rts
	ftrc dr0,fpul		! delay slot: truncate quotient to int

#elif defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__) || (defined (__SH5__) && ! defined __SH4_NOFPU__)
! Same FPU divide, but must switch FPSCR to double-precision mode first
! (and restore it on exit) on single-precision-default configurations.
!! args in r4 and r5, result in fpul, clobber r2, dr0, dr2
#if ! __SH5__ || __SH5__ == 32
#if __SH5__
	.mode	SHcompact
#endif
	.global	GLOBAL(sdivsi3_i4)
GLOBAL(sdivsi3_i4):
	sts.l fpscr,@-r15	! save caller's FPSCR
	mov #8,r2
	swap.w r2,r2		! r2 = 0x00080000 (FPSCR.PR: double precision)
	lds r2,fpscr
	lds r4,fpul
	float fpul,dr0
	lds r5,fpul
	float fpul,dr2
	fdiv dr2,dr0
	ftrc dr0,fpul
	rts
	lds.l @r15+,fpscr	! delay slot: restore FPSCR
#endif /* ! __SH5__ || __SH5__ == 32 */
#endif /* ! __SH4__ */
#endif

#ifdef L_sdivsi3
/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with
   sh3e code.  */
#if (! defined(__SH4__) && ! defined (__SH4_SINGLE__)) || defined (__linux__)
!!
!! Steve Chamberlain
!! sac@cygnus.com
!!
!!
!! args in r4 and r5, result in r0 clobber r1,r2,r3
	.global	GLOBAL(sdivsi3)
#if __SHMEDIA__
#if __SH5__ == 32
	.section	.text..SHmedia32,"ax"
#else
	.text
#endif
	.align	2
#if 0
/* The assembly code that follows is a hand-optimized version of the C
   code that follows.  Note that the registers that are modified are
   exactly those listed as clobbered in the patterns divsi3_i1 and
   divsi3_i1_media.

	int __sdivsi3 (i, j)
     int i, j;
{
  register unsigned long long r18 asm ("r18");
  register unsigned long long r19 asm ("r19");
  register unsigned long long r0 asm ("r0") = 0;
  register unsigned long long r1 asm ("r1") = 1;
  register int r2 asm ("r2") = i >> 31;
  register int r3 asm ("r3") = j >> 31;

  r2 = r2 ? r2 : r1;
  r3 = r3 ? r3 : r1;
  r18 = i * r2;
  r19 = j * r3;
  r2 *= r3;

  r19 <<= 31;
  r1 <<= 31;
  do
    if (r18 >= r19)
      r0 |= r1, r18 -= r19;
  while (r19 >>= 1, r1 >>= 1);

  return r2 * (int)r0;
}
*/
// Shift-and-subtract signed divide (see reference C above): work on absolute
// values, then reapply the combined sign held in r2.
GLOBAL(sdivsi3):
	pt/l	LOCAL(sdivsi3_dontadd), tr2
	pt/l	LOCAL(sdivsi3_loop), tr1
	ptabs/l	r18, tr0		// tr0 = return address
	movi	0, r0			// quotient accumulator
	movi	1, r1			// current quotient bit
	shari.l	r4, 31, r2		// r2 = sign mask of dividend (-1 or 0)
	shari.l	r5, 31, r3		// r3 = sign mask of divisor
	cmveq	r2, r1, r2		// map 0 -> +1 so r2/r3 are +/-1 factors
	cmveq	r3, r1, r3
	muls.l	r4, r2, r18		// r18 = |dividend|
	muls.l	r5, r3, r19		// r19 = |divisor|
	muls.l	r2, r3, r2		// r2 = sign of the quotient (+/-1)
	shlli	r19, 31, r19
	shlli	r1, 31, r1
LOCAL(sdivsi3_loop):
	bgtu	r19, r18, tr2		// divisor chunk too big? skip subtract
	or	r0, r1, r0		// set this quotient bit
	sub	r18, r19, r18
LOCAL(sdivsi3_dontadd):
	shlri	r1, 1, r1
	shlri	r19, 1, r19
	bnei	r1, 0, tr1		// loop until the bit shifts out
	muls.l	r0, r2, r0		// apply sign to quotient
	add.l	r0, r63, r0		// sign-extend 32-bit result
	blink	tr0, r63
#else /* ! 0 */
 // Division by multiplication with an iteratively refined reciprocal
 // approximation (mmulfx.w Newton steps), followed by exact correction
 // of the candidate quotient.  Order of instructions is latency-tuned;
 // do not reorder.
 // inputs: r4,r5
 // clobbered: r1,r2,r3,r18,r19,r20,r21,r25,tr0
 // result in r0
GLOBAL(sdivsi3):
 // can create absolute value without extra latency,
 // but dependent on proper sign extension of inputs:
 // shari.l r5,31,r2
 // xor r5,r2,r20
 // sub r20,r2,r20 // r20 is now absolute value of r5, zero-extended.
 shari.l r5,31,r2
 ori r2,1,r2			// r2 = +/-1 (sign of divisor)
 muls.l r5,r2,r20 // r20 is now absolute value of r5, zero-extended.
 movi 0xffffffffffffbb0c,r19 // shift count eqiv 76
 shari.l r4,31,r3
 nsb r20,r0			// normalization shift for the divisor
 shlld r20,r0,r25
 shlri r25,48,r25		// top 16 bits of normalized divisor
 sub r19,r25,r1			// initial reciprocal estimate
 mmulfx.w r1,r1,r2
 mshflo.w r1,r63,r1
 // If r4 was to be used in-place instead of r21, could use this sequence
 // to compute absolute:
 // sub r63,r4,r19 // compute absolute value of r4
 // shlri r4,32,r3 // into lower 32 bit of r4, keeping
 // mcmv r19,r3,r4 // the sign in the upper 32 bits intact.
 ori r3,1,r3			// r3 = +/-1 (sign of dividend)
 mmulfx.w r25,r2,r2
 sub r19,r0,r0
 muls.l r4,r3,r21		// r21 = |dividend|
 msub.w r1,r2,r2
 addi r2,-2,r1			// refined reciprocal
 mulu.l r21,r1,r19
 mmulfx.w r2,r2,r2
 shlli r1,15,r1
 shlrd r19,r0,r19		// first quotient estimate
 mulu.l r19,r20,r3
 mmacnfx.wl r25,r2,r1
 ptabs r18,tr0			// prepare return
 sub r21,r3,r25			// remainder after first estimate

 mulu.l r25,r1,r2
 addi r0,14,r0
 xor r4,r5,r18
 shlrd r2,r0,r2
 mulu.l r2,r20,r3
 add r19,r2,r19			// accumulate quotient correction
 shari.l r18,31,r18		// r18 = sign of the true quotient (0 or -1)
 sub r25,r3,r25

 mulu.l r25,r1,r2
 sub r25,r20,r25
 add r19,r18,r19
 shlrd r2,r0,r2
 mulu.l r2,r20,r3
 addi r25,1,r25
 add r19,r2,r19			// second correction

 cmpgt r25,r3,r25		// final +1 adjustment if remainder still >= divisor
 add.l r19,r25,r0
 xor r0,r18,r0			// apply sign (r19 was pre-biased by r18 above)
 blink tr0,r63
#endif
#elif defined __SHMEDIA__
/* m5compact-nofpu */
 // Plain one-bit-per-iteration restoring divide on absolute values.
 // clobbered: r18,r19,r20,r21,r25,tr0,tr1,tr2
	.mode	SHmedia
	.section	.text..SHmedia32,"ax"
	.align	2
GLOBAL(sdivsi3):
	pt/l LOCAL(sdivsi3_dontsub), tr0
	pt/l LOCAL(sdivsi3_loop), tr1
	ptabs/l r18,tr2			// tr2 = return address
	shari.l r4,31,r18		// sign masks of the operands
	shari.l r5,31,r19
	xor r4,r18,r20
	xor r5,r19,r21
	sub.l r20,r18,r20		// r20 = |dividend|
	sub.l r21,r19,r21		// r21 = |divisor|
	xor r18,r19,r19			// r19 = sign of quotient (0 or -1)
	shlli r21,32,r25		// divisor in the high 32 bits
	addi r25,-1,r21
	addz.l r20,r63,r20		// zero-extend |dividend|
LOCAL(sdivsi3_loop):
	shlli r20,1,r20			// shift quotient/remainder pair left
	bgeu/u r21,r20,tr0
	sub r20,r21,r20			// subtract divisor, sets quotient bit 0
LOCAL(sdivsi3_dontsub):
	addi.l r25,-1,r25		// r25 doubles as the 32-step loop counter
	bnei r25,-32,tr1
	xor r20,r19,r20			// negate quotient if signs differed
	sub.l r20,r19,r0
	blink tr2,r63
#else /* ! 
__SHMEDIA__ */
! Classic SH1/SH2/SH3 signed divide using div0s/div1: one div1 step per
! quotient bit, fully unrolled (32 steps).  Division by zero returns 0.
! In: r4 = dividend, r5 = divisor.  Out: r0 = quotient.  Clobbers r1,r2,r3,T.
GLOBAL(sdivsi3):
	mov	r4,r1
	mov	r5,r0

	tst	r0,r0
	bt	div0		! divisor == 0 -> return 0
	mov	#0,r2
	div0s	r2,r1		! seed sign bits for the divide-step sequence
	subc	r3,r3
	subc	r2,r1
	div0s	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	addc	r2,r1		! fold in final carry / sign correction
	rts
	mov	r1,r0		! delay slot: quotient to r0

div0:	rts
	mov	#0,r0		! delay slot: divide-by-zero result
#endif /* ! __SHMEDIA__ */
#endif /* ! __SH4__ */
#endif

#ifdef L_udivsi3_i4
	.title "SH DIVIDE"
!! 4 byte integer Divide code for the Hitachi SH
#ifdef __SH4__
! Unsigned divide via the double-precision FPU.  r1 is set to 0x80000000
! (mov #1 + rotr); XORing each operand with it biases it into signed range,
! and adding the constant 2147483648.0 (at L1) after conversion restores the
! unsigned value as an exact double before the fdiv.
!! args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4
	.global	GLOBAL(udivsi3_i4)
GLOBAL(udivsi3_i4):
	mov #1,r1
	cmp/hi r1,r5
	bf trivial		! divisor <= 1: quotient is the dividend
	rotr r1			! r1 = 0x80000000
	xor r1,r4
	lds r4,fpul
	mova L1,r0
#ifdef FMOVD_WORKS
	fmov.d @r0+,dr4
#else
#ifdef __LITTLE_ENDIAN__
	fmov.s @r0+,fr5
	fmov.s @r0,fr4
#else
	fmov.s @r0+,fr4
	fmov.s @r0,fr5
#endif
#endif
	float fpul,dr0
	xor r1,r5
	lds r5,fpul
	float fpul,dr2
	fadd dr4,dr0		! undo the 2^31 bias on both operands
	fadd dr4,dr2
	fdiv dr2,dr0
	rts
	ftrc dr0,fpul		! delay slot: truncate quotient

trivial:
	rts
	lds r4,fpul		! delay slot: result = dividend

	.align 2
#ifdef FMOVD_WORKS
	.align 3	! make double below 8 byte aligned.
#endif
L1:
	.double 2147483648

#elif defined (__SH5__) && ! defined (__SH4_NOFPU__)
#if ! __SH5__ || __SH5__ == 32
! SHmedia version: zero-extend both operands to 64 bits, divide as doubles.
!! args in r4 and r5, result in fpul, clobber r20, r21, dr0, fr33
	.mode	SHmedia
	.global	GLOBAL(udivsi3_i4)
GLOBAL(udivsi3_i4):
	addz.l	r4,r63,r20	! zero-extend dividend
	addz.l	r5,r63,r21	! zero-extend divisor
	fmov.qd	r20,dr0
	fmov.qd	r21,dr32
	ptabs	r18,tr0
	float.qd dr0,dr0
	float.qd dr32,dr32
	fdiv.d	dr0,dr32,dr0
	ftrc.dq dr0,dr32
	fmov.s fr33,fr32	! move result into the fpul-compatible slot
	blink tr0,r63
#endif /* ! __SH5__ || __SH5__ == 32 */
#elif defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
! As the SH4 version above, but additionally loads a double-precision FPSCR
! value from L1 (and restores the caller's FPSCR on exit).
!! args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4
	.global	GLOBAL(udivsi3_i4)
GLOBAL(udivsi3_i4):
	mov #1,r1
	cmp/hi r1,r5
	bf trivial
	sts.l fpscr,@-r15	! save caller's FPSCR
	mova L1,r0
	lds.l @r0+,fpscr	! switch to double-precision mode
	rotr r1			! r1 = 0x80000000
	xor r1,r4
	lds r4,fpul
#ifdef FMOVD_WORKS
	fmov.d @r0+,dr4
#else
#ifdef __LITTLE_ENDIAN__
	fmov.s @r0+,fr5
	fmov.s @r0,fr4
#else
	fmov.s @r0+,fr4
	fmov.s @r0,fr5
#endif
#endif
	float fpul,dr0
	xor r1,r5
	lds r5,fpul
	float fpul,dr2
	fadd dr4,dr0
	fadd dr4,dr2
	fdiv dr2,dr0
	ftrc dr0,fpul
	rts
	lds.l @r15+,fpscr	! delay slot: restore FPSCR

#ifdef FMOVD_WORKS
	.align 3	! make double below 8 byte aligned.
#endif
trivial:
	rts
	lds r4,fpul

	.align 2
L1:
#ifndef FMOVD_WORKS
	.long 0x80000		! FPSCR value: PR=1 (double precision)
#else
	.long 0x180000		! FPSCR value: PR=1, SZ=1 (fmov.d works)
#endif
	.double 2147483648

#endif /* ! __SH4__ */
#endif

#ifdef L_udivsi3
/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with
   sh3e code.  */
#if (! defined(__SH4__) && ! defined (__SH4_SINGLE__)) || defined (__linux__)
!! args in r4 and r5, result in r0, clobbers r4, pr, and t bit
	.global	GLOBAL(udivsi3)
#if __SHMEDIA__
#if __SH5__ == 32
	.section	.text..SHmedia32,"ax"
#else
	.text
#endif
	.align	2
#if 0
/* The assembly code that follows is a hand-optimized version of the C
   code that follows.  Note that the registers that are modified are
   exactly those listed as clobbered in the patterns udivsi3_i1 and
   udivsi3_i1_media.

	unsigned __udivsi3 (i, j)
    unsigned i, j;
{
  register unsigned long long r0 asm ("r0") = 0;
  register unsigned long long r18 asm ("r18") = 1;
  register unsigned long long r4 asm ("r4") = i;
  register unsigned long long r19 asm ("r19") = j;

  r19 <<= 31;
  r18 <<= 31;
  do
    if (r4 >= r19)
      r0 |= r18, r4 -= r19;
  while (r19 >>= 1, r18 >>= 1);

  return r0;
}
*/
GLOBAL(udivsi3):
	pt/l	LOCAL(udivsi3_dontadd), tr2
	pt/l	LOCAL(udivsi3_loop), tr1
	ptabs/l	r18, tr0

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -