
lib1funcs.asm

Mac OS X 10.4.9 for x86 Source Code - gcc implementation source code
ASM
Page 1 of 5
	shlri	r19, 1, r19
	bnei	r1, 0, tr1
	muls.l	r0, r2, r0
	add.l	r0, r63, r0
	blink	tr0, r63
#else /* ! 0 */
 // inputs: r4,r5
 // clobbered: r1,r2,r3,r18,r19,r20,r21,r25,tr0
 // result in r0
GLOBAL(sdivsi3):
 // can create absolute value without extra latency,
 // but dependent on proper sign extension of inputs:
 // shari.l r5,31,r2
 // xor r5,r2,r20
 // sub r20,r2,r20 // r20 is now absolute value of r5, zero-extended.
 shari.l r5,31,r2
 ori r2,1,r2
 muls.l r5,r2,r20 // r20 is now absolute value of r5, zero-extended.
 movi 0xffffffffffffbb0c,r19 // shift count eqiv 76
 shari.l r4,31,r3
 nsb r20,r0
 shlld r20,r0,r25
 shlri r25,48,r25
 sub r19,r25,r1
 mmulfx.w r1,r1,r2
 mshflo.w r1,r63,r1
 // If r4 was to be used in-place instead of r21, could use this sequence
 // to compute absolute:
 // sub r63,r4,r19 // compute absolute value of r4
 // shlri r4,32,r3 // into lower 32 bit of r4, keeping
 // mcmv r19,r3,r4 // the sign in the upper 32 bits intact.
 ori r3,1,r3
 mmulfx.w r25,r2,r2
 sub r19,r0,r0
 muls.l r4,r3,r21
 msub.w r1,r2,r2
 addi r2,-2,r1
 mulu.l r21,r1,r19
 mmulfx.w r2,r2,r2
 shlli r1,15,r1
 shlrd r19,r0,r19
 mulu.l r19,r20,r3
 mmacnfx.wl r25,r2,r1
 ptabs r18,tr0
 sub r21,r3,r25
 mulu.l r25,r1,r2
 addi r0,14,r0
 xor r4,r5,r18
 shlrd r2,r0,r2
 mulu.l r2,r20,r3
 add r19,r2,r19
 shari.l r18,31,r18
 sub r25,r3,r25
 mulu.l r25,r1,r2
 sub r25,r20,r25
 add r19,r18,r19
 shlrd r2,r0,r2
 mulu.l r2,r20,r3
 addi r25,1,r25
 add r19,r2,r19
 cmpgt r25,r3,r25
 add.l r19,r25,r0
 xor r0,r18,r0
 blink tr0,r63
#endif
#elif defined __SHMEDIA__
/* m5compact-nofpu */
 // clobbered: r18,r19,r20,r21,r25,tr0,tr1,tr2
	.mode	SHmedia
	.section	.text..SHmedia32,"ax"
	.align	2
GLOBAL(sdivsi3):
	pt/l LOCAL(sdivsi3_dontsub), tr0
	pt/l LOCAL(sdivsi3_loop), tr1
	ptabs/l r18,tr2
	shari.l r4,31,r18
	shari.l r5,31,r19
	xor r4,r18,r20
	xor r5,r19,r21
	sub.l r20,r18,r20
	sub.l r21,r19,r21
	xor r18,r19,r19
	shlli r21,32,r25
	addi r25,-1,r21
	addz.l r20,r63,r20
LOCAL(sdivsi3_loop):
	shlli r20,1,r20
	bgeu/u r21,r20,tr0
	sub r20,r21,r20
LOCAL(sdivsi3_dontsub):
	addi.l r25,-1,r25
	bnei r25,-32,tr1
	xor r20,r19,r20
	sub.l r20,r19,r0
	blink tr2,r63
#else /* ! __SHMEDIA__ */
GLOBAL(sdivsi3):
	mov	r4,r1
	mov	r5,r0

	tst	r0,r0
	bt	div0
	mov	#0,r2
	div0s	r2,r1
	subc	r3,r3
	subc	r2,r1
	div0s	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	div1	r0,r3
	rotcl	r1
	addc	r2,r1
	rts
	mov	r1,r0
div0:	rts
	mov	#0,r0

	ENDFUNC(GLOBAL(sdivsi3))
#endif /* ! __SHMEDIA__ */
#endif /* ! __SH4__ */
#endif
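
/* A rough C model of the sign handling and per-bit shift-and-subtract
   scheme used by the SHmedia-compact GLOBAL(sdivsi3) above: fold both
   operands to non-negative values, divide with a conventional restoring
   loop, then negate the quotient if the operand signs differed.  This is
   only a sketch for reference - the function name is illustrative and the
   loop is written in the textbook form rather than as a transcription of
   the (divisor << 32) - 1 trick the assembly uses to collect quotient
   bits in the low half of the same register.

#include <stdint.h>

int32_t
sdivsi3_model (int32_t n, int32_t d)	// d == 0 not handled
{
  uint32_t un = n < 0 ? 0u - (uint32_t) n : (uint32_t) n;
  uint32_t ud = d < 0 ? 0u - (uint32_t) d : (uint32_t) d;
  int neg = (n < 0) != (d < 0);
  uint32_t q = 0;
  uint64_t r = 0;	// 64-bit so the shifted remainder cannot overflow
  int i;

  for (i = 31; i >= 0; i--)
    {
      r = (r << 1) | ((un >> i) & 1);	// bring down the next dividend bit
      if (r >= ud)
	{
	  r -= ud;			// divisor fits: subtract and
	  q |= (uint32_t) 1 << i;	// record a quotient bit
	}
    }
  return (int32_t) (neg ? 0u - q : q);	// apply the folded sign
}
*/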
#ifdef L_udivsi3_i4
	.title "SH DIVIDE"
!! 4 byte integer Divide code for the Renesas SH
#ifdef __SH4__
!! args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4,
!! and t bit
	.global	GLOBAL(udivsi3_i4)
	FUNC(GLOBAL(udivsi3_i4))
GLOBAL(udivsi3_i4):
	mov #1,r1
	cmp/hi r1,r5
	bf trivial

	rotr r1
	xor r1,r4
	lds r4,fpul
	mova L1,r0
#ifdef FMOVD_WORKS
	fmov.d @r0+,dr4
#else
#ifdef __LITTLE_ENDIAN__
	fmov.s @r0+,fr5
	fmov.s @r0,fr4
#else
	fmov.s @r0+,fr4
	fmov.s @r0,fr5
#endif
#endif
	float fpul,dr0
	xor r1,r5
	lds r5,fpul
	float fpul,dr2
	fadd dr4,dr0
	fadd dr4,dr2
	fdiv dr2,dr0
	rts
	ftrc dr0,fpul

trivial:
	rts
	lds r4,fpul

	.align 2
#ifdef FMOVD_WORKS
	.align 3	! make double below 8 byte aligned.
#endif
L1:
	.double 2147483648

	ENDFUNC(GLOBAL(udivsi3_i4))
#elif defined (__SH5__) && ! defined (__SH4_NOFPU__)
#if ! __SH5__ || __SH5__ == 32
!! args in r4 and r5, result in fpul, clobber r20, r21, dr0, fr33
	.mode	SHmedia
	.global	GLOBAL(udivsi3_i4)
	FUNC(GLOBAL(udivsi3_i4))
GLOBAL(udivsi3_i4):
	addz.l	r4,r63,r20
	addz.l	r5,r63,r21
	fmov.qd	r20,dr0
	fmov.qd	r21,dr32
	ptabs	r18,tr0
	float.qd	dr0,dr0
	float.qd	dr32,dr32
	fdiv.d	dr0,dr32,dr0
	ftrc.dq	dr0,dr32
	fmov.s	fr33,fr32
	blink	tr0,r63

	ENDFUNC(GLOBAL(udivsi3_i4))
#endif /* ! __SH5__ || __SH5__ == 32 */
#elif defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
!! args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4
	.global	GLOBAL(udivsi3_i4)
GLOBAL(udivsi3_i4):
	mov #1,r1
	cmp/hi r1,r5
	bf trivial

	sts.l fpscr,@-r15
	mova L1,r0
	lds.l @r0+,fpscr
	rotr r1
	xor r1,r4
	lds r4,fpul
#ifdef FMOVD_WORKS
	fmov.d @r0+,dr4
#else
#ifdef __LITTLE_ENDIAN__
	fmov.s @r0+,fr5
	fmov.s @r0,fr4
#else
	fmov.s @r0+,fr4
	fmov.s @r0,fr5
#endif
#endif
	float fpul,dr0
	xor r1,r5
	lds r5,fpul
	float fpul,dr2
	fadd dr4,dr0
	fadd dr4,dr2
	fdiv dr2,dr0
	ftrc dr0,fpul
	rts
	lds.l @r15+,fpscr

#ifdef FMOVD_WORKS
	.align 3	! make double below 8 byte aligned.
#endif
trivial:
	rts
	lds r4,fpul

	.align 2
L1:
#ifndef FMOVD_WORKS
	.long 0x80000
#else
	.long 0x180000
#endif
	.double 2147483648

	ENDFUNC(GLOBAL(udivsi3_i4))
#endif /* ! __SH4__ */
#endif
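
/* A rough C model of the trick behind the FPU-based udivsi3_i4 variants
   above.  The hardware int-to-double conversion (float fpul,drN) is a
   signed conversion, so each unsigned operand first has its top bit
   flipped, is converted as a signed value, and then has 2147483648.0
   (the L1 constant) added back to recover its true unsigned value; a
   double-precision divide followed by a truncating ftrc then yields the
   unsigned quotient.  The function name is illustrative; the j <= 1
   early return mirrors the 'trivial' path.

#include <stdint.h>

uint32_t
udivsi3_i4_model (uint32_t i, uint32_t j)
{
  double di, dj;

  if (j <= 1)		// 'trivial' path: the quotient is i itself
    return i;

  di = (double) (int32_t) (i ^ 0x80000000u) + 2147483648.0;
  dj = (double) (int32_t) (j ^ 0x80000000u) + 2147483648.0;

  return (uint32_t) (di / dj);	// ftrc truncates toward zero
}
*/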
#ifdef L_udivsi3
/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with
   sh2e/sh3e code.  */
#if (! defined(__SH4__) && ! defined (__SH4_SINGLE__)) || defined (__linux__)
!! args in r4 and r5, result in r0, clobbers r4, pr, and t bit
	.global	GLOBAL(udivsi3)
	FUNC(GLOBAL(udivsi3))
#if __SHMEDIA__
#if __SH5__ == 32
	.section	.text..SHmedia32,"ax"
#else
	.text
#endif
	.align	2
#if 0
/* The assembly code that follows is a hand-optimized version of the C
   code that follows.  Note that the registers that are modified are
   exactly those listed as clobbered in the patterns udivsi3_i1 and
   udivsi3_i1_media.

	unsigned
	__udivsi3 (i, j)
	    unsigned i, j;
	{
	  register unsigned long long r0 asm ("r0") = 0;
	  register unsigned long long r18 asm ("r18") = 1;
	  register unsigned long long r4 asm ("r4") = i;
	  register unsigned long long r19 asm ("r19") = j;

	  r19 <<= 31;
	  r18 <<= 31;

	  do
	    if (r4 >= r19)
	      r0 |= r18, r4 -= r19;
	  while (r19 >>= 1, r18 >>= 1);

	  return r0;
	}
*/
GLOBAL(udivsi3):
	pt/l	LOCAL(udivsi3_dontadd), tr2
	pt/l	LOCAL(udivsi3_loop), tr1
	ptabs/l	r18, tr0
	movi	0, r0
	movi	1, r18
	addz.l	r5, r63, r19
	addz.l	r4, r63, r4
	shlli	r19, 31, r19
	shlli	r18, 31, r18
LOCAL(udivsi3_loop):
	bgtu	r19, r4, tr2
	or	r0, r18, r0
	sub	r4, r19, r4
LOCAL(udivsi3_dontadd):
	shlri	r18, 1, r18
	shlri	r19, 1, r19
	bnei	r18, 0, tr1
	blink	tr0, r63
#else
GLOBAL(udivsi3):
 // inputs: r4,r5
 // clobbered: r18,r19,r20,r21,r22,r25,tr0
 // result in r0.
 addz.l r5,r63,r22
 nsb r22,r0
 shlld r22,r0,r25
 shlri r25,48,r25
 movi 0xffffffffffffbb0c,r20 // shift count eqiv 76
 sub r20,r25,r21
 mmulfx.w r21,r21,r19
 mshflo.w r21,r63,r21
 ptabs r18,tr0
 mmulfx.w r25,r19,r19
 sub r20,r0,r0
 /* bubble */
 msub.w r21,r19,r19
 addi r19,-2,r21
	/* It would be nice for scheduling to do this add to r21
	   before the msub.w, but we need a different value for
	   r19 to keep errors under control.  */
 mulu.l r4,r21,r18
 mmulfx.w r19,r19,r19
 shlli r21,15,r21
 shlrd r18,r0,r18
 mulu.l r18,r22,r20
 mmacnfx.wl r25,r19,r21
 /* bubble */
 sub r4,r20,r25
 mulu.l r25,r21,r19
 addi r0,14,r0
 /* bubble */
 shlrd r19,r0,r19
 mulu.l r19,r22,r20
 add r18,r19,r18
 /* bubble */
 sub.l r25,r20,r25
 mulu.l r25,r21,r19
 addz.l r25,r63,r25
 sub r25,r22,r25
 shlrd r19,r0,r19
 mulu.l r19,r22,r20
 addi r25,1,r25
 add r18,r19,r18
 cmpgt r25,r20,r25
 add.l r18,r25,r0
 blink tr0,r63
#endif
#elif defined (__SHMEDIA__)
/* m5compact-nofpu - more emphasis on code size than on speed, but don't
   ignore speed altogether - div1 needs 9 cycles, subc 7 and rotcl 4.
   So use a short shmedia loop.  */
 // clobbered: r20,r21,r25,tr0,tr1,tr2
	.mode	SHmedia
	.section	.text..SHmedia32,"ax"
	.align	2
GLOBAL(udivsi3):
 pt/l LOCAL(udivsi3_dontsub), tr0
 pt/l LOCAL(udivsi3_loop), tr1
 ptabs/l r18,tr2
 shlli r5,32,r25
 addi r25,-1,r21
 addz.l r4,r63,r20
LOCAL(udivsi3_loop):
 shlli r20,1,r20
 bgeu/u r21,r20,tr0
 sub r20,r21,r20
LOCAL(udivsi3_dontsub):
 addi.l r25,-1,r25
 bnei r25,-32,tr1
 add.l r20,r63,r0
 blink tr2,r63
#else /* ! defined (__SHMEDIA__) */
LOCAL(div8):
 div1 r5,r4
LOCAL(div7):
 div1 r5,r4; div1 r5,r4; div1 r5,r4
 div1 r5,r4; div1 r5,r4; div1 r5,r4; rts; div1 r5,r4

LOCAL(divx4):
 div1 r5,r4; rotcl r0
 div1 r5,r4; rotcl r0
 div1 r5,r4; rotcl r0
 rts; div1 r5,r4

GLOBAL(udivsi3):
 sts.l pr,@-r15
 extu.w r5,r0
 cmp/eq r5,r0
#ifdef __sh1__
 bf LOCAL(large_divisor)
#else
 bf/s LOCAL(large_divisor)
#endif
 div0u
 swap.w r4,r0
 shlr16 r4
 bsr LOCAL(div8)
 shll16 r5
 bsr LOCAL(div7)
 div1 r5,r4
 xtrct r4,r0
 xtrct r0,r4
 bsr LOCAL(div8)
 swap.w r4,r4
 bsr LOCAL(div7)
 div1 r5,r4
 lds.l @r15+,pr
 xtrct r4,r0
 swap.w r0,r0
 rotcl r0
 rts
 shlr16 r5

LOCAL(large_divisor):
#ifdef __sh1__
 div0u
#endif
 mov #0,r0
 xtrct r4,r0
 xtrct r0,r4
 bsr LOCAL(divx4)
 rotcl r0
 bsr LOCAL(divx4)
 rotcl r0
 bsr LOCAL(divx4)
 rotcl r0
 bsr LOCAL(divx4)
 rotcl r0
 lds.l @r15+,pr
 rts
 rotcl r0

	ENDFUNC(GLOBAL(udivsi3))
#endif /* ! __SHMEDIA__ */
#endif /* __SH4__ */
#endif /* L_udivsi3 */
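
/* The SHmedia udivsi3 above (and the udivdi3 that follows) avoid a
   per-bit loop: they build a fixed-point approximation of 1/divisor with
   mmulfx.w/mmacnfx.wl refinement steps, multiply to get a quotient
   estimate, and then use multiply-back-and-subtract plus a final
   cmpgt/add adjustment to absorb the remaining error.  A rough C model
   of that estimate-and-correct pattern, with an ordinary 64-bit division
   standing in for the polynomial reciprocal refinement and illustrative
   names throughout:

#include <stdint.h>

uint32_t
udivsi3_recip_model (uint32_t n, uint32_t d)	// d != 0 assumed
{
  // Fixed-point reciprocal scaled by 2^32, rounded down, so the first
  // quotient estimate can only be too small, never too large.
  uint64_t recip = (0xFFFFFFFFFFFFFFFFull / d) >> 32;

  uint32_t q = (uint32_t) ((n * recip) >> 32);	// first estimate
  uint32_t r = n - q * d;			// its remainder

  // Correction: the estimate is off by at most a few units, so a couple
  // of compare/subtract steps (a loop here, unrolled in the assembly)
  // bring the remainder below the divisor.
  while (r >= d)
    {
      r -= d;
      q++;
    }
  return q;
}
*/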
#ifdef L_udivdi3
#ifdef __SHMEDIA__
	.mode	SHmedia
	.section	.text..SHmedia32,"ax"
	.align	2
	.global	GLOBAL(udivdi3)
	FUNC(GLOBAL(udivdi3))
GLOBAL(udivdi3):
	shlri r3,1,r4
	nsb r4,r22
	shlld r3,r22,r6
	shlri r6,49,r5
	movi 0xffffffffffffbaf1,r21 /* .l shift count 17.  */
	sub r21,r5,r1
	mmulfx.w r1,r1,r4
	mshflo.w r1,r63,r1
	sub r63,r22,r20 // r63 == 64 % 64
	mmulfx.w r5,r4,r4
	pta LOCAL(large_divisor),tr0
	addi r20,32,r9
	msub.w r1,r4,r1
	madd.w r1,r1,r1
	mmulfx.w r1,r1,r4
	shlri r6,32,r7
	bgt/u r9,r63,tr0 // large_divisor
	mmulfx.w r5,r4,r4
	shlri r2,32+14,r19
	addi r22,-31,r0
	msub.w r1,r4,r1

	mulu.l r1,r7,r4
	addi r1,-3,r5
	mulu.l r5,r19,r5
	sub r63,r4,r4	// Negate to make sure r1 ends up <= 1/r2
	shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
	                 the case may be, %0000000000000000 000.11111111111, still */
	muls.l r1,r4,r4 /* leaving at least one sign bit.  */
	mulu.l r5,r3,r8
	mshalds.l r1,r21,r1
	shari r4,26,r4
	shlld r8,r0,r8
	add r1,r4,r1	// 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
	sub r2,r8,r2
	/* Can do second step of 64 : 32 div now, using r1 and the rest in r2.  */
	shlri r2,22,r21
	mulu.l r21,r1,r21
	shlld r5,r0,r8
	addi r20,30-22,r0
	shlrd r21,r0,r21
	mulu.l r21,r3,r5
	add r8,r21,r8
	mcmpgt.l r21,r63,r21 // See Note 1
	addi r20,30,r0
	mshfhi.l r63,r21,r21
	sub r2,r5,r2
	andc r2,r21,r2
	/* small divisor: need a third divide step */
	mulu.l r2,r1,r7
	ptabs r18,tr0
	addi r2,1,r2
	shlrd r7,r0,r7
