📄 lib1funcs.asm
字号:
! ===========================================================================
! libgcc support routines for the Hitachi/Renesas SH (GCC lib1funcs.asm).
! Each L_* section is assembled separately into its own libgcc member.
! Convention throughout: integer arguments in r4/r5, result in r0 (or in
! fpul for the SH4 FPU-based variants).  SH branches (rts/bra/jmp/bt\s/bf\s)
! execute one delay-slot instruction: the instruction written *after* the
! branch runs before control actually transfers.
!
! NOTE(review): this chunk begins mid-way through the L_lshrsi3 section;
! the entry point and dispatch table of ___lshrsi3 are above this excerpt,
! so the fragment below is only the tail cases for large shift counts.
! ===========================================================================

! --- tail of ___lshrsi3: logical shift right by a fixed count per label ----
! Each LOCAL(lshrsi3_N) performs the remaining N bits of shifting using the
! cheapest shlr/shlr2/shlr8/shlr16 combination, then returns.
        shlr2   r0              ! fall-through tail of a larger count above
LOCAL(lshrsi3_26):
        shlr2   r0
LOCAL(lshrsi3_24):
        shlr16  r0
        rts
        shlr8   r0              ! delay slot: completes 16+8 (+2+2) bits
LOCAL(lshrsi3_31):
        shlr2   r0
LOCAL(lshrsi3_29):
        shlr2   r0
LOCAL(lshrsi3_27):
        shlr2   r0
LOCAL(lshrsi3_25):
        shlr16  r0
        shlr8   r0
        rts
        shlr    r0              ! delay slot: final single-bit shift
LOCAL(lshrsi3_0):
        rts
        nop                     ! shift by zero: return r0 unchanged
#endif

#ifdef L_movstr
        .text
! Block-copy helpers: copy longwords from @r5 (src) to @r4 (dest) using r0
! as scratch.  ___movstrSIn copies the last n bytes; ___movstr copies 64
! bytes per pass, then dispatches into the SIn entry table for the tail.
! done all the large groups, do the remainder
! jump to movstr+
done:
        add     #64,r5
        mova    ___movstrSI0,r0 ! r0 = address of end of the SIn table
        shll2   r6              ! r6 *= 4: one 4-byte table entry per longword
        add     r6,r0           ! NOTE(review): r6 <= 0 here, so this indexes
                                ! backwards from SI0 into the entries above
        jmp     @r0
        add     #64,r4          ! delay slot: advance destination
        .align  4
        .global ___movstrSI64
___movstrSI64:
        mov.l   @(60,r5),r0
        mov.l   r0,@(60,r4)
        .global ___movstrSI60
___movstrSI60:
        mov.l   @(56,r5),r0
        mov.l   r0,@(56,r4)
        .global ___movstrSI56
___movstrSI56:
        mov.l   @(52,r5),r0
        mov.l   r0,@(52,r4)
        .global ___movstrSI52
___movstrSI52:
        mov.l   @(48,r5),r0
        mov.l   r0,@(48,r4)
        .global ___movstrSI48
___movstrSI48:
        mov.l   @(44,r5),r0
        mov.l   r0,@(44,r4)
        .global ___movstrSI44
___movstrSI44:
        mov.l   @(40,r5),r0
        mov.l   r0,@(40,r4)
        .global ___movstrSI40
___movstrSI40:
        mov.l   @(36,r5),r0
        mov.l   r0,@(36,r4)
        .global ___movstrSI36
___movstrSI36:
        mov.l   @(32,r5),r0
        mov.l   r0,@(32,r4)
        .global ___movstrSI32
___movstrSI32:
        mov.l   @(28,r5),r0
        mov.l   r0,@(28,r4)
        .global ___movstrSI28
___movstrSI28:
        mov.l   @(24,r5),r0
        mov.l   r0,@(24,r4)
        .global ___movstrSI24
___movstrSI24:
        mov.l   @(20,r5),r0
        mov.l   r0,@(20,r4)
        .global ___movstrSI20
___movstrSI20:
        mov.l   @(16,r5),r0
        mov.l   r0,@(16,r4)
        .global ___movstrSI16
___movstrSI16:
        mov.l   @(12,r5),r0
        mov.l   r0,@(12,r4)
        .global ___movstrSI12
___movstrSI12:
        mov.l   @(8,r5),r0
        mov.l   r0,@(8,r4)
        .global ___movstrSI8
___movstrSI8:
        mov.l   @(4,r5),r0
        mov.l   r0,@(4,r4)
        .global ___movstrSI4
___movstrSI4:
        mov.l   @(0,r5),r0
        mov.l   r0,@(0,r4)
___movstrSI0:
        rts
        nop

        .align  4
        .global ___movstr
! ___movstr: bulk loop -- copies 16 longwords (64 bytes) per iteration,
! decrementing the longword count in r6 by 16 each pass; when 16 or fewer
! longwords remain it falls out through `done` above to finish via the table.
___movstr:
        mov.l   @(60,r5),r0
        mov.l   r0,@(60,r4)
        mov.l   @(56,r5),r0
        mov.l   r0,@(56,r4)
        mov.l   @(52,r5),r0
        mov.l   r0,@(52,r4)
        mov.l   @(48,r5),r0
        mov.l   r0,@(48,r4)
        mov.l   @(44,r5),r0
        mov.l   r0,@(44,r4)
        mov.l   @(40,r5),r0
        mov.l   r0,@(40,r4)
        mov.l   @(36,r5),r0
        mov.l   r0,@(36,r4)
        mov.l   @(32,r5),r0
        mov.l   r0,@(32,r4)
        mov.l   @(28,r5),r0
        mov.l   r0,@(28,r4)
        mov.l   @(24,r5),r0
        mov.l   r0,@(24,r4)
        mov.l   @(20,r5),r0
        mov.l   r0,@(20,r4)
        mov.l   @(16,r5),r0
        mov.l   r0,@(16,r4)
        mov.l   @(12,r5),r0
        mov.l   r0,@(12,r4)
        mov.l   @(8,r5),r0
        mov.l   r0,@(8,r4)
        mov.l   @(4,r5),r0
        mov.l   r0,@(4,r4)
        mov.l   @(0,r5),r0
        mov.l   r0,@(0,r4)
        add     #-16,r6         ! 16 longwords copied this pass
        cmp/pl  r6
        bf      done            ! r6 <= 0: handle the remainder via the table
        add     #64,r5
        bra     ___movstr
        add     #64,r4          ! delay slot: advance destination
#endif

#ifdef L_movstr_i4
#if defined(__SH4__) || defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
        .text
        .global ___movstr_i4_even
        .global ___movstr_i4_odd
        .global ___movstrSI12_i4

! SH4-tuned block copy: r4 = dest, r5 = src, r6 = loop count (dt-driven).
! The _even/_odd entry points select which phase of the software-pipelined
! loop to start in; loads via @r5+ post-increment, stores offset from r4.
        .p2align 5
L_movstr_2mod4_end:
        mov.l   r0,@(16,r4)
        rts
        mov.l   r1,@(20,r4)     ! delay slot: final store

        .p2align 2
___movstr_i4_odd:
        mov.l   @r5+,r1
        add     #-4,r4          ! bias dest so the @(4..12,r4) stores line up
        mov.l   @r5+,r2
        mov.l   @r5+,r3
        mov.l   r1,@(4,r4)
        mov.l   r2,@(8,r4)
L_movstr_loop:
        mov.l   r3,@(12,r4)
        dt      r6              ! r6--; T = 1 when it hits zero
        mov.l   @r5+,r0
        bt/s    L_movstr_2mod4_end
        mov.l   @r5+,r1         ! delay slot: executes on both paths
        add     #16,r4
L_movstr_start_even:
        mov.l   @r5+,r2
        mov.l   @r5+,r3
        mov.l   r0,@r4
        dt      r6
        mov.l   r1,@(4,r4)
        bf/s    L_movstr_loop
        mov.l   r2,@(8,r4)      ! delay slot: executes on both paths
        rts
        mov.l   r3,@(12,r4)     ! delay slot: final store
___movstr_i4_even:
        mov.l   @r5+,r0
        bra     L_movstr_start_even
        mov.l   @r5+,r1         ! delay slot: prime the pipeline

        .p2align 4
! Copy exactly 12 bytes from @r5 to @r4 (clobbers r0-r2).
___movstrSI12_i4:
        mov.l   @r5,r0
        mov.l   @(4,r5),r1
        mov.l   @(8,r5),r2
        mov.l   r0,@r4
        mov.l   r1,@(4,r4)
        rts
        mov.l   r2,@(8,r4)      ! delay slot: last store
#endif /* ! __SH4__ */
#endif

#ifdef L_mulsi3
        .global ___mulsi3
! 32x32 -> 32 multiply built from 16x16 mulu partial products (for SH parts
! without a 32-bit multiply).  Notation: each register is two 16-bit halves.
! r4 = aabb
! r5 = ccdd
! r0 = aabb*ccdd via partial products
!
! if aa == 0 and cc == 0
! r0 = bb*dd
!
! else
! aa = bb*dd + (aa*dd*65536) + (cc*bb*65536)
!
___mulsi3:
        mulu    r4,r5           ! multiply the lsws macl=bb*dd
        mov     r5,r3           ! r3 = ccdd
        swap.w  r4,r2           ! r2 = bbaa
        xtrct   r2,r3           ! r3 = aacc
        tst     r3,r3           ! msws zero ?
        bf      hiset
        rts                     ! yes - then we have the answer
        sts     macl,r0         ! delay slot: fetch bb*dd

hiset:  sts     macl,r0         ! r0 = bb*dd
        mulu    r2,r5           ! brewing macl = aa*dd
        sts     macl,r1
        mulu    r3,r4           ! brewing macl = cc*bb
        sts     macl,r2
        add     r1,r2           ! sum the two cross products
        shll16  r2              ! they contribute to the high word only
        rts
        add     r2,r0           ! delay slot: combine with the low product
#endif

#ifdef L_sdivsi3_i4
        .title "SH DIVIDE"
!! 4 byte integer Divide code for the Hitachi SH
#ifdef __SH4__
!! args in r4 and r5, result in fpul, clobber dr0, dr2
        .global ___sdivsi3_i4
! Signed 32-bit divide via the double-precision FPU: fpul = r4 / r5.
___sdivsi3_i4:
        lds     r4,fpul
        float   fpul,dr0        ! dr0 = (double) r4
        lds     r5,fpul
        float   fpul,dr2        ! dr2 = (double) r5
        fdiv    dr2,dr0         ! dr0 = dr0 / dr2
        rts
        ftrc    dr0,fpul        ! delay slot: truncate quotient to integer
#elif defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
!! args in r4 and r5, result in fpul, clobber r2, dr0, dr2
        .global ___sdivsi3_i4
! Same algorithm, but the single-precision default FPSCR must be switched
! to double precision (PR bit) first, and the caller's FPSCR restored.
___sdivsi3_i4:
        sts.l   fpscr,@-r15     ! save caller's FPSCR on the stack
        mov     #8,r2
        swap.w  r2,r2           ! r2 = 0x00080000: FPSCR with PR set
        lds     r2,fpscr
        lds     r4,fpul
        float   fpul,dr0
        lds     r5,fpul
        float   fpul,dr2
        fdiv    dr2,dr0
        ftrc    dr0,fpul
        rts
        lds.l   @r15+,fpscr     ! delay slot: restore caller's FPSCR
#endif /* ! __SH4__ */
#endif

#ifdef L_sdivsi3
/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with sh3e code. */
#if ! defined(__SH4__) && ! defined (__SH4_SINGLE__)
!!
!! Steve Chamberlain
!! sac@cygnus.com
!!
!!
!! args in r4 and r5, result in r0 clobber r1,r2,r3
        .global ___sdivsi3
! Signed 32-bit divide, r0 = r4 / r5, using the div0s/div1 step divider
! fully unrolled to 32 steps.  Divide-by-zero returns 0.
___sdivsi3:
        mov     r4,r1           ! r1 = dividend
        mov     r5,r0           ! r0 = divisor
        tst     r0,r0
        bt      div0            ! divisor == 0 -> return 0
        mov     #0,r2
        div0s   r2,r1           ! T = sign of dividend
        subc    r3,r3           ! r3 = 0 or -1 (dividend sign mask)
        subc    r2,r1           ! pre-adjust dividend for signed division
        div0s   r0,r3           ! initialize M/Q/T for the div1 sequence
        rotcl   r1              ! 32 x (rotcl quotient-bit in, div1 step)
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1              ! shift in the last quotient bit
        addc    r2,r1           ! fold in final carry (sign correction)
        rts
        mov     r1,r0           ! delay slot: return quotient
div0:
        rts
        mov     #0,r0           ! delay slot: quotient 0 on divide-by-zero
#endif /* ! __SH4__ */
#endif

#ifdef L_udivsi3_i4
        .title "SH DIVIDE"
!! 4 byte integer Divide code for the Hitachi SH
#ifdef __SH4__
!! args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4
        .global ___udivsi3_i4
! Unsigned 32-bit divide via the double FPU.  Unsigned-to-double conversion
! trick: flip the sign bit (xor with r1 = 0x80000000), convert as signed,
! then add 2^31 (dr4, loaded from L1) back to both operands.
___udivsi3_i4:
        mov     #1,r1
        cmp/hi  r1,r5
        bf      trivial         ! divisor <= 1: result is the dividend
        rotr    r1              ! r1 = 0x80000000
        xor     r1,r4           ! bias dividend into signed range
        lds     r4,fpul
        mova    L1,r0           ! r0 = &2^31 constant
#ifdef FMOVD_WORKS
        fmov.d  @r0+,dr4        ! dr4 = 2^31
#else
#ifdef __LITTLE_ENDIAN__
        fmov.s  @r0+,fr5        ! load the double halfword-by-halfword,
        fmov.s  @r0,fr4         ! swapped for little-endian register pairing
#else
        fmov.s  @r0+,fr4
        fmov.s  @r0,fr5
#endif
#endif
        float   fpul,dr0        ! dr0 = (double)(r4 ^ 0x80000000)
        xor     r1,r5           ! bias divisor likewise
        lds     r5,fpul
        float   fpul,dr2
        fadd    dr4,dr0         ! undo the bias: + 2^31
        fadd    dr4,dr2
        fdiv    dr2,dr0         ! dr0 = dividend / divisor
        rts
        ftrc    dr0,fpul        ! delay slot: truncate quotient
trivial:
        rts
        lds     r4,fpul         ! delay slot: quotient = dividend
        .align  2
L1:
        .double 2147483648
#elif defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
!! args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4
        .global ___udivsi3_i4
! As above, but first loads a double-precision FPSCR image from L1 (the
! .long before the constant) and restores the caller's FPSCR on return.
___udivsi3_i4:
        mov     #1,r1
        cmp/hi  r1,r5
        bf      trivial         ! divisor <= 1: result is the dividend
        sts.l   fpscr,@-r15     ! save caller's FPSCR
        mova    L1,r0
        lds.l   @r0+,fpscr      ! switch to double precision; r0 -> constant
        rotr    r1              ! r1 = 0x80000000
        xor     r1,r4
        lds     r4,fpul
#ifdef FMOVD_WORKS
        fmov.d  @r0+,dr4        ! dr4 = 2^31
#else
#ifdef __LITTLE_ENDIAN__
        fmov.s  @r0+,fr5
        fmov.s  @r0,fr4
#else
        fmov.s  @r0+,fr4
        fmov.s  @r0,fr5
#endif
#endif
        float   fpul,dr0
        xor     r1,r5
        lds     r5,fpul
        float   fpul,dr2
        fadd    dr4,dr0         ! undo the 2^31 bias on both operands
        fadd    dr4,dr2
        fdiv    dr2,dr0
        ftrc    dr0,fpul
        rts
        lds.l   @r15+,fpscr     ! delay slot: restore caller's FPSCR
trivial:
        rts
        lds     r4,fpul         ! delay slot: quotient = dividend
        .align  2
L1:
! FPSCR image consumed by the lds.l above: PR set (0x80000), plus SZ
! (0x180000) when double moves are used on a big-endian FMOVD build.
#if defined (__LITTLE_ENDIAN__) || ! defined (FMOVD_WORKS)
        .long   0x80000
#else
        .long   0x180000
#endif
        .double 2147483648
#endif /* ! __SH4__ */
#endif

#ifdef L_udivsi3
/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with sh3e code. */
#if ! defined(__SH4__) && ! defined (__SH4_SINGLE__)
!!
!! Steve Chamberlain
!! sac@cygnus.com
!!
!!
!! args in r4 and r5, result in r0, clobbers r4, pr, and t bit
        .global ___udivsi3
! Unsigned 32-bit divide, r0 = r4 / r5, via 32 unrolled rotcl/div1 steps.
! The shortway/vshortway labels allow entering with fewer steps when the
! caller knows the dividend is small.
___udivsi3:
longway:
        mov     #0,r0
        div0u
        ! get one bit from the msb of the numerator into the T
        ! bit and divide it by whats in r5.  Put the answer bit
        ! into the T bit so it can come out again at the bottom
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
shortway:
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
vshortway:
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4 ; div1 r5,r0
        rotcl   r4              ! shift the last quotient bit into r4
ret:    rts
        mov     r4,r0           ! delay slot: return quotient
#endif /* __SH4__ */
#endif

#ifdef L_set_fpscr
#if defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__)
        .global ___set_fpscr
! ___set_fpscr(r4): load FPSCR from r4 and keep the two-entry table
! ___fpscr_values[] consistent with it.  The table holds the FPSCR image
! with the precision/size mode bits set both ways, so mode-switching code
! can reload either variant.  The swap.w/or/xor sequence edits the PR/SZ
! (and, without FMOVD, FR-related) bits in the halfword-swapped image.
___set_fpscr:
        lds     r4,fpscr
        mov.l   ___set_fpscr_L1,r1      ! r1 = &___fpscr_values
        swap.w  r4,r0                   ! work on the swapped image
        or      #24,r0                  ! set bits 19-20 (PR|SZ) of the image
#ifndef FMOVD_WORKS
        xor     #16,r0
#endif
#if defined(__SH4__)
        swap.w  r0,r3
        mov.l   r3,@(4,r1)              ! store first variant
#else /* defined(__SH3E__) || defined(__SH4_SINGLE*__) */
        swap.w  r0,r2
        mov.l   r2,@r1                  ! store first variant
#endif
#ifndef FMOVD_WORKS
        xor     #8,r0                   ! flip to the other mode's image
#else
        xor     #24,r0
#endif
#if defined(__SH4__)
        swap.w  r0,r2
        rts
        mov.l   r2,@r1                  ! delay slot: store second variant
#else /* defined(__SH3E__) || defined(__SH4_SINGLE*__) */
        swap.w  r0,r3
        rts
        mov.l   r3,@(4,r1)              ! delay slot: store second variant
#endif
        .align 2
___set_fpscr_L1:
        .long   ___fpscr_values
#ifdef __ELF__
        .comm   ___fpscr_values,8,4
#else
        .comm   ___fpscr_values,8
#endif /* ELF */
#endif /* SH3E / SH4 */
#endif /* L_set_fpscr */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -