! lib1funcs.asm — SH (SuperH) libgcc support routines (extracted fragment)
! --- Tail of the __lshrsi3 dispatch table: byte offsets of the handlers
!     for shift counts 28..31, relative to LOCAL(lshrsi3_table).
!     (Entries for counts 0..27 precede this fragment.)
        .byte   LOCAL(lshrsi3_28)-LOCAL(lshrsi3_table)
        .byte   LOCAL(lshrsi3_29)-LOCAL(lshrsi3_table)
        .byte   LOCAL(lshrsi3_30)-LOCAL(lshrsi3_table)
        .byte   LOCAL(lshrsi3_31)-LOCAL(lshrsi3_table)

! Logical-right-shift ladders: each labelled entry point falls through the
! chain below it, accumulating shifts on r0 (shlr = 1 bit, shlr2 = 2 bits,
! shlr8 = 8 bits, shlr16 = 16 bits) until r0 has been shifted right by the
! labelled count.  NOTE: SH branch delay slot — the instruction after each
! rts still executes.

! Even counts 2/4/6: fall-through chain of 2-bit shifts.
LOCAL(lshrsi3_6):
        shlr2   r0
LOCAL(lshrsi3_4):
        shlr2   r0
LOCAL(lshrsi3_2):
        rts
        shlr2   r0              ! delay slot: final 2 bits

! Odd counts 1/3/5/7: 2-bit chain ending in a single-bit shift.
LOCAL(lshrsi3_7):
        shlr2   r0
LOCAL(lshrsi3_5):
        shlr2   r0
LOCAL(lshrsi3_3):
        shlr2   r0
LOCAL(lshrsi3_1):
        rts
        shlr    r0              ! delay slot: final 1 bit

! Counts 8/10/12/14: 2-bit chain plus one 8-bit shift.
LOCAL(lshrsi3_14):
        shlr2   r0
LOCAL(lshrsi3_12):
        shlr2   r0
LOCAL(lshrsi3_10):
        shlr2   r0
LOCAL(lshrsi3_8):
        rts
        shlr8   r0              ! delay slot: 8 bits

! Counts 9/11/13/15: as above plus a final 1-bit shift.
LOCAL(lshrsi3_15):
        shlr2   r0
LOCAL(lshrsi3_13):
        shlr2   r0
LOCAL(lshrsi3_11):
        shlr2   r0
LOCAL(lshrsi3_9):
        shlr8   r0
        rts
        shlr    r0              ! delay slot: final 1 bit

! Counts 16/18/20/22: 2-bit chain plus one 16-bit shift.
LOCAL(lshrsi3_22):
        shlr2   r0
LOCAL(lshrsi3_20):
        shlr2   r0
LOCAL(lshrsi3_18):
        shlr2   r0
LOCAL(lshrsi3_16):
        rts
        shlr16  r0              ! delay slot: 16 bits

! Counts 17/19/21/23.
LOCAL(lshrsi3_23):
        shlr2   r0
LOCAL(lshrsi3_21):
        shlr2   r0
LOCAL(lshrsi3_19):
        shlr2   r0
LOCAL(lshrsi3_17):
        shlr16  r0
        rts
        shlr    r0              ! delay slot: final 1 bit

! Counts 24/26/28/30: 16-bit shift then 8-bit shift plus 2-bit chain.
LOCAL(lshrsi3_30):
        shlr2   r0
LOCAL(lshrsi3_28):
        shlr2   r0
LOCAL(lshrsi3_26):
        shlr2   r0
LOCAL(lshrsi3_24):
        shlr16  r0
        rts
        shlr8   r0              ! delay slot: 8 bits

! Counts 25/27/29/31.
LOCAL(lshrsi3_31):
        shlr2   r0
LOCAL(lshrsi3_29):
        shlr2   r0
LOCAL(lshrsi3_27):
        shlr2   r0
LOCAL(lshrsi3_25):
        shlr16  r0
        shlr8   r0
        rts
        shlr    r0              ! delay slot: final 1 bit

! Count 0: nothing to do.
LOCAL(lshrsi3_0):
        rts
        nop                     ! delay slot
#endif

#ifdef L_movstr
        .text
! done all the large groups, do the remainder
! jump to movstr+
! Remainder handler for GLOBAL(movstr): on entry r6 <= 0 holds the residual
! count (in 4-longword units, biased negative by the main loop).
done:
        add     #64,r5                  ! advance source past the last 64-byte group
        mova    GLOBAL(movstrSI0),r0    ! r0 = address of end of the copy ladder
        shll2   r6                      ! r6 *= 4: each ladder stage is 4 bytes of code
        add     r6,r0                   ! index BACKWARDS from movstrSI0 (r6 <= 0 here)
        jmp     @r0                     ! dispatch into the ladder below
        add     #64,r4                  ! delay slot: advance destination too

        .align  4
! Unrolled copy ladder: entering at GLOBAL(movstrSIn) copies n bytes
! (n/4 longwords) from @r5 to @r4, each stage falling through to the next.
! Clobbers r0.
        .global GLOBAL(movstrSI64)
GLOBAL(movstrSI64):
        mov.l   @(60,r5),r0
        mov.l   r0,@(60,r4)
        .global GLOBAL(movstrSI60)
GLOBAL(movstrSI60):
        mov.l   @(56,r5),r0
        mov.l   r0,@(56,r4)
        .global GLOBAL(movstrSI56)
GLOBAL(movstrSI56):
        mov.l   @(52,r5),r0
        mov.l   r0,@(52,r4)
        .global GLOBAL(movstrSI52)
GLOBAL(movstrSI52):
        mov.l   @(48,r5),r0
        mov.l   r0,@(48,r4)
        .global GLOBAL(movstrSI48)
GLOBAL(movstrSI48):
        mov.l   @(44,r5),r0
        mov.l   r0,@(44,r4)
        .global GLOBAL(movstrSI44)
GLOBAL(movstrSI44):
        mov.l   @(40,r5),r0
        mov.l   r0,@(40,r4)
        .global GLOBAL(movstrSI40)
GLOBAL(movstrSI40):
        mov.l   @(36,r5),r0
        mov.l   r0,@(36,r4)
        .global GLOBAL(movstrSI36)
GLOBAL(movstrSI36):
        mov.l   @(32,r5),r0
        mov.l   r0,@(32,r4)
        .global GLOBAL(movstrSI32)
GLOBAL(movstrSI32):
        mov.l   @(28,r5),r0
        mov.l   r0,@(28,r4)
        .global GLOBAL(movstrSI28)
GLOBAL(movstrSI28):
        mov.l   @(24,r5),r0
        mov.l   r0,@(24,r4)
        .global GLOBAL(movstrSI24)
GLOBAL(movstrSI24):
        mov.l   @(20,r5),r0
        mov.l   r0,@(20,r4)
        .global GLOBAL(movstrSI20)
GLOBAL(movstrSI20):
        mov.l   @(16,r5),r0
        mov.l   r0,@(16,r4)
        .global GLOBAL(movstrSI16)
GLOBAL(movstrSI16):
        mov.l   @(12,r5),r0
        mov.l   r0,@(12,r4)
        .global GLOBAL(movstrSI12)
GLOBAL(movstrSI12):
        mov.l   @(8,r5),r0
        mov.l   r0,@(8,r4)
        .global GLOBAL(movstrSI8)
GLOBAL(movstrSI8):
        mov.l   @(4,r5),r0
        mov.l   r0,@(4,r4)
        .global GLOBAL(movstrSI4)
GLOBAL(movstrSI4):
        mov.l   @(0,r5),r0
        mov.l   r0,@(0,r4)
GLOBAL(movstrSI0):
        rts
        nop                             ! delay slot

        .align  4
! Bulk copy: copies 64 bytes per pass from @r5 to @r4 while more work
! remains (r6 counted in 4-longword units; decremented by 16 per pass),
! then falls into 'done' above to dispatch the residue through the ladder.
! Clobbers r0; advances r4, r5; consumes r6.
        .global GLOBAL(movstr)
GLOBAL(movstr):
        mov.l   @(60,r5),r0
        mov.l   r0,@(60,r4)
        mov.l   @(56,r5),r0
        mov.l   r0,@(56,r4)
        mov.l   @(52,r5),r0
        mov.l   r0,@(52,r4)
        mov.l   @(48,r5),r0
        mov.l   r0,@(48,r4)
        mov.l   @(44,r5),r0
        mov.l   r0,@(44,r4)
        mov.l   @(40,r5),r0
        mov.l   r0,@(40,r4)
        mov.l   @(36,r5),r0
        mov.l   r0,@(36,r4)
        mov.l   @(32,r5),r0
        mov.l   r0,@(32,r4)
        mov.l   @(28,r5),r0
        mov.l   r0,@(28,r4)
        mov.l   @(24,r5),r0
        mov.l   r0,@(24,r4)
        mov.l   @(20,r5),r0
        mov.l   r0,@(20,r4)
        mov.l   @(16,r5),r0
        mov.l   r0,@(16,r4)
        mov.l   @(12,r5),r0
        mov.l   r0,@(12,r4)
        mov.l   @(8,r5),r0
        mov.l   r0,@(8,r4)
        mov.l   @(4,r5),r0
        mov.l   r0,@(4,r4)
        mov.l   @(0,r5),r0
        mov.l   r0,@(0,r4)
        add     #-16,r6                 ! 16 longwords (64 bytes) done this pass
        cmp/pl  r6                      ! anything left?
        bf      done                    ! no: handle the (now biased) remainder
        add     #64,r5
        bra     GLOBAL(movstr)
        add     #64,r4                  ! delay slot
#endif

#ifdef L_movstr_i4
        .text
! SH4-tuned block copies: two interleaved 16-byte stages keep loads paired.
! In: r4 = dest, r5 = src, r6 = group count — assumes r6 counts 16-byte
! groups (TODO confirm against callers).  Clobbers r0-r3; advances r4, r5.
        .global GLOBAL(movstr_i4_even)
        .global GLOBAL(movstr_i4_odd)
        .global GLOBAL(movstrSI12_i4)

        .p2align        5
! Loop exit path (taken from L_movstr_loop when dt set T): store the two
! longwords already loaded and return.
L_movstr_2mod4_end:
        mov.l   r0,@(16,r4)
        rts
        mov.l   r1,@(20,r4)             ! delay slot

        .p2align        2
GLOBAL(movstr_i4_odd):
        mov.l   @r5+,r1
        add     #-4,r4                  ! bias dest so the 4/8/12 offsets line up
        mov.l   @r5+,r2
        mov.l   @r5+,r3
        mov.l   r1,@(4,r4)
        mov.l   r2,@(8,r4)
L_movstr_loop:
        mov.l   r3,@(12,r4)
        dt      r6                      ! decrement count; T=1 when it reaches 0
        mov.l   @r5+,r0
        bt/s    L_movstr_2mod4_end
        mov.l   @r5+,r1                 ! delay slot: executes on both paths
        add     #16,r4
L_movstr_start_even:
        mov.l   @r5+,r2
        mov.l   @r5+,r3
        mov.l   r0,@r4
        dt      r6
        mov.l   r1,@(4,r4)
        bf/s    L_movstr_loop
        mov.l   r2,@(8,r4)              ! delay slot: executes on both paths
        rts
        mov.l   r3,@(12,r4)             ! delay slot: final store
GLOBAL(movstr_i4_even):
        mov.l   @r5+,r0
        bra     L_movstr_start_even
        mov.l   @r5+,r1                 ! delay slot

        .p2align        4
! Copy exactly 12 bytes (3 longwords) from @r5 to @r4.  Clobbers r0-r2.
GLOBAL(movstrSI12_i4):
        mov.l   @r5,r0
        mov.l   @(4,r5),r1
        mov.l   @(8,r5),r2
        mov.l   r0,@r4
        mov.l   r1,@(4,r4)
        rts
        mov.l   r2,@(8,r4)              ! delay slot
#endif

#ifdef L_mulsi3
        .global GLOBAL(mulsi3)

! 32x32 -> 32 multiply from 16-bit partial products (for parts without mul.l).
! r4 = aabb
! r5 = ccdd
! r0 = aabb*ccdd  via partial products
!
! if aa == 0 and cc = 0
! r0 = bb*dd
!
! else
! aa = bb*dd + (aa*dd*65536) + (cc*bb*65536)
!
GLOBAL(mulsi3):
        mulu.w  r4,r5           ! multiply the lsws  macl=bb*dd
        mov     r5,r3           ! r3 = ccdd
        swap.w  r4,r2           ! r2 = bbaa
        xtrct   r2,r3           ! r3 = aacc
        tst     r3,r3           ! msws zero ?
        bf      hiset
        rts                     ! yes - then we have the answer
        sts     macl,r0         ! delay slot

hiset:  sts     macl,r0         ! r0 = bb*dd
        mulu.w  r2,r5           ! brewing macl = aa*dd
        sts     macl,r1
        mulu.w  r3,r4           ! brewing macl = cc*bb
        sts     macl,r2
        add     r1,r2
        shll16  r2              ! cross terms only affect the high half
        rts
        add     r2,r0           ! delay slot: combine with low product
#endif
#endif /* ! __SH5__ */

#ifdef L_sdivsi3_i4
        .title "SH DIVIDE"
!! 4 byte integer Divide code for the Hitachi SH
#ifdef __SH4__
!! args in r4 and r5, result in fpul, clobber dr0, dr2
! Signed divide via double-precision FPU: every int32 is exactly
! representable in a double, so the result truncates exactly.
        .global GLOBAL(sdivsi3_i4)
GLOBAL(sdivsi3_i4):
        lds     r4,fpul
        float   fpul,dr0        ! dr0 = (double) dividend
        lds     r5,fpul
        float   fpul,dr2        ! dr2 = (double) divisor
        fdiv    dr2,dr0         ! dr0 = dr0 / dr2
        rts
        ftrc    dr0,fpul        ! delay slot: truncate to int, result in fpul
#elif defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__) || (defined (__SH5__) && ! defined __SH4_NOFPU__)
!! args in r4 and r5, result in fpul, clobber r2, dr0, dr2
#if ! __SH5__ || __SH5__ == 32
#if __SH5__
        .mode   SHcompact
#endif
! Same as above, but the default FP mode is single precision, so switch
! FPSCR to double precision around the divide and restore it afterwards.
        .global GLOBAL(sdivsi3_i4)
GLOBAL(sdivsi3_i4):
        sts.l   fpscr,@-r15     ! save caller's FP control/status
        mov     #8,r2
        swap.w  r2,r2           ! r2 = 0x00080000: FPSCR.PR (double precision)
        lds     r2,fpscr
        lds     r4,fpul
        float   fpul,dr0
        lds     r5,fpul
        float   fpul,dr2
        fdiv    dr2,dr0
        ftrc    dr0,fpul
        rts
        lds.l   @r15+,fpscr     ! delay slot: restore caller's FPSCR
#endif /* ! __SH5__ || __SH5__ == 32 */
#endif /* ! __SH4__ */
#endif

#ifdef L_sdivsi3
/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with
   sh3e code.  */
#if (! defined(__SH4__) && ! defined (__SH4_SINGLE__)) || defined (__linux__)
!!
!! Steve Chamberlain
!! sac@cygnus.com
!!
!!

!! args in r4 and r5, result in r0 clobber r1,r2,r3

        .global GLOBAL(sdivsi3)
#if __SHMEDIA__
#if __SH5__ == 32
        .section        .text..SHmedia32,"ax"
#else
        .text
#endif
        .align  2
/* The assembly code that follows is a hand-optimized version of the C
   code that follows.  Note that the registers that are modified are
   exactly those listed as clobbered in the patterns divsi3_i1 and
   divsi3_i1_media.

int __sdivsi3 (i, j)
     int i, j;
{
  register unsigned long long r18 asm ("r18");
  register unsigned long long r19 asm ("r19");
  register unsigned long long r0 asm ("r0") = 0;
  register unsigned long long r1 asm ("r1") = 1;
  register int r2 asm ("r2") = i >> 31;
  register int r3 asm ("r3") = j >> 31;

  r2 = r2 ? r2 : r1;
  r3 = r3 ? r3 : r1;
  r18 = i * r2;
  r19 = j * r3;
  r2 *= r3;
  r19 <<= 31;
  r1 <<= 31;
  do
    if (r18 >= r19)
      r0 |= r1, r18 -= r19;
  while (r19 >>= 1, r1 >>= 1);
  return r2 * (int)r0;
}
*/
GLOBAL(sdivsi3):
        pt/l    LOCAL(sdivsi3_dontadd), tr2
        pt/l    LOCAL(sdivsi3_loop), tr1
        ptabs/l r18, tr0                ! return address
        movi    0, r0                   ! quotient accumulator
        movi    1, r1                   ! current quotient bit
        shari.l r4, 31, r2              ! r2 = sign of dividend (-1 or 0)
        shari.l r5, 31, r3              ! r3 = sign of divisor
        cmveq   r2, r1, r2              ! map 0 -> 1, keep -1
        cmveq   r3, r1, r3
        muls.l  r4, r2, r18             ! r18 = |dividend|
        muls.l  r5, r3, r19             ! r19 = |divisor|
        muls.l  r2, r3, r2              ! r2 = sign of the quotient
        shlli   r19, 31, r19            ! align divisor with the top bit
        shlli   r1, 31, r1
LOCAL(sdivsi3_loop):
        bgtu    r19, r18, tr2           ! divisor chunk > remainder: skip subtract
        or      r0, r1, r0              ! set this quotient bit
        sub     r18, r19, r18
LOCAL(sdivsi3_dontadd):
        shlri   r1, 1, r1
        shlri   r19, 1, r19
        bnei    r1, 0, tr1              ! loop until the bit shifts out
        muls.l  r0, r2, r0              ! apply quotient sign
        add.l   r0, r63, r0             ! sign-extend 32-bit result
        blink   tr0, r63                ! return
#else
! Classic SH step division: div0s sets up the sign logic, then one
! rotcl/div1 pair per result bit (32 pairs for a 32-bit quotient).
GLOBAL(sdivsi3):
        mov     r4,r1                   ! r1 = dividend
        mov     r5,r0                   ! r0 = divisor

        tst     r0,r0
        bt      div0                    ! divide by zero: return 0
        mov     #0,r2
        div0s   r2,r1                   ! initialise sign flags from dividend
        subc    r3,r3                   ! r3 = 0 or -1 (dividend sign extension)
        subc    r2,r1
        div0s   r0,r3                   ! initialise step division with divisor sign
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        div1    r0,r3
        rotcl   r1
        addc    r2,r1                   ! final rotate + sign correction
        rts
        mov     r1,r0                   ! delay slot: return quotient

div0:   rts
        mov     #0,r0                   ! delay slot: quotient 0 on divide-by-zero

#endif /* ! __SHMEDIA__ */
#endif /* ! __SH4__ */
#endif

#ifdef L_udivsi3_i4
        .title "SH DIVIDE"
!! 4 byte integer Divide code for the Hitachi SH
#ifdef __SH4__
! --- extraction artifact: code-viewer UI chrome, not part of the source ---
! Keyboard shortcuts: copy code Ctrl+C; search code Ctrl+F; full screen F11;
! toggle theme Ctrl+Shift+D; show shortcuts ?; increase font size Ctrl+=;
! decrease font size Ctrl+-