/* entry.S */
        stxa            %g4, [%g0] ASI_AFSR
        membar          #Sync

        CHEETAH_LOG_ERROR

        rdpr            %pil, %g2
        wrpr            %g0, 15, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
        mov             %l4, %o1
        mov             %l5, %o2
        call            cheetah_cee_handler
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        ba,a,pt         %xcc, rtrap_clr_l6

        /* Our caller has disabled I-cache+D-cache and performed membar Sync. */
        .globl          cheetah_deferred_trap
cheetah_deferred_trap:
        ldxa            [%g0] ASI_ESTATE_ERROR_EN, %g2
        andn            %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
        stxa            %g2, [%g0] ASI_ESTATE_ERROR_EN
        membar          #Sync

        /* Fetch and clear AFSR/AFAR */
        ldxa            [%g0] ASI_AFSR, %g4
        ldxa            [%g0] ASI_AFAR, %g5
        stxa            %g4, [%g0] ASI_AFSR
        membar          #Sync

        CHEETAH_LOG_ERROR

        rdpr            %pil, %g2
        wrpr            %g0, 15, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
        mov             %l4, %o1
        mov             %l5, %o2
        call            cheetah_deferred_handler
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        ba,a,pt         %xcc, rtrap_clr_l6

        .globl          __do_privact
__do_privact:
        mov             TLB_SFSR, %g3
        stxa            %g0, [%g3] ASI_DMMU     ! Clear FaultValid bit
        membar          #Sync
        sethi           %hi(109f), %g7
        ba,pt           %xcc, etrap
109:     or             %g7, %lo(109b), %g7
        call            do_privact
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        ba,pt           %xcc, rtrap
         clr            %l6

        .globl          do_mna
do_mna:
        rdpr            %tl, %g3
        cmp             %g3, 1

        /* Setup %g4/%g5 now as they are used in the
         * winfixup code.
         */
        mov             TLB_SFSR, %g3
        mov             DMMU_SFAR, %g4
        ldxa            [%g4] ASI_DMMU, %g4
        ldxa            [%g3] ASI_DMMU, %g5
        stxa            %g0, [%g3] ASI_DMMU     ! Clear FaultValid bit
        membar          #Sync
        bgu,pn          %icc, winfix_mna
         rdpr           %tpc, %g3

1:      sethi           %hi(109f), %g7
        ba,pt           %xcc, etrap
109:     or             %g7, %lo(109b), %g7
        mov             %l4, %o1
        mov             %l5, %o2
        call            mem_address_unaligned
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        ba,pt           %xcc, rtrap
         clr            %l6

        .globl          do_lddfmna
do_lddfmna:
        sethi           %hi(109f), %g7
        mov             TLB_SFSR, %g4
        ldxa            [%g4] ASI_DMMU, %g5
        stxa            %g0, [%g4] ASI_DMMU     ! Clear FaultValid bit
        membar          #Sync
        mov             DMMU_SFAR, %g4
        ldxa            [%g4] ASI_DMMU, %g4
        ba,pt           %xcc, etrap
109:     or             %g7, %lo(109b), %g7
        mov             %l4, %o1
        mov             %l5, %o2
        call            handle_lddfmna
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        ba,pt           %xcc, rtrap
         clr            %l6

        .globl          do_stdfmna
do_stdfmna:
        sethi           %hi(109f), %g7
        mov             TLB_SFSR, %g4
        ldxa            [%g4] ASI_DMMU, %g5
        stxa            %g0, [%g4] ASI_DMMU     ! Clear FaultValid bit
        membar          #Sync
        mov             DMMU_SFAR, %g4
        ldxa            [%g4] ASI_DMMU, %g4
        ba,pt           %xcc, etrap
109:     or             %g7, %lo(109b), %g7
        mov             %l4, %o1
        mov             %l5, %o2
        call            handle_stdfmna
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        ba,pt           %xcc, rtrap
         clr            %l6

        .globl          breakpoint_trap
breakpoint_trap:
        call            sparc_breakpoint
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        ba,pt           %xcc, rtrap
         nop

#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
    defined(CONFIG_SOLARIS_EMUL_MODULE)
        /* SunOS uses syscall zero as the 'indirect syscall' it looks
         * like indir_syscall(scall_num, arg0, arg1, arg2...);  etc.
         * This is complete brain damage.
         */
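        /* In C terms, the sunos_indir stub below behaves roughly like the
         * sketch that follows -- an illustration only, assuming the
         * sunos_sys_table[] handlers take up to five register-sized
         * arguments (the real prototypes vary per call):
         *
         *      long sunos_indir(unsigned int nr, long a0, long a1, long a2,
         *                       long a3, long a4)
         *      {
         *              long (*fn)(long, long, long, long, long);
         *
         *              if (nr >= NR_SYSCALLS)
         *                      fn = sunos_nosys;          // reject bad numbers
         *              else
         *                      fn = sunos_sys_table[nr];  // 32-bit entry, see lduw below
         *              // every argument shifts down by one register slot
         *              return fn(a0, a1, a2, a3, a4);
         *      }
         *
         * The assembly version calls through %l6 after restoring the original
         * return address into %o7, so the chosen handler returns directly to
         * the syscall dispatcher, as if it had been called from there.
         */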
        .globl          sunos_indir
sunos_indir:
        srl             %o0, 0, %o0
        mov             %o7, %l4
        cmp             %o0, NR_SYSCALLS
        blu,a,pt        %icc, 1f
         sll            %o0, 0x2, %o0
        sethi           %hi(sunos_nosys), %l6
        b,pt            %xcc, 2f
         or             %l6, %lo(sunos_nosys), %l6
1:      sethi           %hi(sunos_sys_table), %l7
        or              %l7, %lo(sunos_sys_table), %l7
        lduw            [%l7 + %o0], %l6
2:      mov             %o1, %o0
        mov             %o2, %o1
        mov             %o3, %o2
        mov             %o4, %o3
        mov             %o5, %o4
        call            %l6
         mov            %l4, %o7

        .globl          sunos_getpid
sunos_getpid:
        call            sys_getppid
         nop
        call            sys_getpid
         stx            %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
        b,pt            %xcc, ret_sys_call
         stx            %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]

        /* SunOS getuid() returns uid in %o0 and euid in %o1 */
        .globl          sunos_getuid
sunos_getuid:
        call            sys32_geteuid16
         nop
        call            sys32_getuid16
         stx            %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
        b,pt            %xcc, ret_sys_call
         stx            %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]

        /* SunOS getgid() returns gid in %o0 and egid in %o1 */
        .globl          sunos_getgid
sunos_getgid:
        call            sys32_getegid16
         nop
        call            sys32_getgid16
         stx            %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
        b,pt            %xcc, ret_sys_call
         stx            %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
#endif

        /* SunOS's execv() call only specifies the argv argument, the
         * environment settings are the same as the calling process's.
         */
        .globl          sunos_execv, sys_execve, sys32_execve
sys_execve:
        sethi           %hi(sparc_execve), %g1
        ba,pt           %xcc, execve_merge
         or             %g1, %lo(sparc_execve), %g1
sunos_execv:
        stx             %g0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I2]
sys32_execve:
        sethi           %hi(sparc32_execve), %g1
        or              %g1, %lo(sparc32_execve), %g1
execve_merge:
        flushw
        jmpl            %g1, %g0
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0

        .globl          sys_pipe, sys_sigpause, sys_nis_syscall
        .globl          sys_sigsuspend, sys_rt_sigsuspend, sys32_rt_sigsuspend
        .globl          sys_rt_sigreturn
        .globl          sys32_sigreturn, sys32_rt_sigreturn
        .globl          sys32_execve, sys_ptrace
        .globl          sys_sigaltstack, sys32_sigaltstack
        .globl          sys32_sigstack
        .align          32
sys_pipe:
        ba,pt           %xcc, sparc_pipe
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
sys_nis_syscall:
        ba,pt           %xcc, c_sys_nis_syscall
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
sys_memory_ordering:
        ba,pt           %xcc, sparc_memory_ordering
         add            %sp, STACK_BIAS + REGWIN_SZ, %o1
sys_sigaltstack:
        ba,pt           %xcc, do_sigaltstack
         add            %i6, STACK_BIAS, %o2
sys32_sigstack:
        ba,pt           %xcc, do_sys32_sigstack
         mov            %i6, %o2
sys32_sigaltstack:
        ba,pt           %xcc, do_sys32_sigaltstack
         mov            %i6, %o2

        .align          32
sys_sigsuspend:
        add             %sp, STACK_BIAS + REGWIN_SZ, %o0
        call            do_sigsuspend
         add            %o7, 1f-.-4, %o7
         nop
sys_rt_sigsuspend:      /* NOTE: %o0,%o1 have a correct value already */
        add             %sp, STACK_BIAS + REGWIN_SZ, %o2
        call            do_rt_sigsuspend
         add            %o7, 1f-.-4, %o7
         nop
sys32_rt_sigsuspend:    /* NOTE: %o0,%o1 have a correct value already */
        srl             %o0, 0, %o0
        add             %sp, STACK_BIAS + REGWIN_SZ, %o2
        call            do_rt_sigsuspend32
         add            %o7, 1f-.-4, %o7
        /* NOTE: %o0 has a correct value already */
sys_sigpause:
        add             %sp, STACK_BIAS + REGWIN_SZ, %o1
        call            do_sigpause
         add            %o7, 1f-.-4, %o7
         nop
sys32_sigreturn:
        add             %sp, STACK_BIAS + REGWIN_SZ, %o0
        call            do_sigreturn32
         add            %o7, 1f-.-4, %o7
         nop
sys_rt_sigreturn:
        add             %sp, STACK_BIAS + REGWIN_SZ, %o0
        call            do_rt_sigreturn
         add            %o7, 1f-.-4, %o7
         nop
sys32_rt_sigreturn:
        add             %sp, STACK_BIAS + REGWIN_SZ, %o0
        call            do_rt_sigreturn32
         add            %o7, 1f-.-4, %o7
         nop
sys_ptrace:
        add             %sp, STACK_BIAS + REGWIN_SZ, %o0
        call            do_ptrace
         add            %o7, 1f-.-4, %o7
         nop
        .align          32
1:      ldx             [%curptr + AOFF_task_ptrace], %l5
        andcc           %l5, 0x02, %g0
        be,pt           %icc, rtrap
         clr            %l6
        call            syscall_trace
         nop
        ba,pt           %xcc, rtrap
         clr            %l6

        /* This is how fork() was meant to be done, 8 instruction entry.
         *
         * I questioned the following code briefly, let me clear things
         * up so you must not reason on it like I did.
         *
         * Know the fork_kpsr etc. we use in the sparc32 port?  We don't
         * need it here because the only piece of window state we copy to
         * the child is the CWP register.  Even if the parent sleeps,
         * we are safe because we stuck it into pt_regs of the parent
         * so it will not change.
         *
         * XXX This raises the question, whether we can do the same on
         * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim.  The
         * XXX answer is yes.  We stick fork_kpsr in UREG_G0 and
         * XXX fork_kwim in UREG_G1 (global registers are considered
         * XXX volatile across a system call in the sparc ABI I think
         * XXX if it isn't we can use regs->y instead, anyone who depends
         * XXX upon the Y register being preserved across a fork deserves
         * XXX to lose).
         *
         * In fact we should take advantage of that fact for other things
         * during system calls...
         */
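        /* The entry points below all reduce to one do_fork() call; the hex
         * constants in sys_vfork are the CLONE_VFORK (0x4000) and CLONE_VM
         * (0x0100) flag bits of this kernel generation.  A minimal C sketch
         * of the same control flow, assuming the 2.4-era signature
         * do_fork(flags, new_usp, regs, stack_size) and hypothetical
         * *_sketch wrappers (the real entry points are the stubs below):
         *
         *      long sparc_fork_sketch(struct pt_regs *regs, unsigned long usp)
         *      {
         *              return do_fork(SIGCHLD, usp, regs, 0);
         *      }
         *      long sparc_vfork_sketch(struct pt_regs *regs, unsigned long usp)
         *      {
         *              return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
         *                             usp, regs, 0);
         *      }
         *      long sparc_clone_sketch(unsigned long flags, unsigned long child_usp,
         *                              struct pt_regs *regs, unsigned long usp)
         *      {
         *              // movrz %o1, %fp, %o1: the child inherits the parent's
         *              // user stack pointer when none is supplied
         *              return do_fork(flags, child_usp ? child_usp : usp, regs, 0);
         *      }
         */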
        .globl          sys_fork, sys_vfork, sys_clone, sparc_exit
        .globl          ret_from_syscall
        .align          32
sys_vfork:
        /* Under Linux, vfork and fork are just special cases of clone. */
        sethi           %hi(0x4000 | 0x0100 | SIGCHLD), %o0
        or              %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
        ba,pt           %xcc, sys_clone
sys_fork:
         clr            %o1
        mov             SIGCHLD, %o0
sys_clone:
        flushw
        movrz           %o1, %fp, %o1
        mov             0, %o3
        ba,pt           %xcc, do_fork
         add            %sp, STACK_BIAS + REGWIN_SZ, %o2
ret_from_syscall:
        /* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
         * %o7 for us.  Check performance counter stuff too.
         */
        andn            %o7, SPARC_FLAG_NEWCHILD, %l0
        mov             %g5, %o0        /* 'prev' */
        call            schedule_tail
         stb            %l0, [%g6 + AOFF_task_thread + AOFF_thread_flags]
        andcc           %l0, SPARC_FLAG_PERFCTR, %g0
        be,pt           %icc, 1f
         nop
        ldx             [%g6 + AOFF_task_thread + AOFF_thread_pcr_reg], %o7
        wr              %g0, %o7, %pcr

        /* Blackbird errata workaround.  See commentary in
         * smp.c:smp_percpu_timer_interrupt() for more
         * information.
         */
        ba,pt           %xcc, 99f
         nop
        .align          64
99:     wr              %g0, %g0, %pic
        rd              %pic, %g0

1:      b,pt            %xcc, ret_sys_call
         ldx            [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0], %o0
sparc_exit:
        wrpr            %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
        rdpr            %otherwin, %g1
        rdpr            %cansave, %g3
        add             %g3, %g1, %g3
        wrpr            %g3, 0x0, %cansave
        wrpr            %g0, 0x0, %otherwin
        wrpr            %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
        ba,pt           %xcc, sys_exit
         stb            %g0, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]

linux_sparc_ni_syscall:
        sethi           %hi(sys_ni_syscall), %l7
        b,pt            %xcc, 4f
         or             %l7, %lo(sys_ni_syscall), %l7

linux_syscall_trace32:
        call            syscall_trace
         nop
        srl             %i0, 0, %o0
        mov             %i4, %o4
        srl             %i1, 0, %o1
        srl             %i2, 0, %o2
        b,pt            %xcc, 2f
         srl            %i3, 0, %o3

linux_syscall_trace:
        call            syscall_trace
         nop
        mov             %i0, %o0
        mov             %i1, %o1
        mov             %i2, %o2
        mov             %i3, %o3
        b,pt            %xcc, 2f
         mov            %i4, %o4

        /* Linux 32-bit and SunOS system calls enter here... */
        .align          32
        .globl          linux_sparc_syscall32
linux_sparc_syscall32:
        /* Direct access to user regs, much faster. */
        cmp             %g1, NR_SYSCALLS                        ! IEU1  Group
        bgeu,pn         %xcc, linux_sparc_ni_syscall            ! CTI
         srl            %i0, 0, %o0                             ! IEU0
        sll             %g1, 2, %l4                             ! IEU0  Group
#ifdef SYSCALL_TRACING
        call            syscall_trace_entry
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        srl             %i0, 0, %o0
#endif
        mov             %i4, %o4                                ! IEU1
        lduw            [%l7 + %l4], %l7                        ! Load
        srl             %i1, 0, %o1                             ! IEU0  Group
        ldx             [%curptr + AOFF_task_ptrace], %l0       ! Load
        mov             %i5, %o5                                ! IEU1
        srl             %i2, 0, %o2                             ! IEU0  Group
        andcc           %l0, 0x02, %g0                          ! IEU0  Group
        bne,pn          %icc, linux_syscall_trace32             ! CTI
         mov            %i0, %l5                                ! IEU1
        call            %l7                                     ! CTI   Group brk forced
         srl            %i3, 0, %o3                             ! IEU0
        ba,a,pt         %xcc, 3f
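        /* Both syscall entry paths implement the same dispatch; a minimal C
         * sketch (hypothetical helper and table names, handlers assumed to
         * take up to six register-sized arguments through %l7):
         *
         *      long dispatch_sketch(unsigned int nr, unsigned long *arg,
         *                           int compat)
         *      {
         *              long (*fn)(long, long, long, long, long, long);
         *              int i;
         *
         *              if (nr >= NR_SYSCALLS)
         *                      return sys_ni_syscall();   // linux_sparc_ni_syscall
         *              if (compat)                        // srl %iN, 0, %oN
         *                      for (i = 0; i < 6; i++)
         *                              arg[i] = (u32) arg[i];
         *              fn = syscall_table[nr];            // lduw [%l7 + nr*4]
         *              if (current->ptrace & 0x02)        // linux_syscall_trace*
         *                      syscall_trace();
         *              return fn(arg[0], arg[1], arg[2],  // falls into ret_sys_call
         *                        arg[3], arg[4], arg[5]);
         *      }
         */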
        /* Linux native and SunOS system calls enter here... */
        .align          32
        .globl          linux_sparc_syscall, ret_sys_call
linux_sparc_syscall:
        /* Direct access to user regs, much faster. */
        cmp             %g1, NR_SYSCALLS                        ! IEU1  Group
        bgeu,pn         %xcc, linux_sparc_ni_syscall            ! CTI
         mov            %i0, %o0                                ! IEU0
        sll             %g1, 2, %l4                             ! IEU0  Group
#ifdef SYSCALL_TRACING
        call            syscall_trace_entry
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        mov             %i0, %o0
#endif
        mov             %i1, %o1                                ! IEU1
        lduw            [%l7 + %l4], %l7                        ! Load
4:      mov             %i2, %o2                                ! IEU0  Group
        ldx             [%curptr + AOFF_task_ptrace], %l0       ! Load
        mov             %i3, %o3                                ! IEU1
        mov             %i4, %o4                                ! IEU0  Group
        andcc           %l0, 0x02, %g0                          ! IEU1  Group+1 bubble
        bne,pn          %icc, linux_syscall_trace               ! CTI   Group
         mov            %i0, %l5                                ! IEU0
2:      call            %l7                                     ! CTI   Group brk forced
         mov            %i5, %o5                                ! IEU0
        nop
3:      stx             %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
ret_sys_call:
#ifdef SYSCALL_TRACING
        mov             %o0, %o1
        call            syscall_trace_exit
         add            %sp, STACK_BIAS + REGWIN_SZ, %o0
        mov             %o1, %o0
#endif
        ldx             [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE], %g3
        ldx             [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC], %l1 ! pc = npc
        sra             %o0, 0, %o0
        mov             %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
        cmp             %o0, -ENOIOCTLCMD
        sllx            %g2, 32, %g2
        bgeu,pn         %xcc, 1f
         andcc          %l0, 0x02, %l6
        andn            %g3, %g2, %g3

        /* System call success, clear Carry condition code. */
        stx             %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE]
        bne,pn          %icc, linux_syscall_trace2
         add            %l1, 0x4, %l2                           ! npc = npc+4
        stx             %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]
        ba,pt           %xcc, rtrap_clr_l6
         stx            %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC]

1:
        /* System call failure, set Carry condition code.
         * Also, get abs(errno) to return to the process.
         */
        sub             %g0, %o0, %o0
        or              %g3, %g2, %g3
        stx             %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
        mov             1, %l6
        stx             %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE]
        bne,pn          %icc, linux_syscall_trace2
         add            %l1, 0x4, %l2                           ! npc = npc+4
        stx             %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]
        b,pt            %xcc, rtrap
         stx            %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC]
linux_syscall_trace2:
        call            syscall_trace
         nop
        stx             %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]
        ba,pt           %xcc, rtrap
         stx            %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC]
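        /* ret_sys_call encodes the return convention seen by userland: on
         * error the Carry bit in TSTATE is set and %o0 carries abs(errno),
         * otherwise Carry is clear and %o0 is the result.  A libc stub would
         * decode it roughly like this minimal sketch, where carry_set stands
         * in for the condition-code test a real assembly stub performs and
         * errno is the usual <errno.h> variable:
         *
         *      long syscall_result_sketch(long o0, int carry_set)
         *      {
         *              if (carry_set) {        // failure path above ("1:")
         *                      errno = o0;     // positive errno value
         *                      return -1;
         *              }
         *              return o0;              // success, Carry cleared
         *      }
         */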
        .align          32
        .globl          __flushw_user
__flushw_user:
        rdpr            %otherwin, %g1
        brz,pn          %g1, 2f
         clr            %g2
1:      save            %sp, -128, %sp
        rdpr            %otherwin, %g1
        brnz,pt         %g1, 1b
         add            %g2, 1, %g2
1:      sub             %g2, 1, %g2
        brnz,pt         %g2, 1b
         restore        %g0, %g0, %g0
2:      retl
         nop

        /* This need not obtain the xtime_lock as it is coded in
         * an implicitly SMP safe way already.
         */
        .align          64
        .globl          do_gettimeofday
do_gettimeofday:        /* %o0 = timevalp */
        /* Load doubles must be used on xtime so that what we get
         * is guaranteed to be atomic, this is why we can run this
         * with interrupts on full blast.  Don't touch this... -DaveM
         *
         * Note with time_t changes to the timeval type, I must now use
         * nucleus atomic quad 128-bit loads.
         *
         * If xtime was stored recently, I've seen crap from the
         * quad load on Cheetah.  Putting a membar SYNC before
         * the quad load seems to make the problem go away.  -DaveM
         * (we should nop out workarounds like this on spitfire)
         */
        sethi           %hi(timer_tick_offset), %g3
        sethi           %hi(xtime), %g2
        sethi           %hi(timer_tick_compare), %g1
        ldx             [%g3 + %lo(timer_tick_offset)], %g3
        or              %g2, %lo(xtime), %g2
        or              %g1, %lo(timer_tick_compare), %g1
1:      rdpr            %ver, %o2
        sethi           %hi(0x003e0014), %o1
        srlx            %o2, 32, %o2
        or              %o1, %lo(0x003e0014), %o1
        membar          #Sync
        ldda            [%g2] ASI_NUCLEUS_QUAD_LDD, %o4
        cmp             %o2, %o1
        bne,pt          %xcc, 2f
         nop
        ba,pt           %xcc, 3f
         rd             %asr24, %o1
2:      rd              %tick, %o1
3:      ldx             [%g1], %g7
        membar          #Sync
        ldda            [%g2] ASI_NUCLEUS_QUAD_LDD, %o2
        xor             %o4, %o2, %o2
        xor             %o5, %o3, %o3
        orcc            %o2, %o3, %g0
        bne,pn          %xcc, 1b
         sethi          %hi(wall_jiffies), %o2
        sethi           %hi(jiffies), %o3
        ldx             [%o2 + %lo(wall_jiffies)], %o2
        ldx             [%o3 + %lo(jiffies)], %o3
        sub             %o3, %o2, %o2
        sethi           %hi(timer_ticks_per_usec_quotient), %o3
        add             %g3, %o1, %o1
        ldx             [%o3 + %lo(timer_ticks_per_usec_quotient)], %o3
        sub             %o1, %g7, %o1
        mulx            %o3, %o1, %o1
        brz,pt          %o2, 1f
         srlx           %o1, 32, %o1
        sethi           %hi(10000), %g2
        or              %g2, %lo(10000), %g2
        add             %o1, %g2, %o1
1:      sethi           %hi(1000000), %o2
        srlx            %o5, 32, %o5
        or              %o2, %lo(1000000), %o2
        add             %o5, %o1, %o5
        cmp             %o5, %o2
        bl,a,pn         %xcc, 1f
         stx            %o4, [%o0 + 0x0]
        add             %o4, 0x1, %o4
        sub             %o5, %o2, %o5
        stx             %o4, [%o0 + 0x0]
1:      retl
         st             %o5, [%o0 + 0x8]
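        /* The loop above is a lock-free reader: it snapshots the 128-bit
         * xtime value with a quad load, samples the tick counter, then
         * reloads xtime and retries from 1: if the two snapshots differ.
         * A minimal C sketch of that retry pattern; read_xtime_atomic(),
         * read_tick(), xtime_changed() and add_usec() are hypothetical
         * stand-ins for the ldda / rd %tick (or %asr24) sequences above:
         *
         *      struct timeval gettimeofday_sketch(void)
         *      {
         *              struct timeval snap;
         *              unsigned long tick;
         *
         *              do {
         *                      snap = read_xtime_atomic();     // first ldda
         *                      tick = read_tick();             // rd %tick / %asr24
         *              } while (xtime_changed(snap));          // second ldda + xor test
         *              // interpolate microseconds from tick - timer_tick_compare
         *              return add_usec(snap, tick);
         *      }
         */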