📄 entry.S
	nop

	call	syscall_trace
	nop

1:
	/* We are returning to a signal handler. */
	RESTORE_ALL

	.align	4
	.globl	sys_rt_sigsuspend
sys_rt_sigsuspend:
	/* Note: %o0, %o1 already have correct value... */
	call	do_rt_sigsuspend
	add	%sp, STACKFRAME_SZ, %o2

	ld	[%curptr + TI_FLAGS], %l5
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	be	1f
	nop

	call	syscall_trace
	nop

1:
	/* We are returning to a signal handler. */
	RESTORE_ALL

	.align	4
	.globl	sys_sigreturn
sys_sigreturn:
	call	do_sigreturn
	add	%sp, STACKFRAME_SZ, %o0

	ld	[%curptr + TI_FLAGS], %l5
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	be	1f
	nop

	call	syscall_trace
	nop

1:
	/* We don't want to muck with user registers like a
	 * normal syscall, just return.
	 */
	RESTORE_ALL

	.align	4
	.globl	sys_rt_sigreturn
sys_rt_sigreturn:
	call	do_rt_sigreturn
	add	%sp, STACKFRAME_SZ, %o0

	ld	[%curptr + TI_FLAGS], %l5
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	be	1f
	nop

	call	syscall_trace
	nop

1:
	/* We are returning to a signal handler. */
	RESTORE_ALL

	/* Now that we have a real sys_clone, sys_fork() is
	 * implemented in terms of it.  Our _real_ implementation
	 * of SunOS vfork() will use sys_vfork().
	 *
	 * XXX These three should be consolidated into mostly shared
	 * XXX code just like on sparc64... -DaveM
	 */
	.align	4
	.globl	sys_fork, flush_patch_two
sys_fork:
	mov	%o7, %l5
flush_patch_two:
	FLUSH_ALL_KERNEL_WINDOWS;
	ld	[%curptr + TI_TASK], %o4
	rd	%psr, %g4
	WRITE_PAUSE
	mov	SIGCHLD, %o0			! arg0: clone flags
	rd	%wim, %g5
	WRITE_PAUSE
	mov	%fp, %o1			! arg1: usp
	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	add	%sp, STACKFRAME_SZ, %o2		! arg2: pt_regs ptr
	mov	0, %o3
	call	sparc_do_fork
	mov	%l5, %o7

	/* Whee, kernel threads! */
	.globl	sys_clone, flush_patch_three
sys_clone:
	mov	%o7, %l5
flush_patch_three:
	FLUSH_ALL_KERNEL_WINDOWS;
	ld	[%curptr + TI_TASK], %o4
	rd	%psr, %g4
	WRITE_PAUSE

	/* arg0,1: flags,usp  -- loaded already */
	cmp	%o1, 0x0			! Is new_usp NULL?
	rd	%wim, %g5
	WRITE_PAUSE
	be,a	1f
	mov	%fp, %o1			! yes, use callers usp
	andn	%o1, 7, %o1			! no, align to 8 bytes
1:
	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	add	%sp, STACKFRAME_SZ, %o2		! arg2: pt_regs ptr
	mov	0, %o3
	call	sparc_do_fork
	mov	%l5, %o7

	/* Whee, real vfork! */
	.globl	sys_vfork, flush_patch_four
sys_vfork:
flush_patch_four:
	FLUSH_ALL_KERNEL_WINDOWS;
	ld	[%curptr + TI_TASK], %o4
	rd	%psr, %g4
	WRITE_PAUSE
	rd	%wim, %g5
	WRITE_PAUSE
	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	sethi	%hi(0x4000 | 0x0100 | SIGCHLD), %o0
	mov	%fp, %o1
	or	%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
	sethi	%hi(sparc_do_fork), %l1
	mov	0, %o3
	jmpl	%l1 + %lo(sparc_do_fork), %g0
	add	%sp, STACKFRAME_SZ, %o2

	.align	4
linux_sparc_ni_syscall:
	sethi	%hi(sys_ni_syscall), %l7
	b	syscall_is_too_hard
	or	%l7, %lo(sys_ni_syscall), %l7

linux_fast_syscall:
	andn	%l7, 3, %l7
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	jmpl	%l7 + %g0, %g0
	mov	%i3, %o3

linux_syscall_trace:
	call	syscall_trace
	nop
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i3, %o3
	b	2f
	mov	%i4, %o4

	.globl	ret_from_fork
ret_from_fork:
	call	schedule_tail
	mov	%g3, %o0
	b	ret_sys_call
	ld	[%sp + STACKFRAME_SZ + PT_I0], %o0
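
	/* Return convention implemented by ret_sys_call and the
	 * Solaris/BSD exit paths below: the C handlers return either a
	 * result or a small negative errno.  The unsigned compare against
	 * -ENOIOCTLCMD treats values in [-ENOIOCTLCMD, -1] as errors; on
	 * failure the value is negated so abs(errno) goes back in PT_I0
	 * and the PSR carry bit is set, on success carry is cleared.  A
	 * userspace syscall stub is then typically expected to do,
	 * roughly (pseudo-code, not from this file):
	 *
	 *	result = trap into the kernel; result comes back in %o0
	 *	if (carry set) { errno = result; result = -1; }
	 *	return result;
	 */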
	/* Linux native and SunOS system calls enter here... */
	.align	4
	.globl	linux_sparc_syscall
linux_sparc_syscall:
	/* Direct access to user regs, much faster. */
	cmp	%g1, NR_SYSCALLS
	bgeu	linux_sparc_ni_syscall
	sll	%g1, 2, %l4
	ld	[%l7 + %l4], %l7
	andcc	%l7, 1, %g0
	bne	linux_fast_syscall
	/* Just do first insn from SAVE_ALL in the delay slot */

	.globl	syscall_is_too_hard
syscall_is_too_hard:
	SAVE_ALL_HEAD
	rd	%wim, %l3

	wr	%l0, PSR_ET, %psr
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2

	ld	[%curptr + TI_FLAGS], %l5
	mov	%i3, %o3
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	mov	%i4, %o4
	bne	linux_syscall_trace
	mov	%i0, %l5
2:
	call	%l7
	mov	%i5, %o5

	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]

	.globl	ret_sys_call
ret_sys_call:
	ld	[%curptr + TI_FLAGS], %l6
	cmp	%o0, -ENOIOCTLCMD
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3
	set	PSR_C, %g2
	bgeu	1f
	andcc	%l6, _TIF_SYSCALL_TRACE, %g0

	/* System call success, clear Carry condition code. */
	andn	%g3, %g2, %g3
	clr	%l6
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
	bne	linux_syscall_trace2
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc = npc */
	add	%l1, 0x4, %l2				/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	sub	%g0, %o0, %o0
	or	%g3, %g2, %g3
	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	mov	1, %l6
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
	bne	linux_syscall_trace2
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc = npc */
	add	%l1, 0x4, %l2				/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

linux_syscall_trace2:
	call	syscall_trace
	add	%l1, 0x4, %l2				/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

	/*
	 * Solaris system calls and indirect system calls enter here.
	 *
	 * I have named the solaris indirect syscalls like that because
	 * it seems like Solaris has some fast path syscalls that can
	 * be handled as indirect system calls. - mig
	 */
linux_syscall_for_solaris:
	sethi	%hi(sys_call_table), %l7
	b	linux_sparc_syscall
	or	%l7, %lo(sys_call_table), %l7

	.align	4
	.globl	solaris_syscall
solaris_syscall:
	cmp	%g1, 59
	be	linux_syscall_for_solaris
	cmp	%g1, 2
	be	linux_syscall_for_solaris
	cmp	%g1, 42
	be	linux_syscall_for_solaris
	cmp	%g1, 119
	be,a	linux_syscall_for_solaris
	mov	2, %g1
1:
	SAVE_ALL_HEAD
	rd	%wim, %l3
	wr	%l0, PSR_ET, %psr
	nop
	nop
	mov	%i0, %l5
	call	do_solaris_syscall
	add	%sp, STACKFRAME_SZ, %o0

	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	set	PSR_C, %g2
	cmp	%o0, -ENOIOCTLCMD
	bgeu	1f
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3

	/* System call success, clear Carry condition code. */
	andn	%g3, %g2, %g3
	clr	%l6
	b	2f
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]

1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	sub	%g0, %o0, %o0
	mov	1, %l6
	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	or	%g3, %g2, %g3
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]

	/* Advance the pc and npc over the trap instruction.
	 * If the npc is unaligned (has a 1 in the lower byte), it means
	 * the kernel does not want us to play magic (ie, skipping over
	 * traps).  Mainly when the Solaris code wants to set some PC and
	 * nPC (setcontext).
	 */
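	/* Concretely (illustrative values): a stored npc of 0x1000 gives
	 * pc = 0x1000 and npc = 0x1004, i.e. the trap instruction is
	 * skipped.  If setcontext installed npc = 0x2001, the branch to
	 * "1:" below just clears the low bit (npc = 0x2000) and leaves
	 * PT_PC untouched, so the pc/npc pair set up by the kernel is
	 * used as-is and nothing is skipped.
	 */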
2:
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc = npc */
	andcc	%l1, 1, %g0
	bne	1f
	add	%l1, 0x4, %l2				/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

	/* kernel knows what it is doing, fixup npc and continue */
1:
	sub	%l1, 1, %l1
	b	ret_trap_entry
	st	%l1, [%sp + STACKFRAME_SZ + PT_NPC]

#ifndef CONFIG_SUNOS_EMUL
	.align	4
	.globl	sunos_syscall
sunos_syscall:
	SAVE_ALL_HEAD
	rd	%wim, %l3
	wr	%l0, PSR_ET, %psr
	nop
	nop
	mov	%i0, %l5
	call	do_sunos_syscall
	add	%sp, STACKFRAME_SZ, %o0
#endif

	/* {net, open}bsd system calls enter here... */
	.align	4
	.globl	bsd_syscall
bsd_syscall:
	/* Direct access to user regs, much faster. */
	cmp	%g1, NR_SYSCALLS
	blu,a	1f
	sll	%g1, 2, %l4

	set	sys_ni_syscall, %l7
	b	bsd_is_too_hard
	nop

1:
	ld	[%l7 + %l4], %l7

	.globl	bsd_is_too_hard
bsd_is_too_hard:
	rd	%wim, %l3
	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

2:
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i0, %l5
	mov	%i3, %o3
	mov	%i4, %o4
	call	%l7
	mov	%i5, %o5

	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	set	PSR_C, %g2
	cmp	%o0, -ENOIOCTLCMD
	bgeu	1f
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3

	/* System call success, clear Carry condition code. */
	andn	%g3, %g2, %g3
	clr	%l6
	b	2f
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]

1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	sub	%g0, %o0, %o0
#if 0 /* XXX todo XXX */
	sethi	%hi(bsd_xlatb_rorl), %o3
	or	%o3, %lo(bsd_xlatb_rorl), %o3
	sll	%o0, 2, %o0
	ld	[%o3 + %o0], %o0
#endif
	mov	1, %l6
	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	or	%g3, %g2, %g3
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]

	/* Advance the pc and npc over the trap instruction. */
2:
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc = npc */
	add	%l1, 0x4, %l2				/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

/* Saving and restoring the FPU state is best done from low-level code.
 *
 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
 *             void *fpqueue, unsigned long *fpqdepth)
 */
	.globl	fpsave
fpsave:
	st	%fsr, [%o1]	! this can trap on us if fpu is in bogon state
	ld	[%o1], %g1
	set	0x2000, %g4
	andcc	%g1, %g4, %g0
	be	2f
	mov	0, %g2

	/* We have an fpqueue to save. */
1:
	std	%fq, [%o2]
fpsave_magic:
	st	%fsr, [%o1]
	ld	[%o1], %g3
	andcc	%g3, %g4, %g0
	add	%g2, 1, %g2
	bne	1b
	add	%o2, 8, %o2

2:
	st	%g2, [%o3]

	std	%f0, [%o0 + 0x00]
	std	%f2, [%o0 + 0x08]
	std	%f4, [%o0 + 0x10]
	std	%f6, [%o0 + 0x18]
	std	%f8, [%o0 + 0x20]
	std	%f10, [%o0 + 0x28]
	std	%f12, [%o0 + 0x30]
	std	%f14, [%o0 + 0x38]
	std	%f16, [%o0 + 0x40]
	std	%f18, [%o0 + 0x48]
	std	%f20, [%o0 + 0x50]
	std	%f22, [%o0 + 0x58]
	std	%f24, [%o0 + 0x60]
	std	%f26, [%o0 + 0x68]
	std	%f28, [%o0 + 0x70]
	retl
	std	%f30, [%o0 + 0x78]

	/* Thanks to Theo Deraadt and the authors of the Sprite/netbsd/openbsd
	 * code for pointing out this possible deadlock, while we save state
	 * above we could trap on the fsr store so our low level fpu trap
	 * code has to know how to deal with this.
	 */
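	/* The two entry points below exist for exactly that case: if the
	 * %fsr store at fpsave or at fpsave_magic traps, the low-level FPU
	 * trap code can come back through fpsave_catch2 or fpsave_catch,
	 * which redo the store in the delay slot and branch to the
	 * instruction just past the faulting one (the "+ 4" targets).
	 */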
fpsave_catch:
	b	fpsave_magic + 4
	st	%fsr, [%o1]

fpsave_catch2:
	b	fpsave + 4
	st	%fsr, [%o1]

	/* void fpload(unsigned long *fpregs, unsigned long *fsr); */
	.globl	fpload
fpload:
	ldd	[%o0 + 0x00], %f0
	ldd	[%o0 + 0x08], %f2
	ldd	[%o0 + 0x10], %f4
	ldd	[%o0 + 0x18], %f6
	ldd	[%o0 + 0x20], %f8
	ldd	[%o0 + 0x28], %f10
	ldd	[%o0 + 0x30], %f12
	ldd	[%o0 + 0x38], %f14
	ldd	[%o0 + 0x40], %f16
	ldd	[%o0 + 0x48], %f18
	ldd	[%o0 + 0x50], %f20
	ldd	[%o0 + 0x58], %f22
	ldd	[%o0 + 0x60], %f24
	ldd	[%o0 + 0x68], %f26
	ldd	[%o0 + 0x70], %f28
	ldd	[%o0 + 0x78], %f30
	ld	[%o1], %fsr
	retl
	nop

	/* __ndelay and __udelay take two arguments:
	 * 0 - nsecs or usecs to delay
	 * 1 - per_cpu udelay_val (loops per jiffy)
	 *
	 * Note that ndelay gives HZ times higher resolution but has a 10ms
	 * limit.  udelay can handle up to 1s.
	 */
	.globl	__ndelay
__ndelay:
	save	%sp, -STACKFRAME_SZ, %sp
	mov	%i0, %o0
	call	.umul
	mov	0x1ad, %o1		! 2**32 / (1 000 000 000 / HZ)
	call	.umul
	mov	%i1, %o1		! udelay_val
	ba	delay_continue
	mov	%o1, %o0		! >>32 later for better resolution

	.globl	__udelay
__udelay:
	save	%sp, -STACKFRAME_SZ, %sp
	mov	%i0, %o0
	sethi	%hi(0x10c6), %o1
	call	.umul
	or	%o1, %lo(0x10c6), %o1	! 2**32 / 1 000 000
	call	.umul
	mov	%i1, %o1		! udelay_val
	call	.umul
	mov	HZ, %o0			! >>32 earlier for wider range

delay_continue:
	cmp	%o0, 0x0
1:
	bne	1b
	subcc	%o0, 1, %o0

	ret
	restore

	/* Handle a software breakpoint */
	/* We have to inform parent that child has stopped */
	.align	4
	.globl	breakpoint_trap
breakpoint_trap:
	rd	%wim, %l3
	SAVE_ALL
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	st	%i0, [%sp + STACKFRAME_SZ + PT_G0]	! for restarting syscalls
	call	sparc_breakpoint
	add	%sp, STACKFRAME_SZ, %o0

	RESTORE_ALL

	.align	4
	.globl	__handle_exception, flush_patch_exception
__handle_exception:
flush_patch_exception:
	FLUSH_ALL_KERNEL_WINDOWS;
	ldd	[%o0], %o6
	jmpl	%o7 + 0xc, %g0		! see asm-sparc/processor.h
	mov	1, %g1			! signal EFAULT condition

	.align	4
	.globl	kill_user_windows, kuw_patch1_7win
	.globl	kuw_patch1
kuw_patch1_7win:	sll	%o3, 6, %o3

	/* No matter how much overhead this routine has in the worst
	 * case scenario, it is several times better than taking the
	 * traps with the old method of just doing flush_user_windows().
	 */
kill_user_windows:
	ld	[%g6 + TI_UWINMASK], %o0	! get current umask
	orcc	%g0, %o0, %g0			! if no bits set, we are done
	be	3f				! nothing to do
	rd	%psr, %o5			! must clear interrupts
	or	%o5, PSR_PIL, %o4		! or else that could change
	wr	%o4, 0x0, %psr			! the uwinmask state
	WRITE_PAUSE				! burn them cycles
1:
	ld	[%g6 + TI_UWINMASK], %o0	! get consistent state
	orcc	%g0, %o0, %g0			! did an interrupt come in?
	be	4f				! yep, we are done
	rd	%wim, %o3			! get current wim
	srl	%o3, 1, %o4			! simulate a save
kuw_patch1:
	sll	%o3, 7, %o3			! compute next wim
	or	%o4, %o3, %o3			! result
	andncc	%o0, %o3, %o0			! clean this bit in umask
	bne	kuw_patch1			! not done yet
	srl	%o3, 1, %o4			! begin another save simulation
	wr	%o3, 0x0, %wim			! set the new wim
	st	%g0, [%g6 + TI_UWINMASK]	! clear uwinmask
4:
	wr	%o5, 0x0, %psr			! re-enable interrupts
	WRITE_PAUSE				! burn baby burn
3:
	retl					! return
	st	%g0, [%g6 + TI_W_SAVED]		! no windows saved

	.align	4
	.globl	restore_current
restore_current:
	LOAD_CURRENT(g6, o0)
	retl
	nop

#ifdef CONFIG_PCI
#include <asm/pcic.h>

	.align	4
	.globl	linux_trap_ipi15_pcic
linux_trap_ipi15_pcic:
	rd	%wim, %l3
	SAVE_ALL

	/*
	 * First deactivate NMI
	 * or we cannot drop ET, cannot get window spill traps.
	 * The busy loop is necessary because the PIO error
	 * sometimes does not go away quickly and we trap again.
	 */
	sethi	%hi(pcic_regs), %o1
	ld	[%o1 + %lo(pcic_regs)], %o2

	! Get pending status for printouts later.
	ld	[%o2 + PCI_SYS_INT_PENDING], %o0

	mov	PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
	stb	%o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1:
	ld	[%o2 + PCI_SYS_INT_PENDING], %o1
	andcc	%o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
	bne	1b
	nop

	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE

	call	pcic_nmi
	add	%sp, STACKFRAME_SZ, %o1		! struct pt_regs *regs
	RESTORE_ALL

	.globl	pcic_nmi_trap_patch
pcic_nmi_trap_patch:
	sethi	%hi(linux_trap_ipi15_pcic), %l3
	jmpl	%l3 + %lo(linux_trap_ipi15_pcic), %g0
	rd	%psr, %l0
	.word	0

#endif /* CONFIG_PCI */

/* End of entry.S */
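
A note on the magic constants in __ndelay and __udelay above: 0x1ad and 0x10c6 are just 2^32 divided by the quantities named in the inline comments, so the later multiplications by udelay_val (loops per jiffy) and, for udelay, by HZ yield a loop count with the fractional scaling folded into the high word of the 64-bit product (the ">>32" the comments refer to). A minimal C sketch that reproduces the two constants, assuming HZ == 100 (the assumption here; the kernel source only gives the formulas):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long two32 = 1ULL << 32;
		unsigned int hz = 100;	/* assumption: HZ == 100 */

		/* __udelay: microseconds scaled by 2^32 / 1 000 000 */
		printf("0x%llx\n", two32 / 1000000);		/* prints 0x10c6 */

		/* __ndelay: nanoseconds scaled by 2^32 / (1 000 000 000 / HZ) */
		printf("0x%llx\n", two32 / (1000000000 / hz));	/* prints 0x1ad */
		return 0;
	}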