/* entry.S */
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	DEPI	0,31,23,\tmp1
	cmpb,COND(<>),n \tmp,\tmp1,\fault
	ldi	(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
	depd,z	\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef __LP64__
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page. This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
	 */

	.export fault_vector_20

	.text

	.align 4096

fault_vector_20:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
#if 0
	naitlb_20	16
#else
	def		16
#endif
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31

#ifndef __LP64__

	.export fault_vector_11

	.align 2048

fault_vector_11:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
#if 0
	naitlb_11	16
#else
	def		16
#endif
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31

#endif

	.import	handle_interruption,code
	.import	do_cpu_irq_mask,code

	/*
	 * r26 = function to be called
	 * r25 = argument to pass in
	 * r24 = flags for do_fork()
	 *
	 * Kernel threads don't ever return, so they don't need
	 * a true register context. We just save away the arguments
	 * for copy_thread/ret_ to properly set up the child.
	 */

#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.export __kernel_thread, code
	.import do_fork
__kernel_thread:
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef __LP64__
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)	/* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26	/* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26	/* will have kernel mappings.	      */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)		/* user_tid */
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop
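	/*
	 * Illustration (added; not part of the original file): the flag word
	 * built above with ldil/ldo/or is simply what a C caller would pass
	 * through to do_fork(). A minimal sketch, assuming the 2.6-era
	 * do_fork() signature and that stack_start == 1 marks a kernel
	 * thread on this port:
	 *
	 *	pid_t kernel_thread(int (*fn)(void *), void *arg,
	 *			    unsigned long flags)
	 *	{
	 *		struct pt_regs regs;	// temp save area, as above
	 *		// CLONE_VM forced: only init_mm has kernel mappings;
	 *		// CLONE_UNTRACED keeps ptrace away from kernel threads.
	 *		return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
	 *			       1, &regs, 0, NULL, NULL);
	 *	}
	 */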
	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

	.export	ret_from_kernel_thread
ret_from_kernel_thread:

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef __LP64__
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
	b	sys_exit
	ldi	0, %r26

	.import	sys_execve, code
	.export	__execve, code
__execve:
	copy	%r2, %r15
	copy	%r30, %r16
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26

	cmpib,=,n 0,%r28,intr_return	/* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop

	.align 4

	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev
	 */
	.export	_switch_to, code
_switch_to:
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 * Note that the following code uses a "relied upon translation".
	 * See the parisc ACD for details. The ssm is necessary due to a
	 * PCXT bug.
	 */

	.align 4096

	.export	syscall_exit_rfi
syscall_exit_rfi:
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef __LP64__
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19	/* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19	/* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)
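	/*
	 * Illustration (added; not part of the original file): the iaoq/PSW
	 * scrub above, as a C-level sketch. The low two bits of each iaoq
	 * word are the privilege level, so depi 3,31,2 forces PL 3 (user);
	 * on parisc the PSW is kept in the gr[0] slot of struct pt_regs
	 * (field names per that layout, shown here for exposition only):
	 *
	 *	regs->iaoq[0] |= 3;	// force user privilege level
	 *	regs->iaoq[1] |= 3;
	 *	regs->gr[0] = (regs->gr[0] & USER_PSW_MASK) | USER_PSW;
	 */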
intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */

	ssm	PSW_SM_I, %r0

	/* Check for software interrupts */

	.import irq_stat,data

	load32	irq_stat,%r19
#ifdef CONFIG_SMP
	mfctl	%cr30,%r1
	ldw	TI_CPU(%r1),%r1	/* get cpu # - int */
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
	** irq_stat[] is defined using ____cacheline_aligned.
	*/
#ifdef __LP64__
	shld	%r1, 6, %r20
#else
	shlw	%r1, 5, %r20
#endif
	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

	LDREG	IRQSTAT_SIRQ_PEND(%r19),%r20	/* hardirq.h: unsigned long */
	cmpib,<>,n 0,%r20,intr_do_softirq	/* forward */

intr_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_SIGPENDING */
	bb,<,n	%r19, 31-TIF_SIGPENDING, intr_do_signal	/* forward */

intr_restore:
	copy		%r16,%r29
	ldo		PT_FR31(%r29),%r1
	rest_fp		%r1
	rest_general	%r29

	/* Create a "relied upon translation" PA 2.0 Arch. F-5 */
	ssm		0,%r0
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	tophys_r1	%r29
	rsm		(PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29 */
	rest_specials	%r29

	/* Important: Note that rest_stack restores r29
	 * last (we are using it)! It also restores r1 and r30. */
	rest_stack

	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.import do_softirq,code
intr_do_softirq:
	bl	do_softirq,%r2
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	intr_check_resched
	nop

	.import schedule,code
intr_do_resched:
	/* Only do reschedule if we are returning to user space */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop

#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	b	schedule
	ldo	R%intr_check_sig(%r2), %r2

	.import do_signal,code
intr_do_signal:
	/*
	 * This check is critical to having LWS working. The IASQ is zero
	 * on the gateway page and we cannot deliver any signals until we
	 * get off the gateway page.
	 *
	 * Only do signals if we are returning to user space.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop

	copy	%r0, %r24		/* unsigned long in_syscall */
	copy	%r16, %r25		/* struct pt_regs *regs */
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	BL	do_signal,%r2
	copy	%r0, %r26		/* sigset_t *oldset = NULL */

	b	intr_check_sig
	nop
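	/*
	 * Illustration (added; not part of the original file): the bb,< tests
	 * above branch on a single bit of TI_FLAGS. PA-RISC numbers bits from
	 * the MSB, so bit (31 - TIF_x) in bb notation is plain bit TIF_x in
	 * C. Roughly, and only after the IASQ checks confirm we are returning
	 * to user space:
	 *
	 *	if (ti->flags & (1UL << TIF_NEED_RESCHED))
	 *		schedule();			// then recheck signals
	 *	if (ti->flags & (1UL << TIF_SIGPENDING))
	 *		do_signal(NULL, regs, 0);	// oldset=NULL, !in_syscall
	 */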
	/*
	 * External interrupts.
	 */

intr_extint:
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b,n 3f

1:
#if 0	/* Interrupt Stack support not working yet! */
	mfctl	%cr31,%r1
	copy	%r30,%r17
	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k) */
#ifdef __LP64__
	depdi	0,63,15,%r17
#else
	depi	0,31,15,%r17
#endif
	CMPB=,n	%r1,%r17,2f
	get_stack_use_cr31
	b,n 3f
#endif
2:
	get_stack_use_r30

3:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2
#ifdef __LP64__
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

	.export	intr_save, code /* for os_hpmc */
intr_save:
	mfsp	%sr7,%r16
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *	     traps.c).
	 *	  2) Once we start executing code above 4 Gb, we need
	 *	     to adjust iasq/iaoq here in the same way we
	 *	     adjust isr/ior below.
	 */

	CMPIB=,n 6,%r26,skip_save_ior

	/* save_specials left ipsw value in r8 for us to test */

	mfctl	%cr20, %r16	/* isr */
	mfctl	%cr21, %r17	/* ior */

#ifdef __LP64__
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi		0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *	  between space bits and offset bits. This will change
	 *	  when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */

	extrd,u	%r16,63,7,%r1	/* get high bits from isr for ior */
	depd	%r1,31,7,%r17	/* deposit them into ior */
	depdi	0,63,7,%r16	/* clear them from isr */
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)

skip_save_ior:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef __LP64__
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2

	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
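	 * Illustration (added; not part of the original note): a miss
	 * handler chooses between the two roughly like this C sketch,
	 * where get_cr() stands in for a hypothetical control-register
	 * accessor and user_space for the "is this a user reference"
	 * test the handlers derive from the space id:
	 *
	 *	pgd_t *pgd = user_space ? (pgd_t *)get_cr(25)	// user pgd
	 *				: (pgd_t *)get_cr(24);	// kernel pgd
	 *	pgd_t *entry = pgd + pgd_index(addr);	// then walk pmd/pte
	 *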