📄 entry.s
字号:
// IA-64 kernel entry/exit paths (syscall return, trace hooks, kernel exit).
// GAS IA-64 syntax; ";;" marks instruction-group stop bits.  Line structure
// reconstructed from a newline-stripped paste: all instruction tokens are
// unchanged, only layout and commentary were restored.

        // --- tail of load_switch_stack (entry point is above this view) ---
        mov pr=r28,-1
        ;;
        ld8.fill r6=[r14],16
        ld8.fill r7=[r15],16
        mov ar.unat=r18                 // restore caller's unat
        mov ar.rnat=r30                 // must restore after bspstore but before rsc!
        mov ar.fpsr=r19                 // restore fpsr
        mov ar.rsc=3                    // put RSE back into eager mode, pl 0
        br.cond.sptk.many b7
END(load_switch_stack)

        /*
         * Raw syscall stub: places the syscall number (in5) in r15, enters the
         * kernel via break, and converts a -1 return in r10 into errno/-1 per
         * the C library convention.
         */
GLOBAL_ENTRY(__ia64_syscall)
        .regstk 6,0,0,0
        mov r15=in5                     // put syscall number in place
        break __BREAK_SYSCALL
        movl r2=errno
        cmp.eq p6,p7=-1,r10
        ;;
(p6)    st4 [r2]=r8                     // store error code into errno
(p6)    mov r8=-1                       // return -1 on failure
        br.ret.sptk.many rp
END(__ia64_syscall)

        /*
         * We invoke syscall_trace through this intermediate function to
         * ensure that the syscall input arguments are not clobbered.  We
         * also use it to preserve b6, which contains the syscall entry point.
         */
GLOBAL_ENTRY(invoke_syscall_trace)
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
        alloc loc1=ar.pfs,8,3,0,0
        mov loc0=rp
        .body
        mov loc2=b6                     // preserve syscall entry point across the call
        ;;
        br.call.sptk.many rp=syscall_trace
.ret3:  mov rp=loc0
        mov ar.pfs=loc1
        mov b6=loc2
        br.ret.sptk.many rp
END(invoke_syscall_trace)

        /*
         * Invoke a system call, but do some tracing before and after the call.
         * We MUST preserve the current register frame throughout this routine
         * because some system calls (such as ia64_execve) directly
         * manipulate ar.pfs.
         *
         * Input:
         *      r15 = syscall number
         *      b6  = syscall entry point
         */
        .global ia64_strace_leave_kernel

GLOBAL_ENTRY(ia64_trace_syscall)
        PT_REGS_UNWIND_INFO(0)
        br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
.ret6:  br.call.sptk.many rp=b6         // do the syscall
strace_check_retval:
        cmp.lt p6,p0=r8,r0              // syscall failed?
        adds r2=PT(R8)+16,sp            // r2 = &pt_regs.r8
        adds r3=PT(R10)+16,sp           // r3 = &pt_regs.r10
        mov r10=0
(p6)    br.cond.sptk strace_error       // syscall failed ->
        ;;                              // avoid RAW on r10
strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8      // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10     // clear error indication in slot for r10
ia64_strace_leave_kernel:
        br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
        // NOTE(review): label ".rety" looks garbled (siblings are .ret3/.ret6/.ret8) -- confirm against original source
.rety:  br.cond.sptk ia64_leave_kernel
strace_error:
        ld8 r3=[r2]                     // load pt_regs.r8
        sub r9=0,r8                     // negate return value to get errno value
        ;;
        cmp.ne p6,p0=r3,r0              // is pt_regs.r8!=0?
        adds r3=16,r2                   // r3=&pt_regs.r10
        ;;
(p6)    mov r10=-1
(p6)    mov r8=r9
        br.cond.sptk strace_save_retval
END(ia64_trace_syscall)

GLOBAL_ENTRY(ia64_ret_from_clone)
        PT_REGS_UNWIND_INFO(0)
        /*
         * We need to call schedule_tail() to complete the scheduling process.
         * Called by ia64_switch_to after do_fork()->copy_thread().  r8 contains the
         * address of the previously executing task.
         */
        br.call.sptk.many rp=ia64_invoke_schedule_tail
.ret8:  adds r2=IA64_TASK_PTRACE_OFFSET,r13
        ;;
        ld8 r2=[r2]                     // r2 = current->ptrace
        ;;
        mov r8=0
        tbit.nz p6,p0=r2,PT_TRACESYS_BIT
(p6)    br.cond.spnt strace_check_retval
        ;;                              // added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
        // fall through
GLOBAL_ENTRY(ia64_ret_from_syscall)
        PT_REGS_UNWIND_INFO(0)
        cmp.ge p6,p7=r8,r0              // syscall executed successfully?
        adds r2=PT(R8)+16,sp            // r2 = &pt_regs.r8
        adds r3=PT(R10)+16,sp           // r3 = &pt_regs.r10
        ;;
        .mem.offset 0,0
(p6)    st8.spill [r2]=r8               // store return value in slot for r8 and set unat bit
        .mem.offset 8,0
(p6)    st8.spill [r3]=r0               // clear error indication in slot for r10 and set unat bit
(p7)    br.cond.spnt handle_syscall_error // handle potential syscall failure
END(ia64_ret_from_syscall)
        // fall through
GLOBAL_ENTRY(ia64_leave_kernel)
        PT_REGS_UNWIND_INFO(0)
        lfetch.fault [sp]
        movl r14=.restart
        ;;
        mov.ret.sptk rp=r14,.restart
.restart:
        adds r17=IA64_TASK_NEED_RESCHED_OFFSET,r13
        adds r18=IA64_TASK_SIGPENDING_OFFSET,r13
#ifdef CONFIG_PERFMON
        adds r19=IA64_TASK_PFM_MUST_BLOCK_OFFSET,r13
#endif
        ;;
#ifdef CONFIG_PERFMON
(pUser) ld8 r19=[r19]                   // load current->thread.pfm_must_block
#endif
(pUser) ld8 r17=[r17]                   // load current->need_resched
(pUser) ld4 r18=[r18]                   // load current->sigpending
        ;;
#ifdef CONFIG_PERFMON
(pUser) cmp.ne.unc p9,p0=r19,r0         // current->thread.pfm_must_block != 0?
#endif
(pUser) cmp.ne.unc p7,p0=r17,r0         // current->need_resched != 0?
(pUser) cmp.ne.unc p8,p0=r18,r0         // current->sigpending != 0?
        ;;
        adds r2=PT(R8)+16,r12
        adds r3=PT(R9)+16,r12
#ifdef CONFIG_PERFMON
(p9)    br.call.spnt.many b7=pfm_block_on_overflow
#endif
#if __GNUC__ < 3
(p7)    br.call.spnt.many b7=invoke_schedule
#else
(p7)    br.call.spnt.many b7=schedule
#endif
(p8)    br.call.spnt.many b7=handle_signal_delivery // check & deliver pending signals
        ;;
        // start restoring the state saved on the kernel stack (struct pt_regs):
        ld8.fill r8=[r2],16
        ld8.fill r9=[r3],16
        ;;
        ld8.fill r10=[r2],16
        ld8.fill r11=[r3],16
        ;;
        ld8.fill r16=[r2],16
        ld8.fill r17=[r3],16
        ;;
        ld8.fill r18=[r2],16
        ld8.fill r19=[r3],16
        ;;
        ld8.fill r20=[r2],16
        ld8.fill r21=[r3],16
        ;;
        ld8.fill r22=[r2],16
        ld8.fill r23=[r3],16
        ;;
        ld8.fill r24=[r2],16
        ld8.fill r25=[r3],16
        ;;
        ld8.fill r26=[r2],16
        ld8.fill r27=[r3],16
        ;;
        ld8.fill r28=[r2],16
        ld8.fill r29=[r3],16
        ;;
        ld8.fill r30=[r2],16
        ld8.fill r31=[r3],16
        ;;
        rsm psr.i | psr.ic              // initiate turning off of interrupts & interruption collection
        invala                          // invalidate ALAT
        ;;
        ld8 r1=[r2],16                  // ar.ccv
        ld8 r13=[r3],16                 // ar.fpsr
        ;;
        ld8 r14=[r2],16                 // b0
        ld8 r15=[r3],16+8               // b7
        ;;
        ldf.fill f6=[r2],32
        ldf.fill f7=[r3],32
        ;;
        ldf.fill f8=[r2],32
        ldf.fill f9=[r3],32
        ;;
        mov ar.ccv=r1
        mov ar.fpsr=r13
        mov b0=r14
        ;;
        srlz.i                          // ensure interrupts & interruption collection are off
        mov b7=r15
        ;;
        bsw.0                           // switch back to bank 0
        ;;
        adds r16=16,r12
        adds r17=24,r12
        ;;
        ld8 rCRIPSR=[r16],16            // load cr.ipsr
        ld8 rCRIIP=[r17],16             // load cr.iip
        ;;
        ld8 rCRIFS=[r16],16             // load cr.ifs
        ld8 rARUNAT=[r17],16            // load ar.unat
        cmp.eq p9,p0=r0,r0              // set p9 to indicate that we should restore cr.ifs
        ;;
        ld8 rARPFS=[r16],16             // load ar.pfs
        ld8 rARRSC=[r17],16             // load ar.rsc
        ;;
        ld8 rARRNAT=[r16],16            // load ar.rnat (may be garbage)
        ld8 rARBSPSTORE=[r17],16        // load ar.bspstore (may be garbage)
        ;;
        ld8 rARPR=[r16],16              // load predicates
        ld8 rB6=[r17],16                // load b6
        ;;
        ld8 r19=[r16],16                // load ar.rsc value for "loadrs"
        ld8.fill r1=[r17],16            // load r1
        ;;
        ld8.fill r2=[r16],16
        ld8.fill r3=[r17],16
        ;;
        ld8.fill r12=[r16],16
        ld8.fill r13=[r17],16
        ;;
        ld8.fill r14=[r16]
        ld8.fill r15=[r17]
        shr.u r18=r19,16                // get byte size of existing "dirty" partition
        ;;
        mov r16=ar.bsp                  // get existing backing store pointer
        movl r17=PERCPU_ADDR+IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET
        ;;
        ld4 r17=[r17]                   // r17 = cpu_data->phys_stacked_size_p8
(pKern) br.cond.dpnt skip_rbs_switch
        /*
         * Restore user backing store.
         *
         * NOTE: alloc, loadrs, and cover can't be predicated.
         */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
        cover                           // add current frame into dirty partition
        ;;
        mov r19=ar.bsp                  // get new backing store pointer
        sub r16=r16,r18                 // krbs = old bsp - size of dirty partition
        cmp.ne p9,p0=r0,r0              // clear p9 to skip restore of cr.ifs
        ;;
        sub r19=r19,r16                 // calculate total byte size of dirty partition
        add r18=64,r18                  // don't force in0-in7 into memory...
        ;;
        shl r19=r19,16                  // shift size of dirty partition into loadrs position
        ;;
dont_preserve_current_frame:
        /*
         * To prevent leaking bits between the kernel and user-space,
         * we must clear the stacked registers in the "invalid" partition here.
         * Not pretty, but at least it's fast (3.34 registers/cycle).
         * Architecturally, this loop could go at 4.67 registers/cycle, but that would
         * oversubscribe Itanium.
         */
#       define pRecurse p6
#       define pReturn  p7
#       define Nregs    10
        alloc loc0=ar.pfs,2,Nregs-2,2,0
        shr.u loc1=r18,9                // RNaTslots <= dirtySize / (64*8) + 1
        sub r17=r17,r18                 // r17 = (physStackedSize + 8) - dirtySize
        ;;
        mov ar.rsc=r19                  // load ar.rsc to be used for "loadrs"
        shladd in0=loc1,3,r17
        mov in1=0
        ;;
//      .align 32       // gas-2.11.90 is unable to generate a stop bit after .align
rse_clear_invalid:
        // cycle 0
 { .mii
        alloc loc0=ar.pfs,2,Nregs-2,2,0
        cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
        add out0=-Nregs*8,in0
}{ .mfb
        add out1=1,in1                  // increment recursion count
        nop.f 0
        nop.b 0                         // can't do br.call here because of alloc (WAW on CFM)
        ;;
}{ .mfi // cycle 1
        mov loc1=0
        nop.f 0
        mov loc2=0
}{ .mib
        mov loc3=0
        mov loc4=0
(pRecurse) br.call.sptk.many b6=rse_clear_invalid
}{ .mfi // cycle 2
        mov loc5=0
        nop.f 0
        cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
}{ .mib
        mov loc6=0
        mov loc7=0
(pReturn) br.ret.sptk.many b6
}
#       undef pRecurse
#       undef pReturn

        alloc r17=ar.pfs,0,0,0,0        // drop current register frame
        ;;
        loadrs
        ;;
skip_rbs_switch:
        mov b6=rB6
        mov ar.pfs=rARPFS
(pUser) mov ar.bspstore=rARBSPSTORE
(p9)    mov cr.ifs=rCRIFS
        mov cr.ipsr=rCRIPSR
        mov cr.iip=rCRIIP
        ;;
(pUser) mov ar.rnat=rARRNAT             // must happen with RSE in lazy mode
        mov ar.rsc=rARRSC
        mov ar.unat=rARUNAT
        mov pr=rARPR,-1
        rfi
END(ia64_leave_kernel)

ENTRY(handle_syscall_error)
        /*
         * Some system calls (e.g., ptrace, mmap) can return arbitrary
         * values which could lead us to mistake a negative return
         * value as a failed syscall.  Those syscall must deposit
         * a non-zero value in pt_regs.r8 to indicate an error.
         * If pt_regs.r8 is zero, we assume that the call completed
         * successfully.
         */
        PT_REGS_UNWIND_INFO(0)
        ld8 r3=[r2]                     // load pt_regs.r8
        sub r9=0,r8                     // negate return value to get errno
        ;;
        mov r10=-1                      // return -1 in pt_regs.r10 to indicate error
        cmp.eq p6,p7=r3,r0              // is pt_regs.r8==0?
        adds r3=16,r2                   // r3=&pt_regs.r10
        ;;
(p6)    mov r9=r8
(p6)    mov r10=0
        ;;
.mem.offset 0,0; st8.spill [r2]=r9      // store errno in pt_regs.r8 and set unat bit
.mem.offset 8,0; st8.spill [r3]=r10     // store error indication in pt_regs.r10 and set unat bit
        br.cond.sptk ia64_leave_kernel
END(handle_syscall_error)

        /*
         * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
         * in case a system call gets restarted.
         */
GLOBAL_ENTRY(ia64_invoke_schedule_tail)
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
        alloc loc1=ar.pfs,8,2,1,0
        mov loc0=rp
        mov out0=r8                     // Address of previous task
        ;;
        br.call.sptk.many rp=schedule_tail
.ret11: mov ar.pfs=loc1
        mov rp=loc0
        br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)

#if __GNUC__ < 3

        /*
         * Invoke schedule() while preserving in0-in7, which may be needed
         * in case a system call gets restarted. Note that declaring schedule()
         * with asmlinkage() is NOT enough because that will only preserve as many
         * registers as there are formal arguments.
         *
         * XXX fix me: with gcc 3.0, we won't need this anymore because syscall_linkage
         * renders all eight input registers (in0-in7) as "untouchable".
         */
ENTRY(invoke_schedule)
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
        alloc loc1=ar.pfs,8,2,0,0
        mov loc0=rp
        ;;
        .body
        br.call.sptk.many rp=schedule
.ret14: mov ar.pfs=loc1
        mov rp=loc0
        br.ret.sptk.many rp
END(invoke_schedule)

#endif /* __GNUC__ < 3 */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -