// arch/ia64/kernel/ivt.S (excerpt) — IA-64 Interruption Vector Table handlers
* * ia64_syscall_setup() is a separate subroutine so that it can * allocate stacked registers so it can safely demine any * potential NaT values from the input registers. * * On entry: * - executing on bank 0 or bank 1 register set (doesn't matter) * - r1: stack pointer * - r2: current task pointer * - r3: preserved * - r12: original contents (sp to be saved) * - r13: original contents (tp to be saved) * - r15: original contents (syscall # to be saved) * - r18: saved bsp (after switching to kernel stack) * - r20: saved r1 (gp) * - r21: saved ar.fpsr * - r22: kernel's register backing store base (krbs_base) * - r23: saved ar.bspstore * - r24: saved ar.rnat * - r25: saved ar.unat * - r26: saved ar.pfs * - r27: saved ar.rsc * - r28: saved cr.iip * - r29: saved cr.ipsr * - r31: saved pr * - b0: original contents (to be saved) * On exit: * - executing on bank 1 registers * - psr.ic enabled, interrupts restored * - r1: kernel's gp * - r3: preserved (same as on entry) * - r12: points to kernel stack * - r13: points to current task * - p15: TRUE if interrupts need to be re-enabled * - ar.fpsr: set to kernel settings */ENTRY(ia64_syscall_setup) alloc r19=ar.pfs,8,0,0,0 tnat.nz p8,p0=in0 add r16=PT(CR_IPSR),r1 /* initialize first base pointer */ ;; st8 [r16]=r29,16; /* save cr.ipsr */ adds r17=PT(CR_IIP),r1; /* initialize second base pointer */ ;;(p8) mov in0=-1 tnat.nz p9,p0=in1 st8 [r17]=r28,16; /* save cr.iip */ mov r28=b0; (pKern) mov r18=r0; /* make sure r18 isn't NaT */ ;;(p9) mov in1=-1 tnat.nz p10,p0=in2 st8 [r16]=r30,16; /* save cr.ifs */ st8 [r17]=r25,16; /* save ar.unat */(pUser) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ ;; st8 [r16]=r26,16; /* save ar.pfs */ st8 [r17]=r27,16; /* save ar.rsc */ tbit.nz p15,p0=r29,IA64_PSR_I_BIT ;; /* avoid RAW on r16 & r17 */(p10) mov in2=-1 nop.f 0 tnat.nz p11,p0=in3(pKern) adds r16=16,r16; /* skip over ar_rnat field */(pKern) adds r17=16,r17; /* skip over ar_bspstore field */ shl r18=r18,16; /* compute ar.rsc to be used for 
"loadrs" */ ;;(p11) mov in3=-1 tnat.nz p12,p0=in4(pUser) st8 [r16]=r24,16; /* save ar.rnat */(pUser) st8 [r17]=r23,16; /* save ar.bspstore */ ;;(p12) mov in4=-1 tnat.nz p13,p0=in5 st8 [r16]=r31,16; /* save predicates */ st8 [r17]=r28,16; /* save b0 */ dep r14=-1,r0,61,3; ;; st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ st8.spill [r17]=r20,16; /* save original r1 */ adds r2=IA64_PT_REGS_R16_OFFSET,r1; ;;(p13) mov in5=-1 tnat.nz p14,p0=in6.mem.offset 0,0; st8.spill [r16]=r12,16;.mem.offset 8,0; st8.spill [r17]=r13,16; cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ ;;(p14) mov in6=-1 tnat.nz p8,p0=in7.mem.offset 0,0; st8 [r16]=r21,16; /* ar.fpsr */.mem.offset 8,0; st8.spill [r17]=r15,16; adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ ;; mov r13=IA64_KR(CURRENT); /* establish `current' */ movl r1=__gp; /* establish kernel global pointer */ ;; MINSTATE_END_SAVE_MIN_VIRT tnat.nz p9,p0=r15(p8) mov in7=-1 ssm psr.ic | PSR_DEFAULT_BITS movl r17=FPSR_DEFAULT adds r8=(IA64_PT_REGS_R8_OFFSET-IA64_PT_REGS_R16_OFFSET),r2 ;; srlz.i // guarantee that interruption collection is on cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0(p9) mov r15=-1(p15) ssm psr.i // restore psr.i mov.m ar.fpsr=r17 stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error) br.ret.sptk.many b7END(ia64_syscall_setup) .align 1024/////////////////////////////////////////////////////////////////////////////////////////// 0x3c00 Entry 15 (size 64 bundles) Reserved DBG_FAULT(15) FAULT(15) /* * Squatting in this space ... * * This special case dispatcher for illegal operation faults allows preserved * registers to be modified through a callback function (asm only) that is handed * back from the fault handler in r8. Up to three arguments can be passed to the * callback function by returning an aggregate with the callback as its first * element, followed by the arguments. 
*/ENTRY(dispatch_illegal_op_fault) SAVE_MIN_WITH_COVER ssm psr.ic | PSR_DEFAULT_BITS ;; srlz.i // guarantee that interruption collection is on ;;(p15) ssm psr.i // restore psr.i adds r3=8,r2 // set up second base pointer for SAVE_REST ;; alloc r14=ar.pfs,0,0,1,0 // must be first in insn group mov out0=ar.ec ;; SAVE_REST ;; br.call.sptk.many rp=ia64_illegal_op_fault.ret0: ;; alloc r14=ar.pfs,0,0,3,0 // must be first in insn group mov out0=r9 mov out1=r10 mov out2=r11 movl r15=ia64_leave_kernel ;; mov rp=r15 mov b6=r8 ;; cmp.ne p6,p0=0,r8(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel br.sptk.many ia64_leave_kernelEND(dispatch_illegal_op_fault) .align 1024/////////////////////////////////////////////////////////////////////////////////////////// 0x4000 Entry 16 (size 64 bundles) Reserved DBG_FAULT(16) FAULT(16) .align 1024/////////////////////////////////////////////////////////////////////////////////////////// 0x4400 Entry 17 (size 64 bundles) Reserved DBG_FAULT(17) FAULT(17)ENTRY(non_syscall) SAVE_MIN_WITH_COVER // There is no particular reason for this code to be here, other than that // there happens to be space here that would go unused otherwise. If this // fault ever gets "unreserved", simply moved the following code to a more // suitable spot... alloc r14=ar.pfs,0,0,2,0 mov out0=cr.iim add out1=16,sp adds r3=8,r2 // set up second base pointer for SAVE_REST ssm psr.ic | PSR_DEFAULT_BITS ;; srlz.i // guarantee that interruption collection is on ;;(p15) ssm psr.i // restore psr.i movl r15=ia64_leave_kernel ;; SAVE_REST mov rp=r15 ;; br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addrEND(non_syscall) .align 1024/////////////////////////////////////////////////////////////////////////////////////////// 0x4800 Entry 18 (size 64 bundles) Reserved DBG_FAULT(18) FAULT(18) /* * There is no particular reason for this code to be here, other than that * there happens to be space here that would go unused otherwise. 
If this * fault ever gets "unreserved", simply moved the following code to a more * suitable spot... */ENTRY(dispatch_unaligned_handler) SAVE_MIN_WITH_COVER ;; alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) mov out0=cr.ifa adds out1=16,sp ssm psr.ic | PSR_DEFAULT_BITS ;; srlz.i // guarantee that interruption collection is on ;;(p15) ssm psr.i // restore psr.i adds r3=8,r2 // set up second base pointer ;; SAVE_REST movl r14=ia64_leave_kernel ;; mov rp=r14 br.sptk.many ia64_prepare_handle_unalignedEND(dispatch_unaligned_handler) .align 1024/////////////////////////////////////////////////////////////////////////////////////////// 0x4c00 Entry 19 (size 64 bundles) Reserved DBG_FAULT(19) FAULT(19) /* * There is no particular reason for this code to be here, other than that * there happens to be space here that would go unused otherwise. If this * fault ever gets "unreserved", simply moved the following code to a more * suitable spot... */ENTRY(dispatch_to_fault_handler) /* * Input: * psr.ic: off * r19: fault vector number (e.g., 24 for General Exception) * r31: contains saved predicates (pr) */ SAVE_MIN_WITH_COVER_R19 alloc r14=ar.pfs,0,0,5,0 mov out0=r15 mov out1=cr.isr mov out2=cr.ifa mov out3=cr.iim mov out4=cr.itir ;; ssm psr.ic | PSR_DEFAULT_BITS ;; srlz.i // guarantee that interruption collection is on ;;(p15) ssm psr.i // restore psr.i adds r3=8,r2 // set up second base pointer for SAVE_REST ;; SAVE_REST movl r14=ia64_leave_kernel ;; mov rp=r14 br.call.sptk.many b6=ia64_faultEND(dispatch_to_fault_handler)//// --- End of long entries, Beginning of short entries// .align 1024/////////////////////////////////////////////////////////////////////////////////////////// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)ENTRY(page_not_present) DBG_FAULT(20) mov r16=cr.ifa rsm psr.dt /* * The Linux page fault handler doesn't expect non-present pages to be in * the TLB. Flush the existing entry now, so we meet that expectation. 
*/ mov r17=PAGE_SHIFT<<2 ;; ptc.l r16,r17 ;; mov r31=pr srlz.d br.sptk.many page_faultEND(page_not_present) .align 256/////////////////////////////////////////////////////////////////////////////////////////// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)ENTRY(key_permission) DBG_FAULT(21) mov r16=cr.ifa rsm psr.dt mov r31=pr ;; srlz.d br.sptk.many page_faultEND(key_permission) .align 256/////////////////////////////////////////////////////////////////////////////////////////// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)ENTRY(iaccess_rights) DBG_FAULT(22) mov r16=cr.ifa rsm psr.dt mov r31=pr ;; srlz.d br.sptk.many page_faultEND(iaccess_rights) .align 256/////////////////////////////////////////////////////////////////////////////////////////// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)ENTRY(daccess_rights) DBG_FAULT(23) mov r16=cr.ifa rsm psr.dt mov r31=pr ;; srlz.d br.sptk.many page_faultEND(daccess_rights) .align 256/////////////////////////////////////////////////////////////////////////////////////////// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)ENTRY(general_exception) DBG_FAULT(24) mov r16=cr.isr mov r31=pr ;; cmp4.eq p6,p0=0,r16(p6) br.sptk.many dispatch_illegal_op_fault ;; mov r19=24 // fault number br.sptk.many dispatch_to_fault_handlerEND(general_exception) .align 256/////////////////////////////////////////////////////////////////////////////////////////// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)ENTRY(disabled_fp_reg) DBG_FAULT(25) rsm psr.dfh // ensure we can access fph ;; srlz.d mov r31=pr mov r19=25 br.sptk.many dispatch_to_fault_handlerEND(disabled_fp_reg) .align 256/////////////////////////////////////////////////////////////////////////////////////////// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)ENTRY(nat_consumption) DBG_FAULT(26) FAULT(26)END(nat_consumption) .align 
256/////////////////////////////////////////////////////////////////////////////////////////// 0x5700 Entry 27 (size 16 bundles) Speculation (40)ENTRY(speculation_vector) DBG_FAULT(27) /* * A [f]chk.[as] instruction needs to take the branch to the recovery code but * this part of the architecture is not implemented in hardware on some CPUs, such * as Itanium. Thus, in general we need to emulate the behavior. IIM contains * the relative target (not yet sign extended). So after sign extending it we * simply add it to IIP. We also need to reset the EI field of the IPSR to zero, * i.e., the slot to restart into. * * cr.imm contains zero_ext(imm21) */ mov r18=cr.iim ;;
// [NOTE: the original web capture appended code-viewer UI text here
//  (keyboard-shortcut hints: copy, search, fullscreen, theme, font size).
//  The capture is truncated in the middle of the speculation_vector
//  handler above; see arch/ia64/kernel/ivt.S in the Linux kernel tree
//  for the remainder of this file.]