
vmx_entry.s

From the xen virtual machine source code package.
GLOBAL_ENTRY(ia64_vmm_entry)
/*
 *  must be at bank 0
 *  parameter:
 *  r17:cr.isr
 *  r18:vpd
 *  r19:vpsr
 *  r22:b0
 *  r23:predicate
 */
        mov r24=r22
        mov r25=r18
        tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT     // p1=vpsr.ic
(p1)    br.cond.sptk.few vmx_vps_resume_normal
(p2)    br.cond.sptk.many vmx_vps_resume_handler
        ;;
END(ia64_vmm_entry)
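/*
 * In rough C terms, the dispatch above does the following (an
 * illustrative sketch only; the branch targets are real, the
 * pseudo-signatures are assumed):
 *
 *      if (vpsr & (1UL << IA64_PSR_IC_BIT))
 *              vmx_vps_resume_normal();        // p1: vpsr.ic set
 *      else
 *              vmx_vps_resume_handler();       // p2: vpsr.ic clear
 */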
/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 *      need to switch to bank 0 and doesn't restore the scratch registers.
 *      To avoid leaking kernel bits, the scratch registers are set to
 *      the following known-to-be-safe values:
 *
 *            r1: restored (global pointer)
 *            r2: cleared
 *            r3: 1 (when returning to user-level)
 *        r8-r11: restored (syscall return value(s))
 *           r12: restored (user-level stack pointer)
 *           r13: restored (user-level thread pointer)
 *           r14: set to __kernel_syscall_via_epc
 *           r15: restored (syscall #)
 *       r16-r17: cleared
 *           r18: user-level b6
 *           r19: cleared
 *           r20: user-level ar.fpsr
 *           r21: user-level b0
 *           r22: cleared
 *           r23: user-level ar.bspstore
 *           r24: user-level ar.rnat
 *           r25: user-level ar.unat
 *           r26: user-level ar.pfs
 *           r27: user-level ar.rsc
 *           r28: user-level ip
 *           r29: user-level psr
 *           r30: user-level cfm
 *           r31: user-level pr
 *        f6-f11: cleared
 *            pr: restored (user-level pr)
 *            b0: restored (user-level rp)
 *            b6: restored
 *            b7: set to __kernel_syscall_via_epc
 *       ar.unat: restored (user-level ar.unat)
 *        ar.pfs: restored (user-level ar.pfs)
 *        ar.rsc: restored (user-level ar.rsc)
 *       ar.rnat: restored (user-level ar.rnat)
 *   ar.bspstore: restored (user-level ar.bspstore)
 *       ar.fpsr: restored (user-level ar.fpsr)
 *        ar.ccv: cleared
 *        ar.csd: cleared
 *        ar.ssd: cleared
 */
GLOBAL_ENTRY(ia64_leave_hypercall)
        PT_REGS_UNWIND_INFO(0)
        /*
         * work.need_resched etc. mustn't get changed by this CPU before it
         * returns to user- or fsys-mode, hence we disable interrupts early on.
         *
         * p6 controls whether current_thread_info()->flags needs to be checked
         * for extra work.  We always check for extra work when returning to
         * user-level.  With CONFIG_PREEMPT, we also check for extra work when
         * the preempt_count is 0.  After extra work processing has been
         * completed, execution resumes at .work_processed_syscall with p6 set
         * to 1 if the extra-work-check needs to be redone.
         */
        ;;
        adds r16=PT(R8)+16,r12
        ;;
        st8 [r16]=r8
        ;;
//(pUStk) rsm psr.i
        rsm psr.i
        cmp.eq pLvSys,p0=r0,r0          // pLvSys=1: leave from syscall
//(pUStk) cmp.eq.unc p6,p0=r0,r0        // p6 <- pUStk
        ;;
        br.call.sptk.many b0=leave_hypervisor_tail
.work_processed_syscall:
        // clean up bank 1 registers
        ;;
        adds r16=PT(R8)+16,r12
        ;;
        ld8 r8=[r16]
        ;;
        mov r16=r0
        mov r17=r0
        mov r18=r0
        mov r19=r0
        mov r20=r0
        mov r21=r0
        mov r22=r0
        mov r23=r0
        mov r24=r0
        mov r25=r0
        mov r26=r0
        mov r27=r0
        mov r28=r0
        mov r29=r0
        mov r30=r0
        mov r31=r0
        bsw.0
        ;;
        adds r2=PT(LOADRS)+16,r12
        adds r3=PT(AR_BSPSTORE)+16,r12
#ifndef XEN
        adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
        ;;
(p6)    ld4 r31=[r18]                           // load current_thread_info()->flags
#endif
        ;;
        ld8 r20=[r2],PT(B6)-PT(LOADRS)          // load ar.rsc value for "loadrs"
        nop.i 0
        ;;
//      mov r16=ar.bsp                          // M2   get existing backing store pointer
        ld8 r18=[r2],PT(R9)-PT(B6)              // load b6
#ifndef XEN
(p6)    and r15=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
#endif
        ;;
        ld8 r24=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
#ifndef XEN
(p6)    cmp4.ne.unc p6,p0=r15, r0               // any special work pending?
(p6)    br.cond.spnt .work_pending_syscall
#endif
        ;;
        // start restoring the state saved on the kernel stack (struct pt_regs):
        ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
        ld8 r11=[r3],PT(CR_IIP)-PT(R11)
//(pNonSys) break 0             // bug check: we shouldn't be here if pNonSys is TRUE!
        ;;
        invala                  // M0|1 invalidate ALAT
        rsm psr.i | psr.ic      // M2   turn off interrupts and interruption collection
#ifndef XEN
        cmp.eq p9,p0=r0,r0      // A    set p9 to indicate that we should restore cr.ifs
#endif
        ld8 r31=[r2],32         // M0|1 load cr.ipsr
        ld8 r30=[r3],16         // M0|1 load cr.iip
        ;;
//      ld8 r29=[r2],16         // M0|1 load cr.ifs
        ld8 r28=[r3],16         // M0|1 load ar.unat
//(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
        ;;
        ld8 r27=[r2],PT(B0)-PT(AR_PFS)  // M0|1 load ar.pfs
//(pKStk) mov r22=psr                   // M2   read PSR now that interrupts are disabled
        nop 0
        ;;
        ld8 r22=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
        ld8 r26=[r3],PT(PR)-PT(AR_RSC)  // M0|1 load ar.rsc
        mov f6=f0                       // F    clear f6
        ;;
        ld8 r25=[r2],PT(AR_FPSR)-PT(AR_RNAT)    // M0|1 load ar.rnat (may be garbage)
        ld8 r23=[r3],PT(R1)-PT(PR)              // M0|1 load predicates
        mov f7=f0                               // F    clear f7
        ;;
        ld8 r20=[r2],PT(R12)-PT(AR_FPSR)        // M0|1 load ar.fpsr
        ld8.fill r1=[r3],16                     // M0|1 load r1
//(pUStk) mov r17=1                             // A
        ;;
//(pUStk) st1 [r14]=r17                         // M2|3
        ld8.fill r13=[r3],16                    // M0|1
        mov f8=f0                               // F    clear f8
        ;;
        ld8.fill r12=[r2]                       // M0|1 restore r12 (sp)
#ifdef XEN
        ld8.fill r2=[r3]                        // M0|1
#else
        ld8.fill r15=[r3]                       // M0|1 restore r15
#endif
        mov b6=r18                              // I0   restore b6
        mov ar.fpsr=r20
//      addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
        mov f9=f0                               // F    clear f9
//(pKStk) br.cond.dpnt.many skip_rbs_switch     // B
//      srlz.d          // M0   ensure interruption collection is off (for cover)
//      shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
        mov r3=r21
        cover           // B    add current frame into dirty partition & set cr.ifs
        ;;
//(pUStk) ld4 r17=[r17] // M0|1 r17 = cpu_data->phys_stacked_size_p8
        mov r19=ar.bsp  // M2   get new backing store pointer
        addl r18=IA64_RBS_OFFSET,r3
        ;;
        mov r3=r0
        sub r18=r19,r18 // get byte size of existing "dirty" partition
        ;;
        shl r20=r18,16  // position dirty-partition size into ar.rsc.loadrs
        mov f10=f0      // F    clear f10
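/*
 * The shift above places the dirty-partition byte count into the loadrs
 * field of ar.rsc (bits 16 and up).  Roughly, in C (an illustrative
 * sketch; rbs_base stands for current + IA64_RBS_OFFSET):
 *
 *      unsigned long dirty = ar_bsp - rbs_base;  // bytes to reload later
 *      unsigned long rsc_loadrs = dirty << 16;   // align with rsc.loadrs
 *
 * The value left in r20 is presumably consumed by vmx_rbs_switch
 * (branched to below) when it executes "loadrs" to restore the guest's
 * register stack.
 */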
#ifdef XEN
        mov r14=r0
#else
        movl r14=__kernel_syscall_via_epc       // X
#endif
        ;;
        mov.m ar.csd=r0         // M2   clear ar.csd
        mov.m ar.ccv=r0         // M2   clear ar.ccv
        mov b7=r14              // I0   clear b7 (hint with __kernel_syscall_via_epc)
        mov.m ar.ssd=r0         // M2   clear ar.ssd
        mov f11=f0              // F    clear f11
        br.cond.sptk.many vmx_rbs_switch        // B
END(ia64_leave_hypercall)

/*
 * in0: new rr7
 * in1: virtual address of guest_vhpt
 * in2: virtual address of guest shared_info
 * r8: will contain old rid value
 */

#define PSR_BITS_TO_CLEAR                                           \
        (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB |     \
         IA64_PSR_RT | IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI |    \
         IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_IC)

#define PSR_BITS_TO_SET    IA64_PSR_BN

GLOBAL_ENTRY(__vmx_switch_rr7)
        // not sure this unwind statement is correct...
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
        alloc loc1 = ar.pfs, 4, 7, 0, 0
1:{
        mov r28  = in0                  // copy new rr7 value
        mov r8   = ip                   // save ip to compute branch
        mov loc0 = rp                   // save rp
};;
        .body
        movl loc2=PERCPU_ADDR
        ;;
        tpa loc2 = loc2                 // get physical address of per cpu data
        tpa r3 = r8                     // get physical address of ip
        dep loc5 = 0,in1,60,4           // get physical address of guest_vhpt
        dep loc6 = 0,in2,60,4           // get physical address of privregs
        ;;
        dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT  // mask off granule offset
        mov loc4 = psr                  // save psr
        ;;
        mov loc3 = ar.rsc               // save RSE configuration
        ;;
        mov ar.rsc = 0                  // put RSE in enforced lazy, LE mode
        movl r16=PSR_BITS_TO_CLEAR
        movl r17=PSR_BITS_TO_SET
        ;;
        or loc4 = loc4,r17              // add in psr the bits to set
        ;;
        andcm r16=loc4,r16              // removes bits to clear from psr
        br.call.sptk.many rp=ia64_switch_mode_phys
1:
        // now in physical mode with psr.i/ic off so do rr7 switch
        dep r16=-1,r0,61,3
        ;;
        mov rr[r16]=in0
        ;;
        srlz.d
        ;;

        // re-pin mappings for kernel text and data
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        movl r17=KERNEL_START
        ;;
        ptr.i   r17,r18
        ptr.d   r17,r18
        ;;
        mov cr.itir=r18
        mov cr.ifa=r17
        mov r16=IA64_TR_KERNEL
        movl r25 = PAGE_KERNEL
        // r2 = physical address of ip, truncated to KERNEL_TR_PAGE_SHIFT
        //    = ia64_tpa(ip) & ~(KERNEL_TR_PAGE_SIZE - 1)
        dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
        ;;
        or r24=r2,r25
        ;;
        srlz.i
        ;;
        itr.i itr[r16]=r24
        ;;
        itr.d dtr[r16]=r24
        ;;

        // re-pin mapping for stack (current)
        mov r26=IA64_GRANULE_SHIFT<<2
        dep r21=0,r13,60,4              // physical address of "current"
        ;;
        ptr.d   r13,r26
        or r23=r21,r25                  // construct PA | page properties
        mov cr.itir=r26
        mov cr.ifa=r13                  // VA of next task...
        mov r18=IA64_TR_CURRENT_STACK
        ;;
        itr.d dtr[r18]=r23              // wire in new mapping...

        // re-pin mappings for per-cpu data
        movl r22 = PERCPU_ADDR
        ;;
        mov r24=IA64_TR_PERCPU_DATA
        or loc2 = r25,loc2              // construct PA | page properties
        mov r23=PERCPU_PAGE_SHIFT<<2
        ;;
        ptr.d   r22,r23
        ;;
        mov cr.itir=r23
        mov cr.ifa=r22
        ;;
        itr.d dtr[r24]=loc2             // wire in new mapping...
        ;;
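/*
 * Each re-pin above follows the same TLB sequence: purge any overlapping
 * entry (ptr.i/ptr.d), program the mapping size (cr.itir) and virtual
 * address (cr.ifa), then insert a pinned translation register entry
 * (itr.i/itr.d).  In C-like pseudocode (an illustrative sketch, not a
 * real API):
 *
 *      ptr(va, page_shift << 2);       // purge stale translations
 *      cr_itir = page_shift << 2;      // page size of the new entry
 *      cr_ifa  = va;                   // virtual address to be mapped
 *      itr(slot, pa | PAGE_KERNEL);    // wire in PA + page properties
 */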
        // re-pin mappings for guest_vhpt,
        // unless it overlaps with IA64_TR_CURRENT_STACK
        // r21 = (current physical addr) & ~(IA64_GRANULE_SIZE - 1)
        dep r21=0,r21,0,IA64_GRANULE_SHIFT
        // r17 = (guest_vhpt physical addr) & ~(IA64_GRANULE_SIZE - 1)
        dep r17=0,loc5,0,IA64_GRANULE_SHIFT
        ;;
        cmp.eq p7,p0=r17,r21            // check overlap with current stack
(p7)    br.cond.sptk .vhpt_overlaps
        mov r24=IA64_TR_VHPT
        ;;
        or loc5 = r25,loc5              // construct PA | page properties
        mov r23 = IA64_GRANULE_SHIFT<<2
        ;;
        ptr.d   in1,r23
        ;;
        mov cr.itir=r23
        mov cr.ifa=in1
        ;;
        itr.d dtr[r24]=loc5             // wire in new mapping...
        ;;
.vhpt_overlaps:

        // r16, r19, r20 are used by
        //  ia64_switch_mode_phys()/ia64_switch_mode_virt()
        // re-pin mappings for privregs
        // r21  = (current physical addr) & ~(IA64_GRANULE_SIZE - 1)
        // r17  = (guest_vhpt physical addr) & ~(IA64_GRANULE_SIZE - 1)
        // loc6 = (privregs physical addr) & ~(IA64_GRANULE_SIZE - 1)
        cmp.ne.unc p7,p0=r21,loc6       // check overlap with current stack
        ;;
(p7)    cmp.ne.unc p8,p0=r17,loc6       // check overlap with guest_vhpt
        ;;
        // loc6 = ((privregs phys) & ~(IA64_GRANULE_SIZE - 1)) | PAGE_KERNEL
        or loc6 = r25,loc6              // construct PA | page properties
        ;;
        mov r22=IA64_TR_VPD
        mov r24=IA64_TR_MAPPED_REGS
        mov r23=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.i   in2,r23
(p8)    ptr.d   in2,r23
        mov cr.itir=r23
        mov cr.ifa=in2
        ;;
        itr.i itr[r22]=loc6             // wire in new mapping...
        ;;
(p8)    itr.d dtr[r24]=loc6             // wire in new mapping...
        ;;

        // done, switch back to virtual and return
        mov r16=loc4                    // r16 = original psr
        br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
        mov ar.pfs = loc1
        mov rp = loc0
        ;;
        mov ar.rsc=loc3                 // restore RSE configuration
        srlz.d                          // serialize restoration of psr.l
        br.ret.sptk.many rp
END(__vmx_switch_rr7)
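/*
 * The overlap checks above guard the itr inserts: on ia64, inserting a
 * translation register entry that overlaps an existing pinned entry can
 * raise a machine check, so granule-aligned physical addresses are
 * compared first.  Roughly, in C (an illustrative sketch):
 *
 *      if ((vhpt_pa & ~(GRANULE_SIZE - 1)) ==
 *          (current_pa & ~(GRANULE_SIZE - 1)))
 *              goto vhpt_overlaps;     // stack TR already covers the VHPT
 */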
