optvfault.S
/*
 * arch/ia64/vmx/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 */

#include <linux/config.h>
#include <asm/config.h>
#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/vmx_vpd.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/asm-offsets.h>
#include <asm-ia64/vmx_mm_def.h>
#include <asm-ia64/vmx_phy_mode.h>

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH

// Inputs are: r21 (= current), r24 (= cause), r25 (= insn), r31 (= saved pr)

//mov r1=ar3 (only itc is virtualized)
GLOBAL_ENTRY(vmx_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
	br.many vmx_virtualization_fault_back
#endif
	add r18=VCPU_VTM_OFFSET_OFS,r21
	add r16=VCPU_VTM_LAST_ITC_OFS,r21
	extr.u r17=r25,6,7
	;;
	ld8 r18=[r18]
	mov r19=ar.itc
	mov r24=b0
	;;
	ld8 r16=[r16]
	add r19=r19,r18
	movl r20=asm_mov_to_reg
	;;
	adds r30=vmx_resume_to_guest-asm_mov_to_reg,r20
	shladd r17=r17,4,r20
	cmp.gtu p6,p0=r16,r19
	;;
	(p6) mov r19=r16
	mov b0=r17
	br.sptk.few b0
	;;
END(vmx_asm_mov_from_ar)


// mov r1=rr[r3]
GLOBAL_ENTRY(vmx_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
	br.many vmx_virtualization_fault_back
#endif
	extr.u r16=r25,20,7
	extr.u r17=r25,6,7
	movl r20=asm_mov_from_reg
	;;
	adds r30=vmx_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
	shladd r16=r16,4,r20
	mov r24=b0
	;;
	add r27=VCPU_VRR0_OFS,r21
	mov b0=r16
	br.many b0
	;;
vmx_asm_mov_from_rr_back_1:
	adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
	adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
	shr.u r26=r19,61
	;;
	shladd r17=r17,4,r22
	shladd r27=r26,3,r27
	;;
	ld8 r19=[r27]
	mov b0=r17
	br.many b0
END(vmx_asm_mov_from_rr)


// mov rr[r3]=r2
GLOBAL_ENTRY(vmx_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
	br.many vmx_virtualization_fault_back
#endif
	add r22=IA64_VCPU_DOMAIN_OFFSET,r21
	extr.u r16=r25,20,7		// r3
	extr.u r17=r25,13,7		// r2
	;;
	ld8 r22=[r22]			// Get domain
	movl r20=asm_mov_from_reg
	;;
	adds r30=vmx_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
	shladd r16=r16,4,r20		// get r3
	mov r18=b0			// save b0
	;;
	add r27=VCPU_VRR0_OFS,r21
	mov b0=r16
	br.many b0
	;;
vmx_asm_mov_to_rr_back_1:
	adds r30=vmx_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
	shr.u r23=r19,61		// get RR #
	shladd r17=r17,4,r20		// get r2
	;;
	//if rr7, go back
	cmp.eq p6,p0=7,r23
	mov b0=r18			// restore b0
	(p6) br.cond.dpnt.many vmx_virtualization_fault_back
	;;
	mov r28=r19			// save r3
	mov b0=r17
	br.many b0
vmx_asm_mov_to_rr_back_2:
	adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
	shladd r27=r23,3,r27		// address of VRR
	add r22=IA64_DOMAIN_RID_BITS_OFFSET,r22
	;;
	ld1 r22=[r22]			// Load rid_bits from domain
	mov b0=r18			// restore b0
	adds r16=IA64_VCPU_STARTING_RID_OFFSET,r21
	;;
	ld4 r16=[r16]			// load starting_rid
	extr.u r17=r19,8,24		// Extract RID
	;;
	shr r17=r17,r22			// Shift out used bits
	shl r16=r16,8
	;;
	add r20=r19,r16
	cmp.ne p6,p0=0,r17		// If reserved RID bits are set, use C fall back.
	(p6) br.cond.dpnt.many vmx_virtualization_fault_back
	;;
	//mangling rid 1 and 3
	extr.u r16=r20,8,8
	extr.u r17=r20,24,8
	mov r24=r18			// saved b0 for resume
	;;
	extr.u r18=r20,2,6		// page size
	dep r20=r16,r20,24,8
	mov b0=r30
	;;
	dep r20=r17,r20,8,8
	;;
	//set ve 1
	dep r20=-1,r20,0,1
	// If ps > PAGE_SHIFT, use PAGE_SHIFT
	cmp.lt p6,p0=PAGE_SHIFT,r18
	;;
	(p6) mov r18=PAGE_SHIFT
	;;
	(p6) dep r20=r18,r20,2,6
	;;
	st8 [r27]=r19			// Write to vrr.
	// Write to save_rr if rr=0 or rr=4.
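	// Regions 0 and 4 back the guest's metaphysical mode: the mangled value
	// is also stored in saved_rr0/saved_rr4 below, and the hardware region
	// register is only loaded (p7) when the vCPU is currently in virtual
	// MMU mode, so the metaphysical RID stays in place otherwise.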
	cmp.eq p6,p0=0,r23
	;;
	cmp.eq.or p6,p0=4,r23
	;;
	adds r16=IA64_VCPU_MMU_MODE_OFFSET,r21
	(p6) adds r17=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
	;;
	ld1 r16=[r16]
	cmp.eq p7,p0=r0,r0
	(p6) shladd r17=r23,1,r17
	;;
	(p6) st8 [r17]=r20
	(p6) cmp.eq p7,p0=VMX_MMU_VIRTUAL,r16	// Set physical rr if in virt mode
	;;
	(p7) mov rr[r28]=r20
	br.many b0
END(vmx_asm_mov_to_rr)


//rsm
GLOBAL_ENTRY(vmx_asm_rsm)
#ifndef ACCE_RSM
	br.many vmx_virtualization_fault_back
#endif
	add r16=IA64_VPD_BASE_OFFSET,r21
	extr.u r26=r25,6,21		// Imm21
	extr.u r27=r25,31,2		// I2d
	;;
	ld8 r16=[r16]
	extr.u r28=r25,36,1		// I
	dep r26=r27,r26,21,2
	;;
	add r17=VPD_VPSR_START_OFFSET,r16
	add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
	//r26 is imm24
	dep r26=r28,r26,23,1
	;;
	ld8 r18=[r17]
	// xenoprof
	// Don't change mPSR.pp.
	// It is manipulated by xenoprof.
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_PP
	ld1 r23=[r22]
	sub r27=-1,r26			// ~r26
	mov r24=b0
	;;
	mov r20=cr.ipsr
	or r28=r27,r28			// Keep IC,I,DT,SI
	and r19=r18,r27			// Update vpsr
	;;
	st8 [r17]=r19
	and r20=r20,r28			// Update ipsr
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	;;
	ld8 r27=[r27]
	;;
	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
	;;
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1	// Keep dfh
	;;
	mov cr.ipsr=r20
	cmp.ne p6,p0=VMX_MMU_VIRTUAL,r23
	;;
	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
	(p6) br.dptk vmx_resume_to_guest	// DT not cleared or already in phy mode
	;;
	// Switch to meta physical mode D.
	add r26=IA64_VCPU_META_RID_D_OFFSET,r21
	mov r23=VMX_MMU_PHY_D
	;;
	ld8 r26=[r26]
	st1 [r22]=r23
	dep.z r28=4,61,3
	;;
	mov rr[r0]=r26
	;;
	mov rr[r28]=r26
	;;
	srlz.d
	br.many vmx_resume_to_guest
END(vmx_asm_rsm)


//ssm
GLOBAL_ENTRY(vmx_asm_ssm)
#ifndef ACCE_SSM
	br.many vmx_virtualization_fault_back
#endif
	add r16=IA64_VPD_BASE_OFFSET,r21
	extr.u r26=r25,6,21
	extr.u r27=r25,31,2
	;;
	ld8 r16=[r16]
	extr.u r28=r25,36,1
	dep r26=r27,r26,21,2
	;;				//r26 is imm24
	add r27=VPD_VPSR_START_OFFSET,r16
	dep r26=r28,r26,23,1
	;;				//r19 vpsr
	ld8 r29=[r27]
	mov r24=b0
	dep r17=0,r26,IA64_PSR_PP_BIT,1	// For xenoprof
					// Don't change mPSR.pp
					// It is maintained by xenoprof.
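	// At this point r26 holds the decoded imm24, r29 the current vpsr, and
	// r17 the imm with psr.pp masked out (mPSR.pp is left to xenoprof).  The
	// code below sets the requested bits in the virtual PSR, mirrors them
	// into cr.ipsr, and, if psr.it/dt/rt all become set while the vCPU was
	// in physical MMU mode, reloads rr0/rr4 from saved_rr and switches back
	// to VMX_MMU_VIRTUAL before choosing between vmx_resume_to_guest and
	// vmx_asm_dispatch_vexirq.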
	;;
	add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
	mov r20=cr.ipsr
	or r19=r29,r26
	;;
	ld1 r23=[r22]			// mmu_mode
	st8 [r27]=r19			// vpsr
	or r20=r20,r17
	;;
	mov cr.ipsr=r20
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	;;
	and r19=r28,r19
	cmp.eq p6,p0=VMX_MMU_VIRTUAL,r23
	;;
	cmp.ne.or p6,p0=r28,r19		// (vpsr & (it+dt+rt)) /= (it+dt+rt) ie stay in phy
	(p6) br.dptk vmx_asm_ssm_1
	;;
	add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
	add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
	mov r23=VMX_MMU_VIRTUAL
	;;
	ld8 r26=[r26]
	ld8 r27=[r27]
	st1 [r22]=r23
	dep.z r28=4,61,3
	;;
	mov rr[r0]=r26
	;;
	mov rr[r28]=r27
	;;
	srlz.d
	;;
vmx_asm_ssm_1:
	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
	;;
	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
	(p6) br.dptk vmx_resume_to_guest
	;;
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
	;;
	ld8 r29=[r29]
	ld8 r30=[r30]
	;;
	extr.u r17=r29,4,4
	extr.u r18=r29,16,1
	;;
	dep r17=r18,r17,4,1
	;;
	cmp.gt p6,p0=r30,r17
	(p6) br.dpnt.few vmx_asm_dispatch_vexirq
	br.many vmx_resume_to_guest
END(vmx_asm_ssm)


//mov psr.l=r2
GLOBAL_ENTRY(vmx_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
	br.many vmx_virtualization_fault_back
#endif
	add r16=IA64_VPD_BASE_OFFSET,r21
	extr.u r26=r25,13,7		//r2
	;;
	ld8 r16=[r16]
	movl r20=asm_mov_from_reg
	;;
	adds r30=vmx_asm_mov_to_psr_back-asm_mov_from_reg,r20
	shladd r26=r26,4,r20
	mov r24=b0
	;;
	add r27=VPD_VPSR_START_OFFSET,r16
	mov b0=r26
	br.many b0
	;;
vmx_asm_mov_to_psr_back:
	ld8 r17=[r27]			// vpsr
	add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
	dep r19=0,r19,32,32		// Clear bits 32-63
	;;
	ld1 r23=[r22]			// mmu_mode
	dep r18=0,r17,0,32
	;;
	or r30=r18,r19
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	;;
	st8 [r27]=r30			// set vpsr
	and r27=r28,r30
	and r29=r28,r17
	;;
	cmp.eq p5,p0=r29,r27		// (old_vpsr & (dt+rt+it)) == (new_vpsr & (dt+rt+it))
	cmp.eq p6,p7=r28,r27		// (new_vpsr & (dt+rt+it)) == (dt+rt+it)
	(p5) br.many vmx_asm_mov_to_psr_1	// no change
	;;
	//virtual to physical D
	(p7) add r26=IA64_VCPU_META_RID_D_OFFSET,r21
	(p7) add r27=IA64_VCPU_META_RID_D_OFFSET,r21
	(p7) mov r23=VMX_MMU_PHY_D
	;;
	//physical to virtual
	(p6) add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
	(p6) add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
	(p6) mov r23=VMX_MMU_VIRTUAL
	;;
	ld8 r26=[r26]
	ld8 r27=[r27]
	st1 [r22]=r23
	dep.z r28=4,61,3
	;;
	mov rr[r0]=r26
	;;
	mov rr[r28]=r27
	;;
	srlz.d
	;;
vmx_asm_mov_to_psr_1:
	mov r20=cr.ipsr
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
	;;
	tbit.nz p7,p0=r20,IA64_PSR_PP_BIT	// For xenoprof
	or r19=r19,r28
	dep r20=0,r20,0,32
	;;
	add r20=r19,r20
	mov b0=r24
	;;
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	(p7) dep r20=-1,r20,IA64_PSR_PP_BIT,1	// For xenoprof
						// Don't change mPSR.pp
						// It is maintained by xenoprof.
	;;
	ld8 r27=[r27]
	;;
	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
	;;
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	;;
	mov cr.ipsr=r20
	cmp.ne p6,p0=r0,r0
	;;
	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
	(p6) br.dpnt.few vmx_resume_to_guest
	;;
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
	;;
	ld8 r29=[r29]
	ld8 r30=[r30]
	;;
	extr.u r17=r29,4,4
	extr.u r18=r29,16,1
	;;
	dep r17=r18,r17,4,1
	;;
	cmp.gt p6,p0=r30,r17
	(p6) br.dpnt.few vmx_asm_dispatch_vexirq
	br.many vmx_resume_to_guest
END(vmx_asm_mov_to_psr)


ENTRY(vmx_asm_dispatch_vexirq)
	//increment iip
	mov r16=cr.ipsr
	;;
	extr.u r17=r16,IA64_PSR_RI_BIT,2
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
	;;
	(p6) mov r18=cr.iip
	(p6) mov r17=r0
	(p7) add r17=1,r17
	;;
	(p6) add r18=0x10,r18
	dep r16=r17,r16,IA64_PSR_RI_BIT,2
	;;
	(p6) mov cr.iip=r18
	mov cr.ipsr=r16
	br.many vmx_dispatch_vexirq
END(vmx_asm_dispatch_vexirq)


// thash r1=r3
// TODO: add support when pta.vf = 1
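// The handler below emulates "thash r1=r3": it fetches the guest's r3 value
// (the virtual address) into r19 via the asm_mov_from_reg jump table, then
// uses the region's VRR and the virtual PTA from the VPD to compute the guest
// VHPT hash address.  Only the short-format VHPT (pta.vf = 0) is handled
// here, per the TODO above.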
GLOBAL_ENTRY(vmx_asm_thash)
#ifndef ACCE_THASH
	br.many vmx_virtualization_fault_back
#endif
	extr.u r17=r25,20,7		// get r3 from opcode in r25
	extr.u r18=r25,6,7		// get r1 from opcode in r25
	movl r20=asm_mov_from_reg
	;;
	adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
	shladd r17=r17,4,r20		// get addr of MOVE_FROM_REG(r17)
	adds r16=IA64_VPD_BASE_OFFSET,r21	// get vcpu.arch.priveregs
	mov r24=b0			// save b0
	;;
	ld8 r16=[r16]			// get VPD addr
	mov b0=r17
	br.many b0			// r19 return value
	;;
vmx_asm_thash_back1:
	shr.u r23=r19,61		// get RR number
	adds r25=VCPU_VRR0_OFS,r21	// get vcpu->arch.arch_vmx.vrr[0]'s addr
	adds r16=IA64_VPD_VPTA_OFFSET,r16	// get virtual pta
	;;
	shladd r27=r23,3,r25		// get vcpu->arch.arch_vmx.vrr[r23]'s addr
	ld8 r17=[r16]			// get virtual PTA
	mov r26=1
	;;