vmx_vcpu.c
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Fred Yang (fred.yang@intel.com)
 *  Arun Sharma (arun.sharma@intel.com)
 *  Shaofan Li (Susie Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/sched.h>
#include <public/xen.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/vmx_phy_mode.h>
#include <asm/debugger.h>

/**************************************************************************
 VCPU general register access routines
**************************************************************************/

#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <linux/efi.h>

//unsigned long last_guest_rsm = 0x0;

#ifdef VTI_DEBUG
struct guest_psr_bundle{
    unsigned long ip;
    unsigned long psr;
};

struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
#endif


void
vmx_ia64_set_dcr(VCPU *v)
{
    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    unsigned long dcr_bits = (IA64_DEFAULT_DCR_BITS & ~IA64_DCR_PP) |
        (ia64_getreg(_IA64_REG_CR_DCR) & IA64_DCR_PP);

    // if the guest is running at cpl > 0, set dcr.dm = 1;
    // if the guest is running at cpl = 0, set dcr.dm = 0,
    // because the guest OS may ld.s on a tr-mapped page.
    if (!(VCPU(v, vpsr) & IA64_PSR_CPL))
        dcr_bits &= ~IA64_DCR_DM;

    ia64_set_dcr(dcr_bits);
}
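/*
 * Aside (not part of the original file): the dcr.dm decision above is plain
 * bit arithmetic on the default DCR value. The sketch below, disabled with
 * #if 0 like the other dead-code blocks in this file, walks through it with
 * hypothetical stand-in constants (DEMO_DCR_DM, DEMO_PSR_CPL, DEMO_DEFAULT)
 * in place of the real IA64_DCR_DM / IA64_PSR_CPL / IA64_DEFAULT_DCR_BITS.
 */
#if 0
#include <stdio.h>

#define DEMO_DCR_DM   (1UL << 8)             /* stand-in for IA64_DCR_DM */
#define DEMO_PSR_CPL  (3UL << 32)            /* stand-in for the psr.cpl mask */
#define DEMO_DEFAULT  (DEMO_DCR_DM | 0x7fUL) /* stand-in default DCR bits */

/* Mirror of the rule above: deferral of speculative load faults (dcr.dm)
 * stays enabled only while the guest runs at cpl > 0. */
static unsigned long demo_dcr_for(unsigned long vpsr)
{
    unsigned long dcr_bits = DEMO_DEFAULT;
    if (!(vpsr & DEMO_PSR_CPL))   /* guest at cpl 0: clear dcr.dm */
        dcr_bits &= ~DEMO_DCR_DM;
    return dcr_bits;
}

int main(void)
{
    printf("cpl=0 -> dcr=%#lx\n", demo_dcr_for(0));            /* dm cleared */
    printf("cpl>0 -> dcr=%#lx\n", demo_dcr_for(DEMO_PSR_CPL)); /* dm kept    */
    return 0;
}
#endif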
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{
    u64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val = VCPU(vcpu, vpsr);

    regs = vcpu_regs(vcpu);
    /* We only support guest as:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic
     */
    if (value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) {
        panic_domain(regs, "Setting unsupported guest psr!");
    }

    /*
     * For those IA64_PSR bits: id/da/dd/ss/ed/ia
     * Since these bits become 0 after each successful instruction,
     * keep them cleared in vpsr; they still reach the machine
     * cr.ipsr through the mask merge below.
     */
    VCPU(vcpu, vpsr) = value &
            (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
               IA64_PSR_ED | IA64_PSR_IA));

    if (!old_psr.i && (value & IA64_PSR_I)) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val = VCPU(vcpu, vpsr);
#ifdef VTI_DEBUG
    guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
    guest_psr_buf[guest_psr_index].psr = new_psr.val;
    if (++guest_psr_index >= 100)
        guest_psr_index = 0;
#endif
#if 0
    if (old_psr.i != new_psr.i) {
        if (old_psr.i)
            last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
        else
            last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     *  ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
           IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
           IA64_PSR_VM;

    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mask |= IA64_PSR_PP;

    regs->cr_ipsr = (regs->cr_ipsr & mask) | (value & (~mask));

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        regs->cr_ipsr |= IA64_PSR_DFH;

    if (unlikely(vcpu->domain->debugger_attached)) {
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
            regs->cr_ipsr |= IA64_PSR_SS;

        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
            regs->cr_ipsr |= IA64_PSR_DB;
    }

    check_mm_mode_switch(vcpu, old_psr, new_psr);
}

IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = VCPU(vcpu, vpsr);

    if (!vpsr.ic)
        VCPU(vcpu, ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}
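/*
 * Aside (not part of the original file): vmx_vcpu_set_psr() above merges the
 * guest-written value into the machine cr.ipsr with the classic
 * (old & mask) | (new & ~mask) idiom: bits named in `mask` stay under
 * hypervisor control, everything else follows the guest. The #if 0 sketch
 * below demonstrates the idiom on made-up 8-bit values; all names are
 * illustrative, not taken from this file.
 */
#if 0
#include <assert.h>

/* Merge: keep the `mask` bits from `kept`, take the rest from `incoming`. */
static unsigned long demo_merge(unsigned long kept, unsigned long incoming,
                                unsigned long mask)
{
    return (kept & mask) | (incoming & ~mask);
}

int main(void)
{
    unsigned long machine = 0xF0;  /* pretend machine PSR        */
    unsigned long guest   = 0x0F;  /* pretend guest-written PSR  */
    unsigned long mask    = 0xC3;  /* bits the hypervisor owns   */

    /* Owned bits (0xC3) survive from `machine`; the rest follow `guest`:
     * (0xF0 & 0xC3) | (0x0F & 0x3C) == 0xC0 | 0x0C == 0xCC. */
    assert(demo_merge(machine, guest, mask) == 0xCC);
    return 0;
}
#endif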
/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
 * so that no tlb miss is allowed.
 */
void vmx_vcpu_set_rr_fast(VCPU *vcpu, u64 reg, u64 val)
{
    u64 rrval;

    VMX(vcpu, vrr[reg >> VRN_SHIFT]) = val;
    switch((u64)(reg >> VRN_SHIFT)) {
    case VRN4:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    case VRN0:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    default:
        if (likely(vcpu == current))
            ia64_set_rr(reg, vrrtomrr(vcpu, val));
        break;
    }
}

void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
                    void *shared_arch_info)
{
    __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
    __get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
    __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
    __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
}

IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
    u64 rrval;

    if (unlikely(is_reserved_rr_rid(vcpu, val))) {
        gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
        return IA64_RSVDREG_FAULT;
    }

    VMX(vcpu, vrr[reg >> VRN_SHIFT]) = val;
    switch((u64)(reg >> VRN_SHIFT)) {
    case VRN7:
        if (likely(vcpu == current))
            vmx_switch_rr7(vrrtomrr(vcpu, val),
                           (void *)vcpu->arch.vhpt.hash,
                           vcpu->arch.privregs);
        break;
    case VRN4:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    case VRN0:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    default:
        if (likely(vcpu == current))
            ia64_set_rr(reg, vrrtomrr(vcpu, val));
        break;
    }

    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

u64 vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg)
{
    return ((u64)ia64_get_pkr(reg));
}

IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
{
    ia64_set_pkr(reg, val);
    return (IA64_NO_FAULT);
}

#if 0
int tlb_debug=0;
check_entry(u64 va, u64 ps, char *str)
{
    va &= ~(PSIZE(ps) - 1);
    if (va == 0x2000000002908000UL ||
        va == 0x600000000000C000UL) {
        stop();
    }
    if (tlb_debug)
        printk("%s at %lx %lx\n", str, va, 1UL << ps);
}
#endif

u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
{
    ia64_rr rr, rr1;
    vcpu_get_rr(vcpu, ifa, &rr.rrval);
    rr1.rrval = 0;
    rr1.ps = rr.ps;
    rr1.rid = rr.rid;
    return (rr1.rrval);
}

/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
 * so that no tlb miss is allowed.
 */
void vmx_vcpu_mov_to_psr_fast(VCPU *vcpu, u64 value)
{
    /* TODO: Only allowed for current vcpu */
    u64 old_vpsr, new_vpsr, mipsr, mask;
    old_vpsr = VCPU(vcpu, vpsr);
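/*
 * Aside (not part of the original file): the region-register helpers above
 * index vrr[] by region number, i.e. the top three bits of the virtual
 * address (reg >> VRN_SHIFT, where VRN_SHIFT is 61 on ia64), and
 * vmx_vcpu_get_itir_on_fault() builds its result by copying only ps and rid
 * out of the faulting address's rr. The #if 0 sketch below mimics both,
 * using a hypothetical demo_rr bitfield standing in for the real ia64_rr.
 */
#if 0
#include <stdio.h>

#define DEMO_VRN_SHIFT 61   /* stand-in for VRN_SHIFT */

/* Hypothetical mirror of the ia64_rr layout: ve(0), ps(2..7), rid(8..31). */
typedef union {
    unsigned long rrval;
    struct {
        unsigned long ve  : 1;
        unsigned long rv0 : 1;
        unsigned long ps  : 6;
        unsigned long rid : 24;
        unsigned long rv1 : 32;
    } f;
} demo_rr;

int main(void)
{
    /* Top three address bits select the region: 0x6... -> region 3. */
    unsigned long va = 0x6000000000000000UL;
    printf("vrn = %lu\n", va >> DEMO_VRN_SHIFT);   /* prints 3 */

    /* As in vmx_vcpu_get_itir_on_fault(): start from a zeroed value and
     * carry over only the page size and region id of the faulting rr. */
    demo_rr rr = { .rrval = 0 }, itir = { .rrval = 0 };
    rr.f.ps = 14;        /* 16KB pages */
    rr.f.rid = 0x1234;
    itir.f.ps = rr.f.ps;
    itir.f.rid = rr.f.rid;
    printf("itir = %#lx\n", itir.rrval);
    return 0;
}
#endif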