vmx_virt.c
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_virt.c:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Fred Yang (fred.yang@intel.com)
 *  Shaofan Li (Susie Li) <susie.li@intel.com>
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <asm/bundle.h>
#include <asm/vmx_vcpu.h>
#include <asm/processor.h>
#include <asm/delay.h>  // Debug only
#include <asm/vmmu.h>
#include <asm/vmx_mm_def.h>
#include <asm/smp.h>
#include <asm/vmx.h>
#include <asm/virt_event.h>
#include <asm/vmx_phy_mode.h>
#include <asm/debugger.h>

#ifdef BYPASS_VMAL_OPCODE
static void
ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, u64 *cause)
{
    *cause = 0;
    switch (slot_type) {
    case M:
        if (inst.generic.major == 0) {
            if (inst.M28.x3 == 0) {
                if (inst.M44.x4 == 6) {
                    *cause = EVENT_SSM;
                } else if (inst.M44.x4 == 7) {
                    *cause = EVENT_RSM;
                } else if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
                    *cause = EVENT_MOV_TO_AR_IMM;
                }
            }
        } else if (inst.generic.major == 1) {
            if (inst.M28.x3 == 0) {
                if (inst.M32.x6 == 0x2c) {
                    *cause = EVENT_MOV_TO_CR;
                } else if (inst.M33.x6 == 0x24) {
                    *cause = EVENT_MOV_FROM_CR;
                } else if (inst.M35.x6 == 0x2d) {
                    *cause = EVENT_MOV_TO_PSR;
                } else if (inst.M36.x6 == 0x25) {
                    *cause = EVENT_MOV_FROM_PSR;
                } else if (inst.M29.x6 == 0x2A) {
                    *cause = EVENT_MOV_TO_AR;
                } else if (inst.M31.x6 == 0x22) {
                    *cause = EVENT_MOV_FROM_AR;
                } else if (inst.M45.x6 == 0x09) {
                    *cause = EVENT_PTC_L;
                } else if (inst.M45.x6 == 0x0A) {
                    *cause = EVENT_PTC_G;
                } else if (inst.M45.x6 == 0x0B) {
                    *cause = EVENT_PTC_GA;
                } else if (inst.M45.x6 == 0x0C) {
                    *cause = EVENT_PTR_D;
                } else if (inst.M45.x6 == 0x0D) {
                    *cause = EVENT_PTR_I;
                } else if (inst.M46.x6 == 0x1A) {
                    *cause = EVENT_THASH;
                } else if (inst.M46.x6 == 0x1B) {
                    *cause = EVENT_TTAG;
                } else if (inst.M46.x6 == 0x1E) {
                    *cause = EVENT_TPA;
                } else if (inst.M46.x6 == 0x1F) {
                    *cause = EVENT_TAK;
                } else if (inst.M47.x6 == 0x34) {
                    *cause = EVENT_PTC_E;
                } else if (inst.M41.x6 == 0x2E) {
                    *cause = EVENT_ITC_D;
                } else if (inst.M41.x6 == 0x2F) {
                    *cause = EVENT_ITC_I;
                } else if (inst.M42.x6 == 0x00) {
                    *cause = EVENT_MOV_TO_RR;
                } else if (inst.M42.x6 == 0x01) {
                    *cause = EVENT_MOV_TO_DBR;
                } else if (inst.M42.x6 == 0x02) {
                    *cause = EVENT_MOV_TO_IBR;
                } else if (inst.M42.x6 == 0x03) {
                    *cause = EVENT_MOV_TO_PKR;
                } else if (inst.M42.x6 == 0x04) {
                    *cause = EVENT_MOV_TO_PMC;
                } else if (inst.M42.x6 == 0x05) {
                    *cause = EVENT_MOV_TO_PMD;
                } else if (inst.M42.x6 == 0x0E) {
                    *cause = EVENT_ITR_D;
                } else if (inst.M42.x6 == 0x0F) {
                    *cause = EVENT_ITR_I;
                } else if (inst.M43.x6 == 0x10) {
                    *cause = EVENT_MOV_FROM_RR;
                } else if (inst.M43.x6 == 0x11) {
                    *cause = EVENT_MOV_FROM_DBR;
                } else if (inst.M43.x6 == 0x12) {
                    *cause = EVENT_MOV_FROM_IBR;
                } else if (inst.M43.x6 == 0x13) {
                    *cause = EVENT_MOV_FROM_PKR;
                } else if (inst.M43.x6 == 0x14) {
                    *cause = EVENT_MOV_FROM_PMC;
/*
                } else if (inst.M43.x6 == 0x15) {
                    *cause = EVENT_MOV_FROM_PMD;
*/
                } else if (inst.M43.x6 == 0x17) {
                    *cause = EVENT_MOV_FROM_CPUID;
                }
            }
        }
        break;
    case B:
        if (inst.generic.major == 0) {
            if (inst.B8.x6 == 0x02) {
                *cause = EVENT_COVER;
            } else if (inst.B8.x6 == 0x08) {
                *cause = EVENT_RFI;
            } else if (inst.B8.x6 == 0x0c) {
                *cause = EVENT_BSW_0;
            } else if (inst.B8.x6 == 0x0d) {
                *cause = EVENT_BSW_1;
            }
        }
    case I:
    case F:
    case L:
    case ILLEGAL:
        break;
    }
}
#endif

static IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
{
    u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
    return vmx_vcpu_reset_psr_sm(vcpu, imm24);
}

static IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
{
    u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
    return vmx_vcpu_set_psr_sm(vcpu, imm24);
}

static IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
{
    u64 tgt = inst.M33.r1;
    u64 val;

/*
    if ((fault = vmx_vcpu_get_psr(vcpu, &val)) == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, tgt, val);
    else
        return fault;
 */
    val = vmx_vcpu_get_psr(vcpu);
    val = (val & MASK(0, 32)) | (val & MASK(35, 2));
    return vcpu_set_gr(vcpu, tgt, val, 0);
}

/**
 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
 */
static IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
{
    u64 val;

    if (vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
        panic_domain(vcpu_regs(vcpu), "get_psr nat bit fault\n");

    return vmx_vcpu_set_psr_l(vcpu, val);
}

/**************************************************************************
Privileged operation emulation routines
**************************************************************************/
static IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
{
    IA64_PSR vpsr;
    REGS *regs;
#ifdef CHECK_FAULT
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if (debugger_event(XEN_IA64_DEBUG_ON_RFI)) {
        raise_softirq(SCHEDULE_SOFTIRQ);
        do_softirq();
    }
    regs = vcpu_regs(vcpu);
    vpsr.val = regs->cr_ipsr;
    if (vpsr.is == 1) {
        panic_domain(regs, "We do not support IA32 instruction yet");
    }

    return vmx_vcpu_rfi(vcpu);
}

static IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
{
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    return vcpu_bsw0(vcpu);
}

static IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
{
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    return vcpu_bsw1(vcpu);
}

static IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
{
    return vmx_vcpu_cover(vcpu);
}

static IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
{
    u64 r2, r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &r3) ||
        vcpu_get_gr_nat(vcpu, inst.M45.r2, &r2)) {
#ifdef VMAL_NO_FAULT_CHECK
        ISR isr;
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu, r3)) {
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    debugger_event(XEN_IA64_DEBUG_ON_TC);

    return vmx_vcpu_ptc_l(vcpu, r3, bits(r2, 2, 7));
}

static IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
{
    u64 r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    ISR isr;
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if (vcpu_get_gr_nat(vcpu, inst.M47.r3, &r3)) {
#ifdef VMAL_NO_FAULT_CHECK
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
    return vmx_vcpu_ptc_e(vcpu, r3);
}

static IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
{
    u64 r2, r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &r3) ||
        vcpu_get_gr_nat(vcpu, inst.M45.r2, &r2)) {
#ifdef VMAL_NO_FAULT_CHECK
        ISR isr;
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu, r3)) {
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    debugger_event(XEN_IA64_DEBUG_ON_TC);

    return vmx_vcpu_ptc_g(vcpu, r3, bits(r2, 2, 7));
}

static IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
{
    u64 r2, r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &r3) ||
        vcpu_get_gr_nat(vcpu, inst.M45.r2, &r2)) {
#ifdef VMAL_NO_FAULT_CHECK
        ISR isr;
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu, r3)) {
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    debugger_event(XEN_IA64_DEBUG_ON_TC);

    return vmx_vcpu_ptc_ga(vcpu, r3, bits(r2, 2, 7));
}

static IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
{
    IA64FAULT ret1, ret2;

#ifdef VMAL_NO_FAULT_CHECK
    ISR isr;
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    ret1 = vcpu_get_gr_nat(vcpu, inst.M45.r3, pr3);
    ret2 = vcpu_get_gr_nat(vcpu, inst.M45.r2, pr2);
#ifdef VMAL_NO_FAULT_CHECK
    if (ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT) {
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
    }
    if (unimplemented_gva(vcpu, r3)) {
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return IA64_NO_FAULT;
}

static IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
{
    u64 r2, r3;
    if (ptr_fault_check(vcpu, inst, &r2, &r3) == IA64_FAULT)
        return IA64_FAULT;

    debugger_event(XEN_IA64_DEBUG_ON_TR);

    return vmx_vcpu_ptr_d(vcpu, r3, bits(r2, 2, 7));
}

static IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
{
    u64 r2, r3;
    if (ptr_fault_check(vcpu, inst, &r2, &r3) == IA64_FAULT)
        return IA64_FAULT;

    debugger_event(XEN_IA64_DEBUG_ON_TR);

    return vmx_vcpu_ptr_i(vcpu, r3, bits(r2, 2, 7));
}

static IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
{
    u64 r1, r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;
    if (check_target_register(vcpu, inst.M46.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)) {
#ifdef CHECK_FAULT
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (unimplemented_gva(vcpu, r3)) {
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
    }
#endif //CHECK_FAULT
    r1 = vmx_vcpu_thash(vcpu, r3);
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}

static IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
{
    u64 r1, r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;
#endif
#ifdef CHECK_FAULT
    if (check_target_register(vcpu, inst.M46.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)) {
#ifdef CHECK_FAULT
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (unimplemented_gva(vcpu, r3)) {
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
    }
#endif //CHECK_FAULT
    r1 = vmx_vcpu_ttag(vcpu, r3);
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}

static IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
{
    u64 r1, r3;
#ifdef CHECK_FAULT
    ISR visr;
    if (check_target_register(vcpu, inst.M46.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        visr.val = 0;
        vcpu_set_isr(vcpu, visr.val);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)) {
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu, 0, 1);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (unimplemented_gva(vcpu, r3)) {
        // inject unimplemented_data_address_fault
        visr.val = set_isr_ei_ni(vcpu);
        visr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT

    if (vmx_vcpu_tpa(vcpu, r3, &r1)) {
        return IA64_FAULT;
    }
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}

static IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
{
    u64 r1, r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;