vmx_virt.c
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return IA64_NO_FAULT;
}

/* Emulate ptr.d: purge a data translation; page size comes from r2{7:2}. */
static IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
{
    u64 r2, r3;

    if (ptr_fault_check(vcpu, inst, &r2, &r3) == IA64_FAULT)
        return IA64_FAULT;
    debugger_event(XEN_IA64_DEBUG_ON_TR);
    return vmx_vcpu_ptr_d(vcpu, r3, bits(r2, 2, 7));
}

/* Emulate ptr.i: purge an instruction translation. */
static IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
{
    u64 r2, r3;

    if (ptr_fault_check(vcpu, inst, &r2, &r3) == IA64_FAULT)
        return IA64_FAULT;
    debugger_event(XEN_IA64_DEBUG_ON_TR);
    return vmx_vcpu_ptr_i(vcpu, r3, bits(r2, 2, 7));
}

/* Emulate thash: compute the VHPT hash address for the VA in r3. */
static IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
{
    u64 r1, r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;

    if (check_target_register(vcpu, inst.M46.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)) {
#ifdef CHECK_FAULT
        /* Source register is NaT: write a NaT target instead of faulting. */
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (unimplemented_gva(vcpu, r3)) {
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
    }
#endif //CHECK_FAULT
    r1 = vmx_vcpu_thash(vcpu, r3);
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return IA64_NO_FAULT;
}

/* Emulate ttag: compute the VHPT tag for the VA in r3. */
static IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
{
    u64 r1, r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;

    if (check_target_register(vcpu, inst.M46.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)) {
#ifdef CHECK_FAULT
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (unimplemented_gva(vcpu, r3)) {
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
    }
#endif //CHECK_FAULT
    r1 = vmx_vcpu_ttag(vcpu, r3);
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return IA64_NO_FAULT;
}
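/*
 * Illustrative aside: vmx_vcpu_thash()/vmx_vcpu_ttag() above reduce a
 * region id plus virtual address to a VHPT slot and tag.  Below is a
 * minimal sketch of that idea, assuming a power-of-two, long-format
 * VHPT with 32-byte entries; the helper name and the hash fold are
 * hypothetical and do not reproduce the hash this file actually uses.
 */
#include <stdint.h>

static uint64_t toy_thash(uint64_t rid, uint64_t va,
                          unsigned page_shift, uint64_t vhpt_size)
{
    uint64_t vpn  = va >> page_shift;   /* virtual page number */
    uint64_t hash = vpn ^ (rid << 8);   /* fold the region id into the index */

    /* Scale by the 32-byte long-format entry size, then wrap to the table. */
    return (hash << 5) & (vhpt_size - 1);
}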
/* Emulate tpa: translate the VA in r3 to a physical address in r1. */
static IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
{
    u64 r1, r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;

    if (check_target_register(vcpu, inst.M46.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        visr.val = 0;
        vcpu_set_isr(vcpu, visr.val);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)) {
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu, 0, 1);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (unimplemented_gva(vcpu, r3)) {
        // inject unimplemented_data_address_fault
        visr.val = set_isr_ei_ni(vcpu);
        visr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, visr.val);
        // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if (vmx_vcpu_tpa(vcpu, r3, &r1)) {
        return IA64_FAULT;
    }
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return IA64_NO_FAULT;
}

/* Emulate tak: read the protection key for the VA in r3. */
static IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
{
    u64 r1, r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;
    int fault = IA64_NO_FAULT;

    visr.val = 0;
    if (check_target_register(vcpu, inst.M46.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        vcpu_set_isr(vcpu, visr.val);
        return IA64_FAULT;
    }
#endif
    if (vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)) {
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu, 0, 1);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif
    }
    r1 = vmx_vcpu_tak(vcpu, r3);
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return IA64_NO_FAULT;
}

/************************************
 * Insert translation register/cache
 ************************************/

/* Emulate itr.d: insert an entry into a data translation register. */
static IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
{
    u64 itir, ifa, pte, slot;
    ISR isr;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.ic) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot) ||
        vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
#ifdef VMAL_NO_FAULT_CHECK
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (is_reserved_rr_register(vcpu, slot)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if (vcpu_get_itir(vcpu, &itir)) {
        return IA64_FAULT;
    }
    if (vcpu_get_ifa(vcpu, &ifa)) {
        return IA64_FAULT;
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (is_reserved_itir_field(vcpu, itir)) {
        // TODO
        return IA64_FAULT;
    }
    if (unimplemented_gva(vcpu, ifa)) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    /* Only NDTRS data TRs are implemented per vcpu. */
    if (slot >= NDTRS) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
    debugger_event(XEN_IA64_DEBUG_ON_TR);
    return vmx_vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}

/* Emulate itr.i: insert an entry into an instruction translation register. */
static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
{
    u64 itir, ifa, pte, slot;
    ISR isr;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.ic) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot) ||
        vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
#ifdef VMAL_NO_FAULT_CHECK
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (is_reserved_rr_register(vcpu, slot)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if (vcpu_get_itir(vcpu, &itir)) {
        return IA64_FAULT;
    }
    if (vcpu_get_ifa(vcpu, &ifa)) {
        return IA64_FAULT;
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (is_reserved_itir_field(vcpu, itir)) {
        // TODO
        return IA64_FAULT;
    }
    if (unimplemented_gva(vcpu, ifa)) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    /* Only NITRS instruction TRs are implemented per vcpu. */
    if (slot >= NITRS) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
    debugger_event(XEN_IA64_DEBUG_ON_TR);
    return vmx_vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}
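/*
 * Illustrative aside: the insert paths above pass ITIR through to
 * vmx_vcpu_itr_d()/vmx_vcpu_itr_i() unmodified.  The two architected
 * ITIR fields they rely on are the log2 page size in bits 7:2 (the
 * same field position ptr.d/ptr.i take from r2 via bits(r2, 2, 7))
 * and the protection key in bits 31:8.  These helpers are a sketch
 * for reference, not functions defined elsewhere in this file.
 */
#include <stdint.h>

static inline unsigned toy_itir_ps(uint64_t itir)
{
    return (unsigned)((itir >> 2) & 0x3f);      /* bits 7:2: log2(page size) */
}

static inline uint32_t toy_itir_key(uint64_t itir)
{
    return (uint32_t)((itir >> 8) & 0xffffff);  /* bits 31:8: protection key */
}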
/* Common fault checks shared by the itc.d and itc.i emulation paths. */
static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
                                 u64 *itir, u64 *ifa, u64 *pte)
{
    IA64FAULT ret1;
#ifdef VMAL_NO_FAULT_CHECK
    u64 fault;
    ISR isr;
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.ic) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    ret1 = vcpu_get_gr_nat(vcpu, inst.M45.r2, pte);
#ifdef VMAL_NO_FAULT_CHECK
    if (ret1 != IA64_NO_FAULT) {
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if (vcpu_get_itir(vcpu, itir)) {
        return IA64_FAULT;
    }
    if (vcpu_get_ifa(vcpu, ifa)) {
        return IA64_FAULT;
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu, *ifa)) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return IA64_NO_FAULT;
}

/* Emulate itc.d: insert an entry into the data translation cache. */
static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
{
    u64 itir, ifa, pte;

    if (itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT) {
        return IA64_FAULT;
    }
    debugger_event(XEN_IA64_DEBUG_ON_TC);
    return vmx_vcpu_itc_d(vcpu, pte, itir, ifa);
}

/* Emulate itc.i: insert an entry into the instruction translation cache. */
static IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
{
    u64 itir, ifa, pte;

    if (itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT) {
        return IA64_FAULT;
    }
    debugger_event(XEN_IA64_DEBUG_ON_TC);
    return vmx_vcpu_itc_i(vcpu, pte, itir, ifa);
}

/*************************************
 * Moves to semi-privileged registers
 *************************************/

/* Emulate 'mov ar3 = imm8'; only ar.itc (ar44) is handled. */
static IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
{
    // I27 and M30 are identical for these fields
    u64 imm;

    if (inst.M30.ar3 != 44) {
        panic_domain(vcpu_regs(vcpu),
                     "Can't support ar register other than itc");
    }
#ifdef CHECK_FAULT
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if (inst.M30.s)
        imm = -inst.M30.imm;
    else
        imm = inst.M30.imm;

    return vmx_vcpu_set_itc(vcpu, imm);
}
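/*
 * Illustrative aside: vmx_emul_mov_to_ar_imm() above funnels guest ITC
 * writes into vmx_vcpu_set_itc().  A common way to virtualize that,
 * sketched here under assumed names (this is not the implementation
 * behind vmx_vcpu_set_itc()), is to keep a per-vcpu offset against the
 * host counter rather than writing the physical ar.itc.
 */
#include <stdint.h>

struct toy_vtm {
    int64_t itc_offset;                 /* guest ITC = host ITC + offset */
};

static uint64_t toy_read_host_itc(void)
{
    /* Stand-in for 'mov rN = ar.itc' so the sketch is self-contained. */
    static uint64_t fake_itc;
    return fake_itc += 1000;
}

static void toy_set_itc(struct toy_vtm *vtm, uint64_t guest_itc)
{
    /* Record how far the guest's view is from the host counter. */
    vtm->itc_offset = (int64_t)(guest_itc - toy_read_host_itc());
}

static uint64_t toy_get_itc(const struct toy_vtm *vtm)
{
    /* Guest reads see the host counter shifted by the saved offset. */
    return toy_read_host_itc() + (uint64_t)vtm->itc_offset;
}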