
📄 vmx_virt.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 of 3
    if (check_target_register(vcpu, inst.M43.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M43.r3, &r3)) {
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (is_reserved_indirect_register(vcpu, r3)) {
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    res = vmx_vcpu_get_dbr(vcpu, r3, &r1);
    if (res != IA64_NO_FAULT)
        return res;
    return vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

static IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
{
    u64 r3, r1;
    IA64FAULT res;
#ifdef CHECK_FAULT
    if (check_target_register(vcpu, inst.M43.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M43.r3, &r3)) {
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (is_reserved_indirect_register(vcpu, r3)) {
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    res = vmx_vcpu_get_ibr(vcpu, r3, &r1);
    if (res != IA64_NO_FAULT)
        return res;
    return vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

static IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
{
    u64 r3, r1;
#ifdef CHECK_FAULT
    if (check_target_register(vcpu, inst.M43.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M43.r3, &r3)) {
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (is_reserved_indirect_register(vcpu, r3)) {
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    r1 = vmx_vcpu_get_pmc(vcpu, r3);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}
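/*
 * Editor's note (not in the original source): the indirect "mov from"
 * emulators above and below share one guard sequence when CHECK_FAULT is
 * enabled, injecting into the guest the fault the hardware would raise:
 *
 *     check_target_register()         -> illegal operation fault
 *     vpsr.cpl != 0                   -> privileged operation fault
 *     vcpu_get_gr_nat() reports NaT   -> register NaT consumption fault
 *     is_reserved_indirect_register() -> reserved register/field fault
 *
 * Only after all checks pass is the register read and committed with
 * vcpu_set_gr(). vmx_emul_mov_from_cpuid() below omits the privilege
 * check, since reading cpuid[] is not a privileged operation.
 */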
static IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
{
    u64 r3, r1;
#ifdef CHECK_FAULT
    if (check_target_register(vcpu, inst.M43.r1)) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M43.r3, &r3)) {
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (is_reserved_indirect_register(vcpu, r3)) {
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    r1 = vmx_vcpu_get_cpuid(vcpu, r3);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

static IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
{
    u64 r2;
    extern u64 cr_igfld_mask(int index, u64 value);
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (is_reserved_cr(inst.M32.cr3) ||
        (vpsr.ic && is_interruption_control_cr(inst.M32.cr3))) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if (vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)) {
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu, 0, 0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (check_cr_rsv_fields(inst.M32.cr3, r2)) {
        /* Inject Reserved Register/Field fault into guest */
        set_rsv_reg_field_isr(vcpu, 0);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    r2 = cr_igfld_mask(inst.M32.cr3, r2);
    switch (inst.M32.cr3) {
        case 0:  return vcpu_set_dcr(vcpu, r2);
        case 1:  return vmx_vcpu_set_itm(vcpu, r2);
        case 2:  return vmx_vcpu_set_iva(vcpu, r2);
        case 8:  return vmx_vcpu_set_pta(vcpu, r2);
        case 16: return vcpu_set_ipsr(vcpu, r2);
        case 17: return vcpu_set_isr(vcpu, r2);
        case 19: return vcpu_set_iip(vcpu, r2);
        case 20: return vcpu_set_ifa(vcpu, r2);
        case 21: return vcpu_set_itir(vcpu, r2);
        case 22: return vcpu_set_iipa(vcpu, r2);
        case 23: return vcpu_set_ifs(vcpu, r2);
        case 24: return vcpu_set_iim(vcpu, r2);
        case 25: return vcpu_set_iha(vcpu, r2);
        case 64: printk("SET LID to 0x%lx\n", r2);
                 return IA64_NO_FAULT;
        case 65: return IA64_NO_FAULT;
        case 66: return vmx_vcpu_set_tpr(vcpu, r2);
        case 67: return vmx_vcpu_set_eoi(vcpu, r2);
        case 68: return IA64_NO_FAULT;
        case 69: return IA64_NO_FAULT;
        case 70: return IA64_NO_FAULT;
        case 71: return IA64_NO_FAULT;
        case 72: return vmx_vcpu_set_itv(vcpu, r2);
        case 73: return vmx_vcpu_set_pmv(vcpu, r2);
        case 74: return vmx_vcpu_set_cmcv(vcpu, r2);
        case 80: return vmx_vcpu_set_lrr0(vcpu, r2);
        case 81: return vmx_vcpu_set_lrr1(vcpu, r2);
        default:
            VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
            return IA64_NO_FAULT;
    }
}

#define cr_get(cr) \
    ((fault = vcpu_get_##cr(vcpu, &val)) == IA64_NO_FAULT) ? \
        vcpu_set_gr(vcpu, tgt, val, 0) : fault;

//#define cr_get(cr) (vcpu_set_gr(vcpu, tgt, vcpu_get##cr(vcpu), 0)

/*#define vmx_cr_get(cr) \
    ((fault = vmx_vcpu_get_##cr(vcpu, &val)) == IA64_NO_FAULT) ? \
        vcpu_set_gr(vcpu, tgt, val, 0) : fault;*/

#define vmx_cr_get(cr) (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_##cr(vcpu), 0))
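/*
 * Illustration (editor's expansion, not in the original source): with the
 * active definitions above, "return cr_get(dcr);" in the switch below
 * expands to
 *
 *     return ((fault = vcpu_get_dcr(vcpu, &val)) == IA64_NO_FAULT) ?
 *         vcpu_set_gr(vcpu, tgt, val, 0) : fault;;
 *
 * (the macro's trailing semicolon leaves a harmless empty statement), so
 * only a fault-free read is committed to the target general register.
 * "return vmx_cr_get(itm);" instead expands to
 *
 *     return (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_itm(vcpu), 0));
 *
 * because the vmx_ accessors return the value directly.
 */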
static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
{
    u64 tgt = inst.M33.r1;
    u64 val;
    IA64FAULT fault;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (is_reserved_cr(inst.M33.cr3) || is_read_only_cr(inst.M33.cr3) ||
        (vpsr.ic && is_interruption_control_cr(inst.M33.cr3))) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr(vcpu, 0);
        privilege_op(vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
//    from_cr_cnt[inst.M33.cr3]++;
    switch (inst.M33.cr3) {
        case 0:  return cr_get(dcr);
        case 1:  return vmx_cr_get(itm);
        case 2:  return vmx_cr_get(iva);
        case 8:  return vmx_cr_get(pta);
        case 16: return cr_get(ipsr);
        case 17: return cr_get(isr);
        case 19: return cr_get(iip);
        case 20: return cr_get(ifa);
        case 21: return cr_get(itir);
        case 22: return cr_get(iipa);
        case 23: return cr_get(ifs);
        case 24: return cr_get(iim);
        case 25: return cr_get(iha);
        case 64: return vmx_cr_get(lid);
        case 65:
            val = vmx_vcpu_get_ivr(vcpu);
            return vcpu_set_gr(vcpu, tgt, val, 0);
        case 66: return vmx_cr_get(tpr);
        case 67: return vcpu_set_gr(vcpu, tgt, 0L, 0);
        case 68: return vmx_cr_get(irr0);
        case 69: return vmx_cr_get(irr1);
        case 70: return vmx_cr_get(irr2);
        case 71: return vmx_cr_get(irr3);
        case 72: return vmx_cr_get(itv);
        case 73: return vmx_cr_get(pmv);
        case 74: return vmx_cr_get(cmcv);
        case 80: return vmx_cr_get(lrr0);
        case 81: return vmx_cr_get(lrr1);
        default: return IA64_NO_FAULT;
    }
}

//#define BYPASS_VMAL_OPCODE
extern IA64_SLOT_TYPE slot_types[0x20][3];

unsigned long
__vmx_get_domain_bundle(u64 iip, IA64_BUNDLE *pbundle)
{
    return fetch_code(current, iip, pbundle);
}
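/*
 * Background note (editor's illustration, not in the original source): an
 * IA-64 instruction bundle is 128 bits wide: a 5-bit template field plus
 * three 41-bit instruction slots. Slot 1 straddles the two 64-bit words of
 * the bundle, which is why the BYPASS_VMAL_OPCODE path in vmx_emulate()
 * below reassembles it as slot1a + (slot1b << 18): the low 18 bits come
 * from the first word and the high 23 bits from the second.
 */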
/** Emulate a privileged operation.
 *
 * @param vcpu virtual cpu
 * @cause the cause of the virtualization fault
 * @opcode the instruction encoding that caused the virtualization fault
 */
void
vmx_emulate(VCPU *vcpu, REGS *regs)
{
    IA64FAULT status;
    INST64 inst;
    u64 iip, cause, opcode;

    iip = regs->cr_iip;
    cause = VMX(vcpu, cause);
    opcode = VMX(vcpu, opcode);

#ifdef VTLB_DEBUG
    check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
    dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
#endif
#if 0
    if ((cause == 0xff && opcode == 0x1e000000000) || cause == 0) {
        printk("VMAL decode error: cause - %lx; op - %lx\n",
               cause, opcode);
        return;
    }
#endif
#ifdef BYPASS_VMAL_OPCODE
    // make a local copy of the bundle containing the privop
    IA64_BUNDLE bundle;
    int slot;
    IA64_SLOT_TYPE slot_type;
    IA64_PSR vpsr;

    __vmx_get_domain_bundle(iip, &bundle);
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot)
        inst.inst = bundle.slot0;
    else if (slot == 1)
        inst.inst = bundle.slot1a + (bundle.slot1b << 18);
    else if (slot == 2)
        inst.inst = bundle.slot2;
    else
        printk("priv_handle_op: illegal slot: %d\n", slot);
    slot_type = slot_types[bundle.template][slot];
    ia64_priv_decoder(slot_type, inst, &cause);
    if (cause == 0) {
        panic_domain(regs, "This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
    }
#else
    inst.inst = opcode;
#endif /* BYPASS_VMAL_OPCODE */

    debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);

    /*
     * Switch to actual virtual rid in rr0 and rr4,
     * which is required by some tlb related instructions.
     */
    prepare_if_physical_mode(vcpu);

    switch (cause) {
    case EVENT_RSM:
        perfc_incr(vmx_rsm);
        status = vmx_emul_rsm(vcpu, inst);
        break;
    case EVENT_SSM:
        perfc_incr(vmx_ssm);
        status = vmx_emul_ssm(vcpu, inst);
        break;
    case EVENT_MOV_TO_PSR:
        perfc_incr(vmx_mov_to_psr);
        status = vmx_emul_mov_to_psr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_PSR:
        perfc_incr(vmx_mov_from_psr);
        status = vmx_emul_mov_from_psr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_CR:
        perfc_incr(vmx_mov_from_cr);
        status = vmx_emul_mov_from_cr(vcpu, inst);
        break;
    case EVENT_MOV_TO_CR:
        perfc_incr(vmx_mov_to_cr);
        status = vmx_emul_mov_to_cr(vcpu, inst);
        break;
    case EVENT_BSW_0:
        perfc_incr(vmx_bsw0);
        status = vmx_emul_bsw0(vcpu, inst);
        break;
    case EVENT_BSW_1:
        perfc_incr(vmx_bsw1);
        status = vmx_emul_bsw1(vcpu, inst);
        break;
    case EVENT_COVER:
        perfc_incr(vmx_cover);
        status = vmx_emul_cover(vcpu, inst);
        break;
    case EVENT_RFI:
        perfc_incr(vmx_rfi);
        status = vmx_emul_rfi(vcpu, inst);
        break;
    case EVENT_ITR_D:
        perfc_incr(vmx_itr_d);
        status = vmx_emul_itr_d(vcpu, inst);
        break;
    case EVENT_ITR_I:
        perfc_incr(vmx_itr_i);
        status = vmx_emul_itr_i(vcpu, inst);
        break;
    case EVENT_PTR_D:
        perfc_incr(vmx_ptr_d);
        status = vmx_emul_ptr_d(vcpu, inst);
        break;
    case EVENT_PTR_I:
        perfc_incr(vmx_ptr_i);
        status = vmx_emul_ptr_i(vcpu, inst);
        break;
    case EVENT_ITC_D:
        perfc_incr(vmx_itc_d);
        status = vmx_emul_itc_d(vcpu, inst);
        break;
    case EVENT_ITC_I:
        perfc_incr(vmx_itc_i);
        status = vmx_emul_itc_i(vcpu, inst);
        break;
    case EVENT_PTC_L:
        perfc_incr(vmx_ptc_l);
        status = vmx_emul_ptc_l(vcpu, inst);
        break;
    case EVENT_PTC_G:
        perfc_incr(vmx_ptc_g);
        status = vmx_emul_ptc_g(vcpu, inst);
        break;
    case EVENT_PTC_GA:
        perfc_incr(vmx_ptc_ga);
        status = vmx_emul_ptc_ga(vcpu, inst);
        break;
    case EVENT_PTC_E:
        perfc_incr(vmx_ptc_e);
        status = vmx_emul_ptc_e(vcpu, inst);
        break;
    case EVENT_MOV_TO_RR:
        perfc_incr(vmx_mov_to_rr);
        status = vmx_emul_mov_to_rr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_RR:
        perfc_incr(vmx_mov_from_rr);
        status = vmx_emul_mov_from_rr(vcpu, inst);
        break;
    case EVENT_THASH:
        perfc_incr(vmx_thash);
        status = vmx_emul_thash(vcpu, inst);
        break;
    case EVENT_TTAG:
        perfc_incr(vmx_ttag);
        status = vmx_emul_ttag(vcpu, inst);
        break;
    case EVENT_TPA:
        perfc_incr(vmx_tpa);
        status = vmx_emul_tpa(vcpu, inst);
        break;
    case EVENT_TAK:
        perfc_incr(vmx_tak);
        status = vmx_emul_tak(vcpu, inst);
        break;
    case EVENT_MOV_TO_AR_IMM:
        perfc_incr(vmx_mov_to_ar_imm);
        status = vmx_emul_mov_to_ar_imm(vcpu, inst);
        break;
    case EVENT_MOV_TO_AR:
        perfc_incr(vmx_mov_to_ar_reg);
        status = vmx_emul_mov_to_ar_reg(vcpu, inst);
        break;
    case EVENT_MOV_FROM_AR:
        perfc_incr(vmx_mov_from_ar_reg);
        status = vmx_emul_mov_from_ar_reg(vcpu, inst);
        break;
    case EVENT_MOV_TO_DBR:
        perfc_incr(vmx_mov_to_dbr);
        status = vmx_emul_mov_to_dbr(vcpu, inst);
        break;
    case EVENT_MOV_TO_IBR:
        perfc_incr(vmx_mov_to_ibr);
        status = vmx_emul_mov_to_ibr(vcpu, inst);
        break;
    case EVENT_MOV_TO_PMC:
        perfc_incr(vmx_mov_to_pmc);
        status = vmx_emul_mov_to_pmc(vcpu, inst);
        break;
    case EVENT_MOV_TO_PMD:
        perfc_incr(vmx_mov_to_pmd);
        status = vmx_emul_mov_to_pmd(vcpu, inst);
        break;
    case EVENT_MOV_TO_PKR:
        perfc_incr(vmx_mov_to_pkr);
        status = vmx_emul_mov_to_pkr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_DBR:
        perfc_incr(vmx_mov_from_dbr);
        status = vmx_emul_mov_from_dbr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_IBR:
        perfc_incr(vmx_mov_from_ibr);
        status = vmx_emul_mov_from_ibr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_PMC:
        perfc_incr(vmx_mov_from_pmc);
        status = vmx_emul_mov_from_pmc(vcpu, inst);
        break;
    case EVENT_MOV_FROM_PKR:
        perfc_incr(vmx_mov_from_pkr);
        status = vmx_emul_mov_from_pkr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_CPUID:
        perfc_incr(vmx_mov_from_cpuid);
        status = vmx_emul_mov_from_cpuid(vcpu, inst);
        break;
    case EVENT_VMSW:
        printk("Unimplemented instruction %ld\n", cause);
        status = IA64_FAULT;
        break;
    default:
        panic_domain(regs, "unknown cause %ld, iip: %lx, ipsr: %lx\n",
                     cause, regs->cr_iip, regs->cr_ipsr);
        break;
    }

#if 0
    if (status != IA64_NO_FAULT)
        panic("Emulation failed with cause %d:\n", cause);
#endif

    switch (status) {
    case IA64_RSVDREG_FAULT:
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        break;
    case IA64_ILLOP_FAULT:
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        break;
    case IA64_FAULT:
        /* Registers already set.  */
        break;
    case IA64_NO_FAULT:
        if (cause != EVENT_RFI)
            vcpu_increment_iip(vcpu);
        break;
    }
    recover_if_physical_mode(vcpu);
    return;
}
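The emulator above is driven entirely by the cause and opcode that the
virtualization-fault path stores in the per-VCPU VMX state before control
reaches vmx_emulate(). A minimal caller sketch follows; the handler name and
its parameter list are hypothetical (the real entry point lives elsewhere in
the Xen/ia64 tree), and it assumes VMX() designates writable per-VCPU state,
as its use as an rvalue above suggests. Only VMX(), vmx_emulate(), VCPU and
REGS come from the listing itself.

/* Hypothetical caller (illustration only): records the decoded cause and
 * opcode, then delegates to vmx_emulate() from the listing above. */
static void example_virtualization_fault(VCPU *vcpu, REGS *regs,
                                         u64 cause, u64 opcode)
{
    VMX(vcpu, cause) = cause;    /* read back via cause = VMX(vcpu, cause) */
    VMX(vcpu, opcode) = opcode;  /* decoded directly unless BYPASS_VMAL_OPCODE */
    vmx_emulate(vcpu, regs);     /* dispatches on cause; advances iip on
                                    success, except after an rfi */
}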
