vmx.c

来自「xen虚拟机源代码安装包」· C语言 代码 · 共 2,245 行 · 第 1/5 页

C
2,245
字号
    u32 ecx = regs->ecx;    u64 msr_content;    struct vcpu *v = current;    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x",                ecx, (u32)regs->eax, (u32)regs->edx);    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);    HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);    switch ( ecx )    {    case MSR_IA32_SYSENTER_CS:        __vmwrite(GUEST_SYSENTER_CS, msr_content);        break;    case MSR_IA32_SYSENTER_ESP:        __vmwrite(GUEST_SYSENTER_ESP, msr_content);        break;    case MSR_IA32_SYSENTER_EIP:        __vmwrite(GUEST_SYSENTER_EIP, msr_content);        break;    case MSR_IA32_DEBUGCTLMSR: {        int i, rc = 0;        if ( !msr_content || (msr_content & ~3) )            break;        if ( msr_content & 1 )        {            const struct lbr_info *lbr = last_branch_msr_get();            if ( lbr == NULL )                break;            for ( ; (rc == 0) && lbr->count; lbr++ )                for ( i = 0; (rc == 0) && (i < lbr->count); i++ )                    if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )                        vmx_disable_intercept_for_msr(v, lbr->base + i);        }        if ( (rc < 0) ||             (vmx_add_host_load_msr(ecx) < 0) )            vmx_inject_hw_exception(v, TRAP_machine_check, 0);        else        {            __vmwrite(GUEST_IA32_DEBUGCTL, msr_content);#ifdef __i386__            __vmwrite(GUEST_IA32_DEBUGCTL_HIGH, msr_content >> 32);#endif        }        break;    }    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:        goto gp_fault;    default:        if ( vpmu_do_wrmsr(regs) )            return X86EMUL_OKAY;        switch ( long_mode_do_msr_write(regs) )        {            case HNDL_unhandled:                if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&                     !is_last_branch_msr(ecx) )                    wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);                break;            case HNDL_exception_raised:                return 
X86EMUL_EXCEPTION;
            case HNDL_done:
                break;
        }
        break;
    }

    return X86EMUL_OKAY;

gp_fault:
    /* Invalid MSR access: reflect #GP(0) back into the guest. */
    vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    return X86EMUL_EXCEPTION;
}

/*
 * Handle a VM exit caused by a host external interrupt arriving while the
 * guest was running: read the interrupt vector from the VMCS and dispatch
 * it to the matching Xen handler, exactly as if it had been taken natively.
 */
static void vmx_do_extint(struct cpu_user_regs *regs)
{
    unsigned int vector;

    /* Local forward declarations of the host IRQ/IPI handlers used below. */
    asmlinkage void do_IRQ(struct cpu_user_regs *);
    fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *);
    fastcall void smp_event_check_interrupt(void);
    fastcall void smp_invalidate_interrupt(void);
    fastcall void smp_call_function_interrupt(void);
    fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs);
    fastcall void smp_error_interrupt(struct cpu_user_regs *regs);
    fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs);
#ifdef CONFIG_X86_MCE_P4THERMAL
    fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
#endif

    /* The interruption-information field must be valid for this exit type. */
    vector = __vmread(VM_EXIT_INTR_INFO);
    BUG_ON(!(vector & INTR_INFO_VALID_MASK));
    vector &= INTR_INFO_VECTOR_MASK;

    HVMTRACE_1D(INTR, current, vector);

    /* Route Xen's own APIC/IPI vectors to their dedicated handlers. */
    switch ( vector )
    {
    case LOCAL_TIMER_VECTOR:
        smp_apic_timer_interrupt(regs);
        break;
    case EVENT_CHECK_VECTOR:
        smp_event_check_interrupt();
        break;
    case INVALIDATE_TLB_VECTOR:
        smp_invalidate_interrupt();
        break;
    case CALL_FUNCTION_VECTOR:
        smp_call_function_interrupt();
        break;
    case SPURIOUS_APIC_VECTOR:
        smp_spurious_interrupt(regs);
        break;
    case ERROR_APIC_VECTOR:
        smp_error_interrupt(regs);
        break;
    case PMU_APIC_VECTOR:
        smp_pmu_apic_interrupt(regs);
        break;
#ifdef CONFIG_X86_MCE_P4THERMAL
    case THERMAL_APIC_VECTOR:
        smp_thermal_interrupt(regs);
        break;
#endif
    default:
        /* Ordinary device interrupt: hand off to the generic IRQ path. */
        regs->entry_vector = vector;
        do_IRQ(regs);
        break;
    }
}

/* IPI callback: write back and invalidate this CPU's caches. */
static void wbinvd_ipi(void *info)
{
    wbinvd();
}

/*
 * Guest WBINVD intercept. A real cache flush is only needed when the
 * domain has physical devices assigned (DMA may observe memory directly);
 * otherwise the intercept is a no-op.
 */
static void vmx_wbinvd_intercept(void)
{
    if ( !has_arch_pdevs(current->domain) )
        return;

    if ( cpu_has_wbinvd_exiting )
on_each_cpu(wbinvd_ipi, NULL, 1, 1); /* WBINVD exits: flush on every CPU */
    else
        wbinvd(); /* no WBINVD exiting: guest ran here, flush locally */
}

/*
 * Handle an EPT violation VM exit.
 *  qualification: exit-qualification bits describing the faulting access.
 *  gpa:           faulting guest-physical address.
 * Recoverable cases: a write fault on RAM while log-dirty mode is active is
 * recorded and the page made writable again; a fault with a valid matching
 * guest-linear address is forwarded to the MMIO emulator. Every other case
 * crashes the domain.
 */
static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
{
    unsigned long gla_validity = qualification & EPT_GLA_VALIDITY_MASK;
    struct domain *d = current->domain;
    unsigned long gfn = gpa >> PAGE_SHIFT;
    mfn_t mfn;
    p2m_type_t t;

    /* Guest-physical address wider than the configured EPT walk width. */
    if ( unlikely(qualification & EPT_GAW_VIOLATION) )
    {
        gdprintk(XENLOG_ERR, "EPT violation: guest physical address %"PRIpaddr
                 " exceeded its width limit.\n", gpa);
        goto crash;
    }

    /* Reserved GLA-validity encoding, or a fault during PDPTR load. */
    if ( unlikely(gla_validity == EPT_GLA_VALIDITY_RSVD) ||
         unlikely(gla_validity == EPT_GLA_VALIDITY_PDPTR_LOAD) )
    {
        gdprintk(XENLOG_ERR, "EPT violation: reserved bit or "
                 "pdptr load violation.\n");
        goto crash;
    }

    mfn = gfn_to_mfn(d, gfn, &t);

    /*
     * Writable RAM faulting while log-dirty is active: mark the page dirty,
     * restore its r/w type, and flush stale TLB entries for this domain.
     */
    if ( (t != p2m_ram_ro) && p2m_is_ram(t) && paging_mode_log_dirty(d) )
    {
        paging_mark_dirty(d, mfn_x(mfn));
        p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
        flush_tlb_mask(d->domain_dirty_cpumask);
        return;
    }

    /* This can only happen in log-dirty mode, writing back A/D bits.
*/
    if ( unlikely(gla_validity == EPT_GLA_VALIDITY_GPT_WALK) )
        goto crash;

    ASSERT(gla_validity == EPT_GLA_VALIDITY_MATCH);
    /* Valid, matching guest-linear address: emulate as an MMIO access. */
    handle_mmio();
    return;

 crash:
    domain_crash(d);
}

/*
 * A VM entry attempt failed. Decode the failure reason (low 16 bits of the
 * exit reason), dump diagnostics and the full VMCS, then crash the domain —
 * there is no way to resume a guest whose state the CPU has rejected.
 */
static void vmx_failed_vmentry(unsigned int exit_reason,
                               struct cpu_user_regs *regs)
{
    unsigned int failed_vmentry_reason = (uint16_t)exit_reason;
    unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
    struct vcpu *curr = current;

    printk("Failed vm entry (exit reason 0x%x) ", exit_reason);
    switch ( failed_vmentry_reason )
    {
    case EXIT_REASON_INVALID_GUEST_STATE:
        printk("caused by invalid guest state (%ld).\n", exit_qualification);
        break;
    case EXIT_REASON_MSR_LOADING:
        /* Exit qualification holds the index of the failing MSR-load entry. */
        printk("caused by MSR entry %ld loading.\n", exit_qualification);
        break;
    case EXIT_REASON_MACHINE_CHECK:
        printk("caused by machine check.\n");
        HVMTRACE_0D(MCE, curr);
        do_machine_check(regs);
        break;
    default:
        printk("reason not known yet!");
        break;
    }

    printk("************* VMCS Area **************\n");
    vmcs_dump_vcpu(curr);
    printk("**************************************\n");

    domain_crash(curr->domain);
}

/*
 * Top-level VM-exit dispatcher: reads the exit reason from the VMCS and
 * routes to the appropriate intercept handler. (Continues beyond this
 * excerpt; only the prologue is visible here.)
 */
asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
    unsigned int exit_reason, idtv_info;
    unsigned long exit_qualification, inst_len = 0;
    struct vcpu *v = current;

    /* With HAP, hardware owns CR3 while paging is on: resync our copy. */
    if ( paging_mode_hap(v->domain) && hvm_paging_enabled(v) )
        v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
            __vmread(GUEST_CR3);

    exit_reason = __vmread(VM_EXIT_REASON);

    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
                (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
                0, 0, 0);

    perfc_incra(vmexits, exit_reason);

    /*
     * External interrupts must be dispatched with IRQs still disabled;
     * every other exit can re-enable interrupts immediately.
     */
    if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
        local_irq_enable();

    if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
        return vmx_failed_vmentry(exit_reason,
regs);    hvm_maybe_deassert_evtchn_irq();    /* Event delivery caused this intercept? Queue for redelivery. */    idtv_info = __vmread(IDT_VECTORING_INFO);    if ( unlikely(idtv_info & INTR_INFO_VALID_MASK) &&         (exit_reason != EXIT_REASON_TASK_SWITCH) )    {        if ( hvm_event_needs_reinjection((idtv_info>>8)&7, idtv_info&0xff) )        {            /* See SDM 3B 25.7.1.1 and .2 for info about masking resvd bits. */            __vmwrite(VM_ENTRY_INTR_INFO,                      idtv_info & ~INTR_INFO_RESVD_BITS_MASK);            if ( idtv_info & INTR_INFO_DELIVER_CODE_MASK )                __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,                          __vmread(IDT_VECTORING_ERROR_CODE));        }        /*         * Clear NMI-blocking interruptibility info if an NMI delivery faulted.         * Re-delivery will re-set it (see SDM 3B 25.7.1.2).         */        if ( (idtv_info & INTR_INFO_INTR_TYPE_MASK) == (X86_EVENTTYPE_NMI<<8) )            __vmwrite(GUEST_INTERRUPTIBILITY_INFO,                      __vmread(GUEST_INTERRUPTIBILITY_INFO) &                      ~VMX_INTR_SHADOW_NMI);    }    switch ( exit_reason )    {    case EXIT_REASON_EXCEPTION_NMI:    {        /*         * We don't set the software-interrupt exiting (INT n).         * (1) We can get an exception (e.g. #PG) in the guest, or         * (2) NMI         */        unsigned int intr_info, vector;        intr_info = __vmread(VM_EXIT_INTR_INFO);        BUG_ON(!(intr_info & INTR_INFO_VALID_MASK));        vector = intr_info & INTR_INFO_VECTOR_MASK;        /*         * Re-set the NMI shadow if vmexit caused by a guest IRET fault (see 3B         * 25.7.1.2, "Resuming Guest Software after Handling an Exception").         * (NB. If we emulate this IRET for any reason, we should re-clear!)         
*/        if ( unlikely(intr_info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&             !(__vmread(IDT_VECTORING_INFO) & INTR_INFO_VALID_MASK) &&             (vector != TRAP_double_fault) )            __vmwrite(GUEST_INTERRUPTIBILITY_INFO,                    __vmread(GUEST_INTERRUPTIBILITY_INFO)|VMX_INTR_SHADOW_NMI);        perfc_incra(cause_vector, vector);        switch ( vector )        {        case TRAP_debug:        case TRAP_int3:            if ( !v->domain->debugger_attached )                goto exit_and_crash;            domain_pause_for_debugger();            break;        case TRAP_no_device:            vmx_fpu_dirty_intercept();            break;        case TRAP_page_fault:            exit_qualification = __vmread(EXIT_QUALIFICATION);            regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);            HVM_DBG_LOG(DBG_LEVEL_VMMU,                        "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",                        (unsigned long)regs->eax, (unsigned long)regs->ebx,                        (unsigned long)regs->ecx, (unsigned long)regs->edx,                        (unsigned long)regs->esi, (unsigned long)regs->edi);            if ( paging_fault(exit_qualification, regs) )            {                if ( hvm_long_mode_enabled(v) )                    HVMTRACE_LONG_2D (PF_XEN, v, regs->error_code,                        TRC_PAR_LONG(exit_qualification) );                else                    HVMTRACE_2D (PF_XEN, v,                        regs->error_code, exit_qualification );                break;            }            v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;            vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);            break;        case TRAP_nmi:            if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=                 (X86_EVENTTYPE_NMI << 8) )                goto exit_and_crash;            HVMTRACE_0D(NMI, v);            do_nmi(regs); /* Real NMI, vector 2: normal processing. 
*/            break;        case TRAP_machine_check:            HVMTRACE_0D(MCE, v);            do_machine_check(regs);            break;        default:            goto exit_and_crash;        }        break;    }    case EXIT_REASON_EXTERNAL_INTERRUPT:        vmx_do_extint(regs);        break;    case EXIT_REASON_TRIPLE_FAULT:        hvm_triple_fault();        break;    case EXIT_REASON_PENDING_VIRT_INTR:        /* Disable the interrupt window. */        v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,                  v->arch.hvm_vmx.exec_control);        break;    case EXIT_REASON_PENDING_VIRT_NMI:        /* Disable the NMI window. */        v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,                  v->arch.hvm_vmx.exec_control);        break;    case EXIT_REASON_TASK_SWITCH: {        const enum hvm_task_switch_reason reasons[] = {            TSW_call_or_int, TSW_iret, TSW_jmp, TSW_call_or_int };        int32_t errcode = -1;        exit_qualification = __vmread(EXIT_QUALIFICATION);        if ( (idtv_info & INTR_INFO_VALID_MASK) &&             (idtv_info & INTR_INFO_DELIVER_CODE_MASK) )            errcode = __vmread(IDT_VECTORING_ERROR_CODE);        hvm_task_switch((uint16_t)exit_qualification,                        reasons[(exit_qualification >> 30) & 3],                        errcode);        break;    }    case EXIT_REASON_CPUID:        inst_len = __get_instruction_length(); /* Safe: CPUID */        __update_guest_eip(inst_len);        vmx_do_cpuid(regs);        break;    case EXIT_REASON_HLT:        inst_len = __get_instruction_length(); /* Safe: HLT */        __update_guest_eip(inst_len);        hvm_hlt(regs->eflags);        break;    case EXIT_REASON_INVLPG:    {        inst_len = __get_instruction_length(); /* Safe: INVLPG */        __update_guest_eip(inst_len);        exit_qualification = __vmread(EXIT_QUALIFICATION);        
vmx_invlpg_intercept(exit_qualification);        break;    }    case EXIT_REASON_RDTSC:        inst_len = __get_instruction_length();        __update_guest_eip(inst_len);        hvm_rdtsc_intercept(regs);        break;    case EXIT_REASON_VMCALL:    {        int rc;        HVMTRACE_1D(VMMCALL, v, regs->eax);        inst_len = __get_instruction_length(); /* Safe: VMCALL */        rc = hvm_do_hypercall(regs);        if ( rc != HVM_HCALL_preempted )        {            __update_guest_eip(inst_len);            if ( rc == HVM_HCALL_invalidate )                send_invalidate_req();        }        break;    }    case EXIT_REASON_CR_ACCESS:    {        exit_qualification = __vmread(EXIT_QUALIFICATION);        inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */        if ( vmx_cr_access(exit_qualification, regs) )            __update_guest_eip(inst_len);        break;    }    case EXIT_REASON_DR_ACCESS:        exit_qualification = __vmread(EXIT_QUALIFICATION);        vmx_dr_access(exit_qualification, regs);        break;    case EXIT_REASON_MSR_READ:        inst_len = __get_instruction_length(); /* Safe: RDMSR */        if ( hvm_msr_read_intercept(regs) == X86EMUL_OKAY )            __update_guest_eip(inst_len);        break;    case EXIT_REASON_MSR_WRITE:     

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?