svm.c

From: Xen virtual machine source code package
Language: C
Page 1 of 3
    HVMTRACE_0D(DR_WRITE, v);
    __restore_debug_registers(v);
}

static int svm_msr_read_intercept(struct cpu_user_regs *regs)
{
    u64 msr_content = 0;
    u32 ecx = regs->ecx, eax, edx;
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    switch ( ecx )
    {
    case MSR_EFER:
        msr_content = v->arch.hvm_vcpu.guest_efer;
        break;

    case MSR_IA32_MC4_MISC: /* Threshold register */
    case MSR_F10_MC4_MISC1 ... MSR_F10_MC4_MISC3:
        /*
         * MCA/MCE: We report that the threshold register is unavailable
         * for OS use (locked by the BIOS).
         */
        msr_content = 1ULL << 61; /* MC4_MISC.Locked */
        break;

    case MSR_IA32_EBC_FREQUENCY_ID:
        /*
         * This Intel-only register may be accessed if this HVM guest
         * has been migrated from an Intel host. The value zero is not
         * particularly meaningful, but at least avoids the guest crashing!
         */
        msr_content = 0;
        break;

    case MSR_K8_VM_HSAVE_PA:
        goto gpf;

    case MSR_IA32_DEBUGCTLMSR:
        msr_content = vmcb->debugctlmsr;
        break;

    case MSR_IA32_LASTBRANCHFROMIP:
        msr_content = vmcb->lastbranchfromip;
        break;

    case MSR_IA32_LASTBRANCHTOIP:
        msr_content = vmcb->lastbranchtoip;
        break;

    case MSR_IA32_LASTINTFROMIP:
        msr_content = vmcb->lastintfromip;
        break;

    case MSR_IA32_LASTINTTOIP:
        msr_content = vmcb->lastinttoip;
        break;

    default:
        if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
             rdmsr_safe(ecx, eax, edx) == 0 )
        {
            regs->eax = eax;
            regs->edx = edx;
            goto done;
        }
        goto gpf;
    }
    regs->eax = msr_content & 0xFFFFFFFF;
    regs->edx = msr_content >> 32;

 done:
    HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
    return X86EMUL_OKAY;

 gpf:
    svm_inject_exception(TRAP_gp_fault, 0, 0);
    return X86EMUL_EXCEPTION;
}
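
/*
 * RDMSR returns the 64-bit MSR value split across the EAX:EDX register
 * pair: EAX carries bits 31:0 and EDX bits 63:32. That is why the read
 * path above ends with
 *
 *     regs->eax = msr_content & 0xFFFFFFFF;
 *     regs->edx = msr_content >> 32;
 *
 * and why the write path below begins by reassembling the two halves
 * with (u32)regs->eax | ((u64)regs->edx << 32).
 */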

static int svm_msr_write_intercept(struct cpu_user_regs *regs)
{
    u64 msr_content = 0;
    u32 ecx = regs->ecx;
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);

    HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);

    switch ( ecx )
    {
    case MSR_K8_VM_HSAVE_PA:
        goto gpf;

    case MSR_IA32_DEBUGCTLMSR:
        vmcb->debugctlmsr = msr_content;
        if ( !msr_content || !cpu_has_svm_lbrv )
            break;
        vmcb->lbr_control.fields.enable = 1;
        svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
        svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHFROMIP);
        svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHTOIP);
        svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTFROMIP);
        svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTTOIP);
        break;

    case MSR_IA32_LASTBRANCHFROMIP:
        vmcb->lastbranchfromip = msr_content;
        break;

    case MSR_IA32_LASTBRANCHTOIP:
        vmcb->lastbranchtoip = msr_content;
        break;

    case MSR_IA32_LASTINTFROMIP:
        vmcb->lastintfromip = msr_content;
        break;

    case MSR_IA32_LASTINTTOIP:
        vmcb->lastinttoip = msr_content;
        break;

    default:
        switch ( long_mode_do_msr_write(regs) )
        {
        case HNDL_unhandled:
            wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
            break;
        case HNDL_exception_raised:
            return X86EMUL_EXCEPTION;
        case HNDL_done:
            break;
        }
        break;
    }

    return X86EMUL_OKAY;

 gpf:
    svm_inject_exception(TRAP_gp_fault, 0, 0);
    return X86EMUL_EXCEPTION;
}
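
/*
 * The MSR_IA32_DEBUGCTLMSR case above is what switches on SVM's Last
 * Branch Record virtualization: when the guest writes a non-zero
 * DEBUGCTL value on hardware that supports it (cpu_has_svm_lbrv), the
 * VMCB's LBR control is enabled and the DEBUGCTL/LASTBRANCH/LASTINT
 * MSRs are taken out of the MSR intercept bitmap, so later guest
 * accesses are satisfied from the VMCB-backed copies without causing
 * a VMEXIT.
 */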

static void svm_do_msr_access(struct cpu_user_regs *regs)
{
    int rc, inst_len;
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    if ( vmcb->exitinfo1 == 0 )
    {
        if ( (inst_len = __get_instruction_length(v, INSTR_RDMSR)) == 0 )
            return;
        rc = hvm_msr_read_intercept(regs);
    }
    else
    {
        if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
            return;
        rc = hvm_msr_write_intercept(regs);
    }

    if ( rc == X86EMUL_OKAY )
        __update_guest_eip(regs, inst_len);
}

static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
                              struct cpu_user_regs *regs)
{
    unsigned int inst_len;

    if ( (inst_len = __get_instruction_length(current, INSTR_HLT)) == 0 )
        return;
    __update_guest_eip(regs, inst_len);

    hvm_hlt(regs->eflags);
}

static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs)
{
    unsigned int inst_len;

    if ( (inst_len = __get_instruction_length(current, INSTR_RDTSC)) == 0 )
        return;
    __update_guest_eip(regs, inst_len);

    hvm_rdtsc_intercept(regs);
}

static void wbinvd_ipi(void *info)
{
    wbinvd();
}

static void svm_wbinvd_intercept(void)
{
    if ( has_arch_pdevs(current->domain) )
        on_each_cpu(wbinvd_ipi, NULL, 1, 1);
}

static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
{
    enum instruction_index list[] = { INSTR_INVD, INSTR_WBINVD };
    int inst_len;

    inst_len = __get_instruction_length_from_list(
        current, list, ARRAY_SIZE(list));
    if ( inst_len == 0 )
        return;

    svm_wbinvd_intercept();

    __update_guest_eip(regs, inst_len);
}

static void svm_invlpg_intercept(unsigned long vaddr)
{
    struct vcpu *curr = current;
    HVMTRACE_LONG_2D(INVLPG, curr, 0, TRC_PAR_LONG(vaddr));
    paging_invlpg(curr, vaddr);
    svm_asid_g_invlpg(curr, vaddr);
}
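
/*
 * On every #VMEXIT the processor records the exit reason in the VMCB's
 * EXITCODE field and qualifying data in EXITINFO1/EXITINFO2: for MSR
 * intercepts EXITINFO1 distinguishes RDMSR (0) from WRMSR (non-zero),
 * as used by svm_do_msr_access above, and for #PF intercepts EXITINFO1
 * holds the error code while EXITINFO2 holds the faulting address. The
 * handler below is the single dispatch point over those exit codes; the
 * "asynchronous event" cases (INTR/NMI/SMI) need nothing beyond tracing
 * here because the pending event is taken once STGI re-enables global
 * interrupts on the exit path.
 */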

asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
{
    unsigned int exit_reason;
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    eventinj_t eventinj;
    int inst_len, rc;

    /*
     * Before doing anything else, we need to sync up the VLAPIC's TPR with
     * SVM's vTPR. It's OK if the guest doesn't touch CR8 (e.g. 32-bit Windows)
     * because we update the vTPR on MMIO writes to the TPR.
     */
    vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
                   (vmcb->vintr.fields.tpr & 0x0F) << 4);

    exit_reason = vmcb->exitcode;

    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
                (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
                0, 0, 0);

    if ( unlikely(exit_reason == VMEXIT_INVALID) )
    {
        svm_dump_vmcb(__func__, vmcb);
        goto exit_and_crash;
    }

    perfc_incra(svmexits, exit_reason);

    hvm_maybe_deassert_evtchn_irq();

    /* Event delivery caused this intercept? Queue for redelivery. */
    eventinj = vmcb->exitintinfo;
    if ( unlikely(eventinj.fields.v) &&
         hvm_event_needs_reinjection(eventinj.fields.type,
                                     eventinj.fields.vector) )
        vmcb->eventinj = eventinj;

    switch ( exit_reason )
    {
    case VMEXIT_INTR:
        /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
        HVMTRACE_0D(INTR, v);
        break;

    case VMEXIT_NMI:
        /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
        HVMTRACE_0D(NMI, v);
        break;

    case VMEXIT_SMI:
        /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
        HVMTRACE_0D(SMI, v);
        break;

    case VMEXIT_EXCEPTION_DB:
        if ( !v->domain->debugger_attached )
            goto exit_and_crash;
        domain_pause_for_debugger();
        break;

    case VMEXIT_EXCEPTION_BP:
        if ( !v->domain->debugger_attached )
            goto exit_and_crash;
        /* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. */
        if ( (inst_len = __get_instruction_length(v, INSTR_INT3)) == 0 )
            break;
        __update_guest_eip(regs, inst_len);
        domain_pause_for_debugger();
        break;

    case VMEXIT_EXCEPTION_NM:
        svm_fpu_dirty_intercept();
        break;

    case VMEXIT_EXCEPTION_PF: {
        unsigned long va;
        va = vmcb->exitinfo2;
        regs->error_code = vmcb->exitinfo1;
        HVM_DBG_LOG(DBG_LEVEL_VMMU,
                    "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
                    (unsigned long)regs->eax, (unsigned long)regs->ebx,
                    (unsigned long)regs->ecx, (unsigned long)regs->edx,
                    (unsigned long)regs->esi, (unsigned long)regs->edi);

        if ( paging_fault(va, regs) )
        {
            if ( hvm_long_mode_enabled(v) )
                HVMTRACE_LONG_2D(PF_XEN, v, regs->error_code, TRC_PAR_LONG(va));
            else
                HVMTRACE_2D(PF_XEN, v, regs->error_code, va);
            break;
        }

        svm_inject_exception(TRAP_page_fault, regs->error_code, va);
        break;
    }

    /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
    case VMEXIT_EXCEPTION_MC:
        HVMTRACE_0D(MCE, v);
        break;

    case VMEXIT_VINTR:
        vmcb->vintr.fields.irq = 0;
        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
        break;

    case VMEXIT_INVD:
    case VMEXIT_WBINVD:
        svm_vmexit_do_invalidate_cache(regs);
        break;

    case VMEXIT_TASK_SWITCH: {
        enum hvm_task_switch_reason reason;
        int32_t errcode = -1;
        if ( (vmcb->exitinfo2 >> 36) & 1 )
            reason = TSW_iret;
        else if ( (vmcb->exitinfo2 >> 38) & 1 )
            reason = TSW_jmp;
        else
            reason = TSW_call_or_int;
        if ( (vmcb->exitinfo2 >> 44) & 1 )
            errcode = (uint32_t)vmcb->exitinfo2;

        /*
         * Some processors set the EXITINTINFO field when the task switch
         * is caused by a task gate in the IDT. In this case we will be
         * emulating the event injection, so we do not want the processor
         * to re-inject the original event!
         */
        vmcb->eventinj.bytes = 0;

        hvm_task_switch((uint16_t)vmcb->exitinfo1, reason, errcode);
        break;
    }

    case VMEXIT_CPUID:
        svm_vmexit_do_cpuid(regs);
        break;

    case VMEXIT_HLT:
        svm_vmexit_do_hlt(vmcb, regs);
        break;

    case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
    case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
    case VMEXIT_INVLPG:
    case VMEXIT_INVLPGA:
    case VMEXIT_IOIO:
        if ( !handle_mmio() )
            hvm_inject_exception(TRAP_gp_fault, 0, 0);
        break;

    case VMEXIT_VMMCALL:
        if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
            break;
        HVMTRACE_1D(VMMCALL, v, regs->eax);
        rc = hvm_do_hypercall(regs);
        if ( rc != HVM_HCALL_preempted )
        {
            __update_guest_eip(regs, inst_len);
            if ( rc == HVM_HCALL_invalidate )
                send_invalidate_req();
        }
        break;

    case VMEXIT_DR0_READ ... VMEXIT_DR7_READ:
    case VMEXIT_DR0_WRITE ... VMEXIT_DR7_WRITE:
        svm_dr_access(v, regs);
        break;

    case VMEXIT_MSR:
        svm_do_msr_access(regs);
        break;

    case VMEXIT_SHUTDOWN:
        hvm_triple_fault();
        break;

    case VMEXIT_RDTSC:
        svm_vmexit_do_rdtsc(regs);
        break;

    case VMEXIT_RDTSCP:
    case VMEXIT_MONITOR:
    case VMEXIT_MWAIT:
    case VMEXIT_VMRUN:
    case VMEXIT_VMLOAD:
    case VMEXIT_VMSAVE:
    case VMEXIT_STGI:
    case VMEXIT_CLGI:
    case VMEXIT_SKINIT:
        svm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
        break;

    case VMEXIT_NPF:
        perfc_incra(svmexits, VMEXIT_NPF_PERFC);
        regs->error_code = vmcb->exitinfo1;
        svm_do_nested_pgfault(vmcb->exitinfo2, regs);
        break;

    case VMEXIT_IRET:
        /*
         * IRET clears the NMI mask. However because we clear the mask
         * /before/ executing IRET, we set the interrupt shadow to prevent
         * a pending NMI from being injected immediately. This will work
         * perfectly unless the IRET instruction faults: in that case we
         * may inject an NMI before the NMI handler's IRET instruction is
         * retired.
         */
        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
        vmcb->interrupt_shadow = 1;
        break;

    default:
    exit_and_crash:
        gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "
                 "exitinfo1 = %"PRIx64", exitinfo2 = %"PRIx64"\n",
                 exit_reason,
                 (u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
        domain_crash(v->domain);
        break;
    }

    /* The exit may have updated the TPR: reflect this in the hardware vtpr */
    vmcb->vintr.fields.tpr =
        (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
}

asmlinkage void svm_trace_vmentry(void)
{
    HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
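
The handler brackets its big switch with two TPR conversions: on entry it copies the VMCB's 4-bit vTPR into bits 7:4 of the VLAPIC's TASKPRI register, and on exit it copies the (possibly updated) TASKPRI back into the vTPR. The standalone sketch below is not part of svm.c and uses hypothetical helper names; it just demonstrates that the two shift/mask conversions used above round-trip cleanly.

#include <assert.h>
#include <stdint.h>

/* VMCB vTPR (4 bits) -> APIC TASKPRI (priority class in bits 7:4). */
static uint8_t vtpr_to_tpr(uint8_t vtpr) { return (vtpr & 0x0F) << 4; }

/* APIC TASKPRI -> VMCB vTPR: keep the priority class, drop the subclass. */
static uint8_t tpr_to_vtpr(uint8_t tpr) { return (tpr & 0xFF) >> 4; }

int main(void)
{
    /* Every 4-bit vTPR value survives the round trip unchanged. */
    for ( unsigned int v = 0; v < 16; v++ )
        assert(tpr_to_vtpr(vtpr_to_tpr((uint8_t)v)) == v);
    return 0;
}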
