vmx.c
From the “xen virtual machine source package” · C code · 2,245 lines total · page 1/5
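/*
 * Excerpt from Xen's VMX HVM support code (vmx.c): VM-exit handlers for
 * CPUID, debug-register access, INVLPG, control-register access and
 * RDMSR, plus the vLAPIC access-page and VPID allocation helpers.
 */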
    HVMTRACE_5D (CPUID, current, input, *eax, *ebx, *ecx, *edx);
}

static void vmx_do_cpuid(struct cpu_user_regs *regs)
{
    unsigned int eax, ebx, ecx, edx;

    eax = regs->eax;
    ebx = regs->ebx;
    ecx = regs->ecx;
    edx = regs->edx;

    vmx_cpuid_intercept(&eax, &ebx, &ecx, &edx);

    regs->eax = eax;
    regs->ebx = ebx;
    regs->ecx = ecx;
    regs->edx = edx;
}

static void vmx_dr_access(unsigned long exit_qualification,
                          struct cpu_user_regs *regs)
{
    struct vcpu *v = current;

    HVMTRACE_0D(DR_WRITE, v);

    if ( !v->arch.hvm_vcpu.flag_dr_dirty )
        __restore_debug_registers(v);

    /* Allow guest direct access to DR registers */
    v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
}

static void vmx_invlpg_intercept(unsigned long vaddr)
{
    struct vcpu *curr = current;
    HVMTRACE_LONG_2D(INVLPG, curr, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
    if ( paging_invlpg(curr, vaddr) )
        vpid_sync_vcpu_gva(curr, vaddr);
}

#define CASE_SET_REG(REG, reg)      \
    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break
#define CASE_GET_REG(REG, reg)      \
    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break

#define CASE_EXTEND_SET_REG         \
    CASE_EXTEND_REG(S)
#define CASE_EXTEND_GET_REG         \
    CASE_EXTEND_REG(G)

#ifdef __i386__
#define CASE_EXTEND_REG(T)
#else
#define CASE_EXTEND_REG(T)          \
    CASE_ ## T ## ET_REG(R8, r8);   \
    CASE_ ## T ## ET_REG(R9, r9);   \
    CASE_ ## T ## ET_REG(R10, r10); \
    CASE_ ## T ## ET_REG(R11, r11); \
    CASE_ ## T ## ET_REG(R12, r12); \
    CASE_ ## T ## ET_REG(R13, r13); \
    CASE_ ## T ## ET_REG(R14, r14); \
    CASE_ ## T ## ET_REG(R15, r15)
#endif
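/*
 * Write to control registers. The source GPR was decoded from the exit
 * qualification. CR0/CR3/CR4 writes go through the generic hvm_set_cr*()
 * paths; a CR8 write is folded into the vLAPIC TPR. Returns 1 on success,
 * 0 on failure (failed hvm_set_cr*() call, or domain_crash() on an
 * invalid GPR/CR encoding).
 */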
static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
{
    unsigned long value;
    struct vcpu *v = current;
    struct vlapic *vlapic = vcpu_vlapic(v);

    switch ( gp )
    {
    CASE_GET_REG(EAX, eax);
    CASE_GET_REG(ECX, ecx);
    CASE_GET_REG(EDX, edx);
    CASE_GET_REG(EBX, ebx);
    CASE_GET_REG(EBP, ebp);
    CASE_GET_REG(ESI, esi);
    CASE_GET_REG(EDI, edi);
    CASE_GET_REG(ESP, esp);
    CASE_EXTEND_GET_REG;
    default:
        gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
        goto exit_and_crash;
    }

    HVMTRACE_LONG_2D(CR_WRITE, v, cr, TRC_PAR_LONG(value));

    HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);

    switch ( cr )
    {
    case 0:
        return !hvm_set_cr0(value);
    case 3:
        return !hvm_set_cr3(value);
    case 4:
        return !hvm_set_cr4(value);
    case 8:
        vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
        break;
    default:
        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
        goto exit_and_crash;
    }

    return 1;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}

/*
 * Read from control registers. CR0 and CR4 are read from the shadow.
 */
static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
{
    unsigned long value = 0;
    struct vcpu *v = current;
    struct vlapic *vlapic = vcpu_vlapic(v);

    switch ( cr )
    {
    case 3:
        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
        break;
    case 8:
        value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
        value = (value & 0xF0) >> 4;
        break;
    default:
        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
        domain_crash(v->domain);
        break;
    }

    switch ( gp )
    {
    CASE_SET_REG(EAX, eax);
    CASE_SET_REG(ECX, ecx);
    CASE_SET_REG(EDX, edx);
    CASE_SET_REG(EBX, ebx);
    CASE_SET_REG(EBP, ebp);
    CASE_SET_REG(ESI, esi);
    CASE_SET_REG(EDI, edi);
    CASE_SET_REG(ESP, esp);
    CASE_EXTEND_SET_REG;
    default:
        printk("invalid gp: %d\n", gp);
        domain_crash(v->domain);
        break;
    }

    HVMTRACE_LONG_2D(CR_READ, v, cr, TRC_PAR_LONG(value));

    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
}

static int vmx_cr_access(unsigned long exit_qualification,
                         struct cpu_user_regs *regs)
{
    unsigned int gp, cr;
    unsigned long value;
    struct vcpu *v = current;

    switch ( exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE )
    {
    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
        return mov_to_cr(gp, cr, regs);
    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
        mov_from_cr(cr, gp, regs);
        break;
    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
        vmx_update_guest_cr(v, 0);
        HVMTRACE_0D(CLTS, current);
        break;
    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
        value = v->arch.hvm_vcpu.guest_cr[0];
        /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
        value = (value & ~0xe) | ((exit_qualification >> 16) & 0xf);
        HVMTRACE_LONG_1D(LMSW, current, value);
        return !hvm_set_cr0(value);
    default:
        BUG();
    }

    return 1;
}

static const struct lbr_info {
    u32 base, count;
} p4_lbr[] = {
    { MSR_P4_LER_FROM_LIP,          1 },
    { MSR_P4_LER_TO_LIP,            1 },
    { MSR_P4_LASTBRANCH_TOS,        1 },
    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
    { 0, 0 }
}, c2_lbr[] = {
    { MSR_IA32_LASTINTFROMIP,       1 },
    { MSR_IA32_LASTINTTOIP,         1 },
    { MSR_C2_LASTBRANCH_TOS,        1 },
    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_C2_LASTBRANCH_FROM_TO },
    { 0, 0 }
#ifdef __i386__
}, pm_lbr[] = {
    { MSR_IA32_LASTINTFROMIP,       1 },
    { MSR_IA32_LASTINTTOIP,         1 },
    { MSR_PM_LASTBRANCH_TOS,        1 },
    { MSR_PM_LASTBRANCH_0,          NUM_MSR_PM_LASTBRANCH },
    { 0, 0 }
#endif
};

static const struct lbr_info *last_branch_msr_get(void)
{
    switch ( boot_cpu_data.x86 )
    {
    case 6:
        switch ( boot_cpu_data.x86_model )
        {
#ifdef __i386__
        /* PentiumM */
        case 9: case 13:
        /* Core Solo/Duo */
        case 14:
            return pm_lbr;
            break;
#endif
        /* Core2 Duo */
        case 15:
            return c2_lbr;
            break;
        }
        break;
    case 15:
        switch ( boot_cpu_data.x86_model )
        {
        /* Pentium4/Xeon with em64t */
        case 3: case 4: case 6:
            return p4_lbr;
            break;
        }
        break;
    }

    return NULL;
}

static int is_last_branch_msr(u32 ecx)
{
    const struct lbr_info *lbr = last_branch_msr_get();

    if ( lbr == NULL )
        return 0;

    for ( ; lbr->count; lbr++ )
        if ( (ecx >= lbr->base) && (ecx < (lbr->base + lbr->count)) )
            return 1;

    return 0;
}
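/*
 * RDMSR intercept. SYSENTER and DEBUGCTL state lives in the VMCS, so it
 * is fetched with __vmread(); reads of the VMX capability MSR range
 * fault with #GP. Unrecognised MSRs are offered in turn to the vPMU,
 * the long-mode handler, the guest MSR save/restore area and the
 * hypervisor/hardware MSRs before #GP is injected.
 */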
static int vmx_msr_read_intercept(struct cpu_user_regs *regs)
{
    u64 msr_content = 0;
    u32 ecx = regs->ecx, eax, edx;
    struct vcpu *v = current;

    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", ecx);

    switch ( ecx )
    {
    case MSR_IA32_SYSENTER_CS:
        msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
        break;
    case MSR_IA32_SYSENTER_ESP:
        msr_content = __vmread(GUEST_SYSENTER_ESP);
        break;
    case MSR_IA32_SYSENTER_EIP:
        msr_content = __vmread(GUEST_SYSENTER_EIP);
        break;
    case MSR_IA32_DEBUGCTLMSR:
        msr_content = __vmread(GUEST_IA32_DEBUGCTL);
#ifdef __i386__
        msr_content |= (u64)__vmread(GUEST_IA32_DEBUGCTL_HIGH) << 32;
#endif
        break;
    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
        goto gp_fault;
    case MSR_IA32_MISC_ENABLE:
        rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
        /* Debug Trace Store is not supported. */
        msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
                       MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
        break;
    default:
        if ( vpmu_do_rdmsr(regs) )
            goto done;
        switch ( long_mode_do_msr_read(regs) )
        {
            case HNDL_unhandled:
                break;
            case HNDL_exception_raised:
                return X86EMUL_EXCEPTION;
            case HNDL_done:
                goto done;
        }

        if ( vmx_read_guest_msr(ecx, &msr_content) == 0 )
            break;

        if ( is_last_branch_msr(ecx) )
        {
            msr_content = 0;
            break;
        }

        if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
             rdmsr_safe(ecx, eax, edx) == 0 )
        {
            regs->eax = eax;
            regs->edx = edx;
            goto done;
        }

        goto gp_fault;
    }

    regs->eax = (uint32_t)msr_content;
    regs->edx = (uint32_t)(msr_content >> 32);

done:
    HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
    return X86EMUL_OKAY;

gp_fault:
    vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    return X86EMUL_EXCEPTION;
}

static int vmx_alloc_vlapic_mapping(struct domain *d)
{
    void *apic_va;

    if ( !cpu_has_vmx_virtualize_apic_accesses )
        return 0;

    apic_va = alloc_xenheap_page();
    if ( apic_va == NULL )
        return -ENOMEM;
    share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
    set_mmio_p2m_entry(
        d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(virt_to_mfn(apic_va)));
    d->arch.hvm_domain.vmx.apic_access_mfn = virt_to_mfn(apic_va);

    return 0;
}

static void vmx_free_vlapic_mapping(struct domain *d)
{
    unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;

    if ( mfn != 0 )
        free_xenheap_page(mfn_to_virt(mfn));
}

static int vmx_alloc_vpid(struct domain *d)
{
    int idx;

    if ( !cpu_has_vmx_vpid )
        return 0;

    do {
        idx = find_first_zero_bit(vpid_bitmap, VPID_BITMAP_SIZE);
        if ( idx >= VPID_BITMAP_SIZE )
        {
            dprintk(XENLOG_WARNING, "VMX VPID space exhausted.\n");
            return -EBUSY;
        }
    }
    while ( test_and_set_bit(idx, vpid_bitmap) );

    d->arch.hvm_domain.vmx.vpid_base = idx * MAX_VIRT_CPUS;
    return 0;
}

static void vmx_free_vpid(struct domain *d)
{
    if ( !cpu_has_vmx_vpid )
        return;

    clear_bit(d->arch.hvm_domain.vmx.vpid_base / MAX_VIRT_CPUS, vpid_bitmap);
}

static void vmx_install_vlapic_mapping(struct vcpu *v)
{
    paddr_t virt_page_ma, apic_page_ma;

    if ( !cpu_has_vmx_virtualize_apic_accesses )
        return;

    virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
    apic_page_ma = v->domain->arch.hvm_domain.vmx.apic_access_mfn;
    apic_page_ma <<= PAGE_SHIFT;

    vmx_vmcs_enter(v);
    __vmwrite(VIRTUAL_APIC_PAGE_ADDR, virt_page_ma);
    __vmwrite(APIC_ACCESS_ADDR, apic_page_ma);
    vmx_vmcs_exit(v);
}

void vmx_vlapic_msr_changed(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    uint32_t ctl;

    if ( !cpu_has_vmx_virtualize_apic_accesses )
        return;

    vmx_vmcs_enter(v);
    ctl  = __vmread(SECONDARY_VM_EXEC_CONTROL);
    ctl &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
    if ( !vlapic_hw_disabled(vlapic) &&
         (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) )
        ctl |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
    __vmwrite(SECONDARY_VM_EXEC_CONTROL, ctl);
    vmx_vmcs_exit(v);
}

static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
{