/*
 * vmcs.c — Xen VMX VMCS management.
 * Extracted from the "xen virtual machine source package" web viewer,
 * page 1 of 3 of a 1,066-line file; surrounding viewer chrome removed.
 */
return -ESRCH;}int vmx_add_guest_msr(u32 msr){ struct vcpu *curr = current; unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count; struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area; if ( msr_area == NULL ) { if ( (msr_area = alloc_xenheap_page()) == NULL ) return -ENOMEM; curr->arch.hvm_vmx.msr_area = msr_area; __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area)); __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area)); } for ( i = 0; i < msr_count; i++ ) if ( msr_area[i].index == msr ) return 0; if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) ) return -ENOSPC; msr_area[msr_count].index = msr; msr_area[msr_count].mbz = 0; msr_area[msr_count].data = 0; curr->arch.hvm_vmx.msr_count = ++msr_count; __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count); __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count); return 0;}int vmx_add_host_load_msr(u32 msr){ struct vcpu *curr = current; unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count; struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area; if ( msr_area == NULL ) { if ( (msr_area = alloc_xenheap_page()) == NULL ) return -ENOMEM; curr->arch.hvm_vmx.host_msr_area = msr_area; __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area)); } for ( i = 0; i < msr_count; i++ ) if ( msr_area[i].index == msr ) return 0; if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) ) return -ENOSPC; msr_area[msr_count].index = msr; msr_area[msr_count].mbz = 0; rdmsrl(msr, msr_area[msr_count].data); curr->arch.hvm_vmx.host_msr_count = ++msr_count; __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count); return 0;}int vmx_create_vmcs(struct vcpu *v){ struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx; int rc; if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL ) return -ENOMEM; INIT_LIST_HEAD(&arch_vmx->active_list); __vmpclear(virt_to_maddr(arch_vmx->vmcs)); arch_vmx->active_cpu = -1; arch_vmx->launched = 0; if ( (rc = construct_vmcs(v)) != 0 ) { vmx_free_vmcs(arch_vmx->vmcs); return rc; } return 0;}void 
vmx_destroy_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    /* Detach the VMCS from whichever pCPU currently holds it, then free it. */
    vmx_clear_vmcs(v);
    vmx_free_vmcs(arch_vmx->vmcs);

    /*
     * Release the optional per-vcpu MSR pages; presumably
     * free_xenheap_page() tolerates NULL for never-allocated areas —
     * TODO confirm against its definition.
     */
    free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
    free_xenheap_page(v->arch.hvm_vmx.msr_area);
    free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
}

/* VMLAUNCH failed: log the VM-instruction error field and kill the domain. */
void vm_launch_fail(void)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_launch_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

/* VMRESUME failed: log the VM-instruction error field and kill the domain. */
void vm_resume_fail(void)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_resume_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

/* IPI callback: flush this CPU's caches. @info is unused. */
static void wbinvd_ipi(void *info)
{
    wbinvd();
}

/*
 * Per-vcpu work before re-entering the guest: (re)load the VMCS if the vcpu
 * has moved or another VMCS is current, sync the debug-exception intercepts
 * with the debugger-attached state, and jump onto the VM-entry path.
 * Does not return: ends in reset_stack_and_jump(vmx_asm_do_vmentry).
 */
void vmx_do_resume(struct vcpu *v)
{
    bool_t debug_state;

    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        /* Same pCPU as last time; reload only if another VMCS is current. */
        if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
            vmx_load_vmcs(v);
    }
    else
    {
        /*
         * For pass-through domain, guest PCI-E device driver may leverage the
         * "Non-Snoop" I/O, and explicitly WBINVD or CLFLUSH to a RAM space.
         * Since migration may occur before WBINVD or CLFLUSH, we need to
         * maintain data consistency either by:
         * 1: flushing cache (wbinvd) when the guest is scheduled out if
         *    there is no wbinvd exit, or
         * 2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
         */
        if ( has_arch_pdevs(v->domain) && !cpu_has_wbinvd_exiting )
        {
            int cpu = v->arch.hvm_vmx.active_cpu;
            if ( cpu != -1 )
                on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
        }

        /* Migrate the VMCS to this pCPU and refresh pCPU-local state. */
        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
        vpid_sync_vcpu_all(v);
    }

    /* Intercept #DB/#BP only while a debugger is attached to the domain. */
    debug_state = v->domain->debugger_attached;
    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        unsigned long intercepts = __vmread(EXCEPTION_BITMAP);
        unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3);
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            intercepts |= mask;
        else
            intercepts &= ~mask;
        __vmwrite(EXCEPTION_BITMAP, intercepts);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}

/* Pretty-print one guest segment register for vmcs_dump_vcpu(). */
static void vmx_dump_sel(char *name, enum x86_segment seg)
{
    struct segment_register sreg;
    hvm_get_segment_register(current, seg, &sreg);
    printk("%s: sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016llx\n",
           name, sreg.sel, sreg.attr.bytes, sreg.limit,
           (unsigned long long)sreg.base);
}

/*
 * Safe VMCS read for dumping: returns the field value, or 0 if the
 * VMREAD faults (field not supported on this CPU).
 * NOTE: this function's ternary continues on the next source line.
 */
static unsigned long vmr(unsigned long field)
{
    int rc;
    unsigned long val;
    val = __vmread_safe(field, &rc);
    return rc ?
0 : val;}void vmcs_dump_vcpu(struct vcpu *v){ struct cpu_user_regs *regs = &v->arch.guest_context.user_regs; unsigned long long x; if ( v == current ) regs = guest_cpu_user_regs(); vmx_vmcs_enter(v); printk("*** Guest State ***\n"); printk("CR0: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n", (unsigned long long)vmr(GUEST_CR0), (unsigned long long)vmr(CR0_READ_SHADOW), (unsigned long long)vmr(CR0_GUEST_HOST_MASK)); printk("CR4: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n", (unsigned long long)vmr(GUEST_CR4), (unsigned long long)vmr(CR4_READ_SHADOW), (unsigned long long)vmr(CR4_GUEST_HOST_MASK)); printk("CR3: actual=0x%016llx, target_count=%d\n", (unsigned long long)vmr(GUEST_CR3), (int)vmr(CR3_TARGET_COUNT)); printk(" target0=%016llx, target1=%016llx\n", (unsigned long long)vmr(CR3_TARGET_VALUE0), (unsigned long long)vmr(CR3_TARGET_VALUE1)); printk(" target2=%016llx, target3=%016llx\n", (unsigned long long)vmr(CR3_TARGET_VALUE2), (unsigned long long)vmr(CR3_TARGET_VALUE3)); printk("RSP = 0x%016llx (0x%016llx) RIP = 0x%016llx (0x%016llx)\n", (unsigned long long)vmr(GUEST_RSP), (unsigned long long)regs->esp, (unsigned long long)vmr(GUEST_RIP), (unsigned long long)regs->eip); printk("RFLAGS=0x%016llx (0x%016llx) DR7 = 0x%016llx\n", (unsigned long long)vmr(GUEST_RFLAGS), (unsigned long long)regs->eflags, (unsigned long long)vmr(GUEST_DR7)); printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n", (unsigned long long)vmr(GUEST_SYSENTER_ESP), (int)vmr(GUEST_SYSENTER_CS), (unsigned long long)vmr(GUEST_SYSENTER_EIP)); vmx_dump_sel("CS", x86_seg_cs); vmx_dump_sel("DS", x86_seg_ds); vmx_dump_sel("SS", x86_seg_ss); vmx_dump_sel("ES", x86_seg_es); vmx_dump_sel("FS", x86_seg_fs); vmx_dump_sel("GS", x86_seg_gs); vmx_dump_sel("GDTR", x86_seg_gdtr); vmx_dump_sel("LDTR", x86_seg_ldtr); vmx_dump_sel("IDTR", x86_seg_idtr); vmx_dump_sel("TR", x86_seg_tr); x = (unsigned long long)vmr(TSC_OFFSET_HIGH) << 32; x |= (uint32_t)vmr(TSC_OFFSET); printk("TSC Offset = %016llx\n", 
x); x = (unsigned long long)vmr(GUEST_IA32_DEBUGCTL_HIGH) << 32; x |= (uint32_t)vmr(GUEST_IA32_DEBUGCTL); printk("DebugCtl=%016llx DebugExceptions=%016llx\n", x, (unsigned long long)vmr(GUEST_PENDING_DBG_EXCEPTIONS)); printk("Interruptibility=%04x ActivityState=%04x\n", (int)vmr(GUEST_INTERRUPTIBILITY_INFO), (int)vmr(GUEST_ACTIVITY_STATE)); printk("*** Host State ***\n"); printk("RSP = 0x%016llx RIP = 0x%016llx\n", (unsigned long long)vmr(HOST_RSP), (unsigned long long)vmr(HOST_RIP)); printk("CS=%04x DS=%04x ES=%04x FS=%04x GS=%04x SS=%04x TR=%04x\n", (uint16_t)vmr(HOST_CS_SELECTOR), (uint16_t)vmr(HOST_DS_SELECTOR), (uint16_t)vmr(HOST_ES_SELECTOR), (uint16_t)vmr(HOST_FS_SELECTOR), (uint16_t)vmr(HOST_GS_SELECTOR), (uint16_t)vmr(HOST_SS_SELECTOR), (uint16_t)vmr(HOST_TR_SELECTOR)); printk("FSBase=%016llx GSBase=%016llx TRBase=%016llx\n", (unsigned long long)vmr(HOST_FS_BASE), (unsigned long long)vmr(HOST_GS_BASE), (unsigned long long)vmr(HOST_TR_BASE)); printk("GDTBase=%016llx IDTBase=%016llx\n", (unsigned long long)vmr(HOST_GDTR_BASE), (unsigned long long)vmr(HOST_IDTR_BASE)); printk("CR0=%016llx CR3=%016llx CR4=%016llx\n", (unsigned long long)vmr(HOST_CR0), (unsigned long long)vmr(HOST_CR3), (unsigned long long)vmr(HOST_CR4)); printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n", (unsigned long long)vmr(HOST_SYSENTER_ESP), (int)vmr(HOST_SYSENTER_CS), (unsigned long long)vmr(HOST_SYSENTER_EIP)); printk("*** Control State ***\n"); printk("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", (uint32_t)vmr(PIN_BASED_VM_EXEC_CONTROL), (uint32_t)vmr(CPU_BASED_VM_EXEC_CONTROL), (uint32_t)vmr(SECONDARY_VM_EXEC_CONTROL)); printk("EntryControls=%08x ExitControls=%08x\n", (uint32_t)vmr(VM_ENTRY_CONTROLS), (uint32_t)vmr(VM_EXIT_CONTROLS)); printk("ExceptionBitmap=%08x\n", (uint32_t)vmr(EXCEPTION_BITMAP)); printk("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", (uint32_t)vmr(VM_ENTRY_INTR_INFO), (uint32_t)vmr(VM_ENTRY_EXCEPTION_ERROR_CODE), 
(uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN)); printk("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", (uint32_t)vmr(VM_EXIT_INTR_INFO), (uint32_t)vmr(VM_EXIT_INTR_ERROR_CODE), (uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN)); printk(" reason=%08x qualification=%08x\n", (uint32_t)vmr(VM_EXIT_REASON), (uint32_t)vmr(EXIT_QUALIFICATION)); printk("IDTVectoring: info=%08x errcode=%08x\n", (uint32_t)vmr(IDT_VECTORING_INFO), (uint32_t)vmr(IDT_VECTORING_ERROR_CODE)); printk("TPR Threshold = 0x%02x\n", (uint32_t)vmr(TPR_THRESHOLD)); printk("EPT pointer = 0x%08x%08x\n", (uint32_t)vmr(EPT_POINTER_HIGH), (uint32_t)vmr(EPT_POINTER)); printk("Virtual processor ID = 0x%04x\n", (uint32_t)vmr(VIRTUAL_PROCESSOR_ID)); vmx_vmcs_exit(v);}static void vmcs_dump(unsigned char ch){ struct domain *d; struct vcpu *v; printk("*********** VMCS Areas **************\n"); rcu_read_lock(&domlist_read_lock); for_each_domain ( d ) { if ( !is_hvm_domain(d) ) continue; printk("\n>>> Domain %d <<<\n", d->domain_id); for_each_vcpu ( d, v ) { printk("\tVCPU %d\n", v->vcpu_id); vmcs_dump_vcpu(v); } } rcu_read_unlock(&domlist_read_lock); printk("**************************************\n");}void setup_vmcs_dump(void){ register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");}/* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */
/* End of extracted page 1/3 (web-viewer shortcut-key footer removed). */