svm.c
    arch_svm->vmcb_in_sync = 1;
    svm_vmsave(arch_svm->vmcb);
}

static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                     struct segment_register *reg)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    ASSERT((v == current) || !vcpu_runnable(v));

    switch ( seg )
    {
    case x86_seg_cs:
        memcpy(reg, &vmcb->cs, sizeof(*reg));
        break;
    case x86_seg_ds:
        memcpy(reg, &vmcb->ds, sizeof(*reg));
        break;
    case x86_seg_es:
        memcpy(reg, &vmcb->es, sizeof(*reg));
        break;
    case x86_seg_fs:
        svm_sync_vmcb(v);
        memcpy(reg, &vmcb->fs, sizeof(*reg));
        break;
    case x86_seg_gs:
        svm_sync_vmcb(v);
        memcpy(reg, &vmcb->gs, sizeof(*reg));
        break;
    case x86_seg_ss:
        memcpy(reg, &vmcb->ss, sizeof(*reg));
        reg->attr.fields.dpl = vmcb->cpl;
        break;
    case x86_seg_tr:
        svm_sync_vmcb(v);
        memcpy(reg, &vmcb->tr, sizeof(*reg));
        break;
    case x86_seg_gdtr:
        memcpy(reg, &vmcb->gdtr, sizeof(*reg));
        break;
    case x86_seg_idtr:
        memcpy(reg, &vmcb->idtr, sizeof(*reg));
        break;
    case x86_seg_ldtr:
        svm_sync_vmcb(v);
        memcpy(reg, &vmcb->ldtr, sizeof(*reg));
        break;
    default:
        BUG();
    }
}

static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                                     struct segment_register *reg)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    int sync = 0;

    ASSERT((v == current) || !vcpu_runnable(v));

    switch ( seg )
    {
    case x86_seg_fs:
    case x86_seg_gs:
    case x86_seg_tr:
    case x86_seg_ldtr:
        sync = (v == current);
        break;
    default:
        break;
    }

    if ( sync )
        svm_sync_vmcb(v);

    switch ( seg )
    {
    case x86_seg_cs:
        memcpy(&vmcb->cs, reg, sizeof(*reg));
        break;
    case x86_seg_ds:
        memcpy(&vmcb->ds, reg, sizeof(*reg));
        break;
    case x86_seg_es:
        memcpy(&vmcb->es, reg, sizeof(*reg));
        break;
    case x86_seg_fs:
        memcpy(&vmcb->fs, reg, sizeof(*reg));
        break;
    case x86_seg_gs:
        memcpy(&vmcb->gs, reg, sizeof(*reg));
        break;
    case x86_seg_ss:
        memcpy(&vmcb->ss, reg, sizeof(*reg));
        vmcb->cpl = vmcb->ss.attr.fields.dpl;
        break;
    case x86_seg_tr:
        memcpy(&vmcb->tr, reg, sizeof(*reg));
        break;
    case x86_seg_gdtr:
        vmcb->gdtr.base = reg->base;
        vmcb->gdtr.limit = (uint16_t)reg->limit;
        break;
    case x86_seg_idtr:
        vmcb->idtr.base = reg->base;
        vmcb->idtr.limit = (uint16_t)reg->limit;
        break;
    case x86_seg_ldtr:
        memcpy(&vmcb->ldtr, reg, sizeof(*reg));
        break;
    default:
        BUG();
    }

    if ( sync )
        svm_vmload(vmcb);
}

static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
{
    v->arch.hvm_svm.vmcb->tsc_offset = offset;
}

static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
{
    char *p;
    int i;

    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
    {
        p = (char *)(hypercall_page + (i * 32));
        *(u8  *)(p + 0) = 0xb8; /* mov imm32, %eax */
        *(u32 *)(p + 1) = i;
        *(u8  *)(p + 5) = 0x0f; /* vmmcall */
        *(u8  *)(p + 6) = 0x01;
        *(u8  *)(p + 7) = 0xd9;
        *(u8  *)(p + 8) = 0xc3; /* ret */
    }

    /* Don't support HYPERVISOR_iret at the moment */
    *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
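/*
 * For reference, each 32-byte stub written by svm_init_hypercall_page()
 * above decodes to roughly:
 *
 *     mov  $<hypercall_nr>, %eax    ; b8 <imm32>
 *     vmmcall                       ; 0f 01 d9
 *     ret                           ; c3
 *
 * so a guest reaches hypercall N by calling into hypercall_page + N*32
 * with the hypercall number preloaded into %eax.
 */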
static void svm_ctxt_switch_from(struct vcpu *v)
{
    int cpu = smp_processor_id();

    svm_fpu_leave(v);

    svm_save_dr(v);

    svm_sync_vmcb(v);
    svm_vmload(root_vmcb[cpu]);

#ifdef __x86_64__
    /* Resume use of ISTs now that the host TR is reinstated. */
    idt_tables[cpu][TRAP_double_fault].a  |= IST_DF << 32;
    idt_tables[cpu][TRAP_nmi].a           |= IST_NMI << 32;
    idt_tables[cpu][TRAP_machine_check].a |= IST_MCE << 32;
#endif
}

static void svm_ctxt_switch_to(struct vcpu *v)
{
    int cpu = smp_processor_id();

#ifdef __x86_64__
    /*
     * This is required, because VMRUN does consistency check
     * and some of the DOM0 selectors are pointing to
     * invalid GDT locations, and cause AMD processors
     * to shutdown.
     */
    set_segment_register(ds, 0);
    set_segment_register(es, 0);
    set_segment_register(ss, 0);

    /*
     * Cannot use ISTs for NMI/#MC/#DF while we are running with the guest TR.
     * But this doesn't matter: the IST is only req'd to handle SYSCALL/SYSRET.
     */
    idt_tables[cpu][TRAP_double_fault].a  &= ~(7UL << 32);
    idt_tables[cpu][TRAP_nmi].a           &= ~(7UL << 32);
    idt_tables[cpu][TRAP_machine_check].a &= ~(7UL << 32);
#endif

    svm_restore_dr(v);

    svm_vmsave(root_vmcb[cpu]);
    svm_vmload(v->arch.hvm_svm.vmcb);
}

static void svm_do_resume(struct vcpu *v)
{
    bool_t debug_state = v->domain->debugger_attached;

    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        uint32_t mask = (1U << TRAP_debug) | (1U << TRAP_int3);
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            v->arch.hvm_svm.vmcb->exception_intercepts |= mask;
        else
            v->arch.hvm_svm.vmcb->exception_intercepts &= ~mask;
    }

    if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
    {
        v->arch.hvm_svm.launch_core = smp_processor_id();
        hvm_migrate_timers(v);

        /* Migrating to another ASID domain.  Request a new ASID. */
        svm_asid_init_vcpu(v);
    }

    /* Reflect the vlapic's TPR in the hardware vtpr */
    v->arch.hvm_svm.vmcb->vintr.fields.tpr =
        (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;

    hvm_do_resume(v);
    reset_stack_and_jump(svm_asm_do_resume);
}

static int svm_domain_initialise(struct domain *d)
{
    return 0;
}

static void svm_domain_destroy(struct domain *d)
{
}

static int svm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    v->arch.schedule_tail    = svm_do_resume;
    v->arch.ctxt_switch_from = svm_ctxt_switch_from;
    v->arch.ctxt_switch_to   = svm_ctxt_switch_to;

    v->arch.hvm_svm.launch_core = -1;

    if ( (rc = svm_create_vmcb(v)) != 0 )
    {
        dprintk(XENLOG_WARNING,
                "Failed to create VMCB for vcpu %d: err=%d.\n",
                v->vcpu_id, rc);
        return rc;
    }

    return 0;
}

static void svm_vcpu_destroy(struct vcpu *v)
{
    svm_destroy_vmcb(v);
}

static void svm_inject_exception(
    unsigned int trapnr, int errcode, unsigned long cr2)
{
    struct vcpu *curr = current;
    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
    eventinj_t event = vmcb->eventinj;

    if ( unlikely(event.fields.v) &&
         (event.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
    {
        trapnr = hvm_combine_hw_exceptions(event.fields.vector, trapnr);
        if ( trapnr == TRAP_double_fault )
            errcode = 0;
    }

    event.bytes = 0;
    event.fields.v = 1;
    event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
    event.fields.vector = trapnr;
    event.fields.ev = (errcode != HVM_DELIVER_NO_ERROR_CODE);
    event.fields.errorcode = errcode;

    vmcb->eventinj = event;

    if ( trapnr == TRAP_page_fault )
    {
        vmcb->cr2 = curr->arch.hvm_vcpu.guest_cr[2] = cr2;
        HVMTRACE_LONG_2D(PF_INJECT, curr, errcode, TRC_PAR_LONG(cr2));
    }
    else
    {
        HVMTRACE_2D(INJ_EXC, curr, trapnr, errcode);
    }

    if ( (trapnr == TRAP_debug) &&
         (guest_cpu_user_regs()->eflags & X86_EFLAGS_TF) )
    {
        __restore_debug_registers(curr);
        vmcb->dr6 |= 0x4000;
    }
}

static int svm_event_pending(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    return vmcb->eventinj.fields.v;
}

static int svm_do_pmu_interrupt(struct cpu_user_regs *regs)
{
    return 0;
}
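/*
 * Table of the SVM-specific implementations of the generic HVM hooks; it is
 * handed to the common HVM code via hvm_enable(&svm_function_table) in
 * start_svm() below.
 */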
static struct hvm_function_table svm_function_table = {
    .name                 = "SVM",
    .cpu_down             = svm_cpu_down,
    .domain_initialise    = svm_domain_initialise,
    .domain_destroy       = svm_domain_destroy,
    .vcpu_initialise      = svm_vcpu_initialise,
    .vcpu_destroy         = svm_vcpu_destroy,
    .save_cpu_ctxt        = svm_save_vmcb_ctxt,
    .load_cpu_ctxt        = svm_load_vmcb_ctxt,
    .get_interrupt_shadow = svm_get_interrupt_shadow,
    .set_interrupt_shadow = svm_set_interrupt_shadow,
    .guest_x86_mode       = svm_guest_x86_mode,
    .get_segment_register = svm_get_segment_register,
    .set_segment_register = svm_set_segment_register,
    .update_host_cr3      = svm_update_host_cr3,
    .update_guest_cr      = svm_update_guest_cr,
    .update_guest_efer    = svm_update_guest_efer,
    .flush_guest_tlbs     = svm_flush_guest_tlbs,
    .set_tsc_offset       = svm_set_tsc_offset,
    .inject_exception     = svm_inject_exception,
    .init_hypercall_page  = svm_init_hypercall_page,
    .event_pending        = svm_event_pending,
    .do_pmu_interrupt     = svm_do_pmu_interrupt,
    .cpuid_intercept      = svm_cpuid_intercept,
    .wbinvd_intercept     = svm_wbinvd_intercept,
    .fpu_dirty_intercept  = svm_fpu_dirty_intercept,
    .msr_read_intercept   = svm_msr_read_intercept,
    .msr_write_intercept  = svm_msr_write_intercept,
    .invlpg_intercept     = svm_invlpg_intercept
};

int start_svm(struct cpuinfo_x86 *c)
{
    u32 eax, ecx, edx;
    u32 phys_hsa_lo, phys_hsa_hi;
    u64 phys_hsa;
    int cpu = smp_processor_id();

    /* Xen does not fill x86_capability words except 0. */
    ecx = cpuid_ecx(0x80000001);
    boot_cpu_data.x86_capability[5] = ecx;

    if ( !(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)) )
        return 0;

    /* Check whether SVM feature is disabled in BIOS */
    rdmsr(MSR_K8_VM_CR, eax, edx);
    if ( eax & K8_VMCR_SVME_DISABLE )
    {
        printk("AMD SVM Extension is disabled in BIOS.\n");
        return 0;
    }

    if ( ((hsa[cpu] = alloc_host_save_area()) == NULL) ||
         ((root_vmcb[cpu] = alloc_vmcb()) == NULL) )
        return 0;

    write_efer(read_efer() | EFER_SVME);

    /* Initialize the HSA for this core. */
    phys_hsa = (u64)virt_to_maddr(hsa[cpu]);
    phys_hsa_lo = (u32)phys_hsa;
    phys_hsa_hi = (u32)(phys_hsa >> 32);
    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);

    /* Initialize core's ASID handling. */
    svm_asid_init(c);

    if ( cpu != 0 )
        return 1;

    setup_vmcb_dump();

    svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
                         cpuid_edx(0x8000000A) : 0);

    svm_function_table.hap_supported = cpu_has_svm_npt;

    hvm_enable(&svm_function_table);

    return 1;
}

static void svm_do_nested_pgfault(paddr_t gpa, struct cpu_user_regs *regs)
{
    p2m_type_t p2mt;
    mfn_t mfn;
    unsigned long gfn = gpa >> PAGE_SHIFT;

    /*
     * If this GFN is emulated MMIO or marked as read-only, pass the fault
     * to the mmio handler.
     */
    mfn = gfn_to_mfn_current(gfn, &p2mt);
    if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
    {
        if ( !handle_mmio() )
            hvm_inject_exception(TRAP_gp_fault, 0, 0);
        return;
    }

    /* Log-dirty: mark the page dirty and let the guest write it again */
    paging_mark_dirty(current->domain, mfn_x(mfn));
    p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
}

static void svm_fpu_dirty_intercept(void)
{
    struct vcpu *curr = current;
    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;

    svm_fpu_enter(curr);

    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
        vmcb->cr0 &= ~X86_CR0_TS;
}

#define bitmaskof(idx)  (1U << ((idx) & 31))
static void svm_cpuid_intercept(
    unsigned int *eax, unsigned int *ebx,
    unsigned int *ecx, unsigned int *edx)
{
    unsigned int input = *eax;
    struct vcpu *v = current;

    hvm_cpuid(input, eax, ebx, ecx, edx);

    if ( input == 0x80000001 )
    {
        /* Fix up VLAPIC details. */
        if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
            __clear_bit(X86_FEATURE_APIC & 31, edx);
    }

    HVMTRACE_5D(CPUID, v, input, *eax, *ebx, *ecx, *edx);
}

static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
{
    unsigned int eax, ebx, ecx, edx, inst_len;

    if ( (inst_len = __get_instruction_length(current, INSTR_CPUID)) == 0 )
        return;

    eax = regs->eax;
    ebx = regs->ebx;
    ecx = regs->ecx;
    edx = regs->edx;

    svm_cpuid_intercept(&eax, &ebx, &ecx, &edx);

    regs->eax = eax;
    regs->ebx = ebx;
    regs->ecx = ecx;
    regs->edx = edx;

    __update_guest_eip(regs, inst_len);
}

static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
{