svm.c
 * clears CR0.TS, and we will initialise the FPU when that happens. */
    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
    {
        v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
        vmcb->cr0 |= X86_CR0_TS;
    }
}

static enum hvm_intblk svm_interrupt_blocked(
    struct vcpu *v, struct hvm_intack intack)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    if ( vmcb->interrupt_shadow )
        return hvm_intblk_shadow;

    if ( intack.source == hvm_intsrc_nmi )
        return ((vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET) ?
                hvm_intblk_nmi_iret : hvm_intblk_none);

    ASSERT((intack.source == hvm_intsrc_pic) ||
           (intack.source == hvm_intsrc_lapic));

    if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
        return hvm_intblk_rflags_ie;

    return hvm_intblk_none;
}

static int svm_guest_x86_mode(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
        return 0;
    if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
        return 1;
    if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
        return 8;
    return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
}

static void svm_update_host_cr3(struct vcpu *v)
{
    /* SVM doesn't have a HOST_CR3 equivalent to update. */
}

static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    switch ( cr )
    {
    case 0: {
        unsigned long hw_cr0_mask = 0;

        if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
        {
            if ( v != current )
                hw_cr0_mask |= X86_CR0_TS;
            else if ( vmcb->cr0 & X86_CR0_TS )
                svm_fpu_enter(v);
        }

        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
        if ( !paging_mode_hap(v->domain) )
            vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
        break;
    }
    case 2:
        vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2];
        break;
    case 3:
        vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
        svm_asid_inv_asid(v);
        break;
    case 4:
        vmcb->cr4 = HVM_CR4_HOST_MASK;
        if ( paging_mode_hap(v->domain) )
            vmcb->cr4 &= ~X86_CR4_PAE;
        vmcb->cr4 |= v->arch.hvm_vcpu.guest_cr[4];
        break;
    default:
        BUG();
    }
}

static void svm_update_guest_efer(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
    if ( vmcb->efer & EFER_LMA )
        vmcb->efer |= EFER_LME;
}

static void svm_flush_guest_tlbs(void)
{
    /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
     * next VMRUN.  (If ASIDs are disabled, the whole TLB is flushed on
     * VMRUN anyway). */
    svm_asid_inc_generation();
}

static void svm_sync_vmcb(struct vcpu *v)
{
    struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;

    if ( arch_svm->vmcb_in_sync )
        return;

    arch_svm->vmcb_in_sync = 1;

    svm_vmsave(arch_svm->vmcb);
}

static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    int long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);

    switch ( seg )
    {
    case x86_seg_cs:   return long_mode ? 0 : vmcb->cs.base;
    case x86_seg_ds:   return long_mode ? 0 : vmcb->ds.base;
    case x86_seg_es:   return long_mode ? 0 : vmcb->es.base;
    case x86_seg_fs:   svm_sync_vmcb(v); return vmcb->fs.base;
    case x86_seg_gs:   svm_sync_vmcb(v); return vmcb->gs.base;
    case x86_seg_ss:   return long_mode ? 0 : vmcb->ss.base;
    case x86_seg_tr:   svm_sync_vmcb(v); return vmcb->tr.base;
    case x86_seg_gdtr: return vmcb->gdtr.base;
    case x86_seg_idtr: return vmcb->idtr.base;
    case x86_seg_ldtr: svm_sync_vmcb(v); return vmcb->ldtr.base;
    default: BUG();
    }

    return 0;
}
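/*
 * Editorial note (not part of the original source): the segment accessors
 * above and below call svm_sync_vmcb() before touching FS, GS, TR or LDTR.
 * On AMD SVM the hidden state of those registers is transferred to/from the
 * VMCB by VMLOAD/VMSAVE rather than by VMRUN/#VMEXIT, so Xen defers the
 * VMSAVE (tracked via vmcb_in_sync) and performs it only when one of these
 * fields actually needs to be read or written.
 */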
static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                     struct segment_register *reg)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    ASSERT(v == current);

    switch ( seg )
    {
    case x86_seg_cs:
        memcpy(reg, &vmcb->cs, sizeof(*reg));
        break;
    case x86_seg_ds:
        memcpy(reg, &vmcb->ds, sizeof(*reg));
        break;
    case x86_seg_es:
        memcpy(reg, &vmcb->es, sizeof(*reg));
        break;
    case x86_seg_fs:
        svm_sync_vmcb(v);
        memcpy(reg, &vmcb->fs, sizeof(*reg));
        break;
    case x86_seg_gs:
        svm_sync_vmcb(v);
        memcpy(reg, &vmcb->gs, sizeof(*reg));
        break;
    case x86_seg_ss:
        memcpy(reg, &vmcb->ss, sizeof(*reg));
        reg->attr.fields.dpl = vmcb->cpl;
        break;
    case x86_seg_tr:
        svm_sync_vmcb(v);
        memcpy(reg, &vmcb->tr, sizeof(*reg));
        break;
    case x86_seg_gdtr:
        memcpy(reg, &vmcb->gdtr, sizeof(*reg));
        break;
    case x86_seg_idtr:
        memcpy(reg, &vmcb->idtr, sizeof(*reg));
        break;
    case x86_seg_ldtr:
        svm_sync_vmcb(v);
        memcpy(reg, &vmcb->ldtr, sizeof(*reg));
        break;
    default:
        BUG();
    }
}

static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                                     struct segment_register *reg)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    int sync = 0;

    ASSERT((v == current) || !vcpu_runnable(v));

    switch ( seg )
    {
    case x86_seg_fs:
    case x86_seg_gs:
    case x86_seg_tr:
    case x86_seg_ldtr:
        sync = (v == current);
        break;
    default:
        break;
    }

    if ( sync )
        svm_sync_vmcb(v);

    switch ( seg )
    {
    case x86_seg_cs:
        memcpy(&vmcb->cs, reg, sizeof(*reg));
        break;
    case x86_seg_ds:
        memcpy(&vmcb->ds, reg, sizeof(*reg));
        break;
    case x86_seg_es:
        memcpy(&vmcb->es, reg, sizeof(*reg));
        break;
    case x86_seg_fs:
        memcpy(&vmcb->fs, reg, sizeof(*reg));
        break;
    case x86_seg_gs:
        memcpy(&vmcb->gs, reg, sizeof(*reg));
        break;
    case x86_seg_ss:
        memcpy(&vmcb->ss, reg, sizeof(*reg));
        vmcb->cpl = vmcb->ss.attr.fields.dpl;
        break;
    case x86_seg_tr:
        memcpy(&vmcb->tr, reg, sizeof(*reg));
        break;
    case x86_seg_gdtr:
        memcpy(&vmcb->gdtr, reg, sizeof(*reg));
        break;
    case x86_seg_idtr:
        memcpy(&vmcb->idtr, reg, sizeof(*reg));
        break;
    case x86_seg_ldtr:
        memcpy(&vmcb->ldtr, reg, sizeof(*reg));
        break;
    default:
        BUG();
    }

    if ( sync )
        svm_vmload(vmcb);
}

static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
{
    v->arch.hvm_svm.vmcb->tsc_offset = offset;
}

static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
{
    char *p;
    int i;

    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
    {
        p = (char *)(hypercall_page + (i * 32));
        *(u8  *)(p + 0) = 0xb8; /* mov imm32, %eax */
        *(u32 *)(p + 1) = i;
        *(u8  *)(p + 5) = 0x0f; /* vmmcall */
        *(u8  *)(p + 6) = 0x01;
        *(u8  *)(p + 7) = 0xd9;
        *(u8  *)(p + 8) = 0xc3; /* ret */
    }

    /* Don't support HYPERVISOR_iret at the moment */
    *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
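/*
 * Editorial sketch (not part of the original file): each 32-byte slot
 * written by svm_init_hypercall_page() above decodes, per the byte values
 * emitted, to a guest-visible stub of the form
 *
 *     mov  $<hypercall-number>, %eax
 *     vmmcall
 *     ret
 *
 * so a guest invokes hypercall <n> by calling into hypercall_page + n * 32.
 * The __HYPERVISOR_iret slot is instead overwritten with 0x0f 0x0b (ud2),
 * so attempts to use it fault rather than appearing to be supported.
 */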
static void svm_ctxt_switch_from(struct vcpu *v)
{
    int cpu = smp_processor_id();

    svm_fpu_leave(v);

    svm_save_dr(v);

    svm_sync_vmcb(v);
    svm_vmload(root_vmcb[cpu]);

#ifdef __x86_64__
    /* Resume use of ISTs now that the host TR is reinstated. */
    idt_tables[cpu][TRAP_double_fault].a  |= IST_DF << 32;
    idt_tables[cpu][TRAP_nmi].a           |= IST_NMI << 32;
    idt_tables[cpu][TRAP_machine_check].a |= IST_MCE << 32;
#endif
}

static void svm_ctxt_switch_to(struct vcpu *v)
{
    int cpu = smp_processor_id();

#ifdef __x86_64__
    /*
     * This is required, because VMRUN does consistency check
     * and some of the DOM0 selectors are pointing to
     * invalid GDT locations, and cause AMD processors
     * to shutdown.
     */
    set_segment_register(ds, 0);
    set_segment_register(es, 0);
    set_segment_register(ss, 0);

    /*
     * Cannot use ISTs for NMI/#MC/#DF while we are running with the guest TR.
     * But this doesn't matter: the IST is only req'd to handle SYSCALL/SYSRET.
     */
    idt_tables[cpu][TRAP_double_fault].a  &= ~(7UL << 32);
    idt_tables[cpu][TRAP_nmi].a           &= ~(7UL << 32);
    idt_tables[cpu][TRAP_machine_check].a &= ~(7UL << 32);
#endif

    svm_restore_dr(v);

    svm_vmsave(root_vmcb[cpu]);
    svm_vmload(v->arch.hvm_svm.vmcb);
}

static void svm_do_resume(struct vcpu *v)
{
    bool_t debug_state = v->domain->debugger_attached;

    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        uint32_t mask = (1U << TRAP_debug) | (1U << TRAP_int3);
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            v->arch.hvm_svm.vmcb->exception_intercepts |= mask;
        else
            v->arch.hvm_svm.vmcb->exception_intercepts &= ~mask;
    }

    if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
    {
        v->arch.hvm_svm.launch_core = smp_processor_id();
        hvm_migrate_timers(v);

        /* Migrating to another ASID domain.  Request a new ASID. */
        svm_asid_init_vcpu(v);
    }

    /* Reflect the vlapic's TPR in the hardware vtpr */
    v->arch.hvm_svm.vmcb->vintr.fields.tpr =
        (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;

    hvm_do_resume(v);
    reset_stack_and_jump(svm_asm_do_resume);
}

static int svm_domain_initialise(struct domain *d)
{
    return 0;
}

static void svm_domain_destroy(struct domain *d)
{
}

static int svm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    v->arch.schedule_tail    = svm_do_resume;
    v->arch.ctxt_switch_from = svm_ctxt_switch_from;
    v->arch.ctxt_switch_to   = svm_ctxt_switch_to;

    v->arch.hvm_svm.launch_core = -1;

    if ( (rc = svm_create_vmcb(v)) != 0 )
    {
        dprintk(XENLOG_WARNING,
                "Failed to create VMCB for vcpu %d: err=%d.\n",
                v->vcpu_id, rc);
        return rc;
    }

    return 0;
}

static void svm_vcpu_destroy(struct vcpu *v)
{
    svm_destroy_vmcb(v);
}

static void svm_inject_exception(
    unsigned int trapnr, int errcode, unsigned long cr2)
{
    struct vcpu *curr = current;
    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
    eventinj_t event;

    event.bytes = 0;
    event.fields.v = 1;
    event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
    event.fields.vector = trapnr;
    event.fields.ev = (errcode != HVM_DELIVER_NO_ERROR_CODE);
    event.fields.errorcode = errcode;

    vmcb->eventinj = event;

    if ( trapnr == TRAP_page_fault )
    {
        vmcb->cr2 = curr->arch.hvm_vcpu.guest_cr[2] = cr2;
        HVMTRACE_2D(PF_INJECT, curr, curr->arch.hvm_vcpu.guest_cr[2], errcode);
    }
    else
    {
        HVMTRACE_2D(INJ_EXC, curr, trapnr, errcode);
    }

    if ( (trapnr == TRAP_debug) &&
         (guest_cpu_user_regs()->eflags & X86_EFLAGS_TF) )
    {
        __restore_debug_registers(curr);
        vmcb->dr6 |= 0x4000;
    }
}

static int svm_event_pending(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    return vmcb->eventinj.fields.v;
}
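/*
 * Editorial note (not part of the original source): the table below is
 * SVM's instance of struct hvm_function_table. The generic HVM layer
 * dispatches through these hooks, so each entry simply binds a common
 * operation (segment access, CR/EFER updates, exception injection, ...)
 * to the SVM-specific handler defined in this file.
 */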
static struct hvm_function_table svm_function_table = {
    .name                 = "SVM",
    .cpu_down             = svm_cpu_down,
    .domain_initialise    = svm_domain_initialise,
    .domain_destroy       = svm_domain_destroy,
    .vcpu_initialise      = svm_vcpu_initialise,
    .vcpu_destroy         = svm_vcpu_destroy,
    .save_cpu_ctxt        = svm_save_vmcb_ctxt,
    .load_cpu_ctxt        = svm_load_vmcb_ctxt,
    .interrupt_blocked    = svm_interrupt_blocked,
    .guest_x86_mode       = svm_guest_x86_mode,
    .get_segment_base     = svm_get_segment_base,
    .get_segment_register = svm_get_segment_register,
    .set_segment_register = svm_set_segment_register,
    .update_host_cr3      = svm_update_host_cr3,
    .update_guest_cr      = svm_update_guest_cr,
    .update_guest_efer    = svm_update_guest_efer,
    .flush_guest_tlbs     = svm_flush_guest_tlbs,