hvm.c
来自「xen虚拟机源代码安装包」· C语言 代码 · 共 2,304 行 · 第 1/5 页
C
2,304 行
}enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack){ unsigned long intr_shadow; ASSERT(v == current); if ( (intack.source != hvm_intsrc_nmi) && !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) ) return hvm_intblk_rflags_ie; intr_shadow = hvm_funcs.get_interrupt_shadow(v); if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) ) return hvm_intblk_shadow; if ( intack.source == hvm_intsrc_nmi ) return ((intr_shadow & HVM_INTR_SHADOW_NMI) ? hvm_intblk_nmi_iret : hvm_intblk_none); if ( intack.source == hvm_intsrc_lapic ) { uint32_t tpr = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xF0; if ( (tpr >> 4) >= (intack.vector >> 4) ) return hvm_intblk_tpr; } return hvm_intblk_none;}static long hvm_grant_table_op( unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count){ if ( (cmd != GNTTABOP_query_size) && (cmd != GNTTABOP_setup_table) ) return -ENOSYS; /* all other commands need auditing */ return do_grant_table_op(cmd, uop, count);}static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE(void) arg){ long rc = do_memory_op(cmd, arg); if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation ) current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1; return rc;}typedef unsigned long hvm_hypercall_t( unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);#define HYPERCALL(x) \ [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x#if defined(__i386__)static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = { [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op, [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op, HYPERCALL(xen_version), HYPERCALL(event_channel_op), HYPERCALL(sched_op), HYPERCALL(hvm_op)};#else /* defined(__x86_64__) */static long hvm_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg){ long rc = compat_memory_op(cmd, arg); if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation ) current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1; return rc;}static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = { [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op, [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op, HYPERCALL(xen_version), HYPERCALL(event_channel_op), HYPERCALL(sched_op), HYPERCALL(hvm_op)};static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = { [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op_compat32, [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op, HYPERCALL(xen_version), HYPERCALL(event_channel_op), HYPERCALL(sched_op), HYPERCALL(hvm_op)};#endif /* defined(__x86_64__) */int hvm_do_hypercall(struct cpu_user_regs *regs){ struct vcpu *curr = current; struct segment_register sreg; int mode = hvm_guest_x86_mode(curr); uint32_t eax = regs->eax; switch ( mode ) {#ifdef __x86_64__ case 8: #endif case 4: case 2: hvm_get_segment_register(curr, x86_seg_ss, &sreg); if ( unlikely(sreg.attr.fields.dpl == 3) ) { default: regs->eax = -EPERM; return HVM_HCALL_completed; } case 0: break; } if ( (eax >= NR_hypercalls) || !hvm_hypercall32_table[eax] ) { regs->eax = -ENOSYS; return HVM_HCALL_completed; } this_cpu(hc_preempted) = 0;#ifdef __x86_64__ if ( mode == 8 ) { HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx)", eax, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8); this_cpu(hvm_64bit_hcall) = 1; regs->rax = hvm_hypercall64_table[eax](regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8); this_cpu(hvm_64bit_hcall) = 0; } else#endif { HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%x, %x, %x, %x, %x)", eax, (uint32_t)regs->ebx, (uint32_t)regs->ecx, (uint32_t)regs->edx, (uint32_t)regs->esi, (uint32_t)regs->edi); regs->eax = hvm_hypercall32_table[eax]((uint32_t)regs->ebx, (uint32_t)regs->ecx, (uint32_t)regs->edx, (uint32_t)regs->esi, (uint32_t)regs->edi); } HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u -> %lx", eax, (unsigned long)regs->eax); if ( this_cpu(hc_preempted) ) return HVM_HCALL_preempted; if ( unlikely(curr->domain->arch.hvm_domain.qemu_mapcache_invalidate) && test_and_clear_bool(curr->domain->arch.hvm_domain. qemu_mapcache_invalidate) ) return HVM_HCALL_invalidate; return HVM_HCALL_completed;}static void hvm_latch_shinfo_size(struct domain *d){ /* * Called from operations which are among the very first executed by * PV drivers on initialisation or after save/restore. These are sensible * points at which to sample the execution mode of the guest and latch * 32- or 64-bit format for shared state. */ if ( current->domain == d ) d->arch.has_32bit_shinfo = (hvm_guest_x86_mode(current) != 8);}/* Initialise a hypercall transfer page for a VMX domain using paravirtualised drivers. */void hvm_hypercall_page_initialise(struct domain *d, void *hypercall_page){ hvm_latch_shinfo_size(d); hvm_funcs.init_hypercall_page(d, hypercall_page);}static int hvmop_set_pci_intx_level( XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop){ struct xen_hvm_set_pci_intx_level op; struct domain *d; int rc; if ( copy_from_guest(&op, uop, 1) ) return -EFAULT; if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) ) return -EINVAL; d = rcu_lock_domain_by_id(op.domid); if ( d == NULL ) return -ESRCH; rc = -EPERM; if ( !IS_PRIV_FOR(current->domain, d) ) goto out; rc = -EINVAL; if ( !is_hvm_domain(d) ) goto out; rc = xsm_hvm_set_pci_intx_level(d); if ( rc ) goto out; rc = 0; switch ( op.level ) { case 0: hvm_pci_intx_deassert(d, op.device, op.intx); break; case 1: hvm_pci_intx_assert(d, op.device, op.intx); break; default: rc = -EINVAL; break; } out: rcu_unlock_domain(d); return rc;}void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip){ struct domain *d = current->domain; struct vcpu_guest_context *ctxt; struct segment_register reg; BUG_ON(vcpu_runnable(v)); domain_lock(d); if ( v->is_initialised ) goto out; if ( !paging_mode_hap(d) ) { if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG ) put_page(pagetable_get_page(v->arch.guest_table)); v->arch.guest_table = pagetable_null(); } ctxt = &v->arch.guest_context; memset(ctxt, 0, sizeof(*ctxt)); ctxt->flags = VGCF_online; ctxt->user_regs.eflags = 2; ctxt->user_regs.edx = 0x00000f00; ctxt->user_regs.eip = ip; v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET; hvm_update_guest_cr(v, 0); v->arch.hvm_vcpu.guest_cr[2] = 0; hvm_update_guest_cr(v, 2); v->arch.hvm_vcpu.guest_cr[3] = 0; hvm_update_guest_cr(v, 3); v->arch.hvm_vcpu.guest_cr[4] = 0; hvm_update_guest_cr(v, 4); v->arch.hvm_vcpu.guest_efer = 0; hvm_update_guest_efer(v); reg.sel = cs; reg.base = (uint32_t)reg.sel << 4; reg.limit = 0xffff; reg.attr.bytes = 0x09b; hvm_set_segment_register(v, x86_seg_cs, ®); reg.sel = reg.base = 0; reg.limit = 0xffff; reg.attr.bytes = 0x093; hvm_set_segment_register(v, x86_seg_ds, ®); hvm_set_segment_register(v, x86_seg_es, ®); hvm_set_segment_register(v, x86_seg_fs, ®); hvm_set_segment_register(v, x86_seg_gs, ®); hvm_set_segment_register(v, x86_seg_ss, ®); reg.attr.bytes = 0x82; /* LDT */ hvm_set_segment_register(v, x86_seg_ldtr, ®); reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */ hvm_set_segment_register(v, x86_seg_tr, ®); reg.attr.bytes = 0; hvm_set_segment_register(v, x86_seg_gdtr, ®); hvm_set_segment_register(v, x86_seg_idtr, ®); /* Sync AP's TSC with BSP's. */ v->arch.hvm_vcpu.cache_tsc_offset = v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset; hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset); paging_update_paging_modes(v); v->arch.flags |= TF_kernel_mode; v->is_initialised = 1; clear_bit(_VPF_down, &v->pause_flags); out: domain_unlock(d);}static void hvm_s3_suspend(struct domain *d){ struct vcpu *v; domain_pause(d); domain_lock(d); if ( d->is_dying || (d->vcpu[0] == NULL) || test_and_set_bool(d->arch.hvm_domain.is_s3_suspended) ) { domain_unlock(d); domain_unpause(d); return; } for_each_vcpu ( d, v ) { vlapic_reset(vcpu_vlapic(v)); vcpu_reset(v); } vpic_reset(d); vioapic_reset(d); pit_reset(d); rtc_reset(d); pmtimer_reset(d); hpet_reset(d); hvm_vcpu_reset_state(d->vcpu[0], 0xf000, 0xfff0); domain_unlock(d);}static void hvm_s3_resume(struct domain *d){ if ( test_and_clear_bool(d->arch.hvm_domain.is_s3_suspended) ) domain_unpause(d);}static int hvmop_set_isa_irq_level( XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop){ struct xen_hvm_set_isa_irq_level op; struct domain *d; int rc; if ( copy_from_guest(&op, uop, 1) ) return -EFAULT; if ( op.isa_irq > 15 ) return -EINVAL; d = rcu_lock_domain_by_id(op.domid); if ( d == NULL ) return -ESRCH; rc = -EPERM; if ( !IS_PRIV_FOR(current->domain, d) ) goto out; rc = -EINVAL; if ( !is_hvm_domain(d) ) goto out; rc = xsm_hvm_set_isa_irq_level(d); if ( rc ) goto out; rc = 0; switch ( op.level ) { case 0: hvm_isa_irq_deassert(d, op.isa_irq); break; case 1: hvm_isa_irq_assert(d, op.isa_irq); break; default: rc = -EINVAL; break; } out: rcu_unlock_domain(d); return rc;}static int hvmop_set_pci_link_route( XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t) uop){ struct xen_hvm_set_pci_link_route op; struct domain *d; int rc; if ( copy_from_guest(&op, uop, 1) ) return -EFAULT; if ( (op.link > 3) || (op.isa_irq > 15) ) return -EINVAL; d = rcu_lock_domain_by_id(op.domid); if ( d == NULL ) return -ESRCH; rc = -EPERM; if ( !IS_PRIV_FOR(current->domain, d) ) goto out; rc = -EINVAL; if ( !is_hvm_domain(d) ) goto out; rc = xsm_hvm_set_pci_link_route(d); if ( rc ) goto out; rc = 0; hvm_set_pci_link_route(d, op.link, op.isa_irq); out: rcu_unlock_domain(d); return rc;}static int hvmop_flush_tlb_all(void){ struct domain *d = current->domain; struct vcpu *v; /* Avoid deadlock if more than one vcpu tries this at the same time. */ if ( !spin_trylock(&d->hypercall_deadlock_mutex) ) return -EAGAIN; /* Pause all other vcpus. */ for_each_vcpu ( d, v ) if ( v != current ) vcpu_pause_nosync(v); /* Now that all
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?