domctl.c
From the "Xen virtual machine source code package" · C code · 1,086 lines total · page 1 of 2
C
1,086 lines
/*
 * NOTE(review): this span is the tail of a larger domctl dispatch switch;
 * the enclosing function's opening is on an earlier page and not visible
 * here.  Each case handles one XEN_DOMCTL_* sub-command and leaves its
 * status code in `ret`, which is returned at the bottom.  The extraction
 * tool had collapsed the code onto a few physical lines (leaving `#define`
 * and `#ifdef` directives mid-line); only whitespace/layout and comments
 * are changed below — every code token is unchanged.
 */

/* Deliver an asynchronous trigger (currently only an NMI) to one vcpu. */
case XEN_DOMCTL_sendtrigger: {
    struct domain *d;
    struct vcpu *v;

    ret = -ESRCH;
    if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
        break;

    ret = -EINVAL;
    if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
        goto sendtrigger_out;

    ret = -ESRCH;
    if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
        goto sendtrigger_out;

    switch ( domctl->u.sendtrigger.trigger )
    {
    case XEN_DOMCTL_SENDTRIGGER_NMI: {
        ret = 0;
        /* Only kick the vcpu if the NMI was not already pending. */
        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);
    }
    break;
    default:
        ret = -ENOSYS;
    }

sendtrigger_out:
    rcu_unlock_domain(d);
}
break;

/*
 * Query the group of sibling devices for one PCI device (bus/devfn packed
 * into machine_bdf) and copy up to max_sdevs entries back to the caller's
 * sdev_array guest buffer; num_sdevs reports how many were written.
 */
case XEN_DOMCTL_get_device_group: {
    struct domain *d;
    u32 max_sdevs;
    u8 bus, devfn;
    XEN_GUEST_HANDLE_64(uint32) sdevs;
    int num_sdevs;

    ret = -ENOSYS;
    if ( !iommu_enabled )
        break;

    ret = -EINVAL;
    if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
        break;

    /* machine_bdf layout: bits 23-16 = bus, bits 15-8 = devfn. */
    bus = (domctl->u.get_device_group.machine_bdf >> 16) & 0xff;
    devfn = (domctl->u.get_device_group.machine_bdf >> 8) & 0xff;
    max_sdevs = domctl->u.get_device_group.max_sdevs;
    sdevs = domctl->u.get_device_group.sdev_array;

    num_sdevs = iommu_get_device_group(d, bus, devfn, sdevs, max_sdevs);
    if ( num_sdevs < 0 )
    {
        dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
        ret = -EFAULT;
        domctl->u.get_device_group.num_sdevs = 0;
    }
    else
    {
        ret = 0;
        domctl->u.get_device_group.num_sdevs = num_sdevs;
    }

    /* Copy the updated num_sdevs field back to the caller. */
    if ( copy_to_guest(u_domctl, domctl, 1) )
        ret = -EFAULT;

    rcu_unlock_domain(d);
}
break;

/*
 * Probe whether a PCI device is available for passthrough assignment.
 * Returns 0 if free, -EINVAL if already assigned (or non-existent).
 */
case XEN_DOMCTL_test_assign_device: {
    u8 bus, devfn;

    ret = -ENOSYS;
    if ( !iommu_enabled )
        break;

    ret = -EINVAL;
    bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
    devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
    if ( device_assigned(bus, devfn) )
    {
        gdprintk(XENLOG_ERR, "XEN_DOMCTL_test_assign_device: "
                 "%x:%x:%x already assigned, or non-existent\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        break;
    }
    ret = 0;
}
break;

/* Assign a PCI device to a domain for passthrough. */
case XEN_DOMCTL_assign_device: {
    struct domain *d;
    u8 bus, devfn;

    ret = -ENOSYS;
    if ( !iommu_enabled )
        break;

    ret = -EINVAL;
    /* Takes a domain reference; every exit path below must put_domain(). */
    if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
    {
        gdprintk(XENLOG_ERR,
                 "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
        break;
    }
    bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
    devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

    /* PV passthrough is only allowed when iommu_pv_enabled is set. */
    if ( !iommu_pv_enabled && !is_hvm_domain(d) )
    {
        ret = -ENOSYS;
        put_domain(d);
        break;
    }

    if ( device_assigned(bus, devfn) )
    {
        gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                 "%x:%x:%x already assigned, or non-existent\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        put_domain(d);
        break;
    }

    ret = assign_device(d, bus, devfn);
    if ( ret )
        gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                 "assign device (%x:%x:%x) failed\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
    put_domain(d);
}
break;

/*
 * Return a previously assigned PCI device to the hypervisor/dom0.
 * NOTE(review): if the device was not actually assigned, this path
 * leaves ret at -EINVAL rather than reporting a distinct error.
 */
case XEN_DOMCTL_deassign_device: {
    struct domain *d;
    u8 bus, devfn;

    ret = -ENOSYS;
    if ( !iommu_enabled )
        break;

    ret = -EINVAL;
    if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
    {
        gdprintk(XENLOG_ERR,
                 "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
        break;
    }
    bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
    devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

    if ( !iommu_pv_enabled && !is_hvm_domain(d) )
    {
        ret = -ENOSYS;
        put_domain(d);
        break;
    }

    if ( !device_assigned(bus, devfn) )
    {
        put_domain(d);
        break;
    }

    ret = 0;
    deassign_device(d, bus, devfn);
    gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
             bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
    put_domain(d);
}
break;

/*
 * Bind a passed-through device interrupt to the guest.
 * NOTE(review): if iommu_enabled is false, ret is left at -ESRCH here —
 * presumably -ENOSYS was intended; confirm against upstream.
 */
case XEN_DOMCTL_bind_pt_irq: {
    struct domain * d;
    xen_domctl_bind_pt_irq_t * bind;

    ret = -ESRCH;
    if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
        break;
    bind = &(domctl->u.bind_pt_irq);
    if ( iommu_enabled )
        ret = pt_irq_create_bind_vtd(d, bind);
    if ( ret < 0 )
        gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
    rcu_unlock_domain(d);
}
break;

/* Undo a XEN_DOMCTL_bind_pt_irq binding (same -ESRCH quirk as above). */
case XEN_DOMCTL_unbind_pt_irq: {
    struct domain * d;
    xen_domctl_bind_pt_irq_t * bind;

    ret = -ESRCH;
    if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
        break;
    bind = &(domctl->u.bind_pt_irq);
    if ( iommu_enabled )
        ret = pt_irq_destroy_bind_vtd(d, bind);
    if ( ret < 0 )
        gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
    rcu_unlock_domain(d);
}
break;

/*
 * Add or remove a direct machine-memory (MMIO) mapping for the domain:
 * maps nr_mfns machine frames starting at first_mfn into the guest
 * physmap starting at first_gfn, and adjusts iomem access permissions.
 * NOTE(review): the per-page set_mmio_p2m_entry()/clear_mmio_p2m_entry()
 * return values are ignored, so partial failures are not reported.
 */
case XEN_DOMCTL_memory_mapping: {
    struct domain *d;
    unsigned long gfn = domctl->u.memory_mapping.first_gfn;
    unsigned long mfn = domctl->u.memory_mapping.first_mfn;
    unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
    int i;

    ret = -EINVAL;
    if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
        break;

    ret = -ESRCH;
    if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
        break;

    ret=0;
    if ( domctl->u.memory_mapping.add_mapping )
    {
        gdprintk(XENLOG_INFO,
                 "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                 gfn, mfn, nr_mfns);
        ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        for ( i = 0; i < nr_mfns; i++ )
            set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
    }
    else
    {
        gdprintk(XENLOG_INFO,
                 "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                 gfn, mfn, nr_mfns);
        for ( i = 0; i < nr_mfns; i++ )
            clear_mmio_p2m_entry(d, gfn+i);
        ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
    }

    rcu_unlock_domain(d);
}
break;

/*
 * Add or remove a guest-port -> machine-port I/O-port mapping, kept in
 * the domain's hvm_iommu g2m_ioport_list, and adjust ioport permissions.
 * NOTE(review): the xmalloc() result in the add path is not checked —
 * a failed allocation would be dereferenced.
 */
case XEN_DOMCTL_ioport_mapping: {
#define MAX_IOPORTS 0x10000
    struct domain *d;
    struct hvm_iommu *hd;
    unsigned int fgp = domctl->u.ioport_mapping.first_gport;
    unsigned int fmp = domctl->u.ioport_mapping.first_mport;
    unsigned int np = domctl->u.ioport_mapping.nr_ports;
    struct g2m_ioport *g2m_ioport;
    int found = 0;

    /* Reject empty or out-of-range port windows up front. */
    ret = -EINVAL;
    if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
         ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
    {
        gdprintk(XENLOG_ERR,
                 "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
                 fgp, fmp, np);
        break;
    }

    ret = -ESRCH;
    if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
        break;

    hd = domain_hvm_iommu(d);
    if ( domctl->u.ioport_mapping.add_mapping )
    {
        gdprintk(XENLOG_INFO,
                 "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
                 fgp, fmp, np);
        /* Update an existing entry for this machine port, if present. */
        list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
            if (g2m_ioport->mport == fmp )
            {
                g2m_ioport->gport = fgp;
                g2m_ioport->np = np;
                found = 1;
                break;
            }
        if ( !found )
        {
            g2m_ioport = xmalloc(struct g2m_ioport);
            g2m_ioport->gport = fgp;
            g2m_ioport->mport = fmp;
            g2m_ioport->np = np;
            list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
        }
        ret = ioports_permit_access(d, fmp, fmp + np - 1);
    }
    else
    {
        gdprintk(XENLOG_INFO,
                 "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
                 fgp, fmp, np);
        list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
            if ( g2m_ioport->mport == fmp )
            {
                list_del(&g2m_ioport->list);
                xfree(g2m_ioport);
                break;
            }
        ret = ioports_deny_access(d, fmp, fmp + np - 1);
    }

    rcu_unlock_domain(d);
}
break;

/* Pin a cache attribute (type) onto a guest physical address range. */
case XEN_DOMCTL_pin_mem_cacheattr: {
    struct domain *d;

    ret = -ESRCH;
    d = rcu_lock_domain_by_id(domctl->domain);
    if ( d == NULL )
        break;

    ret = hvm_set_mem_pinned_cacheattr(
        d, domctl->u.pin_mem_cacheattr.start,
        domctl->u.pin_mem_cacheattr.end,
        domctl->u.pin_mem_cacheattr.type);

    rcu_unlock_domain(d);
}
break;

/*
 * Get or set the extended vcpu context (sysenter/syscall32 callback
 * state).  Both sub-commands share this block; `size` acts as an ABI
 * version check on the set path.  The get path copies the whole domctl
 * back to the caller after unlocking.
 */
case XEN_DOMCTL_set_ext_vcpucontext:
case XEN_DOMCTL_get_ext_vcpucontext: {
    struct xen_domctl_ext_vcpucontext *evc;
    struct domain *d;
    struct vcpu *v;

    evc = &domctl->u.ext_vcpucontext;

    ret = -ESRCH;
    d = rcu_lock_domain_by_id(domctl->domain);
    if ( d == NULL )
        break;

    ret = -ESRCH;
    if ( (evc->vcpu >= MAX_VIRT_CPUS) ||
         ((v = d->vcpu[evc->vcpu]) == NULL) )
        goto ext_vcpucontext_out;

    if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
    {
        evc->size = sizeof(*evc);
#ifdef __x86_64__
        evc->sysenter_callback_cs = v->arch.sysenter_callback_cs;
        evc->sysenter_callback_eip = v->arch.sysenter_callback_eip;
        evc->sysenter_disables_events = v->arch.sysenter_disables_events;
        evc->syscall32_callback_cs = v->arch.syscall32_callback_cs;
        evc->syscall32_callback_eip = v->arch.syscall32_callback_eip;
        evc->syscall32_disables_events = v->arch.syscall32_disables_events;
#else
        /* 32-bit Xen has no per-vcpu sysenter/syscall32 state to report. */
        evc->sysenter_callback_cs = 0;
        evc->sysenter_callback_eip = 0;
        evc->sysenter_disables_events = 0;
        evc->syscall32_callback_cs = 0;
        evc->syscall32_callback_eip = 0;
        evc->syscall32_disables_events = 0;
#endif
    }
    else
    {
        ret = -EINVAL;
        if ( evc->size != sizeof(*evc) )
            goto ext_vcpucontext_out;
#ifdef __x86_64__
        fixup_guest_code_selector(d, evc->sysenter_callback_cs);
        v->arch.sysenter_callback_cs = evc->sysenter_callback_cs;
        v->arch.sysenter_callback_eip = evc->sysenter_callback_eip;
        v->arch.sysenter_disables_events = evc->sysenter_disables_events;
        fixup_guest_code_selector(d, evc->syscall32_callback_cs);
        v->arch.syscall32_callback_cs = evc->syscall32_callback_cs;
        v->arch.syscall32_callback_eip = evc->syscall32_callback_eip;
        v->arch.syscall32_disables_events = evc->syscall32_disables_events;
#else
        /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
        if ( (evc->sysenter_callback_cs & ~3) ||
             evc->sysenter_callback_eip ||
             (evc->syscall32_callback_cs & ~3) ||
             evc->syscall32_callback_eip )
            goto ext_vcpucontext_out;
#endif
    }

    ret = 0;

ext_vcpucontext_out:
    rcu_unlock_domain(d);
    if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
         copy_to_guest(u_domctl, domctl, 1) )
        ret = -EFAULT;
}
break;

/*
 * Install one CPUID policy leaf for the domain: reuse a slot whose
 * input already matches (or is unused), else fail with -ENOENT when
 * the table is full.
 */
case XEN_DOMCTL_set_cpuid: {
    struct domain *d;
    xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
    cpuid_input_t *cpuid = NULL;
    int i;

    ret = -ESRCH;
    d = rcu_lock_domain_by_id(domctl->domain);
    if ( d == NULL )
        break;

    for ( i = 0; i < MAX_CPUID_INPUT; i++ )
    {
        cpuid = &d->arch.cpuids[i];
        /* First unused slot terminates the search. */
        if ( cpuid->input[0] == XEN_CPUID_INPUT_UNUSED )
            break;
        /* Exact leaf match; sub-leaf may be wildcarded. */
        if ( (cpuid->input[0] == ctl->input[0]) &&
             ((cpuid->input[1] == XEN_CPUID_INPUT_UNUSED) ||
              (cpuid->input[1] == ctl->input[1])) )
            break;
    }

    if ( i == MAX_CPUID_INPUT )
    {
        ret = -ENOENT;
    }
    else
    {
        memcpy(cpuid, ctl, sizeof(cpuid_input_t));
        ret = 0;
    }

    rcu_unlock_domain(d);
}
break;

default:
    ret = -ENOSYS;
    break;
}

return ret;
}

/*
 * Fill `c` (native layout, or compat layout for 32-on-64 PV guests) with
 * the current guest-visible state of vcpu `v`: saved registers, flags
 * (VGCF_*), control registers and debug registers.  HVM vcpus report
 * their virtual CR0/2/3/4; PV vcpus get IOPL merged back into eflags,
 * CR3 derived from the guest page table, and shadow DR7 bits folded in.
 */
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
    /* c(fld) accesses the field in whichever union layout is active. */
#ifdef CONFIG_COMPAT
#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif

    if ( !is_pv_32on64_domain(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
#endif

    /* Recompute the status flags rather than trusting the saved copy. */
    c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
    if ( v->fpu_initialised )
        c(flags |= VGCF_i387_valid);
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        c(flags |= VGCF_online);

    if ( is_hvm_vcpu(v) )
    {
        /* HVM: report the guest's virtualised control registers. */
        memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
        c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
        c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
        c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
        c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);

        if ( !is_pv_32on64_domain(v->domain) )
        {
            c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                pagetable_get_pfn(v->arch.guest_table));
#ifdef __x86_64__
            /* ctrlreg[1] carries the user-mode page table, if any. */
            if ( !pagetable_is_null(v->arch.guest_table_user) )
                c.nat->ctrlreg[1] = xen_pfn_to_cr3(
                    pagetable_get_pfn(v->arch.guest_table_user));
#endif
            /* Merge shadow DR7 bits into real DR7. */
            c.nat->debugreg[7] |= c.nat->debugreg[5];
            c.nat->debugreg[5] = 0;
        }
#ifdef CONFIG_COMPAT
        else
        {
            /* 32-on-64: translate the top-level L4 entry to a compat CR3. */
            l4_pgentry_t *l4e =
                __va(pagetable_get_paddr(v->arch.guest_table));
            c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));

            /* Merge shadow DR7 bits into real DR7. */
            c.cmp->debugreg[7] |= c.cmp->debugreg[5];
            c.cmp->debugreg[5] = 0;
        }
#endif

        if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
            c(flags |= VGCF_in_kernel);
    }

    c(vm_assist = v->domain->vm_assist);
#undef c
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?