📄 vmx_init.c
    BUG_ON(v != current);
    ia64_call_vsa(PAL_VPS_SAVE, (u64)v->arch.privregs, 1, 0, 0, 0, 0, 0);

    /* Need to save KRs across a domain switch, though the HV itself
     * doesn't use them. */
    v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
    v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
    v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
    v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
    v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
    v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
    v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
    v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
}

/* Even if the guest is in physical mode, we still need such a double mapping */
void
vmx_load_state(struct vcpu *v)
{
    BUG_ON(v != current);

    vmx_load_all_rr(v);

    /* vmx_load_all_rr() pins down v->arch.privregs with both dtr/itr */
    ia64_call_vsa(PAL_VPS_RESTORE, (u64)v->arch.privregs, 1, 0, 0, 0, 0, 0);

    ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
    ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
    ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
    ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
    ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
    ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
    ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
    ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);

    /* Guest vTLB is not required to be switched explicitly, since it is
     * anchored in the vcpu */

    migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
}

static int
vmx_vcpu_initialise(struct vcpu *v)
{
    struct vmx_ioreq_page *iorp = &v->domain->arch.hvm_domain.ioreq;

    int rc = alloc_unbound_xen_event_channel(v, 0);
    if (rc < 0)
        return rc;
    v->arch.arch_vmx.xen_port = rc;

    spin_lock(&iorp->lock);
    if (v->domain->arch.vmx_platform.ioreq.va != 0) {
        vcpu_iodata_t *p = get_vio(v);
        p->vp_eport = v->arch.arch_vmx.xen_port;
    }
    spin_unlock(&iorp->lock);

    gdprintk(XENLOG_INFO, "Allocated port %ld for hvm %d vcpu %d.\n",
             v->arch.arch_vmx.xen_port, v->domain->domain_id, v->vcpu_id);

    return 0;
}

static int vmx_create_event_channels(struct vcpu *v)
{
    struct vcpu *o;

    if (v->vcpu_id == 0) {
        /* Ugly: create event channels for every vcpu when vcpu 0
           starts, so that they're available for ioemu to bind to. */
        for_each_vcpu(v->domain, o) {
            int rc = vmx_vcpu_initialise(o);
            if (rc < 0) //XXX error recovery
                return rc;
        }
    }

    return 0;
}

/*
 * Event channels have already been destroyed in domain_kill(), so we
 * needn't do anything here
 */
static void vmx_release_assist_channel(struct vcpu *v)
{
    return;
}

/* The following three functions are based on hvm_xxx_ioreq_page()
 * in xen/arch/x86/hvm/hvm.c */
static void vmx_init_ioreq_page(
    struct domain *d, struct vmx_ioreq_page *iorp)
{
    memset(iorp, 0, sizeof(*iorp));
    spin_lock_init(&iorp->lock);
    domain_pause(d);
}

static void vmx_destroy_ioreq_page(
    struct domain *d, struct vmx_ioreq_page *iorp)
{
    spin_lock(&iorp->lock);
    ASSERT(d->is_dying);
    if (iorp->va != NULL) {
        put_page(iorp->page);
        iorp->page = NULL;
        iorp->va = NULL;
    }
    spin_unlock(&iorp->lock);
}

int vmx_set_ioreq_page(
    struct domain *d, struct vmx_ioreq_page *iorp, unsigned long gpfn)
{
    struct page_info *page;
    unsigned long mfn;
    pte_t pte;

    pte = *lookup_noalloc_domain_pte(d, gpfn << PAGE_SHIFT);
    if (!pte_present(pte) || !pte_mem(pte))
        return -EINVAL;
    mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
    ASSERT(mfn_valid(mfn));

    page = mfn_to_page(mfn);
    if (get_page(page, d) == 0)
        return -EINVAL;

    spin_lock(&iorp->lock);
    if ((iorp->va != NULL) || d->is_dying) {
        spin_unlock(&iorp->lock);
        put_page(page);
        return -EINVAL;
    }
    iorp->va = mfn_to_virt(mfn);
    iorp->page = page;
    spin_unlock(&iorp->lock);

    domain_unpause(d);

    return 0;
}
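/*
 * Illustrative sketch, not part of the original file: how the ioreq-page
 * helpers above fit together.  vmx_init_ioreq_page() zeroes the descriptor
 * and pauses the domain; vmx_set_ioreq_page() later translates a guest pfn
 * through the P2M, takes a reference on the backing page, publishes the
 * mapping and unpauses the domain.  The caller below and its gpfn argument
 * are hypothetical; in the real flow the two calls happen at different
 * times (vmx_init_ioreq_page() runs from vmx_setup_platform(), while
 * vmx_set_ioreq_page() is invoked later, from outside this file).
 */
#if 0
static int example_register_ioreq_page(struct domain *d, unsigned long gpfn)
{
    struct vmx_ioreq_page *iorp = &d->arch.vmx_platform.ioreq;

    vmx_init_ioreq_page(d, iorp);             /* domain stays paused ...     */
    return vmx_set_ioreq_page(d, iorp, gpfn); /* ... until the page is wired */
}
#endif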
/*
 * Initialize the VMX environment for the guest. Only the 1st vp/vcpu
 * is registered here.
 */
int
vmx_final_setup_guest(struct vcpu *v)
{
    vpd_t *vpd;
    int rc;

    vpd = alloc_vpd();
    ASSERT(vpd);
    if (!vpd)
        return -ENOMEM;

    v->arch.privregs = (mapped_regs_t *)vpd;
    vpd->vpd_low.virt_env_vaddr = vm_buffer;

    /* Per-domain vTLB and VHPT implementation. For now a vmx domain will
     * stick to this solution. Maybe it can be deferred until we know the
     * created domain really is a vmx one */
    rc = init_domain_tlb(v);
    if (rc)
        return rc;

    if (!v->domain->arch.is_sioemu) {
        rc = vmx_create_event_channels(v);
        if (rc)
            return rc;
    }

    /* v->arch.schedule_tail = arch_vmx_do_launch; */
    vmx_create_vp(v);

    /* Physical mode emulation initialization, including
     * emulation ID allocation and related memory request */
    physical_mode_init(v);

    vlsapic_reset(v);
    vtm_init(v);

    /* Set up the guest's indicator for a VTi domain */
    set_bit(ARCH_VMX_DOMAIN, &v->arch.arch_vmx.flags);

    return 0;
}

void
vmx_relinquish_guest_resources(struct domain *d)
{
    struct vcpu *v;

    if (d->arch.is_sioemu)
        return;

    for_each_vcpu(d, v)
        vmx_release_assist_channel(v);

    vacpi_relinquish_resources(d);

    vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.ioreq);
    vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
    vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
}

void
vmx_relinquish_vcpu_resources(struct vcpu *v)
{
    vtime_t *vtm = &(v->arch.arch_vmx.vtm);

    kill_timer(&vtm->vtm_timer);

    if (v->arch.arch_vmx.sioemu_info_mva)
        put_page(virt_to_page((unsigned long)
                              v->arch.arch_vmx.sioemu_info_mva));

    free_domain_tlb(v);
    free_vpd(v);
}

typedef struct io_range {
    unsigned long start;
    unsigned long size;
    unsigned long type;
} io_range_t;

static const io_range_t io_ranges[] = {
    {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER << PAGE_SHIFT},
    {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO << PAGE_SHIFT},
    {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO << PAGE_SHIFT},
    {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC << PAGE_SHIFT},
    {PIB_START, PIB_SIZE, GPFN_PIB << PAGE_SHIFT},
};

// The P2M table is built in libxc/ia64/xc_ia64_hvm_build.c @ setup_guest()
// so only mark IO memory space here
static void vmx_build_io_physmap_table(struct domain *d)
{
    unsigned long i, j;

    /* Mark I/O ranges */
    for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
        for (j = io_ranges[i].start;
             j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE)
            (void)__assign_domain_page(d, j, io_ranges[i].type,
                                       ASSIGN_writable | ASSIGN_io);
    }
}

int vmx_setup_platform(struct domain *d)
{
    ASSERT(d != dom0); /* only for non-privileged vti domain */

    if (!d->arch.is_sioemu) {
        vmx_build_io_physmap_table(d);

        vmx_init_ioreq_page(d, &d->arch.vmx_platform.ioreq);
        vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
        vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
    }

    /* TEMP */
    d->arch.vmx_platform.pib_base = 0xfee00000UL;

    d->arch.sal_data = xmalloc(struct xen_sal_data);
    if (d->arch.sal_data == NULL)
        return -ENOMEM;

    /* Only open one port for I/O and interrupt emulation */
    memset(&d->shared_info->evtchn_mask[0], 0xff,
           sizeof(d->shared_info->evtchn_mask));

    /* Initialize iosapic model within hypervisor */
    viosapic_init(d);

    if (!d->arch.is_sioemu)
        vacpi_init(d);

    if (d->arch.is_sioemu) {
        int i;
        for (i = 1; i < MAX_VIRT_CPUS; i++)
            d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
    }

    return 0;
}
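/*
 * The resume path below drains the shared ioreq slot before the vcpu
 * reenters the guest:
 *
 *   STATE_IOREQ_NONE             - nothing pending, fall through
 *   STATE_IOREQ_READY/INPROCESS  - the request is with ioemu; block on
 *                                  this vcpu's event channel until the
 *                                  state moves on
 *   STATE_IORESP_READY           - the response is ready; vmx_io_assist()
 *                                  completes the access (-> IOREQ_NONE)
 *
 * Any other value means the shared page is corrupt and the domain is
 * crashed.
 */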
void vmx_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    vmx_load_state(v);

    if (v->domain->arch.is_sioemu)
        return;

    /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_vio(v)->vp_ioreq;
    while (p->state != STATE_IOREQ_NONE) {
        switch (p->state) {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            vmx_io_assist(v);
            break;
        case STATE_IOREQ_READY:
        case STATE_IOREQ_INPROCESS:
            /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
            wait_on_xen_event_channel(v->arch.arch_vmx.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}
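/*
 * How the pieces above relate (summary derived from the code in this file):
 * vmx_final_setup_guest() builds the per-vcpu VMX state (VPD, vTLB/VHPT,
 * event channels, LSAPIC and timer) and vmx_setup_platform() the per-domain
 * devices (I/O physmap, ioreq pages, viosapic, vACPI).
 * vmx_relinquish_vcpu_resources() and vmx_relinquish_guest_resources() are
 * their teardown counterparts, while vmx_load_state() and vmx_do_resume()
 * run when a VTi vcpu is switched back onto a physical cpu.
 */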