
📄 domain.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 of 5
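/*
 * Excerpt from domain.c (Xen/IA64): vCPU and domain construction and
 * teardown (vcpu_share_privregs_with_guest, vcpu_late_initialise,
 * vcpu_destroy, init_switch_stack, arch_domain_create, arch_domain_destroy)
 * plus the beginning of arch_get_info_guest(), which copies a vCPU's
 * register state into a vcpu_guest_context.  The excerpt opens mid-way
 * through the vCPU initialisation path.
 */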
		v->arch.rid_bits = d->arch.rid_bits;
		v->arch.breakimm = d->arch.breakimm;
		v->arch.last_processor = INVALID_PROCESSOR;
		v->arch.vhpt_pg_shift = PAGE_SHIFT;
	}

	if (!VMX_DOMAIN(v))
		init_timer(&v->arch.hlt_timer, hlt_timer_fn, v,
		           first_cpu(cpu_online_map));

	return 0;
}

static void vcpu_share_privregs_with_guest(struct vcpu *v)
{
	struct domain *d = v->domain;
	int i, order = get_order_from_shift(XMAPPEDREGS_SHIFT);

	for (i = 0; i < (1 << order); i++)
		share_xen_page_with_guest(virt_to_page(v->arch.privregs) + i,
		                          d, XENSHARE_writable);
	/*
	 * XXX IA64_XMAPPEDREGS_PADDR
	 * Assign these pages to the guest pseudo-physical address space
	 * so that dom0 can map them by gmfn; this is necessary for
	 * domain save, restore and dump-core.
	 */
	for (i = 0; i < XMAPPEDREGS_SIZE; i += PAGE_SIZE)
		assign_domain_page(d, IA64_XMAPPEDREGS_PADDR(v->vcpu_id) + i,
		                   virt_to_maddr(v->arch.privregs + i));
}

int vcpu_late_initialise(struct vcpu *v)
{
	int rc, order;

	if (HAS_PERVCPU_VHPT(v->domain)) {
		rc = pervcpu_vhpt_alloc(v);
		if (rc != 0)
			return rc;
	}

	/* Create privregs page. */
	order = get_order_from_shift(XMAPPEDREGS_SHIFT);
	v->arch.privregs = alloc_xenheap_pages(order);
	if (v->arch.privregs == NULL)
		return -ENOMEM;
	BUG_ON(v->arch.privregs == NULL);
	memset(v->arch.privregs, 0, 1 << XMAPPEDREGS_SHIFT);
	vcpu_share_privregs_with_guest(v);

	return 0;
}

void vcpu_destroy(struct vcpu *v)
{
	if (is_hvm_vcpu(v))
		vmx_relinquish_vcpu_resources(v);
	else
		relinquish_vcpu_resources(v);
}

static unsigned long*
vcpu_to_rbs_bottom(struct vcpu *v)
{
	return (unsigned long*)((char *)v + IA64_RBS_OFFSET);
}

static void init_switch_stack(struct vcpu *v)
{
	struct pt_regs *regs = vcpu_regs(v);
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	extern void ia64_ret_from_clone;

	memset(sw, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs));
	sw->ar_bspstore = (unsigned long)vcpu_to_rbs_bottom(v);
	sw->b0 = (unsigned long) &ia64_ret_from_clone;
	sw->ar_fpsr = FPSR_DEFAULT;
	v->arch._thread.ksp = (unsigned long) sw - 16;
	// Stay on the kernel stack because we may get interrupts!
	// ia64_ret_from_clone switches to the user stack.
	v->arch._thread.on_ustack = 0;
	memset(v->arch._thread.fph, 0, sizeof(struct ia64_fpreg) * 96);
}

#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
static int opt_pervcpu_vhpt = 1;
integer_param("pervcpu_vhpt", opt_pervcpu_vhpt);
#endif

int arch_domain_create(struct domain *d, unsigned int domcr_flags)
{
	int i;

	// The following will eventually need to be negotiated dynamically.
	d->arch.shared_info_va = DEFAULT_SHAREDINFO_ADDR;
	d->arch.breakimm = 0x1000;
	for (i = 0; i < NR_CPUS; i++) {
		d->arch.last_vcpu[i].vcpu_id = INVALID_VCPU_ID;
	}

	if (is_idle_domain(d))
		return 0;

	foreign_p2m_init(d);
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
	d->arch.has_pervcpu_vhpt = opt_pervcpu_vhpt;
	dprintk(XENLOG_INFO, "%s:%d domain %d pervcpu_vhpt %d\n",
	        __func__, __LINE__, d->domain_id, d->arch.has_pervcpu_vhpt);
#endif
	if (tlb_track_create(d) < 0)
		goto fail_nomem1;
	d->shared_info = alloc_xenheap_pages(get_order_from_shift(XSI_SHIFT));
	if (d->shared_info == NULL)
		goto fail_nomem;
	BUG_ON(d->shared_info == NULL);
	memset(d->shared_info, 0, XSI_SIZE);

	for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
		share_xen_page_with_guest(virt_to_page((char *)d->shared_info + i),
		                          d, XENSHARE_writable);

	/* We may also need an emulation rid for region4, though it's unlikely
	 * that a guest will issue uncacheable accesses in metaphysical mode.
	 * But keeping such info here may be more sane. */
	if (!allocate_rid_range(d, 0))
		goto fail_nomem;

	memset(&d->arch.mm, 0, sizeof(d->arch.mm));
	d->arch.relres = RELRES_not_started;
	d->arch.mm_teardown_offset = 0;
	INIT_LIST_HEAD(&d->arch.relmem_list);

	if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
		goto fail_nomem;

	/*
	 * grant_table_create() can't fully initialize the grant table for a
	 * domain because it is called before arch_domain_create().
	 * Here we complete the initialization which requires the p2m table.
	 */
	spin_lock(&d->grant_table->lock);
	for (i = 0; i < nr_grant_frames(d->grant_table); i++)
		ia64_gnttab_create_shared_page(d, d->grant_table, i);
	spin_unlock(&d->grant_table->lock);

	d->arch.ioport_caps = rangeset_new(d, "I/O Ports",
	                                   RANGESETF_prettyprint_hex);

	dprintk(XENLOG_DEBUG, "arch_domain_create: domain=%p\n", d);
	return 0;

fail_nomem:
	tlb_track_destroy(d);
fail_nomem1:
	if (d->arch.mm.pgd != NULL)
		pgd_free(d->arch.mm.pgd);
	if (d->shared_info != NULL)
		free_xenheap_pages(d->shared_info,
				   get_order_from_shift(XSI_SHIFT));
	return -ENOMEM;
}

void arch_domain_destroy(struct domain *d)
{
	mm_final_teardown(d);

	if (d->shared_info != NULL)
		free_xenheap_pages(d->shared_info,
				   get_order_from_shift(XSI_SHIFT));

	tlb_track_destroy(d);

	/* Clear vTLB for the next domain. */
	domain_flush_tlb_vhpt(d);

	deallocate_rid_range(d);
}

void arch_vcpu_reset(struct vcpu *v)
{
	/* FIXME: Stub for now */
}

/* Here it is assumed that all of the CPUs have the same RSE.N_STACKED_PHYS */
static unsigned long num_phys_stacked;
static int __init
init_num_phys_stacked(void)
{
	switch (ia64_pal_rse_info(&num_phys_stacked, NULL)) {
	case 0L:
		printk("the number of physical stacked general registers "
		       "(RSE.N_STACKED_PHYS) = %ld\n", num_phys_stacked);
		return 0;
	case -2L:
	case -3L:
	default:
		break;
	}
	printk("WARNING: PAL_RSE_INFO call failed. "
	       "domain save/restore may NOT work!\n");
	return -EINVAL;
}
__initcall(init_num_phys_stacked);

#define COPY_FPREG(dst, src) memcpy(dst, src, sizeof(struct ia64_fpreg))

#define AR_PFS_PEC_SHIFT	51
#define AR_PFS_REC_SIZE		6
#define AR_PFS_PEC_MASK		(((1UL << 6) - 1) << 51)

/*
 * See init_switch_stack() and ptrace.h
 */
static struct switch_stack*
vcpu_to_switch_stack(struct vcpu* v)
{
	return (struct switch_stack *)(v->arch._thread.ksp + 16);
}

static int
vcpu_has_not_run(struct vcpu* v)
{
	extern void ia64_ret_from_clone;
	struct switch_stack *sw = vcpu_to_switch_stack(v);

	return (sw == (struct switch_stack *)(vcpu_regs(v)) - 1) &&
		(sw->b0 == (unsigned long)&ia64_ret_from_clone);
}

static void
nats_update(unsigned int* nats, unsigned int reg, char nat)
{
	BUG_ON(reg > 31);
	if (nat)
		*nats |= (1UL << reg);
	else
		*nats &= ~(1UL << reg);
}

void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
	int i;
	struct vcpu_tr_regs *tr = &c.nat->regs.tr;
	struct cpu_user_regs *uregs = vcpu_regs(v);
	struct switch_stack *sw = vcpu_to_switch_stack(v);
	struct unw_frame_info info;
	int is_hvm = VMX_DOMAIN(v);
	unsigned int rbs_size;
	unsigned long *const rbs_bottom = vcpu_to_rbs_bottom(v);
	unsigned long *rbs_top;
	unsigned long *rbs_rnat_addr;
	unsigned int top_slot;
	unsigned int num_regs;

	memset(c.nat, 0, sizeof(*c.nat));
	c.nat->regs.b[6] = uregs->b6;
	c.nat->regs.b[7] = uregs->b7;

	memset(&info, 0, sizeof(info));
	unw_init_from_blocked_task(&info, v);
	if (vcpu_has_not_run(v)) {
		c.nat->regs.ar.lc = sw->ar_lc;
		c.nat->regs.ar.ec =
			(sw->ar_pfs & AR_PFS_PEC_MASK) >> AR_PFS_PEC_SHIFT;
	} else if (unw_unwind_to_user(&info) < 0) {
		/* warn: should panic? */
		gdprintk(XENLOG_ERR, "vcpu=%d unw_unwind_to_user() failed.\n",
			 v->vcpu_id);
		show_stack(v, NULL);

		/* can't return error */
		c.nat->regs.ar.lc = 0;
		c.nat->regs.ar.ec = 0;
	} else {
		unw_get_ar(&info, UNW_AR_LC, &c.nat->regs.ar.lc);
		unw_get_ar(&info, UNW_AR_EC, &c.nat->regs.ar.ec);
	}
	c.nat->regs.ar.csd = uregs->ar_csd;
	c.nat->regs.ar.ssd = uregs->ar_ssd;

	c.nat->regs.r[8] = uregs->r8;
	c.nat->regs.r[9] = uregs->r9;
	c.nat->regs.r[10] = uregs->r10;
	c.nat->regs.r[11] = uregs->r11;

	if (is_hvm)
		c.nat->regs.psr = vmx_vcpu_get_psr(v);
	else
		c.nat->regs.psr = vcpu_get_psr(v);

	c.nat->regs.ip = uregs->cr_iip;
	c.nat->regs.cfm = uregs->cr_ifs;

	c.nat->regs.ar.unat = uregs->ar_unat;
	c.nat->regs.ar.pfs = uregs->ar_pfs;
	c.nat->regs.ar.rsc = uregs->ar_rsc;
	c.nat->regs.ar.rnat = uregs->ar_rnat;
	c.nat->regs.ar.bspstore = uregs->ar_bspstore;

	c.nat->regs.pr = uregs->pr;
	c.nat->regs.b[0] = uregs->b0;
	rbs_size = uregs->loadrs >> 16;
	num_regs = ia64_rse_num_regs(rbs_bottom,
			(unsigned long*)((char*)rbs_bottom + rbs_size));
	c.nat->regs.ar.bsp = (unsigned long)ia64_rse_skip_regs(
		(unsigned long*)c.nat->regs.ar.bspstore, num_regs);
	BUG_ON(num_regs > num_phys_stacked);

	c.nat->regs.r[1] = uregs->r1;
	c.nat->regs.r[12] = uregs->r12;
	c.nat->regs.r[13] = uregs->r13;
	c.nat->regs.ar.fpsr = uregs->ar_fpsr;
	c.nat->regs.r[15] = uregs->r15;

	c.nat->regs.r[14] = uregs->r14;
	c.nat->regs.r[2] = uregs->r2;
	c.nat->regs.r[3] = uregs->r3;
	c.nat->regs.r[16] = uregs->r16;
	c.nat->regs.r[17] = uregs->r17;
	c.nat->regs.r[18] = uregs->r18;
	c.nat->regs.r[19] = uregs->r19;
	c.nat->regs.r[20] = uregs->r20;
	c.nat->regs.r[21] = uregs->r21;
	c.nat->regs.r[22] = uregs->r22;
	c.nat->regs.r[23] = uregs->r23;
	c.nat->regs.r[24] = uregs->r24;
	c.nat->regs.r[25] = uregs->r25;
	c.nat->regs.r[26] = uregs->r26;
	c.nat->regs.r[27] = uregs->r27;
	c.nat->regs.r[28] = uregs->r28;
	c.nat->regs.r[29] = uregs->r29;
	c.nat->regs.r[30] = uregs->r30;
	c.nat->regs.r[31] = uregs->r31;

	c.nat->regs.ar.ccv = uregs->ar_ccv;

	COPY_FPREG(&c.nat->regs.f[2], &sw->f2);
	COPY_FPREG(&c.nat->regs.f[3], &sw->f3);
	COPY_FPREG(&c.nat->regs.f[4], &sw->f4);
	COPY_FPREG(&c.nat->regs.f[5], &sw->f5);

	COPY_FPREG(&c.nat->regs.f[6], &uregs->f6);
	COPY_FPREG(&c.nat->regs.f[7], &uregs->f7);
	COPY_FPREG(&c.nat->regs.f[8], &uregs->f8);
	COPY_FPREG(&c.nat->regs.f[9], &uregs->f9);
	COPY_FPREG(&c.nat->regs.f[10], &uregs->f10);
	COPY_FPREG(&c.nat->regs.f[11], &uregs->f11);

	COPY_FPREG(&c.nat->regs.f[12], &sw->f12);
	COPY_FPREG(&c.nat->regs.f[13], &sw->f13);
	COPY_FPREG(&c.nat->regs.f[14], &sw->f14);
	COPY_FPREG(&c.nat->regs.f[15], &sw->f15);
	COPY_FPREG(&c.nat->regs.f[16], &sw->f16);
	COPY_FPREG(&c.nat->regs.f[17], &sw->f17);
	COPY_FPREG(&c.nat->regs.f[18], &sw->f18);
	COPY_FPREG(&c.nat->regs.f[19], &sw->f19);
	COPY_FPREG(&c.nat->regs.f[20], &sw->f20);
	COPY_FPREG(&c.nat->regs.f[21], &sw->f21);
	COPY_FPREG(&c.nat->regs.f[22], &sw->f22);
	COPY_FPREG(&c.nat->regs.f[23], &sw->f23);
	COPY_FPREG(&c.nat->regs.f[24], &sw->f24);
	COPY_FPREG(&c.nat->regs.f[25], &sw->f25);
	COPY_FPREG(&c.nat->regs.f[26], &sw->f26);
	COPY_FPREG(&c.nat->regs.f[27], &sw->f27);
	COPY_FPREG(&c.nat->regs.f[28], &sw->f28);
	COPY_FPREG(&c.nat->regs.f[29], &sw->f29);
	COPY_FPREG(&c.nat->regs.f[30], &sw->f30);
	COPY_FPREG(&c.nat->regs.f[31], &sw->f31);

	// f32 - f127
	memcpy(&c.nat->regs.f[32], &v->arch._thread.fph[0],
	       sizeof(v->arch._thread.fph));

#define NATS_UPDATE(reg)						\
	nats_update(&c.nat->regs.nats, (reg),				\
		    !!(uregs->eml_unat &				\
		       (1UL << ia64_unat_pos(&uregs->r ## reg))))

	// The corresponding bit in ar.unat is determined by
	// (&uregs->rN){8:3}.
	// r8: the lowest gr member of struct cpu_user_regs.
	// r7: the highest gr member of struct cpu_user_regs.
	BUILD_BUG_ON(offsetof(struct cpu_user_regs, r7) -
		     offsetof(struct cpu_user_regs, r8) >
		     64 * sizeof(unsigned long));

	NATS_UPDATE(1);
	NATS_UPDATE(2);
	NATS_UPDATE(3);

	NATS_UPDATE(8);
	NATS_UPDATE(9);
	NATS_UPDATE(10);
	NATS_UPDATE(11);
	NATS_UPDATE(12);
	NATS_UPDATE(13);
	NATS_UPDATE(14);
	NATS_UPDATE(15);
	NATS_UPDATE(16);
	NATS_UPDATE(17);
	NATS_UPDATE(18);
	NATS_UPDATE(19);
	NATS_UPDATE(20);
	NATS_UPDATE(21);
	NATS_UPDATE(22);
	NATS_UPDATE(23);
	NATS_UPDATE(24);
	NATS_UPDATE(25);
	NATS_UPDATE(26);
	NATS_UPDATE(27);
	NATS_UPDATE(28);
	NATS_UPDATE(29);
	NATS_UPDATE(30);
	NATS_UPDATE(31);

	if (!is_hvm) {
		c.nat->regs.r[4] = uregs->r4;
		c.nat->regs.r[5] = uregs->r5;
		c.nat->regs.r[6] = uregs->r6;
		c.nat->regs.r[7] = uregs->r7;

		NATS_UPDATE(4);
		NATS_UPDATE(5);
		NATS_UPDATE(6);
		NATS_UPDATE(7);
#undef NATS_UPDATE
	} else {
		/*
		 * For a VTi domain, r4-r7 are sometimes saved both in
		 * uregs->r[4-7] and on the memory stack, and sometimes only
		 * on the memory stack, so it is OK to take them from the
		 * memory stack.
		 */
		if (vcpu_has_not_run(v)) {
			c.nat->regs.r[4] = sw->r4;
			c.nat->regs.r[5] = sw->r5;
			c.nat->regs.r[6] = sw->r6;
			c.nat->regs.r[7] = sw->r7;

			nats_update(&c.nat->regs.nats, 4,
				    !!(sw->ar_unat &
				       (1UL << ia64_unat_pos(&sw->r4))));
			nats_update(&c.nat->regs.nats, 5,
				    !!(sw->ar_unat &
				       (1UL << ia64_unat_pos(&sw->r5))));
			nats_update(&c.nat->regs.nats, 6,
				    !!(sw->ar_unat &
				       (1UL << ia64_unat_pos(&sw->r6))));
			nats_update(&c.nat->regs.nats, 7,
				    !!(sw->ar_unat &
				       (1UL << ia64_unat_pos(&sw->r7))));
		} else {
			char nat;

			unw_get_gr(&info, 4, &c.nat->regs.r[4], &nat);
			nats_update(&c.nat->regs.nats, 4, nat);
			unw_get_gr(&info, 5, &c.nat->regs.r[5], &nat);
			nats_update(&c.nat->regs.nats, 5, nat);
			unw_get_gr(&info, 6, &c.nat->regs.r[6], &nat);
			nats_update(&c.nat->regs.nats, 6, nat);
			unw_get_gr(&info, 7, &c.nat->regs.r[7], &nat);
			nats_update(&c.nat->regs.nats, 7, nat);
		}
	}

	c.nat->regs.rbs_voff = (IA64_RBS_OFFSET / 8) % 64;
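The listing breaks off here (page 1 of 5); arch_get_info_guest() continues on the following pages.

A note on the ar.bsp computation above: rbs_size (uregs->loadrs >> 16) is the number of bytes of the guest's stacked registers spilled onto the per-vCPU register backing store starting at rbs_bottom, and the ar.bsp reported in the context is ar.bspstore advanced by that many registers, allowing for the NaT-collection word that occupies every 64th backing-store slot. The standalone sketch below mirrors the standard Linux/ia64 <asm/rse.h> slot arithmetic (ia64_rse_slot_num, ia64_rse_num_regs, ia64_rse_skip_regs); it only illustrates the layout assumption, uses made-up addresses, collapses the two backing-store pointers into one for simplicity, and is not part of domain.c.

/*
 * Illustrative sketch (NOT part of domain.c): IA-64 register backing
 * store (RSE) slot arithmetic, mirroring the standard Linux/ia64
 * <asm/rse.h> helpers.  Addresses below are hypothetical.
 */
#include <stdio.h>

/* 6-bit slot index of a backing-store address (address bits 8:3). */
static unsigned long rse_slot_num(unsigned long *addr)
{
	return (((unsigned long)addr) >> 3) & 0x3f;
}

/*
 * Number of stacked registers between bspstore and bsp, excluding the
 * NaT-collection word stored in every 64th backing-store slot.
 */
static unsigned long rse_num_regs(unsigned long *bspstore, unsigned long *bsp)
{
	unsigned long slots = bsp - bspstore;

	return slots - (rse_slot_num(bspstore) + slots) / 0x40;
}

/* Advance addr by num_regs registers, stepping over NaT-collection slots. */
static unsigned long *rse_skip_regs(unsigned long *addr, long num_regs)
{
	long delta = rse_slot_num(addr) + num_regs;

	if (num_regs < 0)
		delta -= 0x3e;
	return addr + num_regs + delta / 0x3f;
}

int main(void)
{
	/* Hypothetical ar.bspstore and a dirty partition of 0x150 bytes
	 * (what uregs->loadrs >> 16 would yield). */
	unsigned long *bspstore = (unsigned long *)0x600000000000UL;
	unsigned long rbs_size = 0x150;

	unsigned long nregs = rse_num_regs(bspstore,
		(unsigned long *)((char *)bspstore + rbs_size));
	unsigned long *bsp = rse_skip_regs(bspstore, (long)nregs);

	printf("dirty stacked regs = %lu, ar.bsp = %p\n", nregs, (void *)bsp);
	return 0;
}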
