
📄 domain.c

📁 xen virtual machine source code package
💻 C
📖 Page 1 of 5
	// f32 - f127
	memcpy(&v->arch._thread.fph[0], &c.nat->regs.f[32],
	       sizeof(v->arch._thread.fph));

#define UNAT_UPDATE(reg)					\
	unat_update(&uregs->eml_unat, &uregs->r ## reg,		\
		    !!(c.nat->regs.nats & (1UL << (reg))));

	uregs->eml_unat = 0;
	UNAT_UPDATE(1);
	UNAT_UPDATE(2);
	UNAT_UPDATE(3);

	UNAT_UPDATE(8);
	UNAT_UPDATE(9);
	UNAT_UPDATE(10);
	UNAT_UPDATE(11);
	UNAT_UPDATE(12);
	UNAT_UPDATE(13);
	UNAT_UPDATE(14);
	UNAT_UPDATE(15);
	UNAT_UPDATE(16);
	UNAT_UPDATE(17);
	UNAT_UPDATE(18);
	UNAT_UPDATE(19);
	UNAT_UPDATE(20);
	UNAT_UPDATE(21);
	UNAT_UPDATE(22);
	UNAT_UPDATE(23);
	UNAT_UPDATE(24);
	UNAT_UPDATE(25);
	UNAT_UPDATE(26);
	UNAT_UPDATE(27);
	UNAT_UPDATE(28);
	UNAT_UPDATE(29);
	UNAT_UPDATE(30);
	UNAT_UPDATE(31);

	/*
	 * r4-r7 is saved sometimes both in pt_regs->r[4-7] and memory stack or
	 * only in memory stack.
	 * for both cases, both memory stack and pt_regs->r[4-7] are updated.
	 */
	uregs->r4 = c.nat->regs.r[4];
	uregs->r5 = c.nat->regs.r[5];
	uregs->r6 = c.nat->regs.r[6];
	uregs->r7 = c.nat->regs.r[7];

	UNAT_UPDATE(4);
	UNAT_UPDATE(5);
	UNAT_UPDATE(6);
	UNAT_UPDATE(7);
#undef UNAT_UPDATE

	if (vcpu_has_not_run(v)) {
		sw->r4 = c.nat->regs.r[4];
		sw->r5 = c.nat->regs.r[5];
		sw->r6 = c.nat->regs.r[6];
		sw->r7 = c.nat->regs.r[7];
		unat_update(&sw->ar_unat, &sw->r4,
			    !!(c.nat->regs.nats & (1UL << 4)));
		unat_update(&sw->ar_unat, &sw->r5,
			    !!(c.nat->regs.nats & (1UL << 5)));
		unat_update(&sw->ar_unat, &sw->r6,
			    !!(c.nat->regs.nats & (1UL << 6)));
		unat_update(&sw->ar_unat, &sw->r7,
			    !!(c.nat->regs.nats & (1UL << 7)));
	} else {
		unw_set_gr(&info, 4, c.nat->regs.r[4],
			   !!(c.nat->regs.nats & (1UL << 4)));
		unw_set_gr(&info, 5, c.nat->regs.r[5],
			   !!(c.nat->regs.nats & (1UL << 5)));
		unw_set_gr(&info, 6, c.nat->regs.r[6],
			   !!(c.nat->regs.nats & (1UL << 6)));
		unw_set_gr(&info, 7, c.nat->regs.r[7],
			   !!(c.nat->regs.nats & (1UL << 7)));
	}

	if (!is_hvm_domain(d)) {
		/* domain runs at PL2/3 */
		uregs->cr_ipsr = vcpu_pl_adjust(uregs->cr_ipsr,
		                                IA64_PSR_CPL0_BIT);
		uregs->ar_rsc = vcpu_pl_adjust(uregs->ar_rsc, 2);
	}

	for (i = 0; i < IA64_NUM_DBG_REGS; i++) {
		if (is_hvm_domain(d)) {
			vmx_vcpu_set_dbr(v, i, c.nat->regs.dbr[i]);
			vmx_vcpu_set_ibr(v, i, c.nat->regs.ibr[i]);
		} else {
			vcpu_set_dbr(v, i, c.nat->regs.dbr[i]);
			vcpu_set_ibr(v, i, c.nat->regs.ibr[i]);
		}
	}

	/* rr[] must be set before setting itrs[] dtrs[] */
	for (i = 0; i < 8; i++) {
		unsigned long rrval = c.nat->regs.rr[i];
		unsigned long reg = (unsigned long)i << 61;
		IA64FAULT fault = IA64_NO_FAULT;

		if (rrval == 0)
			continue;
		if (is_hvm_domain(d)) {
			//without VGCF_EXTRA_REGS check,
			//VTi domain doesn't boot.
			if (c.nat->flags & VGCF_EXTRA_REGS)
				fault = vmx_vcpu_set_rr(v, reg, rrval);
		} else
			fault = vcpu_set_rr(v, reg, rrval);
		if (fault != IA64_NO_FAULT)
			return -EINVAL;
	}

	if (c.nat->flags & VGCF_EXTRA_REGS) {
		struct vcpu_tr_regs *tr = &c.nat->regs.tr;

		for (i = 0;
		     (i < sizeof(tr->itrs) / sizeof(tr->itrs[0])) && i < NITRS;
		     i++) {
			if (is_hvm_domain(d))
				vmx_vcpu_itr_i(v, i, tr->itrs[i].pte,
					       tr->itrs[i].itir,
					       tr->itrs[i].vadr);
			else
				vcpu_set_itr(v, i, tr->itrs[i].pte,
					     tr->itrs[i].itir,
					     tr->itrs[i].vadr,
					     tr->itrs[i].rid);
		}
		for (i = 0;
		     (i < sizeof(tr->dtrs) / sizeof(tr->dtrs[0])) && i < NDTRS;
		     i++) {
			if (is_hvm_domain(d))
				vmx_vcpu_itr_d(v, i, tr->dtrs[i].pte,
					       tr->dtrs[i].itir,
					       tr->dtrs[i].vadr);
			else
				vcpu_set_dtr(v, i,
					     tr->dtrs[i].pte,
					     tr->dtrs[i].itir,
					     tr->dtrs[i].vadr,
					     tr->dtrs[i].rid);
		}
		v->arch.event_callback_ip = c.nat->event_callback_ip;
		vcpu_set_iva(v, c.nat->regs.cr.iva);
	}

	if (is_hvm_domain(d))
		rc = vmx_arch_set_info_guest(v, c);

	return rc;
}

static int relinquish_memory(struct domain *d, struct list_head *list)
{
    struct list_head *ent;
    struct page_info *page;
#ifndef __ia64__
    unsigned long     x, y;
#endif
    int               ret = 0;

    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    spin_lock_recursive(&d->page_alloc_lock);
    ent = list->next;
    while ( ent != list )
    {
        page = list_entry(ent, struct page_info, list);
        /* Grab a reference to the page so it won't disappear from under us. */
        if ( unlikely(!get_page(page, d)) )
        {
            /* Couldn't get a reference -- someone is freeing this page. */
            ent = ent->next;
            list_move_tail(&page->list, &d->arch.relmem_list);
            continue;
        }

        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
            put_page_and_type(page);

        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);

#ifndef __ia64__
        /*
         * Forcibly invalidate base page tables at this point to break circular
         * 'linear page table' references. This is okay because MMU structures
         * are not shared across domains and this domain is now dead. Thus base
         * tables are not in use so a non-zero count means circular reference.
         */
        y = page->u.inuse.type_info;
        for ( ; ; )
        {
            x = y;
            if ( likely((x & (PGT_type_mask|PGT_validated)) !=
                        (PGT_base_page_table|PGT_validated)) )
                break;
            y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
            if ( likely(y == x) )
            {
                free_page_type(page, PGT_base_page_table);
                break;
            }
        }
#endif

        /* Follow the list chain and /then/ potentially free the page. */
        ent = ent->next;
        BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
        list_move_tail(&page->list, &d->arch.relmem_list);
        put_page(page);

        if (hypercall_preempt_check()) {
                ret = -EAGAIN;
                goto out;
        }
    }

    list_splice_init(&d->arch.relmem_list, list);

 out:
    spin_unlock_recursive(&d->page_alloc_lock);
    return ret;
}

int domain_relinquish_resources(struct domain *d)
{
	int ret = 0;

	switch (d->arch.relres) {
	case RELRES_not_started:
		/* Relinquish guest resources for VT-i domain. */
		if (is_hvm_domain(d))
			vmx_relinquish_guest_resources(d);
		d->arch.relres = RELRES_mm_teardown;
		/* fallthrough */

	case RELRES_mm_teardown:
		/* Tear down shadow mode stuff. */
		ret = mm_teardown(d);
		if (ret != 0)
			return ret;
		d->arch.relres = RELRES_xen;
		/* fallthrough */

	case RELRES_xen:
		/* Relinquish every xen page of memory. */
		ret = relinquish_memory(d, &d->xenpage_list);
		if (ret != 0)
			return ret;
		d->arch.relres = RELRES_dom;
		/* fallthrough */

	case RELRES_dom:
		/* Relinquish every domain page of memory. */
		ret = relinquish_memory(d, &d->page_list);
		if (ret != 0)
			return ret;
		d->arch.relres = RELRES_done;
		/* fallthrough */

	case RELRES_done:
		break;

	default:
		BUG();
	}

	if (is_hvm_domain(d) && d->arch.sal_data)
		xfree(d->arch.sal_data);

	/* Free page used by xen oprofile buffer */
	free_xenoprof_pages(d);

	return 0;
}

unsigned long
domain_set_shared_info_va (unsigned long va)
{
	struct vcpu *v = current;
	struct domain *d = v->domain;
	int rc;

	/* Check virtual address:
	   must belong to region 7,
	   must be 64Kb aligned,
	   must not be within Xen virtual space.  */
	if ((va >> 61) != 7
	    || (va & 0xffffUL) != 0
	    || (va >= HYPERVISOR_VIRT_START && va < HYPERVISOR_VIRT_END))
		panic_domain (NULL, "%s: bad va (0x%016lx)\n", __func__, va);

	/* Note: this doesn't work well if other cpus are already running.
	   However this is part of the spec :-)  */
	gdprintk(XENLOG_DEBUG, "Domain set shared_info_va to 0x%016lx\n", va);
	d->arch.shared_info_va = va;

	VCPU(v, interrupt_mask_addr) = (unsigned char *)va +
	                               INT_ENABLE_OFFSET(v);
	set_current_psr_i_addr(v);

	/* Remap the shared pages.  */
	BUG_ON(VMX_DOMAIN(v));
	rc = !set_one_rr(7UL << 61, PSCB(v, rrs[7]));
	BUG_ON(rc);

	return rc;
}

/* Transfer and clear the shadow bitmap in 1kB chunks for L1 cache. */
#define SHADOW_COPY_CHUNK 1024

int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc)
{
	unsigned int op = sc->op;
	int          rc = 0;
	int i;
	//struct vcpu *v;

	if (unlikely(d == current->domain)) {
		gdprintk(XENLOG_INFO,
			 "Don't try to do a shadow op on yourself!\n");
		return -EINVAL;
	}

	domain_pause(d);

	switch (op)
	{
	case XEN_DOMCTL_SHADOW_OP_OFF:
		if (shadow_mode_enabled(d)) {
			u64 *bm = d->arch.shadow_bitmap;
			struct vcpu *v;

			for_each_vcpu(d, v)
				v->arch.shadow_bitmap = NULL;

			/* Flush vhpt and tlb to restore dirty bit usage.  */
			flush_tlb_for_log_dirty(d);

			/* Free bitmap.  */
			d->arch.shadow_bitmap_size = 0;
			d->arch.shadow_bitmap = NULL;
			xfree(bm);
		}
		break;

	case XEN_DOMCTL_SHADOW_OP_ENABLE_TEST:
	case XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE:
		rc = -EINVAL;
		break;

	case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
		if (shadow_mode_enabled(d)) {
			rc = -EINVAL;
			break;
		}

		atomic64_set(&d->arch.shadow_fault_count, 0);
		atomic64_set(&d->arch.shadow_dirty_count, 0);

		d->arch.shadow_bitmap_size =
			(domain_get_maximum_gpfn(d) + BITS_PER_LONG) &
			~(BITS_PER_LONG - 1);
		d->arch.shadow_bitmap = xmalloc_array(unsigned long,
		                   d->arch.shadow_bitmap_size / BITS_PER_LONG);
		if (d->arch.shadow_bitmap == NULL) {
			d->arch.shadow_bitmap_size = 0;
			rc = -ENOMEM;
		}
		else {
			struct vcpu *v;

			memset(d->arch.shadow_bitmap, 0,
			       d->arch.shadow_bitmap_size / 8);

			for_each_vcpu(d, v)
				v->arch.shadow_bitmap = d->arch.shadow_bitmap;

			/* Flush vhpt and tlb to enable dirty bit
			   virtualization.  */
			flush_tlb_for_log_dirty(d);
		}
		break;

	case XEN_DOMCTL_SHADOW_OP_CLEAN:
	  {
		int nbr_bytes;

		sc->stats.fault_count = atomic64_read(&d->arch.shadow_fault_count);
		sc->stats.dirty_count = atomic64_read(&d->arch.shadow_dirty_count);

		atomic64_set(&d->arch.shadow_fault_count, 0);
		atomic64_set(&d->arch.shadow_dirty_count, 0);

		if (guest_handle_is_null(sc->dirty_bitmap) ||
		    (d->arch.shadow_bitmap == NULL)) {
			rc = -EINVAL;
			break;
		}

		if (sc->pages > d->arch.shadow_bitmap_size)
			sc->pages = d->arch.shadow_bitmap_size;

		nbr_bytes = (sc->pages + 7) / 8;

		for (i = 0; i < nbr_bytes; i += SHADOW_COPY_CHUNK) {
			int size = (nbr_bytes - i) > SHADOW_COPY_CHUNK ?
			           SHADOW_COPY_CHUNK : nbr_bytes - i;

			if (copy_to_guest_offset(
			            sc->dirty_bitmap, i,
			            (uint8_t *)d->arch.shadow_bitmap + i,
			            size)) {
				rc = -EFAULT;
				break;
			}

			memset((uint8_t *)d->arch.shadow_bitmap + i, 0, size);
		}
		flush_tlb_for_log_dirty(d);
		break;
	  }

	case XEN_DOMCTL_SHADOW_OP_PEEK:
	{
		unsigned long size;

		sc->stats.fault_count = atomic64_read(&d->arch.shadow_fault_count);
		sc->stats.dirty_count = atomic64_read(&d->arch.shadow_dirty_count);

		if (guest_handle_is_null(sc->dirty_bitmap) ||
		    (d->arch.shadow_bitmap == NULL)) {
			rc = -EINVAL;
			break;
		}

		if (sc->pages > d->arch.shadow_bitmap_size)
			sc->pages = d->arch.shadow_bitmap_size;

		size = (sc->pages + 7) / 8;
		if (copy_to_guest(sc->dirty_bitmap,
		                  (uint8_t *)d->arch.shadow_bitmap, size)) {
			rc = -EFAULT;
			break;
		}
		break;
	}

	case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
		sc->mb = 0;
		break;

	case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
		if (sc->mb > 0) {
			BUG();
			rc = -ENOMEM;
		}
		break;

	default:
		rc = -EINVAL;
		break;
	}

	domain_unpause(d);
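
The XEN_DOMCTL_SHADOW_OP_CLEAN handler above reports the log-dirty bitmap in SHADOW_COPY_CHUNK-sized pieces, clearing each piece immediately after it is copied out, and the ENABLE_LOGDIRTY handler rounds the bitmap size up to a whole number of longs before allocating it. The standalone C sketch below illustrates those two details outside the hypervisor; every name in it is hypothetical, plain memcpy() stands in for copy_to_guest_offset(), and it is only an illustration of the pattern, not part of domain.c.

/*
 * Standalone sketch of the chunked copy-then-clear pattern used by
 * XEN_DOMCTL_SHADOW_OP_CLEAN.  All names here are hypothetical and
 * memcpy() stands in for copy_to_guest_offset(); this is an
 * illustration, not hypervisor code.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_COPY_CHUNK    1024UL   /* mirrors SHADOW_COPY_CHUNK        */
#define DEMO_BITS_PER_LONG 64UL     /* mirrors BITS_PER_LONG on ia64    */

/* Round the bitmap size up to a whole number of longs, as the
 * ENABLE_LOGDIRTY case does with domain_get_maximum_gpfn(). */
static unsigned long demo_bitmap_bits(unsigned long max_gpfn)
{
	return (max_gpfn + DEMO_BITS_PER_LONG) & ~(DEMO_BITS_PER_LONG - 1UL);
}

/* Copy 'pages' worth of dirty bits into 'dest' and clear them in
 * 'bitmap', one chunk at a time so each chunk is cleared while it is
 * still warm in the L1 cache. */
static void demo_clean_dirty_bitmap(uint8_t *dest, uint8_t *bitmap,
                                    unsigned long pages)
{
	unsigned long nbr_bytes = (pages + 7) / 8;
	unsigned long i;

	for (i = 0; i < nbr_bytes; i += DEMO_COPY_CHUNK) {
		unsigned long size = (nbr_bytes - i) > DEMO_COPY_CHUNK ?
		                     DEMO_COPY_CHUNK : nbr_bytes - i;

		memcpy(dest + i, bitmap + i, size);   /* report this chunk  */
		memset(bitmap + i, 0, size);          /* then clear it      */
	}
}

Clearing each chunk right after it is copied is what lets the hypervisor hand the caller a consistent snapshot while restarting dirty tracking without a second full pass over the bitmap.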
