
📄 vcpu.c

📁 Xen virtual machine source package
💻 C
📖 Page 1 of 4
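/*
 * Editor's orientation note (added, not part of the original file): this is
 * page 1 of a 4-page listing of Xen/ia64's vcpu.c, and it opens partway
 * through the register bank-switch emulation (evidently vcpu_bsw0(), given
 * the vcpu_bsw0_unat() call below): the "else" branch saves the live banked
 * registers r16-r31 into the bank-1 shadow area, reloads the bank-0 copies,
 * and records banknum = 0.
 */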
	} else {
		if (PSCB(vcpu, banknum)) {
			for (i = 0; i < 16; i++) {
				*b1++ = *r;
				*r++ = *b0++;
			}
			vcpu_bsw0_unat(i, b0unat, b1unat, runat,
			               IA64_PT_REGS_R16_SLOT);
			PSCB(vcpu, banknum) = 0;
		}
	}
	return IA64_NO_FAULT;
}

#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, IA64_PT_REGS_R16_SLOT)	\
do {									\
	__asm__ __volatile__ (";;extr.u %0 = %3,%6,16;;\n"		\
			      "dep %1 = %0, %1, 16, 16;;\n"		\
			      "st8 [%4] = %1\n"				\
			      "extr.u %0 = %2, 0, 16;;\n"		\
			      "dep %3 = %0, %3, %6, 16;;\n"		\
			      "st8 [%5] = %3\n"				\
			      ::"r"(i), "r"(*b0unat), "r"(*b1unat),	\
			      "r"(*runat), "r"(b0unat), "r"(runat),	\
			      "i"(IA64_PT_REGS_R16_SLOT): "memory");	\
} while(0)

IA64FAULT vcpu_bsw1(VCPU * vcpu)
{
	// TODO: Only allowed for current vcpu
	REGS *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
	unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &PSCB(vcpu, vbnat);
	unsigned long *b1unat = &PSCB(vcpu, vnat);
	unsigned long i;

	if (VMX_DOMAIN(vcpu)) {
		if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
			for (i = 0; i < 16; i++) {
				*b0++ = *r;
				*r++ = *b1++;
			}
			vcpu_bsw1_unat(i, b0unat, b1unat, runat,
			               IA64_PT_REGS_R16_SLOT);
			VCPU(vcpu, vpsr) |= IA64_PSR_BN;
		}
	} else {
		if (!PSCB(vcpu, banknum)) {
			for (i = 0; i < 16; i++) {
				*b0++ = *r;
				*r++ = *b1++;
			}
			vcpu_bsw1_unat(i, b0unat, b1unat, runat,
			               IA64_PT_REGS_R16_SLOT);
			PSCB(vcpu, banknum) = 1;
		}
	}
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU cpuid access routines
**************************************************************************/

IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
{
	// FIXME: This could get called as a result of a rsvd-reg fault
	// if reg > 3
	switch (reg) {
	case 0:
		memcpy(pval, "Xen/ia64", 8);
		break;
	case 1:
		*pval = 0;
		break;
	case 2:
		*pval = 0;
		break;
	case 3:
		*pval = ia64_get_cpuid(3);
		break;
	case 4:
		*pval = ia64_get_cpuid(4);
		break;
	default:
		if (reg > (ia64_get_cpuid(3) & 0xff))
			return IA64_RSVDREG_FAULT;
		*pval = ia64_get_cpuid(reg);
		break;
	}
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU region register access routines
**************************************************************************/

unsigned long vcpu_get_rr_ve(VCPU * vcpu, u64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
	return rr.ve;
}

IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val)
{
	if (unlikely(is_reserved_rr_field(vcpu, val))) {
		gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
		return IA64_RSVDREG_FAULT;
	}

	PSCB(vcpu, rrs)[reg >> 61] = val;
	if (likely(vcpu == current)) {
		int rc = set_one_rr(reg, val);
		BUG_ON(rc == 0);
	}
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval)
{
	if (VMX_DOMAIN(vcpu))
		*pval = VMX(vcpu, vrr[reg >> 61]);
	else
		*pval = PSCB(vcpu, rrs)[reg >> 61];
	return IA64_NO_FAULT;
}
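/*
 * Editorial note (added, not part of the original vcpu.c): ia64 divides the
 * 64-bit virtual address space into eight regions selected by address bits
 * 63:61, which is why the routines above and below index PSCB(vcpu, rrs)[]
 * with "reg >> 61".  Each region register carries the region ID, preferred
 * page size and VHPT-enable bit for its region; the shadow copy in the PSCB
 * is always updated, while the machine register is only written when the
 * vCPU is the one currently running.  The sketch below is illustrative only
 * and compiled out; its name is the editor's, everything it calls is defined
 * above.
 */
#if 0
static void example_copy_rr0_to_rr3(VCPU * vcpu)
{
	u64 rrval;

	/* Bits 63:61 of the "reg" argument select the region register. */
	if (vcpu_get_rr(vcpu, 0UL << 61, &rrval) == IA64_NO_FAULT)
		(void)vcpu_set_rr(vcpu, 3UL << 61, rrval);
}
#endif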
IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcpu, u64 val0, u64 val1, u64 val2,
			      u64 val3, u64 val4)
{
	u64 reg0 = 0x0000000000000000UL;
	u64 reg1 = 0x2000000000000000UL;
	u64 reg2 = 0x4000000000000000UL;
	u64 reg3 = 0x6000000000000000UL;
	u64 reg4 = 0x8000000000000000UL;

	if (unlikely(is_reserved_rr_field(vcpu, val0) ||
		     is_reserved_rr_field(vcpu, val1) ||
		     is_reserved_rr_field(vcpu, val2) ||
		     is_reserved_rr_field(vcpu, val3) ||
		     is_reserved_rr_field(vcpu, val4))) {
		gdprintk(XENLOG_DEBUG,
			 "use of invalid rrval %lx %lx %lx %lx %lx\n",
			 val0, val1, val2, val3, val4);
		return IA64_RSVDREG_FAULT;
	}

	PSCB(vcpu, rrs)[reg0 >> 61] = val0;
	PSCB(vcpu, rrs)[reg1 >> 61] = val1;
	PSCB(vcpu, rrs)[reg2 >> 61] = val2;
	PSCB(vcpu, rrs)[reg3 >> 61] = val3;
	PSCB(vcpu, rrs)[reg4 >> 61] = val4;
	if (likely(vcpu == current)) {
		int rc;
		rc  = !set_one_rr(reg0, val0);
		rc |= !set_one_rr(reg1, val1);
		rc |= !set_one_rr(reg2, val2);
		rc |= !set_one_rr(reg3, val3);
		rc |= !set_one_rr(reg4, val4);
		BUG_ON(rc != 0);
	}
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval)
{
	if (reg > XEN_IA64_NPKRS)
		return IA64_RSVDREG_FAULT;	/* register index too large */

	*pval = (u64) PSCBX(vcpu, pkrs[reg]);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val)
{
	ia64_pkr_t pkr_new;

	if (reg >= XEN_IA64_NPKRS)
		return IA64_RSVDREG_FAULT;	/* index too large */

	pkr_new.val = val;
	if (pkr_new.reserved1)
		return IA64_RSVDREG_FAULT;	/* reserved field */

	if (pkr_new.reserved2)
		return IA64_RSVDREG_FAULT;	/* reserved field */

	PSCBX(vcpu, pkrs[reg]) = pkr_new.val;
	ia64_set_pkr(reg, pkr_new.val);
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU translation register access routines
**************************************************************************/

static void
vcpu_set_tr_entry_rid(TR_ENTRY * trp, u64 pte,
                      u64 itir, u64 ifa, u64 rid)
{
	u64 ps;
	union pte_flags new_pte;

	trp->itir = itir;
	trp->rid = rid;
	ps = trp->ps;
	new_pte.val = pte;
	if (new_pte.pl < CONFIG_CPL0_EMUL)
		new_pte.pl = CONFIG_CPL0_EMUL;
	trp->vadr = ifa & ~0xfff;
	if (ps > 12) {		// "ignore" relevant low-order bits
		new_pte.ppn &= ~((1UL << (ps - 12)) - 1);
		trp->vadr &= ~((1UL << ps) - 1);
	}

	/* Atomic write.  */
	trp->pte.val = new_pte.val;
}

static inline void
vcpu_set_tr_entry(TR_ENTRY * trp, u64 pte, u64 itir, u64 ifa)
{
	vcpu_set_tr_entry_rid(trp, pte, itir, ifa,
			      VCPU(current, rrs[ifa >> 61]) & RR_RID_MASK);
}

IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte,
                     u64 itir, u64 ifa)
{
	TR_ENTRY *trp;

	if (slot >= NDTRS)
		return IA64_RSVDREG_FAULT;

	vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));

	trp = &PSCBX(vcpu, dtrs[slot]);
	//printk("***** itr.d: setting slot %d: ifa=%p\n", slot, ifa);
	vcpu_set_tr_entry(trp, pte, itir, ifa);
	vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), ifa);

	/*
	 * FIXME According to spec, vhpt should be purged, but this
	 * incurs considerable performance loss, since it is safe for
	 * linux not to purge vhpt, vhpt purge is disabled until a
	 * feasible way is found.
	 *
	 * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
	 */
	return IA64_NO_FAULT;
}
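/*
 * Editorial note (added, not part of the original vcpu.c): translation
 * registers are the guest's pinned mappings.  The emulation keeps them in
 * the per-vCPU dtrs[]/itrs[] arrays and records, in the dtr_regions and
 * itr_regions bitmasks, which of the eight regions hold at least one valid
 * entry, so that later purges can skip untouched regions.  The compiled-out
 * sketch below only shows the calling convention: the log2 page size sits
 * in bits 7:2 of the itir; the function name and the choice of slot 0 are
 * the editor's.
 */
#if 0
static IA64FAULT example_pin_data_mapping(VCPU * vcpu, u64 pte, u64 vaddr)
{
	u64 itir = 14UL << 2;	/* itir.ps = 14, i.e. a 16KB page */

	/* Insert into data translation register slot 0. */
	return vcpu_itr_d(vcpu, 0, pte, itir, vaddr);
}
#endif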
IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte,
                     u64 itir, u64 ifa)
{
	TR_ENTRY *trp;

	if (slot >= NITRS)
		return IA64_RSVDREG_FAULT;

	vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));

	trp = &PSCBX(vcpu, itrs[slot]);
	//printk("***** itr.i: setting slot %d: ifa=%p\n", slot, ifa);
	vcpu_set_tr_entry(trp, pte, itir, ifa);
	vcpu_quick_region_set(PSCBX(vcpu, itr_regions), ifa);

	/*
	 * FIXME According to spec, vhpt should be purged, but this
	 * incurs considerable performance loss, since it is safe for
	 * linux not to purge vhpt, vhpt purge is disabled until a
	 * feasible way is found.
	 *
	 * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
	 */
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot, u64 pte,
                       u64 itir, u64 ifa, u64 rid)
{
	TR_ENTRY *trp;

	if (slot >= NITRS)
		return IA64_RSVDREG_FAULT;

	trp = &PSCBX(vcpu, itrs[slot]);
	vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);

	/* Recompute the itr_region.  */
	vcpu->arch.itr_regions = 0;
	for (trp = vcpu->arch.itrs; trp < &vcpu->arch.itrs[NITRS]; trp++)
		if (trp->pte.p)
			vcpu_quick_region_set(vcpu->arch.itr_regions,
			                      trp->vadr);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot, u64 pte,
                       u64 itir, u64 ifa, u64 rid)
{
	TR_ENTRY *trp;

	if (slot >= NDTRS)
		return IA64_RSVDREG_FAULT;

	trp = &PSCBX(vcpu, dtrs[slot]);
	vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);

	/* Recompute the dtr_region.  */
	vcpu->arch.dtr_regions = 0;
	for (trp = vcpu->arch.dtrs; trp < &vcpu->arch.dtrs[NDTRS]; trp++)
		if (trp->pte.p)
			vcpu_quick_region_set(vcpu->arch.dtr_regions,
			                      trp->vadr);
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU translation cache access routines
**************************************************************************/

static void
vcpu_rebuild_vhpt(VCPU * vcpu, u64 ps)
{
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
	printk("vhpt rebuild: using page_shift %d\n", (int)ps);

	vcpu->arch.vhpt_pg_shift = ps;
	vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
	vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
	local_vhpt_flush();
	load_region_regs(vcpu);
#else
	panic_domain(NULL, "domain trying to use smaller page size!\n");
#endif
}
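/*
 * Editorial note (added, not part of the original vcpu.c): the translation
 * cache routines that follow carry the page size inside the itir operand
 * and unpack it through the ia64_itir_t union, exactly as vcpu_itc_no_srlz()
 * does with "_itir.ps".  The compiled-out helper below just makes that
 * decoding explicit; its name is the editor's.
 */
#if 0
static unsigned long example_itir_page_bytes(u64 itir)
{
	ia64_itir_t _itir = {.itir = itir};

	/* _itir.ps is the log2 page size held in bits 7:2 of the itir. */
	return 1UL << _itir.ps;
}
#endif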
void
vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
                 u64 mp_pte, u64 itir, struct p2m_entry *entry)
{
	ia64_itir_t _itir = {.itir = itir};
	unsigned long psr;

	check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);

	// FIXME, must be inlined or potential for nested fault here!
	if ((vcpu->domain == dom0) && (_itir.ps < PAGE_SHIFT))
		panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
		             "smaller page size!\n");

	BUG_ON(_itir.ps > PAGE_SHIFT);
	vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
	psr = ia64_clear_ic();
	pte &= ~(_PAGE_RV2 | _PAGE_RV1);	// Mask out the reserved bits.
						// FIXME: look for bigger mappings
	ia64_itc(IorD, vaddr, pte, _itir.itir);
	ia64_set_psr(psr);
	// ia64_srlz_i(); // no srlz req'd, will rfi later

	if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
		// FIXME: this is dangerous... vhpt_flush_address ensures these
		// addresses never get flushed.  More work needed if this
		// ever happens.
		//printk("vhpt_insert(%p,%p,%p)\n", vaddr, pte, 1L << logps);
		if (_itir.ps > vcpu->arch.vhpt_pg_shift)
			vhpt_multiple_insert(vaddr, pte, _itir.itir);
		else
			vhpt_insert(vaddr, pte, _itir.itir);
	} else {
		// even if domain pagesize is larger than PAGE_SIZE, just put
		// PAGE_SIZE mapping in the vhpt for now, else purging is
		// complicated
		vhpt_insert(vaddr, pte, _itir.itir);
	}
}

IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
{
	unsigned long pteval;
	BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
	struct p2m_entry entry;
	ia64_itir_t _itir = {.itir = itir};

	if (_itir.ps < vcpu->arch.vhpt_pg_shift)
		vcpu_rebuild_vhpt(vcpu, _itir.ps);

 again:
	//itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
	pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
	if (!pteval)
		return IA64_ILLOP_FAULT;
	if (swap_rr0)
		set_virtual_rr0();
	vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
	if (swap_rr0)
		set_metaphysical_rr0();
	if (p2m_entry_retry(&entry)) {
		vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
		goto again;
	}
	vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
{
	unsigned long pteval;
	BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
	struct p2m_entry entry;
	ia64_itir_t _itir = {.itir = itir};

	if (_itir.ps < vcpu->arch.vhpt_pg_shift)
		vcpu_rebuild_vhpt(vcpu, _itir.ps);

 again:
	//itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
	pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
	if (!pteval)
		return IA64_ILLOP_FAULT;
	if (swap_rr0)
		set_virtual_rr0();
	vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
	if (swap_rr0)
		set_metaphysical_rr0();
	if (p2m_entry_retry(&entry)) {
		vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
		goto again;
	}
	vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range)
{
	BUG_ON(vcpu != current);
	check_xen_space_overlap("ptc_l", vadr, 1UL << log_range);

	/* Purge TC  */
	vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
	vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));

	/* Purge all tlb and vhpt */
	vcpu_flush_tlb_vhpt_range(vadr, log_range);

	return IA64_NO_FAULT;
}

// At privlvl=0, fc performs no access rights or protection key checks, while
// at privlvl!=0, fc performs access rights checks as if it were a 1-byte
// read but no protection key check.  Thus in order to avoid an unexpected
// access rights fault, we have to translate the virtual address to a
// physical address (possibly via a metaphysical address) and do the fc
// on the physical address, which is guaranteed to flush the same cache line
IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr)
{
	// TODO: Only allowed for current vcpu
	u64 mpaddr, paddr;
	IA64FAULT fault;

 again:
	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
	if (fault == IA64_NO_FAULT) {
		struct p2m_entry entry;

		paddr = translate_domain_mpaddr(mpaddr, &entry);
		ia64_fc(__va(paddr));
		if (p2m_entry_retry(&entry))
			goto again;
	}
	return fault;
}
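/*
 * Editorial note (added, not part of the original vcpu.c): vcpu_fc() works
 * around the access-rights check that fc performs at non-zero privilege by
 * translating the guest virtual address down to a machine physical address
 * and flushing that instead, retrying if the p2m changed underneath it.
 * The compiled-out sketch shows how a caller emulating a guest flush of a
 * buffer might walk it; the function name and the 32-byte stride (assumed
 * here as the architectural minimum fc granularity) are the editor's, not
 * taken from this file.
 */
#if 0
static IA64FAULT example_flush_buffer(VCPU * vcpu, u64 vadr, u64 len)
{
	u64 off;
	IA64FAULT fault = IA64_NO_FAULT;

	for (off = 0; off < len && fault == IA64_NO_FAULT; off += 32)
		fault = vcpu_fc(vcpu, vadr + off);	/* stop on first fault */
	return fault;
}
#endif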
IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr)
{
	// Note that this only needs to be called once, i.e. the
	// architected loop to purge the entire TLB, should use
	//  base = stride1 = stride2 = 0, count0 = count1 = 1
	vcpu_flush_vtlb_all(current);

	return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range)
{
	printk("vcpu_ptc_g: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
}

IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range)
{
	// FIXME: validate not flushing Xen addresses
	// if (Xen address) return(IA64_ILLOP_FAULT);
	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
	//printk("######## vcpu_ptc_ga(%p,%p) ##############\n", vadr, addr_range);

	check_xen_space_overlap("ptc_ga", vadr, addr_range);

	domain_flush_vtlb_range(vcpu->domain, vadr, addr_range);

	return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range)
{
	unsigned long region = vadr >> 61;
	u64 addr_range = 1UL << log_range;
	unsigned long rid, rr;
	int i;
	TR_ENTRY *trp;

	BUG_ON(vcpu != current);
	check_xen_space_overlap("ptr_d", vadr, 1UL << log_range);

	rr = PSCB(vcpu, rrs)[region];
	rid = rr & RR_RID_MASK;

	/* Purge TC  */
	vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));

	/* Purge tr and recompute dtr_regions.  */
	vcpu->arch.dtr_regions = 0;
	for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++)
		if (vcpu_match_tr_entry_range
		    (trp, rid, vadr, vadr + addr_range))
			vcpu_purge_tr_entry(trp);
		else if (trp->pte.p)
			vcpu_quick_region_set(vcpu->arch.dtr_regions,
					      trp->vadr);

	vcpu_flush_tlb_vhpt_range(vadr, log_range);

	return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range)
{
	unsigned long region = vadr >> 61;
	u64 addr_range = 1UL << log_range;
	unsigned long rid, rr;
	int i;
	TR_ENTRY *trp;

	BUG_ON(vcpu != current);
	check_xen_space_overlap("ptr_i", vadr, 1UL << log_range);

	rr = PSCB(vcpu, rrs)[region];
	rid = rr & RR_RID_MASK;

	/* Purge TC  */
	vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));

	/* Purge tr and recompute itr_regions.  */
	vcpu->arch.itr_regions = 0;
	for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++)
		if (vcpu_match_tr_entry_range
		    (trp, rid, vadr, vadr + addr_range))
			vcpu_purge_tr_entry(trp);
		else if (trp->pte.p)
			vcpu_quick_region_set(vcpu->arch.itr_regions,
					      trp->vadr);

	vcpu_flush_tlb_vhpt_range(vadr, log_range);

	return IA64_NO_FAULT;
}
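/*
 * Editorial note (added, not part of the original vcpu.c): the purge family
 * above divides the work as follows.  vcpu_ptc_l flushes the single-entry
 * dtlb/itlb caches plus the tlb/vhpt for a range on the local vCPU,
 * vcpu_ptc_e drops the entire virtual TLB, vcpu_ptc_ga flushes a range
 * across every vCPU of the domain via domain_flush_vtlb_range(), and
 * vcpu_ptr_d/vcpu_ptr_i additionally drop any translation registers that
 * overlap the range, recomputing the per-region bitmasks as they go.  The
 * compiled-out sketch shows the calling convention: log_range is the log2
 * of the byte range, and these calls are only legal for the running vCPU
 * (BUG_ON(vcpu != current)).  The function name is the editor's.
 */
#if 0
static void example_purge_16k(VCPU * vcpu, u64 vadr)
{
	/* 1UL << 14 == 16KB starting at vadr. */
	(void)vcpu_ptr_d(vcpu, vadr, 14);
	(void)vcpu_ptr_i(vcpu, vadr, 14);
}
#endif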
