
📄 vcpu.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 of 4
/**************************************************************************
 Privileged operation emulation routines
**************************************************************************/

static void vcpu_force_tlb_miss(VCPU * vcpu, u64 ifa)
{
	PSCB(vcpu, ifa) = ifa;
	PSCB(vcpu, itir) = vcpu_get_itir_on_fault(vcpu, ifa);
	vcpu_thash(current, ifa, &PSCB(current, iha));
}

IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa)
{
	vcpu_force_tlb_miss(vcpu, ifa);
	return vcpu_get_rr_ve(vcpu, ifa) ? IA64_INST_TLB_VECTOR :
		IA64_ALT_INST_TLB_VECTOR;
}

IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa)
{
	vcpu_force_tlb_miss(vcpu, ifa);
	return vcpu_get_rr_ve(vcpu, ifa) ? IA64_DATA_TLB_VECTOR :
		IA64_ALT_DATA_TLB_VECTOR;
}

IA64FAULT vcpu_rfi(VCPU * vcpu)
{
	u64 ifs;
	REGS *regs = vcpu_regs(vcpu);

	vcpu_set_psr(vcpu, PSCB(vcpu, ipsr));
	ifs = PSCB(vcpu, ifs);
	if (ifs & 0x8000000000000000UL)
		regs->cr_ifs = ifs;
	regs->cr_iip = PSCB(vcpu, iip);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_cover(VCPU * vcpu)
{
	// TODO: Only allowed for current vcpu
	REGS *regs = vcpu_regs(vcpu);
	if (!PSCB(vcpu, interrupt_collection_enabled)) {
		PSCB(vcpu, ifs) = regs->cr_ifs;
	}
	regs->cr_ifs = 0;
	return IA64_NO_FAULT;
}

/* Emulate the thash instruction: compute the address of the guest's
   (short format) VHPT entry covering vadr, from the guest PTA and the
   page size of the region that contains vadr. */
IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval)
{
	u64 pta = PSCB(vcpu, pta);
	u64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
	u64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT) - 1);
	u64 Mask = (1L << pta_sz) - 1;
	u64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
	u64 compMask_60_15 = ~Mask_60_15;
	u64 rr_ps = vcpu_get_rr_ps(vcpu, vadr);
	u64 VHPT_offset = (vadr >> rr_ps) << 3;
	u64 VHPT_addr1 = vadr & 0xe000000000000000L;
	u64 VHPT_addr2a =
	    ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
	u64 VHPT_addr2b =
	    ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
	u64 VHPT_addr3 = VHPT_offset & 0x7fff;
	u64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
	    VHPT_addr3;

//verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
	*pval = VHPT_addr;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr)
{
	printk("vcpu_ttag: ttag instruction unsupported\n");
	return IA64_ILLOP_FAULT;
}

int warn_region0_address = 0;	// FIXME later: tie to a boot parameter?

/* Return TRUE iff [b1,e1] and [b2,e2] partially or fully overlap.  */
static inline int range_overlap(u64 b1, u64 e1, u64 b2, u64 e2)
{
	return (b1 <= e2) && (e1 >= b2);
}

/* Crash the domain if [base, base + page_size] and the Xen virtual space
   overlap.  Note: LSBs of base inside page_size are ignored.  */
static inline void
check_xen_space_overlap(const char *func, u64 base, u64 page_size)
{
	/* Overlaps can occur only in region 7.
	   (This is an optimization to bypass all the checks).  */
	if (REGION_NUMBER(base) != 7)
		return;

	/* Mask LSBs of base.  */
	base &= ~(page_size - 1);

	/* FIXME: ideally an MCA should be generated...  */
	if (range_overlap(HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
			  base, base + page_size)
	    || range_overlap(current->domain->arch.shared_info_va,
			     current->domain->arch.shared_info_va
			     + XSI_SIZE + XMAPPEDREGS_SIZE,
			     base, base + page_size))
		panic_domain(NULL, "%s on Xen virtual space (%lx)\n",
			     func, base);
}

// FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
static inline int vcpu_match_tr_entry_no_p(TR_ENTRY * trp, u64 ifa,
                                           u64 rid)
{
	return trp->rid == rid
	    && ifa >= trp->vadr && ifa <= (trp->vadr + (1L << trp->ps) - 1);
}

static inline int vcpu_match_tr_entry(TR_ENTRY * trp, u64 ifa, u64 rid)
{
	return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
}

static inline int
vcpu_match_tr_entry_range(TR_ENTRY * trp, u64 rid, u64 b, u64 e)
{
	return trp->rid == rid
	    && trp->pte.p
	    && range_overlap(b, e, trp->vadr, trp->vadr + (1L << trp->ps) - 1);
}

static TR_ENTRY *vcpu_tr_lookup(VCPU * vcpu, unsigned long va, u64 rid,
                                BOOLEAN is_data)
{
	unsigned char *regions;
	TR_ENTRY *trp;
	int tr_max;
	int i;

	if (is_data) {
		// data
		regions = &vcpu->arch.dtr_regions;
		trp = vcpu->arch.dtrs;
		tr_max = sizeof(vcpu->arch.dtrs) / sizeof(vcpu->arch.dtrs[0]);
	} else {
		// instruction
		regions = &vcpu->arch.itr_regions;
		trp = vcpu->arch.itrs;
		tr_max = sizeof(vcpu->arch.itrs) / sizeof(vcpu->arch.itrs[0]);
	}

	if (!vcpu_quick_region_check(*regions, va)) {
		return NULL;
	}
	for (i = 0; i < tr_max; i++, trp++) {
		if (vcpu_match_tr_entry(trp, va, rid)) {
			return trp;
		}
	}
	return NULL;
}

// return value
// 0: failure
// 1: success
int
vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
                       IA64_BUNDLE * bundle)
{
	u64 gpip;		// guest pseudo physical ip
	unsigned long vaddr;
	struct page_info *page;

 again:
#if 0
	// Currently xen doesn't track psr.it bits.
	// it assumes always psr.it = 1.
	if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
		gpip = gip;
	} else
#endif
	{
		unsigned long region = REGION_NUMBER(gip);
		unsigned long rr = PSCB(vcpu, rrs)[region];
		unsigned long rid = rr & RR_RID_MASK;
		BOOLEAN swap_rr0;
		TR_ENTRY *trp;

		// vcpu->arch.{i, d}tlb are volatile;
		// copy the value to the local variable tr before use.
		TR_ENTRY tr;

		trp = vcpu_tr_lookup(vcpu, gip, rid, 0);
		if (trp != NULL) {
			tr = *trp;
			goto found;
		}
		// When it failed to get a bundle, an itlb miss is reflected.
		// The last itc.i value is cached in PSCBX(vcpu, itlb).
		tr = PSCBX(vcpu, itlb);
		if (vcpu_match_tr_entry(&tr, gip, rid)) {
			//dprintk(XENLOG_WARNING,
			//        "%s gip 0x%lx gpip 0x%lx\n", __func__,
			//	  gip, gpip);
			goto found;
		}
		trp = vcpu_tr_lookup(vcpu, gip, rid, 1);
		if (trp != NULL) {
			tr = *trp;
			goto found;
		}
#if 0
		tr = PSCBX(vcpu, dtlb);
		if (vcpu_match_tr_entry(&tr, gip, rid)) {
			goto found;
		}
#endif

		// try to access gip with guest virtual address
		// This may cause tlb miss. see vcpu_translate(). Be careful!
		swap_rr0 = (!region && PSCB(vcpu, metaphysical_mode));
		if (swap_rr0) {
			set_virtual_rr0();
		}
		*bundle = __get_domain_bundle(gip);
		if (swap_rr0) {
			set_metaphysical_rr0();
		}

		if (bundle->i64[0] == 0 && bundle->i64[1] == 0) {
			dprintk(XENLOG_INFO, "%s gip 0x%lx\n", __func__, gip);
			return 0;
		}
		return 1;

	found:
		gpip = ((tr.pte.ppn >> (tr.ps - 12)) << tr.ps) |
			(gip & ((1 << tr.ps) - 1));
	}

	vaddr = (unsigned long)domain_mpa_to_imva(vcpu->domain, gpip);
	page = virt_to_page(vaddr);
	if (get_page(page, vcpu->domain) == 0) {
		if (page_get_owner(page) != vcpu->domain) {
			// This page might be a page granted by another
			// domain.
			panic_domain(regs, "domain tries to execute foreign "
				     "domain page which might be mapped by "
				     "grant table.\n");
		}
		goto again;
	}
	*bundle = *((IA64_BUNDLE *) vaddr);
	put_page(page);
	return 1;
}

IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
			 u64 * pteval, u64 * itir, u64 * iha)
{
	unsigned long region = REGION_NUMBER(address);
	unsigned long pta, rid, rr, key = 0;
	union pte_flags pte;
	TR_ENTRY *trp;

	if (PSCB(vcpu, metaphysical_mode) && !(!is_data && region)) {
		// dom0 may generate an uncacheable physical address (msb=1)
		if (region && ((region != 4) || (vcpu->domain != dom0))) {
// FIXME: This seems to happen even though it shouldn't.  Need to track
// this down, but since it has been apparently harmless, just flag it for now
//                      panic_domain(vcpu_regs(vcpu),

			/*
			 * The guest may execute itc.d and rfi with psr.dt=0.
			 * When the VMM then tries to fetch the opcode, a tlb
			 * miss may happen; at that time
			 * PSCB(vcpu,metaphysical_mode)=1 and region=5, so the
			 * VMM needs to handle this tlb miss as if
			 * PSCB(vcpu,metaphysical_mode)=0.
			 */
			printk("vcpu_translate: bad physical address: 0x%lx "
			       "at %lx\n", address, vcpu_regs(vcpu)->cr_iip);

		} else {
			*pteval = (address & _PAGE_PPN_MASK) |
				__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX;
			*itir = vcpu->arch.vhpt_pg_shift << 2;
			perfc_incr(phys_translate);
			return IA64_NO_FAULT;
		}
	} else if (!region && warn_region0_address) {
		REGS *regs = vcpu_regs(vcpu);
		unsigned long viip = PSCB(vcpu, iip);
		unsigned long vipsr = PSCB(vcpu, ipsr);
		unsigned long iip = regs->cr_iip;
		unsigned long ipsr = regs->cr_ipsr;
		printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, "
		       "vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
		       address, viip, vipsr, iip, ipsr);
	}

	rr = PSCB(vcpu, rrs)[region];
	rid = rr & RR_RID_MASK;
	if (is_data) {
		trp = vcpu_tr_lookup(vcpu, address, rid, 1);
		if (trp != NULL) {
			*pteval = trp->pte.val;
			*itir = trp->itir;
			perfc_incr(tr_translate);
			return IA64_NO_FAULT;
		}
	}
	// FIXME?: check itr's for data accesses too, else bad things happen?
	/* else */  {
		trp = vcpu_tr_lookup(vcpu, address, rid, 0);
		if (trp != NULL) {
			*pteval = trp->pte.val;
			*itir = trp->itir;
			perfc_incr(tr_translate);
			return IA64_NO_FAULT;
		}
	}

	/* check 1-entry TLB */
	// FIXME?: check dtlb for inst accesses too, else bad things happen?
	trp = &vcpu->arch.dtlb;
	pte = trp->pte;
	if ( /* is_data && */ pte.p
	    && vcpu_match_tr_entry_no_p(trp, address, rid)) {
		*pteval = pte.val;
		*itir = trp->itir;
		perfc_incr(dtlb_translate);
		return IA64_USE_TLB;
	}

	/* check guest VHPT */
	pta = PSCB(vcpu, pta);

	*itir = rr & (RR_RID_MASK | RR_PS_MASK);
	// note: architecturally, iha is optionally set for alt faults but
	// xenlinux depends on it so should document it as part of PV interface
	vcpu_thash(vcpu, address, iha);
	if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE)) {
		REGS *regs = vcpu_regs(vcpu);
		struct opt_feature* optf = &(vcpu->domain->arch.opt_feature);

		/* Optimization for identity mapped region 7 OS (linux) */
		if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG7_FLG &&
		    region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL &&
		    REGION_OFFSET(address) < _PAGE_PPN_MASK) {
			pte.val = address & _PAGE_PPN_MASK;
			pte.val = pte.val | optf->im_reg7.pgprot;
			key = optf->im_reg7.key;
			goto out;
		}
		return is_data ? IA64_ALT_DATA_TLB_VECTOR :
			IA64_ALT_INST_TLB_VECTOR;
	}

	if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
		/*
		 * minimal support: vhpt walker is really dumb and won't find
		 * anything
		 */
		return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
	}

	/* avoid recursively walking (short format) VHPT */
	if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
		return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;

	if (!__access_ok(*iha)
	    || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
		// virtual VHPT walker "missed" in TLB
		return IA64_VHPT_FAULT;

	/*
	 * Optimisation: this VHPT walker aborts on not-present pages
	 * instead of inserting a not-present translation, this allows
	 * vectoring directly to the miss handler.
	 */
	if (!pte.p)
		return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;

	/* found mapping in guest VHPT! */
out:
	*itir = (rr & RR_PS_MASK) | (key << IA64_ITIR_KEY);
	*pteval = pte.val;
	perfc_incr(vhpt_translate);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr)
{
	u64 pteval, itir, mask, iha;
	IA64FAULT fault;

	fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
		mask = itir_mask(itir);
		*padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
		return IA64_NO_FAULT;
	}
	return vcpu_force_data_miss(vcpu, vadr);
}

IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key)
{
	u64 pteval, itir, iha;
	IA64FAULT fault;

	fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
		*key = itir & IA64_ITIR_KEY_MASK;
	else
		*key = 1;
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/

IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
{
	if (reg >= IA64_NUM_DBG_REGS)
		return IA64_RSVDREG_FAULT;

	if ((reg & 1) == 0) {
		/* Validate address. */
		if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
			return IA64_ILLOP_FAULT;
	} else {
		if (!VMX_DOMAIN(vcpu)) {
			/* Mask PL0. */
			val &= ~(1UL << 56);
		}
	}
	if (val != 0)
		vcpu->arch.dbg_used |= (1 << reg);
	else
		vcpu->arch.dbg_used &= ~(1 << reg);
	vcpu->arch.dbr[reg] = val;
	if (vcpu == current)
		ia64_set_dbr(reg, val);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
{
	if (reg >= IA64_NUM_DBG_REGS)
		return IA64_RSVDREG_FAULT;

	if ((reg & 1) == 0) {
		/* Validate address. */
		if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
			return IA64_ILLOP_FAULT;
	} else {
		if (!VMX_DOMAIN(vcpu)) {
			/* Mask PL0. */
			val &= ~(1UL << 56);
		}
	}
	if (val != 0)
		vcpu->arch.dbg_used |= (1 << (reg + IA64_NUM_DBG_REGS));
	else
		vcpu->arch.dbg_used &= ~(1 << (reg + IA64_NUM_DBG_REGS));
	vcpu->arch.ibr[reg] = val;
	if (vcpu == current)
		ia64_set_ibr(reg, val);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
{
	if (reg >= IA64_NUM_DBG_REGS)
		return IA64_RSVDREG_FAULT;

	*pval = vcpu->arch.dbr[reg];
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
{
	if (reg >= IA64_NUM_DBG_REGS)
		return IA64_RSVDREG_FAULT;

	*pval = vcpu->arch.ibr[reg];
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/

IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMC registers are discarded
#ifdef DEBUG_PFMON
	printk("vcpu_set_pmc(%x,%lx)\n", reg, val);
#endif
	ia64_set_pmc(reg, val);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMD registers are discarded
#ifdef DEBUG_PFMON
	printk("vcpu_set_pmd(%x,%lx)\n", reg, val);
#endif
	ia64_set_pmd(reg, val);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
{
	// NOTE: Reads from unimplemented PMC registers return zero
	u64 val = (u64) ia64_get_pmc(reg);
#ifdef DEBUG_PFMON
	printk("%lx=vcpu_get_pmc(%x)\n", val, reg);
#endif
	*pval = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
{
	// NOTE: Reads from unimplemented PMD registers return zero
	u64 val = (u64) ia64_get_pmd(reg);
#ifdef DEBUG_PFMON
	printk("%lx=vcpu_get_pmd(%x)\n", val, reg);
#endif
	*pval = val;
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/
#define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT)     \
do{     \
    __asm__ __volatile__ (                      \
        ";;extr.u %0 = %3,%6,16;;\n"            \
        "dep %1 = %0, %1, 0, 16;;\n"            \
        "st8 [%4] = %1\n"                       \
        "extr.u %0 = %2, 16, 16;;\n"            \
        "dep %3 = %0, %3, %6, 16;;\n"           \
        "st8 [%5] = %3\n"                       \
        ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
        "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory");    \
}while(0)

IA64FAULT vcpu_bsw0(VCPU * vcpu)
{
	// TODO: Only allowed for current vcpu
	REGS *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
	unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &PSCB(vcpu, vbnat);
	unsigned long *b1unat = &PSCB(vcpu, vnat);
	unsigned long i;

	if (VMX_DOMAIN(vcpu)) {
		if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
			for (i = 0; i < 16; i++) {
				*b1++ = *r;
				*r++ = *b0++;
			}
			vcpu_bsw0_unat(i, b0unat, b1unat, runat,
				       IA64_PT_REGS_R16_SLOT);
			VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
		}
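The listing on this page stops partway through vcpu_bsw0(); the remainder follows on the next pages. As an aside, the short-format VHPT hash implemented by vcpu_thash() above can be exercised in isolation. The sketch below is not part of vcpu.c: the helper name thash_example and the sample values (a 1 MB VHPT based at 0xe000000000100000, i.e. pta.size = 20, and a 16 KB region page size, rr.ps = 14) are assumptions chosen only for illustration, with IA64_PTA_BASE_BIT taken as 15; the bit manipulation itself mirrors vcpu_thash().

/* Hypothetical standalone illustration of the short-format VHPT hash
 * computed by vcpu_thash() above; inputs are examples, not Xen defaults. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint64_t thash_example(uint64_t vadr, uint64_t pta_base,
                              unsigned pta_sz, unsigned rr_ps)
{
	uint64_t mask = (1UL << pta_sz) - 1;              /* VHPT size mask */
	uint64_t mask_60_15 = (mask >> 15) & 0x3fffffffffffUL;
	uint64_t off = (vadr >> rr_ps) << 3;              /* 8-byte entries */
	uint64_t addr1 = vadr & 0xe000000000000000UL;     /* region bits 63..61 */
	uint64_t addr2a = ((pta_base >> 15) & 0x3fffffffffffUL) & ~mask_60_15;
	uint64_t addr2b = ((off >> 15) & 0x3fffffffffffUL) & mask_60_15;
	uint64_t addr3 = off & 0x7fff;                    /* low 15 offset bits */

	return addr1 | ((addr2a | addr2b) << 15) | addr3;
}

int main(void)
{
	uint64_t va = 0xe000000012345678UL;               /* example guest address */
	printf("VHPT entry address: 0x%" PRIx64 "\n",
	       thash_example(va, 0xe000000000100000UL, 20, 14));
	return 0;
}

In words: the region bits of the virtual address are kept, the PTA base bits above the table size select the table, and the (vadr >> ps) * 8 offset selects the 8-byte entry inside it, which is why vcpu_translate() above can read the guest PTE directly from *iha with __copy_from_user().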
