vcpu.c — Xen virtual machine source package (C, page 1 of 4)
/*
 * Virtualized CPU functions
 *
 * Copyright (C) 2004-2005 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/sched.h>
#include <public/xen.h>
#include <xen/mm.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>
#include <asm/vhpt.h>
#include <asm/tlbflush.h>
#include <asm/privop.h>
#include <xen/event.h>
#include <asm/vmx_phy_mode.h>
#include <asm/bundle.h>
#include <asm/privop_stat.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>
#include <asm/tlb_track.h>

/* FIXME: where should these declarations be? */
extern void getreg(unsigned long regnum, unsigned long *val, int *nat,
                   struct pt_regs *regs);
extern void setreg(unsigned long regnum, unsigned long val, int nat,
                   struct pt_regs *regs);
extern void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
                     struct pt_regs *regs);
extern void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
                     struct pt_regs *regs);

typedef union {
	struct ia64_psr ia64_psr;
	unsigned long i64;
} PSR;

// this def for vcpu_regs won't work if kernel stack is present
//#define       vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs

#define	IA64_PTA_SZ_BIT		2
#define	IA64_PTA_VF_BIT		8
#define	IA64_PTA_BASE_BIT	15
#define	IA64_PTA_SZ(x)		(x##UL << IA64_PTA_SZ_BIT)

#define IA64_PSR_NON_VIRT_BITS				\
	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC |	\
	 IA64_PSR_MFL| IA64_PSR_MFH| IA64_PSR_PK |	\
	 IA64_PSR_DFL| IA64_PSR_SP | IA64_PSR_DB |	\
	 IA64_PSR_LP | IA64_PSR_TB | IA64_PSR_ID |	\
	 IA64_PSR_DA | IA64_PSR_DD | IA64_PSR_SS |	\
	 IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

unsigned long vcpu_verbose = 0;

/**************************************************************************
 VCPU general register access routines
**************************************************************************/

#ifdef XEN
u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg)
{
	REGS *regs = vcpu_regs(vcpu);
	u64 val;

	if (!reg)
		return 0;
	getreg(reg, &val, 0, regs);	// FIXME: handle NATs later
	return val;
}

IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val)
{
	REGS *regs = vcpu_regs(vcpu);
	int nat;

	getreg(reg, val, &nat, regs);	// FIXME: handle NATs later
	if (nat)
		return IA64_NAT_CONSUMPTION_VECTOR;
	return 0;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value, int nat)
{
	REGS *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg)
		return IA64_ILLOP_FAULT;
	if (reg >= sof + 32)
		return IA64_ILLOP_FAULT;
	setreg(reg, value, nat, regs);	// FIXME: handle NATs later
	return IA64_NO_FAULT;
}

IA64FAULT
vcpu_get_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
{
	REGS *regs = vcpu_regs(vcpu);

	getfpreg(reg, val, regs);	// FIXME: handle NATs later
	return IA64_NO_FAULT;
}

IA64FAULT
vcpu_set_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
{
	REGS *regs = vcpu_regs(vcpu);

	if (reg > 1)
		setfpreg(reg, val, regs);	// FIXME: handle NATs later
	return IA64_NO_FAULT;
}

#else
// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value)
{
	REGS *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg)
		return IA64_ILLOP_FAULT;
	if (reg >= sof + 32)
		return IA64_ILLOP_FAULT;
	setreg(reg, value, 0, regs);	// FIXME: handle NATs later
	return IA64_NO_FAULT;
}
#endif
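/*
 * Illustrative example (not part of the original file): a privileged-
 * instruction emulator pairs the accessors above to move values through
 * the guest's register frame.  GR0 always reads as zero and is not
 * writable; a write to any register at or above 32 + sof (outside the
 * current frame) draws IA64_ILLOP_FAULT.  The register numbers below are
 * arbitrary examples.
 */
#if 0
	u64 v = vcpu_get_gr(vcpu, 32);	/* read first stacked register */
	if (vcpu_set_gr(vcpu, 8, v, 0) != IA64_NO_FAULT)
		/* reflect an Illegal Operation fault to the guest */;
#endif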
void vcpu_init_regs(struct vcpu *v)
{
	struct pt_regs *regs;

	regs = vcpu_regs(v);
	if (VMX_DOMAIN(v)) {
		/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
		regs->cr_ipsr = IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT |
				IA64_PSR_I  | IA64_PSR_IC | IA64_PSR_SI |
				IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_VM;
		/* lazy fp */
		FP_PSR(v) = IA64_PSR_DFH;
		regs->cr_ipsr |= IA64_PSR_DFH;
	} else {
		regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
		    | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
		regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
				   | IA64_PSR_RI | IA64_PSR_IS);
		// domain runs at PL2
		regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
		// lazy fp
		PSCB(v, hpsr_dfh) = 1;
		PSCB(v, hpsr_mfh) = 0;
		regs->cr_ipsr |= IA64_PSR_DFH;
	}
	regs->cr_ifs = 1UL << 63;	/* or clear? */
	regs->ar_fpsr = FPSR_DEFAULT;

	if (VMX_DOMAIN(v)) {
		vmx_init_all_rr(v);
		/* Virtual processor context setup */
		VCPU(v, vpsr) = IA64_PSR_BN;
		VCPU(v, dcr) = 0;
	} else {
		init_all_rr(v);
		regs->ar_rsc = vcpu_pl_adjust(regs->ar_rsc, 2);
		VCPU(v, banknum) = 1;
		VCPU(v, metaphysical_mode) = 1;
		VCPU(v, interrupt_mask_addr) =
		    (unsigned char *)v->domain->arch.shared_info_va +
		    INT_ENABLE_OFFSET(v);
		VCPU(v, itv) = (1 << 16);	/* timer vector masked */

		v->vcpu_info->evtchn_upcall_pending = 0;
		v->vcpu_info->evtchn_upcall_mask = -1;
	}

	/* pta.size must not be 0.  The minimum is 15 (32k) */
	VCPU(v, pta) = 15 << 2;

	v->arch.domain_itm_last = -1L;
}

/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/

void vcpu_load_kernel_regs(VCPU * vcpu)
{
	ia64_set_kr(0, VCPU(vcpu, krs[0]));
	ia64_set_kr(1, VCPU(vcpu, krs[1]));
	ia64_set_kr(2, VCPU(vcpu, krs[2]));
	ia64_set_kr(3, VCPU(vcpu, krs[3]));
	ia64_set_kr(4, VCPU(vcpu, krs[4]));
	ia64_set_kr(5, VCPU(vcpu, krs[5]));
	ia64_set_kr(6, VCPU(vcpu, krs[6]));
	ia64_set_kr(7, VCPU(vcpu, krs[7]));
}

/* GCC 4.0.2 seems not to be able to suppress this call!  */
#define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT

IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val)
{
	if (reg == 44)
		return vcpu_set_itc(vcpu, val);
	else if (reg == 27)
		return IA64_ILLOP_FAULT;
	else if (reg == 24)
		printk("warning: setting ar.eflg is a no-op; no IA-32 "
		       "support\n");
	else if (reg > 7)
		return IA64_ILLOP_FAULT;
	else {
		PSCB(vcpu, krs[reg]) = val;
		ia64_set_kr(reg, val);
	}
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val)
{
	if (reg == 24)
		printk("warning: getting ar.eflg is a no-op; no IA-32 "
		       "support\n");
	else if (reg > 7)
		return IA64_ILLOP_FAULT;
	else
		*val = PSCB(vcpu, krs[reg]);
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU protection key emulation for PV

 This first implementation reserves 1 pkr for the hypervisor key.
 On setting psr.pk the hypervisor key is loaded into pkr[15], so that
 the hypervisor may run with psr.pk==1.  The key for the hypervisor is 0.
 Furthermore the VCPU is flagged to use the protection keys.
 Currently the domU has to take care of the keys it uses, because when
 a pkr is set there is no check against the other pkr's as to whether
 the key is already in use.
**************************************************************************/

/* Loads the protection key registers from struct arch_vcpu into the
 * processor's pkr's.  Called in context_switch().
 * TODO: take care of the order of writing pkr's!
 */
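/*
 * Layout note (added for clarity, not in the original file): guest keys
 * live in pkrs[0 .. XEN_IA64_NPKRS-1], while pkrs[XEN_IA64_NPKRS] holds
 * the hypervisor key XEN_IA64_PKR_VAL written by
 * vcpu_pkr_set_psr_handling() below; that is why the loop below runs to
 * XEN_IA64_NPKRS inclusive.
 */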
void vcpu_pkr_load_regs(VCPU * vcpu)
{
	int i;

	for (i = 0; i <= XEN_IA64_NPKRS; i++)
		ia64_set_pkr(i, PSCBX(vcpu, pkrs[i]));
}

/* The function activates the pkr handling. */
static void vcpu_pkr_set_psr_handling(VCPU * vcpu)
{
	if (PSCBX(vcpu, pkr_flags) & XEN_IA64_PKR_IN_USE)
		return;

	vcpu_pkr_use_set(vcpu);
	PSCBX(vcpu, pkrs[XEN_IA64_NPKRS]) = XEN_IA64_PKR_VAL;

	/* Write the special key for the hypervisor into pkr[15]. */
	ia64_set_pkr(XEN_IA64_NPKRS, XEN_IA64_PKR_VAL);
}

/**************************************************************************
 VCPU processor status register access routines
**************************************************************************/

static void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
{
	/* only do something if mode changes */
	if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
		PSCB(vcpu, metaphysical_mode) = newmode;
		if (newmode)
			set_metaphysical_rr0();
		else
			set_virtual_rr0();
	}
}

IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
{
	vcpu_set_metaphysical_mode(vcpu, TRUE);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
{
	struct ia64_psr imm, *ipsr;
	REGS *regs = vcpu_regs(vcpu);

	//PRIVOP_COUNT_ADDR(regs,_RSM);
	// TODO: All of these bits need to be virtualized
	// TODO: Only allowed for current vcpu
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	imm = *(struct ia64_psr *)&imm24;
	// interrupt flag
	if (imm.i)
		vcpu->vcpu_info->evtchn_upcall_mask = 1;
	if (imm.ic)
		PSCB(vcpu, interrupt_collection_enabled) = 0;
	// interrupt collection flag
	//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
	// just handle psr.up and psr.pp for now
	if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
		      IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
		      IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_PK))
		return IA64_ILLOP_FAULT;
	if (imm.dfh) {
		ipsr->dfh = PSCB(vcpu, hpsr_dfh);
		PSCB(vcpu, vpsr_dfh) = 0;
	}
	if (imm.dfl)
		ipsr->dfl = 0;
	if (imm.pp) {
		// xenoprof:
		// Don't change psr.pp and ipsr->pp
		// They are manipulated by xenoprof
		// psr.pp = 1;
		// ipsr->pp = 1;
		PSCB(vcpu, vpsr_pp) = 0;	// but fool the domain if it gets psr
	}
	if (imm.up)
		ipsr->up = 0;
	if (imm.sp)
		ipsr->sp = 0;
	if (imm.be)
		ipsr->be = 0;
	if (imm.dt)
		vcpu_set_metaphysical_mode(vcpu, TRUE);
	if (imm.pk) {
		ipsr->pk = 0;
		vcpu_pkr_use_unset(vcpu);
	}
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_dt(VCPU * vcpu)
{
	vcpu_set_metaphysical_mode(vcpu, FALSE);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
{
	vcpu->vcpu_info->evtchn_upcall_mask = 0;
	PSCB(vcpu, interrupt_collection_enabled) = 1;
	return IA64_NO_FAULT;
}
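/*
 * Illustrative note (not part of the original file): rsm/ssm emulation
 * maps the guest's psr.i onto the event-channel upcall mask, so a guest
 * "ssm psr.i" arrives below as vcpu_set_psr_sm(vcpu, IA64_PSR_I).
 * Clearing the mask while an event is pending must raise
 * pending_interruption, which is why the setters re-check
 * vcpu_check_pending_interrupts() whenever interrupts go from masked to
 * enabled.
 */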
IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
{
	struct ia64_psr imm, *ipsr;
	REGS *regs = vcpu_regs(vcpu);
	u64 mask, enabling_interrupts = 0;

	//PRIVOP_COUNT_ADDR(regs,_SSM);
	// TODO: All of these bits need to be virtualized
	imm = *(struct ia64_psr *)&imm24;
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	// just handle psr.sp,pp and psr.i,ic (and user mask) for now
	mask =
	    IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
	    IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE |
	    IA64_PSR_PK;
	if (imm24 & ~mask)
		return IA64_ILLOP_FAULT;
	if (imm.dfh) {
		PSCB(vcpu, vpsr_dfh) = 1;
		ipsr->dfh = 1;
	}
	if (imm.dfl)
		ipsr->dfl = 1;
	if (imm.pp) {
		// xenoprof:
		// Don't change psr.pp and ipsr->pp
		// They are manipulated by xenoprof
		// psr.pp = 1;
		// ipsr->pp = 1;
		PSCB(vcpu, vpsr_pp) = 1;
	}
	if (imm.sp)
		ipsr->sp = 1;
	if (imm.i) {
		if (vcpu->vcpu_info->evtchn_upcall_mask) {
//printk("vcpu_set_psr_sm: psr.ic 0->1\n");
			enabling_interrupts = 1;
		}
		vcpu->vcpu_info->evtchn_upcall_mask = 0;
	}
	if (imm.ic)
		PSCB(vcpu, interrupt_collection_enabled) = 1;
	// TODO: do this faster
	if (imm.mfl)
		ipsr->mfl = 1;
	if (imm.mfh)
		ipsr->mfh = 1;
	if (imm.ac)
		ipsr->ac = 1;
	if (imm.up)
		ipsr->up = 1;
	if (imm.be)
		ipsr->be = 1;
	if (imm.dt)
		vcpu_set_metaphysical_mode(vcpu, FALSE);
	if (imm.pk) {
		vcpu_pkr_set_psr_handling(vcpu);
		ipsr->pk = 1;
	}
	if (enabling_interrupts &&
	    vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
		PSCB(vcpu, pending_interruption) = 1;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
{
	struct ia64_psr newpsr, *ipsr;
	REGS *regs = vcpu_regs(vcpu);
	u64 enabling_interrupts = 0;

	newpsr = *(struct ia64_psr *)&val;
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	ipsr->be = newpsr.be;
	ipsr->up = newpsr.up;
	ipsr->ac = newpsr.ac;
	ipsr->mfl = newpsr.mfl;
	ipsr->mfh = newpsr.mfh;

	PSCB(vcpu, interrupt_collection_enabled) = newpsr.ic;

	if (newpsr.i && vcpu->vcpu_info->evtchn_upcall_mask)
		enabling_interrupts = 1;

	vcpu->vcpu_info->evtchn_upcall_mask = !(newpsr.i);

	if (newpsr.pk) {
		vcpu_pkr_set_psr_handling(vcpu);
		ipsr->pk = 1;
	} else
		vcpu_pkr_use_unset(vcpu);

	vcpu_set_metaphysical_mode(vcpu, !(newpsr.dt && newpsr.rt));

	ipsr->dfl = newpsr.dfl;
	PSCB(vcpu, vpsr_dfh) = newpsr.dfh;
	ipsr->dfh = newpsr.dfh ? 1 : PSCB(vcpu, hpsr_dfh);

	ipsr->sp = newpsr.sp;

	/* xenoprof: Don't change ipsr->pp, it is manipulated by xenoprof */
	PSCB(vcpu, vpsr_pp) = newpsr.pp;

	if (enabling_interrupts &&
	    vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
		PSCB(vcpu, pending_interruption) = 1;

	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 val)
{
	IA64_PSR newpsr, vpsr;
	REGS *regs = vcpu_regs(vcpu);
	u64 enabling_interrupts = 0;

	/* Copy non-virtualized bits.  */
	newpsr.val = val & IA64_PSR_NON_VIRT_BITS;

	/* Bits forced to 1 (psr.si, psr.is and psr.mc are forced to 0)  */
	newpsr.val |= IA64_PSR_DI;

	newpsr.val |= IA64_PSR_I  | IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT |
		      IA64_PSR_IT | IA64_PSR_BN | IA64_PSR_DI;
	/*
	 * xenoprof:
	 * keep psr.pp unchanged for xenoprof.
	 */
	if (regs->cr_ipsr & IA64_PSR_PP)
		newpsr.val |= IA64_PSR_PP;
	else
		newpsr.val &= ~IA64_PSR_PP;

	vpsr.val = val;

	if (val & IA64_PSR_DFH) {
		newpsr.dfh = 1;
		PSCB(vcpu, vpsr_dfh) = 1;
	} else {
		newpsr.dfh = PSCB(vcpu, hpsr_dfh);
		PSCB(vcpu, vpsr_dfh) = 0;
	}

	PSCB(vcpu, vpsr_pp) = vpsr.pp;

	if (vpsr.i) {
		if (vcpu->vcpu_info->evtchn_upcall_mask)
			enabling_interrupts = 1;

		vcpu->vcpu_info->evtchn_upcall_mask = 0;

		if (enabling_interrupts &&
		    vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
			PSCB(vcpu, pending_interruption) = 1;
	} else
		vcpu->vcpu_info->evtchn_upcall_mask = 1;

	PSCB(vcpu, interrupt_collection_enabled) = vpsr.ic;
	vcpu_set_metaphysical_mode(vcpu, !(vpsr.dt && vpsr.rt && vpsr.it));

	newpsr.cpl |= max_t(u64, vpsr.cpl, CONFIG_CPL0_EMUL);

	if (PSCB(vcpu, banknum) != vpsr.bn) {
		if (vpsr.bn)
			vcpu_bsw1(vcpu);
		else
			vcpu_bsw0(vcpu);
	}
	if (vpsr.pk) {
		vcpu_pkr_set_psr_handling(vcpu);
		newpsr.pk = 1;
	} else
		vcpu_pkr_use_unset(vcpu);

	regs->cr_ipsr = newpsr.val;

	return IA64_NO_FAULT;
}
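/*
 * Note (added for clarity, not in the original file): vcpu_get_psr()
 * below synthesizes the guest-visible PSR rather than returning cr_ipsr
 * verbatim: psr.i comes from the inverted upcall mask, dt/rt/it from
 * metaphysical_mode, dfh/pp from the shadow vpsr_dfh/vpsr_pp bits, and
 * any cpl at or below CONFIG_CPL0_EMUL is reported to the guest as 0.
 */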
u64 vcpu_get_psr(VCPU * vcpu)
{
	REGS *regs = vcpu_regs(vcpu);
	PSR newpsr;
	PSR ipsr;

	ipsr.i64 = regs->cr_ipsr;

	/* Copy non-virtualized bits.  */
	newpsr.i64 = ipsr.i64 & IA64_PSR_NON_VIRT_BITS;

	/* Bits forced to 1 (psr.si and psr.is are forced to 0)  */
	newpsr.i64 |= IA64_PSR_DI;

	/* System mask.  */
	newpsr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
	newpsr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;

	if (!PSCB(vcpu, metaphysical_mode))
		newpsr.i64 |= IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT;

	newpsr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh);
	newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp);

	/* Fool cpl.  */
	if (ipsr.ia64_psr.cpl <= CONFIG_CPL0_EMUL)
		newpsr.ia64_psr.cpl = 0;
	else
		newpsr.ia64_psr.cpl = ipsr.ia64_psr.cpl;

	newpsr.ia64_psr.bn = PSCB(vcpu, banknum);

	return newpsr.i64;
}
