
📄 unaligned.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 / 4
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx >= sof) {
		/* read of out-of-frame register returns an undefined value; 0 in our case.  */
		DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
		goto fail;
	}

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
	       r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);

	on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
	addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
	if (addr >= kbs) {
		/* the register is on the kernel backing store: easy... */
		*val = *addr;
		if (nat) {
			rnat_addr = ia64_rse_rnat_addr(addr);
			if ((unsigned long) rnat_addr >= sw->ar_bspstore)
				rnat_addr = &sw->ar_rnat;
			nat_mask = 1UL << ia64_rse_slot_num(addr);
			*nat = (*rnat_addr & nat_mask) != 0;
		}
		return;
	}

	if (!user_stack(current, regs)) {
		DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
		goto fail;
	}

	bspstore = (unsigned long *)regs->ar_bspstore;
	ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
	bsp     = ia64_rse_skip_regs(ubs_end, -sof);
	addr    = ia64_rse_skip_regs(bsp, ridx);

	DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);

	ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
	if (nat) {
		rnat_addr = ia64_rse_rnat_addr(addr);
		nat_mask = 1UL << ia64_rse_slot_num(addr);

		DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);

		ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
		*nat = (rnats & nat_mask) != 0;
	}
	return;

  fail:
	*val = 0;
	if (nat)
		*nat = 0;
	return;
}
#endif

#ifdef XEN
void
#else
static void
#endif
setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long addr;
	unsigned long bitmask;
	unsigned long *unat;

	/*
	 * First takes care of stacked registers
	 */
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Using r0 as a target raises a General Exception fault which has higher priority
	 * than the Unaligned Reference fault.
	 */

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	if (GR_IN_SW(regnum)) {
		addr = (unsigned long)sw;
		unat = &sw->ar_unat;
	} else {
		addr = (unsigned long)regs;
#if defined(XEN)
		unat = &regs->eml_unat;
#else
		unat = &sw->caller_unat;
#endif
	}
	DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
	       addr, unat == &sw->ar_unat ? "yes" : "no", GR_OFFS(regnum));
	/*
	 * add offset from base of struct
	 * and do it !
	 */
	addr += GR_OFFS(regnum);

	*(unsigned long *)addr = val;

	/*
	 * We need to clear the corresponding UNAT bit to fully emulate the load
	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
	 */
	bitmask   = 1UL << (addr >> 3 & 0x3f);
	DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat);
	if (nat) {
		*unat |= bitmask;
	} else {
		*unat &= ~bitmask;
	}
	DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat, *unat);
}

/*
 * Return the (rotated) index for floating point register REGNUM (REGNUM must be in the
 * range from 32-127; the result is in the range from 0-95).
 */
static inline unsigned long
fph_index (struct pt_regs *regs, long regnum)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}

#ifndef XEN
static void
setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *)regs - 1;
	unsigned long addr;

	/*
	 * From EAS-2.5: FPDisableFault has higher priority than Unaligned
	 * Fault. Thus, when we get here, we know the partition is enabled.
	 * To update f32-f127, there are three choices:
	 *
	 *	(1) save f32-f127 to thread.fph and update the values there
	 *	(2) use a gigantic switch statement to directly access the registers
	 *	(3) generate code on the fly to update the desired register
	 *
	 * For now, we are using approach (1).
	 */
	if (regnum >= IA64_FIRST_ROTATING_FR) {
		ia64_sync_fph(current);
#ifdef XEN
		current->arch._thread.fph[fph_index(regs, regnum)] = *fpval;
#else
		current->thread.fph[fph_index(regs, regnum)] = *fpval;
#endif
	} else {
		/*
		 * pt_regs or switch_stack ?
		 */
		if (FR_IN_SW(regnum)) {
			addr = (unsigned long)sw;
		} else {
			addr = (unsigned long)regs;
		}

		DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum));

		addr += FR_OFFS(regnum);
		*(struct ia64_fpreg *)addr = *fpval;

		/*
		 * mark the low partition as being used now
		 *
		 * It is highly unlikely that this bit is not already set, but
		 * let's do it for safety.
		 */
		regs->cr_ipsr |= IA64_PSR_MFL;
	}
}
#endif /* XEN */

/*
 * These 2 inline functions generate the spilled versions of the constant floating point
 * registers which can be used with stfX
 */
static inline void
float_spill_f0 (struct ia64_fpreg *final)
{
	ia64_stf_spill(final, 0);
}

static inline void
float_spill_f1 (struct ia64_fpreg *final)
{
	ia64_stf_spill(final, 1);
}

#ifndef XEN
static void
getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long addr;

	/*
	 * From EAS-2.5: FPDisableFault has higher priority than
	 * Unaligned Fault. Thus, when we get here, we know the partition is
	 * enabled.
	 *
	 * When regnum > 31, the register is still live and we need to force a save
	 * to current->thread.fph to get access to it.  See discussion in setfpreg()
	 * for reasons and other ways of doing this.
	 */
	if (regnum >= IA64_FIRST_ROTATING_FR) {
		ia64_flush_fph(current);
#ifdef XEN
		*fpval = current->arch._thread.fph[fph_index(regs, regnum)];
#else
		*fpval = current->thread.fph[fph_index(regs, regnum)];
#endif
	} else {
		/*
		 * f0 = 0.0, f1 = 1.0. Those registers are constant and are thus
		 * not saved, we must generate their spilled form on the fly
		 */
		switch (regnum) {
		case 0:
			float_spill_f0(fpval);
			break;
		case 1:
			float_spill_f1(fpval);
			break;
		default:
			/*
			 * pt_regs or switch_stack ?
			 */
			addr = FR_IN_SW(regnum) ? (unsigned long)sw
						: (unsigned long)regs;

			DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n",
			       FR_IN_SW(regnum), addr, FR_OFFS(regnum));

			addr  += FR_OFFS(regnum);
			*fpval = *(struct ia64_fpreg *)addr;
		}
	}
}
#else
void
getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
{
	// Take floating register rotation into consideration
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case reg:				\
		ia64_stf_spill(fpval, reg);	\
		break
#define CASE_SAVED_FP(reg)					\
	case reg:						\
		fpval->u.bits[0] = regs->f##reg.u.bits[0];	\
		fpval->u.bits[1] = regs->f##reg.u.bits[1];	\
		break

	switch (regnum) {
		CASE_FIXED_FP(0);   CASE_FIXED_FP(1);   CASE_FIXED_FP(2);   CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);   CASE_FIXED_FP(5);
		CASE_SAVED_FP(6);   CASE_SAVED_FP(7);   CASE_SAVED_FP(8);   CASE_SAVED_FP(9);
		CASE_SAVED_FP(10);  CASE_SAVED_FP(11);
		CASE_FIXED_FP(12);  CASE_FIXED_FP(13);  CASE_FIXED_FP(14);  CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);  CASE_FIXED_FP(17);  CASE_FIXED_FP(18);  CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);  CASE_FIXED_FP(21);  CASE_FIXED_FP(22);  CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);  CASE_FIXED_FP(25);  CASE_FIXED_FP(26);  CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);  CASE_FIXED_FP(29);  CASE_FIXED_FP(30);  CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);  CASE_FIXED_FP(33);  CASE_FIXED_FP(34);  CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);  CASE_FIXED_FP(37);  CASE_FIXED_FP(38);  CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);  CASE_FIXED_FP(41);  CASE_FIXED_FP(42);  CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);  CASE_FIXED_FP(45);  CASE_FIXED_FP(46);  CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);  CASE_FIXED_FP(49);  CASE_FIXED_FP(50);  CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);  CASE_FIXED_FP(53);  CASE_FIXED_FP(54);  CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);  CASE_FIXED_FP(57);  CASE_FIXED_FP(58);  CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);  CASE_FIXED_FP(61);  CASE_FIXED_FP(62);  CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);  CASE_FIXED_FP(65);  CASE_FIXED_FP(66);  CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);  CASE_FIXED_FP(69);  CASE_FIXED_FP(70);  CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);  CASE_FIXED_FP(73);  CASE_FIXED_FP(74);  CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);  CASE_FIXED_FP(77);  CASE_FIXED_FP(78);  CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);  CASE_FIXED_FP(81);  CASE_FIXED_FP(82);  CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);  CASE_FIXED_FP(85);  CASE_FIXED_FP(86);  CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);  CASE_FIXED_FP(89);  CASE_FIXED_FP(90);  CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);  CASE_FIXED_FP(93);  CASE_FIXED_FP(94);  CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);  CASE_FIXED_FP(97);  CASE_FIXED_FP(98);  CASE_FIXED_FP(99);
		CASE_FIXED_FP(100); CASE_FIXED_FP(101); CASE_FIXED_FP(102); CASE_FIXED_FP(103);
		CASE_FIXED_FP(104); CASE_FIXED_FP(105); CASE_FIXED_FP(106); CASE_FIXED_FP(107);
		CASE_FIXED_FP(108); CASE_FIXED_FP(109); CASE_FIXED_FP(110); CASE_FIXED_FP(111);
		CASE_FIXED_FP(112); CASE_FIXED_FP(113); CASE_FIXED_FP(114); CASE_FIXED_FP(115);
		CASE_FIXED_FP(116); CASE_FIXED_FP(117); CASE_FIXED_FP(118); CASE_FIXED_FP(119);
		CASE_FIXED_FP(120); CASE_FIXED_FP(121); CASE_FIXED_FP(122); CASE_FIXED_FP(123);
		CASE_FIXED_FP(124); CASE_FIXED_FP(125); CASE_FIXED_FP(126); CASE_FIXED_FP(127);
	}
#undef CASE_FIXED_FP
#undef CASE_SAVED_FP
}

void
setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
{
	// Take floating register rotation into consideration
	ia64_fph_enable();
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case reg:				\
		ia64_ldf_fill(reg, fpval);	\
		break
#define CASE_RESTORED_FP(reg)					\
	case reg:						\
		regs->f##reg.u.bits[0] = fpval->u.bits[0];	\
		regs->f##reg.u.bits[1] = fpval->u.bits[1];	\
		break

	switch (regnum) {
		CASE_FIXED_FP(2);     CASE_FIXED_FP(3);     CASE_FIXED_FP(4);     CASE_FIXED_FP(5);
		CASE_RESTORED_FP(6);  CASE_RESTORED_FP(7);  CASE_RESTORED_FP(8);  CASE_RESTORED_FP(9);
		CASE_RESTORED_FP(10); CASE_RESTORED_FP(11);
		CASE_FIXED_FP(12);    CASE_FIXED_FP(13);    CASE_FIXED_FP(14);    CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);    CASE_FIXED_FP(17);    CASE_FIXED_FP(18);    CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);    CASE_FIXED_FP(21);    CASE_FIXED_FP(22);    CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);    CASE_FIXED_FP(25);    CASE_FIXED_FP(26);    CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);    CASE_FIXED_FP(29);    CASE_FIXED_FP(30);    CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);    CASE_FIXED_FP(33);    CASE_FIXED_FP(34);    CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);    CASE_FIXED_FP(37);    CASE_FIXED_FP(38);    CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);    CASE_FIXED_FP(41);    CASE_FIXED_FP(42);    CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);    CASE_FIXED_FP(45);    CASE_FIXED_FP(46);    CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);    CASE_FIXED_FP(49);    CASE_FIXED_FP(50);    CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);    CASE_FIXED_FP(53);    CASE_FIXED_FP(54);    CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);    CASE_FIXED_FP(57);    CASE_FIXED_FP(58);    CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);    CASE_FIXED_FP(61);    CASE_FIXED_FP(62);    CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);    CASE_FIXED_FP(65);    CASE_FIXED_FP(66);    CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
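
The listing above cuts off here at the end of page 1, in the middle of setfpreg()'s switch. Throughout this page, accesses to rotating registers go through rotate_reg()/fph_index() to map an architectural register number onto its physical slot. rotate_reg() itself is defined earlier in unaligned.c and is not shown on this page; the standalone sketch below only assumes the add-and-wrap behavior implied by the calls above, with a hypothetical rrb.fr value and register number chosen for illustration, and shows how an architectural register such as f34 lands in the 96-entry fph[] array used by fph_index().

#include <stdio.h>

#define IA64_FIRST_ROTATING_FR	32	/* f32..f127 form the rotating FP partition */

/* assumed behavior of rotate_reg(): add the rotation base, wrap at the partition size */
static unsigned long
rotate_reg (unsigned long size, unsigned long rrb, unsigned long idx)
{
	idx += rrb;
	if (idx >= size)
		idx -= size;
	return idx;
}

int main (void)
{
	unsigned long rrb_fr = 3;	/* hypothetical rotation base, normally taken from cr.ifs */
	unsigned long regnum = 34;	/* architectural register f34 */

	/* same computation as fph_index(): index into the 96-entry fph[] array */
	unsigned long idx = rotate_reg(96, rrb_fr, regnum - IA64_FIRST_ROTATING_FR);

	printf("f%lu maps to fph[%lu] when rrb.fr = %lu\n", regnum, idx, rrb_fr);
	return 0;
}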

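Similarly, setreg() above sets or clears one bit of the relevant UNAT register, selected by bits {8:3} of the register's save address (the "UNAT bit_pos = GR[r3]{8:3}" rule quoted in its comment). The minimal standalone sketch below illustrates only that bit arithmetic; unat_mask() is an illustrative helper and the save address is a hypothetical 8-byte-aligned value chosen to show the mapping.

#include <stdio.h>

/* same expression as in setreg(): UNAT bit position = addr{8:3} */
static unsigned long
unat_mask (unsigned long save_addr)
{
	return 1UL << ((save_addr >> 3) & 0x3f);
}

int main (void)
{
	unsigned long unat = 0;
	unsigned long addr = 0xa000000000010028UL;	/* hypothetical 8-byte-aligned save slot */

	unat |= unat_mask(addr);			/* register written with its NaT bit set */
	printf("addr=0x%lx -> UNAT bit %lu, unat=0x%lx\n",
	       addr, (addr >> 3) & 0x3f, unat);

	unat &= ~unat_mask(addr);			/* register written with a valid value */
	printf("after clearing: unat=0x%lx\n", unat);
	return 0;
}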