⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 unaligned.c

📁 xen虚拟机源代码安装包
💻 C
📖 第 1 页 / 共 4 页
字号:
 */
	if (ld.x6_op != 0x2) {
		/*
		 * This assumes little-endian byte-order.  Note that there is no "ldfpe"
		 * instruction:
		 */
		if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
		    || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
			return -1;

		DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
		DDUMP("frp_init =", &fpr_init, 2*len);
		/*
		 * XXX fixme
		 * Could optimize inlines by using ldfpX & 2 spills
		 */
		switch( ld.x6_sz ) {
			case 0:
				mem2float_extended(&fpr_init[0], &fpr_final[0]);
				mem2float_extended(&fpr_init[1], &fpr_final[1]);
				break;
			case 1:
				mem2float_integer(&fpr_init[0], &fpr_final[0]);
				mem2float_integer(&fpr_init[1], &fpr_final[1]);
				break;
			case 2:
				mem2float_single(&fpr_init[0], &fpr_final[0]);
				mem2float_single(&fpr_init[1], &fpr_final[1]);
				break;
			case 3:
				mem2float_double(&fpr_init[0], &fpr_final[0]);
				mem2float_double(&fpr_init[1], &fpr_final[1]);
				break;
		}
		DDUMP("fpr_final =", &fpr_final, 2*len);
		/*
		 * XXX fixme
		 *
		 * A possible optimization would be to drop fpr_final and directly
		 * use the storage from the saved context i.e., the actual final
		 * destination (pt_regs, switch_stack or thread structure).
		 */
		setfpreg(ld.r1, &fpr_final[0], regs);
		setfpreg(ld.imm, &fpr_final[1], regs);
	}

	/*
	 * Check for updates: only immediate updates are available for this
	 * instruction.
	 */
	if (ld.m) {
		/*
		 * the immediate is implicit given the ldsz of the operation:
		 * single: 8 (2x4) and for  all others it's 16 (2x8)
		 */
		ifa += len<<1;

		/*
		 * IMPORTANT:
		 * the fact that we force the NaT of r3 to zero is ONLY valid
		 * as long as we don't come here with a ldfpX.s.
		 * For this reason we keep this sanity check
		 */
		if (ld.x6_op == 1 || ld.x6_op == 3)
			printk(KERN_ERR "%s: register update on speculative load pair, error\n",
			       __FUNCTION__);

		setreg(ld.r3, ifa, 0, regs);
	}

	/*
	 * Invalidate ALAT entries, if any, for both registers.
	 */
	if (ld.x6_op == 0x2) {
		invala_fr(ld.r1);
		invala_fr(ld.imm);
	}
	return 0;
}

/*
 * Emulate a single unaligned floating-point load (ldfX family).
 *
 * Copies 'len' bytes (selected by ld.x6_sz via float_fsz[]) from the
 * faulting address 'ifa' into an aligned scratch buffer, converts the
 * memory image to register format with the matching mem2float_* helper,
 * and installs the result into f(ld.r1) in the saved context.
 * For advanced loads (x6_op == 0x2) nothing is emulated; only the ALAT
 * entry for the target register is invalidated.
 *
 * Returns 0 on success, -1 if the copy from user space faults.
 */
static int
emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
{
	struct ia64_fpreg fpr_init;	/* raw bytes as fetched from memory */
	struct ia64_fpreg fpr_final;	/* converted, register-format value */
	unsigned long len = float_fsz[ld.x6_sz];

	/*
	 * fr0 & fr1 don't need to be checked because Illegal Instruction
	 * faults have higher priority than unaligned faults.
	 *
	 * r0 cannot be found as the base as it would never generate an
	 * unaligned reference.
	 */

	/*
	 * make sure we get clean buffers
	 */
	memset(&fpr_init,0, sizeof(fpr_init));
	memset(&fpr_final,0, sizeof(fpr_final));

	/*
	 * ldfX.a we don't try to emulate anything but we must
	 * invalidate the ALAT entry.
	 * See comments in ldX for descriptions on how the various loads are handled.
	 */
	if (ld.x6_op != 0x2) {
		if (copy_from_user(&fpr_init, (void __user *) ifa, len))
			return -1;

		DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
		DDUMP("fpr_init =", &fpr_init, len);
		/*
		 * we only do something for x6_op={0,8,9}
		 */
		switch( ld.x6_sz ) {
			case 0:
				mem2float_extended(&fpr_init, &fpr_final);
				break;
			case 1:
				mem2float_integer(&fpr_init, &fpr_final);
				break;
			case 2:
				mem2float_single(&fpr_init, &fpr_final);
				break;
			case 3:
				mem2float_double(&fpr_init, &fpr_final);
				break;
		}
		DDUMP("fpr_final =", &fpr_final, len);
		/*
		 * XXX fixme
		 *
		 * A possible optimization would be to drop fpr_final and directly
		 * use the storage from the saved context i.e., the actual final
		 * destination (pt_regs, switch_stack or thread structure).
		 */
		setfpreg(ld.r1, &fpr_final, regs);
	}

	/*
	 * check for updates on any loads
	 */
	if (ld.op == 0x7 || ld.m)
		emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);

	/*
	 * invalidate ALAT entry in case of advanced floating point loads
	 */
	if (ld.x6_op == 0x2)
		invala_fr(ld.r1);

	return 0;
}

/*
 * Emulate an unaligned floating-point store (stfX family).
 *
 * Extracts f(ld.imm) from the saved context, converts it from register
 * format to its memory image with the matching float2mem_* helper, and
 * writes 'len' bytes to the faulting address 'ifa'.  Handles the
 * immediate-update form (op == 0x7) by writing the post-incremented
 * address back to r3.  The whole ALAT is flushed at the end (see the
 * comment below on the missing multiple-invalidate primitive).
 *
 * Returns 0 on success, -1 if the copy to user space faults.
 */
static int
emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
{
	struct ia64_fpreg fpr_init;	/* register-format value from the saved context */
	struct ia64_fpreg fpr_final;	/* memory-image form to be stored */
	unsigned long len = float_fsz[ld.x6_sz];

	/*
	 * make sure we get clean buffers
	 */
	memset(&fpr_init,0, sizeof(fpr_init));
	memset(&fpr_final,0, sizeof(fpr_final));

	/*
	 * if we get to this handler, Nat bits on both r3 and r2 have already
	 * been checked. so we don't need to do it
	 *
	 * extract the value to be stored
	 */
	getfpreg(ld.imm, &fpr_init, regs);
	/*
	 * during this step, we extract the spilled registers from the saved
	 * context i.e., we refill. Then we store (no spill) to temporary
	 * aligned location
	 */
	switch( ld.x6_sz ) {
		case 0:
			float2mem_extended(&fpr_init, &fpr_final);
			break;
		case 1:
			float2mem_integer(&fpr_init, &fpr_final);
			break;
		case 2:
			float2mem_single(&fpr_init, &fpr_final);
			break;
		case 3:
			float2mem_double(&fpr_init, &fpr_final);
			break;
	}
	DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
	DDUMP("fpr_init =", &fpr_init, len);
	DDUMP("fpr_final =", &fpr_final, len);

	if (copy_to_user((void __user *) ifa, &fpr_final, len))
		return -1;

	/*
	 * stfX [r3]=r2,imm(9)
	 *
	 * NOTE:
	 * ld.r3 can never be r0, because r0 would not generate an
	 * unaligned access.
	 */
	if (ld.op == 0x7) {
		unsigned long imm;

		/*
		 * form imm9: [12:6] contain first 7bits
		 */
		imm = ld.x << 7 | ld.r1;
		/*
		 * sign extend (8bits) if m set
		 */
		if (ld.m)
			imm |= SIGN_EXT9;
		/*
		 * ifa == r3 (NaT is necessarily cleared)
		 */
		ifa += imm;
		DPRINT("imm=%lx r3=%lx\n", imm, ifa);

		setreg(ld.r3, ifa, 0, regs);
	}
	/*
	 * we don't have alat_invalidate_multiple() so we need
	 * to do the complete flush :-<<
	 */
	ia64_invala();

	return 0;
}

/*
 * Make sure we log the unaligned access, so that user/sysadmin can notice it and
 * eventually fix the program.  However, we don't want to do that for every access so we
 * pace it with jiffies.  This isn't really MP-safe, but it doesn't really have to be
 * either...
 *
 * Returns 1 if the caller may emit a log message now, 0 if it should stay
 * quiet.  Allows bursts of up to 4 messages, then suppresses output until
 * more than 5*HZ jiffies have elapsed since the last logged access.
 * NOTE(review): 'count' and 'last_time' are unsynchronized statics — racy
 * on SMP by design, as the comment above concedes.
 */
static int
within_logging_rate_limit (void)
{
	static unsigned long count, last_time;

	if (jiffies - last_time > 5*HZ)
		count = 0;
	if (++count < 5) {
		last_time = jiffies;
		return 1;
	}
	return 0;

}
#endif /* XEN */

/*
 * Entry point for unaligned-reference faults.
 *
 * Under XEN this is only a stub that logs a message.  Otherwise: fetches
 * the faulting instruction bundle, extracts the instruction in slot
 * ipsr->ri, and dispatches on its opcode — either emulating the access
 * (emulate_load_int/emulate_store_int/emulate_load_float/
 * emulate_load_floatpair/emulate_store_float), forcing a retry with
 * PSR.ed set (speculative forms), or refusing.  On success the saved IP
 * is advanced past the instruction; on failure a kernel access is routed
 * through its exception-table fixup (or dies), and a user access gets
 * SIGBUS with BUS_ADRALN.
 *
 * Kernel-mode accesses are logged (rate-limited) and run under
 * set_fs(KERNEL_DS), restored at 'done'.  User-mode behavior is governed
 * by the per-thread IA64_THREAD_UAC_SIGBUS / IA64_THREAD_UAC_NOPRINT
 * flags.
 */
void
ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
{
#ifdef XEN
printk("ia64_handle_unaligned: called, not working yet\n");
#else
	struct ia64_psr *ipsr = ia64_psr(regs);
	mm_segment_t old_fs = get_fs();
	unsigned long bundle[2];
	unsigned long opcode;
	struct siginfo si;
	const struct exception_table_entry *eh = NULL;
	union {
		unsigned long l;
		load_store_t insn;
	} u;
	int ret = -1;

	if (ia64_psr(regs)->be) {
		/* we don't support big-endian accesses */
		die_if_kernel("big-endian unaligned accesses are not supported", regs, 0);
		goto force_sigbus;
	}

	/*
	 * Treat kernel accesses for which there is an exception handler entry the same as
	 * user-level unaligned accesses.  Otherwise, a clever program could trick this
	 * handler into reading an arbitrary kernel addresses...
	 */
	if (!user_mode(regs))
		eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
	if (user_mode(regs) || eh) {
		if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
			goto force_sigbus;

		if (!(current->thread.flags & IA64_THREAD_UAC_NOPRINT)
		    && within_logging_rate_limit())
		{
			char buf[200];	/* comm[] is at most 16 bytes... */
			size_t len;

			len = snprintf(buf, sizeof(buf), "%s(%d): unaligned access to 0x%016lx, "
				      "ip=0x%016lx\n\r", current->comm, current->pid,
				      ifa, regs->cr_iip + ipsr->ri);
			/*
			 * Don't call tty_write_message() if we're in the kernel; we might
			 * be holding locks...
			 */
			if (user_mode(regs))
				tty_write_message(current->signal->tty, buf);
			buf[len-1] = '\0';	/* drop '\r' */
			printk(KERN_WARNING "%s", buf);	/* watch for command names containing %s */
		}
	} else {
		if (within_logging_rate_limit())
			printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
			       ifa, regs->cr_iip + ipsr->ri);
		set_fs(KERNEL_DS);
	}

	DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
	       regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);

	if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
		goto failure;

	/*
	 * extract the instruction from the bundle given the slot number
	 */
	switch (ipsr->ri) {
	      case 0: u.l = (bundle[0] >>  5); break;
	      case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break;
	      case 2: u.l = (bundle[1] >> 23); break;
	}
	opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;

	DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
	       "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,
	       u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op);

	/*
	 * IMPORTANT:
	 * Notice that the switch statement DOES not cover all possible instructions
	 * that DO generate unaligned references. This is made on purpose because for some
	 * instructions it DOES NOT make sense to try and emulate the access. Sometimes it
	 * is WRONG to try and emulate. Here is a list of instruction we don't emulate i.e.,
	 * the program will get a signal and die:
	 *
	 *	load/store:
	 *		- ldX.spill
	 *		- stX.spill
	 *	Reason: RNATs are based on addresses
	 *		- ld16
	 *		- st16
	 *	Reason: ld16 and st16 are supposed to occur in a single
	 *		memory op
	 *
	 *	synchronization:
	 *		- cmpxchg
	 *		- fetchadd
	 *		- xchg
	 *	Reason: ATOMIC operations cannot be emulated properly using multiple
	 *	        instructions.
	 *
	 *	speculative loads:
	 *		- ldX.sZ
	 *	Reason: side effects, code must be ready to deal with failure so simpler
	 *		to let the load fail.
	 * ---------------------------------------------------------------------------------
	 * XXX fixme
	 *
	 * I would like to get rid of this switch case and do something
	 * more elegant.
	 */
	switch (opcode) {
	      case LDS_OP:
	      case LDSA_OP:
		if (u.insn.x)
			/* oops, really a semaphore op (cmpxchg, etc) */
			goto failure;
		/* no break */
	      case LDS_IMM_OP:
	      case LDSA_IMM_OP:
	      case LDFS_OP:
	      case LDFSA_OP:
	      case LDFS_IMM_OP:
		/*
		 * The instruction will be retried with deferred exceptions turned on, and
		 * we should get Nat bit installed
		 *
		 * IMPORTANT: When PSR_ED is set, the register & immediate update forms
		 * are actually executed even though the operation failed. So we don't
		 * need to take care of this.
		 */
		DPRINT("forcing PSR_ED\n");
		regs->cr_ipsr |= IA64_PSR_ED;
		goto done;

	      case LD_OP:
	      case LDA_OP:
	      case LDBIAS_OP:
	      case LDACQ_OP:
	      case LDCCLR_OP:
	      case LDCNC_OP:
	      case LDCCLRACQ_OP:
		if (u.insn.x)
			/* oops, really a semaphore op (cmpxchg, etc) */
			goto failure;
		/* no break */
	      case LD_IMM_OP:
	      case LDA_IMM_OP:
	      case LDBIAS_IMM_OP:
	      case LDACQ_IMM_OP:
	      case LDCCLR_IMM_OP:
	      case LDCNC_IMM_OP:
	      case LDCCLRACQ_IMM_OP:
		ret = emulate_load_int(ifa, u.insn, regs);
		break;

	      case ST_OP:
	      case STREL_OP:
		if (u.insn.x)
			/* oops, really a semaphore op (cmpxchg, etc) */
			goto failure;
		/* no break */
	      case ST_IMM_OP:
	      case STREL_IMM_OP:
		ret = emulate_store_int(ifa, u.insn, regs);
		break;

	      case LDF_OP:
	      case LDFA_OP:
	      case LDFCCLR_OP:
	      case LDFCNC_OP:
	      case LDF_IMM_OP:
	      case LDFA_IMM_OP:
	      case LDFCCLR_IMM_OP:
	      case LDFCNC_IMM_OP:
		if (u.insn.x)
			ret = emulate_load_floatpair(ifa, u.insn, regs);
		else
			ret = emulate_load_float(ifa, u.insn, regs);
		break;

	      case STF_OP:
	      case STF_IMM_OP:
		ret = emulate_store_float(ifa, u.insn, regs);
		break;

	      default:
		goto failure;
	}
	DPRINT("ret=%d\n", ret);
	if (ret)
		goto failure;

	if (ipsr->ri == 2)
		/*
		 * given today's architecture this case is not likely to happen because a
		 * memory access instruction (M) can never be in the last slot of a
		 * bundle. But let's keep it for now.
		 */
		regs->cr_iip += 16;
	ipsr->ri = (ipsr->ri + 1) & 0x3;

	DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
  done:
	set_fs(old_fs);		/* restore original address limit */
	return;

  failure:
	/* something went wrong... */
	if (!user_mode(regs)) {
		if (eh) {
			ia64_handle_exception(regs, eh);
			goto done;
		}
		die_if_kernel("error during unaligned kernel access\n", regs, ret);
		/* NOT_REACHED */
	}
  force_sigbus:
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRALN;
	si.si_addr = (void __user *) ifa;
	si.si_flags = 0;
	si.si_isr = 0;
	si.si_imm = 0;
	force_sig_info(SIGBUS, &si, current);
	goto done;
#endif
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -