⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 align.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
#define	EVLHHOUSPLAT	0x06#define	EVLHHOSSPLAT	0x07#define	EVLWHE		0x08#define	EVLWHOU		0x0A#define	EVLWHOS		0x0B#define	EVLWWSPLAT	0x0C#define	EVLWHSPLAT	0x0E#define	EVSTDD		0x10#define	EVSTDW		0x11#define	EVSTDH		0x12#define	EVSTWHE		0x18#define	EVSTWHO		0x1A#define	EVSTWWE		0x1C#define	EVSTWWO		0x1E/* * Emulate SPE loads and stores. * Only Book-E has these instructions, and it does true little-endian, * so we don't need the address swizzling. */static int emulate_spe(struct pt_regs *regs, unsigned int reg,		       unsigned int instr){	int t, ret;	union {		u64 ll;		u32 w[2];		u16 h[4];		u8 v[8];	} data, temp;	unsigned char __user *p, *addr;	unsigned long *evr = &current->thread.evr[reg];	unsigned int nb, flags;	instr = (instr >> 1) & 0x1f;	/* DAR has the operand effective address */	addr = (unsigned char __user *)regs->dar;	nb = spe_aligninfo[instr].len;	flags = spe_aligninfo[instr].flags;	/* Verify the address of the operand */	if (unlikely(user_mode(regs) &&		     !access_ok((flags & ST ? 
VERIFY_WRITE : VERIFY_READ),				addr, nb)))		return -EFAULT;	/* userland only */	if (unlikely(!user_mode(regs)))		return 0;	flush_spe_to_thread(current);	/* If we are loading, get the data from user space, else	 * get it from register values	 */	if (flags & ST) {		data.ll = 0;		switch (instr) {		case EVSTDD:		case EVSTDW:		case EVSTDH:			data.w[0] = *evr;			data.w[1] = regs->gpr[reg];			break;		case EVSTWHE:			data.h[2] = *evr >> 16;			data.h[3] = regs->gpr[reg] >> 16;			break;		case EVSTWHO:			data.h[2] = *evr & 0xffff;			data.h[3] = regs->gpr[reg] & 0xffff;			break;		case EVSTWWE:			data.w[1] = *evr;			break;		case EVSTWWO:			data.w[1] = regs->gpr[reg];			break;		default:			return -EINVAL;		}	} else {		temp.ll = data.ll = 0;		ret = 0;		p = addr;		switch (nb) {		case 8:			ret |= __get_user_inatomic(temp.v[0], p++);			ret |= __get_user_inatomic(temp.v[1], p++);			ret |= __get_user_inatomic(temp.v[2], p++);			ret |= __get_user_inatomic(temp.v[3], p++);		case 4:			ret |= __get_user_inatomic(temp.v[4], p++);			ret |= __get_user_inatomic(temp.v[5], p++);		case 2:			ret |= __get_user_inatomic(temp.v[6], p++);			ret |= __get_user_inatomic(temp.v[7], p++);			if (unlikely(ret))				return -EFAULT;		}		switch (instr) {		case EVLDD:		case EVLDW:		case EVLDH:			data.ll = temp.ll;			break;		case EVLHHESPLAT:			data.h[0] = temp.h[3];			data.h[2] = temp.h[3];			break;		case EVLHHOUSPLAT:		case EVLHHOSSPLAT:			data.h[1] = temp.h[3];			data.h[3] = temp.h[3];			break;		case EVLWHE:			data.h[0] = temp.h[2];			data.h[2] = temp.h[3];			break;		case EVLWHOU:		case EVLWHOS:			data.h[1] = temp.h[2];			data.h[3] = temp.h[3];			break;		case EVLWWSPLAT:			data.w[0] = temp.w[1];			data.w[1] = temp.w[1];			break;		case EVLWHSPLAT:			data.h[0] = temp.h[2];			data.h[1] = temp.h[2];			data.h[2] = temp.h[3];			data.h[3] = temp.h[3];			break;		default:			return -EINVAL;		}	}	if (flags & SW) {		switch (flags & 0xf0) {		case E8:			SWAP(data.v[0], data.v[7]);			SWAP(data.v[1], data.v[6]);			
SWAP(data.v[2], data.v[5]);			SWAP(data.v[3], data.v[4]);			break;		case E4:			SWAP(data.v[0], data.v[3]);			SWAP(data.v[1], data.v[2]);			SWAP(data.v[4], data.v[7]);			SWAP(data.v[5], data.v[6]);			break;		/* Its half word endian */		default:			SWAP(data.v[0], data.v[1]);			SWAP(data.v[2], data.v[3]);			SWAP(data.v[4], data.v[5]);			SWAP(data.v[6], data.v[7]);			break;		}	}	if (flags & SE) {		data.w[0] = (s16)data.h[1];		data.w[1] = (s16)data.h[3];	}	/* Store result to memory or update registers */	if (flags & ST) {		ret = 0;		p = addr;		switch (nb) {		case 8:			ret |= __put_user_inatomic(data.v[0], p++);			ret |= __put_user_inatomic(data.v[1], p++);			ret |= __put_user_inatomic(data.v[2], p++);			ret |= __put_user_inatomic(data.v[3], p++);		case 4:			ret |= __put_user_inatomic(data.v[4], p++);			ret |= __put_user_inatomic(data.v[5], p++);		case 2:			ret |= __put_user_inatomic(data.v[6], p++);			ret |= __put_user_inatomic(data.v[7], p++);		}		if (unlikely(ret))			return -EFAULT;	} else {		*evr = data.w[0];		regs->gpr[reg] = data.w[1];	}	return 1;}#endif /* CONFIG_SPE *//* * Called on alignment exception. 
Attempts to fixup
 *
 * Return 1 on success
 * Return 0 if unable to handle the interrupt
 * Return -EFAULT if data address is bad
 */
int fix_alignment(struct pt_regs *regs)
{
	/* Initialize 'instr': it is only fetched below on
	 * CPU_FTR_NODSISRALIGN parts, but the CONFIG_SPE dispatch reads
	 * it unconditionally — without the initializer that read would
	 * be undefined behavior on CPUs with a usable DSISR.
	 */
	unsigned int instr = 0;
	unsigned int nb, flags;
	unsigned int reg, areg;
	unsigned int dsisr;
	unsigned char __user *addr;
	unsigned long p, swiz;
	int ret;		/* accumulates user-access failures */
	union {
		u64 ll;
		double dd;
		unsigned char v[8];
		struct {
			unsigned hi32;
			int	 low32;
		} x32;
		struct {
			unsigned char hi48[6];
			short	      low16;
		} x16;
	} data;

	/*
	 * We require a complete register set, if not, then our assembly
	 * is broken
	 */
	CHECK_FULL_REGS(regs);

	dsisr = regs->dsisr;

	/* Some processors don't provide us with a DSISR we can use here,
	 * let's make one up from the instruction
	 */
	if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
		unsigned long pc = regs->nip;

		/* PPC_LE "little-endian" mode swizzles instruction
		 * addresses; undo it to fetch the right word.
		 */
		if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
			pc ^= 4;
		if (unlikely(__get_user_inatomic(instr,
						 (unsigned int __user *)pc)))
			return -EFAULT;
		if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
			instr = cpu_to_le32(instr);
		dsisr = make_dsisr(instr);
	}

	/* extract the operation and registers from the dsisr */
	reg = (dsisr >> 5) & 0x1f;	/* source/dest register */
	areg = dsisr & 0x1f;		/* register to update */

#ifdef CONFIG_SPE
	/* Primary opcode 4 is the SPE extension; 'instr' is non-zero
	 * here only when it was fetched above (NODSISRALIGN CPUs, which
	 * is what SPE-capable cores are).
	 */
	if ((instr >> 26) == 0x4)
		return emulate_spe(regs, reg, instr);
#endif

	/* Re-purpose 'instr' as the index into aligninfo[] */
	instr = (dsisr >> 10) & 0x7f;
	instr |= (dsisr >> 13) & 0x60;

	/* Lookup the operation in our table */
	nb = aligninfo[instr].len;
	flags = aligninfo[instr].flags;

	/* Byteswap little endian loads and stores */
	swiz = 0;
	if (regs->msr & MSR_LE) {
		flags ^= SW;
		/*
		 * So-called "PowerPC little endian" mode works by
		 * swizzling addresses rather than by actually doing
		 * any byte-swapping.  To emulate this, we XOR each
		 * byte address with 7.  We also byte-swap, because
		 * the processor's address swizzling depends on the
		 * operand size (it xors the address with 7 for bytes,
		 * 6 for halfwords, 4 for words, 0 for doublewords) but
		 * we will xor with 7 and load/store each byte separately.
		 */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			swiz = 7;
	}

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

	/* A size of 0 indicates an instruction we don't support, with
	 * the exception of DCBZ which is handled as a special case here
	 */
	if (instr == DCBZ)
		return emulate_dcbz(regs, addr);
	if (unlikely(nb == 0))
		return 0;

	/* Load/Store Multiple instructions are handled in their own
	 * function
	 */
	if (flags & M)
		return emulate_multiple(regs, addr, reg, nb,
					flags, instr, swiz);

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* Force the fprs into the save area so we can reference them */
	if (flags & F) {
		/* userland only */
		if (unlikely(!user_mode(regs)))
			return 0;
		flush_fp_to_thread(current);
	}

	/* Special case for 16-byte FP loads and stores */
	if (nb == 16)
		return emulate_fp_pair(regs, addr, reg, flags);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (!(flags & ST)) {
		data.ll = 0;
		ret = 0;
		p = (unsigned long) addr;

		/* Cases deliberately fall through so the operand ends up
		 * right-justified in data.v[].
		 */
		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
			/* fallthrough */
		case 4:
			ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
			/* fallthrough */
		case 2:
			ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
			if (unlikely(ret))
				return -EFAULT;
		}
	} else if (flags & F) {
		data.dd = current->thread.fpr[reg];
		if (flags & S) {
			/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
			preempt_disable();
			enable_kernel_fp();
			cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
			preempt_enable();
#else
			return 0;
#endif
		}
	} else
		data.ll = regs->gpr[reg];

	/* Byte-swap the operand in place when emulating LE accesses */
	if (flags & SW) {
		switch (nb) {
		case 8:
			SWAP(data.v[0], data.v[7]);
			SWAP(data.v[1], data.v[6]);
			SWAP(data.v[2], data.v[5]);
			SWAP(data.v[3], data.v[4]);
			break;
		case 4:
			SWAP(data.v[4], data.v[7]);
			SWAP(data.v[5], data.v[6]);
			break;
		case 2:
			SWAP(data.v[6], data.v[7]);
			break;
		}
	}

	/* Perform other misc operations like sign extension
	 * or floating point single precision conversion
	 */
	switch (flags & ~(U|SW)) {
	case LD+SE:	/* sign extending integer loads */
	case LD+F+SE:	/* sign extend for lfiwax */
		if ( nb == 2 )
			data.ll = data.x16.low16;
		else	/* nb must be 4 */
			data.ll = data.x32.low32;
		break;

	/* Single-precision FP load requires conversion... */
	case LD+F+S:
#ifdef CONFIG_PPC_FPU
		preempt_disable();
		enable_kernel_fp();
		cvt_fd((float *)&data.v[4], &data.dd, &current->thread);
		preempt_enable();
#else
		return 0;
#endif
		break;
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = (unsigned long) addr;
		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
			/* fallthrough */
		case 4:
			ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
			/* fallthrough */
		case 2:
			ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
		}
		if (unlikely(ret))
			return -EFAULT;
	} else if (flags & F)
		current->thread.fpr[reg] = data.dd;
	else
		regs->gpr[reg] = data.ll;

	/* Update RA as needed */
	if (flags & U)
		regs->gpr[areg] = regs->dar;

	return 1;
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -