
📄 traps.c

📁 linux-2.6.15.6
💻 C
			break;
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

static int misaligned_store(struct pt_regs *regs,
			    __u32 opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never need to fix up misaligned FPU accesses within the kernel since
   that's a real error. */
static int misaligned_fpu_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			grab_fpu();
			fpsave(&current->thread.fpu.hard);
			release_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32*) &buffer;
		bufhi = *(1 + (__u32*) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.fpu.hard.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_LITTLE_ENDIAN)
				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die ("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fpu_store(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			grab_fpu();
			fpsave(&current->thread.fpu.hard);
			release_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.fpu.hard.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_LITTLE_ENDIAN)
				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32*) &buffer = buflo;
		*(1 + (__u32*) &buffer) = bufhi;
		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		return 0;
	} else {
		die ("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}
}
#endif

static int misaligned_fixup(struct pt_regs *regs)
{
	unsigned long opcode;
	int error;
	int major, minor;

#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Never fixup user mode misaligned accesses without this option enabled. */
	return -1;
#else
	if (!user_mode_unaligned_fixup_enable) return -1;
#endif

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}
	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
		--user_mode_unaligned_fixup_count;
		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
		       current->comm, current->pid, (__u32)regs->pc, opcode);
	} else
#endif
	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
		--kernel_mode_unaligned_fixup_count;
		if (in_interrupt()) {
			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
			       (__u32)regs->pc, opcode);
		} else {
			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
			       current->comm, current->pid, (__u32)regs->pc, opcode);
		}
	}

	switch (major) {
		case (0x84>>2): /* LD.W */
			error = misaligned_load(regs, opcode, 1, 1, 1);
			break;
		case (0xb0>>2): /* LD.UW */
			error = misaligned_load(regs, opcode, 1, 1, 0);
			break;
		case (0x88>>2): /* LD.L */
			error = misaligned_load(regs, opcode, 1, 2, 1);
			break;
		case (0x8c>>2): /* LD.Q */
			error = misaligned_load(regs, opcode, 1, 3, 0);
			break;

		case (0xa4>>2): /* ST.W */
			error = misaligned_store(regs, opcode, 1, 1);
			break;
		case (0xa8>>2): /* ST.L */
			error = misaligned_store(regs, opcode, 1, 2);
			break;
		case (0xac>>2): /* ST.Q */
			error = misaligned_store(regs, opcode, 1, 3);
			break;

		case (0x40>>2): /* indexed loads */
			switch (minor) {
				case 0x1: /* LDX.W */
					error = misaligned_load(regs, opcode, 0, 1, 1);
					break;
				case 0x5: /* LDX.UW */
					error = misaligned_load(regs, opcode, 0, 1, 0);
					break;
				case 0x2: /* LDX.L */
					error = misaligned_load(regs, opcode, 0, 2, 1);
					break;
				case 0x3: /* LDX.Q */
					error = misaligned_load(regs, opcode, 0, 3, 0);
					break;
				default:
					error = -1;
					break;
			}
			break;
		case (0x60>>2): /* indexed stores */
			switch (minor) {
				case 0x1: /* STX.W */
					error = misaligned_store(regs, opcode, 0, 1);
					break;
				case 0x2: /* STX.L */
					error = misaligned_store(regs, opcode, 0, 2);
					break;
				case 0x3: /* STX.Q */
					error = misaligned_store(regs, opcode, 0, 3);
					break;
				default:
					error = -1;
					break;
			}
			break;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
		case (0x94>>2): /* FLD.S */
			error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
			break;
		case (0x98>>2): /* FLD.P */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
			break;
		case (0x9c>>2): /* FLD.D */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
			break;
		case (0x1c>>2): /* floating indexed loads */
			switch (minor) {
			case 0x8: /* FLDX.S */
				error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
				break;
			case 0xd: /* FLDX.P */
				error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
				break;
			case 0x9: /* FLDX.D */
				error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
				break;
			default:
				error = -1;
				break;
			}
			break;
		case (0xb4>>2): /* FST.S */
			error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
			break;
		case (0xb8>>2): /* FST.P */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
			break;
		case (0xbc>>2): /* FST.D */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
			break;
		case (0x3c>>2): /* floating indexed stores */
			switch (minor) {
			case 0x8: /* FSTX.S */
				error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
				break;
			case 0xd: /* FSTX.P */
				error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
				break;
			case 0x9: /* FSTX.D */
				error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
				break;
			default:
				error = -1;
				break;
			}
			break;
#endif

		default:
			/* Fault */
			error = -1;
			break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}
}

static ctl_table unaligned_table[] = {
	{1, "kernel_reports", &kernel_mode_unaligned_fixup_count,
		sizeof(int), 0644, NULL, &proc_dointvec},
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	{2, "user_reports", &user_mode_unaligned_fixup_count,
		sizeof(int), 0644, NULL, &proc_dointvec},
	{3, "user_enable", &user_mode_unaligned_fixup_enable,
		sizeof(int), 0644, NULL, &proc_dointvec},
#endif
	{0}
};

static ctl_table unaligned_root[] = {
	{1, "unaligned_fixup", NULL, 0, 0555, unaligned_table},
	{0}
};

static ctl_table sh64_root[] = {
	{1, "sh64", NULL, 0, 0555, unaligned_root},
	{0}
};

static struct ctl_table_header *sysctl_header;

static int __init init_sysctl(void)
{
	sysctl_header = register_sysctl_table(sh64_root, 0);
	return 0;
}

__initcall(init_sysctl);

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;
	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4) {
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	}
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
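Editor's note: the kernel-mode paths above rely on the SHmedia ldlo/ldhi (and stlo/sthi) instruction pairs, each of which touches only the naturally aligned word on its side of the misaligned address, so neither half can itself fault on alignment. As an illustrative sketch only (not part of traps.c; the function name is made up), the same merge can be written in portable C for a little-endian 32-bit load:

#include <stdint.h>

/* Sketch of the ldlo.l/ldhi.l idea: read the two naturally aligned
 * 32-bit words that straddle a misaligned address and splice the
 * relevant bytes together.  Little-endian layout is assumed.  Both
 * aligned words lie entirely within pages the misaligned access
 * already touches, so no extra page can be faulted in. */
static uint32_t misaligned_load32_le(uintptr_t addr)
{
	const uint32_t *base = (const uint32_t *)(addr & ~(uintptr_t)3);
	unsigned shift = (addr & 3) * 8;	/* bit offset into the low word */

	if (shift == 0)
		return base[0];			/* already aligned */

	/* Low word supplies the low bytes, high word the high bytes. */
	return (base[0] >> shift) | (base[1] << (32 - shift));
}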
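For reference, misaligned_fixup() carves each 32-bit SHmedia opcode into a 6-bit major opcode, a 4-bit minor opcode, and a 6-bit register number; the case labels are written as (0x88>>2) and the like so they can be read against byte-valued instruction tables. A small standalone demonstration (the opcode value is fabricated for the example):

#include <stdio.h>

int main(void)
{
	unsigned long opcode = 0x8c123450UL;	/* made-up example value */
	int major = (opcode >> 26) & 0x3f;	/* top 6 bits select the case */
	int minor = (opcode >> 16) & 0xf;	/* sub-opcode for indexed forms */
	int reg   = (opcode >> 4) & 0x3f;	/* source/destination register */

	/* major == 0x23 here, i.e. (0x8c>>2), the LD.Q case above */
	printf("major=0x%02x minor=0x%x reg=%d\n", major, minor, reg);
	return 0;
}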
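The ctl_table chain registered at the end (sh64 -> unaligned_fixup -> the three integer knobs) surfaces the report counters and the enable flag as /proc/sys/sh64/unaligned_fixup/{kernel_reports,user_reports,user_enable}. A minimal userspace sketch of turning the user-mode fixup on, assuming a kernel built with CONFIG_SH64_USER_MISALIGNED_FIXUP:

#include <stdio.h>

int main(void)
{
	/* Path follows from the ctl_table names registered above. */
	FILE *f = fopen("/proc/sys/sh64/unaligned_fixup/user_enable", "w");

	if (!f) {
		perror("user_enable");
		return 1;
	}
	fputs("1\n", f);	/* non-zero: emulate user-mode misaligned accesses */
	fclose(f);
	return 0;
}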
