
📄 traps.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 * Architecture-specific trap handling.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/module.h>		/* for EXPORT_SYMBOL */
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/delay.h>		/* for ssleep() */
#include <linux/kdebug.h>

#include <asm/fpswa.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);

void __init
trap_init (void)
{
	if (ia64_boot_param->fpswa)
		/* FPSWA fixup: make the interface pointer a kernel virtual address: */
		fpswa_interface = __va(ia64_boot_param->fpswa);
}

void
die (const char *str, struct pt_regs *regs, long err)
{
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner = -1,
		.lock_owner_depth = 0
	};
	static int die_counter;
	int cpu = get_cpu();

	if (die.lock_owner != cpu) {
		console_verbose();
		spin_lock_irq(&die.lock);
		die.lock_owner = cpu;
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	}
	put_cpu();

	if (++die.lock_owner_depth < 3) {
		printk("%s[%d]: %s %ld [%d]\n",
		       current->comm, task_pid_nr(current), str, err, ++die_counter);
		(void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
		show_regs(regs);
	} else
		printk(KERN_ERR "Recursive die() failure, output suppressed\n");

	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die.lock);

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(SIGSEGV);
}

void
die_if_kernel (char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

void
__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
	siginfo_t siginfo;
	int sig, code;

	/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */
	siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
	siginfo.si_imm = break_num;
	siginfo.si_flags = 0;		/* clear __ISR_VALID */
	siginfo.si_isr = 0;

	switch (break_num) {
	      case 0: /* unknown error (used by GCC for __builtin_abort()) */
		if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
			       	== NOTIFY_STOP)
			return;
		die_if_kernel("bugcheck!", regs, break_num);
		sig = SIGILL; code = ILL_ILLOPC;
		break;

	      case 1: /* integer divide by zero */
		sig = SIGFPE; code = FPE_INTDIV;
		break;

	      case 2: /* integer overflow */
		sig = SIGFPE; code = FPE_INTOVF;
		break;

	      case 3: /* range check/bounds check */
		sig = SIGFPE; code = FPE_FLTSUB;
		break;

	      case 4: /* null pointer dereference */
		sig = SIGSEGV; code = SEGV_MAPERR;
		break;

	      case 5: /* misaligned data */
		sig = SIGSEGV; code = BUS_ADRALN;
		break;

	      case 6: /* decimal overflow */
		sig = SIGFPE; code = __FPE_DECOVF;
		break;

	      case 7: /* decimal divide by zero */
		sig = SIGFPE; code = __FPE_DECDIV;
		break;

	      case 8: /* packed decimal error */
		sig = SIGFPE; code = __FPE_DECERR;
		break;

	      case 9: /* invalid ASCII digit */
		sig = SIGFPE; code = __FPE_INVASC;
		break;

	      case 10: /* invalid decimal digit */
		sig = SIGFPE; code = __FPE_INVDEC;
		break;

	      case 11: /* paragraph stack overflow */
		sig = SIGSEGV; code = __SEGV_PSTKOVF;
		break;
	      case 0x3f000 ... 0x3ffff:	/* bundle-update in progress */
		sig = SIGILL; code = __ILL_BNDMOD;
		break;

	      default:
		if (break_num < 0x40000 || break_num > 0x100000)
			die_if_kernel("Bad break", regs, break_num);

		if (break_num < 0x80000) {
			sig = SIGILL; code = __ILL_BREAK;
		} else {
			if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
					== NOTIFY_STOP)
				return;
			sig = SIGTRAP; code = TRAP_BRKPT;
		}
	}
	siginfo.si_signo = sig;
	siginfo.si_errno = 0;
	siginfo.si_code = code;
	force_sig_info(sig, &siginfo, current);
}

/*
 * disabled_fph_fault() is called when a user-level process attempts to access f32..f127
 * and it doesn't own the fp-high register partition.  When this happens, we save the
 * current fph partition in the task_struct of the fpu-owner (if necessary) and then load
 * the fp-high partition of the current task (if necessary).  Note that the kernel has
 * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes
 * care of clearing psr.dfh.
 */
static inline void
disabled_fph_fault (struct pt_regs *regs)
{
	struct ia64_psr *psr = ia64_psr(regs);

	/* first, grant user-level access to fph partition: */
	psr->dfh = 0;

	/*
	 * Make sure that no other task gets in on this processor
	 * while we're claiming the FPU
	 */
	preempt_disable();
#ifndef CONFIG_SMP
	{
		struct task_struct *fpu_owner
			= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

		if (ia64_is_local_fpu_owner(current)) {
			preempt_enable_no_resched();
			return;
		}

		if (fpu_owner)
			ia64_flush_fph(fpu_owner);
	}
#endif /* !CONFIG_SMP */
	ia64_set_local_fpu_owner(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
		__ia64_load_fpu(current->thread.fph);
		psr->mfh = 0;
	} else {
		__ia64_init_fpu();
		/*
		 * Set mfh because the state in thread.fph does not match the state in
		 * the fph partition.
		 */
		psr->mfh = 1;
	}
	preempt_enable_no_resched();
}

static inline int
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
	    struct pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;

	if (!fpswa_interface)
		return -1;

	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * compute fp_state.  only FP registers f6 - f11 are used by the
	 * kernel, so set those bits in the mask and set the low volatile
	 * pointer to point to these registers.
	 */
	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
	/*
	 * unsigned long (*EFI_FPSWA) (
	 *	unsigned long    trap_type,
	 *	void             *Bundle,
	 *	unsigned long    *pipsr,
	 *	unsigned long    *pfsr,
	 *	unsigned long    *pisr,
	 *	unsigned long    *ppreds,
	 *	unsigned long    *pifs,
	 *	void             *fp_state);
	 */
	ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle,
					(unsigned long *) ipsr, (unsigned long *) fpsr,
					(unsigned long *) isr, (unsigned long *) pr,
					(unsigned long *) ifs, &fp_state);
	return ret.status;
}

struct fpu_swa_msg {
	unsigned long count;
	unsigned long time;
};
static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
static struct fpu_swa_msg last __cacheline_aligned;

/*
 * Handle floating-point assist faults and traps.
 */
static int
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
	long exception, bundle[2];
	unsigned long fault_ip;
	struct siginfo siginfo;

	fault_ip = regs->cr_iip;
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;
	if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
		return -1;

	if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT))  {
		unsigned long count, current_jiffies = jiffies;
		struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);

		if (unlikely(current_jiffies > cp->time))
			cp->count = 0;
		if (unlikely(cp->count < 5)) {
			cp->count++;
			cp->time = current_jiffies + 5 * HZ;

			/* minimize races by grabbing a copy of count BEFORE checking last.time. */
			count = last.count;
			barrier();

			/*
			 * Lower 4 bits are used as a count. Upper bits are a sequence
			 * number that is updated when count is reset. The cmpxchg will
			 * fail if seqno has changed. This minimizes multiple cpus
			 * resetting the count.
			 */
			if (current_jiffies > last.time)
				(void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));

			/* use fetchadd to atomically update the count */
			if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
				last.time = current_jiffies + 5 * HZ;
				printk(KERN_WARNING
					"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
					current->comm, task_pid_nr(current),
					regs->cr_iip + ia64_psr(regs)->ri, isr);
			}
		}
	}

	exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
			       &regs->cr_ifs, regs);
	if (fp_fault) {
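The break-number switch in ia64_bad_break() above maps each software break onto a POSIX signal and si_code delivered through force_sig_info(). The user-space sketch below is illustrative only and not part of traps.c; it assumes a toolchain where an integer divide by zero reaches the kernel as "break 1", so the handler sees SIGFPE with si_code == FPE_INTDIV. All names in it (fpe_probe.c, fpe_handler) are made up for the example.

/* fpe_probe.c - illustrative sketch, not part of traps.c */
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void
fpe_handler (int sig, siginfo_t *info, void *ctx)
{
	(void) sig; (void) ctx;

	/* si_code carries the cause the kernel chose (FPE_INTDIV for "break 1"). */
	if (info->si_code == FPE_INTDIV)
		(void) write(STDERR_FILENO, "SIGFPE: FPE_INTDIV\n", 19);
	_exit(1);
}

int
main (void)
{
	struct sigaction sa;
	volatile int zero = 0;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = fpe_handler;
	sa.sa_flags = SA_SIGINFO;	/* ask for siginfo_t so si_code is visible */
	sigaction(SIGFPE, &sa, NULL);

	return 1 / zero;	/* expected to raise SIGFPE through the kernel's trap path */
}

On other architectures the same SIGFPE/FPE_INTDIV pair comes from that architecture's own divide-error handler, so the sketch also runs there even though the break-instruction path shown in traps.c is ia64-specific.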
