⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 traps.c

📁 LINUX 2.6.17.4的源码
💻 C
📖 第 1 页 / 共 3 页
字号:
	unsigned int opcode;	/* NOTE(review): this is the tail of simulate_llsc(); its header precedes this chunk */

	if (unlikely(get_insn_opcode(regs, &opcode)))
		return -EFAULT;

	/* Emulate LL (load-linked) and SC (store-conditional) in software. */
	if ((opcode & OPCODE) == LL) {
		simulate_ll(regs, opcode);
		return 0;
	}
	if ((opcode & OPCODE) == SC) {
		simulate_sc(regs, opcode);
		return 0;
	}

	return -EFAULT;			/* Strange things going on ... */
}

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.  The only current use of this
 * is the thread area pointer.
 *
 * Returns 0 if the instruction was RDHWR $rt, $29 (emulated by loading
 * the thread pointer into the destination register), -EFAULT otherwise.
 */
static inline int simulate_rdhwr(struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(current);
	unsigned int opcode;

	if (unlikely(get_insn_opcode(regs, &opcode)))
		return -EFAULT;

	/* Advance EPC past the emulated instruction (handles delay slots). */
	if (unlikely(compute_return_epc(regs)))
		return -EFAULT;

	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;	/* hardware register selector */
		int rt = (opcode & RT) >> 16;	/* destination GPR */
		switch (rd) {
			case 29:	/* user-local / TLS pointer register */
				regs->regs[rt] = ti->tp_value;
				return 0;
			default:
				return -EFAULT;
		}
	}

	/* Not ours.  */
	return -EFAULT;
}

/*
 * Integer overflow exception: fatal in kernel mode, otherwise deliver
 * SIGFPE with si_code FPE_INTOVF and the faulting EPC as si_addr.
 */
asmlinkage void do_ov(struct pt_regs *regs)
{
	siginfo_t info;

	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;

		preempt_disable();

#ifdef CONFIG_PREEMPT
		if (!is_fpu_owner()) {
			/* We might lose fpu before disabling preempt... */
			own_fpu();
			BUG_ON(!used_math());
			restore_fp(current);
		}
#endif
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		save_fp(current);
		/* Ensure 'resume' not overwrite saved fp context again. */
		lose_fpu();

		preempt_enable();

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler (regs,
			&current->thread.fpu.soft);

		preempt_disable();

		own_fpu();	/* Using the FPU again.  */
		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bit set in $fcr31.
		 */
		current->thread.fpu.soft.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		restore_fp(current);

		preempt_enable();

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	}

	/* Any other FP exception cause: just deliver SIGFPE. */
	force_sig(SIGFPE, current);
}

/*
 * Breakpoint ('break') instruction exception: decode the break code and
 * translate arithmetic-failure codes into SIGFPE, everything else into
 * SIGTRAP.
 */
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;
	siginfo_t info;

	die_if_kernel("Break instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))
		return;

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode < (1 << 10))
		bcode <<= 10;

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (bcode) {
	case BRK_OVERFLOW << 10:
	case BRK_DIVZERO << 10:
		if (bcode == (BRK_DIVZERO << 10))
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}

/*
 * Trap instruction exception: like do_bp() but for the conditional trap
 * instructions; register variants carry a trap code, immediate variants
 * don't (tcode stays 0).
 */
asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;
	siginfo_t info;

	die_if_kernel("Trap instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))
		return;

	/* Immediate versions don't provide a code.  */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (tcode) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		if (tcode == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}

/*
 * Reserved instruction exception: try software emulation (LL/SC on CPUs
 * without native llsc, then rdhwr) before giving up with SIGILL.
 */
asmlinkage void do_ri(struct pt_regs *regs)
{
	die_if_kernel("Reserved instruction in kernel code", regs);

	if (!cpu_has_llsc)
		if (!simulate_llsc(regs))
			return;

	if (!simulate_rdhwr(regs))
		return;

	force_sig(SIGILL, current);
}

/*
 * Coprocessor unusable exception.  cp0: attempt LL/SC and rdhwr
 * emulation; cp1: enable the FPU (restoring or initializing context) and
 * fall back to the software FP emulator on FPU-less CPUs; cp2/cp3: SIGILL.
 */
asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int cpid;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

	/* Which coprocessor triggered the exception (Cause.CE field). */
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		if (!cpu_has_llsc)
			if (!simulate_llsc(regs))
				return;

		if (!simulate_rdhwr(regs))
			return;

		break;

	case 1:
		preempt_disable();

		own_fpu();
		if (used_math()) {	/* Using the FPU again.  */
			restore_fp(current);
		} else {			/* First time FPU user.  */
			init_fpu();
			set_used_math();
		}
		preempt_enable();

		if (!cpu_has_fpu) {
			int sig = fpu_emulator_cop1Handler(regs,
						&current->thread.fpu.soft);
			if (sig)
				force_sig(sig, current);
#ifdef CONFIG_MIPS_MT_FPAFF
			else {
			/*
			 * MIPS MT processors may have fewer FPU contexts
			 * than CPU threads. If we've emulated more than
			 * some threshold number of instructions, force
			 * migration to a "CPU" that has FP support.
			 */
			 if(mt_fpemul_threshold > 0
			 && ((current->thread.emulated_fp++
			    > mt_fpemul_threshold))) {
			  /*
			   * If there's no FPU present, or if the
			   * application has already restricted
			   * the allowed set to exclude any CPUs
			   * with FPUs, we'll skip the procedure.
			   */
			  if (cpus_intersects(current->cpus_allowed,
			  			mt_fpu_cpumask)) {
			    cpumask_t tmask;

			    cpus_and(tmask,
					current->thread.user_cpus_allowed,
					mt_fpu_cpumask);
			    set_cpus_allowed(current, tmask);
			    current->thread.mflags |= MF_FPUBOUND;
			  }
			 }
			}
#endif /* CONFIG_MIPS_MT_FPAFF */
		}

		return;

	case 2:
	case 3:
		die_if_kernel("do_cpu invoked from kernel context!", regs);
		break;
	}

	force_sig(SIGILL, current);
}

/* MDMX unusable: not supported here, always SIGILL. */
asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
	/*
	 * We use the watch exception where available to detect stack
	 * overflows.
	 */
	dump_tlb_all();
	show_regs(regs);
	panic("Caught WATCH exception - probably caused by stack overflow.");
}

/*
 * Machine check exception: dump registers (and the TLB if Status.TS
 * indicates a multiple-match), then panic.
 */
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);	/* hex digits for a long */
	int multi_match = regs->cp0_status & ST0_TS;

	show_regs(regs);

	if (multi_match) {
		printk("Index   : %0x\n", read_c0_index());
		printk("Pagemask: %0x\n", read_c0_pagemask());
		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		printk("\n");
		dump_tlb_all();
	}

	show_code((unsigned int *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

/*
 * MIPS MT thread exception: decode the VPEControl.EXCPT subcode, log a
 * description, and deliver SIGILL.
 */
asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_ERR "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_ERR "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_ERR "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_ERR "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_ERR "YIELD Scheduler Exception\n");
		break;
	case 5:
		/*
		 * NOTE(review): "Schedulier" below is a typo for "Scheduler"
		 * in the log string; left byte-identical here.
		 */
		printk(KERN_ERR "Gating Storage Schedulier Exception\n");
		break;
	default:
		printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	/*
	 * NOTE(review): die_if_kernel() was already called at function entry;
	 * this second call looks redundant — confirm intent.
	 */
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}

/* DSP ASE exception: unexpected when the CPU has DSP, SIGILL otherwise. */
asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception\n");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

/* Catch-all for vectored interrupts that have no registered handler. */
asmlinkage void do_default_vi(struct pt_regs *regs)
{
	show_regs(regs);
	panic("Caught unexpected vectored interrupt.");
}

/*
 * Some MIPS CPUs can enable/disable for cache parity detection, but do
 * it different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_data.cputype) {
	case CPU_24K:
	case CPU_34K:
	case CPU_5KC:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

/*
 * Cache error exception: decode and print the c0_cacheerr bits, then
 * panic — there is no recovery path.
 */
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);	/* hex digits for a long */
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);	/* hex digits for a long */
	unsigned long depc, old_epc;
	unsigned int debug;

	printk("SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk("c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
	} else
		depc += 4;	/* not in a delay slot: simply step over SDBBP */
	write_c0_depc(depc);

#if 0
	printk("\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -