
traps.c

Kernel / C
		 * the cause bit set in $fcr31.
		 */
		current->thread.fpu.soft.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		restore_fp(current);

		preempt_enable();

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	}

	force_sig(SIGFPE, current);
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;
	siginfo_t info;

	die_if_kernel("Break instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))
		return;

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode < (1 << 10))
		bcode <<= 10;

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (bcode) {
	case BRK_OVERFLOW << 10:
	case BRK_DIVZERO << 10:
		if (bcode == (BRK_DIVZERO << 10))
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void *)regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;
	siginfo_t info;

	die_if_kernel("Trap instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))
		return;

	/* Immediate versions don't provide a code.  */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (tcode) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		if (tcode == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void *)regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	die_if_kernel("Reserved instruction in kernel code", regs);

	if (!cpu_has_llsc)
		if (!simulate_llsc(regs))
			return;

	force_sig(SIGILL, current);
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int cpid;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		if (cpu_has_llsc)
			break;

		if (!simulate_llsc(regs))
			return;
		break;

	case 1:
		preempt_disable();

		own_fpu();
		if (used_math()) {	/* Using the FPU again.  */
			restore_fp(current);
		} else {		/* First time FPU user.  */
			init_fpu();
			set_used_math();
		}

		if (!cpu_has_fpu) {
			int sig = fpu_emulator_cop1Handler(0, regs,
						&current->thread.fpu.soft);
			if (sig)
				force_sig(sig, current);
		}

		preempt_enable();

		return;

	case 2:
	case 3:
		break;
	}

	force_sig(SIGILL, current);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
	/*
	 * We use the watch exception where available to detect stack
	 * overflows.
	 */
	dump_tlb_all();
	show_regs(regs);
	panic("Caught WATCH exception - probably caused by stack overflow.");
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	show_regs(regs);
	dump_tlb_all();
	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (regs->cp0_status & ST0_TS) ? "" : "not ");
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

/*
 * Some MIPS CPUs can enable/disable for cache parity detection, but do
 * it different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_data.cputype) {
	case CPU_24K:
		/* 24K cache parity not currently implemented in FPGA */
		printk(KERN_INFO "Disable cache parity protection for "
		       "MIPS 24K CPU.\n");
		write_c0_ecc(read_c0_ecc() & ~0x80000000);
		break;
	case CPU_5KC:
		/* Set the PE bit (bit 31) in the c0_ecc register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 5KC/24K CPUs.\n");
		write_c0_ecc(read_c0_ecc() | 0x80000000);
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined (CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc;
	unsigned int debug;

	printk("SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk("c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk("\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 */
void nmi_exception_handler(struct pt_regs *regs)
{
	printk("NMI taken!!!!\n");
	die("NMI", regs);
	while(1) ;
}

unsigned long exception_handlers[32];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada.  Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
		*(volatile u32 *)(CAC_BASE + 0x200) = 0x08000000 |
		                                 (0x03ffffff & (handler >> 2));
		flush_icache_range(CAC_BASE + 0x200, CAC_BASE + 0x204);
	}
	return (void *)old_handler;
}

/*
 * This is used by native signal handling
 */
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);

extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);

static inline void signal_init(void)
{
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
}

#ifdef CONFIG_MIPS32_COMPAT

/*
 * This is used by 32-bit signal stuff on the 64-bit kernel
 */
asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);

static inline void signal32_init(void)
{
	if (cpu_has_fpu) {
		save_fp_context32 = _save_fp_context32;
		restore_fp_context32 = _restore_fp_context32;
	} else {
		save_fp_context32 = fpu_emulator_save_context32;
		restore_fp_context32 = fpu_emulator_restore_context32;
	}
}
#endif

extern void cpu_cache_init(void);
extern void tlb_init(void);

void __init per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;

	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
#ifdef CONFIG_MIPS64
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	change_c0_status(ST0_CU|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);

	/*
	 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
	 * interrupt processing overhead.  Use it where available.
	 */
	if (cpu_has_divec)
		set_c0_cause(CAUSEF_IV);

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	cpu_cache_init();
	tlb_init();
}

void __init trap_init(void)
{
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec_ejtag_debug;
	extern char except_vec4;
	unsigned long i;

	per_cpu_trap_init();

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overriden later as suitable for a particular
	 * configuration.
	 */
	memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to it's final
	 * destination.
	 */
	if (cpu_has_ejtag)
		memcpy((void *)(CAC_BASE + 0x300), &except_vec_ejtag_debug, 0x80);

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
	 * interrupt processing overhead.  Use it where available.
	 */
	if (cpu_has_divec)
		memcpy((void *)(CAC_BASE + 0x200), &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable for cache parity detection, but does
	 * it different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, handle_ri);
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);
	set_except_vector(22, handle_mdmx);

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
	else
		memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

	if (current_cpu_data.cputype == CPU_R6000 ||
	    current_cpu_data.cputype == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	signal_init();
#ifdef CONFIG_MIPS32_COMPAT
	signal32_init();
#endif

	flush_icache_range(CAC_BASE, CAC_BASE + 0x400);
}
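
Note on the break-code heuristic: do_bp() above extracts the break code from the opcode and, if the code sits in the low (bit 6) position, shifts it up so that both assembler conventions end up in the same place before the switch. The following is a minimal stand-alone sketch, not kernel code: an ordinary user-space C program that exercises that normalization. The two sample opcode words are hand-assembled encodings of "break 7", and the BRK_* constants are assumed to mirror the kernel's asm/break.h values.

/* Sketch only: user-space test of the do_bp() break-code heuristic. */
#include <stdio.h>

#define BRK_OVERFLOW	6	/* assumed to match asm/break.h */
#define BRK_DIVZERO	7	/* assumed to match asm/break.h */

static unsigned int normalize_bcode(unsigned int opcode)
{
	unsigned int bcode = (opcode >> 6) & ((1 << 20) - 1);

	/* Codes emitted at bit 6 are shifted up to the bit-16 position. */
	if (bcode < (1 << 10))
		bcode <<= 10;
	return bcode;
}

int main(void)
{
	unsigned int new_style = (7 << 6) | 0x0d;	/* "break 7", code at bit 6 */
	unsigned int old_style = (7 << 16) | 0x0d;	/* "break 7", code at bit 16 */

	printf("new=%#x old=%#x BRK_DIVZERO<<10=%#x\n",
	       normalize_bcode(new_style), normalize_bcode(old_style),
	       BRK_DIVZERO << 10);
	return 0;
}

Both inputs normalize to BRK_DIVZERO << 10, which is why the switch in do_bp() compares against the shifted constants rather than the raw BRK_* values.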
