/*
 * traps.c -- MIPS trap and exception handling
 *
 * Extracted from the Linux 2.4.29 kernel source
 * (arch/mips64/kernel/traps.c).  939 lines total; this is
 * page 1 of 2 of the extraction.
 */
	/*
	 * NOTE(review): this closing fragment is the tail of simulate_ll(),
	 * whose body begins before this page of the extraction.
	 */
	force_sig(signal, current);
}

/*
 * Emulate the SC (store-conditional) instruction after it raised a
 * reserved-instruction exception (see the comment above simulate_llsc).
 * Decodes base register, signed offset and rt from the opcode, performs
 * the conditional store only if this task still owns the LL bit, and
 * writes the 0/1 success flag back into the rt register.
 */
static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long *vaddr, reg;
	long offset;
	int signal = 0;

	/*
	 * Analyse the sc instruction that just caused a ri exception
	 * and put the referenced address into vaddr.
	 */

	/* Sign-extend the 16-bit immediate offset. */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	/* Effective address = base register contents + sign-extended offset. */
	vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	/* SC to an address that is not word-aligned raises SIGBUS. */
	if ((unsigned long)vaddr & 3) {
		signal = SIGBUS;
		goto sig;
	}

	/*
	 * The link is broken (no preceding LL, or it belongs to another
	 * task): report failure in rt and skip the store.
	 */
	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		compute_return_epc(regs);
		return;
	}

	/* Perform the store; a fault on the user address raises SIGSEGV. */
	if (put_user(regs->regs[reg], vaddr)) {
		signal = SIGSEGV;
		goto sig;
	}

	/* Store succeeded: rt reads back as 1, then step past the insn. */
	regs->regs[reg] = 1;
	compute_return_epc(regs);
	return;

sig:
	force_sig(signal, current);
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
/*
 * Fetch the faulting opcode and dispatch to the LL or SC emulation.
 * Returns 0 when the instruction was emulated, -EFAULT when the opcode
 * could not be read or is neither LL nor SC.
 */
static inline int simulate_llsc(struct pt_regs *regs)
{
	unsigned int opcode;

	if (unlikely(get_insn_opcode(regs, &opcode)))
		return -EFAULT;

	if ((opcode & OPCODE) == LL) {
		simulate_ll(regs, opcode);
		return 0;
	}
	if ((opcode & OPCODE) == SC) {
		simulate_sc(regs, opcode);
		return 0;
	}

	return -EFAULT;			/* Strange things going on ... */
}

/*
 * Integer overflow exception: deliver SIGFPE with si_code FPE_INTOVF and
 * the faulting EPC as the fault address.
 */
asmlinkage void do_ov(struct pt_regs *regs)
{
	siginfo_t info;

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void *)regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}

/*
 * Floating-point exception handler; fcr31 is the FPU control/status
 * register captured at exception time.
 *
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;

		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		save_fp(current);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler (0, regs,
			&current->thread.fpu.soft);

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.soft.sr &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		restore_fp(current);

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	}

	/* Any other FP exception cause: plain SIGFPE. */
	force_sig(SIGFPE, current);
}

/*
 * Breakpoint exception (break instruction).  Extracts the break code from
 * the opcode and converts the overflow/divide-by-zero codes into SIGFPE;
 * everything else becomes SIGTRAP.
 */
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;
	siginfo_t info;

	die_if_kernel("Break instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))
		return;

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode < (1 << 10))
		bcode <<= 10;

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (bcode) {
	case BRK_OVERFLOW << 10:
	case BRK_DIVZERO << 10:
		if (bcode == (BRK_DIVZERO << 10))
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void *)regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}

/*
 * Trap exception (trap instructions).  Like do_bp() but the code field is
 * only present in the register variants of the trap instructions; the
 * immediate variants carry no code and fall through with tcode == 0.
 */
asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;
	siginfo_t info;

	if (get_insn_opcode(regs, &opcode))
		return;

	/* Immediate versions don't provide a code.  */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (tcode) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		if (tcode == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void *)regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}

/*
 * Reserved instruction exception.  On CPUs without hardware LL/SC, first
 * try to emulate LL/SC (some CPUs raise RI instead of coprocessor
 * unusable for these opcodes -- see the comment above simulate_llsc);
 * otherwise deliver SIGILL.
 */
asmlinkage void do_ri(struct pt_regs *regs)
{
	die_if_kernel("Reserved instruction in kernel code", regs);

	if (!cpu_has_llsc)
		if (!simulate_llsc(regs))
			return;

	force_sig(SIGILL, current);
}

/*
 * Coprocessor unusable exception.  The coprocessor number comes from the
 * CE field of the Cause register:
 *   cp0: possibly an LL/SC opcode on an ll/sc-less CPU -- try emulation;
 *   cp1: FPU access -- enable/initialize the FPU, or run the emulator
 *        when there is no hardware FPU;
 *   cp2/cp3: no handling, fall through to SIGILL.
 */
asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int cpid;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		if (cpu_has_llsc)
			break;

		if (!simulate_llsc(regs))
			return;
		break;

	case 1:
		own_fpu();
		if (current->used_math) {	/* Using the FPU again.  */
			restore_fp(current);
		} else {
			/* First time FPU user.  */
			init_fpu();
			current->used_math = 1;
		}

		if (!cpu_has_fpu) {
			int sig = fpu_emulator_cop1Handler(0, regs,
						&current->thread.fpu.soft);
			if (sig)
				force_sig(sig, current);
		}

		return;

	case 2:
	case 3:
		break;
	}

	force_sig(SIGILL, current);
}

/* MDMX unusable exception: not supported, always SIGILL. */
asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
	/*
	 * We use the watch exception where available to detect stack
	 * overflows.
	 */
	dump_tlb_all();
	show_regs(regs);
	panic("Caught WATCH exception - probably caused by stack overflow.");
}

/*
 * Machine check exception: dump state and panic.  ST0_TS in the Status
 * register indicates a TLB shutdown from multiple matching entries.
 */
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	show_regs(regs);
	dump_tlb_all();
	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (regs->cp0_status & ST0_TS) ? "" : "not ");
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

/* Per-exception-code handler addresses, indexed by exception code 0..31. */
unsigned long exception_handlers[32];

/*
 * Record the handler for exception code n in exception_handlers[] and
 * return the previous handler.  For n == 0 on CPUs with a dedicated
 * interrupt vector, also patch a jump instruction at KSEG0+0x200
 * (0x08000000 is the MIPS `j' opcode; the target is encoded in the low
 * 26 bits as a word address).
 *
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada.  Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
		*(volatile u32 *)(KSEG0+0x200) = 0x08000000 |
		                                 (0x03ffffff & (handler >> 2));
		flush_icache_range(KSEG0+0x200, KSEG0 + 0x204);
	}
	return (void *)old_handler;
}

/*
 * FP sigcontext save/restore hooks.  trap_init() binds these either to
 * the hardware implementations (_save_fp_context etc.) or to the
 * emulator variants, depending on cpu_has_fpu.  The *32 variants handle
 * the 32-bit compat sigcontext layout.
 */
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);

asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);

extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);

/*
 * Per-CPU trap state initialization: sets up the CP0 Status/Cause
 * registers, the ASID cache, the Context register (pointing at this
 * CPU's page directory), and attaches the CPU to init_mm.
 */
void __init per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();

	/* Some firmware leaves the BEV flag set, clear it.  */
	clear_c0_status(ST0_CU1|ST0_CU2|ST0_CU3|ST0_BEV);
	set_c0_status(ST0_CU0|ST0_FR|ST0_KX|ST0_SX|ST0_UX);

	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		set_c0_status(ST0_XX);

	/*
	 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
	 * interrupt processing overhead.  Use it where available.
	 */
	if (cpu_has_divec)
		set_c0_cause(CAUSEF_IV);

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	write_c0_context(((long)(&pgd_current[cpu])) << 23);
	write_c0_wired(0);

	/* This CPU runs on init_mm until the first real mm is scheduled. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current, cpu);
}

/*
 * Boot-time trap initialization: copies the low-level exception vector
 * stubs into their fixed KSEG0 addresses, fills the exception dispatch
 * table (default handle_reserved, then the real handlers per code), and
 * selects hardware vs. emulated FP context save/restore.
 */
void __init trap_init(void)
{
	extern char except_vec0_generic;
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec4;
	unsigned long i;

	per_cpu_trap_init();

	/* Copy the generic exception handlers to their final destination. */
	memcpy((void *) KSEG0         , &except_vec0_generic, 0x80);
	memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
	 * interrupt processing overhead.  Use it where available.
	 */
	if (cpu_has_divec)
		memcpy((void *)(KSEG0 + 0x200), &except_vec4, 0x8);

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(1, __xtlb_mod);
	set_except_vector(2, __xtlb_tlbl);
	set_except_vector(3, __xtlb_tlbs);
	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, handle_ri);
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);
	set_except_vector(22, handle_mdmx);

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	/* Pick the general-exception stub matching the CPU's capabilities. */
	if (cpu_has_vce)
		memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80);
	else
		memcpy((void *)(KSEG0 + 0x080), &except_vec3_generic, 0x80);

	if (current_cpu_data.cputype == CPU_R6000 ||
	    current_cpu_data.cputype == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
		save_fp_context32 = _save_fp_context32;
		restore_fp_context32 = _restore_fp_context32;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
		save_fp_context32 = fpu_emulator_save_context32;
		restore_fp_context32 = fpu_emulator_restore_context32;
	}

	flush_icache_range(KSEG0, KSEG0 + 0x400);

	atomic_inc(&init_mm.mm_count);	/* XXX UP?  */
	current->active_mm = &init_mm;
}

/* (End of extracted page 1/2.  Code-viewer UI chrome removed.) */