📄 traps.c
asmlinkage void do_bp(struct pt_regs *regs)
{
        siginfo_t info;
        unsigned int opcode, bcode;
        unsigned int *epc;

        epc = (unsigned int *) regs->cp0_epc +
              ((regs->cp0_cause & CAUSEF_BD) != 0);
        if (get_user(opcode, epc))
                goto sigsegv;

        /*
         * There is an ancient bug in the MIPS assemblers: the break code
         * starts at bit 16 instead of bit 6 in the opcode.
         * Gas is bug-compatible ...
         */
        bcode = ((opcode >> 16) & ((1 << 20) - 1));

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all break
         * insns, even for break codes that indicate arithmetic failures.
         * Weird ...)
         * But should we continue the brokenness???  --macro
         */
        switch (bcode) {
        case 6:
        case 7:
                if (bcode == 7)
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_addr = (void *)regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
        default:
                force_sig(SIGTRAP, current);
        }
        return;

sigsegv:
        force_sig(SIGSEGV, current);
}

asmlinkage void do_tr(struct pt_regs *regs)
{
        siginfo_t info;
        unsigned int opcode, bcode;
        unsigned int *epc;

        epc = (unsigned int *) regs->cp0_epc +
              ((regs->cp0_cause & CAUSEF_BD) != 0);
        if (get_user(opcode, epc))
                goto sigsegv;

        bcode = ((opcode >> 6) & ((1 << 20) - 1));

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all break
         * insns, even for break codes that indicate arithmetic failures.
         * Weird ...)
         * But should we continue the brokenness???  --macro
         */
        switch (bcode) {
        case 6:
        case 7:
                if (bcode == 7)
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_addr = (void *)regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
        default:
                force_sig(SIGTRAP, current);
        }
        return;

sigsegv:
        force_sig(SIGSEGV, current);
}

/*
 * userland emulation for R2300 CPUs
 * needed for the multithreading part of glibc
 *
 * this implementation can handle only synchronization between 2 or more
 * user contexts and is not SMP safe.
 */
asmlinkage void do_ri(struct pt_regs *regs)
{
        if (!user_mode(regs))
                BUG();

#ifndef CONFIG_CPU_HAS_LLSC
#ifdef CONFIG_SMP
#error "ll/sc emulation is not SMP safe"
#endif
        {
                unsigned int opcode;

                if (!get_insn_opcode(regs, &opcode)) {
                        if ((opcode & OPCODE) == LL) {
                                simulate_ll(regs, opcode);
                                return;
                        }
                        if ((opcode & OPCODE) == SC) {
                                simulate_sc(regs, opcode);
                                return;
                        }
                }
        }
#endif /* CONFIG_CPU_HAS_LLSC */

        if (compute_return_epc(regs))
                return;
        force_sig(SIGILL, current);
}
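/*
 * For illustration only (not from the original source): the user-level
 * pattern the LL/SC emulation above is meant to support is the usual
 * load-linked/store-conditional retry loop, e.g. an atomic increment:
 *
 *      1:      ll      t0, 0(a0)       # load-linked from the word at (a0)
 *              addiu   t0, t0, 1
 *              sc      t0, 0(a0)       # store-conditional, t0 = 1 on success
 *              beqz    t0, 1b          # retry if another context interfered
 *
 * On CPUs without LL/SC these opcodes take the reserved instruction
 * exception, land in do_ri() above and are forwarded to simulate_ll()
 * and simulate_sc().
 */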
asmlinkage void do_cpu(struct pt_regs *regs)
{
        unsigned int cpid;
        extern void lazy_fpu_switch(void *);
        extern void save_fp(struct task_struct *);
        extern void init_fpu(void);
        void fpu_emulator_init_fpu(void);
        int sig;

        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
        if (cpid != 1)
                goto bad_cid;

        if (!(mips_cpu.options & MIPS_CPU_FPU))
                goto fp_emul;

        regs->cp0_status |= ST0_CU1;
        if (last_task_used_math == current)
                return;

        if (current->used_math) {               /* Using the FPU again. */
                lazy_fpu_switch(last_task_used_math);
        } else {                                /* First time FPU user. */
                if (last_task_used_math != NULL)
                        save_fp(last_task_used_math);
                init_fpu();
                current->used_math = 1;
        }
        last_task_used_math = current;
        return;

fp_emul:
        if (last_task_used_math != current) {
                if (!current->used_math) {
                        fpu_emulator_init_fpu();
                        current->used_math = 1;
                }
        }
        sig = fpu_emulator_cop1Handler(regs);
        last_task_used_math = current;
        if (sig) {
                /*
                 * The return EPC is not calculated in the FPU emulator if
                 * a signal is being sent, so we calculate it here.
                 */
                compute_return_epc(regs);
                force_sig(sig, current);
        }
        return;

bad_cid:
#ifndef CONFIG_CPU_HAS_LLSC
        switch (mips_cpu.cputype) {
        case CPU_TX3927:
        case CPU_TX39XX:
                do_ri(regs);
                return;
        }
#endif
        compute_return_epc(regs);
        force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
        extern void dump_tlb_all(void);

        /*
         * We use the watch exception where available to detect stack
         * overflows.
         */
        dump_tlb_all();
        show_regs(regs);
        panic("Caught WATCH exception - probably caused by stack overflow.");
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
        show_regs(regs);
        panic("Caught Machine Check exception - probably caused by multiple "
              "matching entries in the TLB.");
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
        /*
         * Game over - no way to handle this if it ever occurs.  Most probably
         * caused by a new unknown cpu type or after another deadly
         * hard/software error.
         */
        show_regs(regs);
        panic("Caught reserved exception - should not happen.");
}

static inline void watch_init(void)
{
        if (mips_cpu.options & MIPS_CPU_WATCH) {
                set_except_vector(23, handle_watch);
                watch_available = 1;
        }
}

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
        switch (mips_cpu.cputype) {
        case CPU_5KC:
                /* Set the PE bit (bit 31) in the CP0_ECC register. */
                printk(KERN_INFO "Enable the cache parity protection for "
                       "MIPS 5KC CPUs.\n");
                write_32bit_cp0_register(CP0_ECC,
                        read_32bit_cp0_register(CP0_ECC) | 0x80000000);
                break;
        default:
                break;
        }
}

asmlinkage void cache_parity_error(void)
{
        unsigned int reg_val;

        /* For the moment, report the problem and hang. */
        reg_val = read_32bit_cp0_register(CP0_ERROREPC);
        printk("Cache error exception:\n");
        printk("cp0_errorepc == %08x\n", reg_val);
        reg_val = read_32bit_cp0_register(CP0_CACHEERR);
        printk("cp0_cacheerr == %08x\n", reg_val);

        printk("Decoded CP0_CACHEERR: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
        printk("Error bits: %s%s%s%s%s%s%s\n",
               reg_val & (1<<29) ? "ED " : "",
               reg_val & (1<<28) ? "ET " : "",
               reg_val & (1<<26) ? "EE " : "",
               reg_val & (1<<25) ? "EB " : "",
               reg_val & (1<<24) ? "EI " : "",
               reg_val & (1<<23) ? "E1 " : "",
               reg_val & (1<<22) ? "E0 " : "");
        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

        if (reg_val & (1<<22))
                printk("DErrAddr0: 0x%08x\n",
                       read_32bit_cp0_set1_register(CP0_S1_DERRADDR0));

        if (reg_val & (1<<23))
                printk("DErrAddr1: 0x%08x\n",
                       read_32bit_cp0_set1_register(CP0_S1_DERRADDR1));

        panic("Can't handle the cache error!");
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
        unsigned int depc, old_epc, debug;

        printk("SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
        depc = read_32bit_cp0_register(CP0_DEPC);
        debug = read_32bit_cp0_register(CP0_DEBUG);
        printk("DEPC = %08x, DEBUG = %08x\n", depc, debug);
        if (debug & 0x80000000) {
                /*
                 * In branch delay slot.
                 * We cheat a little bit here and use EPC to calculate the
                 * debug return address (DEPC).  EPC is restored after the
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
                regs->cp0_epc = depc;
                __compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
        } else
                depc += 4;
        write_32bit_cp0_register(CP0_DEPC, depc);

#if 0
        printk("\n\n----- Enable EJTAG single stepping ----\n\n");
        write_32bit_cp0_register(CP0_DEBUG, debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 */
void nmi_exception_handler(struct pt_regs *regs)
{
        printk("NMI taken!!!!\n");
        die("NMI", regs);
        while (1) ;     /* We die here. */
}

unsigned long exception_handlers[32];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada.  Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
        unsigned long handler = (unsigned long) addr;
        unsigned long old_handler = exception_handlers[n];

        exception_handlers[n] = handler;
        if (n == 0 && mips_cpu.options & MIPS_CPU_DIVEC) {
                *(volatile u32 *)(KSEG0+0x200) = 0x08000000 |
                                                 (0x03ffffff & (handler >> 2));
                flush_icache_range(KSEG0+0x200, KSEG0 + 0x204);
        }
        return (void *)old_handler;
}
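/*
 * Note on the store to KSEG0+0x200 above: the value written is a MIPS
 * 'j' (jump) instruction -- opcode 000010 in bits 31:26 (0x08000000)
 * plus the word address of the handler (handler >> 2) in the 26-bit
 * target field.  Because 'j' keeps the upper four bits of the current
 * PC, the handler must live in the same 256 MB segment as the vector
 * itself, which is the KSEG0 limitation mentioned in the comment above.
 */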
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);

extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);

void __init per_cpu_trap_init(void)
{
        unsigned int cpu = smp_processor_id();

        /* Some firmware leaves the BEV flag set, clear it. */
        clear_cp0_status(ST0_BEV);

        /*
         * Some MIPS CPUs have a dedicated interrupt vector which reduces the
         * interrupt processing overhead.  Use it where available.
         */
        if (mips_cpu.options & MIPS_CPU_DIVEC)
                set_cp0_cause(CAUSEF_IV);

        cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
        set_context(cpu << 23);
}

void __init trap_init(void)
{
        extern char except_vec1_generic, except_vec2_generic;
        extern char except_vec3_generic, except_vec3_r4000;
        extern char except_vec4;
        extern char except_vec_ejtag_debug;
        unsigned long i;

        per_cpu_trap_init();

        /* Copy the generic exception handler code to its final destination. */
        memcpy((void *)(KSEG0 + 0x80), &except_vec1_generic, 0x80);
        memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
        memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80);

        /*
         * Setup default vectors
         */
        for (i = 0; i <= 31; i++)
                set_except_vector(i, handle_reserved);

        /*
         * Copy the EJTAG debug exception vector handler code to its final
         * destination.
         */
        if (mips_cpu.options & MIPS_CPU_EJTAG)
                memcpy((void *)(KSEG0 + 0x300), &except_vec_ejtag_debug, 0x80);

        /*
         * Only some CPUs have the watch exceptions or a dedicated
         * interrupt vector.
         */
        watch_init();

        /*
         * Some MIPS CPUs have a dedicated interrupt vector which reduces the
         * interrupt processing overhead.  Use it where available.
         */
        if (mips_cpu.options & MIPS_CPU_DIVEC) {
                memcpy((void *)(KSEG0 + 0x200), &except_vec4, 8);
                set_cp0_cause(CAUSEF_IV);
        }

        /*
         * Some CPUs can enable/disable cache parity detection, but they
         * do it in different ways.
         */
        parity_protection_init();

        /*
         * The Data Bus Errors / Instruction Bus Errors are signaled
         * by external hardware.  Therefore these two exceptions
         * may have board specific handlers.
         */
        bus_error_init();
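        /*
         * The vectors installed below are indexed by the ExcCode field of
         * the CP0 Cause register: 1 TLB modified, 2/3 TLB exception on
         * load/store, 4/5 address error on load/store, 6/7 instruction/data
         * bus error, 8 system call, 9 breakpoint, 10 reserved instruction,
         * 11 coprocessor unusable, 12 integer overflow, 13 trap,
         * 15 floating point, 24 machine check.
         */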
        set_except_vector(1, handle_mod);
        set_except_vector(2, handle_tlbl);
        set_except_vector(3, handle_tlbs);
        set_except_vector(4, handle_adel);
        set_except_vector(5, handle_ades);
        set_except_vector(6, handle_ibe);
        set_except_vector(7, handle_dbe);
        set_except_vector(8, handle_sys);
        set_except_vector(9, handle_bp);
        set_except_vector(10, handle_ri);
        set_except_vector(11, handle_cpu);
        set_except_vector(12, handle_ov);
        set_except_vector(13, handle_tr);

        if ((mips_cpu.options & MIPS_CPU_FPU) &&
            !(mips_cpu.options & MIPS_CPU_NOFPUEX))
                set_except_vector(15, handle_fpe);

        if (mips_cpu.options & MIPS_CPU_MCHECK)
                set_except_vector(24, handle_mcheck);

        /*
         * Handling of the following exceptions depends mostly on the cpu type
         */
        if ((mips_cpu.options & MIPS_CPU_4KEX)
            && (mips_cpu.options & MIPS_CPU_4KTLB)) {
                /* Cache error vector already set above. */
                if (mips_cpu.options & MIPS_CPU_VCE) {
                        memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000,
                               0x80);
                }
        } else switch (mips_cpu.cputype) {
        case CPU_SB1:
                /*
                 * XXX - This should be folded in to the "cleaner" handling,
                 * above
                 */
                memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000, 0x80);
#ifdef CONFIG_SB1_CACHE_ERROR
                {
                        /* Special cache error handler for SB1 */
                        extern char except_vec2_sb1;
                        memcpy((void *)(KSEG0 + 0x100), &except_vec2_sb1, 0x80);
                        memcpy((void *)(KSEG1 + 0x100), &except_vec2_sb1, 0x80);
                }
#endif
                /* Enable timer interrupt and scd mapped interrupt */
                clear_cp0_status(0xf000);
                set_cp0_status(0xc00);
                break;

        case CPU_R6000:
        case CPU_R6000A:
                /*
                 * The R6000 is the only R-series CPU that features a machine
                 * check exception (similar to the R4000 cache error) and
                 * unaligned ldc1/sdc1 exception.  The handlers have not been
                 * written yet.  Well, anyway there is no R6000 machine on the
                 * current list of targets for Linux/MIPS.
                 * (Duh, crap, there is someone with a triple R6k machine)
                 */
                //set_except_vector(14, handle_mc);
                //set_except_vector(15, handle_ndc);

        case CPU_R2000:
        case CPU_R3000:
        case CPU_R3000A:
        case CPU_R3041:
        case CPU_R3051:
        case CPU_R3052:
        case CPU_R3081:
        case CPU_R3081E:
        case CPU_TX3912:
        case CPU_TX3922:
        case CPU_TX3927:
        case CPU_TX39XX:
                memcpy((void *)(KSEG0 + 0x80), &except_vec3_generic, 0x80);
                break;

        case CPU_UNKNOWN:
        default:
                panic("Unknown CPU type");
        }
        flush_icache_range(KSEG0, KSEG0 + 0x400);

        if (mips_cpu.options & MIPS_CPU_FPU) {
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
        } else {
                save_fp_context = fpu_emulator_save_context;
                restore_fp_context = fpu_emulator_restore_context;
        }

        if (mips_cpu.isa_level == MIPS_CPU_ISA_IV)
                set_cp0_status(ST0_XX);

        atomic_inc(&init_mm.mm_count);  /* XXX UP? */
        current->active_mm = &init_mm;

        /* XXX Must be done for all CPUs */
        current_cpu_data.asid_cache = ASID_FIRST_VERSION;
        TLBMISS_HANDLER_SETUP();
}
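/*
 * Minimal usage sketch (not part of traps.c): a board port can install a
 * board-specific data bus error handler from its bus_error_init() hook by
 * calling set_except_vector() defined above.  The two names used here,
 * my_board_handle_dbe and my_board_bus_error_init, are hypothetical
 * placeholders, not real kernel symbols.
 */
extern void my_board_handle_dbe(void);          /* hypothetical low-level stub,
                                                   normally written in assembly */

void __init my_board_bus_error_init(void)       /* hypothetical board hook */
{
        /* ExcCode 7 is the data bus error (DBE) exception, cf. trap_init(). */
        set_except_vector(7, my_board_handle_dbe);
}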