traps.c
From "Linux Kernel 2.6.9 for OMAP1710" · C source · 1,080 lines · page 1 of 2
	/* Clear and disable the memory parity error line. */
	clear_mem_error(reason);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	 * is. */
	if( MCA_bus ) {
		mca_handle_nmi();
		return;
	}
#endif
	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}

static spinlock_t nmi_print_lock = SPIN_LOCK_UNLOCKED;

void die_nmi (struct pt_regs *regs, const char *msg)
{
	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out.
	 */
	bust_spinlocks(1);
	printk(msg);
	printk(" on CPU%d, eip %08lx, registers:\n",
		smp_processor_id(), regs->eip);
	show_registers(regs);
	printk("console shuts up ...\n");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);
	do_exit(SIGSEGV);
}

static void default_do_nmi(struct pt_regs * regs)
{
	unsigned char reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
							== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog) {
			nmi_watchdog_tick(regs);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
		return;
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	reassert_nmi();
}

static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu;

	nmi_enter();

	cpu = smp_processor_id();
	++nmi_count(cpu);

	if (!nmi_callback(regs, cpu))
		default_do_nmi(regs);

	nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
	nmi_callback = callback;
}

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}

#ifdef CONFIG_KPROBES
asmlinkage int do_int3(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return 1;
	/* This is an interrupt gate, because kprobes wants interrupts
	   disabled. Normal trap handlers don't. */
	restore_interrupts(regs);
	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
	return 0;
}
#endif
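/*
 * Illustrative sketch, not part of the original file: how a profiler- or
 * watchdog-style client might use the set_nmi_callback()/unset_nmi_callback()
 * API above.  A nonzero return from the callback claims the NMI and skips
 * default_do_nmi(); returning zero falls through to the default handling.
 * The callback name and the per-CPU counter are hypothetical.
 */
#if 0	/* example only -- not compiled */
static unsigned int example_nmi_hits[NR_CPUS];	/* hypothetical counter */

static int example_nmi_callback(struct pt_regs *regs, int cpu)
{
	example_nmi_hits[cpu]++;
	return 1;	/* NMI consumed; default_do_nmi() will not run */
}

static void example_start(void)
{
	set_nmi_callback(example_nmi_callback);
}

static void example_stop(void)
{
	unset_nmi_callback();
}
#endif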
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
	unsigned int condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
					SIGTRAP) == NOTIFY_STOP)
		return;
	/* It's safe to allow irq's after DR6 has been saved */
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg[7])
			goto clear_dr7;
	}

	if (regs->eflags & VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg[6] = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if (condition & DR_STEP) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if ((regs->xcs & 3) == 0)
			goto clear_TF_reenable;
		if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
			goto clear_TF;
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;

	/* If this is a kernel mode trap, save the user PC on entry to
	 * the kernel, that's what the debugger can make sense of.
	 */
	info.si_addr = ((regs->xcs & 3) == 0) ? (void __user *)tsk->thread.eip
	                                      : (void __user *)regs->eip;
	force_sig_info(SIGTRAP, &info, tsk);

	/* Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	__asm__("movl %0,%%db7"
		: /* no output */
		: "r" (0));
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
clear_TF:
	regs->eflags &= ~TF_MASK;
	return;
}
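/*
 * Illustrative sketch, not part of the original file: the spurious-trap
 * filter at the top of do_debug() above, restated as plain C.  DR6 reports
 * which of the four hardware breakpoints fired (DR_TRAP0..DR_TRAP3, bits
 * 0-3); DR7 holds the enable bits.  Because DR7 is written lazily, a stale
 * DR6 bit paired with a zeroed shadow DR7 (thread.debugreg[7] == 0) cannot
 * be a real watchpoint hit.  The helper name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static int debug_trap_is_spurious(unsigned int dr6, unsigned long shadow_dr7)
{
	return (dr6 & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) && !shadow_dr7;
}
#endif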
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
		case 0x041: /* Stack Fault */
		case 0x241: /* Stack Fault | Direction */
			info.si_code = FPE_FLTINV;
			/* Should we clear the SF or let user space do it ???? */
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->eip);
}

void simd_math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
					  long error_code)
{
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->eip);
	} else {
		/*
		 * Handle strange cache flush from user space exception
		 * in all other cases.  This is undocumented behaviour.
		 */
		if (regs->eflags & VM_MASK) {
			handle_vm86_fault((struct kernel_vm86_regs *)regs,
					  error_code);
			return;
		}
		die_if_kernel("cache flush denied", regs, error_code);
		current->thread.trap_no = 19;
		current->thread.error_code = error_code;
		force_sig(SIGSEGV, current);
	}
}
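/*
 * Illustrative worked example, not part of the original file: the x87 decode
 * in math_error() above with a divide-by-zero unmasked.  The control word
 * then has ZM (bit 2) clear and the status word has ZE (bit 2) set, so
 * ((~cwd) & swd & 0x3f) isolates exactly that exception bit:
 *
 *	cwd = 0x037b  ->  ~cwd has bit 2 set (ZM clear = unmasked)
 *	swd = 0x0004  ->  ZE set (divide-by-zero occurred)
 *	((~cwd) & swd & 0x3f) | (swd & 0x240) = 0x004  ->  FPE_FLTDIV
 *
 * A stack fault instead sets SF (0x040) together with IE, and C1 (0x200)
 * encodes the fault direction, which is where the 0x041/0x241 cases come
 * from.  simd_math_error() applies the same idea to MXCSR, shifting the
 * mask bits at 0x1f80 down by 7 to line them up with the flag bits at 0x3f.
 */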
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
					  long error_code)
{
#if 0
	/* No need to warn about this any longer. */
	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	clts();		/* Allow maths ops (or we recurse) */
	if (!tsk->used_math)
		init_fpu(tsk);
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	force_sig(SIGFPE,current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */

#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));
}
#endif

#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
  int __d0, __d1; \
  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
	"movw %4,%%dx\n\t" \
	"movl %%eax,%0\n\t" \
	"movl %%edx,%1" \
	:"=m" (*((long *) (gate_addr))), \
	 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
	:"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
	 "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at directory privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
}

static void __init set_call_gate(void *a, void *addr)
{
	_set_gate(a,12,3,addr,__KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	_set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
}

void __init trap_init(void)
{
#ifdef CONFIG_EISA
	if (isa_readl(0x0FFFD9) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
		EISA_bus = 1;
	}
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	init_apic_mappings();
#endif

	set_trap_gate(0,&divide_error);
	set_intr_gate(1,&debug);
	set_intr_gate(2,&nmi);
	set_system_intr_gate(3, &int3); /* int3-5 can be called from all */
	set_system_gate(4,&overflow);
	set_system_gate(5,&bounds);
	set_trap_gate(6,&invalid_op);
	set_trap_gate(7,&device_not_available);
	set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
	set_trap_gate(9,&coprocessor_segment_overrun);
	set_trap_gate(10,&invalid_TSS);
	set_trap_gate(11,&segment_not_present);
	set_trap_gate(12,&stack_segment);
	set_trap_gate(13,&general_protection);
	set_intr_gate(14,&page_fault);
	set_trap_gate(15,&spurious_interrupt_bug);
	set_trap_gate(16,&coprocessor_error);
	set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
	set_trap_gate(18,&machine_check);
#endif
	set_trap_gate(19,&simd_coprocessor_error);

	set_system_gate(SYSCALL_VECTOR,&system_call);

	/*
	 * default LDT is a single-entry callgate to lcall7 for iBCS
	 * and a callgate to lcall27 for Solaris/x86 binaries
	 */
	set_call_gate(&default_ldt[0],lcall7);
	set_call_gate(&default_ldt[4],lcall27);

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();

	trap_init_hook();
}
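/*
 * Illustrative sketch, not part of the original file: what the _set_gate()
 * asm above actually stores, written as plain C.  An IDT entry is two 32-bit
 * words: the low word packs the segment selector over offset bits 15..0, and
 * the high word packs offset bits 31..16 over P=1 (0x8000), the DPL and the
 * gate type -- the same 0x8000+(dpl<<13)+(type<<8) constant the macro uses.
 * The function name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static void set_gate_in_c(unsigned long *gate_addr, int type, int dpl,
			  void *addr, unsigned short seg)
{
	unsigned long off = (unsigned long) addr;

	gate_addr[0] = ((unsigned long) seg << 16) | (off & 0xffff);
	gate_addr[1] = (off & 0xffff0000) |
		       (0x8000 + (dpl << 13) + (type << 8));
}
#endif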