📄 traps.c
字号:
/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#ifdef CONFIG_MCA
#include <linux/mca.h>
#include <asm/processor.h>
#endif

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_X86_VISWS_APIC
#include <asm/fixmap.h>
#include <asm/cobalt.h>
#include <asm/lithium.h>
#endif

#include <linux/irq.h>

asmlinkage int system_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);

/* Default (empty) LDT shared by processes that never set one up. */
struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
		{ 0, 0 }, { 0, 0 } };

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

extern void bust_spinlocks(void);

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

/* How many words of a kernel stack show_stack() dumps. */
int kstack_depth_to_print = 24;

/*
 * These constants are for searching for possible module text
 * segments.
 */

/*
 * Walk a kernel stack and print every word that looks like a kernel or
 * module text address ("Call Trace:").  With a NULL argument the current
 * stack (the address of the local) is used.
 */
void show_trace(unsigned long * stack)
{
	int i;
	unsigned long addr, module_start, module_end;

	if (!stack)
		stack = (unsigned long*)&stack;

	printk("Call Trace: ");
	i = 1;
	module_start = VMALLOC_START;
	module_end = VMALLOC_END;
	/* Scan until the stack pointer crosses a THREAD_SIZE boundary. */
	while (((long) stack & (THREAD_SIZE-1)) != 0) {
		addr = *stack++;
		/*
		 * If the address is either in the text segment of the
		 * kernel, or in the region which contains vmalloc'ed
		 * memory, it *may* be the address of a calling
		 * routine; if so, print it so that someone tracing
		 * down the cause of the crash will be able to figure
		 * out the call path that was taken.
		 */
		if (((addr >= (unsigned long) &_stext) &&
		     (addr <= (unsigned long) &_etext)) ||
		    ((addr >= module_start) && (addr <= module_end))) {
			if (i && ((i % 8) == 0))
				printk("\n ");
			printk("[<%08lx>] ", addr);
			i++;
		}
	}
	printk("\n");
}

/*
 * Hex-dump up to kstack_depth_to_print words of the given stack, then
 * follow with a call trace of it.
 */
void show_stack(unsigned long * esp)
{
	unsigned long *stack;
	int i;

	// debugging aid: "show_stack(NULL);" prints the
	// back trace for this cpu.

	if(esp==NULL)
		esp=(unsigned long*)&esp;

	stack = esp;
	for(i=0; i < kstack_depth_to_print; i++) {
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i % 8) == 0))
			printk("\n ");
		printk("%08lx ", *stack++);
	}
	printk("\n");
	show_trace(esp);
}

/*
 * Print a register dump for an oops/trap.  When the fault happened in
 * kernel mode the kernel esp/ss are reconstructed from the pt_regs frame
 * itself, and the stack plus code bytes around EIP are dumped as well.
 */
static void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;

	/*
	 * NOTE(review): the scraped source had the mojibake "®s->esp" here;
	 * restored to "&regs->esp" (kernel frames have no saved esp/ss, so
	 * the frame address itself is the pre-trap stack pointer).
	 */
	esp = (unsigned long) (&regs->esp);
	ss = __KERNEL_DS;
	if (regs->xcs & 3) {
		/* Trap came from user (or vm86) mode: use the saved esp/ss. */
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	printk("CPU: %d\nEIP: %04x:[<%08lx>]\nEFLAGS: %08lx\n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip, regs->eflags);
	printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk("ds: %04x es: %04x ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk("Process %s (pid: %d, stackpage=%08lx)",
		current->comm, current->pid, 4096+(unsigned long)current);
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {

		printk("\nStack: ");
		show_stack((unsigned long*)esp);

		printk("\nCode: ");
		if(regs->eip < PAGE_OFFSET)
			goto bad;

		for(i=0;i<20;i++)
		{
			unsigned char c;
			/* __get_user guards against a faulting EIP. */
			if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
bad:
				printk(" Bad EIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}

/* Serializes concurrent oopses so their output doesn't interleave. */
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

/*
 * Terminal trap handling: print the message and register dump, then kill
 * the current process.  Does not return.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %04lx\n", str, err & 0xffff);
	show_registers(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

/* die() only if the trap came from kernel mode (not user, not vm86). */
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
		die(str, regs, err);
}

/* Read CR2, the linear address that caused the last fault. */
static inline unsigned long get_cr2(void)
{
	unsigned long address;

	/* get the address */
	__asm__("movl %%cr2,%0":"=r" (address));
	return address;
}

/*
 * Common trap dispatch:
 *  - vm86 mode (when @vm86 is set) -> handle_vm86_trap, which may still
 *    request a signal;
 *  - kernel mode -> exception-table fixup if one exists, else die();
 *  - user mode -> deliver @signr (with optional siginfo) to current.
 */
static void inline do_trap(int trapnr, int signr, char *str, int vm86,
			   struct pt_regs * regs, long error_code, siginfo_t *info)
{
	if (vm86 && regs->eflags & VM_MASK)
		goto vm86_trap;

	if (!(regs->xcs & 3))
		goto kernel_trap;

	trap_signal: {
		struct task_struct *tsk = current;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	kernel_trap: {
		unsigned long fixup = search_exception_table(regs->eip);
		if (fixup)
			regs->eip = fixup;
		else
			die(str, regs, error_code);
		return;
	}

	vm86_trap: {
		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs,
					   error_code, trapnr);
		if (ret) goto trap_signal;
		return;
	}
}

/* Boilerplate generators for the simple trap handlers below. */
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void *)siaddr; \
	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void *)siaddr; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}

DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, get_cr2())

/*
 * General protection fault (#GP, trap 13): vm86 faults go to the vm86
 * emulation, kernel faults try an exception-table fixup, user faults get
 * SIGSEGV.
 */
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
	if (regs->eflags & VM_MASK)
		goto gp_in_vm86;

	if (!(regs->xcs & 3))
		goto gp_in_kernel;

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	force_sig(SIGSEGV, current);
	return;

gp_in_vm86:
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	{
		unsigned long fixup;
		fixup = search_exception_table(regs->eip);
		if (fixup) {
			regs->eip = fixup;
			return;
		}
		die("general protection fault", regs, error_code);
	}
}

/* NMI caused by a memory parity error: report, then clear+disable the line. */
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	/*
	 * NOTE(review): this message was split by a raw newline inside the
	 * string literal in the scraped source; rejoined into one legal
	 * string literal.
	 */
	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
	printk("You probably have a hardware problem with your RAM chips\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

/* NMI caused by an I/O check (IOCK): dump registers and pulse the line. */
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}

/* NMI with no recognized cause; on MCA machines ask the MCA code first. */
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	 * is. */
	if( MCA_bus ) {
		mca_handle_nmi();
		return;
	}
#endif
	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}

#if CONFIG_X86_IO_APIC

int nmi_watchdog = 1;

static int __init setup_nmi_watchdog(char *str)
{
	get_option(&str, &nmi_watchdog);
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

static spinlock_t nmi_print_lock = SPIN_LOCK_UNLOCKED;

inline void nmi_watchdog_tick(struct pt_regs * regs)
{
	/*
	 * the best way to detect whether a CPU has a 'hard lockup' problem
	 * is to check it's local APIC timer IRQ counts. If they are not
	 * changing then that CPU has some problem.
	 *
	 * as these watchdog NMI IRQs are broadcasted to every CPU, here
	 * we only have to check the current processor.
	 *
	 * since NMIs dont listen to _any_ locks, we have to be extremely
	 * careful not to rely on unsafe variables. The printk might lock
	 * up though, so we have to break up console_lock first ...
	 * [when there will be more tty-related locks, break them up
	 *  here too!]
	 */
	static unsigned int last_irq_sums [NR_CPUS],
				alert_counter [NR_CPUS];

	/*
	 * Since current-> is always on the stack, and we always switch
	 * the stack NMI-atomically, it's safe to use smp_processor_id().
	 */
	int sum, cpu = smp_processor_id();

	sum = apic_timer_irqs[cpu];

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*HZ) {
			spin_lock(&nmi_print_lock);
			/*
			 * We are in trouble anyway, lets at least try
			 * to get a message out.
			 */
			bust_spinlocks();
			printk("NMI Watchdog detected LOCKUP on CPU%d, registers:\n", cpu);
			show_registers(regs);
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&nmi_print_lock);
			do_exit(SIGSEGV);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
}
#endif

/*
 * Top-level NMI handler: classify via port 0x61 status bits (0x80 = memory
 * parity, 0x40 = I/O check); anything else is either the NMI watchdog or
 * an unknown source.
 */
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
	unsigned char reason = inb(0x61);

	++nmi_count(smp_processor_id());
	if (!(reason & 0xc0)) {
#if CONFIG_X86_IO_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog) {
			nmi_watchdog_tick(regs);
			return;
		} else
			unknown_nmi_error(reason, regs);
#else
		unknown_nmi_error(reason, regs);
#endif
		return;
	}
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	outb(0x8f, 0x70);
	inb(0x71);		/* dummy */
	outb(0x0f, 0x70);
	inb(0x71);		/* dummy */
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * [NOTE(review): source truncated here mid-comment; comment closed to
 *  keep the translation unit well-formed.]
 */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -