📄 traps.c
字号:
/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>

#ifdef CONFIG_MCA
#include <linux/mca.h>
#include <asm/processor.h>
#endif

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>

#include <asm/smp.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_X86_VISWS_APIC
#include <asm/fixmap.h>
#include <asm/cobalt.h>
#include <asm/lithium.h>
#endif

#include <linux/irq.h>
#include <linux/module.h>

asmlinkage int system_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);

/* Default (empty) LDT shared by processes that never set up their own. */
struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
		{ 0, 0 }, { 0, 0 } };

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

/* Low-level trap entry points, defined in entry.S. */
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

/* How many words of the kernel stack show_stack() dumps. */
int kstack_depth_to_print = 24;

/*
 * If the address is either in the .text section of the
 * kernel, or in the vmalloc'ed module regions, it *may*
 * be the address of a calling routine
 */

#ifdef CONFIG_MODULES

extern struct module *module_list;
extern struct module kernel_module;

static inline int kernel_text_address(unsigned long addr)
{
	int retval = 0;
	struct module *mod;

	if (addr >= (unsigned long) &_stext &&
	    addr <= (unsigned long) &_etext)
		return 1;

	for (mod = module_list; mod != &kernel_module; mod = mod->next) {
		/* mod_bound tests for addr being inside the vmalloc'ed
		 * module area. Of course it'd be better to test only
		 * for the .text subset... */
		if (mod_bound(addr, 0, mod)) {
			retval = 1;
			break;
		}
	}

	return retval;
}

#else

static inline int kernel_text_address(unsigned long addr)
{
	return (addr >= (unsigned long) &_stext &&
		addr <= (unsigned long) &_etext);
}

#endif

/*
 * Walk the given kernel stack upward to its THREAD_SIZE boundary and
 * print every word that looks like a kernel text address.
 */
void show_trace(unsigned long * stack)
{
	int i;
	unsigned long addr;

	if (!stack)
		stack = (unsigned long*)&stack;

	printk("Call Trace: ");
	i = 1;
	while (((long) stack & (THREAD_SIZE-1)) != 0) {
		addr = *stack++;
		if (kernel_text_address(addr)) {
			/* wrap the output every six entries */
			if (i && ((i % 6) == 0))
				printk("\n ");
			printk("[<%08lx>] ", addr);
			i++;
		}
	}
	printk("\n");
}

/* Print a call trace for another task, using its saved kernel esp. */
void show_trace_task(struct task_struct *tsk)
{
	unsigned long esp = tsk->thread.esp;

	/* User space on another CPU? */
	if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1))
		return;
	show_trace((unsigned long *)esp);
}

/*
 * Dump up to kstack_depth_to_print words of the stack at esp, then
 * a call trace for the same stack.
 */
void show_stack(unsigned long * esp)
{
	unsigned long *stack;
	int i;

	// debugging aid: "show_stack(NULL);" prints the
	// back trace for this cpu.
	if(esp==NULL)
		esp=(unsigned long*)&esp;

	stack = esp;
	for(i=0; i < kstack_depth_to_print; i++) {
		/* stop at the top of the THREAD_SIZE-aligned stack */
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i % 8) == 0))
			printk("\n ");
		printk("%08lx ", *stack++);
	}
	printk("\n");
	show_trace(esp);
}

/*
 * Print the register state in regs, plus (for in-kernel faults) the
 * stack contents and the code bytes at the faulting EIP.
 */
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;

	/* Kernel faults don't push esp/ss; the saved frame ends at &regs->esp.
	 * (Source capture had the mojibake "(®s->esp)" here — restored.) */
	esp = (unsigned long) (&regs->esp);
	ss = __KERNEL_DS;
	if (regs->xcs & 3) {
		/* CPL != 0: the fault came from user (or vm86) mode. */
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	printk("CPU: %d\nEIP: %04x:[<%08lx>] %s\nEFLAGS: %08lx\n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
		print_tainted(), regs->eflags);
	printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk("ds: %04x es: %04x ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk("Process %s (pid: %d, stackpage=%08lx)",
		current->comm, current->pid, 4096+(unsigned long)current);
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("\nStack: ");
		show_stack((unsigned long*)esp);

		printk("\nCode: ");
		if(regs->eip < PAGE_OFFSET)
			goto bad;

		for(i=0;i<20;i++)
		{
			unsigned char c;
			/* __get_user: the EIP page may be unmapped */
			if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
bad:
				printk(" Bad EIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}

/* Serializes oops output from multiple CPUs. */
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

/*
 * Print an oops (str, error code, registers) and kill the current
 * task with SIGSEGV. Does not return.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx\n", str, err & 0xffff);
	show_registers(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

/* die() only if the trap came from kernel mode (not vm86, CPL 0). */
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
		die(str, regs, err);
}

/* Read the faulting linear address out of %cr2. */
static inline unsigned long get_cr2(void)
{
	unsigned long address;

	/* get the address */
	__asm__("movl %%cr2,%0":"=r" (address));
	return address;
}

/*
 * Common trap dispatch: route a trap to the vm86 handler, to the
 * kernel exception-fixup table, or to a signal on the current task.
 */
static void inline do_trap(int trapnr, int signr, char *str, int vm86,
			   struct pt_regs * regs, long error_code, siginfo_t *info)
{
	if (vm86 && regs->eflags & VM_MASK)
		goto vm86_trap;

	if (!(regs->xcs & 3))
		goto kernel_trap;

	trap_signal: {
		struct task_struct *tsk = current;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	kernel_trap: {
		/* A kernel-mode trap is only survivable if the faulting
		 * instruction has an exception-table fixup. */
		unsigned long fixup = search_exception_table(regs->eip);
		if (fixup)
			regs->eip = fixup;
		else
			die(str, regs, error_code);
		return;
	}

	vm86_trap: {
		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs,
					   error_code, trapnr);
		if (ret) goto trap_signal;
		return;
	}
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void *)siaddr; \
	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void *)siaddr; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}

DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, get_cr2())

/*
 * #GP (vector 13) gets its own handler: vm86 faults go to the vm86
 * emulation code, kernel faults try the exception table, user faults
 * get SIGSEGV.
 */
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
	if (regs->eflags & VM_MASK)
		goto gp_in_vm86;

	if (!(regs->xcs & 3))
		goto gp_in_kernel;

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	force_sig(SIGSEGV, current);
	return;

gp_in_vm86:
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	{
		unsigned long fixup;
		fixup = search_exception_table(regs->eip);
		if (fixup) {
			regs->eip = fixup;
			return;
		}
		die("general protection fault", regs, error_code);
	}
}

/* NMI cause: memory parity error (bit 7 of port 0x61). */
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
	printk("You probably have a hardware problem with your RAM chips\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

/* NMI cause: I/O channel check (bit 6 of port 0x61). */
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}

/* NMI with neither parity nor IOCK bits set. */
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	 * is. */
	if( MCA_bus ) {
		mca_handle_nmi();
		return;
	}
#endif
	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}

/*
 * Top-level NMI handler: read the cause from port 0x61 and dispatch
 * to the parity / IOCK / unknown handlers, then reassert NMI.
 */
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
	unsigned char reason = inb(0x61);

	++nmi_count(smp_processor_id());
	if (!(reason & 0xc0)) {
#if CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog) {
			nmi_watchdog_tick(regs);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	outb(0x8f, 0x70);
	inb(0x71);		/* dummy */
	outb(0x0f, 0x70);
	inb(0x71);		/* dummy */
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
	unsigned int condition;
	struct task_struct *tsk = current;
	siginfo_t info;
	/* NOTE(review): the source capture is truncated here — the rest of
	 * do_debug (and the remainder of the file) is missing from this
	 * chunk and has not been reconstructed. */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -