
traps.c
From linux-2.6.15.6 (C source)
Page 1 of 2; the listing below is truncated partway through the file.
/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>

#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/module.h>

#include "mach_traps.h"

asmlinkage int system_call(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
                { 0, 0 }, { 0, 0 } };

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;
        spin_lock_irqsave(&die_notifier_lock, flags);
        err = notifier_chain_register(&i386die_chain, nb);
        spin_unlock_irqrestore(&die_notifier_lock, flags);
        return err;
}
EXPORT_SYMBOL(register_die_notifier);

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        return  p > (void *)tinfo &&
                p < (void *)tinfo + THREAD_SIZE - 3;
}

static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long ebp)
{
        unsigned long addr;

#ifdef CONFIG_FRAME_POINTER
        while (valid_stack_ptr(tinfo, (void *)ebp)) {
                addr = *(unsigned long *)(ebp + 4);
                printk(" [<%08lx>] ", addr);
                print_symbol("%s", addr);
                printk("\n");
                ebp = *(unsigned long *)ebp;
        }
#else
        while (valid_stack_ptr(tinfo, stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk(" [<%08lx>]", addr);
                        print_symbol(" %s", addr);
                        printk("\n");
                }
        }
#endif
        return ebp;
}

void show_trace(struct task_struct *task, unsigned long * stack)
{
        unsigned long ebp;

        if (!task)
                task = current;

        if (task == current) {
                /* Grab ebp right from our regs */
                asm ("movl %%ebp, %0" : "=r" (ebp) : );
        } else {
                /* ebp is the last reg pushed by switch_to */
                ebp = *(unsigned long *) task->thread.esp;
        }

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp);
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                printk(" =======================\n");
        }
}

void show_stack(struct task_struct *task, unsigned long *esp)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", *stack++);
        }
        printk("\nCall Trace:\n");
        show_trace(task, esp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        savesegment(ss, ss);
        if (user_mode(regs)) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\nEFLAGS: %08lx"
                        "   (%s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, system_utsname.release);
        print_symbol("EIP is at %s\n", regs->eip);
        printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk("ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk("Process %s (pid: %d, threadinfo=%p task=%p)",
                current->comm, current->pid, current_thread_info(), current);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                u8 __user *eip;

                printk("\nStack: ");
                show_stack(NULL, (unsigned long*)esp);

                printk("Code: ");

                eip = (u8 __user *)regs->eip - 43;
                for (i = 0; i < 64; i++, eip++) {
                        unsigned char c;

                        if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 __user *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}

static void handle_BUG(struct pt_regs *regs)
{
        unsigned short ud2;
        unsigned short line;
        char *file;
        char c;
        unsigned long eip;

        eip = regs->eip;

        if (eip < PAGE_OFFSET)
                goto no_bug;
        if (__get_user(ud2, (unsigned short __user *)eip))
                goto no_bug;
        if (ud2 != 0x0b0f)
                goto no_bug;
        if (__get_user(line, (unsigned short __user *)(eip + 2)))
                goto bug;
        if (__get_user(file, (char * __user *)(eip + 4)) ||
                (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
                file = "<bad filename>";

        printk("------------[ cut here ]------------\n");
        printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);

no_bug:
        return;

        /* Here we know it was a BUG but file-n-line is unavailable */
bug:
        printk("Kernel BUG\n");
}

/* This is gone through when something in the kernel
 * has done something bad and is about to be terminated.
*/
void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock =                 SPIN_LOCK_UNLOCKED,
                .lock_owner =           -1,
                .lock_owner_depth =     0
        };
        static int die_counter;

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irq(&die.lock);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }

        if (++die.lock_owner_depth < 3) {
                int nl = 0;
                handle_BUG(regs);
                printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk("PREEMPT ");
                nl = 1;
#endif
#ifdef CONFIG_SMP
                printk("SMP ");
                nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                printk("DEBUG_PAGEALLOC");
                nl = 1;
#endif
                if (nl)
                        printk("\n");
                notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
                show_registers(regs);
        } else
                printk(KERN_ERR "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irq(&die.lock);

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}

static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
                              struct pt_regs * regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

        trap_signal: {
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret) goto trap_signal;
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}

DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)

fastcall void __kprobes do_general_protection(struct pt_regs * regs,
                                              long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU to
         * restart the faulting instruction.
         */
        if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map was extending to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
                tss->io_bitmap_owner = thread;
                put_cpu();
                return;
        }
        put_cpu();

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!user_mode(regs))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}

static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
        unsigned long i;

        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i) udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
        * is. */
        if( MCA_bus ) {
                mca_handle_nmi();
                return;
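
The listing above stops where page 1 of the original ends, partway through unknown_nmi_error(). Two short reading aids follow; neither is part of traps.c. First, the DO_ERROR() macro family stamps out one small handler per trap. Here is roughly what DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) expands to after preprocessing, reconstructed by hand from the macro definition above (whitespace may differ from real preprocessor output):

fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
{
        /* Give registered die-chain clients (debuggers, kprobes) first shot. */
        if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code, 10, SIGSEGV)
                                                == NOTIFY_STOP)
                return;
        /* vm86 = 0, no siginfo: deliver a plain SIGSEGV via do_trap(). */
        do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
}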
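
Second, register_die_notifier() and the notify_die() calls above are the hook a debugger or crash logger uses to observe traps and oopses. Below is a minimal, hypothetical client module sketch, assuming the 2.6.15-era struct die_args layout from <asm/kdebug.h> (regs, str, err, trapnr, signr) and the classic notifier_block callback signature; the example_* names are invented for illustration, and cleanup on module unload is omitted:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Hypothetical notifier: log the trap number and EIP whenever die() reports an oops. */
static int example_die_event(struct notifier_block *self,
                             unsigned long val, void *data)
{
        struct die_args *args = data;   /* packed by notify_die() */

        if (val == DIE_OOPS)
                printk(KERN_INFO "example: oops \"%s\", trap %d, eip %08lx\n",
                       args->str, args->trapnr, args->regs->eip);

        return NOTIFY_DONE;             /* keep walking the i386die_chain */
}

static struct notifier_block example_die_nb = {
        .notifier_call = example_die_event,
};

static int __init example_init(void)
{
        /* Exported by traps.c above; registration is serialised by die_notifier_lock. */
        return register_die_notifier(&example_die_nb);
}
module_init(example_init);
MODULE_LICENSE("GPL");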
