
📄 traps_64.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_disable();
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	preempt_enable_no_resched();
}

int kstack_depth_to_print = 12;

#ifdef CONFIG_KALLSYMS
void printk_address(unsigned long address)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];

	symname = kallsyms_lookup(address, &symsize, &offset,
					&modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
		address, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address)
{
	printk(" [<%016lx>]\n", address);
}
#endif

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ...
		 N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Dont look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}

#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
	void *t = (void *)tinfo;
	return p > t && p < t + THREAD_SIZE - 3;
}

void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
		unsigned long *stack,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!tsk)
		tsk = current;

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (tsk && tsk != current)
			stack = (unsigned long *)tsk->thread.rsp;
	}

	/*
	 * Print function call entries within a stack. 'cond' is the
	 * "end of stackframe" condition, that the 'stack++'
	 * iteration will eventually trigger.
	 */
#define HANDLE_STACK(cond) \
	do while (cond) { \
		unsigned long addr = *stack++; \
		/* Use unlocked access here because except for NMIs	\
		   we should be already protected against module unloads */ \
		if (__kernel_text_address(addr)) { \
			/* \
			 * If the address is either in the text segment of the \
			 * kernel, or in the region which contains vmalloc'ed \
			 * memory, it *may* be the address of a calling \
			 * routine; if so, print it so that someone tracing \
			 * down the cause of the crash will be able to figure \
			 * out the call path that was taken. \
			 */ \
			ops->address(data, addr); \
		} \
	} while (0)

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;
			HANDLE_STACK (stack < estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				HANDLE_STACK (stack < irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	tinfo = task_thread_info(tsk);
	HANDLE_STACK (valid_stack_ptr(tinfo, stack));
#undef HANDLE_STACK
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk(" <%s> ", name);
	return 0;
}

static void print_trace_address(void *data, unsigned long addr)
{
	touch_nmi_watchdog();
	printk_address(addr);
}

static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
{
	printk("\nCall Trace:\n");
	dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
	printk("\n");
}

static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (rsp == NULL) {
		if (tsk)
			rsp = (unsigned long *)tsk->thread.rsp;
		else
			rsp = (unsigned long *)&rsp;
	}

	stack = rsp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	show_trace(tsk, regs, rsp);
}

void show_stack(struct task_struct *tsk, unsigned long *rsp)
{
	_show_stack(tsk, NULL, rsp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &dummy);
}
EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = !user_mode(regs);
	unsigned long rsp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;

	rsp = regs->rsp;
	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long *)rsp);

		printk("\nCode: ");
		if (regs->rip < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char *)regs->rip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}

int is_valid_bugaddr(unsigned long rip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}

#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
	BUG();
}
EXPORT_SYMBOL(out_of_line_bug);
#endif

static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
}

void __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_registers(regs);
	add_taint(TAINT_DIE);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->rip);
	printk(" RSP <%016lx>\n", regs->rsp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

	if (!user_mode(regs))
		report_bug(regs->rip, regs);

	__die(str, regs, err);
	oops_end(flags);
	do_exit(SIGSEGV);
}

void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags = oops_begin();

	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out.
	 */
	printk(str, smp_processor_id());
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGSEGV);
}

static void __kprobes do_trap(int trapnr, int signr, char *str,
			      struct pt_regs *regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;
