/*
 * traps.c — excerpt from the Linux 2.6.17.4 sources.
 * (Scraped code-viewer metadata: C, 1,211 lines total, page 1 of 3;
 * folded into this comment so the file remains valid C.)
 */
			regs = NULL;  	} else		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");	bust_spinlocks(0);	die.lock_owner = -1;	spin_unlock_irqrestore(&die.lock, flags);	if (!regs)		return;	if (kexec_should_crash(current))		crash_kexec(regs);	if (in_interrupt())		panic("Fatal exception in interrupt");	if (panic_on_oops) {		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");		ssleep(5);		panic("Fatal exception");	}	oops_exit();	do_exit(SIGSEGV);}static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err){	if (!user_mode_vm(regs))		die(str, regs, err);}static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,			      struct pt_regs * regs, long error_code,			      siginfo_t *info){	struct task_struct *tsk = current;	tsk->thread.error_code = error_code;	tsk->thread.trap_no = trapnr;	if (regs->eflags & VM_MASK) {		if (vm86)			goto vm86_trap;		goto trap_signal;	}	if (!user_mode(regs))		goto kernel_trap;	trap_signal: {		if (info)			force_sig_info(signr, info, tsk);		else			force_sig(signr, tsk);		return;	}	kernel_trap: {		if (!fixup_exception(regs))			die(str, regs, error_code);		return;	}	vm86_trap: {		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);		if (ret) goto trap_signal;		return;	}}#define DO_ERROR(trapnr, signr, str, name) \fastcall void do_##name(struct pt_regs * regs, long error_code) \{ \	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \						== NOTIFY_STOP) \		return; \	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \}#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \fastcall void do_##name(struct pt_regs * regs, long error_code) \{ \	siginfo_t info; \	info.si_signo = signr; \	info.si_errno = 0; \	info.si_code = sicode; \	info.si_addr = (void __user *)siaddr; \	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \						== NOTIFY_STOP) \		return; \	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \}#define 
DO_VM86_ERROR(trapnr, signr, str, name) \fastcall void do_##name(struct pt_regs * regs, long error_code) \{ \	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \						== NOTIFY_STOP) \		return; \	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \}#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \fastcall void do_##name(struct pt_regs * regs, long error_code) \{ \	siginfo_t info; \	info.si_signo = signr; \	info.si_errno = 0; \	info.si_code = sicode; \	info.si_addr = (void __user *)siaddr; \	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \						== NOTIFY_STOP) \		return; \	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \}DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)#ifndef CONFIG_KPROBESDO_VM86_ERROR( 3, SIGTRAP, "int3", int3)#endifDO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)fastcall void __kprobes do_general_protection(struct pt_regs * regs,					      long error_code){	int cpu = get_cpu();	struct tss_struct *tss = &per_cpu(init_tss, cpu);	struct thread_struct *thread = &current->thread;	/*	 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an	 * invalid offset set (the LAZY one) and the faulting thread has	 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS	 * and we set the offset field correctly. Then we let the CPU to	 * restart the faulting instruction.	 
*/	if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&	    thread->io_bitmap_ptr) {		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,		       thread->io_bitmap_max);		/*		 * If the previously set map was extending to higher ports		 * than the current one, pad extra space with 0xff (no access).		 */		if (thread->io_bitmap_max < tss->io_bitmap_max)			memset((char *) tss->io_bitmap +				thread->io_bitmap_max, 0xff,				tss->io_bitmap_max - thread->io_bitmap_max);		tss->io_bitmap_max = thread->io_bitmap_max;		tss->io_bitmap_base = IO_BITMAP_OFFSET;		tss->io_bitmap_owner = thread;		put_cpu();		return;	}	put_cpu();	current->thread.error_code = error_code;	current->thread.trap_no = 13;	if (regs->eflags & VM_MASK)		goto gp_in_vm86;	if (!user_mode(regs))		goto gp_in_kernel;	current->thread.error_code = error_code;	current->thread.trap_no = 13;	force_sig(SIGSEGV, current);	return;gp_in_vm86:	local_irq_enable();	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);	return;gp_in_kernel:	if (!fixup_exception(regs)) {		if (notify_die(DIE_GPF, "general protection fault", regs,				error_code, 13, SIGSEGV) == NOTIFY_STOP)			return;		die("general protection fault", regs, error_code);	}}static void mem_parity_error(unsigned char reason, struct pt_regs * regs){	printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "			"to continue\n");	printk(KERN_EMERG "You probably have a hardware problem with your RAM "			"chips\n");	/* Clear and disable the memory parity error line. 
*/	clear_mem_error(reason);}static void io_check_error(unsigned char reason, struct pt_regs * regs){	unsigned long i;	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");	show_registers(regs);	/* Re-enable the IOCK line, wait for a few seconds */	reason = (reason & 0xf) | 8;	outb(reason, 0x61);	i = 2000;	while (--i) udelay(1000);	reason &= ~8;	outb(reason, 0x61);}static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs){#ifdef CONFIG_MCA	/* Might actually be able to figure out what the guilty party	* is. */	if( MCA_bus ) {		mca_handle_nmi();		return;	}#endif	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",		reason, smp_processor_id());	printk("Dazed and confused, but trying to continue\n");	printk("Do you have a strange power saving mode enabled?\n");}static DEFINE_SPINLOCK(nmi_print_lock);void die_nmi (struct pt_regs *regs, const char *msg){	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==	    NOTIFY_STOP)		return;	spin_lock(&nmi_print_lock);	/*	* We are in trouble anyway, lets at least try	* to get a message out.	*/	bust_spinlocks(1);	printk(KERN_EMERG "%s", msg);	printk(" on CPU%d, eip %08lx, registers:\n",		smp_processor_id(), regs->eip);	show_registers(regs);	printk(KERN_EMERG "console shuts up ...\n");	console_silent();	spin_unlock(&nmi_print_lock);	bust_spinlocks(0);	/* If we are in kernel we are probably nested up pretty bad	 * and might aswell get out now while we still can.	*/	if (!user_mode_vm(regs)) {		current->thread.trap_no = 2;		crash_kexec(regs);	}	do_exit(SIGSEGV);}static void default_do_nmi(struct pt_regs * regs){	unsigned char reason = 0;	/* Only the BSP gets external NMIs from the system.  */	if (!smp_processor_id())		reason = get_nmi_reason(); 	if (!(reason & 0xc0)) {		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)							== NOTIFY_STOP)			return;#ifdef CONFIG_X86_LOCAL_APIC		/*		 * Ok, so this is none of the documented NMI sources,		 * so it must be the NMI watchdog.		 
*/		if (nmi_watchdog) {			nmi_watchdog_tick(regs);			return;		}#endif		unknown_nmi_error(reason, regs);		return;	}	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)		return;	if (reason & 0x80)		mem_parity_error(reason, regs);	if (reason & 0x40)		io_check_error(reason, regs);	/*	 * Reassert NMI in case it became active meanwhile	 * as it's edge-triggered.	 */	reassert_nmi();}static int dummy_nmi_callback(struct pt_regs * regs, int cpu){	return 0;} static nmi_callback_t nmi_callback = dummy_nmi_callback; fastcall void do_nmi(struct pt_regs * regs, long error_code){	int cpu;	nmi_enter();	cpu = smp_processor_id();	++nmi_count(cpu);	if (!rcu_dereference(nmi_callback)(regs, cpu))		default_do_nmi(regs);	nmi_exit();}void set_nmi_callback(nmi_callback_t callback){	vmalloc_sync_all();	rcu_assign_pointer(nmi_callback, callback);}EXPORT_SYMBOL_GPL(set_nmi_callback);void unset_nmi_callback(void){	nmi_callback = dummy_nmi_callback;}EXPORT_SYMBOL_GPL(unset_nmi_callback);#ifdef CONFIG_KPROBESfastcall void __kprobes do_int3(struct pt_regs *regs, long error_code){	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)			== NOTIFY_STOP)		return;	/* This is an interrupt gate, because kprobes wants interrupts	disabled.  Normal trap handlers don't. */	restore_interrupts(regs);	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);}#endif/* * Our handling of the processor debug registers is non-trivial. * We do not clear them on entry and exit from the kernel. Therefore * it is possible to get a watchpoint trap here from inside the kernel. * However, the code in ./ptrace.c has ensured that the user can * only set watchpoints on userspace addresses. Therefore the in-kernel * watchpoint trap can only occur in code which is reading/writing * from user space. Such code must not hold kernel locks (since it * can equally take a page fault), therefore it is safe to call * force_sig_info even though that claims and releases locks. 
*  * Code in ./signal.c ensures that the debug control register * is restored before we deliver any signal, and therefore that * user code runs with the correct debug control register even though * we clear it here. * * Being careful here means that we don't have to be as careful in a * lot of more complicated places (task switching can be a bit lazy * about restoring all the debug state, and ptrace doesn't have to * find every occurrence of the TF bit that could be saved away even * by user code) */fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code){	unsigned int condition;	struct task_struct *tsk = current;	get_debugreg(condition, 6);	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,					SIGTRAP) == NOTIFY_STOP)		return;	/* It's safe to allow irq's after DR6 has been saved */	if (regs->eflags & X86_EFLAGS_IF)		local_irq_enable();	/* Mask out spurious debug traps due to lazy DR7 setting */	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {		if (!tsk->thread.debugreg[7])			goto clear_dr7;	}	if (regs->eflags & VM_MASK)		goto debug_vm86;	/* Save debug status register where ptrace can see it */	tsk->thread.debugreg[6] = condition;

/*
 * (Code-viewer keyboard-shortcut help captured by the page scrape —
 * copy: Ctrl+C, search: Ctrl+F, fullscreen: F11, font size: Ctrl+= / Ctrl+-.
 * Not part of the kernel source; folded into this comment so the file
 * remains valid C.)
 */