⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 traps_32.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
			/*
			 * NOTE(review): this is the tail of die(); the head of
			 * the function lies before this chunk.  For a fault
			 * taken in user mode, report ESP/SS from the trapped
			 * register frame instead of the kernel-side values
			 * computed earlier.
			 */
			if (user_mode(regs)) {
				esp = regs->esp;
				ss = regs->xss & 0xffff;
			}
			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
			print_symbol("%s", regs->eip);
			printk(" SS:ESP %04x:%08lx\n", ss, esp);
		}
		else
			regs = NULL;
	} else
		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

	/* Re-enable console output, taint, and release the die lock. */
	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE);
	__raw_spin_unlock(&die.lock);
	raw_local_irq_restore(flags);

	if (!regs)
		return;

	/* Hand off to a loaded crash kernel, if one wants this crash. */
	if (kexec_should_crash(current))
		crash_kexec(regs);

	/* An oops in interrupt context or with panic_on_oops is fatal. */
	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}

/*
 * die() only when the fault happened in kernel mode; user-mode (and
 * vm86-mode) faults are left for the caller to handle.
 */
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode_vm(regs))
		die(str, regs, err);
}

/*
 * Common trap dispatcher.  Routes the trap one of three ways:
 *  - vm86_trap:   fault taken in vm86 mode and @vm86 is set — forward
 *                 to handle_vm86_trap(); a non-zero return falls back
 *                 to signal delivery.
 *  - trap_signal: user-mode fault — record error_code/trap_no in the
 *                 task and force @signr (with @info when provided).
 *  - kernel_trap: kernel-mode fault — try an exception-table fixup,
 *                 otherwise die().
 *
 * @trapnr:	hardware trap number
 * @signr:	signal delivered for a user-mode fault
 * @str:	message passed to die() on an unfixable kernel fault
 * @vm86:	non-zero if vm86-mode faults go to the vm86 emulation
 * @info:	optional siginfo; NULL means force a bare signal
 */
static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (regs->eflags & VM_MASK) {
		if (vm86)
			goto vm86_trap;
		goto trap_signal;
	}

	if (!user_mode(regs))
		goto kernel_trap;

	trap_signal: {
		/*
		 * We want error_code and trap_no set for userspace faults and
		 * kernelspace faults which result in die(), but not
		 * kernelspace faults which are fixed up.  die() gives the
		 * process no chance to handle the signal and notice the
		 * kernel fault information, so that won't result in polluting
		 * the information about previously queued, but not yet
		 * delivered, faults.  See also do_general_protection below.
		 */
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	kernel_trap: {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_no = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	vm86_trap: {
		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
		if (ret) goto trap_signal;
		return;
	}
}

/*
 * Generate a do_<name>() trap handler: notify the die chain, then
 * dispatch through do_trap() with no siginfo and no vm86 forwarding.
 */
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

/*
 * As DO_ERROR, but fills in a siginfo_t (si_code/si_addr) for the
 * signal and optionally re-enables interrupts first (@irq).
 */
#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	if (irq) \
		local_irq_enable(); \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}

/*
 * As DO_ERROR, but vm86-mode faults are forwarded to the vm86
 * emulation (vm86 argument of do_trap() is 1).
 */
#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

/*
 * vm86-forwarding variant with siginfo; also runs
 * trace_hardirqs_fixup() before notifying the die chain.
 */
#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	trace_hardirqs_fixup(); \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}

/* Handlers for the architectural trap vectors, built from the macros above. */
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)

/*
 * General-protection-fault handler (trap 13).  First tries the lazy
 * TSS I/O-bitmap copy (a #GP from an I/O instruction while the bitmap
 * offset is still the LAZY sentinel); otherwise dispatches like
 * do_trap(): vm86 fault -> vm86 emulation, user fault -> SIGSEGV,
 * kernel fault -> exception fixup / die().
 */
fastcall void __kprobes do_general_protection(struct pt_regs * regs,
					      long error_code)
{
	int cpu = get_cpu();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;

	/*
	 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
	 * invalid offset set (the LAZY one) and the faulting thread has
	 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
	 * and we set the offset field correctly. Then we let the CPU to
	 * restart the faulting instruction.
	 */
	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
	    thread->io_bitmap_ptr) {
		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
		       thread->io_bitmap_max);
		/*
		 * If the previously set map was extending to higher ports
		 * than the current one, pad extra space with 0xff (no access).
		 */
		if (thread->io_bitmap_max < tss->io_bitmap_max)
			memset((char *) tss->io_bitmap +
				thread->io_bitmap_max, 0xff,
				tss->io_bitmap_max - thread->io_bitmap_max);
		tss->io_bitmap_max = thread->io_bitmap_max;
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		tss->io_bitmap_owner = thread;
		put_cpu();
		return;
	}
	put_cpu();

	if (regs->eflags & VM_MASK)
		goto gp_in_vm86;

	if (!user_mode(regs))
		goto gp_in_kernel;

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	/* Rate-limited diagnostic for unhandled user-mode #GPs. */
	if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
	    printk_ratelimit())
		printk(KERN_INFO
		    "%s[%d] general protection eip:%lx esp:%lx error:%lx\n",
		    current->comm, task_pid_nr(current),
		    regs->eip, regs->esp, error_code);

	force_sig(SIGSEGV, current);
	return;

gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	if (!fixup_exception(regs)) {
		current->thread.error_code = error_code;
		current->thread.trap_no = 13;
		if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}

/*
 * NMI with the memory-parity bit set in the reason byte.  Reports the
 * problem (optionally via EDAC), panics when panic_on_unrecovered_nmi
 * is set, otherwise clears the parity error line and continues.
 */
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
		"CPU %d.\n", reason, smp_processor_id());
	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if(edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	clear_mem_error(reason);
}

/*
 * NMI with the I/O-check bit set.  Dumps registers, then pulses the
 * IOCK-disable bit in port 0x61 (set for ~2 seconds, then clear) to
 * re-enable the line.
 */
static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}

/*
 * NMI whose reason byte identifies no known source.  On MCA machines
 * the MCA code may still identify the source; otherwise report it and
 * either panic (panic_on_unrecovered_nmi) or continue.
 */
static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	* is. */
	if( MCA_bus ) {
		mca_handle_nmi();
		return;
	}
#endif
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
		"CPU %d.\n", reason, smp_processor_id());
	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

/* Serializes the register dump below across CPUs. */
static DEFINE_SPINLOCK(nmi_print_lock);

/*
 * Fatal-NMI path (e.g. the NMI watchdog).  Notifies the die chain,
 * dumps registers under nmi_print_lock, optionally kexecs into a
 * crash kernel for a kernel-mode NMI, and kills the current task.
 */
void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
{
	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
	    NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	* We are in trouble anyway, lets at least try
	* to get a message out.
	*/
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", msg);
	printk(" on CPU%d, eip %08lx, registers:\n",
		smp_processor_id(), regs->eip);
	show_registers(regs);
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/* If we are in kernel we are probably nested up pretty bad
	 * and might as well get out now while we still can.
	*/
	if (!user_mode_vm(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}

/*
 * Decode and dispatch an NMI.  Reason bits 0x80 (memory parity) and
 * 0x40 (I/O check) select a specific handler; with neither set the
 * NMI is offered to the DIE_NMI_IPI chain, the NMI watchdog, and the
 * per-CPU callback before being declared unknown.
 */
static __kprobes void default_do_nmi(struct pt_regs * regs)
{
	unsigned char reason = 0;

	/* Only the BSP gets external NMIs from the system.  */
	if (!smp_processor_id())
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, smp_processor_id()))
#endif
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	reassert_nmi();
}

/* Non-zero while NMIs are being deliberately ignored (see stop_nmi()). */
static int ignore_nmis;

/*
 * Low-level NMI entry point: bumps the per-CPU NMI count and hands
 * the NMI to default_do_nmi() unless NMIs are currently ignored.
 */
fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu;

	nmi_enter();

	cpu = smp_processor_id();

	++nmi_count(cpu);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

/* Disable ACPI NMI sources and start ignoring incoming NMIs. */
void stop_nmi(void)
{
	acpi_nmi_disable();
	ignore_nmis++;
}

/* Undo stop_nmi(): resume NMI handling and re-enable ACPI sources. */
void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}

#ifdef CONFIG_KPROBES
/*
 * int3 (breakpoint) handler used when kprobes is built in; offers the
 * trap to the DIE_INT3 chain (where kprobes hooks in) before normal
 * do_trap() dispatch.
 */
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	trace_hardirqs_fixup();
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
	/* This is an interrupt gate, because kprobes wants interrupts
	disabled.  Normal trap handlers don't. */
	restore_interrupts(regs);
	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -