traps_32.c
Linux kernel source code (C), page 1 of 3
/*
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
	unsigned int condition;
	struct task_struct *tsk = current;

	trace_hardirqs_fixup();

	get_debugreg(condition, 6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
					SIGTRAP) == NOTIFY_STOP)
		return;
	/* It's safe to allow irq's after DR6 has been saved */
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg[7])
			goto clear_dr7;
	}

	if (regs->eflags & VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg[6] = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		/*
		 * We already checked v86 mode above, so we can
		 * check for kernel mode by just checking the CPL
		 * of CS.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code);

	/*
	 * Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	return;
}
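/*
 * Illustrative aside (not part of traps_32.c): the tsk->thread.debugreg[7]
 * value that do_debug() consults above is normally populated from user
 * space via ptrace. A minimal sketch of that path, assuming an x86 Linux
 * host whose <sys/user.h> exposes u_debugreg; the variable names and the
 * program itself are ours, and error checking is omitted.
 */
#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile int watched;		/* variable to place the watchpoint on */

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {			/* tracee */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		watched = 1;		/* this write hits the watchpoint */
		_exit(0);
	}

	waitpid(pid, NULL, 0);

	/* DR0 = address to watch (same address in parent and child) */
	ptrace(PTRACE_POKEUSER, pid,
	       offsetof(struct user, u_debugreg[0]), (void *)&watched);
	/*
	 * DR7 = 0xd0001: bit 0 locally enables DR0, bits 16-17 = 01
	 * (break on data write), bits 18-19 = 11 (4-byte watchpoint).
	 */
	ptrace(PTRACE_POKEUSER, pid,
	       offsetof(struct user, u_debugreg[7]), (void *)0xd0001);

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);		/* tracee stops with SIGTRAP here,
					   delivered by send_sigtrap() above */
	printf("tracee stopped on the watchpoint\n");

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}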
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
		case 0x000: /* No unmasked exception */
			return;
		default:    /* Multiple exceptions */
			break;
		case 0x001: /* Invalid Op */
			/*
			 * swd & 0x240 == 0x040: Stack Underflow
			 * swd & 0x240 == 0x240: Stack Overflow
			 * User must clear the SF bit (0x40) if set
			 */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->eip);
}

static void simd_math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
					  long error_code)
{
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->eip);
	} else {
		/*
		 * Handle strange cache flush from user space exception
		 * in all other cases.  This is undocumented behaviour.
		 */
		if (regs->eflags & VM_MASK) {
			handle_vm86_fault((struct kernel_vm86_regs *)regs,
					  error_code);
			return;
		}
		current->thread.trap_no = 19;
		current->thread.error_code = error_code;
		die_if_kernel("cache flush denied", regs, error_code);
		force_sig(SIGSEGV, current);
	}
}
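/*
 * Illustrative aside (not part of traps_32.c): both decoders above reduce
 * to "status & ~mask" and map the one surviving exception bit onto an
 * si_code. A minimal user-space sketch, assuming glibc's feenableexcept()
 * GNU extension (link with -lm): unmask divide-by-zero and observe the
 * FPE_FLTDIV code the kernel fills in. Depending on whether the compiler
 * emits an x87 or an SSE divide, the trap arrives via do_coprocessor_error()
 * or do_simd_coprocessor_error(); both deliver FPE_FLTDIV.
 */
#define _GNU_SOURCE
#include <fenv.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_fpe(int sig, siginfo_t *si, void *ctx)
{
	/* si->si_code was chosen by the kernel decode shown above */
	printf("SIGFPE si_code=%d (FPE_FLTDIV=%d)\n", si->si_code, FPE_FLTDIV);
	_exit(0);	/* don't return: the faulting divide would re-run */
}

int main(void)
{
	struct sigaction sa;
	volatile double zero = 0.0;	/* volatile defeats constant folding */

	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = on_fpe;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGFPE, &sa, NULL);

	feenableexcept(FE_DIVBYZERO);	/* clear the mask bit: trap, not Inf */
	return (int)(1.0 / zero);	/* raises the unmasked exception */
}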
fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
					  long error_code)
{
#if 0
	/* No need to warn about this any longer. */
	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

fastcall unsigned long patch_espfix_desc(unsigned long uesp,
					  unsigned long kesp)
{
	struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
	unsigned long new_kesp = kesp - base;
	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

	/* Set up base for espfix segment */
	desc &= 0x00f0ff0000000000ULL;
	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)base) << 32) & 0xff00000000000000ULL) |
		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
		(lim_pages & 0xffff);
	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
	return new_kesp;
}
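/*
 * Illustrative aside (not part of traps_32.c): the masks above follow the
 * i386 segment-descriptor layout, with the base split across descriptor
 * bits 16-39 and 56-63 and the limit split across bits 0-15 and 48-51.
 * A standalone, hosted-C sketch with hypothetical pack/unpack helpers
 * that round-trips a base/limit pair through that layout:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the bit manipulation in patch_espfix_desc() */
static uint64_t pack_desc(uint64_t desc, uint32_t base, uint32_t lim_pages)
{
	desc &= 0x00f0ff0000000000ULL;	/* keep type/DPL/present/flag bits */
	desc |= (((uint64_t)base << 16) & 0x000000ffffff0000ULL) |	/* base 23:0   */
		(((uint64_t)base << 32) & 0xff00000000000000ULL) |	/* base 31:24  */
		(((uint64_t)lim_pages << 32) & 0x000f000000000000ULL) |	/* limit 19:16 */
		(lim_pages & 0xffff);					/* limit 15:0  */
	return desc;
}

static uint32_t unpack_base(uint64_t desc)
{
	return (uint32_t)((desc >> 16) & 0xffffff) |	/* base 23:0  */
	       (uint32_t)((desc >> 32) & 0xff000000);	/* base 31:24 */
}

static uint32_t unpack_limit(uint64_t desc)
{
	return (uint32_t)(desc & 0xffff) |		/* limit 15:0  */
	       (uint32_t)((desc >> 32) & 0xf0000);	/* limit 19:16 */
}

int main(void)
{
	uint32_t base = 0xc1234000, lim = 0x00042;
	uint64_t desc = pack_desc(0, base, lim);

	assert(unpack_base(desc) == base);
	assert(unpack_limit(desc) == lim);
	printf("desc=%#llx base=%#x limit=%#x\n",
	       (unsigned long long)desc, unpack_base(desc), unpack_limit(desc));
	return 0;
}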
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	clts();		/* Allow maths ops (or we recurse) */
	if (!tsk_used_math(tsk))
		init_fpu(tsk);
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n",current->comm);
	force_sig(SIGFPE,current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
	_set_gate(n, DESCTYPE_INT, addr, __KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at directory privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
	_set_gate(n, DESCTYPE_INT | DESCTYPE_DPL3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
	_set_gate(n, DESCTYPE_TRAP, addr, __KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
	_set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, addr, __KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	_set_gate(n, DESCTYPE_TASK, (void *)0, (gdt_entry<<3));
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = ioremap(0x0FFFD9, 4);
	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
		EISA_bus = 1;
	}
	iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	init_apic_mappings();
#endif

	set_trap_gate(0,&divide_error);
	set_intr_gate(1,&debug);
	set_intr_gate(2,&nmi);
	set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
	set_system_gate(4,&overflow);
	set_trap_gate(5,&bounds);
	set_trap_gate(6,&invalid_op);
	set_trap_gate(7,&device_not_available);
	set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
	set_trap_gate(9,&coprocessor_segment_overrun);
	set_trap_gate(10,&invalid_TSS);
	set_trap_gate(11,&segment_not_present);
	set_trap_gate(12,&stack_segment);
	set_trap_gate(13,&general_protection);
	set_intr_gate(14,&page_fault);
	set_trap_gate(15,&spurious_interrupt_bug);
	set_trap_gate(16,&coprocessor_error);
	set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
	set_trap_gate(18,&machine_check);
#endif
	set_trap_gate(19,&simd_coprocessor_error);

	if (cpu_has_fxsr) {
		/*
		 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
		 * Generates a compile-time "error: zero width for bit-field" if
		 * the alignment is wrong.
		 */
		struct fxsrAlignAssert {
			int _:!(offsetof(struct task_struct,
					thread.i387.fxsave) & 15);
		};

		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
				"support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_gate(SYSCALL_VECTOR,&system_call);

	/* Reserve all the builtin and the syscall vector. */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);
	set_bit(SYSCALL_VECTOR, used_vectors);

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();

	trap_init_hook();
}

static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("kstack=", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;
	return 1;
}
__setup("code_bytes=", code_bytes_setup);
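/*
 * Illustrative aside (not part of traps_32.c): the fxsrAlignAssert struct
 * in trap_init() above relies on a named zero-width bit-field being a
 * constraint violation, so the build fails when the fxsave area is
 * misaligned. The same trick works in any C code; a minimal sketch with
 * a hypothetical struct:
 */
#include <stddef.h>
#include <stdio.h>

struct fxsave_like {
	char pad[16];
	long long area[64];	/* field that must sit on a 16-byte boundary */
};

/*
 * If the offset test is false the bit-field width collapses to 0 and the
 * compiler stops with "zero width for bit-field", exactly as in trap_init().
 * Try shrinking pad[] to 8 bytes to see the build break.
 */
struct align_assert {
	int _:!(offsetof(struct fxsave_like, area) & 15);
};

int main(void)
{
	printf("offsetof(area) = %zu\n", offsetof(struct fxsave_like, area));
	return 0;
}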
