traps.c

From the Linux 2.6.17.4 source code · C code · 1,211 lines total · page 1 of 3

C
1,211
字号
	/*
	 * NOTE(review): this is the tail of do_debug(); the function's opening
	 * (and the v86-mode check referred to below) is on the previous page
	 * of this listing.
	 *
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		/*
		 * We already checked v86 mode above, so we can
		 * check for kernel mode by just checking the CPL
		 * of CS.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code);

	/* Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	return;
}

/*
 * x87 FPU exception (#MF, trap 16) handler: decode the unmasked
 * exception from the control/status words and deliver SIGFPE with a
 * matching si_code to the faulting task.
 *
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
		case 0x000: /* No unmasked exception */
			return;
		default:    /* Multiple exceptions */
			break;
		case 0x001: /* Invalid Op */
			/*
			 * swd & 0x240 == 0x040: Stack Underflow
			 * swd & 0x240 == 0x240: Stack Overflow
			 * User must clear the SF bit (0x40) if set
			 */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

/* Trap-16 entry point: suppress the legacy IRQ13 and decode the error. */
fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->eip);
}

/*
 * SIMD FPU exception (#XF, trap 19) decoder: like math_error() but the
 * mask and status bits both live in the single MXCSR register.
 */
static void simd_math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000: /* no unmasked exception */
		default:    /* multiple exceptions */
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

/*
 * Trap-19 entry point.  On CPUs with XMM support this is a real SIMD
 * FPU exception; on older CPUs the same vector is reached by an
 * undocumented user-space "cache flush" sequence and is treated as a
 * protection violation instead.
 */
fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
					  long error_code)
{
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->eip);
	} else {
		/*
		 * Handle strange cache flush from user space exception
		 * in all other cases.  This is undocumented behaviour.
		 */
		if (regs->eflags & VM_MASK) {
			handle_vm86_fault((struct kernel_vm86_regs *)regs,
					  error_code);
			return;
		}
		current->thread.trap_no = 19;
		current->thread.error_code = error_code;
		die_if_kernel("cache flush denied", regs, error_code);
		force_sig(SIGSEGV, current);
	}
}

/* Trap 15: deliberately ignored (P6 local-APIC erratum; see #if 0 note). */
fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
					  long error_code)
{
#if 0
	/* No need to warn about this any longer. */
	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

/*
 * Build the "espfix" switch frame: copies the saved iret frame onto the
 * per-cpu 16-bit stack and plants the switch16/switch32 pointer pairs so
 * the return path can bounce between the 32-bit and 16-bit stacks.
 * NOTE(review): the magic constants (8-byte pointer pair, 20-byte iret
 * frame) must match the assembly return path in entry.S — confirm there
 * before changing.
 */
fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
	unsigned long *switch16_ptr, *switch32_ptr;
	struct pt_regs *regs;
	unsigned long stack_top, stack_bot;
	unsigned short iret_frame16_off;
	int cpu = smp_processor_id();
	/* reserve the space on 32bit stack for the magic switch16 pointer */
	memmove(stk, stk + 8, sizeof(struct pt_regs));
	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
	regs = (struct pt_regs *)stk;
	/* now the switch32 on 16bit stack */
	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
	switch32_ptr = (unsigned long *)(stack_top - 8);
	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
	/* copy iret frame on 16bit stack */
	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
	/* fill in the switch pointers */
	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
	switch16_ptr[1] = __ESPFIX_SS;
	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
		8 - CPU_16BIT_STACK_SIZE;
	switch32_ptr[1] = __KERNEL_DS;
}

/*
 * Inverse of setup_x86_bogus_stack(): copy live data back from the
 * per-cpu 16-bit stack (at offset sp) to the corresponding position on
 * the 32-bit stack recorded in the switch32 pointer, returning the new
 * 32-bit stack pointer.
 */
fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
	unsigned long *switch32_ptr;
	unsigned char *stack16, *stack32;
	unsigned long stack_top, stack_bot;
	int len;
	int cpu = smp_processor_id();
	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
	switch32_ptr = (unsigned long *)(stack_top - 8);
	/* copy the data from 16bit stack to 32bit stack */
	len = CPU_16BIT_STACK_SIZE - 8 - sp;
	stack16 = (unsigned char *)(stack_bot + sp);
	stack32 = (unsigned char *)
		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
	memcpy(stack32, stack16, len);
	return stack32;
}

/*
 *  'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	clts();		/* Allow maths ops (or we recurse) */
	if (!tsk_used_math(tsk))
		init_fpu(tsk);		/* first FPU use: start from a clean state */
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when math emulation is compiled out: no coprocessor and no
 * emulator means the task cannot run FP code, so kill it with SIGFPE.
 */
asmlinkage void math_emulate(long arg)
{
	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n",current->comm);
	force_sig(SIGFPE,current);
	schedule();	/* let the signal be delivered; we never emulate */
}

#endif /* CONFIG_MATH_EMULATION */

#ifdef CONFIG_X86_F00F_BUG
/* Work around the Pentium F00F erratum by mapping the IDT read-only. */
void __init trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

/*
 * Write an 8-byte IDT gate descriptor in place.  The immediate operand
 * packs P=1 (0x8000), the DPL and the gate type into the high word of
 * the low dword; the segment selector and handler address arrive via
 * %edx:%eax and are swapped into descriptor layout by the asm.
 */
#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
  int __d0, __d1; \
  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
	"movw %4,%%dx\n\t" \
	"movl %%eax,%0\n\t" \
	"movl %%edx,%1" \
	:"=m" (*((long *) (gate_addr))), \
	 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
	:"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
	 "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)

/*
 * Interrupt gate (type 14, DPL 0): interrupts stay disabled on entry.
 *
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3
 * (callable from user space via int $n).
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
}

/* Trap gate (type 15, DPL 0): interrupts remain enabled on entry. */
static void __init set_trap_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
}

/* Trap gate at DPL 3: user-invokable trap (e.g. into/overflow, syscall). */
static void __init set_system_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
}

/* Task gate (type 5): hardware task switch to the TSS in gdt_entry. */
static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	_set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
}

/*
 * Populate the IDT with the architectural exception handlers, enable
 * FXSR/XMM CPU features when present, install the system-call gate,
 * then hand off to cpu_init() and the subarch trap_init_hook().
 */
void __init trap_init(void)
{
#ifdef CONFIG_EISA
	/* Probe the BIOS "EISA" signature at physical 0xFFFD9. */
	void __iomem *p = ioremap(0x0FFFD9, 4);
	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
		EISA_bus = 1;
	}
	iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	init_apic_mappings();
#endif

	set_trap_gate(0,&divide_error);
	set_intr_gate(1,&debug);
	set_intr_gate(2,&nmi);
	set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
	set_system_gate(4,&overflow);
	set_trap_gate(5,&bounds);
	set_trap_gate(6,&invalid_op);
	set_trap_gate(7,&device_not_available);
	/* double fault gets its own TSS so it works on a corrupt stack */
	set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
	set_trap_gate(9,&coprocessor_segment_overrun);
	set_trap_gate(10,&invalid_TSS);
	set_trap_gate(11,&segment_not_present);
	set_trap_gate(12,&stack_segment);
	set_trap_gate(13,&general_protection);
	set_intr_gate(14,&page_fault);
	set_trap_gate(15,&spurious_interrupt_bug);
	set_trap_gate(16,&coprocessor_error);
	set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
	set_trap_gate(18,&machine_check);
#endif
	set_trap_gate(19,&simd_coprocessor_error);

	if (cpu_has_fxsr) {
		/*
		 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
		 * Generates a compile-time "error: zero width for bit-field" if
		 * the alignment is wrong.
		 */
		struct fxsrAlignAssert {
			int _:!(offsetof(struct task_struct,
					thread.i387.fxsave) & 15);
		};

		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
				"support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_gate(SYSCALL_VECTOR,&system_call);

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();

	trap_init_hook();
}

/* "kstack=N" boot parameter: depth of the stack dump in oops output. */
static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("kstack=", kstack_setup);

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?