/*
 * traps.c — i386 trap/exception handling (excerpt).
 * (Web-viewer page header removed; not part of the source file.)
 */
*/
	/*
	 * NOTE(review): tail of a function whose header lies above this
	 * excerpt.  Writing CMOS index port 0x70 with bit 7 set, then
	 * cleared, looks like the usual RTC "mask NMI, then unmask"
	 * sequence, with dummy reads of data port 0x71 in between —
	 * confirm against the full file.
	 */
	outb(0x8f, 0x70);
	inb(0x71);		/* dummy */
	outb(0x0f, 0x70);
	inb(0x71);		/* dummy */
}

/* Default NMI callback: claims nothing, so default_do_nmi() runs. */
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

/*
 * Installable NMI hook (e.g. for a watchdog or crash dumper).  Points at
 * the dummy callback until someone calls set_nmi_callback().
 */
static nmi_callback_t nmi_callback = dummy_nmi_callback;

/*
 * NMI entry point: the installed callback gets first refusal; if it
 * returns 0 the NMI falls through to the default handling.
 */
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu = smp_processor_id();

	if (!nmi_callback(regs, cpu))
		default_do_nmi(regs);
}

/*
 * Install an NMI callback.  No locking is visible here; presumably a
 * single writer is assumed — TODO confirm against callers.
 */
void set_nmi_callback(nmi_callback_t callback)
{
	nmi_callback = callback;
}

/* Restore the default (do-nothing) NMI callback. */
void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
	unsigned int condition;
	struct task_struct *tsk = current;
	unsigned long eip = regs->eip;
	siginfo_t info;

	/* Read DR6 (debug status) to find out why we trapped. */
	__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));

	/* If the user set TF, it's simplest to clear it right away. */
	if ((eip >= PAGE_OFFSET) && (regs->eflags & TF_MASK))
		goto clear_TF;

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg[7])
			goto clear_dr7;
	}

	/* VM86 traps get their own handler. */
	if (regs->eflags & VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg[6] = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if (condition & DR_STEP) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if ((regs->xcs & 3) == 0)
			goto clear_TF;
		if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
			goto clear_TF;
	}

	/* Ok, finally something we can handle: deliver SIGTRAP. */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;

	/* If this is a kernel mode trap, save the user PC on entry to
	 * the kernel, that's what the debugger can make sense of. */
	info.si_addr = ((regs->xcs & 3) == 0) ? (void *)tsk->thread.eip :
	                                        (void *)regs->eip;
	force_sig_info(SIGTRAP, &info, tsk);

	/* Disable additional traps. They'll be re-enabled when
	 * the signal is delivered. */
clear_dr7:
	__asm__("movl %0,%%db7"
		: /* no output */
		: "r" (0));
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF:
	regs->eflags &= ~TF_MASK;
	return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
		case 0x041: /* Stack Fault */
		case 0x241: /* Stack Fault | Direction */
			info.si_code = FPE_FLTINV;
			/* Should we clear the SF or let user space do it ???? */
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

/*
 * x87 FPU exception entry (trap 16): record that the legacy IRQ13
 * coprocessor-error path should be ignored, then decode and deliver
 * the FPU error for the faulting eip.
 */
asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	ignore_irq13 = 1;
	math_error((void *)regs->eip);
}

/*
 * SSE/SIMD FPU error: decode MXCSR and deliver SIGFPE with the
 * appropriate si_code, analogous to math_error() above.
 */
void simd_math_error(void *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

/*
 * Trap 19 entry point.  On SSE-capable CPUs this is a real SIMD FPU
 * exception; on anything older it can only be the undocumented
 * "cache flush" fault, which is treated as a protection error.
 */
asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs, long error_code)
{
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_irq13 = 1;
		simd_math_error((void *)regs->eip);
	} else {
		/*
		 * Handle strange cache flush from user space exception
		 * in all other cases.  This is undocumented behaviour.
		 */
		if (regs->eflags & VM_MASK) {
			handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
			return;
		}
		die_if_kernel("cache flush denied", regs, error_code);
		current->thread.trap_no = 19;
		current->thread.error_code = error_code;
		force_sig(SIGSEGV, current);
	}
}

/* Trap 15: P6 local-APIC spurious interrupt erratum — deliberately ignored. */
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs, long error_code)
{
#if 0
	/* No need to warn about this any longer. */
	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
*/asmlinkage void math_state_restore(struct pt_regs regs){ __asm__ __volatile__("clts"); /* Allow maths ops (or we recurse) */ if (current->used_math) { restore_fpu(current); } else { init_fpu(); } current->flags |= PF_USEDFPU; /* So we fnsave on switch_to() */}#ifndef CONFIG_MATH_EMULATIONasmlinkage void math_emulate(long arg){ printk("math-emulation not enabled and no coprocessor found.\n"); printk("killing %s.\n",current->comm); force_sig(SIGFPE,current); schedule();}#endif /* CONFIG_MATH_EMULATION */#ifndef CONFIG_X86_F00F_WORKS_OKvoid __init trap_init_f00f_bug(void){ /* * "idt" is magic - it overlaps the idt_descr * variable so that updating idt will automatically * update the idt descriptor.. */ __set_fixmap(FIX_F00F, __pa(&idt_table), PAGE_KERNEL_RO); idt_descr.address = __fix_to_virt(FIX_F00F); __asm__ __volatile__("lidt %0": "=m" (idt_descr));}#endif#define _set_gate(gate_addr,type,dpl,addr) \do { \ int __d0, __d1; \ __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \ "movw %4,%%dx\n\t" \ "movl %%eax,%0\n\t" \ "movl %%edx,%1" \ :"=m" (*((long *) (gate_addr))), \ "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \ :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \ "3" ((char *) (addr)),"2" (__KERNEL_CS << 16)); \} while (0)/* * This needs to use 'idt_table' rather than 'idt', and * thus use the _nonmapped_ version of the IDT, as the * Pentium F0 0F bugfix can have resulted in the mapped * IDT being write-protected. */void set_intr_gate(unsigned int n, void *addr){ _set_gate(idt_table+n,14,0,addr);}static void __init set_trap_gate(unsigned int n, void *addr){ _set_gate(idt_table+n,15,0,addr);}static void __init set_system_gate(unsigned int n, void *addr){ _set_gate(idt_table+n,15,3,addr);}static void __init set_call_gate(void *a, void *addr){ _set_gate(a,12,3,addr);}#ifdef CONFIG_X86_VISWS_APIC/* * On Rev 005 motherboards legacy device interrupt lines are wired directly * to Lithium from the 307. 
But the PROM leaves the interrupt type of each * 307 logical device set appropriate for the 8259. Later we'll actually use * the 8259, but for now we have to flip the interrupt types to * level triggered, active lo as required by Lithium. */#define REG 0x2e /* The register to read/write */#define DEV 0x07 /* Register: Logical device select */#define VAL 0x2f /* The value to read/write */static voidsuperio_outb(int dev, int reg, int val){ outb(DEV, REG); outb(dev, VAL); outb(reg, REG); outb(val, VAL);}static int __attribute__ ((unused))superio_inb(int dev, int reg){ outb(DEV, REG); outb(dev, VAL); outb(reg, REG); return inb(VAL);}#define FLOP 3 /* floppy logical device */#define PPORT 4 /* parallel logical device */#define UART5 5 /* uart2 logical device (not wired up) */#define UART6 6 /* uart1 logical device (THIS is the serial port!) */#define IDEST 0x70 /* int. destination (which 307 IRQ line) reg. */#define ITYPE 0x71 /* interrupt type register *//* interrupt type bits */#define LEVEL 0x01 /* bit 0, 0 == edge triggered */#define ACTHI 0x02 /* bit 1, 0 == active lo */static voidsuperio_init(void){ if (visws_board_type == VISWS_320 && visws_board_rev == 5) { superio_outb(UART6, IDEST, 0); /* 0 means no intr propagated */ printk("SGI 320 rev 5: disabling 307 uart1 interrupt\n"); }}static voidlithium_init(void){ set_fixmap(FIX_LI_PCIA, LI_PCI_A_PHYS); printk("Lithium PCI Bridge A, Bus Number: %d\n", li_pcia_read16(LI_PCI_BUSNUM) & 0xff); set_fixmap(FIX_LI_PCIB, LI_PCI_B_PHYS); printk("Lithium PCI Bridge B (PIIX4), Bus Number: %d\n", li_pcib_read16(LI_PCI_BUSNUM) & 0xff); /* XXX blindly enables all interrupts */ li_pcia_write16(LI_PCI_INTEN, 0xffff); li_pcib_write16(LI_PCI_INTEN, 0xffff);}static voidcobalt_init(void){ /* * On normal SMP PC this is used only with SMP, but we have to * use it and set it up here to start the Cobalt clock */ set_fixmap(FIX_APIC_BASE, APIC_DEFAULT_PHYS_BASE); printk("Local APIC ID %lx\n", apic_read(APIC_ID)); printk("Local APIC Version 
%lx\n", apic_read(APIC_LVR)); set_fixmap(FIX_CO_CPU, CO_CPU_PHYS); printk("Cobalt Revision %lx\n", co_cpu_read(CO_CPU_REV)); set_fixmap(FIX_CO_APIC, CO_APIC_PHYS); printk("Cobalt APIC ID %lx\n", co_apic_read(CO_APIC_ID)); /* Enable Cobalt APIC being careful to NOT change the ID! */ co_apic_write(CO_APIC_ID, co_apic_read(CO_APIC_ID)|CO_APIC_ENABLE); printk("Cobalt APIC enabled: ID reg %lx\n", co_apic_read(CO_APIC_ID));}#endifvoid __init trap_init(void){#ifdef CONFIG_EISA if (isa_readl(0x0FFFD9) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) EISA_bus = 1;#endif#ifdef CONFIG_X86_LOCAL_APIC init_apic_mappings();#endif set_trap_gate(0,÷_error); set_trap_gate(1,&debug); set_intr_gate(2,&nmi); set_system_gate(3,&int3); /* int3-5 can be called from all */ set_system_gate(4,&overflow); set_system_gate(5,&bounds); set_trap_gate(6,&invalid_op); set_trap_gate(7,&device_not_available); set_trap_gate(8,&double_fault); set_trap_gate(9,&coprocessor_segment_overrun); set_trap_gate(10,&invalid_TSS); set_trap_gate(11,&segment_not_present); set_trap_gate(12,&stack_segment); set_trap_gate(13,&general_protection); set_intr_gate(14,&page_fault); set_trap_gate(15,&spurious_interrupt_bug); set_trap_gate(16,&coprocessor_error); set_trap_gate(17,&alignment_check); set_trap_gate(18,&machine_check); set_trap_gate(19,&simd_coprocessor_error); set_system_gate(SYSCALL_VECTOR,&system_call); /* * default LDT is a single-entry callgate to lcall7 for iBCS * and a callgate to lcall27 for Solaris/x86 binaries */#if 0 set_call_gate(&default_ldt[0],lcall7); set_call_gate(&default_ldt[4],lcall27);#endif /* * Should be a barrier for any external CPU state. */ cpu_init();#ifdef CONFIG_X86_VISWS_APIC superio_init(); lithium_init(); cobalt_init();#endif}EXPORT_SYMBOL_GPL(netdump_func);EXPORT_SYMBOL_GPL(netdump_mode);#if CONFIG_X86_LOCAL_APICEXPORT_SYMBOL_GPL(nmi_watchdog);#endif
/* (Web-viewer shortcut-key help removed — not part of the source file.) */