📄 fast_interrupt.s
字号:
.file "interrupt.S"
/*
 * Copyright (C) 1998, 1999, Jonathan S. Shapiro.
 *
 * This file is part of the EROS Operating System.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * switch - this file provides the context switch logic. It is called
 * by the interrupt dispatch routines. The job of this routine is to save
 * the registers of the interrupted task, enter the kernel address space,
 * and call the real interrupt handler. On return from the interrupt
 * handler, this routine inverts that process. A full description of the
 * logic is provided in eros/doc/Switch
 *
 * On entry to switch() from a domain, both SS and CS are 4M segments pointing
 * to the current window. The kernel can run out of this code segment just
 * fine, but needs to switch to a proper data segment to do anything serious.
 *
 * On entry from the kernel itself, SS points to the proper kernel stack.
 *
 * All other segment values are indeterminate.
 */

#include <eros/i486/asm.h>
#include <eros/i486/target-asm.h>

/* Build-time configuration switches for this file: */
#define SPURIOUS_CHECK
/* #define V86_SUPPORT */
/* #define DISPLAY */
/* #define COUNT_PHASES */
#define SEND_KEY_3
#define FAST_BLOCK_MOVE
#define NEWFANGLED_PATH
#define NO_FAST_GATE_JUMP

/*
 * Space for the interrupt stack.
 *
 * The interrupt stack used to have an unmapped page at the bottom
 * to catch stack overflow. This was never a particularly bright idea.
 * If you actually managed to hit that point, you would page fault,
 * which would take an exception that couldn't win (because the page
 * was unmapped), which would in turn double fault, and the system
 * would just mysteriously reboot.
 *
 * Better to do an overflow test in the dispatch code until we are
 * confident.
 */
	.globl EXT(InterruptStackBottom)
	.globl EXT(InterruptStackLimit)
	.globl EXT(InterruptStackTop)
	.globl EXT(intr_common)

	.bss
	.align 4
LEXT(InterruptStackBottom)
	.space 128		/* fluff for overflow check! */
LEXT(InterruptStackLimit)
	.space 8064		/* total stack is 8192 */
LEXT(InterruptStackTop)

	.data
	.align 16
ENTRY(TrapDepth)
	.long 0
#ifdef OPTION_KERN_PROFILE
ENTRY(KernelProfileTable)
	.long 0
#endif

/*
 * On interrupt or trap, we wish to build a stack image that captures
 * the per-process state. When we are done with all of this, the stack
 * will look as follows:
 *
 *	gs		if from user mode, else unused
 *	fs		if from user mode, else unused
 *	ds		if from user mode, else unused
 *	es		if from user mode, else unused
 *	ss		if from user mode, else unused
 *	esp		if from user mode, else unused
 *	eflags
 *	cs
 *	eip
 *	error code (zero if none)
 *	trap number/interrupt number
 *	eax
 *	ecx
 *	edx
 *	ebx
 *	cr2		if page fault, else unused
 *	ebp
 *	esi
 *	edi
 *	cr3
 *	0		<- %esp
 *
 * The zero at the bottom of the save area indicates the special
 * functional units, if any that need to be reloaded to resume this
 * thread. On entry into the kernel, they hold the right values, so
 * this word is initially 0. On return, some units may need to be
 * reloaded, so the value may be non-zero.
 *
 * An implication of the 'unused' entries is that not all of the
 * SaveArea structure is necessarily valid. This is exactly true; the
 * kernel save area simply doesn't need to include all of the state
 * that the user save area does.
 *
 * ARCHITECTURAL BRAIN DEATH ALERT
 *
 * One of the more special "features" of the x86 is that it can take
 * exceptions on the IRET instruction. This shouldn't happen when
 * returning to a kernel-mode thread, where we fully control what goes
 * on to the stack, but there really isn't much we can do to stop user
 * code from, say, attempting to load invalid segment register values.
 *
 * This isn't all that big a problem, given that we can arrange things
 * so as to recover properly, but one needs to be aware of it in order to
 * understand how the hell reload works.
 *
 * There are 5 instructions in the user process reload sequence that can
 * cause a cascaded exception:
 *
 *	popl %es
 *	popl %ds
 *	popl %fs
 *	popl %gs
 * and
 *	iret
 *
 * The cascaded exception happens if any of the segment selectors are
 * inappropriate, or if fetching the instruction at CS:EIP causes a
 * page fault. In that event, we will end up taking an exception back
 * onto the user save area before the old exception has been
 * completely dealt with. The exceptions that might be taken in such a
 * case are:
 *
 *	#GP -- if code seg was bogus
 *	#SS -- if stack seg was bogus
 *	#NP -- if stack segment was not present
 *	#TS -- if returning to invalid task segment
 *	#AC -- if alignment checking enabled
 *	#PF -- if instruction page not present
 *
 * If one of these occurs, it will push a minimum of 5 words before we
 * get a chance to set things right:
 *
 *	exception number
 *	error code
 *	eip
 *	cs
 *	eflags
 *
 * The trick in such a case is to patch up the stack so that it looks
 * like this exception was generated by the user instruction rather
 * than by the return path. In the iret case, the 5 words that get
 * clobbered can be reconstructed from the state on the processor.
 * Unfortunately, the same is NOT true when a fault occurs during one
 * of the segment reloads.
 *
 * If a cascaded interrupt is taken, we examine the return PC to see
 * if it was the PC of the IRET instruction. If so, the portion of
 * the save area that was smashed is:
 *
 *				SMASHED WITH
 *	error code (zero if none)	eflags
 *	trap number/interrupt number	kern code cs
 *	eax				eip of IRET instr
 *	ecx				err code
 * sp->	edx				trap no
 *
 * What we do in this case is move the err code and trap number up 3
 * words (i.e. copy them into their proper positions), rewrite the
 * %eax, %ecx, and %edx values from the processor registers, adjust
 * the stack pointer to point to the bottom of the save area, and
 * dispatch back into OnTrapOrInterrupt
 */
	.text

/*
 * Interrupt entry point definitions:
 */

/* Stub for a trap/interrupt vector that pushes no error code: push a
 * zero placeholder so every save area has the same layout, then the
 * vector number, and join the common entry path.
 */
#define DEFENTRY(vecno) \
ENTRY(istub##vecno) \
	pushl $0; \
	pushl $vecno; \
	jmp EXT(intr_entry)

/* Stub for an exception that pushes its own error code: push only the
 * vector number (the CPU already pushed the error code) and jump to
 * the requested handler label.
 */
#define DEFENTRY_EC(vecno, label) \
ENTRY(istub##vecno) \
	pushl $vecno; \
	jmp EXT(label)

/* Steps in the stuff below:
 * 1. Save enough to check for spurious interrupt. Using pusha
 *    wastes about 2 cycles, but is worth it if we decide to
 *    actually TAKE the interrupt.
 *
 * 2. See if the interrupt was spurious. If so, forget it and bail
 *
 * 3. Mark the interrupt as pending
 *
 * 4. ACK the PIC
 *
 * 5. Check if nested, and bail if appropriate
 */

/* Note that this definition only works because the kernel is
 * mapped into the user address space!!! Testing IDT::TrapDepth
 * works because interrupts will not be re-enabled until
 * TrapDepth has been properly incremented.
*/#define DEFIRQ1(pendingbit, vecno, PICbit) \ENTRY(istub##vecno) \ /* Save minimal state: */; \ pushl $0; \ pushl $vecno; \ pusha; \ ;; \ ss ; \ orl $pendingbit,EXT(_3IRQ.PendingInterrupts); \ /* Disable the interrupt on the PIC: */; \ ss ; \ movb EXT(pic1_mask),%al; \ orb $PICbit,%al; \ outb %al,$0x21; \ ss ; \ movb %al,EXT(pic1_mask); \ /* ACK the PIC: */; \ movb $0x20,%al; \ outb %al,$0x20; \ /* check fast path: */; \ ss ; \ cmpl $0,EXT(TrapDepth); \ ja EXT(.L_fast_int_exit); \ jmp EXT(intr_common) #define DEFIRQ2(pendingbit, vecno, PICbit) \ENTRY(istub##vecno) \ /* Save minimal state: */; \ pushl $0; \ pushl $vecno; \ pusha; \ ;; \ ss ; \ orl $pendingbit,EXT(_3IRQ.PendingInterrupts); \ /* Disable the interrupt on the PIC: */; \ ss ; \ movb EXT(pic2_mask),%al; \ orb $PICbit,%al; \ outb %al,$0xa1; \ ss ; \ movb %al,EXT(pic2_mask); \ movb $0x20,%al; \ /* ACK the primary PIC: */; \ outb %al,$0x20; \ /* ACK the secondary PIC: */; \ outb %al,$0xa0; \ /* check fast path: */; \ ss ; \ cmpl $0,EXT(TrapDepth); \ ja EXT(.L_fast_int_exit); \ jmp EXT(intr_common) DEFENTRY(0x00)DEFENTRY(0x01)DEFENTRY(0x02)DEFENTRY(0x03)DEFENTRY(0x04)DEFENTRY(0x05)DEFENTRY(0x06)DEFENTRY(0x07)DEFENTRY(0x08)DEFENTRY(0x09) /* * if invaltss happens in the kernel return path we'll never ses * it, so don't even bother: */DEFENTRY_EC(0x0a, intr_entry)DEFENTRY_EC(0x0b, intr_ec)DEFENTRY_EC(0x0c, intr_ec)DEFENTRY_EC(0x0d, intr_ec)DEFENTRY_EC(0x0e, intr_pagefault)DEFENTRY(0x0f) DEFENTRY(0x10)DEFENTRY_EC(0x11, intr_entry) /* alignment check */DEFENTRY(0x12) /* machine check -- not sure whether this generates an EC or not */DEFENTRY(0x13)DEFENTRY(0x14)DEFENTRY(0x15)DEFENTRY(0x16)DEFENTRY(0x17)DEFENTRY(0x18)DEFENTRY(0x19)DEFENTRY(0x1a)DEFENTRY(0x1b)DEFENTRY(0x1c)DEFENTRY(0x1d)DEFENTRY(0x1e)DEFENTRY(0x1f) /* 0x20 is clock fast path interrupt - see below */DEFIRQ1(0x2, 0x21, 0x2)DEFIRQ1(0x4, 0x22, 0x4)DEFIRQ1(0x8, 0x23, 0x8)DEFIRQ1(0x10, 0x24, 0x10)DEFIRQ1(0x20, 0x25, 0x20)DEFIRQ1(0x40, 0x26, 0x40)#ifndef 
SPURIOUS_CHECKDEFIRQ1(0x80, 0x27, 0x80)#endif DEFIRQ2(0x100, 0x28, 0x1)DEFIRQ2(0x200, 0x29, 0x2)DEFIRQ2(0x400, 0x2a, 0x4)DEFIRQ2(0x800, 0x2b, 0x8)DEFIRQ2(0x1000, 0x2c, 0x10)DEFIRQ2(0x2000, 0x2d, 0x20)DEFIRQ2(0x4000, 0x2e, 0x40)#ifndef SPURIOUS_CHECKDEFIRQ2(0x8000, 0x2f, 0x80)#endif #ifdef SPURIOUS_CHECK /* Entry point for IRQ's 7 and 15 are a special case, because * the hardware may generate spurious interrupts that we wish * to suppress. */ENTRY(istub0x27) /* Save minimal state: */; pushl $0 pushl $0x27 pusha /* Check for spurious interrupt: */ movb $0xb,%al outb %al,$0x20 inb $0x20,%al cmpb $0,%al /* test sign bit -- if clear, spurious */ jge 1f ss orl $0x80,EXT(_3IRQ.PendingInterrupts) /* Disable the interrupt on the PIC: */ ss movb EXT(pic1_mask),%al orb $0x80,%al outb %al,$0x21 ss movb %al,EXT(pic1_mask) /* ACK the PIC: */ movb $0x20,%al outb %al,$0x20 /* check fast path: */ ss cmpl $0,EXT(TrapDepth) ja EXT(.L_fast_int_exit) jmp EXT(intr_common)1: /* spurious interrupt -- ack and bail */ /* ACK the PIC: */ movb $0x20,%al outb %al,$0x20 jmp EXT(.L_fast_int_exit)ENTRY(istub0x2f) /* Save minimal state: */; pushl $0 pushl $0x2f pusha /* Check for spurious interrupt: */ movb $0xb,%al outb %al,$0xa0 inb $0xa0,%al cmpb $0,%al /* test sign bit -- if clear, spurious */ jge 1f ss orl $0x8000,EXT(_3IRQ.PendingInterrupts) /* Disable the interrupt on the PIC: */ ss movb EXT(pic2_mask),%al orb $0x80,%al outb %al,$0x21 ss movb %al,EXT(pic2_mask) /* ACK both primary and secondary PIC: */ movb $0x20,%al outb %al,$0x20 outb %al,$0xa0 /* check fast path: */ ss cmpl $0,EXT(TrapDepth) ja EXT(.L_fast_int_exit) jmp EXT(intr_common)1: /* spurious interrupt -- ack and bail */ /* ACK the PIC: */ movb $0x20,%al outb %al,$0x20 outb %al,$0xa0 jmp EXT(.L_fast_int_exit)#endif DEFENTRY(0x30) /* INVOCATION interrupt is HIGH FREQUENCY, placed just above handler for benefit of cache adjacency. 
*/ .data.LC1: .string "Fault stack is 0x%08x\n\0" .text /* * All the interrupts that push an error code can interrupt the * IRET, and we therefore may need to reshuffle the stack: */ENTRY(intr_ec)#if 0 cli ss movw $0x8f31,0x000b80103: hlt jmp 3b#endif cmpl $L_iret_pc, 8(%esp) je 1f cmpl $L_reload_ds, 8(%esp) je 1f cmpl $L_reload_es, 8(%esp) je 1f cmpl $L_reload_fs, 8(%esp) je 1f cmpl $L_reload_gs, 8(%esp) je 1f#ifndef NO_FAST_GATE_JUMP cmpl $L_fast_iret_pc, 8(%esp) je 1f cmpl $L_fast_reload_ds, 8(%esp) je 1f cmpl $L_fast_reload_es, 8(%esp) je 1f cmpl $L_fast_reload_fs, 8(%esp) je 1f cmpl $L_fast_reload_gs, 8(%esp) je 1f#endif cmpl $L_v86_iret_pc, 8(%esp) je 1f jmp intr_entry /* * Recovering from an exception on the IRET instruction. Here
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -