📄 inttrap.cxx
/*
 * Copyright (C) 1998, 1999, Jonathan S. Shapiro.
 *
 * This file is part of the EROS Operating System.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/* This file contains the functions that are called in the IPC path,
 * in an attempt (probably vain) to minimize I-TLB misses. */

/* Define this so that all of the functions that are called from the
 * IPC path will be exposed by their respective header files to be
 * inlined: */
#define IPC_INLINES

#include <kerninc/kernel.hxx>
#include <kerninc/Machine.hxx>
#include <kerninc/Check.hxx>
#include <kerninc/IRQ.hxx>
#include <kerninc/Thread.hxx>
#include <kerninc/util.h>
#include <kerninc/Debug.hxx>
#include <kerninc/SysTimer.hxx>
#include <kerninc/Process.hxx>
#include <eros/SysTraceKey.h>
#include <eros/i486/io.h>
#include "lostart.hxx"
#include "IDT.hxx"
#include "GDT.hxx"

/* #define TIMING_DEBUG */

extern "C" {
  extern uint32_t InterruptStackTop;
  extern uint32_t InterruptStackBottom;
  extern uint32_t InterruptStackLimit;
  extern int _start;
  extern int etext;
}

extern "C" {
  void EnableCounters(uint32_t ctl);
  void DisableCounters();
#ifdef PPRO_TEST_CHECKS
  extern uint64_t rdmsr(uint32_t);
#endif
}

#define MAX_TRAP_DEPTH 2

#ifndef NDEBUG
inline static bool
ValidEIP(uint32_t eip)
{
  /* Kernel text is okay: */
  if (eip >= (uint32_t)&_start && eip < (uint32_t)&etext)
    return true;

  /* BIOS prom is okay (PCI BIOS): */
  if (eip >= 0xe0000u && eip < 0xfffff)
    return true;

  return false;
}
#endif

extern "C" {
  void resume_from_kernel_interrupt(fixregs_t *) NORETURN;
};

/* Called from the interrupt entry point with interrupts disabled.
 * The interrupt handler assembly code has also incremented
 * DisableDepth, so we are running as though we had already called
 * IRQ::DISABLE().  Do any processing that must be done with
 * interrupts disabled here and then call IRQ::ENABLE() to allow
 * nested interrupts (when we get that working).
 *
 * Note that saveArea does NOT always point to a proper fixregs_t
 * structure.  If we interrupted a process (user or supervisor), then
 * saveArea points to a valid fixregs_t structure.  If we interrupted
 * the kernel, then saveArea points to a supervisor interrupt frame
 * on the kernel interrupt stack.  The kernel save frame is a subset
 * of the user save frame, and the following code is careful about
 * what it references. */
void
IDT::OnTrapOrInterrupt(fixregs_t *saveArea)
{
#ifndef NDEBUG
  int stack;
  uint32_t oDisableDepth = IRQ::DISABLE_DEPTH();
#endif

  uint32_t vecNumber = saveArea->ExceptNo;

  KernStats.nInter++;

  assert( IRQ::DISABLE_DEPTH() == 1 || vecNumber < IntVec::IRQ0 );
  assert ( (GetFlags() & MASK_EFLAGS_Interrupt) == 0 );

#ifndef NDEBUG
  /* If we interrupted an invocation, there is no guarantee that
   * there exists a current thread -- we may have interrupted the
   * invocation path just after the current thread has been deleted.
   */
  register Thread* curThread = Thread::Current();
  assert(curThread || !sa_IsProcess(saveArea));
#endif

#ifndef NDEBUG
  if (dbg_inttrap)
    Debugger();
#endif

#ifndef NDEBUG
  /* NOTE: There was a bug here in which a timer interrupt that nailed
   * the kernel in nested fashion could trigger a context check while
   * something critical was being updated. */
  if ((sa_IsProcess(saveArea) ||
       ((vecNumber != IntVec::BreakPoint) && (vecNumber < IntVec::IRQ0))) &&
      ! Check::Contexts("on int entry") )
    halt('a');
#endif

#ifndef NDEBUG
  /* Various paranoia checks: */
  if ( ( (uint32_t) &stack < (uint32_t) &InterruptStackBottom ) ||
       ( (uint32_t) &stack > (uint32_t) &InterruptStackTop ) ) {
    halt('b');
    printf("Interrupt 0x%x, stack is 0x%08x pc is 0x%08x\n",
           vecNumber, &stack, saveArea->EIP);
    if (vecNumber == 0xe)
      printf("fva=0x%08x ESI=0x%08x ECX=0x%08x ctxt ESI=0x%08x\n"
             "ctxt EBX=0x%08x ctxt EDX=0x%08x\n",
             saveArea->ExceptAddr, saveArea->ESI, saveArea->ECX,
             ((Process *) Thread::CurContext())->fixRegs.ESI,
             ((Process *) Thread::CurContext())->fixRegs.EBX,
             ((Process *) Thread::CurContext())->fixRegs.EDX);
    halt('c');
    Debug::Backtrace("Interrupt on wrong stack");
  }

  if ( (uint32_t) &stack < (uint32_t) &InterruptStackLimit ) {
    halt('d');
    Debug::Backtrace("Stack limit exceeded");
  }

  if ( saveArea == 0 ||
       ( sa_IsKernel(saveArea) && !ValidEIP(saveArea->EIP) ) ) {
    /* halt('e'); */
    fatal("Bogus save area 0x%08x vecno %d\n"
          "  EIP=0x%08x CurThread = %s ctxt=0x%08x\n",
          saveArea, vecNumber,
          saveArea ? saveArea->EIP : 0,
          curThread->Name(), Thread::CurContext());
  }
#endif

  /* If we interrupted a thread, remember where the saved context
   * was.  For user threads, this is redundant, because it is the same
   * as the context that is already saved.  For kernel threads, this
   * is vital, as without it we won't be able to restart the thread.
   * Careful, though -- if this is a nested fault we don't want to
   * overwrite the old value. */
  if (sa_IsProcess(saveArea)) {
#ifndef NDEBUG
    fixregs_t *oldsa = Thread::CurContext()->UnsafeSaveArea();
    if ( oldsa != saveArea ) {
      printf("ex=0x%x err=0x%x, eip=0x%08x\n", saveArea->ExceptNo,
             saveArea->Error, saveArea->EIP);
      fatal("in: CurThread is 0x%08x old saveArea 0x%08x, "
            "saveArea = 0x%08x\n",
            curThread, oldsa, saveArea);
    }
    else
#endif
      Thread::CurContext()->SetSaveArea(saveArea);
  }

  assert( IRQ::DISABLE_DEPTH() == 1 || vecNumber < IntVec::IRQ0 );

  /* We have now done all of the processing that must be done with
   * interrupts disabled.  Re-enable interrupts here: */
#ifndef NESTED_INTERRUPT_SUPPRESS
  IRQ::ENABLE();

  assert( IRQ::DISABLE_DEPTH() == 0 || vecNumber < IntVec::IRQ0 );
#endif

#if defined(DBG_WILD_PTR) && (DBG_WILD_PTR > 1)
  if (dbg_wild_ptr)
    Check::Consistency("before int dispatch");
#endif

  assert (IntVecEntry[vecNumber]);

#if 0
  /* Count S D R+W miss (1), S I miss (0): */
  EnableCounters(0x0269024E);
#endif

  /* Dispatch to the handler: */
  IntVecEntry[vecNumber](saveArea);

#if defined(DBG_WILD_PTR) && (DBG_WILD_PTR > 1)
  if (dbg_wild_ptr)
    Check::Consistency("after int dispatch");
#endif

#ifndef NESTED_INTERRUPT_SUPPRESS
  assert( IRQ::DISABLE_DEPTH() == 0 || vecNumber < IntVec::IRQ0 );
#endif

  assert ( Thread::Current() || !sa_IsProcess(saveArea));

  /* We are going to process all pending interrupts and then return to
   * the thread.  We need to make sure that we do not lose any, thus
   * from this point down interrupts must be disabled. */
#ifndef NESTED_INTERRUPT_SUPPRESS
  IRQ::DISABLE();
#endif

  assert( IRQ::DISABLE_DEPTH() == 1 || vecNumber < IntVec::IRQ0 );

  /*
   * If the thread is yielding voluntarily, it MUST be rescheduled.
   *
   * If the current thread is a user thread, it is possible that
   * having completed the interrupt means that the current thread
   * needs to be reprepared, or that the thread has faulted.  If the
   * thread has faulted, it has not yielded; we need to know this in
   * order to migrate the thread to the keeper.
   *
   * It is also possible that in attempting to reprepare the current
   * thread, we will discover that the thread has died.  This can
   * happen if a domain rescinds itself.
   *
   * Rather than try to deal with all of this in multiple places, we
   * unconditionally call Thread::Resched().  If appropriate,
   * Thread::Resched() will simply return the current thread in
   * prepared form, and we will return to it.  If the thread should
   * yield unconditionally, we tell Thread::Resched() so.
   */
  assert ( (GetFlags() & MASK_EFLAGS_Interrupt) == 0 );
  assert( IRQ::DISABLE_DEPTH() == 1 || vecNumber < IntVec::IRQ0 );

  if (sa_IsProcess(saveArea)) {
    Thread::Reschedule();

    /* We succeeded (wonder of wonders) -- release pinned resources. */
    ObjectHeader::ReleasePinnedObjects();

#if 0
    /* Since we succeeded, there are no uncommitted I/O page frames: */
    ObjectCache::ReleaseUncommittedIoPageFrames();
#endif

#if 0
    printf("Return from resched\n");
#endif

    assert ( Thread::Current() );

    saveArea = Thread::CurContext()->UnsafeSaveArea();

    if (saveArea == 0)
      fatal("Thread 0x%08x is not runnable (no saveArea)\n",
            Thread::Current());
  }

  assert( IRQ::DISABLE_DEPTH() == 1 || vecNumber < IntVec::IRQ0 );

#ifndef NDEBUG
  if ( saveArea == 0 ) {
    printf("Restore from invalid save area 0x%08x\n"
           "  EIP=0x%08x nRun=%d CurThread = %s ctxt=0x%08x\n",
           saveArea,
           saveArea ? saveArea->EIP : 0,
           Thread::Current()->nRun,
           Thread::Current()->Name(), Thread::CurContext());
    printf("  CS=0x%02x, int#=0x%x, err=0x%x, flg=0x%08x\n",
           saveArea->CS, saveArea->ExceptNo, saveArea->Error,
           saveArea->EFLAGS);
    Debug::Backtrace();
  }
#endif

  /* We are returning to a previous interrupt or to a thread, and we
   * need to restore the interrupt level that was effective in that
   * context.  CATCH: if interrupts were enabled in that context we do
   * not want them to get enabled here; we'd rather wait until the
   * RETI which will enable them when EFLAGS is restored.  We
   * therefore call setspl() rather than splx().  setspl() adjusts the
   * PIC masks appropriately [or it eventually will], but does not
   * enable interrupts.
   *
   * Note that a yielding thread should always be running at
   * splyield() [all interrupts enabled], so the fact that we may not
   * be returning to the same thread is not a problem in restoring the
   * current spl. */

  /* We are about to do a return from interrupt, which path must not
   * be interrupted.  Disable interrupts prior to return: */
  assert ( (GetFlags() & MASK_EFLAGS_Interrupt) == 0 );
  assert( IRQ::DISABLE_DEPTH() == 1 || vecNumber < IntVec::IRQ0 );

#ifndef NDEBUG
  assert ( oDisableDepth == IRQ::DISABLE_DEPTH() );
#endif

  /* Otherwise resume interrupted thread: */
  if (sa_IsProcess(saveArea))
    Thread::Current()->Resume();
  else
    resume_from_kernel_interrupt(saveArea);
}
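
/* Editorial note: the IRQ::DISABLE_DEPTH() assertions above rely on a
 * nesting-counter discipline for interrupt masking -- the entry stub
 * arrives with the depth already at 1, the handler dispatch window
 * runs at depth 0, and the resume path requires depth 1 again.  The
 * sketch below illustrates that general discipline only; it is NOT
 * the EROS IRQ implementation, and the names IrqSketch, disableDepth,
 * cli(), and sti() are hypothetical stand-ins. */
#include <cassert>
#include <cstdint>

namespace IrqSketch {
  static uint32_t disableDepth = 0;   /* 0 ==> interrupts are enabled */

  inline void cli() { /* architecture-specific "mask interrupts", e.g. x86 cli */ }
  inline void sti() { /* architecture-specific "unmask interrupts", e.g. x86 sti */ }

  /* Only the 0 -> 1 transition actually masks the hardware; nested
   * callers just bump the counter. */
  inline void DISABLE() {
    if (disableDepth++ == 0)
      cli();
  }

  /* Only the 1 -> 0 transition actually unmasks; every ENABLE() must
   * pair with an earlier DISABLE(). */
  inline void ENABLE() {
    assert(disableDepth > 0);
    if (--disableDepth == 0)
      sti();
  }
}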