/* Copyright (c) 1996-2000 Microsoft Corporation. All rights reserved. */
/*+
fault.c - iX86 fault handlers
*/
#include "kernel.h"
// disable short jump warning.
#pragma warning(disable:4414)
///#define LIGHTS(n) mov dword ptr ss:[0AA001010h], ~(n)&0xFF
extern RETADDR ServerCallReturn(PTHREAD pth);
extern RETADDR ObjectCall(PTHREAD pth, RETADDR ra, void *args, long iMethod);
extern RETADDR MapArgs(const CINFO *pci, int iMethod, void *args);
extern BOOL HandleException(PTHREAD pth, int id, ulong addr);
extern void NextThread(void);
extern void KCNextThread(void);
extern void OEMIdle(void);
extern void OEMFlushCache(void);
extern KTSS MainTSS;
extern void Reschedule(void);
extern void RunThread(void);
extern void DumpTctx(PTHREAD pth, int id, ulong addr, int level);
extern void DoPowerOff(void);
extern unsigned __int64 g_aGlobalDescriptorTable[];
extern DWORD ticksleft;
#ifdef NKPROF
extern void ProfilerHit(unsigned long ra);
#endif
#ifdef CELOG
extern void CeLogInterrupt(DWORD dwLogValue);
#endif
#define LOAD_SEGS 0
#define ADDR_SLOT_SIZE 0x02000000
#define PDES_PER_SLOT (ADDR_SLOT_SIZE / 1024 / PAGE_SIZE)
#define NUM_SLOTS 32
#define PID_TO_PT_INDEX(pid) (((pid)+1) * PDES_PER_SLOT)
#define BLOCKS_PER_PAGE_TABLE (1024 / PAGES_PER_BLOCK)
#define BLOCK_SIZE (PAGES_PER_BLOCK * PAGE_SIZE)
#define SYSCALL_INT 0x20
#define KCALL_INT 0x22
#define PT_PTR_TO_INDEX(pp) ((pp) - g_PageTablePool)
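// Worked example (added note; assumes the usual x86 PAGE_SIZE of 4096):
// each PDE maps 1024 * PAGE_SIZE = 4MB, so a 32MB slot needs
//     PDES_PER_SLOT = 0x02000000 / 1024 / 4096 = 8
// page directory entries, and PID_TO_PT_INDEX(pid) = (pid+1)*8 yields the
// first PDE index of that process' slot. Slot 0 (PDEs 0..7) is the alias
// window for the currently running process, which is why process 0 starts
// at PDE 8 rather than 0. With NUM_SLOTS slots the covered range must fit
// in the 1024-entry page directory:
ERRFALSE(PDES_PER_SLOT * (NUM_SLOTS + 1) <= 1024); // added sanity check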
typedef struct _DIRTYRANGE {
ULONG ulStartBlock;
ULONG ulEndBlock;
} DIRTYRANGE, * PDIRTYRANGE;
DIRTYRANGE g_PTDirtyRegion[PTE_POOL_SIZE];
extern PAGETABLE g_PageTablePool[PTE_POOL_SIZE];
extern PAGETABLE g_PageDir;
extern PAGETABLE g_ShadowPageDir; // NOTE: This really only has 512 entries!
extern DWORD ProcessorFeatures;
ACCESSKEY g_PageDirAccess = 0;
ULONG g_PTMapIdx[PTE_POOL_SIZE];
ULONG g_AccessScanIdx = 0;
FXSAVE_AREA g_InitialFPUState;
PTHREAD g_CurFPUOwner;
PPROCESS g_CurASIDProc;
//
// CR0 bit definitions for numeric coprocessor
//
#define MP_MASK 0x00000002
#define EM_MASK 0x00000004
#define TS_MASK 0x00000008
#define NE_MASK 0x00000020
#define NPX_CW_PRECISION_MASK 0x300
#define NPX_CW_PRECISION_24 0x000
#define NPX_CW_PRECISION_53 0x200
#define NPX_CW_PRECISION_64 0x300
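//
// Illustrative sketch (added; not part of the original source, and unused
// below): how CR0.TS typically drives lazy FPU switching, which the masks
// above and g_CurFPUOwner support. The scheduler sets TS on a thread
// switch; the new thread's first FPU instruction then raises #NM (trap 7),
// whose handler saves the old owner's state, restores the new owner's,
// and clears TS via CLTS.
//
static _inline void SetTSBitSketch(void)
{
    __asm {
        mov eax, cr0
        or  eax, TS_MASK    // arm the #NM trap for the next FPU instruction
        mov cr0, eax
    }
}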
PPAGETABLE LowAddrSpacePageFault(PVOID, DWORD);
PPAGETABLE AllocFreePTE(ULONG);
#ifdef DEBUG_VMEM
void ValidateVirtualMemory(void);
#endif
#define hCurThd [KData].ahSys[SH_CURTHREAD*4]
#define PtrCurThd [KData].pCurThd
#define THREAD_CTX_ES (THREAD_CONTEXT_OFFSET+8)
ERRFALSE(8 == offsetof(CPUCONTEXT, TcxEs));
#define THREAD_CTX_EDI (THREAD_CONTEXT_OFFSET+16)
ERRFALSE(16 == offsetof(CPUCONTEXT, TcxEdi));
#define Naked void __declspec(naked)
#pragma warning(disable:4035) // Disable warning about no return value
DWORD _inline PhysTLBFlush(void)
{
    __asm {
        mov eax, cr3    // reloading CR3 with its own value flushes all
        mov cr3, eax    // non-global TLB entries
    }
}
void FlushCache(void) {
OEMFlushCache();
}
#pragma warning(default:4035) // Turn warning back on
void SetCPUASID(PTHREAD pth)
{
PPROCESS pprc;
DWORD aky;
// Make sure this runs without preemption
if (!InSysCall()) {
KCall((PKFN)SetCPUASID, pth);
return;
} else
KCALLPROFON(62);
pprc = pth->pProc;
if (g_CurASIDProc != pprc)
{
ACCESSKEY akyReset;
PDWORD pAliasPD = &g_PageDir.PTE[0];
PDWORD pRealPD = &g_PageDir.PTE[PID_TO_PT_INDEX(pprc->procnum)];
PDWORD pShadowOld = &g_ShadowPageDir.PTE[PID_TO_PT_INDEX(pCurProc->procnum)];
PDWORD pShadowNew = &g_ShadowPageDir.PTE[PID_TO_PT_INDEX(pprc->procnum)];
CELOG_ThreadMigrate(pprc->hProc, 0);
#ifdef DEBUG_VMEM
ValidateVirtualMemory();
#endif
//
// Copy the accessed bits out for the current slot 0 process, and copy
// in the new process' page tables
//
        do {
            // Page dir entries either mirror the shadow entry (except possibly
            // for the accessed bit) or are 0, so this simple OR only ever
            // updates the accessed bit in the shadow. Doing the OR
            // unconditionally is faster than testing the accessed bit of each
            // entry in the real page dir.
#ifdef DEBUG_VMEM
if (g_CurASIDProc != 0)
{
if ((*pShadowOld & ~PG_ACCESSED_MASK) !=
(*pAliasPD & ~PG_ACCESSED_MASK))
{
NKDbgPrintfW(
TEXT("SetCPUASID: Slot 0 PDE doesn't match Process slot PDE\r\n"));
NKDbgPrintfW(
TEXT("Slot index = %d, PDE index = %d\r\n"),
pprc->procnum+1, pAliasPD - &g_PageDir.PTE[0]);
NKDbgPrintfW(
TEXT("Slot 0 PDE = 0x%8.8X, Process Slot PDE = 0x%8.8X\r\n"),
*pAliasPD & ~PG_ACCESSED_MASK, *pShadowOld & ~PG_ACCESSED_MASK);
DebugBreak();
}
}
#endif
            *pShadowOld++ |= *pAliasPD;     // propagate accessed bits into the shadow
            *pRealPD++ = *pShadowNew;       // map the new process' own slot
            *pAliasPD++ = *pShadowNew++;    // and alias it into slot 0
        } while (pAliasPD < &g_PageDir.PTE[PDES_PER_SLOT]);
AddAccess(&g_PageDirAccess,pprc->aky);
// Unmap any sections that the new thread is not allowed to access.
aky = pth->aky | pprc->aky;
akyReset = g_PageDirAccess & (~aky);
if (akyReset)
{
UINT i;
g_PageDirAccess &= aky;
for (i = 0; i < ARRAY_SIZE(g_PageTablePool); i++)
{
UINT j = g_PTMapIdx[i];
#ifdef DEBUG_VMEM
if (j < PDES_PER_SLOT)
{
NKDbgPrintfW(
TEXT("SetCPUASID: PageTablePool has PDE owned by slot 0\r\n"));
NKDbgPrintfW(
TEXT("Pool index = %d, PDE index = %d\r\n"),
i, j);
DebugBreak();
}
#endif
if (akyReset & (1 << ((j / PDES_PER_SLOT) - 1))) {
DWORD dwTest = g_PageDir.PTE[j];
#ifdef DEBUG_VMEM
if (dwTest && ((dwTest&PG_PHYS_ADDR_MASK) == (g_PageDir.PTE[j%PDES_PER_SLOT] & PG_PHYS_ADDR_MASK))) {
NKDbgPrintfW(L"SetCPUASID: Zapping slot0 entry. PDE=%8.8x j=%4.4x\r\n", dwTest, j);
NKDbgPrintfW(L" akyReset=%8.8x g_PageDirAccess=%8.8x new key=%8.8x\r\n", akyReset, g_PageDirAccess, pth->aky);
NKDbgPrintfW(L" pCurProc=%8.8lx, pprc=%8.8lx, pth = %8.8lx, &g_PageDir=%8.8lx, &g_ShadowPageDir=%8.8lx\r\n",
pCurProc,pprc,pth,&g_PageDir,&g_ShadowPageDir);
DebugBreak();
}
#endif
g_PageDir.PTE[j] = 0;
if (dwTest & PG_ACCESSED_MASK)
{
#ifdef DEBUG_VMEM
if ((g_ShadowPageDir.PTE[j] & ~PG_ACCESSED_MASK) !=
(dwTest & ~PG_ACCESSED_MASK))
{
NKDbgPrintfW(
TEXT("SetCPUASID: Real PDE doesn't match Shadow PDE\r\n"));
NKDbgPrintfW(
TEXT("Pool index = %d, PDE index = %d\r\n"),
i, j);
NKDbgPrintfW(
TEXT("Real PDE = 0x%8.8X, Shadow PDE = 0x%8.8X\r\n"),
dwTest & ~PG_ACCESSED_MASK, g_ShadowPageDir.PTE[j] & ~PG_ACCESSED_MASK);
DebugBreak();
}
#endif
g_ShadowPageDir.PTE[j] = dwTest;
}
} else if (aky & (1 << ((j / PDES_PER_SLOT) - 1))) {
g_ShadowPageDir.PTE[j] |= g_PageDir.PTE[j];
g_PageDir.PTE[j] = g_ShadowPageDir.PTE[j];
AddAccess(&g_PageDirAccess,(1 << ((j / PDES_PER_SLOT) - 1)));
}
}
}
SectionTable[0] = SectionTable[(ULONG)pprc->dwVMBase >> VA_SECTION];
g_CurASIDProc = pprc;
PhysTLBFlush();
}
//
// Change CurProc to the new guy
//
pCurProc = pprc;
hCurProc = pprc->hProc;
KCALLPROFOFF(62);
}
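// Worked example (added note): the slot tests above map a PDE index to an
// access-key bit via (1 << ((j / PDES_PER_SLOT) - 1)). j / PDES_PER_SLOT
// recovers the slot number of PDE j, and slot s (s >= 1, slot 0 being the
// alias window) owns key bit s-1. E.g. with PDES_PER_SLOT == 8, PDE index
// j == 0x11 lies in slot 2 and maps to key bit 1: (1 << ((0x11/8) - 1)) == 0x2.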
LPVOID VerifyAccess(LPVOID pvAddr, DWORD dwFlags, ACCESSKEY aky)
{
PSECTION pscn;
MEMBLOCK *pmb;
ulong entry;
    if ((long)pvAddr >= 0) {
        // User space address: walk SectionTable -> section -> memblock and
        // verify that the block is mapped, that the access key matches, and
        // that the page is valid (and writable, when a write is requested).
        if ((pscn = SectionTable[(ulong)pvAddr>>VA_SECTION]) != 0
         && (pmb = (*pscn)[((ulong)pvAddr>>VA_BLOCK)&BLOCK_MASK]) != 0
         && pmb != RESERVED_BLOCK
         && (pmb->alk & aky) != 0
         && (entry = pmb->aPages[((ulong)pvAddr>>VA_PAGE)&PAGE_MASK]) & PG_VALID_MASK
         && (!(dwFlags & VERIFY_WRITE_FLAG)
          || (entry&PG_PROTECTION) == PG_PROT_WRITE))
            return Phys2Virt(PFNfromEntry(entry) | ((ulong)pvAddr & (PAGE_SIZE-1)));
} else {
// Kernel mode only address. If the "kernel mode OK" flag is set or if the
// thread is running in kernel mode, allow the access.
if (dwFlags & VERIFY_KERNEL_OK || GetThreadMode(pCurThread) == KERNEL_MODE) {
DWORD dwPageDir;
PPAGETABLE pPageTable;
//
// Find entry in 1st level page dir
//
if ((dwPageDir = g_PageDir.PTE[((ulong)pvAddr) >> 22]) != 0) {
if ((dwPageDir & (PG_LARGE_PAGE_MASK|PG_VALID_MASK)) == (PG_LARGE_PAGE_MASK|PG_VALID_MASK)) {
return pvAddr;
} else {
pPageTable = (PPAGETABLE)PHYS_TO_LIN(dwPageDir & PG_PHYS_ADDR_MASK);
if (pPageTable->PTE[((ulong)pvAddr>>VA_PAGE)&0x3FF] & PG_VALID_MASK)
return pvAddr;
}
}
}
}
return 0;
}
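// Hypothetical helpers (added; not part of the original source, and unused
// below) that just name the address decomposition VerifyAccess performs for
// kernel-mode addresses: bits 31..22 of a VA index the page directory,
// bits 21..12 index the page table, and bits 11..0 are the byte offset
// within the page (assuming VA_PAGE == 12, per the expressions above).
static _inline ulong PdeIndexSketch(ulong va)   { return va >> 22; }
static _inline ulong PteIndexSketch(ulong va)   { return (va >> VA_PAGE) & 0x3FF; }
static _inline ulong PageOffsetSketch(ulong va) { return va & (PAGE_SIZE - 1); }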
//
// Function:
// CommonFault
//
// Description:
// The specific fault handlers jump to CommonFault for exceptions they do
// not handle directly; CommonFault then dispatches them to the C routine
// HandleException.
//
// At entry:
// ESP points to stack frame containing PUSHAD, ERROR CODE, EIP, CS,
// EFLAGS, (and optionally Old ESP, Old SS). Normally this is the
// last part of the thread structure, the saved context. In the case
// of a nested exception the context has been saved on the ring 0
// stack. We will create a fake thread structure on the stack to hold
// the captured context. The remaining segment registers are added by
// this routine.
//
// ECX is the faulting address which is passed to HandleException
//
// ESI is the exception id which is passed to HandleException
//
// Return:
// CommonFault jumps to Reschedule or resumes execution based on the return
// value of HandleException.
//
Naked CommonFault()
{
_asm {
        cld                         // C code assumes the direction flag is clear
        mov eax, KGDT_R3_DATA       // reload flat data selectors
        mov ds, ax
        mov es, ax
        dec [KData].cNest
        jnz short cf20              // nested fault
        mov esp, offset KData-4     // switch to the kernel stack below KData
        mov edi, PtrCurThd          // (edi) = ptr to current thread
cf10:   sti
        push ecx                    // arg 3: faulting address
        push esi                    // arg 2: exception id
        push edi                    // arg 1: ptr to thread
        call HandleException        // HandleException(pth, id, addr)
        add esp, 3*4                // pop the three arguments
test eax, eax
jnz short NoReschedule
jmp Reschedule
NoReschedule:
jmp RunThread
// Nested exception. Create a fake thread structure on the stack
cf20: push ds
push es
push fs
push gs
sub esp, THREAD_CONTEXT_OFFSET
mov edi, esp // (edi) = ptr to fake thread struct
jmp short cf10
}
}
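// A sketch (added; hypothetical type, not part of the original source) of
// the ring-0 frame the comment above describes, lowest address first: the
// PUSHAD register block, the error code (pushed by the CPU or by a stub),
// then the IRET frame. The real layout is the saved-context portion of the
// thread structure (CPUCONTEXT).
typedef struct _FAULTFRAME_SKETCH {
    DWORD Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;   // PUSHAD order in memory
    DWORD ErrorCode;
    DWORD Eip, Cs, Eflags;                          // IRET frame
    DWORD OldEsp, OldSs;                            // only on a ring transition
} FAULTFRAME_SKETCH;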
// Do a reschedule.
//
// (edi) = ptr to current thread or 0 to force a context reload
Naked Reschedule()
{
__asm {
test [KData].bPowerOff, 0FFh // Was a PowerOff requested?
jz short rsd10
mov [KData].bPowerOff, 0
call DoPowerOff // Yes - do it
rsd10:  sti
        cmp word ptr ([KData].bResched), 1  // reschedule requested?
        jne short rsd11
        mov word ptr ([KData].bResched), 0
        call NextThread                     // pick the next thread to run
rsd11:
        cmp dword ptr ([KData].dwKCRes), 1  // KCall reschedule requested?
        jne short rsd12
        mov dword ptr ([KData].dwKCRes), 0
        call KCNextThread
        cmp dword ptr ([KData].dwKCRes), 1  // requested again while running?
        je short rsd10                      // then start over
rsd12:
mov eax, [RunList.pth]
test eax, eax
jz short rsd50 // nothing to run
cmp eax, edi