fault.c
void InvalidateRange(PVOID pvAddr, ULONG ulSize)
{
if ((ULONG)pvAddr < ADDR_SLOT_SIZE)
    pvAddr = (PVOID)((ULONG)pvAddr | pCurProc->dwVMBase); // rebase slot-relative address into the current process's slot
if (ulSize)
KCall((PKFN)InvalidatePageTables,
&g_ShadowPageDir.PTE[(ULONG)pvAddr/(ARRAY_SIZE(g_ShadowPageDir.PTE)*PAGE_SIZE)],
&g_ShadowPageDir.PTE[((ULONG)pvAddr + ulSize - 1) / (ARRAY_SIZE(g_ShadowPageDir.PTE) * PAGE_SIZE)]);
}
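
// Illustration only -- a minimal sketch, not part of the original file: with standard
// x86 4 KB paging (1024 page-directory entries, each mapping 4 MB), the divisor
// ARRAY_SIZE(g_ShadowPageDir.PTE) * PAGE_SIZE above reduces a linear address to its
// page-directory index.  The helper name below is hypothetical.
static ULONG PdIndexOfAddr(ULONG ulAddr)
{
    return ulAddr / (1024 * 4096);      // same result as ulAddr >> 22
}
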
// System call trap handler.
//
// Pop the iret frame from the stack, switch back to the caller's stack, enable interrupts,
// and dispatch the system call.
//
// CPU State: ring1 stack & CS, interrupts disabled.
#pragma warning(disable:4035)
PVOID __declspec(naked) Int20SyscallHandler(void)
{
__asm {
mov [KData.pAPIReturn], offset APICallReturn
mov eax, offset sc00
ret
sc00: mov ecx, 4[esp] // (ecx) = caller's CS
and cl, 0FCh
cmp cl, KGDT_R1_CODE
pop ecx // (ecx) = EIP of "int SYSCALL"
jne short sc10 // caller was in user mode
add esp, 8 // remove CS & flags from stack
push fs:dword ptr [0] // save exception chain linkage
push KERNEL_MODE // mark as kernel mode caller
jmp short sc12
sc10: mov esp, [esp+8] // (esp) = caller's stack
push fs:dword ptr [0] // save exception chain linkage
push USER_MODE // mark as user mode caller
sc12: sub ecx, FIRST_METHOD+2 // (ecx) = iMethod * APICALL_SCALE
sti // interrupts OK now
cmp ecx, -APICALL_SCALE
je short sc25 // api call return
sar ecx, 1 // (ecx) = iMethod
lea eax, [esp+12] // (eax) = ptr to api arguments
push ecx // (arg3) = iMethod
push eax // (arg2) = ptr to args
push dword ptr [eax-4] // (arg1) = return address
sub eax, 12 // (eax) = ptr to thread mode
push eax // (arg0) = pMode
call ObjectCall // (eax) = api function address (0 if completed)
add esp, 16 // clear ObjectCall args off the stack
pop edx // (edx) = thread mode
add esp, 4 // remove exc. linkage parm
mov fs:dword ptr [0], -2 // mark PSL boundary in exception chain
mov ecx, PtrCurThd // (ecx) = ptr to THREAD struct
mov ecx, [ecx].pcstkTop // (ecx) = ptr to CALLSTACK struct
mov [ecx].ExEsp, esp // .\ v
mov [ecx].ExEbp, ebp // ..\ v
mov [ecx].ExEbx, ebx // ...> save registers for possible exception recovery
mov [ecx].ExEsi, esi // ../
mov [ecx].ExEdi, edi // ./
test edx, edx
jz short sc20 // dispatch the function in kernel mode
// Dispatch to api function in user mode. To do this, a far call frame is constructed on
// the stack and a far return is issued.
//
// (eax) = api function address
mov edx, esp
mov dword ptr [edx], SYSCALL_RETURN
push KGDT_R3_DATA | 3
push edx
push KGDT_R3_CODE | 3
push eax
retf
// Dispatch to api function in kernel mode. Just call the function directly and fall through
// into the api return code.
//
// (eax) = api function address
sc20: pop edx // discard return address
call eax // & call the api function
APICallReturn:
push 0 // space for exception chain linkage
push KERNEL_MODE // save current thread mode
// Api call return. Retrieve return address, mode, and exception linkage from the
// thread's call stack.
//
// (eax:edx) = function return value
// (TOS) = thread's execution mode
// (TOS+4) = space to receive previous exception chain linkage
sc25: push eax // save return value
push edx // ...
lea eax, [esp+8] // (eax) = ptr to thread's execution mode
push eax
call ServerCallReturn // (eax) = api return address
mov [esp], eax // save return address
mov edx, [esp+4] // restore return value
mov eax, [esp+8] // ...
mov ecx, [esp+16] // (ecx) = saved exception linkage
cmp dword ptr [esp+12], KERNEL_MODE
mov fs:[0], ecx // restore exception linkage
je short sc28 // dispatch thread in kernel mode
lea ecx, [esp+20] // (ecx) = final stack pointer value
mov dword ptr [esp+4], KGDT_R3_CODE | 3
mov [esp+8], ecx // ESP restore value
mov dword ptr [esp+12], KGDT_R3_DATA | 3
retf // return to ring3 & restore stack pointer
sc28: ret 16 // return & clear working data from the stack
}
}
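
// Illustration only -- a minimal C sketch of the decode done at sc12 above
// ("sub ecx, FIRST_METHOD+2" followed by "sar ecx, 1", i.e. APICALL_SCALE == 2):
// the EIP saved by the trap encodes which API method was requested, and the value
// -APICALL_SCALE (an index of -1) marks an API call return.  The helper name below
// is hypothetical.
static int MethodIndexFromTrapEip(ULONG ulTrapEip)
{
    return (int)(ulTrapEip - (FIRST_METHOD + 2)) / APICALL_SCALE;   // the assembly uses "sar ecx, 1"
}
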
int __declspec(naked) KCall(PKFN pfn, ...)
{
__asm {
push ebp
mov ebp, esp
push ebx
mov eax, 12[ebp] // (eax) = arg0
mov edx, 16[ebp] // (edx) = arg1
mov ecx, 20[ebp] // (ecx) = arg2
mov ebx, 8[ebp] // (ebx) = function address
cmp [KData].cNest, 1
jne short kcl50 // Already in non-preemptible state
int KCALL_INT // trap to ring0 for non-preemptible stuff
pop ebx
pop ebp // restore original EBP
ret
kcl50: push ecx // push Arg2
push edx // push Arg1
push eax // push Arg0
call ebx // invoke function
add esp, 3*4 // remove args from stack
pop ebx
pop ebp // restore original EBP
ret
}
}
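
// Illustration only -- a C-level sketch of the dispatch KCall performs above (the real
// routine passes its first three variadic arguments in EAX/EDX/ECX).  KCallSketch and
// TrapToKCallInt are hypothetical names; the latter stands in for the "int KCALL_INT"
// trap into Int22KCallHandler below.
static int KCallSketch(PKFN pfn, DWORD a0, DWORD a1, DWORD a2)
{
    if (KData.cNest != 1)                                   // already non-preemptible: call directly
        return ((int (*)(DWORD, DWORD, DWORD))pfn)(a0, a1, a2);
    return TrapToKCallInt(pfn, a0, a1, a2);                 // hypothetical: raises KCALL_INT so the work
                                                            // runs non-preemptibly on the kernel stack
}
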
#pragma warning(default:4035) // Turn warning back on
Naked Int22KCallHandler(void)
{
__asm {
push eax // fake Error Code
pushad
mov esi, esp // save ptr to register save area
mov esp, offset KData-4 // switch to kernel stack
dec [KData].cNest
sti
push ecx // push Arg2
push edx // push Arg1
push eax // push Arg0
call ebx // invoke non-preemptible function
mov [esi+7*4], eax // save return value into PUSHAD frame
mov ecx, OFFSET KData
xor edi, edi // force Reschedule to reload the thread's state
cli
cmp word ptr ([ecx].bResched), 1
jne short NotReschedule
jmp Reschedule
NotReschedule:
cmp dword ptr ([ecx].dwKCRes), 1
jne short NotResched2
jmp Reschedule
NotResched2:
mov esp, esi
inc [ecx].cNest
popad
add esp, 4 // throw away the error code
iretd
}
}
void InitializePageTables(void)
{
UINT i;
InitIDTEntry(SYSCALL_INT, KGDT_R1_CODE | 1, Int20SyscallHandler(), RING3_INT_GATE);
InitIDTEntry(KCALL_INT, KGDT_R0_CODE, Int22KCallHandler, RING1_INT_GATE);
// Not needed. Done by init.asm --> memset(g_PageTablePool, 0, sizeof(g_PageTablePool));
for (i = 0; i < ARRAY_SIZE(g_PageTablePool); i++) {
g_PTDirtyRegion[i].ulStartBlock = BLOCKS_PER_PAGE_TABLE;
g_PTDirtyRegion[i].ulEndBlock = 0;
g_ShadowPageDir.PTE[i+PDES_PER_SLOT] = LIN_TO_PHYS(&g_PageTablePool[i]) + PG_READ_WRITE;
g_PTMapIdx[i] = i+PDES_PER_SLOT;
}
}
///////////////////////////// FLOATING POINT UNIT CODE /////////////////////////////////
void InitializeEmx87(void) {
// Fast FP save/restore instructions are not available when emulating FP
KCALLPROFON(70);
ProcessorFeatures &= ~CPUID_FXSR;
_asm {
mov eax, cr0
or eax, MP_MASK or EM_MASK
and eax, NOT (TS_MASK or NE_MASK)
mov cr0, eax
}
KCALLPROFOFF(70);
}
void InitNPXHPHandler(LPVOID NPXNPHandler) {
KCALLPROFON(71);
if(pTOC->ulKernelFlags & KFLAG_NOTALLKMODE)
{
InitIDTEntry(0x07, KGDT_R3_CODE, NPXNPHandler, RING3_TRAP_GATE);
}
else
{
InitIDTEntry(0x07, KGDT_R1_CODE, NPXNPHandler, RING1_TRAP_GATE);
}
KCALLPROFOFF(71);
}
Naked FPUNotPresentException(void)
{
_asm {
push eax // Fake error code
// We cannot be emulating FP if we arrive here, so it is safe to skip
// checking whether CR0.EM is set.
pushad
clts
mov ebx, OFFSET KData
dec [ebx].cNest // count kernel reentrancy level
mov esi, esp // (esi) = original stack pointer
jnz short fpu10
lea esp, [ebx-4] // switch to kernel stack (&KData-4)
fpu10:
sti
mov eax, PtrCurThd
push (PTHREAD)[eax].aky
mov (PTHREAD)[eax].aky, 0xffffffff
mov eax, g_CurFPUOwner
test eax, eax
jz NoCurOwner
mov eax, [eax].tlsPtr
sub eax, FLTSAVE_BACKOFF
and eax, 0xfffffff0 // ("and al, 0f0h" would cause a partial-register stall)
test ProcessorFeatures, CPUID_FXSR
jz fpu_fnsave
FXSAVE_EAX
jmp NoCurOwner
fpu_fnsave:
fnsave [eax]
NoCurOwner:
mov eax, PtrCurThd
mov g_CurFPUOwner, eax
pop (PTHREAD)[eax].aky
mov eax, [eax].tlsPtr
sub eax, FLTSAVE_BACKOFF
and eax, 0xfffffff0 // ("and al, 0f0h" would cause a partial-register stall)
test ProcessorFeatures, CPUID_FXSR
jz fpu_frestor
FXRESTOR_EAX
jmp fpu_done
fpu_frestor:
frstor [eax]
fpu_done:
cli
cmp word ptr ([KData].bResched), 1
je short fpu_resched // must reschedule now
inc [ebx].cNest // back out of kernel one level
mov esp, esi // restore stack pointer
popad
add esp, 4 // skip fake error code
iretd
// The reschedule flag was set and we are at the first nest level into the kernel
// so we must reschedule now.
fpu_resched:
mov edi, PtrCurThd // (edi) = ptr to current THREAD
jmp Reschedule
}
}
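
// Illustration only -- a minimal sketch of the save-area address the handler above
// computes twice ("mov eax, [eax].tlsPtr / sub eax, FLTSAVE_BACKOFF / and eax, 0xfffffff0"):
// the FPU context lives FLTSAVE_BACKOFF bytes below the thread's tlsPtr, rounded down to
// a 16-byte boundary because fxsave/fxrstor require 16-byte alignment.  The helper name
// is hypothetical.
static PVOID FltSaveAreaOf(PTHREAD pth)
{
    return (PVOID)(((ULONG)pth->tlsPtr - FLTSAVE_BACKOFF) & ~(ULONG)0xF);
}
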
void FPUFlushContext(void) {
FLOATING_SAVE_AREA *pFSave;
if (g_CurFPUOwner) {
ACCESSKEY ulOldKey;
SWITCHKEY(ulOldKey,0xffffffff);
_asm {
// If we are emulating FP, g_CurFPUOwner is always 0, so we don't
// have to test whether CR0.EM is set (i.e. fnsave will not GP fault).
clts
}
if (g_CurFPUOwner->pThrdDbg && g_CurFPUOwner->pThrdDbg->psavedctx) {
pFSave = &g_CurFPUOwner->pThrdDbg->psavedctx->FloatSave;
_asm {
mov eax, pFSave
fnsave [eax]
}
} else {
pFSave = PTH_TO_FLTSAVEAREAPTR(g_CurFPUOwner);
_asm {
mov eax, pFSave
test ProcessorFeatures, CPUID_FXSR
jz flush_fsave
FXSAVE_EAX
jmp flush_done
flush_fsave:
fnsave [eax]
fwait
flush_done:
}
}
_asm {
mov eax, CR0 // fnsave destroys FP state &
or eax, TS_MASK // g_CurFPUOwner is 0 so we must force
mov CR0, eax // trap 7 on next FP instruction
}
SETCURKEY(ulOldKey);
g_CurFPUOwner = 0;
}
}
Naked FPUException(void)
{
_asm {
push eax // Fake error code
pushad
xor ecx, ecx // EA = 0
mov esi, 16
jmp CommonFault
}
}
void InitializeFPU(void)
{
KCALLPROFON(69);
InitIDTEntry(0x07, KGDT_R0_CODE, FPUNotPresentException, INTERRUPT_GATE);
InitIDTEntry(0x10, KGDT_R0_CODE, FPUException, INTERRUPT_GATE);
_asm {
mov eax, cr0
or eax, MP_MASK OR NE_MASK
and eax, NOT (TS_MASK OR EM_MASK)
mov cr0, eax
finit
fwait
mov ecx, offset g_InitialFPUState
add ecx, 10h // Force 16 byte alignment else
and cl, 0f0h // fxsave will fault
test ProcessorFeatures, CPUID_FXSR
jz no_fxsr
MOV_EDX_CR4
or edx, CR4_FXSR
MOV_CR4_EDX
FXSAVE_ECX
mov [ecx].MXCsr, 01f80h // Mask KNI exceptions
and word ptr [ecx], NOT NPX_CW_PRECISION_MASK // Control word is
or word ptr [ecx], NPX_CW_PRECISION_53 // 16 bits wide here
jmp init_done
no_fxsr:
fnsave [ecx]
// Win32 threads default to long real (53-bit significand).
// Control word is 32 bits wide here
and dword ptr [ecx], NOT NPX_CW_PRECISION_MASK
or dword ptr [ecx], NPX_CW_PRECISION_53
init_done:
or eax, TS_MASK
mov cr0, eax
}
KCALLPROFOFF(69);
}
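
// Illustration only -- the alignment step above ("add ecx, 10h / and cl, 0f0h") expressed
// in C: the pointer is always advanced before being rounded down, so g_InitialFPUState is
// assumed to reserve at least 16 bytes of slack beyond the fxsave image.  The helper name
// is hypothetical.
static PVOID AlignedInitialFPUState(void)
{
    return (PVOID)(((ULONG)&g_InitialFPUState + 0x10) & ~(ULONG)0xF);
}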