/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* FILE: ntoskrnl/ke/i386/kiinit.c
* PURPOSE: Kernel Initialization for x86 CPUs
* PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
*/
/* INCLUDES *****************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* GLOBALS *******************************************************************/
/* Spinlocks used only on X86 */
KSPIN_LOCK KiFreezeExecutionLock;
KSPIN_LOCK Ki486CompatibilityLock;
/* BIOS Memory Map. Not NTLDR-compliant yet */
extern ULONG KeMemoryMapRangeCount;
extern ADDRESS_RANGE KeMemoryMap[64];
/* FUNCTIONS *****************************************************************/
VOID
NTAPI
KiInitMachineDependent(VOID)
{
ULONG Protect;
ULONG CpuCount;
BOOLEAN FbCaching = FALSE;
NTSTATUS Status;
ULONG ReturnLength;
ULONG i, Affinity, Sample = 0;
PFX_SAVE_AREA FxSaveArea;
ULONG MXCsrMask = 0xFFBF;
ULONG Dummy[4];
KI_SAMPLE_MAP Samples[4];
PKI_SAMPLE_MAP CurrentSample = Samples;
/* Check for large page support */
if (KeFeatureBits & KF_LARGE_PAGE)
{
/* FIXME: Support this */
DPRINT1("Large Page support detected but not yet taken advantage of!\n");
}
/* Check for global page support */
if (KeFeatureBits & KF_GLOBAL_PAGE)
{
/* Do an IPI to enable it on all CPUs */
CpuCount = KeNumberProcessors;
KeIpiGenericCall(Ki386EnableGlobalPage, (ULONG_PTR)&CpuCount);
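/* KeIpiGenericCall runs the routine on every active processor at once;
   enabling global pages means setting CR4.PGE on each of them, and
   CpuCount is presumably used by Ki386EnableGlobalPage as a rendezvous
   counter so all CPUs flip the bit together (assumption, not verified) */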
}
/* Check for PAT and/or MTRR support */
if (KeFeatureBits & (KF_PAT | KF_MTRR))
{
/* Query the HAL to make sure we can use it */
Status = HalQuerySystemInformation(HalFrameBufferCachingInformation,
sizeof(BOOLEAN),
&FbCaching,
&ReturnLength);
if ((NT_SUCCESS(Status)) && (FbCaching))
{
/* We can't, disable it */
KeFeatureBits &= ~(KF_PAT | KF_MTRR);
}
}
/* Check for PAT support and enable it */
if (KeFeatureBits & KF_PAT) KiInitializePAT();
/* Assume no errata for now */
SharedUserData->ProcessorFeatures[PF_FLOATING_POINT_PRECISION_ERRATA] = 0;
/* Check if we have an NPX */
if (KeI386NpxPresent)
{
/* Loop every CPU */
i = KeActiveProcessors;
for (Affinity = 1; i; Affinity <<= 1)
{
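/* i holds the mask of processors still to visit; Affinity walks one bit
   per iteration, and KeSetSystemAffinityThread below temporarily binds
   this thread to that single CPU so the NPX errata check runs on each
   processor in turn */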
/* Check if this is part of the set */
if (i & Affinity)
{
/* Run on this CPU */
i &= ~Affinity;
KeSetSystemAffinityThread(Affinity);
/* Detect FPU errata */
if (KiIsNpxErrataPresent())
{
/* Disable NPX support */
KeI386NpxPresent = FALSE;
SharedUserData->
ProcessorFeatures[PF_FLOATING_POINT_PRECISION_ERRATA] =
TRUE;
break;
}
}
}
}
/* If there's no NPX, then we're emulating the FPU */
SharedUserData->ProcessorFeatures[PF_FLOATING_POINT_EMULATED] =
!KeI386NpxPresent;
/* Check if there's no NPX, so that we can disable associated features */
if (!KeI386NpxPresent)
{
/* Remove NPX-related bits */
KeFeatureBits &= ~(KF_XMMI64 | KF_XMMI | KF_FXSR | KF_MMX);
/* Disable kernel flags */
KeI386FxsrPresent = KeI386XMMIPresent = FALSE;
/* Disable processor features that might've been set until now */
SharedUserData->ProcessorFeatures[PF_FLOATING_POINT_PRECISION_ERRATA] =
SharedUserData->ProcessorFeatures[PF_XMMI64_INSTRUCTIONS_AVAILABLE] =
SharedUserData->ProcessorFeatures[PF_XMMI_INSTRUCTIONS_AVAILABLE] =
SharedUserData->ProcessorFeatures[PF_3DNOW_INSTRUCTIONS_AVAILABLE] =
SharedUserData->ProcessorFeatures[PF_MMX_INSTRUCTIONS_AVAILABLE] = 0;
}
/* Check for CR4 support */
if (KeFeatureBits & KF_CR4)
{
/* Do an IPI call to enable the Debug Exceptions */
CpuCount = KeNumberProcessors;
KeIpiGenericCall(Ki386EnableDE, (ULONG_PTR)&CpuCount);
}
/* Check if FXSR was found */
if (KeFeatureBits & KF_FXSR)
{
/* Do an IPI call to enable the FXSR */
CpuCount = KeNumberProcessors;
KeIpiGenericCall(Ki386EnableFxsr, (ULONG_PTR)&CpuCount);
/* Check if XMM was found too */
if (KeFeatureBits & KF_XMMI)
{
/* Do an IPI call to enable XMMI exceptions */
CpuCount = KeNumberProcessors;
KeIpiGenericCall(Ki386EnableXMMIExceptions, (ULONG_PTR)&CpuCount);
/* FIXME: Implement and enable XMM Page Zeroing for Mm */
/* Patch the RtlPrefetchMemoryNonTemporal routine to enable it */
Protect = MmGetPageProtect(NULL, RtlPrefetchMemoryNonTemporal);
MmSetPageProtect(NULL,
RtlPrefetchMemoryNonTemporal,
Protect | PAGE_IS_WRITABLE);
*(PCHAR)RtlPrefetchMemoryNonTemporal = 0x90;
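/* 0x90 is the single-byte NOP opcode: the routine is written to begin
   with a RET so that non-temporal prefetching stays disabled by default,
   and overwriting that first byte lets execution fall through into the
   actual prefetch loop; the page protection is restored right below */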
MmSetPageProtect(NULL, RtlPrefetchMemoryNonTemporal, Protect);
}
}
/* Check for, and enable SYSENTER support */
KiRestoreFastSyscallReturnState();
/* Loop every CPU */
i = KeActiveProcessors;
for (Affinity = 1; i; Affinity <<= 1)
{
/* Check if this is part of the set */
if (i & Affinity)
{
/* Run on this CPU */
i &= ~Affinity;
KeSetSystemAffinityThread(Affinity);
/* Reset MHz to 0 for this CPU */
KeGetCurrentPrcb()->MHz = 0;
/* Check if we can use RDTSC */
if (KeFeatureBits & KF_RDTSC)
{
/* Start sampling loop */
for (;;)
{
/* Do a dummy CPUID to start the sample */
CPUID(Dummy, 0);
/* Fill out the starting data */
CurrentSample->PerfStart = KeQueryPerformanceCounter(NULL);
CurrentSample->TSCStart = __rdtsc();
CurrentSample->PerfFreq.QuadPart = -50000;
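/* A negative value for KeDelayExecutionThread is a relative interval
   in 100 ns units, so -50000 is a 5 ms sampling window */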
/* Sleep for this sample */
KeDelayExecutionThread(KernelMode,
FALSE,
&CurrentSample->PerfFreq);
/* Do another dummy CPUID */
CPUID(Dummy, 0);
/* Fill out the ending data */
CurrentSample->PerfEnd =
KeQueryPerformanceCounter(&CurrentSample->PerfFreq);
CurrentSample->TSCEnd = __rdtsc();
/* Calculate the differences */
CurrentSample->PerfDelta = CurrentSample->PerfEnd.QuadPart -
CurrentSample->PerfStart.QuadPart;
CurrentSample->TSCDelta = CurrentSample->TSCEnd -
CurrentSample->TSCStart;
/* Compute CPU Speed */
CurrentSample->MHz =
    (ULONG)((CurrentSample->TSCDelta *
             CurrentSample->PerfFreq.QuadPart + 500000) /
            (CurrentSample->PerfDelta * 1000000));
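/* The ratio TSCDelta / PerfDelta is TSC ticks per performance-counter
   tick; scaling by PerfFreq gives the TSC rate in Hz, and dividing by
   1,000,000 converts to MHz. Illustrative numbers only: with the common
   3,579,545 Hz counter, a 5 ms window yields a PerfDelta of roughly
   17,900 ticks; on a 2 GHz part TSCDelta is about 10,000,000, so the
   expression comes out to roughly 2000 */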
/* Check if this isn't the first sample */
if (Sample)
{
/* Check if we got good precision, within 1 MHz */
if ((CurrentSample->MHz == CurrentSample[-1].MHz) ||
(CurrentSample->MHz == CurrentSample[-1].MHz + 1) ||
(CurrentSample->MHz == CurrentSample[-1].MHz - 1))
{
/* We did, stop sampling */
break;
}
}
/* Move on */
CurrentSample++;
Sample++;
if (Sample == sizeof(Samples) / sizeof(Samples[0]))
{
/* Restart */
CurrentSample = Samples;
Sample = 0;
}
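/* Samples acts as a small ring buffer: if four attempts go by without
   two consecutive readings within 1 MHz of each other, sampling simply
   starts over from the beginning of the array */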
}
/* Save the CPU Speed */
KeGetCurrentPrcb()->MHz = CurrentSample[-1].MHz;
}
/* Check if we have MTRR */
if (KeFeatureBits & KF_MTRR)
{
/* Then manually initialize MTRR for the CPU */
KiInitializeMTRR(i ? FALSE : TRUE);
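/* i is the mask of CPUs not yet visited, so it is zero only on the
   last processor; the parameter presumably tells KiInitializeMTRR that
   this is the final CPU so the ranges can be committed (an assumption
   about its meaning, not verified here) */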
}
/* Check if we have AMD MTRR and initialize it for the CPU */
if (KeFeatureBits & KF_AMDK6MTRR) KiAmdK6InitializeMTRR();
/* Check if this is a buggy Pentium and apply the fixup if so */
if (KiI386PentiumLockErrataPresent) KiI386PentiumLockErrataFixup();
/* Check if the CPU supports FXSR */
if (KeFeatureBits & KF_FXSR)
{
/* Get the current thread NPX state */
FxSaveArea = (PVOID)
((ULONG_PTR)KeGetCurrentThread()->InitialStack -
NPX_FRAME_LENGTH);
/* Clear initial MXCsr mask */
FxSaveArea->U.FxArea.MXCsrMask = 0;
/* Save the current NPX State */
#ifdef __GNUC__
asm volatile("fxsave %0\n\t" : "=m" (*FxSaveArea));
#else
/* MSVC inline asm: load the pointer first; FXSAVE needs the save area's
   address, not the stack slot that holds the pointer */
__asm mov eax, FxSaveArea
__asm fxsave [eax]
#endif
/* Check if the CPU reported its own (nonzero) MXCSR mask */
if (FxSaveArea->U.FxArea.MXCsrMask != 0)
{
/* Then use whatever it's holding */
MXCsrMask = FxSaveArea->U.FxArea.MXCsrMask;
}
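/* Per the FXSAVE image layout, a zero MXCSR_MASK field (bytes 28-31 of
   the save area) means the processor does not report a mask, in which
   case the architectural default of 0xFFBF, set above, is to be used */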
/* Check if nobody set the kernel-wide mask */
if (!KiMXCsrMask)
{
/* Then use the one we calculated above */
KiMXCsrMask = MXCsrMask;
}
else
{
/* Was it set to the same value we found now? */
if (KiMXCsrMask != MXCsrMask)
{
/* No, something is definitely wrong */
KEBUGCHECKEX(MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED,
KF_FXSR,
KiMXCsrMask,
MXCsrMask,
0);
}
}
/* Now set the kernel mask */
KiMXCsrMask &= MXCsrMask;
}
}
}
/* Return affinity back to where it was */
KeRevertToUserAffinityThread();
/* NT allows limiting the duration of an ISR with a registry key */
if (KiTimeLimitIsrMicroseconds)
{
/* FIXME: TODO */
DPRINT1("ISR Time Limit not yet supported\n");
}
}
VOID
NTAPI
KiInitializePcr(IN ULONG ProcessorNumber,
IN PKIPCR Pcr,
IN PKIDTENTRY Idt,
IN PKGDTENTRY Gdt,
IN PKTSS Tss,
IN PKTHREAD IdleThread,
IN PVOID DpcStack)
{
/* Setup the TIB */
Pcr->NtTib.ExceptionList = EXCEPTION_CHAIN_END;
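/* EXCEPTION_CHAIN_END is the usual -1 sentinel, i.e. an empty SEH
   registration chain for this processor's initial TIB */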
Pcr->NtTib.StackBase = 0;
Pcr->NtTib.StackLimit = 0;
Pcr->NtTib.Self = NULL;
/* Set the Current Thread */
Pcr->PrcbData.CurrentThread = IdleThread;
/* Set pointers to ourselves */
Pcr->Self = (PKPCR)Pcr;
Pcr->Prcb = &Pcr->PrcbData;
/* Set the PCR Version */
Pcr->MajorVersion = PCR_MAJOR_VERSION;
Pcr->MinorVersion = PCR_MINOR_VERSION;
/* Set the PRCB Version */
Pcr->PrcbData.MajorVersion = 1;
Pcr->PrcbData.MinorVersion = 1;
/* Set the Build Type */
Pcr->PrcbData.BuildType = 0;
#ifndef CONFIG_SMP
Pcr->PrcbData.BuildType |= PRCB_BUILD_UNIPROCESSOR;
#endif
#ifdef DBG
Pcr->PrcbData.BuildType |= PRCB_BUILD_DEBUG;
#endif
/* Set the Processor Number and current Processor Mask */
Pcr->PrcbData.Number = (UCHAR)ProcessorNumber;
Pcr->PrcbData.SetMember = 1 << ProcessorNumber;
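/* SetMember is this processor's bit in KAFFINITY masks: bit N for CPU N */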
/* Set the PRCB for this Processor */
KiProcessorBlock[ProcessorNumber] = Pcr->Prcb;
/* Start us out at PASSIVE_LEVEL */
Pcr->Irql = PASSIVE_LEVEL;
/* Set the GDT, IDT, TSS and DPC Stack */
Pcr->GDT = (PVOID)Gdt;
Pcr->IDT = Idt;
Pcr->TSS = Tss;
Pcr->TssCopy = Tss;
Pcr->PrcbData.DpcStack = DpcStack;
/* Setup the processor set */
Pcr->PrcbData.MultiThreadProcessorSet = Pcr->PrcbData.SetMember;
}
VOID
NTAPI
KiInitializeKernel(IN PKPROCESS InitProcess,