//
// physmem.c
//
if (PhysCS.LockCount == 1)
while (pMem = InterlockedPopList(&pFreeKStacks))
FreeHelperStack(pMem);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// SC_GiveKPhys: return an array of physical pages to the kernel's free page
// pool.  Trusted-caller API (untrusted callers are rejected by TRUSTED_API).
//
//  ptr    - IN: array of 'length' page pointers (statically-mapped addresses)
//  length - IN: number of entries in the array
//
// Returns TRUE once the loop completes; a page that does not belong to any
// known memory region is logged (and DEBUGCHK'd) but does not fail the call.
//
BOOL
SC_GiveKPhys(
void *ptr,
ulong length
)
{
BOOL bRet = FALSE;
LPBYTE pPage;
DWORD loop;
PFREEINFO pfi;
PHYSICAL_ADDRESS paPFN;
uint ix;
TRUSTED_API (L"SC_GiveKPhys", FALSE);
DEBUGMSG(ZONE_ENTRY,(L"SC_GiveKPhys entry: %8.8lx %8.8lx\r\n",ptr,length));
// Discard any cached contents of these pages before they re-enter the pool.
OEMCacheRangeFlush (0, 0, CACHE_SYNC_DISCARD);
EnterPhysCS();
for (loop = 0; loop < length; loop++) {
pPage = *((LPBYTE *)ptr+loop);
// Zero the page through the +0x20000000 alias (presumably the uncached
// static mapping of the same page -- TODO confirm per architecture).
ZeroPage(pPage + 0x20000000);
paPFN = GetPFN(pPage);
pfi = GetRegionFromAddr(paPFN);
if (pfi) {
// Mark the page unowned in its region's use map (0xff == fully owned),
// then link it onto the kernel free list atomically via KCall.
ix = (paPFN - pfi->paStart) / PFN_INCR;
DEBUGCHK(pfi->pUseMap[ix] == 0xff);
pfi->pUseMap[ix] = 0;
KCall((PKFN)LinkPhysPage,pPage);
} else {
ERRORMSG(1, (TEXT("GiveKPhys : invalid address 0x%08x (PFN 0x%08x)\r\n"), pPage, paPFN));
DEBUGCHK(0);
}
}
KInfoTable[KINX_NUMPAGES] += length;
// If there are enough free pages, clear the PageOutNeeded flag.
if (PageFreeCount > PageOutLevel)
PageOutNeeded = 0;
bRet = TRUE;
LeaveCriticalSection(&PhysCS);
DEBUGMSG(ZONE_ENTRY,(L"SC_GiveKPhys exit: %8.8lx\r\n",bRet));
return bRet;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// SC_GetKPhys: remove 'length' pages from the kernel's free page pool and
// store their (statically-mapped) addresses into the caller's array.
// Trusted-caller API; the counterpart of SC_GiveKPhys.
//
//  ptr    - OUT: array receiving 'length' page pointers
//  length - IN:  number of pages requested (must not be negative as an int)
//
// Returns TRUE if all pages were obtained.  On failure, any pages already
// grabbed are returned to the pool and FALSE is returned.
//
BOOL
SC_GetKPhys(
void *ptr,
ulong length
)
{
LPVOID *pPages;
BOOL bRet = TRUE;
DWORD dwCount;
LPBYTE pMem;
TRUSTED_API (L"SC_GetKPhys", FALSE);
DEBUGMSG(ZONE_ENTRY,(L"SC_GetKPhys entry: %8.8lx %8.8lx\r\n",ptr,length));
if ((int) length < 0) {
return FALSE;
}
pPages = (LPVOID *)ptr;
EnterPhysCS();
ScavengeStacks(100000); // Reclaim all extra stack pages.
// Grab pages one at a time; GrabFirstPhysPage(0xff) marks the page fully
// owned.  For multi-page requests, stop early rather than drain the pool
// below the page-out trigger.
for (dwCount = length; (int) dwCount > 0; dwCount--) {
if (((length > 1) && ((DWORD)PageFreeCount <= dwCount + PageOutTrigger))
|| !(pMem = (LPBYTE)KCall((PKFN)GrabFirstPhysPage,0xff))) {
bRet = FALSE;
break;
}
KInfoTable[KINX_NUMPAGES]--;
*pPages++ = pMem;
}
LeaveCriticalSection(&PhysCS);
if (!bRet) {
// Partial failure: give back the (length - dwCount) pages already grabbed.
// Note: PhysCS has been released, so SC_GiveKPhys can re-acquire it.
SC_GiveKPhys(ptr,length - dwCount);
RETAILMSG(1,(L"Error getting pages!\r\n"));
}
DEBUGMSG(ZONE_ENTRY,(L"SC_GetKPhys exit: %8.8lx\r\n",bRet));
return bRet;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// InitMemoryPool: boot-time initialization of the physical page free pool.
//
// Publishes kernel data-structure addresses in KInfoTable, records the PFN
// range reserved for page tables, then walks every free-memory region and
// either (a) zeroes and links each unused page onto the kernel free list
// immediately, or (b) once dwOEMCleanPages pages are free, defers the rest of
// the region onto pDirtyList as a "trunk" to be cleaned lazily by
// CleanDirtyPagesThread.  A trunk stores its link and page count in the first
// two dwords of its first page (written through the +0x20000000 alias --
// presumably the uncached mapping; confirm per architecture).
//
void
InitMemoryPool()
{
PFREEINFO pfi, pfiEnd;
DWORD loop;
LPBYTE pPage;
/* Fill in data fields in user visible kernel data page */
KInfoTable[KINX_PROCARRAY] = (long)ProcArray;
KInfoTable[KINX_PAGESIZE] = PAGE_SIZE;
#ifdef PFN_SHIFT
KInfoTable[KINX_PFN_SHIFT] = PFN_SHIFT;
#endif
KInfoTable[KINX_PFN_MASK] = (DWORD)PFNfromEntry(~0);
KInfoTable[KINX_SECTIONS] = (long)SectionTable;
KInfoTable[KINX_MEMINFO] = (long)&MemoryInfo;
KInfoTable[KINX_KDATA_ADDR] = (long)&KData;
// PFN range of the RAM already consumed for page tables (not pooled).
PFNReserveStart = GetPFN (pTOC->ulRAMFree);
PFNReserveEnd = GetPFN (pTOC->ulRAMFree + MemForPT);
OEMCacheRangeFlush (0, 0, CACHE_SYNC_DISCARD);
pDirtyList = NULL;
for (pfi = MemoryInfo.pFi, pfiEnd = pfi+MemoryInfo.cFi ; pfi < pfiEnd ; ++pfi) {
DEBUGMSG(ZONE_MEMORY, (TEXT("InitMemoryPool: Init range: map=%8.8lx start=%8.8lx end=%8.8lx\r\n"),
pfi->pUseMap, pfi->paStart, pfi->paEnd));
pPage = 0;
for (loop = pfi->paStart; loop < pfi->paEnd; loop += PFN_INCR) {
// Only pages whose use-map entry is 0 (unowned) go into the pool.
if (!pfi->pUseMap[(loop-pfi->paStart)/PFN_INCR]) {
pPage = Phys2Virt(loop);
if ((dwOEMCleanPages == 0) || ((DWORD)PageFreeCount < dwOEMCleanPages)) {
// Eager path: clean the page and link it onto the free list now.
ZeroPage(pPage + 0x20000000);
LinkPhysPage(pPage);
LogPtr->pKList = pPage;
KInfoTable[KINX_NUMPAGES]++;
} else {
// Lazy path: enough clean pages for boot.  Push the remainder
// of this region onto pDirtyList as one trunk:
//   dword[0] = next trunk, dword[1] = pages in this trunk,
// then shrink the region so the deferred pages are not scanned.
*(LPBYTE *)((DWORD)pPage + 0x20000000) = pDirtyList;
*(LONG *)((DWORD)pPage + 0x20000004) = (pfi->paEnd - loop)/ PFN_INCR;
pDirtyList = pPage;
pfi->paEnd = loop;
}
}
}
}
KInfoTable[KINX_MINPAGEFREE] = PageFreeCount;
DEBUGMSG(ZONE_MEMORY,(TEXT("InitMemoryPool done, free=%d\r\n"),PageFreeCount));
}
//
// CleanDirtyPagesThread: lazily zero the page trunks deferred by
// InitMemoryPool (pDirtyList) and add them to the kernel free pool.
// Runs at idle priority so boot-time work is not disturbed.
//
//  pv - unused thread parameter.
//
void CleanDirtyPagesThread(LPVOID pv)
{
LPBYTE pList = NULL, pListEnd = NULL, pPage;
DWORD cTotalPages = 0, cPages;
EnterCriticalSection(&DirtyPageCS);
if (!pDirtyList) {
goto exit;
}
// Drop to idle priority: page cleaning is pure background work.
SC_ThreadSetPrio (hCurThread, THREAD_PRIORITY_IDLE);
//
// clean memory and form a list from dirty memory trunks
//
pList = NULL;
cTotalPages = 0;
// since we push new page into pList, thus the first page pushed in will
// be the last page in the list.
pListEnd = pDirtyList;
while (pDirtyList) {
// remove trunk from dirty list
pPage = pDirtyList;
pDirtyList = *(LPBYTE *)pPage; // first dword is the link to next trunk
cPages = *(DWORD *)((DWORD)pPage+4); // second dword contains # of pages in this trunk
DEBUGCHK(cPages);
cTotalPages += cPages;
while (cPages) {
// Zero through the +0x20000000 alias (presumably the uncached mapping
// of the page -- confirm per architecture).
ZeroPage(pPage+0x20000000);
// add page into pList: dword[0] = forward link to previous head,
// dword[1] = reverse link (patched into the old head below) --
// presumably so LinkPhysPageList can splice the whole list at once.
*(LPBYTE *)((DWORD)pPage + 0x20000000) = pList;
*(LPBYTE *)((DWORD)pPage + 0x20000004) = 0;
if (pList)
*(LPBYTE *)((DWORD)pList + 0x20000004) = pPage;
pList = pPage;
pPage += PAGE_SIZE;
cPages--;
}
}
//
// add page list into pKList
//
EnterPhysCS();
KCall((PKFN)LinkPhysPageList, pList, pListEnd, cTotalPages);
KInfoTable[KINX_NUMPAGES] += cTotalPages;
// if there are enough free pages, clear the PageOutNeeded flag.
if (PageFreeCount > PageOutLevel)
PageOutNeeded = 0;
LeaveCriticalSection(&PhysCS);
exit:
LeaveCriticalSection(&DirtyPageCS);
DEBUGMSG(ZONE_MEMORY, (TEXT("CleanDirtyPagesThread: Add memory list start = %X end = %X, %d pages\n"),
pList, pListEnd, cTotalPages));
}
// Stack-scavenger walk state (advanced one step per ScavengeOnePage KCall):
// the thread currently being examined, the process whose thread list is being
// walked, and how many processes have been visited in the current pass.
PTHREAD PthScavTarget;
PPROCESS PprcScavTarget = &ProcArray[0];
int StackScavCount;
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// ScavengeOnePage: one step of the stack-scavenger walk (runs inside a KCall;
// KCALLPROF slot 13).  Advances the PprcScavTarget/PthScavTarget cursor and
// returns the handle of a thread whose stack may be scavenged, or 0 if this
// step only advanced the cursor (the caller simply retries).
//
HTHREAD
ScavengeOnePage(void)
{
HTHREAD ret;
KCALLPROFON(13);
if (!PthScavTarget) {
// No current thread target: move to the next process (wrapping) and
// count it toward the pass; a dead process slot or an empty thread
// list just yields 0 so the caller moves on.
if (++PprcScavTarget >= &ProcArray[MAX_PROCESSES])
PprcScavTarget = ProcArray;
++StackScavCount;
if (!PprcScavTarget->dwVMBase || !(PthScavTarget = PprcScavTarget->pTh)) {
KCALLPROFOFF(13);
return 0;
}
// Never target the currently running thread.
if (PthScavTarget == pCurThread)
PthScavTarget = pCurThread->pNextInProc;
KCALLPROFOFF(13);
return 0;
}
// Target thread died since the last step: reset and retry.
if (HandleToThread(PthScavTarget->hTh) != PthScavTarget) {
PthScavTarget = 0;
KCALLPROFOFF(13);
return 0;
}
// scavenge the stack of a thread only if
// (1) it's not the current running thread, and
// (2) the state of its stack is not changing, and
// (3) it is not currently running on a fiber, and
// (4) the priority of the thread is lower than dwNKMaxPrioNoScav
// (5) not in the middle of ServerCallReturn that's switching stack
ret = ( (pCurThread != PthScavTarget) // not current thread?
&& MDTestStack(PthScavTarget) // stack okay?
&& !KCURFIBER(PthScavTarget) // not on fiber?
&& (GET_CPRIO(PthScavTarget) > dwNKMaxPrioNoScav) // priority low enough?
&& (1) ) ? PthScavTarget->hTh : 0;
// Advance to the next thread in the process, again skipping pCurThread.
if ((PthScavTarget = PthScavTarget->pNextInProc) == pCurThread)
PthScavTarget = pCurThread->pNextInProc;
KCALLPROFOFF(13);
return ret;
}
// Handle of the thread whose stack is currently being scavenged (0 if none).
HANDLE hCurrScav;
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// ScavengeStacks: reclaim unused committed stack pages from other threads
// until the free page count reaches 'cNeed' or every process has been
// visited once.
//
//  cNeed - target value for PageFreeCount.
//
// Returns TRUE if the walk was cut short because cNeed was met (i.e. more
// may remain to scavenge); FALSE when all processes were visited.
//
BOOL
ScavengeStacks(
int cNeed
)
{
HTHREAD hth;
PTHREAD pth;
ulong addr, pages, base;
ulong pte;
int ixPage, ixBlock;
PSECTION pscn;
MEMBLOCK *pmb;
ACCESSKEY ulOldKey;
DEBUGMSG(ZONE_MEMORY, (TEXT("Scavenging stacks for %d pages.\r\n"), cNeed-PageFreeCount));
StackScavCount = 0;
// Gain access to every process's address space for the duration.
SWITCHKEY(ulOldKey,0xffffffff);
while ((StackScavCount <= MAX_PROCESSES) && (cNeed > PageFreeCount)) {
if (hth = (HTHREAD)KCall((PKFN)ScavengeOnePage)) {
// need to call ThreadSuspend directly (fail if can't suspend immediately)
// A negative return (high bit set) means the suspend did not take
// effect immediately; skip the thread in that case.
if (!(KCall (ThreadSuspend, pth = HandleToThread(hth), FALSE) & 0x80000000)) {
hCurrScav = hth;
pages = 0;
OEMCacheRangeFlush (0, 0, CACHE_SYNC_DISCARD);
// Pop reclaimable stack pages one at a time: MDTestStack yields the
// next candidate page address, or 0 when nothing more can be taken.
while (addr = MDTestStack(pth)) {
pscn = IsSecureVa(addr)? &NKSection : SectionTable[addr>>VA_SECTION];
ixBlock = (addr>>VA_BLOCK) & BLOCK_MASK;
ixPage = (addr>>VA_PAGE) & PAGE_MASK;
// Stop unless the page belongs to a live auto-commit memblock,
// is not mid-decommit, and holds a valid committed entry.
if ((pmb = (*pscn)[ixBlock]) == NULL_BLOCK || pmb == RESERVED_BLOCK ||
!(pmb->flags&MB_FLAG_AUTOCOMMIT) || pmb == PmbDecommitting ||
!(pte = pmb->aPages[ixPage]) || pte == BAD_PAGE)
break;
// Uncommit the page, shrink the thread's stack bound, and free
// the backing physical page.
pmb->aPages[ixPage] = 0;
MDShrinkStack(pth);
FreePhysPage(PFNfromEntry(pte));
// Successive candidates are expected to be page-contiguous.
DEBUGCHK(!pages || (base == addr - PAGE_SIZE));
base = addr;
pages++;
}
if (pages)
InvalidateRange((PVOID)base, PAGE_SIZE*pages);
hCurrScav = 0;
ResumeThread(hth);
}
}
}
SETCURKEY(ulOldKey);
return (StackScavCount > MAX_PROCESSES) ? FALSE : TRUE;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
PHYSICAL_ADDRESS
GetHeldPage(void)
{
PHYSICAL_ADDRESS paRet;
LPBYTE pMem;
if (pMem = (LPBYTE)KCall((PKFN)GrabFirstPhysPage,1)) {
PageFreeCount++; // since we already reserved it
LogPhysicalPages(PageFreeCount);
paRet = GetPFN(pMem);
} else
paRet = 0;
DEBUGMSG(ZONE_PHYSMEM,(TEXT("GetHeldPage: Returning %8.8lx\r\n"), paRet));
return paRet;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
BOOL
HoldPages(
int cpNeed,
BOOL bForce
)
{
BOOL fSetGweOomEvent = FALSE;
BOOL bRet = FALSE;
WORD prio;
EnterPhysCS();
// Check if this request will drop the page free count below the
// page out trigger and signal the clean up thread to start doing
// pageouts if so.
if ((cpNeed+PageOutTrigger > PageFreeCount) &&
(!PageOutNeeded || (PagedInCount > PAGE_OUT_TRIGGER))) {
PageOutNeeded = 1;
PagedInCount = 0;
if (prio = GET_CPRIO(pCurThread))
prio--;
if (prio < GET_CPRIO(pCleanupThread))
KCall((PKFN)SetThreadBasePrio, pCleanupThread->hTh, (DWORD)prio);
SetEvent(hAlarmThreadWakeup);
}
do {
if (cpNeed+GwesLowThreshold <= PageFreeCount) {
DWORD pfc, pfc2;
do {
pfc = PageFreeCount;
} while (InterlockedTestExchange(&PageFreeCount,pfc,pfc-cpNeed) != (LONG)pfc);
// update page free water mark. need to take into account other threads updating it
// at the same time. NOTE: don't use PageFreeCount directly since other threads might increment
// it and it'll make the water mark inaccurate. And if it been decremented, other threads will
// update it correctly.
pfc -= cpNeed;
do {
pfc2 = KInfoTable[KINX_MINPAGEFREE];
} while ((pfc2 > pfc)
&& (InterlockedTestExchange ((PLONG)&KInfoTable[KINX_MINPAGEFREE], pfc2, pfc) != (LONG)pfc2));
LogPhysicalPages(PageFreeCount);
bRet = TRUE;
goto hpDone;
}
} while (ScavengeStacks(cpNeed+GwesLowThreshold));
// Even after scavenging stacks, we were unable to satisfy the request
// without going below the GWE low threshold.
// Do not allow a request of size GwesLowBlockSize to succeed if
// doing so would leave less than the low threshold. Same with
// GwesCriticalBlockSize and GwesCriticalThreshold.
if (bForce || !((cpNeed > GwesLowBlockSize
&& cpNeed+GwesLowThreshold > PageFreeCount)
|| (cpNeed > GwesCriticalBlockSize
&& cpNeed + GwesCriticalThreshold > PageFreeCount))) {
// Memory is low. Notify GWE, so that GWE can ask
// the user to close some apps.
if (GwesOOMEvent &&
((PageFreeCount >= GwesLowThreshold) ||
((PageFreeCount < cpNeed + GwesCriticalThreshold))))
fSetGweOomEvent = TRUE;
if ((cpNeed + (bForce?0:STACK_RESERVE)) <= PageFreeCount) {
DWORD pfc, pfc2;
do {
pfc = PageFreeCount;
} while (InterlockedTestExchange(&PageFreeCount,pfc,pfc-cpNeed) != (LONG)pfc);
// update page free water mark. need to take into account other threads updating it
// at the same time. NOTE: don't use PageFreeCount directly since other threads might increment
// it and it'll make the water mark inaccurate. And if it been decremented, other threads will
// update it correctly.
pfc -= cpNeed;
do {
pfc2 = KInfoTable[KINX_MINPAGEFREE];
} while ((pfc2 > pfc)
&& (InterlockedTestExchange ((PLONG)&KInfoTable[KINX_MINPAGEFREE], pfc2, pfc) != (LONG)pfc2));
LogPhysicalPages(PageFreeCount);
bRet = TRUE;
}
}