// physmem.c
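// Tail of ScavengeOnePage, the KCall helper used by ScavengeStacks below: it
// advances PthScavTarget through a process's thread list and returns the
// handle of a thread whose stack looks scavengable (per MDTestStack), or 0
// if there is currently no candidate.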
KCALLPROFOFF(13);
return 0;
}
if (PthScavTarget == pCurThread)
PthScavTarget = pCurThread->pNextInProc;
KCALLPROFOFF(13);
return 0;
}
if (HandleToThread(PthScavTarget->hTh) != PthScavTarget) {
PthScavTarget = 0;
KCALLPROFOFF(13);
return 0;
}
ret = ((pCurThread != PthScavTarget) && MDTestStack(PthScavTarget)) ? PthScavTarget->hTh : 0;
if ((PthScavTarget = PthScavTarget->pNextInProc) == pCurThread)
PthScavTarget = pCurThread->pNextInProc;
KCALLPROFOFF(13);
return ret;
}
HANDLE hCurrScav;
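// ScavengeStacks: reclaim physical pages from thread stacks until at least
// cNeed pages are free or every process has been scanned. Each candidate
// thread returned by ScavengeOnePage is suspended, its unused auto-commit
// stack pages are decommitted (MDTestStack/MDShrinkStack) and their physical
// pages freed, the affected range is invalidated, and the thread is resumed.
// Runs with the access key widened to all processes so the stacks can be
// touched.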
BOOL ScavengeStacks(int cNeed) {
HTHREAD hth;
PTHREAD pth;
ulong addr, pages, base;
ulong pte;
int ixPage, ixBlock;
PSECTION pscn;
MEMBLOCK *pmb;
ACCESSKEY ulOldKey;
DEBUGMSG(ZONE_MEMORY, (TEXT("Scavenging stacks for %d pages.\r\n"), cNeed-PageFreeCount));
StackScavCount = 0;
SWITCHKEY(ulOldKey,0xffffffff);
while ((StackScavCount <= MAX_PROCESSES) && (cNeed > PageFreeCount)) {
if (hth = (HTHREAD)KCall((PKFN)ScavengeOnePage)) {
if (SuspendThread(hth) != -1) {
hCurrScav = hth;
pages = 0;
pth = HandleToThread(hth);
SC_CacheSync(CACHE_SYNC_DISCARD);
while (addr = MDTestStack(pth)) {
pscn = SectionTable[addr>>VA_SECTION];
ixBlock = (addr>>VA_BLOCK) & BLOCK_MASK;
ixPage = (addr>>VA_PAGE) & PAGE_MASK;
if ((pmb = (*pscn)[ixBlock]) == NULL_BLOCK || pmb == RESERVED_BLOCK ||
!(pmb->flags&MB_FLAG_AUTOCOMMIT) || pmb == PmbDecommitting ||
!(pte = pmb->aPages[ixPage]) || pte == BAD_PAGE)
break;
pmb->aPages[ixPage] = 0;
MDShrinkStack(pth);
FreePhysPage(PFNfromEntry(pte));
DEBUGCHK(!pages || (base == addr - PAGE_SIZE));
base = addr;
pages++;
}
if (pages)
InvalidateRange((PVOID)base, PAGE_SIZE*pages);
hCurrScav = 0;
ResumeThread(hth);
}
}
}
SETCURKEY(ulOldKey);
return (StackScavCount > MAX_PROCESSES) ? FALSE : TRUE;
}
PHYSICAL_ADDRESS GetHeldPage(void) {
PHYSICAL_ADDRESS paRet;
LPBYTE pMem;
if (pMem = (LPBYTE)KCall((PKFN)GrabFirstPhysPage,1)) {
PageFreeCount++; // since we already reserved it
LogPhysicalPages(PageFreeCount);
paRet = GetPFN(pMem);
} else
paRet = 0;
DEBUGMSG(ZONE_PHYSMEM,(TEXT("GetHeldPage: Returning %8.8lx\r\n"), paRet));
return paRet;
}
BOOL HoldPages(int cpNeed, BOOL bForce) {
BOOL fSetGweOomEvent = FALSE;
BOOL bRet = FALSE;
WORD prio;
EnterPhysCS();
// If this request would drop the page free count below the page-out
// trigger, signal the cleanup thread to start paging out.
if ((cpNeed+PageOutTrigger > PageFreeCount) &&
(!PageOutNeeded || (PagedInCount > PAGE_OUT_TRIGGER))) {
PageOutNeeded = 1;
PagedInCount = 0;
if (prio = GET_CPRIO(pCurThread))
prio--;
if (prio < GET_CPRIO(pCleanupThread))
KCall((PKFN)SetThreadBasePrio, pCleanupThread->hTh, (DWORD)prio);
SetEvent(hAlarmThreadWakeup);
}
do {
if (cpNeed+GwesLowThreshold <= PageFreeCount) {
DWORD pfc;
do {
pfc = PageFreeCount;
} while (InterlockedTestExchange(&PageFreeCount,pfc,pfc-cpNeed) != (LONG)pfc);
LogPhysicalPages(PageFreeCount);
bRet = TRUE;
goto hpDone;
}
} while (ScavengeStacks(cpNeed+GwesLowThreshold));
// Even after scavenging stacks, we were unable to satisfy the request
// without going below the GWE low threshold.
// Do not allow a request larger than GwesLowBlockSize to succeed if
// doing so would leave fewer than GwesLowThreshold pages free; likewise
// for GwesCriticalBlockSize and GwesCriticalThreshold.
if (bForce || !((cpNeed > GwesLowBlockSize
&& cpNeed+GwesLowThreshold > PageFreeCount)
|| (cpNeed > GwesCriticalBlockSize
&& cpNeed + GwesCriticalThreshold > PageFreeCount))) {
// Memory is low. Notify GWE, so that GWE can ask
// the user to close some apps.
if (GwesOOMEvent &&
((PageFreeCount >= GwesLowThreshold) ||
((PageFreeCount < cpNeed + GwesCriticalThreshold))))
fSetGweOomEvent = TRUE;
if ((cpNeed + (bForce?0:STACK_RESERVE)) <= PageFreeCount) {
DWORD pfc;
do {
pfc = PageFreeCount;
} while (InterlockedTestExchange(&PageFreeCount,pfc,pfc-cpNeed) != (LONG)pfc);
LogPhysicalPages(PageFreeCount);
bRet = TRUE;
}
}
hpDone:
LeaveCriticalSection(&PhysCS);
if (fSetGweOomEvent)
SetEvent(GwesOOMEvent);
return bRet;
}
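// DupPhysPage: add a reference to the physical page at paPFN by bumping its
// use count in the owning region's use map.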
void DupPhysPage(PHYSICAL_ADDRESS paPFN) {
PFREEINFO pfi;
uint ix;
//NOTE: DupPhysPage and FreePhysPage are only called by the virtual memory
// system with the VA critical section held, so DupPhysPage does not need to
// claim the physical memory critical section because it does not change
// the page free count.
if ((pfi = GetRegionFromAddr(paPFN)) != 0) {
ix = (paPFN - pfi->paStart) / PFN_INCR;
DEBUGCHK(pfi->pUseMap[ix] != 0);
DEBUGMSG(ZONE_PHYSMEM, (TEXT("DupPhysPage: PFN=%8.8lx ix=%x rc=%d\r\n"), paPFN,
ix, pfi->pUseMap[ix]));
++pfi->pUseMap[ix];
}
}
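// FreePhysPage: drop a reference to the physical page at paPFN. A use count
// of 0xff marks a permanently held page and is never decremented. When the
// count reaches zero the page is zeroed through what appears to be its
// uncached alias (+0x20000000), relinked onto the free page list via KCall,
// and PageOutNeeded is cleared once PageFreeCount rises above PageOutLevel.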
void FreePhysPage(PHYSICAL_ADDRESS paPFN) {
PFREEINFO pfi;
uint ix;
if ((pfi = GetRegionFromAddr(paPFN)) != 0) {
ix = (paPFN - pfi->paStart) / PFN_INCR;
DEBUGMSG(ZONE_PHYSMEM, (TEXT("FreePhysPage: PFN=%8.8lx ix=%x rc=%d\r\n"), paPFN,
ix, pfi->pUseMap[ix]));
EnterPhysCS();
DEBUGCHK(pfi->pUseMap[ix] != 0);
if ((pfi->pUseMap[ix] != 0xff) && (!--pfi->pUseMap[ix])) {
DEBUGMSG(ZONE_PHYSMEM, (TEXT("FreePhysPage: PFN=%8.8lx\r\n"), paPFN));
ZeroPage((LPBYTE)Phys2Virt(paPFN)+0x20000000);
KCall((PKFN)LinkPhysPage,Phys2Virt(paPFN));
// If there are enough free pages, clear the PageOutNeeded flag.
if (PageFreeCount > PageOutLevel)
PageOutNeeded = 0;
}
LeaveCriticalSection(&PhysCS);
}
}
ERRFALSE(sizeof(SECTION) == 2048);
#if PAGE_SIZE == 4096
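// With 4KB pages a SECTION (2048 bytes, as asserted above) fits in a single
// page, so GetSection simply takes one held page and returns its virtual
// address.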
PSECTION GetSection(void)
{
PHYSICAL_ADDRESS pAddr;
if ((PageFreeCount > 1+MIN_PROCESS_PAGES) && HoldPages(1, FALSE)) {
if (pAddr = GetHeldPage()) {
InterlockedIncrement(&KInfoTable[KINX_SYSPAGES]);
return Phys2Virt(pAddr); // don't pass GetHeldPage() directly to Phys2Virt: it's a macro and would evaluate its argument more than once
}
InterlockedIncrement(&PageFreeCount);
}
return 0;
}
void FreeSection(PSECTION pscn) {
SC_CacheSync(CACHE_SYNC_DISCARD);
FreePhysPage(GetPFN(pscn));
InterlockedDecrement(&KInfoTable[KINX_SYSPAGES]);
}
LPVOID GetHelperStack(void) {
return AllocMem(HEAP_HLPRSTK);
// return GetSection();
}
void FreeHelperStack(LPVOID pMem) {
FreeMem(pMem,HEAP_HLPRSTK);
// FreeSection(pMem);
}
#else
ERRFALSE((sizeof(SECTION)+PAGE_SIZE-1)/PAGE_SIZE == 2);
void FreeSection(PSECTION pscn) {
PHYSICAL_ADDRESS paSect;
paSect = GetPFN(pscn);
SC_CacheSync(CACHE_SYNC_DISCARD);
FreePhysPage(paSect);
FreePhysPage(NextPFN(paSect));
InterlockedDecrement(&KInfoTable[KINX_SYSPAGES]);
InterlockedDecrement(&KInfoTable[KINX_SYSPAGES]);
}
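// TakeTwoPages: KCall helper that atomically claims two adjacent page frames
// from a region's use map and unlinks both from the free page list. Returns
// pMem on success, 0 if either page is already in use.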
LPBYTE TakeTwoPages(PFREEINFO pfi, uint ix, LPBYTE pMem) {
KCALLPROFON(40);
if (pfi->pUseMap[ix] || pfi->pUseMap[ix+1]) {
KCALLPROFOFF(40);
return 0;
}
pfi->pUseMap[ix] = 1;
pfi->pUseMap[ix+1] = 1;
UnlinkPhysPage(pMem);
UnlinkPhysPage(pMem+PAGE_SIZE);
KCALLPROFOFF(40);
return pMem;
}
PSECTION GetSection(void) {
PFREEINFO pfi, pfiEnd;
uint ix;
PHYSICAL_ADDRESS paSect;
PSECTION pscn = 0;
LPVOID pMem;
while (pMem = InterlockedPopList(&pFreeKStacks))
FreeHelperStack(pMem);
if ((PageFreeCount > 2+MIN_PROCESS_PAGES) && HoldPages(2, FALSE)) {
for (pfi = &MemoryInfo.pFi[0], pfiEnd = pfi+MemoryInfo.cFi ; pfi < pfiEnd ; ++pfi) {
paSect = pfi->paEnd - 2 * PFN_INCR;
ix = (paSect - pfi->paStart) / PFN_INCR;
while (paSect >= pfi->paStart) {
if (pscn = (PSECTION)KCall((PKFN)TakeTwoPages,pfi,ix,Phys2Virt(paSect))) {
InterlockedIncrement(&KInfoTable[KINX_SYSPAGES]);
InterlockedIncrement(&KInfoTable[KINX_SYSPAGES]);
goto foundPages;
}
ix--;
paSect -= PFN_INCR;
}
}
InterlockedIncrement(&PageFreeCount);
InterlockedIncrement(&PageFreeCount);
}
foundPages:
DEBUGMSG(ZONE_PHYSMEM,(TEXT("GetSection: Returning %8.8lx\r\n"), pscn));
return pscn;
}
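// Helper stacks in this configuration are single whole pages, allocated with
// the HoldPages/GetHeldPage pattern and freed back to the physical page pool.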
LPVOID GetHelperStack(void) {
PHYSICAL_ADDRESS pAddr;
if (HoldPages(1, FALSE)) {
if (pAddr = GetHeldPage()) {
InterlockedIncrement(&KInfoTable[KINX_SYSPAGES]);
return Phys2Virt(pAddr); // don't pass GetHeldPage() directly to Phys2Virt: it's a macro and would evaluate its argument more than once
}
InterlockedIncrement(&PageFreeCount);
}
return 0;
}
void FreeHelperStack(LPVOID pMem) {
SC_CacheSync(CACHE_SYNC_DISCARD);
FreePhysPage(GetPFN(pMem));
InterlockedDecrement(&KInfoTable[KINX_SYSPAGES]);
}
#endif