virtmem.c
if (!pmbiBuffer || !lpvAddress || (ulong)lpvAddress>>VA_SECTION > SECTION_MASK)
goto invalidParm;
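/* Decompose the virtual address into its section, block and page indices.
 * Illustrative layout only (the actual VA_* shift and mask constants come
 * from the kernel headers): a VA is treated as
 *   | section index | block index | page index | byte offset |
 * e.g. 4KB pages, 64KB blocks and 32MB sections on a typical build. */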
pscn = SectionTable[((ulong)lpvAddress>>VA_SECTION) & SECTION_MASK];
if (pscn == NULL_SECTION)
goto invalidParm;
ixPage = ((ulong)lpvAddress >> VA_PAGE) & PAGE_MASK;
ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK;
if ((pmb = (*pscn)[ixBlock]) == NULL_BLOCK ||
(pmb != RESERVED_BLOCK && pmb->aPages[ixPage] == BAD_PAGE) ||
(ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE) {
/* The given page is in a free region; walk the section to compute
* the size of the region */
cPages = PAGES_PER_BLOCK - ixPage;
while (++ixBlock < BLOCK_MASK+1 && (*pscn)[ixBlock] == NULL_BLOCK)
cPages += PAGES_PER_BLOCK;
pmbiBuffer->AllocationBase = 0;
pmbiBuffer->State = MEM_FREE;
pmbiBuffer->AllocationProtect = PAGE_NOACCESS;
pmbiBuffer->Protect = PAGE_NOACCESS;
pmbiBuffer->Type = 0;
} else {
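/* The page lies inside an existing reservation.  Classify it as either
 * reserved-but-uncommitted or committed, and count how many following
 * pages in the same reservation share that state. */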
cPages = 0;
pmbiBuffer->AllocationProtect = PAGE_NOACCESS;
pmbiBuffer->Type = MEM_PRIVATE;
if (pmb == RESERVED_BLOCK || pmb->aPages[ixPage] == 0) {
/* Count reserved pages */
pmbiBuffer->State = MEM_RESERVE;
pmbiBuffer->Protect = PAGE_NOACCESS;
do {
if (pmb == RESERVED_BLOCK)
cPages += PAGES_PER_BLOCK - ixPage;
else {
pmbiBuffer->Type = AllocationType[pmb->flags & MB_FLAG_PAGER_TYPE];
do {
if (pmb->aPages[ixPage] != 0)
goto allCounted;
++cPages;
} while (++ixPage < PAGES_PER_BLOCK);
}
ixPage = 0;
} while (++ixBlock < BLOCK_MASK+1 &&
((pmb = (*pscn)[ixBlock]) == RESERVED_BLOCK ||
(pmb != NULL_BLOCK && pmb->ixBase == ixFirB)));
} else {
/* Count committed pages */
pmbiBuffer->State = MEM_COMMIT;
ulPgPerm = pmb->aPages[ixPage] & PG_PERMISSION_MASK;
pmbiBuffer->Protect = ProtectFromPerms(pmb->aPages[ixPage]);
pmbiBuffer->Type = AllocationType[pmb->flags & MB_FLAG_PAGER_TYPE];
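/* Extend the run over contiguous committed pages that carry the same
 * protection bits and still belong to this reservation (blocks whose
 * ixBase matches ixFirB).  The inner loop walks pages within a block,
 * the outer loop advances to the next block. */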
do {
do {
ulong ulPgInfo = pmb->aPages[ixPage];
if (ulPgInfo == 0 || ulPgInfo == BAD_PAGE
|| (ulPgInfo&PG_PERMISSION_MASK) != ulPgPerm)
goto allCounted;
++cPages;
} while (++ixPage < PAGES_PER_BLOCK);
ixPage = 0;
} while (++ixBlock < BLOCK_MASK+1 &&
(pmb = (*pscn)[ixBlock]) != NULL_BLOCK &&
pmb != RESERVED_BLOCK && pmb->ixBase == ixFirB);
}
allCounted:
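/* AllocationBase is the start of the enclosing VirtualAlloc reservation:
 * the section bits of the queried address combined with the index of the
 * first block of the region. */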
pmbiBuffer->AllocationBase =
(LPVOID)(((ulong)lpvAddress&(SECTION_MASK<<VA_SECTION)) |
((ulong)ixFirB << VA_BLOCK));
}
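/* Report the run length in bytes and the page-aligned base of the
 * queried address. */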
pmbiBuffer->RegionSize = (DWORD)cPages << VA_PAGE;
pmbiBuffer->BaseAddress = (LPVOID)((ulong)lpvAddress & ~(PAGE_SIZE-1));
return sizeof(MEMORY_BASIC_INFORMATION);
invalidParm:
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualQuery failed.\r\n")));
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return 0;
}
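/* DoLockPages - lock a range of committed pages in memory
 *
 * Validates that the range lies within a single reserved region, makes
 * sure every page in the range is committed (paging in or auto-committing
 * as needed), bumps the lock count of each MEMBLOCK spanned, and
 * optionally returns the physical frame number of each page via pPFNs.
 * With LOCKFLAG_QUERY_ONLY the lock counts are left untouched.
 */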
BOOL DoLockPages(
LPCVOID lpvAddress, /* address of region of committed pages */
DWORD cbSize, /* size of the region */
PDWORD pPFNs, /* address of array to receive real PFNs */
int fOptions) /* options: see LOCKFLAG_* in kernel.h */
{
int ixBlock;
int ixPage;
int ixFirB; /* index of first block in region */
int ixStart; /* index of start block of lock region */
PSECTION pscn;
int cPages; /* # of pages to adjust */
ulong ulBase; /* base virtual address */
MEMBLOCK *pmb;
int err = ERROR_INVALID_PARAMETER;
DEBUGMSG(ZONE_VIRTMEM, (TEXT("LockPages @%8.8lx size=%lx pPFNs=%lx options=%x\r\n"),
lpvAddress, cbSize, pPFNs, fOptions));
/* Verify that the requested region is within range and within an
* existing reserved memory region that the client is allowed to
* access, and locate the starting block of the region.
*/
if ((ulong)lpvAddress>>VA_SECTION > SECTION_MASK)
return TRUE;
/* Lock out other changes to the virtual memory state. */
EnterCriticalSection(&VAcs);
if (!cbSize || !lpvAddress)
goto invalidParm;
ulBase = (ulong)lpvAddress & (SECTION_MASK << VA_SECTION);
pscn = SectionTable[ulBase>>VA_SECTION];
if (pscn == NULL_SECTION)
goto invalidParm;
ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK;
if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE)
goto invalidParm;
/* Verify that all of the pages within the specified range belong to the
* same VirtualAlloc region and are committed.
*
* (pscn) = ptr to section array
* (ixBlock) = index of block containing the first page of lpvAddress
* (ixFirB) = index of first block in the region
*/
ixPage = ((ulong)lpvAddress >> VA_PAGE) & PAGE_MASK;
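/* Number of pages spanned: round the end of the range up to a page
 * boundary and subtract the page containing the start address. */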
cPages = (((ulong)lpvAddress + cbSize + PAGE_SIZE-1) / PAGE_SIZE) - ((ulong)lpvAddress / PAGE_SIZE);
if (ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, 0) == -1)
goto invalidParm;
/* Walk the page tables to check that all pages are present and to
* increment the lock count for each MEMBLOCK in the region.
*/
ixStart = ixBlock;
for (; cPages ; ++ixBlock) {
pmb = (*pscn)[ixBlock];
if (!(fOptions & LOCKFLAG_QUERY_ONLY)) {
++pmb->cLocks;
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Locking pages @%8.8x LC=%d\r\n"),
ulBase | (ixBlock<<VA_BLOCK), pmb->cLocks));
}
do {
ulong addr = ulBase | (ixBlock<<VA_BLOCK) | (ixPage<<VA_PAGE);
DEBUGCHK(pmb->aPages[ixPage] != BAD_PAGE);
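/* Page not committed yet: commit it directly for auto-commit blocks,
 * otherwise drop VAcs and let the pager fault it in.  A write fault is
 * tried first; if the caller didn't ask for write access, a read fault
 * is accepted as a fallback. */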
if (pmb->aPages[ixPage] == 0) {
if (pmb->flags&MB_FLAG_AUTOCOMMIT) {
if (SC_VirtualAlloc((void*)addr, cPages*PAGE_SIZE, MEM_COMMIT,
PAGE_READWRITE) == 0)
goto cleanUpLocks;
} else {
BOOL bRet;
LeaveCriticalSection(&VAcs);
bRet = ProcessPageFault(TRUE, addr);
if (!bRet && !(fOptions & LOCKFLAG_WRITE))
bRet = ProcessPageFault(FALSE, addr);
EnterCriticalSection(&VAcs);
if (!bRet) {
err = ERROR_NOACCESS;
goto cleanUpLocks;
}
}
} else {
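/* Page already committed: make sure the caller has the requested
 * access, forcing a write fault if write access is needed but the
 * page isn't currently writable. */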
if (fOptions & LOCKFLAG_WRITE) {
if (!IsPageWritable(pmb->aPages[ixPage])) {
BOOL bRet;
LeaveCriticalSection(&VAcs);
bRet = ProcessPageFault(TRUE, addr);
EnterCriticalSection(&VAcs);
if (!bRet) {
err = ERROR_NOACCESS;
goto cleanUpLocks;
}
}
} else if (!IsPageReadable(pmb->aPages[ixPage])) {
err = ERROR_NOACCESS;
goto cleanUpLocks;
}
if ((fOptions & LOCKFLAG_READ) && !IsPageReadable(pmb->aPages[ixPage])) {
err = ERROR_NOACCESS;
goto cleanUpLocks;
}
}
if (pPFNs)
*pPFNs++ = PFNfromEntry(pmb->aPages[ixPage]);
} while (--cPages && ++ixPage < PAGES_PER_BLOCK);
ixPage = 0; /* start with first page of next block */
}
LeaveCriticalSection(&VAcs);
return TRUE;
cleanUpLocks:
/* Unable to page in a page or this thread doesn't have the desired access
* to a page in the range. Back out any locks which have been set.
*/
if (!(fOptions & LOCKFLAG_QUERY_ONLY)) {
do {
--pmb->cLocks;
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Restoring lock count @%8.8x LC=%d\r\n"),
ulBase | (ixBlock<<VA_BLOCK), pmb->cLocks));
} while (ixBlock != ixStart && (pmb = (*pscn)[--ixBlock]));
}
invalidParm:
LeaveCriticalSection(&VAcs);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("LockPages failed.\r\n")));
KSetLastError(pCurThread, err);
return FALSE;
}
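/* SC_LockPages - user-callable entry point; only fully trusted callers
 * may lock pages. */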
BOOL SC_LockPages(LPVOID lpvAddress, DWORD cbSize, PDWORD pPFNs, int fOptions) {
if (pCurProc->bTrustLevel != KERN_TRUST_FULL) {
ERRORMSG(1,(L"SC_LockPages failed due to insufficient trust\r\n"));
KSetLastError(pCurThread, ERROR_ACCESS_DENIED);
return 0;
}
return DoLockPages(lpvAddress, cbSize, pPFNs, fOptions);
}
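/* DoUnlockPages - undo a previous DoLockPages
 *
 * Validates the range the same way as DoLockPages and decrements the
 * lock count of every MEMBLOCK the range touches. */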
BOOL DoUnlockPages(
LPCVOID lpvAddress, /* address of region of committed pages */
DWORD cbSize) /* size of the region */
{
int ixPage;
int ixBlock;
int ixFirB; /* index of first block in region */
int ixLastBlock; /* last block to be unlocked */
PSECTION pscn;
int cPages; /* # of pages to adjust */
MEMBLOCK *pmb;
DEBUGMSG(ZONE_VIRTMEM, (TEXT("UnlockPages @%8.8lx size=%lx\r\n"),
lpvAddress, cbSize));
/* Verify that the requested region is within range and within an
* existing reserved memory region that the client is allowed to
* access, and locate the starting block of the region.
*/
if ((ulong)lpvAddress>>VA_SECTION > SECTION_MASK)
return TRUE;
/* Lock out other changes to the virtual memory state. */
EnterCriticalSection(&VAcs);
if (!cbSize || !lpvAddress)
goto invalidParm;
pscn = SectionTable[((ulong)lpvAddress>>VA_SECTION) & SECTION_MASK];
if (pscn == NULL_SECTION)
goto invalidParm;
ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK;
if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE)
goto invalidParm;
/* Verify that all of the pages within the specified range belong to the
* same VirtualAlloc region and are committed.
*
* (pscn) = ptr to section array
* (ixBlock) = index of block containing the first page of lpvAddress
* (ixFirB) = index of first block in the region
*/
ixPage = ((ulong)lpvAddress >> VA_PAGE) & PAGE_MASK;
cPages = (((ulong)lpvAddress + cbSize + PAGE_SIZE-1) / PAGE_SIZE)
- ((ulong)lpvAddress / PAGE_SIZE);
if (ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, 0) == -1)
goto invalidParm;
/* Walk the page tables to decrement the lock count for
* each MEMBLOCK in the region.
*/
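/* One past the last block touched: round ixPage+cPages up to a whole
 * number of blocks (there are 1 << (VA_BLOCK-VA_PAGE) pages per block). */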
ixLastBlock = ixBlock + ((ixPage+cPages+PAGES_PER_BLOCK-1) >> (VA_BLOCK-VA_PAGE));
for (; ixBlock < ixLastBlock ; ++ixBlock) {
pmb = (*pscn)[ixBlock];
if (pmb->cLocks)
--pmb->cLocks;
else
DEBUGCHK(0);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Unlocking pages @%8.8x LC=%d\r\n"),
((ulong)lpvAddress&(SECTION_MASK<<VA_SECTION)) | (ixBlock<<VA_BLOCK),
pmb->cLocks));
}
LeaveCriticalSection(&VAcs);
return TRUE;
invalidParm:
LeaveCriticalSection(&VAcs);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("LockPages failed.\r\n")));
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE;
}
BOOL SC_UnlockPages(LPVOID lpvAddress, DWORD cbSize) {
if (pCurProc->bTrustLevel != KERN_TRUST_FULL) {
ERRORMSG(1,(L"SC_UnlockPages failed due to insufficient trust\r\n"));
KSetLastError(pCurThread, ERROR_ACCESS_DENIED);
return 0;
}
return DoUnlockPages(lpvAddress, cbSize);
}
LPBYTE GrabFirstPhysPage(DWORD dwCount);
/* AutoCommit - auto-commit a page
*
* This function is called for a TLB miss due to a memory load or store.
* It is invoked from the general exception handler on the kernel stack.
* If the faulting page is within an auto-commit region, then a new page
* will be allocated and committed.
*/
BOOL AutoCommit(ulong addr) {
register MEMBLOCK *pmb;
PSECTION pscn;
int ixSect, ixBlock, ixPage;
ulong ulPFN;
LPBYTE pMem;
DWORD loop;
ixPage = addr>>VA_PAGE & PAGE_MASK;
ixBlock = addr>>VA_BLOCK & BLOCK_MASK;
ixSect = addr>>VA_SECTION;
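/* Only commit if the address falls inside a mapped, auto-commit block
 * and the page has not been committed already. */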
if ((ixSect <= SECTION_MASK) && ((pscn = SectionTable[ixSect]) != NULL_SECTION) &&
((pmb = (*pscn)[ixBlock]) != NULL_BLOCK) && (pmb != RESERVED_BLOCK) &&
(pmb->flags&MB_FLAG_AUTOCOMMIT) && !pmb->aPages[ixPage]) {
for (loop = 0; loop < 20; loop++) {
//
// Notify OOM thread if we're low on memory.
//
if (GwesOOMEvent && (PageFreeCount < GwesCriticalThreshold)) {
SetEvent(GwesOOMEvent);
}
//
// We don't want to let AutoCommit deplete all physical memory.
// Yes, the thread needing the memory committed is going to fault
// unexpectedly if we can't allocate, so we'll loop for 2 seconds
// checking for available RAM. We need to reserve a small amount
// so that DemandCommit can allocate stack for the