/* virtmem.c */
/* Copyright (c) 1995-2000 Microsoft Corporation. All rights reserved. */
#include "kernel.h"
int FirstFreeSection = RESERVED_SECTIONS;
MEMBLOCK *PmbDecommitting;  /* set while DecommitPages is releasing a block's pages */
extern CRITICAL_SECTION VAcs;
extern HANDLE GwesOOMEvent;
extern long GwesCriticalThreshold;
const DWORD AllocationType[MB_FLAG_PAGER_TYPE+1] = {MEM_PRIVATE, MEM_IMAGE, MEM_MAPPED}; /* MEM_* type for each MB_FLAG pager type */
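/* MakePagePerms - translate Win32 PAGE_* protection flags into the PG_* page
 * table entry bits used by this kernel, returning 0 and setting the thread's
 * last error for unsupported combinations. As a worked example (default
 * cacheable mapping, per the code below): MakePagePerms(PAGE_READWRITE)
 * yields PG_VALID_MASK | PG_DIRTY_MASK | PG_PROT_WRITE | PG_CACHE | PG_SIZE_MASK.
 */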
ulong MakePagePerms(DWORD fdwProtect) {
ulong ulMask;
switch (fdwProtect & ~(PAGE_GUARD | PAGE_NOCACHE
#ifdef ARM
| PAGE_ARM_MINICACHE
#endif
#ifdef x86
| PAGE_x86_WRITETHRU
#endif
)) {
case PAGE_READONLY:
ulMask = PG_VALID_MASK | PG_PROT_READ;
break;
case PAGE_EXECUTE:
case PAGE_EXECUTE_READ:
ulMask = PG_VALID_MASK | PG_PROT_READ | PG_EXECUTE_MASK;
break;
case PAGE_EXECUTE_READWRITE:
ulMask = PG_VALID_MASK | PG_DIRTY_MASK | PG_EXECUTE_MASK | PG_PROT_WRITE;
break;
case PAGE_READWRITE:
ulMask = PG_VALID_MASK | PG_DIRTY_MASK | PG_PROT_WRITE;
break;
case PAGE_WRITECOPY:
case PAGE_EXECUTE_WRITECOPY:
KSetLastError(pCurThread, ERROR_NOT_SUPPORTED);
return 0;
case PAGE_NOACCESS:
if (fdwProtect != PAGE_NOACCESS) {
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return 0;
}
        ulMask = PG_GUARD; /* nonzero placeholder: PG_GUARD without PG_VALID_MASK leaves the page inaccessible */
break;
default:
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return 0;
}
DEBUGCHK(ulMask);
if (fdwProtect & PAGE_GUARD)
ulMask = (ulMask & ~PG_VALID_MASK) | PG_GUARD;
#ifdef ARM
    ulMask |= (fdwProtect & PAGE_NOCACHE) ? PG_NOCACHE
            : (fdwProtect & PAGE_ARM_MINICACHE) ? PG_MINICACHE
            : PG_CACHE;
#elif defined(x86)
    ulMask |= (fdwProtect & PAGE_NOCACHE) ? PG_NOCACHE
            : (fdwProtect & PAGE_x86_WRITETHRU) ? PG_WRITE_THRU_MASK
            : PG_CACHE;
#else
ulMask |= (fdwProtect & PAGE_NOCACHE) ? PG_NOCACHE : PG_CACHE;
#endif
ulMask |= PG_SIZE_MASK;
DEBUGMSG(ZONE_VIRTMEM, (TEXT("MakePagePerms: %8.8lx returns %8.8lx\r\n"), fdwProtect, ulMask));
return ulMask;
}
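/* ProtectFromPerms - map page table entry bits back to the closest Win32
 * PAGE_* value; the inverse of MakePagePerms, except that bits with no Win32
 * equivalent (PG_SIZE_MASK, the dirty bit) are dropped and unrecognized
 * permission combinations collapse to PAGE_NOACCESS. Continuing the example
 * above, ProtectFromPerms(MakePagePerms(PAGE_READWRITE)) returns PAGE_READWRITE.
 */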
DWORD ProtectFromPerms(ulong ulPage) {
DWORD fdwProt;
switch (ulPage & (PG_PROTECTION|PG_EXECUTE_MASK)) {
case PG_PROT_READ:
fdwProt = PAGE_READONLY;
break;
case PG_PROT_WRITE:
fdwProt = PAGE_READWRITE;
break;
#if PG_EXECUTE_MASK
case PG_EXECUTE_MASK|PG_PROT_READ:
fdwProt = PAGE_EXECUTE_READ;
break;
case PG_EXECUTE_MASK|PG_PROT_READ|PG_PROT_WRITE:
fdwProt = PAGE_EXECUTE_READWRITE;
break;
#endif
default: // invalid combinations get mapped to PAGE_NOACCESS
return PAGE_NOACCESS;
}
if ((ulPage & (PG_VALID_MASK|PG_GUARD)) == PG_GUARD)
fdwProt |= PAGE_GUARD;
if ((ulPage & PG_CACHE_MASK) == PG_NOCACHE)
fdwProt |= PAGE_NOCACHE;
return fdwProt;
}
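/* FindFirstBlock - walk backward from block index ix, skipping RESERVED_BLOCK
 * entries, and return the ixBase of the first real MEMBLOCK found (i.e. the
 * index of the block that starts the region). Returns UNKNOWN_BASE if the
 * walk falls off the section or hits a NULL_BLOCK entry first.
 */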
int FindFirstBlock(PSECTION pscn, int ix) {
MEMBLOCK *pmb;
do {
if (ix < 0 || (pmb = (*pscn)[ix]) == NULL_BLOCK)
return UNKNOWN_BASE;
--ix;
} while (pmb == RESERVED_BLOCK);
return pmb->ixBase;
}
int ScanRegion(
PSECTION pscn, /* section array */
int ixFirB, /* first block in region */
int ixBlock, /* starting block to scan */
int ixPage, /* starting page in block */
int cPgScan, /* # of pages to scan */
int *pcPages) /* ptr to count of pages in region */
{
register int cPages;
int cpAlloc;
register MEMBLOCK *pmb;
DWORD err;
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Scanning %d pages. ixFirB=%x\r\n"), cPgScan, ixFirB));
cpAlloc = 0;
for (cPages = 0 ; cPages < cPgScan && ixBlock < BLOCK_MASK+1 ; ++ixBlock) {
pmb = (*pscn)[ixBlock];
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Scanning block %8.8lx (%x), ix=%d cPgScan=%d\r\n"),
pmb, (pmb!=RESERVED_BLOCK&&pmb!=NULL_BLOCK)?pmb->ixBase:-1, ixPage, cPgScan));
if (pmb == RESERVED_BLOCK) {
/* Reserved block. If not just counting pages, allocate a new block
* and initialize it as part of the current region.
*/
if (!pcPages) {
if (!(pmb = AllocMem(HEAP_MEMBLOCK))) {
err = ERROR_NOT_ENOUGH_MEMORY;
goto setError;
}
memset(pmb,0,sizeof(MEMBLOCK));
pmb->alk = (*pscn)[ixFirB]->alk;
pmb->flags = (*pscn)[ixFirB]->flags;
pmb->hPf = (*pscn)[ixFirB]->hPf;
pmb->cUses = 1;
pmb->ixBase = ixFirB;
(*pscn)[ixBlock] = pmb;
DEBUGMSG(ZONE_VIRTMEM, (TEXT("ScanRegion: created block %8.8lx, ix=%d\r\n"), pmb, ixBlock));
}
ixPage = PAGES_PER_BLOCK - ixPage; /* # of pages relevant to scan */
if ((cPages += ixPage) > cPgScan) {
cpAlloc += cPgScan - (cPages - ixPage);
cPages = cPgScan;
} else
cpAlloc += ixPage;
} else if (pmb == NULL_BLOCK || pmb->ixBase != ixFirB) {
            /* This block is not part of the original region. If not just
* counting pages, then fail the request.
*/
if (pcPages)
break; /* just counting, so not an error */
err = ERROR_INVALID_PARAMETER;
goto setError;
} else {
            /* Scan block to count uncommitted pages and to verify that the
* protection on any existing pages is compatible with the protection
* being requested.
*/
for ( ; cPages < cPgScan && ixPage < PAGES_PER_BLOCK
; ++ixPage, ++cPages) {
if (pmb->aPages[ixPage] == BAD_PAGE) {
if (pcPages)
goto allDone;
err = ERROR_INVALID_PARAMETER;
goto setError;
} else if (pmb->aPages[ixPage] == 0)
++cpAlloc; /* count # of pages to allocate */
}
}
ixPage = 0; /* start with first page of next block */
}
allDone:
if (pcPages)
*pcPages = cPages; /* return # of pages in the region */
return cpAlloc;
setError:
DEBUGMSG(ZONE_VIRTMEM, (TEXT("ScanRegion failed err=%d\r\n"), err));
KSetLastError(pCurThread, err);
return -1;
}
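/* DecommitPages - return the physical pages backing cPages pages, starting
 * at (ixBlock, ixPage), to the free pool. Duplicated blocks (cUses > 1) are
 * detached back to RESERVED_BLOCK rather than freed, and locked blocks are
 * skipped unless bIgnoreLock is set. The MEMBLOCK structures themselves are
 * left for ReleaseRegion to free; the TLB is invalidated at the end.
 */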
void
DecommitPages(
PSECTION pscn, /* section array */
int ixBlock, /* starting block to decommit */
int ixPage, /* starting page in block */
int cPages, /* # of pages to decommit */
DWORD baseScn, /* base address of section */
BOOL bIgnoreLock) /* ignore lockcount when decommitting */
{
register MEMBLOCK *pmb;
PHYSICAL_ADDRESS paPFN;
#ifdef x86
int cbInvalidate = cPages * PAGE_SIZE;
PVOID pvInvalidate = (PVOID)(baseScn + (ixBlock<<VA_BLOCK) + (ixPage<<VA_PAGE));
#endif
/* Walk the page tables to free the physical pages. */
DEBUGMSG(ZONE_VIRTMEM, (TEXT("DecommitPages: %d pages from %3.3x%x000\r\n"), cPages, ixBlock, ixPage));
SC_CacheSync(CACHE_SYNC_DISCARD);
for (; cPages > 0 ; ++ixBlock) {
DEBUGCHK(ixBlock < BLOCK_MASK+1);
if ((pmb = (*pscn)[ixBlock]) == RESERVED_BLOCK)
cPages -= PAGES_PER_BLOCK - ixPage;
else {
DEBUGCHK(pmb != NULL_BLOCK);
DEBUGMSG(ZONE_VIRTMEM,(TEXT("Decommiting block (%8.8lx) uc=%d ixPg=%d cPages=%d\r\n"),
pmb, pmb->cUses, ixPage, cPages));
if (pmb->cUses > 1 && ixPage == 0 && (cPages >= PAGES_PER_BLOCK || pmb->aPages[cPages] == BAD_PAGE)) {
                /* Decommitting a duplicated block whose entire usable range is
                 * covered: remove the additional use of the block and change
                 * the entry back to RESERVED_BLOCK. */
(*pscn)[ixBlock] = RESERVED_BLOCK;
--pmb->cUses;
cPages -= PAGES_PER_BLOCK;
} else if (bIgnoreLock || !pmb->cLocks) {
PmbDecommitting = pmb;
for ( ; cPages && ixPage < PAGES_PER_BLOCK ; ++ixPage, --cPages) {
if ((paPFN = pmb->aPages[ixPage]) != 0 && paPFN != BAD_PAGE) {
paPFN = PFNfromEntry(paPFN);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Decommitting %8.8lx @%3.3x%x000\r\n"), paPFN, ixBlock, ixPage));
pmb->aPages[ixPage] = 0;
FreePhysPage(paPFN);
}
}
PmbDecommitting = 0;
} else {
cPages -= PAGES_PER_BLOCK - ixPage;
DEBUGMSG(1,(L"DecommitPages: Cannot decommit block at %8.8lx, lock count %d\r\n",
(PVOID)(baseScn + (ixBlock<<VA_BLOCK) + (ixPage<<VA_PAGE)),pmb->cLocks));
}
}
ixPage = 0; /* start with first page of next block */
}
    // Since pages have been removed from the page tables, the TLB must be flushed.
    // Note: pvInvalidate/cbInvalidate are declared only for x86, where the flush
    // can be ranged; on the other CPUs InvalidateRange is presumably a macro that
    // ignores its arguments.
    InvalidateRange(pvInvalidate, cbInvalidate);
}
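/* ReleaseRegion - free every MEMBLOCK belonging to the region whose first
 * block is ixFirB, resetting the section entries to NULL_BLOCK. All pages
 * must already have been decommitted; the DEBUG build asserts this.
 */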
void
ReleaseRegion(
PSECTION pscn, /* section array */
int ixFirB) /* first block in region */
{
register MEMBLOCK *pmb;
register int ixBlock;
/* Walk the section array to free the MEMBLOCK entries. */
for (ixBlock = ixFirB ; ixBlock < BLOCK_MASK+1 ; ++ixBlock) {
pmb = (*pscn)[ixBlock];
if (pmb == RESERVED_BLOCK)
(*pscn)[ixBlock] = NULL_BLOCK;
else if (pmb == NULL_BLOCK || pmb->ixBase != ixFirB)
break;
else {
#ifdef DEBUG
int ix;
ulong ulPFN;
for (ix = 0 ; ix < PAGES_PER_BLOCK ; ++ix) {
if ((ulPFN = pmb->aPages[ix]) != 0 && ulPFN != BAD_PAGE) {
DEBUGMSG(ZONE_VIRTMEM, (TEXT("ReleaseRegion: Commited page found: %8.8lx @%3.3x%x000\r\n"),
ulPFN, ixBlock, ix));
DEBUGCHK(0);
}
}
#endif
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Releasing memblock %8.8lx @%3.3x0000\r\n"), pmb, ixBlock));
(*pscn)[ixBlock] = NULL_BLOCK;
FreeMem(pmb,HEAP_MEMBLOCK);
}
}
    /* Note: since no pages are freed by this routine, no TLB flushing is needed. */
}
/** IsAccessOK() - check access permissions for Address
*
* This function checks the access permissions for an address. For user
* virtual addresses, only the access key for the memory region is checked.
* Kernel space addresses are always allowed because access to them is not
 * access key dependent.
*
* This function doesn't check that the page is either present or valid.
*
* Environment:
 * Kernel mode, preemptible, running on the thread's stack.
*/
BOOL IsAccessOK(void *addr, ACCESSKEY aky) {
register MEMBLOCK *pmb;
PSECTION pscn;
int ixSect, ixBlock;
ixBlock = (ulong)addr>>VA_BLOCK & BLOCK_MASK;
ixSect = (ulong)addr>>VA_SECTION;
if (ixSect <= SECTION_MASK) {
pscn = SectionTable[ixSect];
if ((pmb = (*pscn)[ixBlock]) != NULL_BLOCK) {
if (pmb == RESERVED_BLOCK)
pmb = (*pscn)[FindFirstBlock(pscn, ixBlock)];
if (!TestAccess(&pmb->alk, &aky)) {
DEBUGMSG(1, (TEXT("IsAccessOK returning FALSE\r\n")));
return FALSE;
}
}
}
return TRUE;
}
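/* DbgVerify - validate an address on behalf of the debugger support code.
 * For DV_PROBE / DV_MODIFY it verifies read or write access with
 * VerifyAccess, retrying once after LockPages succeeds for user-space
 * addresses (below 0x80000000) when not inside a system call.
 */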
PVOID DbgVerify(PVOID pvAddr, int option) {
PVOID ret;
int flVerify = VERIFY_KERNEL_OK;
int flLock = LOCKFLAG_QUERY_ONLY | LOCKFLAG_READ;
switch (option) {
case DV_MODIFY:
flVerify = VERIFY_KERNEL_OK | VERIFY_WRITE_FLAG;
flLock = LOCKFLAG_QUERY_ONLY | LOCKFLAG_WRITE;
// fall through
case DV_PROBE:
if (!(ret = VerifyAccess(pvAddr, flVerify, (ACCESSKEY)-1)))
if (((ulong)pvAddr & 0x80000000) == 0 && !InSysCall() && LockPages(pvAddr, 1, 0, flLock))
                ret = VerifyAccess(pvAddr, flVerify, (ACCESSKEY)-1); /* re-verify now that the page is locked in */
break;
case DV_SETBP: