// virtmem.c
ix = dwAddr >> VA_SECTION;
DEBUGCHK (ix <= SECTION_MASK); // can't be 0, but can be NULL_SECTION
if ((pscn = SectionTable[ix]) == NULL_SECTION)
goto invalidParm;
dwBase = dwAddr & (SECTION_MASK<<VA_SECTION);
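/* A sketch of the address decomposition used throughout this routine. The
 * shift/mask values are platform configuration; the figures below assume the
 * common 4KB-page layout (32MB sections, 64KB blocks) and are illustrative
 * only:
 *
 *   ix      = dwAddr >> VA_SECTION;              // e.g. 0x04212345 >> 25 == 2
 *   ixBlock = (dwAddr >> VA_BLOCK) & BLOCK_MASK; // which 64KB block in the section
 *   ixPage  = (dwAddr >> VA_PAGE) & PAGE_MASK;   // which 4KB page in the block
 *
 * SectionTable[ix] yields the section, (*pscn)[ixBlock] the MEMBLOCK, and
 * pmb->aPages[ixPage] the physical page entry.
 */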
// When remotely allocating memory in another process, make sure that we
// use the access key of the remote process instead of the current one.
if (dwAddr < FIRST_MAPPER_ADDRESS) {
if (ix && ix <= MAX_PROCESSES && ProcArray[ix-1].dwVMBase)
aky = ProcArray[ix-1].aky;
} else if (IsInResouceSection (dwAddr)) {
aky = ProcArray[0].aky;
} else if (IsInSharedSection (dwAddr)) {
#if defined (x86) || defined (ARM)
aky = ProcArray[0].aky;
#endif
}
}
if ((fdwAllocationType & MEM_RESERVE) || (!dwAddr && (fdwAllocationType & MEM_COMMIT))) {
// Set MEM_RESERVE so the error cleanup works properly.
fdwAllocationType |= MEM_RESERVE;
ixBlock = dwSlot0Addr >> VA_BLOCK;
if (dwSlot0Addr) {
/* The client has asked to reserve a specific region of memory.
* Verify that the requested region is within range and within an
* existing memory section that the client is allowed to access.
*/
/* adjust lpvAddress to 64K boundary. */
dwAddr &= 0xFFFF0000L;
/* Verify that the entire range is available to be reserved. */
cPages = (dwEnd - dwAddr + PAGE_SIZE-1) / PAGE_SIZE;
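/* e.g. assuming a 4KB PAGE_SIZE, a 0x11000-byte span starting on a page
 * boundary rounds up to 17 pages; the +PAGE_SIZE-1 makes the division round up. */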
for (cNeed = cPages, ix = ixBlock ; cNeed > 0
; ++ix, cNeed -= PAGES_PER_BLOCK) {
if ((*pscn)[ix] != NULL_BLOCK)
goto invalidParm;
}
} else {
/* No specific address given. Go find a region of free blocks */
cPages = (dwEnd - dwAddr + PAGE_SIZE-1) / PAGE_SIZE;
cNeed = cPages;
if (((cPages * PAGE_SIZE >= 2*1024*1024) || (fdwInternal & MEM_SHAREDONLY))
&& (fdwAllocationType == MEM_RESERVE)
&& (fdwProtect == PAGE_NOACCESS)) {
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualAlloc(%8.8lx, %lx, %lx, %lx): doing HugeVirtualAlloc\r\n"),
lpvAddress0, cbSize, fdwAllocationType, fdwProtect));
LeaveCriticalSection(&VAcs);
lpvResult = HugeVirtualReserve(cbSize, fdwInternal & MEM_SHAREDONLY);
if (IsCeLogStatus(CELOGSTATUS_ENABLED_GENERAL)) {
CELOG_VirtualAlloc((DWORD)lpvResult, (DWORD)dwAddr, cbSize, fdwAllocationType, fdwProtect);
}
return lpvResult;
}
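/* A caller-side sketch of a request that takes the path above (illustrative,
 * not taken from this file): a large, reserve-only, no-access allocation is
 * routed to HugeVirtualReserve instead of being carved from a 32MB slot:
 *
 *   LPVOID p = VirtualAlloc(NULL, 4*1024*1024, MEM_RESERVE, PAGE_NOACCESS);
 */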
if (fdwAllocationType & MEM_TOP_DOWN) {
/* Scan backwards from the end of the section */
for (ix = BLOCK_MASK+1 ; --ix > 0 ; ) {
if ((*pscn)[ix] != NULL_BLOCK)
cNeed = cPages;
else if ((cNeed -= PAGES_PER_BLOCK) <= 0) {
ixBlock = ix;
goto foundRegion;
}
}
} else {
/* Scan forwards from the beginning of the section */
ixBlock = 1;
for (ix = 1 ; ix < BLOCK_MASK+1 ; ++ix) {
if ((*pscn)[ix] != NULL_BLOCK) {
ixBlock = ix+1;
cNeed = cPages;
} else if ((cNeed -= PAGES_PER_BLOCK) <= 0)
goto foundRegion;
}
}
err = ERROR_NOT_ENOUGH_MEMORY;
goto setError;
foundRegion:
dwAddr = dwBase | ((ulong)ixBlock << VA_BLOCK);
}
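/* Worked example of the scans above (assuming PAGES_PER_BLOCK == 16, the
 * usual 4KB-page figure): reserving 40 pages makes cNeed count
 * 40 -> 24 -> 8 -> -8 across three consecutive NULL_BLOCK entries, so three
 * free blocks satisfy the request. Any in-use block resets cNeed back to
 * cPages, which is what forces the free blocks to be contiguous. */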
/* Reserve the range of blocks */
if (!(pmb = MDAllocMemBlock (dwBase, ixBlock))) {
err = ERROR_NOT_ENOUGH_MEMORY;
goto setError;
}
LockFromKey(&pmb->alk, &aky);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualAlloc: reserved lock=%lx\r\n"), pmb->alk));
pmb->cUses = 1;
switch (fdwAllocationType & (MEM_MAPPED|MEM_IMAGE)) {
case MEM_MAPPED:
pmb->flags = MB_PAGER_MAPPING;
break;
case MEM_IMAGE:
pmb->flags = MB_PAGER_LOADER;
break;
case 0:
pmb->flags = MB_PAGER_NONE;
break;
default:
goto invalidParm;
}
if (fdwAllocationType & MEM_AUTO_COMMIT)
pmb->flags |= MB_FLAG_AUTOCOMMIT;
pmb->ixBase = ixBlock;
(*pscn)[ixBlock] = pmb;
DEBUGMSG(ZONE_VIRTMEM,
(TEXT("VirtualAlloc: created head block pmb=%8.8lx (%x)\r\n"),
pmb, ixBlock));
ixFirB = ixBlock; /* note start of region for error recovery */
for (cNeed = cPages, ix = ixBlock+1 ; (cNeed -= PAGES_PER_BLOCK) > 0 ; ++ix) {
if (cNeed < PAGES_PER_BLOCK || (fdwAllocationType & MEM_AUTO_COMMIT)) {
/* Either the last block is not full, so a MEMBLOCK must be
 * allocated and the extra pages marked invalid, or this is an
 * auto-commit region, which must have all MEMBLOCKs filled in so
 * that pages can be committed without entering the virtual memory
 * critical section. */
if (!(pmb = MDAllocMemBlock (dwBase, ix))) {
err = ERROR_NOT_ENOUGH_MEMORY;
goto setError;
}
pmb->alk = aky;
pmb->flags = (*pscn)[ixFirB]->flags;
pmb->cUses = 1;
pmb->ixBase = ixFirB;
(*pscn)[ix] = pmb;
DEBUGMSG(ZONE_VIRTMEM,
(TEXT("VirtualAlloc: created a tail block pmb=%8.8lx (%x)\r\n"),
pmb, ix));
} else
(*pscn)[ix] = RESERVED_BLOCK;
}
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Reserved %d pages @%8.8lx\r\n"), cPages, dwAddr));
if (cNeed) {
/* Set unused entries to BAD_PAGE */
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Reserved %d extra pages.\r\n"), -cNeed));
cNeed += PAGES_PER_BLOCK;
for (ix = cNeed ; ix < PAGES_PER_BLOCK ; ++ix)
pmb->aPages[ix] = BAD_PAGE;
}
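/* Example of the fixup above (assuming PAGES_PER_BLOCK == 16): for
 * cPages == 20 the tail loop leaves cNeed == 20 - 16 - 16 == -12, so
 * cNeed += PAGES_PER_BLOCK yields 4, and pages 4..15 of the tail
 * MEMBLOCK are stamped BAD_PAGE so they can never be committed. */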
/* If not committing pages, then return the address of the
* reserved region */
if (!(fdwAllocationType & MEM_COMMIT)) {
LeaveCriticalSection(&VAcs);
ERRORMSG(!dwAddr,(L"Failed VirtualAlloc/reserve of %8.8lx bytes\r\n",cbSize));
if (IsCeLogStatus(CELOGSTATUS_ENABLED_GENERAL)) {
CELOG_VirtualAlloc(dwAddr, (DWORD)lpvAddress0, cbSize, fdwAllocationType, fdwProtect);
}
return (LPVOID) dwAddr;
}
} else {
//
// Not reserving memory, so must be committing. Verify that the
// requested region is within range and within an existing reserved
// memory region that the client is allowed to access.
//
if (!(fdwAllocationType & MEM_COMMIT)
|| !dwAddr
// || (!(fdwInternal&MEM_NOSTKCHK) && IsStackVA (dwSlot0Addr | (dwBase? dwBase : pCurProc->dwVMBase), cbSize))
|| (fdwInternal & MEM_CONTIGUOUS))
goto invalidParm;
ixBlock = (dwAddr >> VA_BLOCK) & BLOCK_MASK;
/* Adjust lpvAddress to PAGE boundary and calculate the number
* of pages to commit. */
dwAddr &= ~(PAGE_SIZE-1);
cPages = (dwEnd - dwAddr + PAGE_SIZE-1) / PAGE_SIZE;
/* locate the starting block of the region */
if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE)
goto invalidParm;
}
/* Verify that cPages of memory starting with the first page indicated by
* lpvAddress can be committed within the region.
*
* (pscn) = ptr to section array
* (ixBlock) = index of block containing the first page to commit
* (cPages) = # of pages to commit
*/
ixPage = (dwAddr >> VA_PAGE) & PAGE_MASK;
cpAlloc = ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, 0, dwBase);
if (cpAlloc == -1)
goto cleanUp;
//
// Commit cPages of memory starting with the first page indicated by
// lpvAddress. Allocate all required pages before any changes to the
// virtual region are performed.
//
// (pscn) = ptr to section array
// (ixBlock) = index of block containing the first page to commit
// (cPages) = # of pages to commit
// (cpAlloc) = # of physical pages required
//
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Allocating %d pages.\r\n"), cpAlloc));
if (fdwInternal & MEM_CONTIGUOUS) {
//
// Map to physically contiguous pages. Mark as LOCKED as we go.
// Walk the page tables to map in the physical pages.
//
ulPFN = dwPFNBase;
for (; cPages ; ixBlock++) {
pmb = (*pscn)[ixBlock];
pmb->cLocks++;
ix = 0;
for (; cPages && ix < PAGES_PER_BLOCK; --cPages) {
DEBUGCHK(pmb->aPages[ix] == 0);
pmb->aPages[ix] = ulPFN | ulPgMask;
ulPFN = NextPFN(ulPFN);
ix++;
}
}
} else {
if (!HoldPages(cpAlloc, FALSE)) {
err = ERROR_NOT_ENOUGH_MEMORY;
goto setError;
}
ixBlock = (ixBlock*PAGES_PER_BLOCK+ixPage+cPages-1)/PAGES_PER_BLOCK;
ix = ((ixPage + cPages - 1) % PAGES_PER_BLOCK) + 1;
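/* Worked example of the back-walk setup above (assuming PAGES_PER_BLOCK == 16):
 * committing cPages == 30 starting at ixBlock == 2, ixPage == 3 puts the last
 * page at absolute page 2*16 + 3 + 30 - 1 == 64, so ixBlock becomes
 * 64/16 == 4 and ix == ((3 + 30 - 1) % 16) + 1 == 1; the walk below starts at
 * page 0 of block 4 and runs backwards toward the first page committed. */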
//
// Walk the page tables to map in the physical pages.
//
for (; cPages ; --ixBlock) {
pmb = (*pscn)[ixBlock];
for (; cPages && ix-- > 0 ; --cPages) {
if (pmb->aPages[ix] == 0) {
DWORD dwRetries;
for (dwRetries = 0; (dwRetries < 20) && !(ulPFN = GetHeldPage()); dwRetries++)
Sleep(100);
if (ulPFN) {
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Mapping %8.8lx @%3.3x%x000 perm=%x\r\n"),
ulPFN, ixBlock, ix, ulPgMask));
pmb->aPages[ix] = ulPFN | ulPgMask;
} else {
InterlockedIncrement(&PageFreeCount);
RETAILMSG(1,(L"--->>> VirtualAlloc: FATAL ERROR! COMPLETELY OUT OF MEMORY (%8.8lx)!\r\n",PageFreeCount));
err = ERROR_NOT_ENOUGH_MEMORY;
goto setError;
}
} else {
fNeedInvalidate = TRUE;
pmb->aPages[ix] = (pmb->aPages[ix] & ~PG_PERMISSION_MASK) | ulPgMask;
}
}
ix = PAGES_PER_BLOCK; /* start with last page of previous block */
}
}
if (fNeedInvalidate) {
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Calling InvalidateRange (%8.8lx, %d)\n"), dwAddr, cbSize));
InvalidateRange((LPVOID) dwAddr, cbSize); // in case we changed permissions above
}
LeaveCriticalSection(&VAcs);
ERRORMSG(!dwAddr,(L"Failed VirtualAlloc(%8.8lx) of %8.8lx bytes\r\n",fdwAllocationType,cbSize));
if (IsCeLogStatus(CELOGSTATUS_ENABLED_GENERAL)) {
CELOG_VirtualAlloc(dwAddr, dwAddr, cbSize, fdwAllocationType, fdwProtect);
}
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Returning %8.8lx\n"), dwAddr));
return (LPVOID) dwAddr;
//
// There was an error reserving or committing a range of pages. If reserving
// pages, release any pages which were reserved before the failure occurred.
//
invalidParm:
err = ERROR_INVALID_PARAMETER;
setError:
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualAlloc failed. error=%d\r\n"), err));
KSetLastError(pCurThread, err);
cleanUp:
if (fdwAllocationType & MEM_RESERVE && ixFirB != UNKNOWN_BASE) {
/* Remove the reservation */
ReleaseRegion(pscn, ixFirB);
}
LeaveCriticalSection(&VAcs);
ERRORMSG(!dwAddr,(L"Failed VirtualAlloc(%8.8lx) of %8.8lx bytes\r\n",fdwAllocationType,cbSize));
return 0;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
LPVOID
SC_VirtualAlloc(
LPVOID lpvAddress, /* address of region to reserve or commit */
DWORD cbSize, /* size of the region */
DWORD fdwAllocationType, /* type of allocation */
DWORD fdwProtect /* type of access protection */
)
{
return DoVirtualAlloc(lpvAddress, cbSize, fdwAllocationType, fdwProtect, 0, 0);
}
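/* Typical caller-side sequence exercising both the reserve and commit paths
 * of DoVirtualAlloc (a sketch, not taken from this file): reserve address
 * space first, then commit pages inside the reservation on demand.
 *
 *   LPVOID base = VirtualAlloc(NULL, 0x20000, MEM_RESERVE, PAGE_NOACCESS);
 *   if (base) {
 *       // commit the first 64KB of the reservation read/write
 *       LPVOID p = VirtualAlloc(base, 0x10000, MEM_COMMIT, PAGE_READWRITE);
 *       // ... use p ...
 *       VirtualFree(base, 0, MEM_RELEASE);  // cbSize must be 0 for MEM_RELEASE
 *   }
 */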
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
BOOL
SC_VirtualFree(
LPVOID lpvAddress0, /* address of region of committed pages */
DWORD cbSize, /* size of the region */
DWORD fdwFreeType /* type of free operation */
)
{
int ixBlock;
int ixPage;
int ixFirB; /* index of first block in region */
PSECTION pscn;
int cpReserved; /* # of reserved (not committed) pages in region */
int cpRegion; /* total # of pages in region */
int cPages; /* # of pages to free */
DWORD baseScn; /* base address of section */
DWORD dwAddr = (DWORD) lpvAddress0;
BOOL bForceDecommit = (fdwFreeType & 0x80000000);
LPDWORD pPageList = NULL;
/* Verify that the requested region is within range and within an
* existing reserved memory region that the client is allowed to
* access and locate the starting block of the region.
*/
fdwFreeType &= ~0x80000000;
if (IsCeLogStatus(CELOGSTATUS_ENABLED_GENERAL)) {
CELOG_VirtualFree(dwAddr, cbSize, fdwFreeType);
}
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree @%8.8lx size=%lx freetype=%lx\r\n"),
dwAddr, cbSize, fdwFreeType));
if (!dwAddr || IsKernelVa (dwAddr) || IsStackVA(dwAddr, cbSize)) {
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE;
}
if ((dwAddr < LAST_MAPPER_ADDRESS) && (dwAddr >= FIRST_MAPPER_ADDRESS) && !cbSize && (fdwFreeType == MEM_RELEASE)) {
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree @%8.8lx size=%lx freetype=%lx : doing HugeVirtualRelease\r\n"),
dwAddr, cbSize, fdwFreeType));
DEBUGCHK (dwAddr == (DWORD) lpvAddress0);
return HugeVirtualRelease(lpvAddress0);
}
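/* Note: the check above relies on the Win32 contract that MEM_RELEASE takes
 * cbSize == 0 and releases the entire reservation, so any mapper-range
 * release can be routed wholesale to HugeVirtualRelease. */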
/* Lockout other changes to the virtual memory state. */
EnterCriticalSection(&VAcs);