/*
 * virtmem.c - virtual memory section management
 */
if (((ulong)pvAddr & 0x80000000) != 0 || (!InSysCall() && LockPages(pvAddr, 1, 0, LOCKFLAG_READ)))
ret = VerifyAccess(pvAddr, VERIFY_KERNEL_OK, (ACCESSKEY)-1);
else
ret = 0;
break;
case DV_CLEARBP:
if (((ulong)pvAddr & 0x80000000) == 0 && !InSysCall())
UnlockPages(pvAddr, 1);
ret = 0;
break;
}
return ret;
}
/* DeleteSection - release a virtual memory section and its blocks.
 *
 * lpvSect: base address of the section to delete; the section index is
 *          taken from the VA_SECTION bits of the address.
 *
 * Decommits and frees every MEMBLOCK in the section, clears the
 * section-table slot, and returns the SECTION structure to the free
 * pool. All work is done while holding the VAcs critical section.
 */
VOID DeleteSection(LPVOID lpvSect) {
    PSECTION pscn;
    int ixBlk;
    int ixSect;
    DEBUGMSG(ZONE_VIRTMEM,(TEXT("DeleteSection %8.8lx\r\n"), lpvSect));
    EnterCriticalSection(&VAcs);
    ixSect = (ulong)lpvSect >> VA_SECTION;
    if (ixSect <= SECTION_MASK && ixSect >= RESERVED_SECTIONS
            && (pscn = SectionTable[ixSect]) != NULL_SECTION) {
        /* Process sections start freeing at block 1 (block 0 is the
         * shared kernel page); a mapper section owns its block 0 and
         * frees from there. */
        ixBlk = ((*pscn)[0]->ixBase == 0) ? 0 : 1;
        for ( ; ixBlk <= BLOCK_MASK ; ++ixBlk) {
            MEMBLOCK *pmb = (*pscn)[ixBlk];
            if (pmb != NULL_BLOCK && pmb != RESERVED_BLOCK) {
                DecommitPages(pscn, ixBlk, 0, PAGES_PER_BLOCK, (DWORD)lpvSect, TRUE);
                /* DecommitPages may have replaced the slot with a
                 * reserved placeholder; re-read before freeing. */
                if ((*pscn)[ixBlk] != RESERVED_BLOCK)
                    FreeMem((*pscn)[ixBlk], HEAP_MEMBLOCK);
            }
        }
        SectionTable[ixSect] = NULL_SECTION;
        FreeSection(pscn);
        if (ixSect < FirstFreeSection)
            FirstFreeSection = ixSect;
    } else
        DEBUGMSG(ZONE_VIRTMEM,(TEXT("DeleteSection failed.\r\n")));
    LeaveCriticalSection(&VAcs);
}
/* KPageBlock - static MEMBLOCK describing the kernel data page.
 * Installed as block 0 of the kernel section (InitNKSection) and of
 * every newly created section (CreateSection), so the user-visible
 * kernel data page appears at a fixed offset in each slot.
 * The page-table array is padded with invalid (0) entries up to the
 * 0x5800 offset of the kernel data page; the padding count depends on
 * the platform page size. */
const MEMBLOCK KPageBlock = {
~0ul, // alk
0, // cUses
0, // flags
UNKNOWN_BASE,// ixBase
0, // hPf
1, // cLocks
{
#if PAGE_SIZE == 4096
0, 0, 0, 0, 0,
#elif PAGE_SIZE == 2048
/* 0x0000: */ 0, 0, 0, 0,
/* 0x2000: */ 0, 0, 0, 0,
/* 0x4000: */ 0, 0, 0,
/* 0x5800: (start of user visible kernel data page) */
#elif PAGE_SIZE == 1024
/* 0x0000: */ 0, 0, 0, 0,
/* 0x1000: */ 0, 0, 0, 0,
/* 0x2000: */ 0, 0, 0, 0,
/* 0x3000: */ 0, 0, 0, 0,
/* 0x4000: */ 0, 0, 0, 0,
/* 0x5000: */ 0, 0,
/* 0x5800: (start of user visible kernel data page) */
#else
#error Unsupported page size.
#endif
KPAGE_PTE
}
};
/* NKSection - the kernel's own memory section (section index 1). */
SECTION NKSection;
/* InitNKSection - install the kernel section into the section table.
 * Block 0 is pointed at the shared KPageBlock so the kernel data page
 * is mapped. Returns the base virtual address of section 1. */
LPVOID InitNKSection(void) {
SectionTable[1] = &NKSection;
NKSection[0] = (MEMBLOCK*)&KPageBlock;
return (LPVOID)(1<<VA_SECTION);
}
/* CreateSection - allocate a virtual memory section.
 *
 * lpvAddr: requested base address, or 0 to let the kernel pick a free
 *          process-slot section.
 * Returns the base address of the new section, or 0 on failure
 * (requested address out of range or already in use, no free slot,
 * or out of memory).
 *
 * Block 0 of the new section always points at the shared KPageBlock
 * so the kernel data page is visible in the new slot.
 */
LPVOID CreateSection(LPVOID lpvAddr) {
    /* BUGFIX: pSect was uninitialized; the failure paths that jump to
     * exitCreate before GetSection() runs would pass garbage to the
     * final DEBUGMSG. Initialize it to NULL. */
    PSECTION pSect = NULL;
    uint ixSect;
    EnterCriticalSection(&VAcs);
    if (lpvAddr != 0) {
        /* Caller asked for a specific section: it must be in range and unused. */
        if ((ixSect = (ulong)lpvAddr>>VA_SECTION) > SECTION_MASK
                || SectionTable[ixSect] != NULL_SECTION) {
            DEBUGMSG(1,(TEXT("CreateSection failed (1)\r\n")));
            lpvAddr = 0;
            goto exitCreate;
        }
    } else {
        /* scan for an available section table entry */
        for (;;) {
            for (ixSect = FirstFreeSection ; ixSect < MAX_PROCESSES+RESERVED_SECTIONS
                    ; ++ixSect) {
                if (SectionTable[ixSect] == NULL_SECTION)
                    goto foundSect;
            }
            /* Wrap the scan back to the first non-reserved slot once,
             * in case FirstFreeSection started past a freed entry. */
            if (FirstFreeSection == RESERVED_SECTIONS)
                break;
            FirstFreeSection = RESERVED_SECTIONS;
        }
        /* no sections available */
        DEBUGMSG(1, (TEXT("CreateSection failed (2)\r\n")));
        goto exitCreate;
        /* found a free section: set new first free index */
    foundSect:
        FirstFreeSection = ixSect+1;
    }
    /* Allocate a section and initialize it to invalid blocks */
    if ((pSect = GetSection()) == 0) {
        DEBUGMSG(1, (TEXT("CreateSection failed (3)\r\n")));
        lpvAddr = 0;
        goto exitCreate;
    }
    (*pSect)[0] = (MEMBLOCK*)&KPageBlock;
    SectionTable[ixSect] = pSect;
    lpvAddr = (LPVOID)(ixSect << VA_SECTION);
exitCreate:
    LeaveCriticalSection(&VAcs);
    DEBUGMSG(ZONE_VIRTMEM, (TEXT("CreateSection done: addr=%8.8lx, pSect=%8.8lx\r\n"), lpvAddr, pSect));
    return lpvAddr;
}
// CreateMapperSection - allocate memory section for file mapping
//
// Creates a normal 32mb section at dwBase, then converts it for the
// mapper: block 0 becomes a kernel-owned MB_PAGER_MAPPING head block
// and every other block is marked RESERVED_BLOCK.
// Returns TRUE on success; FALSE if dwBase is outside the mapper
// address window or the MEMBLOCK allocation fails.
BOOL CreateMapperSection(DWORD dwBase) {
    BOOL fOk = FALSE;
    MEMBLOCK *pBlk;
    PSECTION pSect;
    int ixBlk;
    // Reject addresses outside the mapper window up front.
    if ((dwBase < FIRST_MAPPER_ADDRESS) || (dwBase >= LAST_MAPPER_ADDRESS)) {
        DEBUGMSG(1, (TEXT("CreateMapperSection: %8.8lx is out of range.\r\n"), dwBase));
        return FALSE;
    }
    EnterCriticalSection(&VAcs);
    if (CreateSection((LPVOID)dwBase) != 0) {
        pSect = SectionTable[(dwBase>>VA_SECTION) & SECTION_MASK];
        pBlk = AllocMem(HEAP_MEMBLOCK);
        if (pBlk == 0) {
            // Could not build the head block: undo the section create.
            DeleteSection((LPVOID)dwBase);
            DEBUGMSG(1, (TEXT("CreateMapperSection: unable to allocate MEMBLOCK\r\n")));
        } else {
            // Head block: owned by the kernel process, paged by the mapper.
            memset(pBlk, 0, sizeof(MEMBLOCK));
            LockFromKey(&pBlk->alk, &ProcArray[0].aky);
            pBlk->cUses = 1;
            pBlk->flags = MB_PAGER_MAPPING;
            pBlk->ixBase = 0;
            (*pSect)[0] = pBlk;
            // Reserve the rest of the section for the mapper.
            for (ixBlk = 1 ; ixBlk <= BLOCK_MASK ; ++ixBlk)
                (*pSect)[ixBlk] = RESERVED_BLOCK;
            fOk = TRUE;
        }
    }
    LeaveCriticalSection(&VAcs);
    return fOk;
}
// DeleteMapperSection - delete a section created by CreateMapperSection()
//
// This function must be used to delete a memory section which is created
// by CreateMapperSection. Addresses outside the mapper window are
// rejected with a debug message and no action.
void DeleteMapperSection(DWORD dwBase) {
    if (dwBase < FIRST_MAPPER_ADDRESS || dwBase >= LAST_MAPPER_ADDRESS) {
        DEBUGMSG(1, (TEXT("DeleteMapperSection: %8.8lx is out of range.\r\n"), dwBase));
        return;
    }
    DeleteSection((LPVOID)dwBase);
}
LPVOID SC_VirtualAlloc(
LPVOID lpvAddress, /* address of region to reserve or commit */
DWORD cbSize, /* size of the region */
DWORD fdwAllocationType,/* type of allocation */
DWORD fdwProtect) /* type of access protection */
{
int ixBlock;
int ixPage;
int ix;
int ixFirB; /* index of first block in region */
PSECTION pscn;
int cPages; /* # of pages to allocate */
int cNeed;
int cpAlloc; /* # of physical pages to allocate */
ulong ulPgMask; /* page permissions */
ulong ulPFN; /* page physical frame number */
MEMBLOCK *pmb;
DWORD err;
LPBYTE lpbEnd;
LPVOID lpvResult = NULL;
ixFirB = UNKNOWN_BASE;
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualAlloc(%8.8lx, %lx, %lx, %lx)\r\n"),
lpvAddress, cbSize, fdwAllocationType, fdwProtect));
if (!cbSize) {
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE;
}
if (!(ulPgMask = MakePagePerms(fdwProtect)))
return FALSE; /* invalid protection flags, error # already set */
lpbEnd = (LPBYTE)lpvAddress + cbSize;
/* Lockout other changes to the virtual memory state. */
EnterCriticalSection(&VAcs);
if ((fdwAllocationType & MEM_RESERVE) || (!lpvAddress && (fdwAllocationType & MEM_COMMIT))) {
// Set MEM_RESERVE so the error cleanup works properly.
fdwAllocationType |= MEM_RESERVE;
/* Validate input parameters */
if ((ulong)lpvAddress>>VA_SECTION > SECTION_MASK)
goto invalidParm;
pscn = SectionTable[((ulong)lpvAddress>>VA_SECTION) & SECTION_MASK];
if (pscn == NULL_SECTION)
goto invalidParm;
ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK;
if (((ulong)lpvAddress & ~(SECTION_MASK<<VA_SECTION)) != 0) {
if (cbSize > (1<<VA_SECTION))
goto invalidParm;
/* The client has asked to reserve a specific region of memory.
* Verify that the requested region is within range and within an
* existing memory section that the client is allowed to access.
*/
/* adjust lpvAddress to 64K boundary. */
lpvAddress = (LPVOID)((ulong)lpvAddress & 0xFFFF0000l);
/* Verify that the entire range is available to be reserved. */
cPages = (ulong)(lpbEnd - (LPBYTE)lpvAddress + PAGE_SIZE-1) / PAGE_SIZE;
for (cNeed = cPages, ix = ixBlock ; cNeed > 0
; ++ix, cNeed -= PAGES_PER_BLOCK) {
if ((*pscn)[ix] != NULL_BLOCK)
goto invalidParm;
}
} else {
/* No specific address given. Go find a region of free blocks */
cPages = (ulong)(lpbEnd - (LPBYTE)lpvAddress + PAGE_SIZE-1) / PAGE_SIZE;
cNeed = cPages;
if (fdwAllocationType & MEM_TOP_DOWN) {
if (cbSize > (1<<VA_SECTION))
goto invalidParm;
/* Scan backwards from the end of the section */
for (ix = BLOCK_MASK+1 ; --ix > 0 ; ) {
if ((*pscn)[ix] != NULL_BLOCK)
cNeed = cPages;
else if ((cNeed -= PAGES_PER_BLOCK) <= 0) {
ixBlock = ix;
goto foundRegion;
}
}
} else if ((cPages * PAGE_SIZE >= 2*1024*1024) && !ixBlock &&
(fdwAllocationType == MEM_RESERVE) && (fdwProtect == PAGE_NOACCESS)) {
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualAlloc(%8.8lx, %lx, %lx, %lx): doing HugeVirtualAlloc\r\n"),
lpvAddress, cbSize, fdwAllocationType, fdwProtect));
LeaveCriticalSection(&VAcs);
lpvResult = HugeVirtualReserve(cbSize);
CELOG_VirtualAlloc((DWORD)lpvResult, (DWORD)lpvAddress, cbSize, fdwAllocationType, fdwProtect);
return lpvResult;
} else {
if (cbSize > (1<<VA_SECTION))
goto invalidParm;
/* Scan forwards from the beginning of the section */
ixBlock = 1;
for (ix = 1 ; ix < BLOCK_MASK+1 ; ++ix) {
if ((*pscn)[ix] != NULL_BLOCK) {
ixBlock = ix+1;
cNeed = cPages;
} else if ((cNeed -= PAGES_PER_BLOCK) <= 0)
goto foundRegion;
}
}
err = ERROR_NOT_ENOUGH_MEMORY;
goto setError;
foundRegion:
lpvAddress = (LPVOID)(((ulong)lpvAddress&(SECTION_MASK<<VA_SECTION))
| ((ulong)ixBlock << VA_BLOCK));
}
/* Reserve the range of blocks */
if (!(pmb = AllocMem(HEAP_MEMBLOCK))) {
err = ERROR_NOT_ENOUGH_MEMORY;
goto setError;
}
memset(pmb,0,sizeof(MEMBLOCK));
if (((ulong)lpvAddress&(SECTION_MASK<<VA_SECTION)) == ProcArray[0].dwVMBase)
LockFromKey(&pmb->alk, &ProcArray[0].aky);
else
LockFromKey(&pmb->alk, &pCurProc->aky);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualAlloc: reserved lock=%lx\r\n"),
pmb->alk));
pmb->cUses = 1;
switch (fdwAllocationType & (MEM_MAPPED|MEM_IMAGE)) {
case MEM_MAPPED:
pmb->flags = MB_PAGER_MAPPING;
break;
case MEM_IMAGE:
pmb->flags = MB_PAGER_LOADER;
break;
case 0:
pmb->flags = MB_PAGER_NONE;
break;
default:
goto invalidParm;
}
if (fdwAllocationType & MEM_AUTO_COMMIT)
pmb->flags |= MB_FLAG_AUTOCOMMIT;
pmb->ixBase = ixBlock;
(*pscn)[ixBlock] = pmb;
DEBUGMSG(ZONE_VIRTMEM,
(TEXT("VirtualAlloc: created head block pmb=%8.8lx (%x)\r\n"),
pmb, ixBlock));
ixFirB = ixBlock; /* note start of region for error recovery */
for (cNeed = cPages, ix = ixBlock+1 ; (cNeed -= PAGES_PER_BLOCK) > 0 ; ++ix) {
if (cNeed < PAGES_PER_BLOCK || (fdwAllocationType & MEM_AUTO_COMMIT)) {
/* The last block is not full so must allocate a MEMBLOCK
* and mark the extra pages as invalid or this is an auto-commit
 * region which needs its own MEMBLOCK. */
/* NOTE(review): this copy of the file is truncated here -- the
 * remainder of SC_VirtualAlloc (and anything after it) was replaced
 * by pasted web-viewer text; recover it from the original source. */