// virtmem.c
if (IsSecureVa(dwAddr)) {
pscn = &NKSection;
baseScn = SECURE_VMBASE;
} else {
DEBUGCHK ((dwAddr >> VA_SECTION) <= SECTION_MASK); // can't be 0, but can be NULL_SECTION
if ((pscn = SectionTable[dwAddr >>VA_SECTION]) == NULL_SECTION)
goto invalidParm;
baseScn = dwAddr & (SECTION_MASK<<VA_SECTION);
}
ixBlock = (dwAddr >> VA_BLOCK) & BLOCK_MASK;
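/* Worked example of the address split (assuming the usual layout of 32MB
* sections, 64KB blocks and 4KB pages, i.e. VA_SECTION = 25, VA_BLOCK = 16,
* VA_PAGE = 12): dwAddr = 0x04023000 lies in section 0x04023000 >> 25 = 2,
* block (0x04023000 >> 16) & BLOCK_MASK = 2 within that section, and page
* (0x04023000 >> 12) & PAGE_MASK = 3 within that block. */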
if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE)
goto invalidParm;
/* Verify the status of the region based upon the type of free operation
* being performed.
*
* (pscn) = ptr to section array
* (ixBlock) = index of block containing the first page of lpvAddress
* (ixFirB) = index of first block in the region
*/
ixPage = (dwAddr >> VA_PAGE) & PAGE_MASK;
if (fdwFreeType == MEM_RELEASE) {
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree releasing region ixBlock=%x.\r\n"),ixBlock));
if (cbSize != 0 || ixPage != 0 || ixBlock != ixFirB)
goto invalidParm;
cpReserved = ScanRegion(pscn, ixFirB, ixFirB, 0, (BLOCK_MASK+1)*PAGES_PER_BLOCK, &cpRegion, baseScn);
DEBUGCHK(cpReserved != -1);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree: cpReserved=%d cpRegion=%d\r\n"), cpReserved, cpRegion));
/* The entire region must be either reserved or committed. */
if (cpReserved != cpRegion) {
if (cpReserved != 0)
goto invalidParm;
pPageList = DecommitPages(pscn, ixFirB, 0, cpRegion, baseScn, bForceDecommit, NULL);
}
ReleaseRegion(pscn, ixFirB);
LeaveCriticalSection(&VAcs);
FreePageList (pPageList);
return TRUE;
} else if (fdwFreeType == MEM_DECOMMIT) {
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree decommitting block %x page %x.\r\n"), ixBlock, ixPage));
if (cbSize == 0)
goto invalidParm;
cPages = ((dwAddr + cbSize + PAGE_SIZE-1) / PAGE_SIZE)
- (dwAddr / PAGE_SIZE);
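/* Worked example (assuming a 4KB PAGE_SIZE): dwAddr = 0x00012345 and
* cbSize = 0x2000 span the byte range [0x12345, 0x14345), which touches
* pages 0x12, 0x13 and 0x14; the formula rounds the end up and the start
* down, giving 0x15 - 0x12 = 3 pages. */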
cpReserved = ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, &cpRegion, baseScn);
if (cpRegion < cPages)
goto invalidParm;
if (cpReserved != cPages)
pPageList = DecommitPages(pscn, ixBlock, ixPage, cPages, baseScn, bForceDecommit, NULL);
LeaveCriticalSection(&VAcs);
FreePageList (pPageList);
return TRUE;
}
invalidParm:
LeaveCriticalSection(&VAcs);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree failed.\r\n")));
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE;
}
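// Caller-side sketch of the contract enforced above: MEM_DECOMMIT requires a
// non-zero size and may target any committed sub-range, while MEM_RELEASE
// requires a size of zero and the base address of the region. Illustrative
// only (assumes the standard user-mode VirtualAlloc/VirtualFree APIs and
// headers); not part of this file's build.
#if 0
static BOOL ReleaseScratchRegion(void)
{
    // Reserve a 64KB region and commit its first page.
    LPVOID pRegion = VirtualAlloc(NULL, 0x10000, MEM_RESERVE, PAGE_NOACCESS);
    if (!pRegion)
        return FALSE;
    if (!VirtualAlloc(pRegion, PAGE_SIZE, MEM_COMMIT, PAGE_READWRITE)) {
        VirtualFree(pRegion, 0, MEM_RELEASE);
        return FALSE;
    }
    // MEM_DECOMMIT: explicit, non-zero size.
    VirtualFree(pRegion, PAGE_SIZE, MEM_DECOMMIT);
    // MEM_RELEASE: size 0 and the region base address.
    return VirtualFree(pRegion, 0, MEM_RELEASE);
}
#endif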
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
LPVOID SC_CeVirtualSharedAlloc (LPVOID lpvAddr, DWORD cbSize, DWORD fdwAction)
{
DWORD dwErr = 0;
TRUSTED_API (L"SC_CeVirtualSharedAlloc", NULL);
// verify parameters
if ((fdwAction & ~(MEM_RESERVE|MEM_COMMIT)) || !fdwAction) {
dwErr = ERROR_INVALID_PARAMETER;
} else if (lpvAddr) {
if (!IsInSharedSection (lpvAddr) || (MEM_COMMIT != fdwAction)) {
dwErr = ERROR_INVALID_PARAMETER;
}
} else {
// lpvAddr is NULL, always add the reserve flag and change the address
// to the base of the shared section
fdwAction |= MEM_RESERVE;
lpvAddr = (LPVOID) SHARED_BASE_ADDRESS;
}
if (dwErr) {
KSetLastError (pCurThread, dwErr);
return NULL;
}
return DoVirtualAlloc (lpvAddr, cbSize, fdwAction, PAGE_READWRITE, 0, 0);
}
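// Caller-side sketch of the parameter rules checked above: a NULL address
// gets MEM_RESERVE added and is placed at the base of the shared section,
// while a non-NULL address may only MEM_COMMIT pages already inside the
// shared section. Illustrative only (assumes the exported
// CeVirtualSharedAlloc API); not part of this file's build.
#if 0
static LPVOID AllocSharedBuffer(DWORD cbSize)
{
    // Reserve and commit in one call; the kernel chooses the shared-section
    // address when lpvAddr is NULL.
    return CeVirtualSharedAlloc(NULL, cbSize, MEM_RESERVE | MEM_COMMIT);
}
#endif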
//------------------------------------------------------------------------------
// @doc INTERNAL
// @func BOOL | VirtualCopy | Duplicate a virtual memory range (Windows CE Only)
// @parm LPVOID | lpvDest | address of destination pages
// @parm LPVOID | lpvSrc | address of source pages
// @parm DWORD | cbSize | number of bytes to copy
// @parm DWORD | fdwProtect | access protection for destination pages
// @comm Maps the pages backing lpvSrc (or a physical region when PAGE_PHYSICAL is set) into the reserved destination range at lpvDest.
//------------------------------------------------------------------------------
BOOL
DoVirtualCopy(
LPVOID lpvDest0, /* address of destination pages */
LPVOID lpvSrc0, /* address of source pages */
DWORD cbSize, /* # of bytes to copy */
DWORD fdwProtect /* access protection for destination pages */
)
{
int ixDestBlk, ixSrcBlk = 0; /* ixSrcBlk is initialized only to keep prefast
* happy; it is not used when bPhys is TRUE. */
int ixPage;
int ixDestFirB; /* index of first block in destination region */
int ixSrcFirB; /* index of first block in source region */
PSECTION pscnDest;
PSECTION pscnSrc;
int cpReserved; /* # of reserved (not committed) pages in region */
int cpRegion; /* total # of pages in region */
int cPages; /* # of pages to copy */
ulong ulPFN; /* page physical frame number */
ulong ulPgMask;
BOOL bPhys = FALSE; /* TRUE if mapping physical pages */
DWORD dwDstAddr = (DWORD) lpvDest0;
DWORD dwSrcAddr = (DWORD) lpvSrc0;
/* Verify that the requested regions are within range and lie within
* existing reserved memory regions that the client is allowed to
* access, then locate the starting block of both regions.
*/
if (IsCeLogStatus(CELOGSTATUS_ENABLED_GENERAL)) {
CELOG_VirtualCopy(dwDstAddr, dwSrcAddr, cbSize, fdwProtect);
}
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualCopy %8.8lx <= %8.8lx size=%lx prot=%lx\r\n"),
dwDstAddr, dwSrcAddr, cbSize, fdwProtect));
if (fdwProtect & PAGE_PHYSICAL) {
bPhys = TRUE;
fdwProtect &= ~PAGE_PHYSICAL;
}
if (!cbSize
|| !dwDstAddr
|| !(ulPgMask = MakePagePerms(fdwProtect, dwDstAddr))
|| IsKernelVa (dwDstAddr)) {
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE; /* invalid protection flags, error # already set */
}
/* Lockout other changes to the virtual memory state. */
EnterCriticalSection(&VAcs);
/* Validate the destination parameters */
pscnDest = IsSecureVa (dwDstAddr)? &NKSection : SectionTable[(dwDstAddr>>VA_SECTION) & SECTION_MASK];
if (pscnDest == NULL_SECTION)
goto invalidParm;
ixDestBlk = (dwDstAddr >> VA_BLOCK) & BLOCK_MASK;
ixDestFirB = FindFirstBlock(pscnDest, ixDestBlk);
if (ixDestFirB == UNKNOWN_BASE)
goto invalidParm;
/* Verify that all of the destination pages are reserved (not committed). */
ixPage = (dwDstAddr >> VA_PAGE) & PAGE_MASK;
cPages = ((dwDstAddr + cbSize + PAGE_SIZE-1) / PAGE_SIZE) - (dwDstAddr / PAGE_SIZE);
cpReserved = ScanRegion(pscnDest, ixDestFirB, ixDestBlk, ixPage, cPages, 0, dwDstAddr & -(1 << VA_SECTION));
if (cpReserved != cPages)
goto invalidParm;
/* Validate the source address parameters */
if (bPhys) {
ulPFN = PFNfrom256(dwSrcAddr);
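/* Example of the PAGE_PHYSICAL convention (assuming dwSrcAddr carries the
* physical address divided by 256 and a 4KB PAGE_SIZE): a physical base of
* 0xA0001000 is passed as 0x00A00010, and the check below requires its page
* offset, (0x00A00010 << 8) & 0xFFF = 0, to match the page offset of
* dwDstAddr. */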
if ((dwDstAddr&(PAGE_SIZE-1)) != ((dwSrcAddr<<8) &(PAGE_SIZE-1)))
goto invalidParm;
} else if (IsKernelVa (dwSrcAddr)) {
/* Mapping pages from a physical region. */
bPhys = TRUE;
ulPFN = GetPFN(dwSrcAddr);
if ((dwDstAddr&(PAGE_SIZE-1)) != (dwSrcAddr&(PAGE_SIZE-1)))
goto invalidParm;
} else {
/* Mapping pages from another virtual region. */
bPhys = FALSE;
if (!dwSrcAddr || (dwDstAddr&0xFFFFL) != (dwSrcAddr&0xFFFFL))
goto invalidParm;
pscnSrc = IsSecureVa (dwSrcAddr)? &NKSection : SectionTable[(dwSrcAddr>>VA_SECTION) & SECTION_MASK];
if (pscnSrc == NULL_SECTION)
goto invalidParm;
ixSrcBlk = (dwSrcAddr >> VA_BLOCK) & BLOCK_MASK;
ixSrcFirB = FindFirstBlock(pscnSrc, ixSrcBlk);
if (ixSrcFirB == UNKNOWN_BASE)
goto invalidParm;
/* Verify that all of the source pages are committed */
cpReserved = ScanRegion(pscnSrc, ixSrcFirB, ixSrcBlk, ixPage, cPages, &cpRegion, dwSrcAddr & -(1 << VA_SECTION));
if (cpReserved || cpRegion != cPages)
goto invalidParm;
}
/* Walk the page tables, mapping the pages in the source region into
* the destination region. */
for (; cPages > 0 ; ++ixDestBlk, ++ixSrcBlk) {
MEMBLOCK *pmbSrc;
MEMBLOCK *pmbDest = (*pscnDest)[ixDestBlk];
if (!bPhys)
pmbSrc = (*pscnSrc)[ixSrcBlk];
#if !HARDWARE_PT_PER_PROC // MEMBLOCK cannot be shared for CPUs using hardware page table
if (!bPhys
&& (ixDestFirB == ixSrcFirB)
&& (ixPage == 0)
&& ((cPages >= PAGES_PER_BLOCK) || (pmbSrc->aPages[cPages] == BAD_PAGE))
&& (pmbSrc->aPages[0]&PG_PERMISSION_MASK) == ulPgMask) {
/* Copying an entire block with the same access permissions into
* the same offset within the two sections. Share the same MEMBLOCK
* by bumping the use count on the MEMBLOCK. */
DEBUGCHK(pmbDest != NULL_BLOCK && pmbDest != RESERVED_BLOCK);
++pmbSrc->cUses;
AddAccess (&pmbSrc->alk, pCurProc->aky);
// add the original access too
AddAccess (&pmbSrc->alk, pmbDest->alk);
MDFreeMemBlock (pmbDest);
(*pscnDest)[ixDestBlk] = pmbSrc;
cPages -= PAGES_PER_BLOCK;
#if defined(SH4) && (PAGE_SIZE==4096)
} else if (bPhys && !ixPage && !(ixDestBlk & 15) && !(ulPFN&0x000FFFFF) && (cPages >= 256)) {
int loop, loop2;
DWORD dwSetting = ulPgMask | PG_1M_MASK;
for (loop = 0; loop < 16; loop++) {
pmbDest = (*pscnDest)[ixDestBlk+loop];
for (loop2 = 0; loop2 < 16; loop2++) {
pmbDest->aPages[loop2] = dwSetting | ulPFN;
ulPFN = NextPFN(ulPFN);
}
}
cPages-=256;
ixDestBlk+=15;
#endif
} else
#endif
{
for ( ; cPages && ixPage < PAGES_PER_BLOCK ; ++ixPage, --cPages) {
if (bPhys) {
DEBUGMSG(ZONE_VIRTMEM,
(TEXT("Mapping physical page %8.8lx @%3.3x%x000 perm=%x\r\n"),
ulPFN, ixDestBlk, ixPage, ulPgMask));
DEBUGCHK(pmbDest->aPages[ixPage] == 0);
pmbDest->aPages[ixPage] = ulPFN | ulPgMask;
ulPFN = NextPFN(ulPFN);
} else {
DWORD dwOrig = pmbSrc->aPages[ixPage];
DEBUGCHK(dwOrig != 0 && dwOrig != BAD_PAGE);
ulPFN = PFNfromEntry(dwOrig);
DupPhysPage(ulPFN);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Copying page %8.8lx @%3.3x%x000 perm=%x\r\n"),
ulPFN, ixDestBlk, ixPage, ulPgMask));
#if defined(SH3) && (PAGE_SIZE==1024)
pmbDest->aPages[ixPage] = ulPFN | ulPgMask | (dwOrig & PG_4K_MASK);
#elif defined(SH4) && (PAGE_SIZE==4096)
pmbDest->aPages[ixPage] = ulPFN | (ulPgMask & ~PG_1M_MASK) | (dwOrig & PG_1M_MASK);
#else
pmbDest->aPages[ixPage] = ulPFN | ulPgMask;
#endif
}
}
}
ixPage = 0; /* start with first page of next block */
}
LeaveCriticalSection(&VAcs);
return TRUE;
invalidParm:
LeaveCriticalSection(&VAcs);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualCopy failed.\r\n")));
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE;
}
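// Driver-side sketch of the two source conventions validated above: with
// PAGE_PHYSICAL set, the source parameter is the physical address divided by
// 256 (see PFNfrom256 and the <<8 offset check); otherwise the source is a
// committed virtual range whose page offset must match the destination.
// Illustrative only (assumes the standard user-mode VirtualAlloc, VirtualCopy
// and VirtualFree APIs); not part of this file's build.
#if 0
static volatile DWORD *MapDeviceRegisters(DWORD dwPhysBase, DWORD cbSize)
{
    // Reserve virtual space first; VirtualCopy requires reserved,
    // uncommitted destination pages.
    LPVOID pVirt = VirtualAlloc(NULL, cbSize, MEM_RESERVE, PAGE_NOACCESS);
    if (!pVirt)
        return NULL;
    // Pass the physical base shifted right by 8 when using PAGE_PHYSICAL.
    if (!VirtualCopy(pVirt, (LPVOID)(dwPhysBase >> 8), cbSize,
                     PAGE_READWRITE | PAGE_NOCACHE | PAGE_PHYSICAL)) {
        VirtualFree(pVirt, 0, MEM_RELEASE);
        return NULL;
    }
    return (volatile DWORD *)pVirt;
}
#endif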
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
BOOL
SC_VirtualCopy(
LPVOID lpvDest,
LPVOID lpvSrc,
DWORD cbSize,
DWORD fdwProtect
)
{
TRUSTED_API (L"SC_VirtualCopy", FALSE);
return DoVirtualCopy(lpvDest, lpvSrc, cbSize, fdwProtect);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
BOOL
VirtualSetPages (
LPVOID pvAddr, // virtual address
DWORD nPages, // # of pages
LPDWORD pPages, // the pages
DWORD fdwProtect // protection
)
{
DWORD dwAddr = (DWORD) pvAddr;
int ixBlock = (dwAddr >> VA_BLOCK) & BLOCK_MASK; // block #
int ixPage = (dwAddr >> VA_PAGE) & PAGE_MASK; // page #
DWORD ulPgMask = MakePagePerms (fdwProtect, dwAddr);
int ixFirB; // first memblock of the reservation
PSECTION pscn;
MEMBLOCK *pmb;
ACCESSKEY aky;
DWORD dwBase;
// internal function. parameters are assumed to be always valid
DEBUGCHK (pPages && nPages && pvAddr);
EnterCriticalSection (&VAcs);
if (0x80000000 & dwAddr) {
dwBase = SECURE_VMBASE;
pscn = &NKSection;
aky = ProcArray[0].aky;
} else {
dwBase = dwAddr & (SECTION_MASK<<VA_SECTION);
pscn = SectionTable[(dwAddr>>VA_SECTION) & SECTION_MASK];
aky = pCurProc->aky;
}
ixFirB = FindFirstBlock (pscn, ixBlock);
DEBUGCHK (UNKNOWN_BASE != ixFirB);
for (ixPage = (dwAddr >> VA_PAGE) & PAGE_MASK ; nPages; ixPage = 0, ixBlock ++) {
// allocate memblock if necessary
if ((pmb = (*pscn)[ixBlock]) == RESERVED_BLOCK) {
if (!(pmb = MDAllocMemBlock (dwBase, ixBlock))) {
break;
}
pmb->alk = aky;
pmb->flags = (*pscn)[ixFirB