virtmem.c
AddAccess(&pmbSrc->alk, pCurProc->aky);
(*pscnDest)[ixDestBlk] = pmbSrc;
cPages -= PAGES_PER_BLOCK;
#if defined(SH4) && (PAGE_SIZE==4096)
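    /* Fast path (as read from the code, not documented here): the destination is
     * block-aligned (ixDestBlk a multiple of 16), the source physical address is
     * 1MB-aligned (ulPFN & 0xFFFFF == 0), and at least 256 4KB pages remain, so
     * the next 16 blocks are filled with entries tagged PG_1M_MASK and the SH4
     * MMU can cover the range with 1MB TLB entries. */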
} else if (bPhys && !ixPage && !(ixDestBlk & 15) && !(ulPFN&1048575) && (cPages >= 256)) {
int loop, loop2;
DWORD dwSetting = ulPgMask | PG_1M_MASK;
for (loop = 0; loop < 16; loop++) {
pmbDest = (*pscnDest)[ixDestBlk+loop];
for (loop2 = 0; loop2 < 16; loop2++) {
pmbDest->aPages[loop2] = dwSetting | ulPFN;
ulPFN = NextPFN(ulPFN);
}
}
cPages-=256;
ixDestBlk+=15;
#endif
} else {
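    /* General path: fill the current block one page at a time, either mapping
     * physical pages (bPhys) or duplicating committed pages from the source block. */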
for ( ; cPages && ixPage < PAGES_PER_BLOCK ; ++ixPage, --cPages) {
if (bPhys) {
DEBUGMSG(ZONE_VIRTMEM,
(TEXT("Mapping physical page %8.8lx @%3.3x%x000 perm=%x\r\n"),
ulPFN, ixDestBlk, ixPage, ulPgMask));
#if 0
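        /* NOTE: the SH3/SH4 grouping of physical pages into 4KB/64KB large-page
         * entries below is compiled out by this #if 0; the plain per-page mapping
         * in the matching #else of this #if 0 is what actually runs. */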
#if defined(SH3) && (PAGE_SIZE==1024)
if (!(ixPage&3) && !(ulPFN&4095) && (cPages >= 4)) {
int loop;
DWORD dwSetting = ulPgMask | PG_4K_MASK;
for (loop = 0; loop < 4; loop++) {
DEBUGCHK(pmbDest->aPages[ixPage+loop] == 0);
pmbDest->aPages[ixPage+loop] = dwSetting | ulPFN;
ulPFN = NextPFN(ulPFN);
}
ixPage+=3;
cPages-=3;
} else {
DEBUGCHK(pmbDest->aPages[ixPage] == 0);
pmbDest->aPages[ixPage] = ulPFN | ulPgMask;
ulPFN = NextPFN(ulPFN);
}
#elif defined(SH4) && (PAGE_SIZE==4096)
if (!(ixPage&15) && !(ulPFN&65535) && (cPages >= 16)) {
int loop;
DWORD dwSetting = (ulPgMask & ~PG_1M_MASK) | PG_64K_MASK;
for (loop = 0; loop < 16; loop++) {
DEBUGCHK(pmbDest->aPages[ixPage+loop] == 0);
pmbDest->aPages[ixPage+loop] = dwSetting | ulPFN;
ulPFN = NextPFN(ulPFN);
}
ixPage+=15;
cPages-=15;
} else {
DEBUGCHK(pmbDest->aPages[ixPage] == 0);
pmbDest->aPages[ixPage] = ulPFN | ulPgMask;
ulPFN = NextPFN(ulPFN);
}
#else
DEBUGCHK(pmbDest->aPages[ixPage] == 0);
pmbDest->aPages[ixPage] = ulPFN | ulPgMask;
ulPFN = NextPFN(ulPFN);
#endif
#else
DEBUGCHK(pmbDest->aPages[ixPage] == 0);
pmbDest->aPages[ixPage] = ulPFN | ulPgMask;
ulPFN = NextPFN(ulPFN);
#endif
} else {
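            /* Virtual source: take the PFN from the already-committed source entry,
             * add a reference to the physical page via DupPhysPage, and map it into
             * the destination with the requested permissions (preserving any
             * large-page size bits on SH3/SH4). */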
DWORD dwOrig = pmbSrc->aPages[ixPage];
DEBUGCHK(dwOrig != 0 && dwOrig != BAD_PAGE);
ulPFN = PFNfromEntry(dwOrig);
DupPhysPage(ulPFN);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Copying page %8.8lx @%3.3x%x000 perm=%x\r\n"),
ulPFN, ixDestBlk, ixPage, ulPgMask));
#if defined(SH3) && (PAGE_SIZE==1024)
pmbDest->aPages[ixPage] = ulPFN | ulPgMask | (dwOrig & PG_4K_MASK);
#elif defined(SH4) && (PAGE_SIZE==4096)
pmbDest->aPages[ixPage] = ulPFN | (ulPgMask & ~PG_1M_MASK) | (dwOrig & PG_1M_MASK);
#else
pmbDest->aPages[ixPage] = ulPFN | ulPgMask;
#endif
}
}
}
ixPage = 0; /* start with first page of next block */
}
LeaveCriticalSection(&VAcs);
return TRUE;
invalidParm:
LeaveCriticalSection(&VAcs);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualCopy failed.\r\n")));
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE;
}
BOOL SC_VirtualCopy(LPVOID lpvDest, LPVOID lpvSrc, DWORD cbSize, DWORD fdwProtect) {
if (pCurProc->bTrustLevel != KERN_TRUST_FULL) {
ERRORMSG(1,(L"SC_VirtualCopy failed due to insufficient trust\r\n"));
KSetLastError(pCurThread, ERROR_ACCESS_DENIED);
return 0;
}
return DoVirtualCopy(lpvDest, lpvSrc, cbSize, fdwProtect);
}
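/* Usage sketch (illustrative only, not part of this file; the physical address is
 * a made-up example): a fully trusted caller typically maps device registers by
 * reserving address space and then calling VirtualCopy with PAGE_PHYSICAL, which
 * passes the physical address shifted right by 8:
 *
 *   LPVOID pv = VirtualAlloc(0, 0x10000, MEM_RESERVE, PAGE_NOACCESS);
 *   VirtualCopy(pv, (LPVOID)(0xAB000000 >> 8), 0x10000,
 *               PAGE_READWRITE | PAGE_NOCACHE | PAGE_PHYSICAL);
 */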
#ifdef SH4
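/* SC_VirtualSetPageFlags (SH4 only): encodes the VSPF_* space/width flags into the
 * top three bits (31:29) of each page table entry and VSPF_TC into bit 9, leaving
 * the PFN and protection bits untouched (see the 0x1ffffdff mask below). */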
BOOL SC_VirtualSetPageFlags(LPVOID lpvAddress, DWORD cbSize, DWORD dwFlags, LPDWORD lpdwOldFlags) {
DWORD bits;
int ixBlock;
int ixPage;
int ixFirB; /* index of first block in region */
PSECTION pscn;
int cPages; /* # of pages to adjust */
MEMBLOCK *pmb;
if (pCurProc->bTrustLevel != KERN_TRUST_FULL) {
ERRORMSG(1,(L"SC_VirtualSetPageFlags failed due to insufficient trust\r\n"));
KSetLastError(pCurThread, ERROR_ACCESS_DENIED);
return 0;
}
switch (dwFlags & ~VSPF_TC) {
case VSPF_NONE:
bits = 0;
break;
case VSPF_VARIABLE:
bits = 1;
break;
case VSPF_IOSPACE:
bits = 2;
break;
case VSPF_IOSPACE | VSPF_16BIT:
bits = 3;
break;
case VSPF_COMMON:
bits = 4;
break;
case VSPF_COMMON | VSPF_16BIT:
bits = 5;
break;
case VSPF_ATTRIBUTE:
bits = 6;
break;
case VSPF_ATTRIBUTE | VSPF_16BIT:
bits = 7;
break;
default:
KSetLastError(pCurThread,ERROR_INVALID_PARAMETER);
return FALSE;
}
bits <<= 29;
if (dwFlags & VSPF_TC)
bits |= (1 << 9);
EnterCriticalSection(&VAcs);
if (!cbSize || !lpvAddress || (cbSize & 0x80000000) || ((ulong)lpvAddress>>VA_SECTION > SECTION_MASK))
goto invalidParm;
pscn = SectionTable[((ulong)lpvAddress>>VA_SECTION) & SECTION_MASK];
if (pscn == NULL_SECTION)
goto invalidParm;
ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK;
if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE)
goto invalidParm;
/* Verify that all of the pages within the specified range belong to the same VirtualAlloc region and are committed.
* (pscn) = ptr to section array
* (ixBlock) = index of block containing the first page of lpvAddress
* (ixFirB) = index of first block in the region
*/
ixPage = ((ulong)lpvAddress >> VA_PAGE) & PAGE_MASK;
cPages = (((ulong)lpvAddress + cbSize + PAGE_SIZE-1) / PAGE_SIZE)
- ((ulong)lpvAddress / PAGE_SIZE);
if (ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, 0) != 0)
goto invalidParm;
/* Walk the page tables to set the new page permissions */
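    /* If requested, report the previous flags, decoded from the first page of the range. */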
if (lpdwOldFlags) {
switch ((*pscn)[ixBlock]->aPages[ixPage] >> 29) {
case 0:
*lpdwOldFlags = VSPF_NONE;
break;
case 1:
*lpdwOldFlags = VSPF_VARIABLE;
break;
case 2:
*lpdwOldFlags = VSPF_IOSPACE;
break;
case 3:
*lpdwOldFlags = VSPF_IOSPACE | VSPF_16BIT;
break;
case 4:
*lpdwOldFlags = VSPF_COMMON;
break;
case 5:
*lpdwOldFlags = VSPF_COMMON | VSPF_16BIT;
break;
case 6:
*lpdwOldFlags = VSPF_ATTRIBUTE;
break;
case 7:
*lpdwOldFlags = VSPF_ATTRIBUTE | VSPF_16BIT;
break;
}
if (((*pscn)[ixBlock]->aPages[ixPage] >> 9) & 1)
*lpdwOldFlags |= VSPF_TC;
}
for (; cPages ; ++ixBlock) {
pmb = (*pscn)[ixBlock];
do {
DEBUGCHK(pmb->aPages[ixPage] != 0 && pmb->aPages[ixPage] != BAD_PAGE);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Setting extra bits @%3.3x%x000 bits=%x\r\n"), ixBlock, ixPage, bits));
pmb->aPages[ixPage] = (pmb->aPages[ixPage] & 0x1ffffdff) | bits;
} while (--cPages && ++ixPage < PAGES_PER_BLOCK);
ixPage = 0; /* start with first page of next block */
}
LeaveCriticalSection(&VAcs);
return TRUE;
invalidParm:
LeaveCriticalSection(&VAcs);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualSetPageFlags failed.\r\n")));
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE;
}
#endif
BOOL SC_VirtualProtect(
LPVOID lpvAddress, /* address of region of committed pages */
DWORD cbSize, /* size of the region */
DWORD fdwNewProtect, /* desired access protection */
PDWORD pfdwOldProtect) /* address of variable to get old protection */
{
int ixBlock;
int ixPage;
int ixFirB; /* index of first block in region */
PSECTION pscn;
int cPages; /* # of pages to adjust */
ulong ulPgMask; /* page permissions */
MEMBLOCK *pmb;
BOOL bRet = TRUE;
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualProtect @%8.8lx size=%lx fdwNewProtect=%lx\r\n"),
lpvAddress, cbSize, fdwNewProtect));
if ((ulPgMask = MakePagePerms(fdwNewProtect)) == 0)
return FALSE; /* invalid protection flags, error # already set */
/* Verify that the requested region is within range and within an
* existing reserved memory region that the client is allowed to
* access and locate the starting block of the region.
*/
/* Lockout other changes to the virtual memory state. */
EnterCriticalSection(&VAcs);
if (cbSize == 0 || pfdwOldProtect == 0)
goto invalidParm;
if (lpvAddress == 0 || (ulong)lpvAddress>>VA_SECTION > SECTION_MASK)
goto invalidParm;
pscn = SectionTable[((ulong)lpvAddress>>VA_SECTION) & SECTION_MASK];
if (pscn == NULL_SECTION)
goto invalidParm;
ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK;
if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE)
goto invalidParm;
/* Verify that all of the pages within the specified range belong to the
* same VirtualAlloc region and are committed.
*
* (pscn) = ptr to section array
* (ixBlock) = index of block containing the first page of lpvAddress
* (ixFirB) = index of first block in the region
*/
ixPage = ((ulong)lpvAddress >> VA_PAGE) & PAGE_MASK;
cPages = (((ulong)lpvAddress + cbSize + PAGE_SIZE-1) / PAGE_SIZE)
- ((ulong)lpvAddress / PAGE_SIZE);
if (ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, 0) != 0)
goto invalidParm;
/* Walk the page tables to set the new page permissions */
*pfdwOldProtect = ProtectFromPerms((*pscn)[ixBlock]->aPages[ixPage]);
for (; cPages ; ++ixBlock) {
pmb = (*pscn)[ixBlock];
do {
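            /* Locked blocks (pmb->cLocks != 0) only accept a change that keeps the
             * page valid and writable; a request that would reduce access on a locked
             * page is skipped, and fails the call only when it covers a single page. */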
if (!pmb->cLocks || ((ulPgMask & (PG_VALID_MASK | PG_PROT_WRITE)) == (PG_VALID_MASK | PG_PROT_WRITE))) {
DEBUGCHK(pmb->aPages[ixPage] != 0 && pmb->aPages[ixPage] != BAD_PAGE);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("Changing perms @%3.3x%x000 perm=%x\r\n"),
ixBlock, ixPage, ulPgMask));
#if defined(SH3) && (PAGE_SIZE == 1024)
pmb->aPages[ixPage] = (pmb->aPages[ixPage] & ~(PG_PERMISSION_MASK & ~PG_4K_MASK)) | ulPgMask;
#elif defined(SH4) && (PAGE_SIZE == 4096)
pmb->aPages[ixPage] = (pmb->aPages[ixPage] & ~(PG_PERMISSION_MASK & ~PG_1M_MASK)) | (ulPgMask & ~PG_1M_MASK);
#else
pmb->aPages[ixPage] = (pmb->aPages[ixPage] & ~PG_PERMISSION_MASK) | ulPgMask;
#endif
} else {
DEBUGMSG(1,(L"VirtualProtect: Cannot reduce access at %8.8lx, lock count %d\r\n",
((DWORD)lpvAddress & (SECTION_MASK<<VA_SECTION)) + (ixBlock<<VA_BLOCK) + (ixPage<<VA_PAGE), pmb->cLocks));
if (cbSize == PAGE_SIZE)
bRet = FALSE;
}
} while (--cPages && ++ixPage < PAGES_PER_BLOCK);
ixPage = 0; /* start with first page of next block */
}
// Since page permissions have been modified, it is necessary to flush the TLB.
InvalidateRange(lpvAddress, cbSize);
LeaveCriticalSection(&VAcs);
return bRet;
invalidParm:
LeaveCriticalSection(&VAcs);
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualProtect failed.\r\n")));
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE;
}
DWORD SC_VirtualQuery(
LPVOID lpvAddress, /* address of region */
PMEMORY_BASIC_INFORMATION pmbiBuffer, /* address of information buffer */
DWORD cbLength) /* size of buffer */
{
int ixBlock;
int ixPage;
int ixFirB; /* index of first block in region */
PSECTION pscn;
MEMBLOCK *pmb;
int cPages;
ulong ulPgPerm;
if (cbLength < sizeof(MEMORY_BASIC_INFORMATION)) {
KSetLastError(pCurThread, ERROR_BAD_LENGTH);
return 0;
}
DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualQuery @%8.8lx buf=%8.8lx cbLen=%d\r\n"),
lpvAddress, pmbiBuffer, cbLength));