📄 virtmem.c
字号:
++cPages; } while (++ixPage < PAGES_PER_BLOCK); } ixPage = 0; } while (++ixBlock < BLOCK_MASK+1 && ((pmb = (*pscn)[ixBlock]) == RESERVED_BLOCK || (pmb != NULL_BLOCK && pmb->ixBase == ixFirB))); } else { /* Count committed pages */ pmbiBuffer->State = MEM_COMMIT; ulPgPerm = pmb->aPages[ixPage] & PG_PERMISSION_MASK; pmbiBuffer->Protect = ProtectFromPerms(pmb->aPages[ixPage]); pmbiBuffer->Type = AllocationType[pmb->flags & MB_FLAG_PAGER_TYPE]; do { do { ulong ulPgInfo = pmb->aPages[ixPage]; if (ulPgInfo == 0 || ulPgInfo == BAD_PAGE || (ulPgInfo&PG_PERMISSION_MASK) != ulPgPerm) goto allCounted; ++cPages; } while (++ixPage < PAGES_PER_BLOCK); ixPage = 0; } while (++ixBlock < BLOCK_MASK+1 && (pmb = (*pscn)[ixBlock]) != NULL_BLOCK && pmb != RESERVED_BLOCK && pmb->ixBase == ixFirB); }allCounted: pmbiBuffer->AllocationBase = (LPVOID)(((ulong)lpvAddress&(SECTION_MASK<<VA_SECTION)) | ((ulong)ixFirB << VA_BLOCK)); } pmbiBuffer->RegionSize = (DWORD)cPages << VA_PAGE; pmbiBuffer->BaseAddress = (LPVOID)((ulong)lpvAddress & ~(PAGE_SIZE-1)); return sizeof(MEMORY_BASIC_INFORMATION);invalidParm: DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualQuery failed.\r\n"))); KSetLastError(pCurThread, ERROR_INVALID_PARAMETER); return 0;}BOOL DoLockPages(LPCVOID lpvAddress, /* address of region of committed pages */DWORD cbSize, /* size of the region */PDWORD pPFNs, /* address of array to receive real PFN's */int fOptions) /* options: see LOCKFLAG_* in kernel.h */{ int ixBlock; int ixPage; int ixFirB; /* index of first block in region */ int ixStart; /* index of start block of lock region */ PSECTION pscn; int cPages; /* # of pages to adjust */ ulong ulBase; /* base virtual address */ MEMBLOCK *pmb; int err = ERROR_INVALID_PARAMETER; DEBUGMSG(ZONE_VIRTMEM, (TEXT("LockPages @%8.8lx size=%lx pPFNs=%lx options=%x\r\n"), lpvAddress, cbSize, pPFNs, fOptions)); /* Verify that the requested region is within range and within an * existing reserved memory region that the client is allowed to * access and locate the 
starting block of the region. */ if ((ulong)lpvAddress>>VA_SECTION > SECTION_MASK) return TRUE; /* Lockout other changes to the virtual memory state. */ EnterCriticalSection(&VAcs); if (!cbSize || !lpvAddress) goto invalidParm; ulBase = (ulong)lpvAddress & (SECTION_MASK << VA_SECTION); pscn = SectionTable[ulBase>>VA_SECTION]; if (pscn == NULL_SECTION) goto invalidParm; ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK; if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE) goto invalidParm; /* Verify that all of the pages within the specified range belong to the * same VirtualAlloc region and are committed. * * (pscn) = ptr to section array * (ixBlock) = index of block containing the first page of lpvAddress * (ixFirB) = index of first block in the region */ ixPage = ((ulong)lpvAddress >> VA_PAGE) & PAGE_MASK; cPages = (((ulong)lpvAddress + cbSize + PAGE_SIZE-1) / PAGE_SIZE) - ((ulong)lpvAddress / PAGE_SIZE); if (ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, 0) == -1) goto invalidParm; /* Walk the page tables to check that all pages are present and to * increment the lock count for each MEMBLOCK in the region. 
*/ ixStart = ixBlock; for (; cPages ; ++ixBlock) { pmb = (*pscn)[ixBlock]; if (!(fOptions & LOCKFLAG_QUERY_ONLY)) { ++pmb->cLocks; DEBUGMSG(ZONE_VIRTMEM, (TEXT("Locking pages @%8.8x LC=%d\r\n"), ulBase | (ixBlock<<VA_BLOCK), pmb->cLocks)); } do { ulong addr = ulBase | (ixBlock<<VA_BLOCK) | (ixPage<<VA_PAGE); DEBUGCHK(pmb->aPages[ixPage] != BAD_PAGE); if (pmb->aPages[ixPage] == 0) { if (pmb->flags&MB_FLAG_AUTOCOMMIT) { if (SC_VirtualAlloc((void*)addr, cPages*PAGE_SIZE, MEM_COMMIT, PAGE_READWRITE) == 0) goto cleanUpLocks; } else { BOOL bRet; LeaveCriticalSection(&VAcs); bRet = ProcessPageFault(TRUE, addr); if (!bRet && !(fOptions & LOCKFLAG_WRITE)) bRet = ProcessPageFault(FALSE, addr); EnterCriticalSection(&VAcs); if (!bRet) { err = ERROR_NOACCESS; goto cleanUpLocks; } } } else { if (fOptions & LOCKFLAG_WRITE) { if (!IsPageWritable(pmb->aPages[ixPage])) { BOOL bRet; LeaveCriticalSection(&VAcs); bRet = ProcessPageFault(TRUE, addr); EnterCriticalSection(&VAcs); if (!bRet) { err = ERROR_NOACCESS; goto cleanUpLocks; } } } else if (!IsPageReadable(pmb->aPages[ixPage])) { err = ERROR_NOACCESS; goto cleanUpLocks; } if ((fOptions & LOCKFLAG_READ) && !IsPageReadable(pmb->aPages[ixPage])) { err = ERROR_NOACCESS; goto cleanUpLocks; } } if (pPFNs) *pPFNs++ = PFNfromEntry(pmb->aPages[ixPage]); } while (--cPages && ++ixPage < PAGES_PER_BLOCK); ixPage = 0; /* start with first page of next block */ } LeaveCriticalSection(&VAcs); return TRUE;cleanUpLocks: /* Unable to page in a page or this thread doesn't have the desired access * to a page in the range. Back out any locks which have been set. 
*/ if (!(fOptions & LOCKFLAG_QUERY_ONLY)) { do { --pmb->cLocks; DEBUGMSG(ZONE_VIRTMEM, (TEXT("Restoring lock count @%8.8x LC=%d\r\n"), ulBase | (ixBlock<<VA_BLOCK), pmb->cLocks)); } while (ixBlock != ixStart && (pmb = (*pscn)[--ixBlock])); }invalidParm: LeaveCriticalSection(&VAcs); DEBUGMSG(ZONE_VIRTMEM, (TEXT("LockPages failed.\r\n"))); KSetLastError(pCurThread, err); return FALSE;}BOOL SC_LockPages(LPVOID lpvAddress, DWORD cbSize, PDWORD pPFNs, int fOptions) { if (pCurProc->bTrustLevel != KERN_TRUST_FULL) { ERRORMSG(1,(L"SC_LockPages failed due to insufficient trust\r\n")); KSetLastError(pCurThread, ERROR_ACCESS_DENIED); return 0; } return DoLockPages(lpvAddress, cbSize, pPFNs, fOptions);}BOOL DoUnlockPages(LPCVOID lpvAddress, /* address of region of committed pages */DWORD cbSize) /* size of the region */{ int ixPage; int ixBlock; int ixFirB; /* index of first block in region */ int ixLastBlock; /* last block to be unlocked */ PSECTION pscn; int cPages; /* # of pages to adjust */ MEMBLOCK *pmb; DEBUGMSG(ZONE_VIRTMEM, (TEXT("UnlockPages @%8.8lx size=%lx\r\n"), lpvAddress, cbSize)); /* Verify that the requested region is within range and within an * existing reserved memory region that the client is allowed to * access and locate the starting block of the region. */ /* Lockout other changes to the virtual memory state. */ if ((ulong)lpvAddress>>VA_SECTION > SECTION_MASK) return TRUE; EnterCriticalSection(&VAcs); if (!cbSize || !lpvAddress) goto invalidParm; pscn = SectionTable[((ulong)lpvAddress>>VA_SECTION) & SECTION_MASK]; if (pscn == NULL_SECTION) goto invalidParm; ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK; if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE) goto invalidParm; /* Verify that all of the pages within the specified range belong to the * same VirtualAlloc region and are committed. 
* * (pscn) = ptr to section array * (ixBlock) = index of block containing the first page of lpvAddress * (ixFirB) = index of first block in the region */ ixPage = ((ulong)lpvAddress >> VA_PAGE) & PAGE_MASK; cPages = (((ulong)lpvAddress + cbSize + PAGE_SIZE-1) / PAGE_SIZE) - ((ulong)lpvAddress / PAGE_SIZE); if (ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, 0) == -1) goto invalidParm; /* Walk the page tables to decrement the lock count for * each MEMBLOCK in the region. */ ixLastBlock = ixBlock + ((ixPage+cPages+PAGES_PER_BLOCK-1) >> (VA_BLOCK-VA_PAGE)); for (; ixBlock < ixLastBlock ; ++ixBlock) { pmb = (*pscn)[ixBlock]; if (pmb->cLocks) --pmb->cLocks; else DEBUGCHK(0); DEBUGMSG(ZONE_VIRTMEM, (TEXT("Unlocking pages @%8.8x LC=%d\r\n"), ((ulong)lpvAddress&(SECTION_MASK<<VA_SECTION)) | (ixBlock<<VA_BLOCK), pmb->cLocks)); } LeaveCriticalSection(&VAcs); return TRUE;invalidParm: LeaveCriticalSection(&VAcs); DEBUGMSG(ZONE_VIRTMEM, (TEXT("LockPages failed.\r\n"))); KSetLastError(pCurThread, ERROR_INVALID_PARAMETER); return FALSE;}BOOL SC_UnlockPages(LPVOID lpvAddress, DWORD cbSize) { if (pCurProc->bTrustLevel != KERN_TRUST_FULL) { ERRORMSG(1,(L"SC_UnlockPages failed due to insufficient trust\r\n")); KSetLastError(pCurThread, ERROR_ACCESS_DENIED); return 0; } return DoUnlockPages(lpvAddress, cbSize);}LPBYTE GrabFirstPhysPage(DWORD dwCount);/* AutoCommit - auto-commit a page * * This function is called for a TLB miss due to a memory load or store. * It is invoked from the general exception handler on the kernel stack. * If the faulting page, is within an auto-commit region then a new page * will be allocated and committed. 
 */
BOOL AutoCommit(ulong addr) {
    register MEMBLOCK *pmb;
    PSECTION pscn;
    int ixSect, ixBlock, ixPage;
    ulong ulPFN;
    LPBYTE pMem;
    DWORD loop;

    /* Decompose the faulting VA into section / block / page indices. */
    ixPage = addr>>VA_PAGE & PAGE_MASK;
    ixBlock = addr>>VA_BLOCK & BLOCK_MASK;
    ixSect = addr>>VA_SECTION;

    /* Only act when the address maps to a real block in an auto-commit
     * region and the page entry is still empty (not yet committed). */
    if ((ixSect <= SECTION_MASK)
            && ((pscn = SectionTable[ixSect]) != NULL_SECTION)
            && ((pmb = (*pscn)[ixBlock]) != NULL_BLOCK)
            && (pmb != RESERVED_BLOCK)
            && (pmb->flags&MB_FLAG_AUTOCOMMIT)
            && !pmb->aPages[ixPage]) {
        /* Retry for up to 20 * 100ms (~2 seconds) waiting for free RAM. */
        for (loop = 0; loop < 20; loop++) {
            //
            // Notify OOM thread if we're low on memory.
            //
            if (GwesOOMEvent && (PageFreeCount < GwesCriticalThreshold)) {
                SetEvent(GwesOOMEvent);
            }
            //
            // We don't want to let AutoCommit deplete all physical memory.
            // Yes, the thread needing the memory committed is going to fault
            // unexpectedly if we can't allocate, so we'll loop for 2 seconds
            // checking for available RAM. We need to reserve a small amount
            // so that DemandCommit can allocate stack for the exception
            // handlers if required.
            //
            if ((PageFreeCount > 5)
                    && (pMem = (LPBYTE)KCall((PKFN)GrabFirstPhysPage,1))) {
                ulPFN = GetPFN(pMem);
                /* Map the new page in as read/write. */
                DEBUGMSG(ZONE_VIRTMEM, (TEXT("Auto-committing %8.8lx @%8.8lx\r\n"),
                        ulPFN, (ixSect<<VA_SECTION)+(ixBlock<<VA_BLOCK)+(ixPage*PAGE_SIZE)));
                pmb->aPages[ixPage] = ulPFN | PG_READ_WRITE;
                return TRUE;
            }
            Sleep(100);
        }
        /* Gave up: no physical page became available within the retry window. */
        RETAILMSG(1, (TEXT("WARNING (low memory) : Failed auto-commit of 0x%08X @ %d free pages\r\n"),
                addr, PageFreeCount));
        DEBUGCHK(0);
    }
    return FALSE;
}

/* GuardCommit - arm a committed guard page at (addr) by setting its valid
 * bit (and, on platforms that require it, clearing the guard bit).
 * Does nothing unless the address maps to a real block whose page entry
 * has a physical frame (non-permission bits set). */
void GuardCommit(ulong addr) {
    register MEMBLOCK *pmb;
    PSECTION pscn;
    int ixSect, ixBlock, ixPage;

    /* Decompose the VA into section / block / page indices. */
    ixPage = addr>>VA_PAGE & PAGE_MASK;
    ixBlock = addr>>VA_BLOCK & BLOCK_MASK;
    ixSect = addr>>VA_SECTION;
    if ((ixSect <= SECTION_MASK)
            && ((pscn = SectionTable[ixSect]) != NULL_SECTION)
            && ((pmb = (*pscn)[ixBlock]) != NULL_BLOCK)
            && (pmb != RESERVED_BLOCK)
            && (pmb->aPages[ixPage] & ~PG_PERMISSION_MASK)) {
        pmb->aPages[ixPage] |= PG_VALID_MASK;
#ifdef MUST_CLEAR_GUARD_BIT
        pmb->aPages[ixPage] &= ~PG_GUARD;
#endif
    }
}

/* Compile-time check (project macro): pager indices below assume the first
 * pager type is 1, so PageInFuncs can be indexed with (type - 1). */
ERRFALSE(MB_PAGER_FIRST==1);

/* Dispatch table mapping MEMBLOCK pager type to its page-in handler. */
FN_PAGEIN * const PageInFuncs[MB_MAX_PAGER_TYPE] = {
    LoaderPageIn,
    MappedPageIn
};

/** ProcessPageFault - general page fault handler
 *
 * This function is called from the exception handling code to attempt to handle
 * a fault due to a missing page. If the function is able to resolve the fault,
 * it returns TRUE.
 *
 * Environment:
 *   Kernel mode, preemptible, running on the thread's stack.
 */
BOOL ProcessPageFault(BOOL bWri
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -