
virtmem.c
WinCE 5.0 kernel source (excerpt)
Language: C
Page 1 of 5
int 
FindFirstBlock(
    PSECTION pscn,
    int ix
    ) 
{
    MEMBLOCK *pmb;
    do {
        if (ix < 0 || (pmb = (*pscn)[ix]) == NULL_BLOCK)
            return UNKNOWN_BASE;
        --ix;
    } while (pmb == RESERVED_BLOCK);
    return pmb->ixBase;
}
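
FindFirstBlock relies on a layout that this page never shows: a section is an array of MEMBLOCK pointers in which the block that starts a region holds the real MEMBLOCK, while the blocks that merely belong to that region hold the RESERVED_BLOCK sentinel. The standalone sketch below models just enough of that layout to exercise the same backward walk. Every type, size, and constant value in it (PAGES_PER_BLOCK, BLOCKS_PER_SECTION, the sentinel values, UNKNOWN_BASE) is an assumption made for illustration, not the WinCE 5.0 definition.

/* Sketch only: simplified stand-ins for the real kernel definitions. */
#include <stdio.h>

#define PAGES_PER_BLOCK     16                 /* assumed value */
#define BLOCKS_PER_SECTION  512                /* assumed value of BLOCK_MASK+1 */
#define NULL_BLOCK          ((MEMBLOCK *)0)
#define RESERVED_BLOCK      ((MEMBLOCK *)1)    /* sentinel, as used in the listing */
#define UNKNOWN_BASE        (-1)               /* assumed value */

typedef struct MEMBLOCK {
    int           ixBase;                      /* first block of the owning region */
    int           cUses;
    int           cLocks;
    unsigned long aPages[PAGES_PER_BLOCK];     /* per-page entries (0 = uncommitted) */
} MEMBLOCK;

typedef MEMBLOCK *(*PSECTION)[BLOCKS_PER_SECTION];

/* Same walk as FindFirstBlock above: step backwards over RESERVED_BLOCK
 * placeholders until the block that owns the region is reached. */
static int ModelFindFirstBlock(PSECTION pscn, int ix)
{
    MEMBLOCK *pmb;
    do {
        if (ix < 0 || (pmb = (*pscn)[ix]) == NULL_BLOCK)
            return UNKNOWN_BASE;
        --ix;
    } while (pmb == RESERVED_BLOCK);
    return pmb->ixBase;
}

int main(void)
{
    static MEMBLOCK *section[BLOCKS_PER_SECTION];   /* all NULL_BLOCK initially */
    static MEMBLOCK owner;

    owner.ixBase = 3;                /* region starts at block 3 */
    section[3] = &owner;             /* owning block holds the real MEMBLOCK */
    section[4] = RESERVED_BLOCK;     /* reserved tail of the same region */
    section[5] = RESERVED_BLOCK;

    printf("owner of block 5: %d\n", ModelFindFirstBlock(&section, 5));  /* prints 3 */
    return 0;
}

Compiling and running the sketch prints the owning block index (3) for a lookup that starts at a reserved block.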

//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
int 
ScanRegion(
    PSECTION pscn,      /* section array */
    int ixFirB,         /* first block in region */
    int ixBlock,        /* starting block to scan */
    int ixPage,         /* starting page in block */
    int cPgScan,        /* # of pages to scan */
    int *pcPages,       /* ptr to count of pages in region */
    DWORD dwVMBase      /* VM Base address of the section */
    )
{
    register int cPages;
    int cpAlloc;
    register MEMBLOCK *pmb;
    DWORD err;
    DEBUGMSG(ZONE_VIRTMEM, (TEXT("Scanning %d pages. ixFirB=%x, dwVMBase = %8.8lx\r\n"), cPgScan, ixFirB, dwVMBase));
    cpAlloc = 0;
    for (cPages = 0 ; cPages < cPgScan && ixBlock < BLOCK_MASK+1 ; ++ixBlock) {
        pmb = (*pscn)[ixBlock];
        DEBUGMSG(ZONE_VIRTMEM, (TEXT("Scanning block %8.8lx (%x), ix=%d cPgScan=%d\r\n"),
                pmb, (pmb!=RESERVED_BLOCK&&pmb!=NULL_BLOCK)?pmb->ixBase:-1, ixPage, cPgScan));
        if (pmb == RESERVED_BLOCK) {
            /* Reserved block. If not just counting pages, allocate a new block
             * and initialize it as part of the current region.
             */
            if (!pcPages) {
                if (!(pmb = MDAllocMemBlock (dwVMBase, ixBlock))) {
                    err = ERROR_NOT_ENOUGH_MEMORY;
                    goto setError;
                }
                pmb->alk = (*pscn)[ixFirB]->alk;
                pmb->flags = (*pscn)[ixFirB]->flags;
                pmb->hPf = (*pscn)[ixFirB]->hPf;
                pmb->cUses = 1;
                pmb->ixBase = ixFirB;
                (*pscn)[ixBlock] = pmb;
                DEBUGMSG(ZONE_VIRTMEM, (TEXT("ScanRegion: created block %8.8lx, ix=%d\r\n"), pmb, ixBlock));
            }
            ixPage = PAGES_PER_BLOCK - ixPage;  /* # of pages left in this block */
            if ((cPages += ixPage) > cPgScan) {
                cpAlloc += cPgScan - (cPages - ixPage);
                cPages = cPgScan;
            } else
                cpAlloc += ixPage;
        } else if (pmb == NULL_BLOCK || pmb->ixBase != ixFirB) {
            /* This block is not part of the original region. If not just
             * counting pages, then fail the request.
             */
            if (pcPages)
                break;      /* just counting, so not an error */
            err = ERROR_INVALID_PARAMETER;
            goto setError;
        } else {
            /* Scan block to count uncommitted pages and to verify that the
             * protection on any existing pages is compatible with the protection
             * being requested.
             */
            for ( ; cPages < cPgScan && ixPage < PAGES_PER_BLOCK
                    ; ++ixPage, ++cPages) {
                if (pmb->aPages[ixPage] == BAD_PAGE) {
                    if (pcPages)
                        goto allDone;
                    err = ERROR_INVALID_PARAMETER;
                    goto setError;
                } else if (pmb->aPages[ixPage] == 0)
                    ++cpAlloc;      /* count # of pages to allocate */
            }
        }
        ixPage = 0;            /* start with first page of next block */
    }
allDone:
    if (pcPages)
        *pcPages = cPages;      /* return # of pages in the region */
    return cpAlloc;

setError:
    DEBUGMSG(ZONE_VIRTMEM, (TEXT("ScanRegion failed err=%d\r\n"), err));
    KSetLastError(pCurThread, err);
    return -1;
}
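
ScanRegion is used in two modes selected by pcPages: with a non-NULL pcPages it only measures the region (it neither allocates MEMBLOCKs for RESERVED_BLOCK entries nor treats a foreign block or BAD_PAGE as an error), while with pcPages == NULL it materializes the reserved blocks and fails on any inconsistency. The fragment below is a hypothetical caller sketching that two-pass protocol against the declarations in this file; SketchCommitProbe and cPgReq are names invented here, and the real callers of ScanRegion do not appear on this page.

/* Hypothetical caller (illustration only): how a commit path might use the two
 * modes of ScanRegion. The index/base arguments are assumed to come from the
 * caller's own decoding of the target virtual address. */
static BOOL SketchCommitProbe(PSECTION pscn, int ixFirB, int ixBlock,
                              int ixPage, int cPgReq, DWORD dwVMBase)
{
    int cpRegion;   /* # of pages actually inside the region (via pcPages) */
    int cpAlloc;    /* # of pages that still need physical backing */

    /* Pass 1: pcPages != NULL -> just count; never allocates MEMBLOCKs. */
    if (ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPgReq, &cpRegion, dwVMBase) < 0)
        return FALSE;
    if (cpRegion < cPgReq)
        return FALSE;   /* request extends past the reserved region */

    /* Pass 2: pcPages == NULL -> create blocks for RESERVED_BLOCK entries,
     * verify every page, and report how many pages must be committed. */
    cpAlloc = ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPgReq, NULL, dwVMBase);
    return (cpAlloc >= 0);
}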

static LPDWORD DecommitOnePage (PSECTION pscn, int ixBlock, int ixPage, LPVOID pvAddr, BOOL bIgnoreLock, LPDWORD pPageList)
{
    MEMBLOCK *pmb = (*pscn)[ixBlock];
    DWORD dwEntry;

    DEBUGCHK (NULL_BLOCK != pmb);
    if ((RESERVED_BLOCK != pmb)
        && (bIgnoreLock || !pmb->cLocks)
        && (dwEntry = pmb->aPages[ixPage])
        && (BAD_PAGE != dwEntry)) {

        LPDWORD p;

        // Set the valid bit before calling OEMCacheRangeFlush, or we might take an
        // exception if the cache is physically tagged (MIPS/SHx) and the page is
        // committed as no-access/guard-page.
        pmb->aPages[ixPage] |= PG_VALID_MASK;
        OEMCacheRangeFlush (MapPtrProc (pvAddr, pCurProc), PAGE_SIZE, CACHE_SYNC_DISCARD|CACHE_SYNC_INSTRUCTIONS);
        DEBUGMSG(ZONE_VIRTMEM, (TEXT("Decommitting one page %8.8lx @%3.3x%x000\r\n"), PFNfromEntry(dwEntry), ixBlock, ixPage));
        pmb->aPages[ixPage] = 0;
        if (p = PhysPageToZero (PFNfromEntry(dwEntry))) {
            DEBUGMSG (ZONE_VIRTMEM, (L"Delay Freeing Page (%8.8lx)\r\n", p));
            DEBUGCHK ((DWORD) p & 0x20000000);
            *p = (DWORD) pPageList;
            pPageList = p;
        }
        InvalidateRange (pvAddr, PAGE_SIZE);
    }

    return pPageList;
}
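
DecommitOnePage (and the block loop in DecommitPages further down) frees pages without allocating any bookkeeping memory: the previous head of pPageList is written into the first DWORD of the page being freed, through the pointer returned by PhysPageToZero, and FreePageList later walks that chain. The standalone sketch below shows the same intrusive free-list idea with heap buffers standing in for physical pages; every name and size in it is invented for illustration.

/* Sketch only: an intrusive "delayed free" list, with malloc'd buffers in
 * place of the kernel's physical pages. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096    /* assumed page size */

/* Push a page onto the list: its first word stores the previous head, so the
 * list itself consumes no extra memory. */
static void *PushDelayedFree(void *pvPage, void *pvPageList)
{
    *(void **)pvPage = pvPageList;   /* link to previous head */
    return pvPage;                   /* this page becomes the new head */
}

/* Drain the list, mirroring FreePageList: read the link out of the page
 * before recycling the page. */
static void DrainDelayedFree(void *pvPageList)
{
    while (pvPageList) {
        void *pvNext = *(void **)pvPageList;
        memset(pvPageList, 0, SKETCH_PAGE_SIZE);   /* stand-in for ZeroAndLinkPhysPage */
        free(pvPageList);
        pvPageList = pvNext;
    }
}

int main(void)
{
    void *pvPageList = NULL;
    void *pv1 = malloc(SKETCH_PAGE_SIZE);
    void *pv2 = malloc(SKETCH_PAGE_SIZE);

    if (pv1) pvPageList = PushDelayedFree(pv1, pvPageList);
    if (pv2) pvPageList = PushDelayedFree(pv2, pvPageList);
    DrainDelayedFree(pvPageList);
    printf("delayed-free list drained\n");
    return 0;
}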

//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
LPDWORD
DecommitPages(
    PSECTION pscn,      /* section array */
    int ixBlock,        /* starting block to decommit */
    int ixPage,         /* starting page in block */
    int cPages,         /* # of pages to decommit */
    DWORD baseScn,      /* base address of section */
    BOOL bIgnoreLock,   /* ignore lock count when decommitting */
    LPDWORD pPageList   /* the accumulated page list to be freed */
    )
{
    register MEMBLOCK *pmb;
    PHYSICAL_ADDRESS paPFN;
    int cbInvalidate;
    PVOID pvInvalidate = (PVOID)(baseScn + (ixBlock<<VA_BLOCK) + (ixPage<<VA_PAGE));

    if (1 == cPages)
        return DecommitOnePage (pscn, ixBlock, ixPage, pvInvalidate, bIgnoreLock, pPageList);

    cbInvalidate = cPages * PAGE_SIZE;
    
    /* Walk the page tables to free the physical pages. */
    DEBUGMSG(ZONE_VIRTMEM, (TEXT("DecommitPages: %d pages from %3.3x%x000\r\n"), cPages, ixBlock, ixPage));
    OEMCacheRangeFlush (0, 0, CACHE_SYNC_DISCARD|CACHE_SYNC_INSTRUCTIONS);
    for (; cPages > 0 ; ++ixBlock) {
        DEBUGCHK(ixBlock < BLOCK_MASK+1);
        if ((pmb = (*pscn)[ixBlock]) == RESERVED_BLOCK)
            cPages -= PAGES_PER_BLOCK - ixPage;
        else {
            DEBUGCHK(pmb != NULL_BLOCK);
            DEBUGMSG(ZONE_VIRTMEM,(TEXT("Decommitting block (%8.8lx) uc=%d ixPg=%d cPages=%d\r\n"),
                    pmb, pmb->cUses, ixPage, cPages));
            if (pmb->cUses > 1 && ixPage == 0 && (cPages >= PAGES_PER_BLOCK || pmb->aPages[cPages] == BAD_PAGE)) {
                /* Decommitting a duplicated block. Remove the additional
                 * use of the block and change the block entry to RESERVED_BLOCK. */
                (*pscn)[ixBlock] = RESERVED_BLOCK;
                --pmb->cUses;
                cPages -= PAGES_PER_BLOCK;
            } else if (bIgnoreLock || !pmb->cLocks) {
                PmbDecommitting = pmb;
                for ( ; cPages && ixPage < PAGES_PER_BLOCK ; ++ixPage, --cPages) {
                    if ((paPFN = pmb->aPages[ixPage]) != 0 && paPFN != BAD_PAGE) {
                        LPDWORD p;
                        paPFN = PFNfromEntry(paPFN);
                        DEBUGMSG(ZONE_VIRTMEM, (TEXT("Decommitting %8.8lx @%3.3x%x000\r\n"), paPFN, ixBlock, ixPage));
                        pmb->aPages[ixPage] = 0;
                        if (p = PhysPageToZero (paPFN)) {
                            DEBUGMSG (ZONE_VIRTMEM, (L"Delay Freeing Page (%8.8lx)\r\n", p));
                            DEBUGCHK ((DWORD) p & 0x20000000);
                            *p = (DWORD) pPageList;
                            pPageList = p;
                        }
                    }
                }
                PmbDecommitting = 0;
            } else {
                cPages -= PAGES_PER_BLOCK - ixPage;
                DEBUGMSG(1,(L"DecommitPages: Cannot decommit block at %8.8lx, lock count %d\r\n",
                    (PVOID)(baseScn + (ixBlock<<VA_BLOCK) + (ixPage<<VA_PAGE)),pmb->cLocks));
            }
        }
        ixPage = 0;            /* start with first page of next block */
    }
    // Since pages have been removed from the page tables, it is necessary to flush the TLB.
    InvalidateRange(pvInvalidate, cbInvalidate);
    return pPageList;
}

void FreePageList (LPDWORD pPageList)
{
    LPDWORD pCurrPage;
    for (pCurrPage = pPageList; pCurrPage; pCurrPage = pPageList) {
        DEBUGMSG (ZONE_VIRTMEM, (L"FreePageList Freeing Page (%8.8lx, pfn = %8.8lx)\r\n", pCurrPage, GetPFN ((DWORD) pCurrPage & ~0x20000000)));
        pPageList = (LPDWORD) *pCurrPage;
        ZeroAndLinkPhysPage (pCurrPage);
    }
}

//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
void
ReleaseRegion(
    PSECTION pscn,          /* section array */
    int ixFirB              /* first block in region */
    )
{
    register MEMBLOCK *pmb;
    register int ixBlock;
    /* Walk the section array to free the MEMBLOCK entries. */
    for (ixBlock = ixFirB ; ixBlock < BLOCK_MASK+1 ; ++ixBlock) {
        pmb = (*pscn)[ixBlock];
        if (pmb == RESERVED_BLOCK)
            (*pscn)[ixBlock] = NULL_BLOCK;
        else if (pmb == NULL_BLOCK || pmb->ixBase != ixFirB)
            break;
        else {
#ifdef DEBUG
            int ix;
            ulong ulPFN;
            for (ix = 0 ; ix < PAGES_PER_BLOCK ; ++ix) {
                if ((ulPFN = pmb->aPages[ix]) != 0 && ulPFN != BAD_PAGE) {
                    DEBUGMSG(ZONE_VIRTMEM, (TEXT("ReleaseRegion: Committed page found: %8.8lx @%3.3x%x000\r\n"),
                        ulPFN, ixBlock, ix));
                    DEBUGCHK(0);
                }
            }
#endif
            DEBUGMSG(ZONE_VIRTMEM, (TEXT("Releasing memblock %8.8lx @%3.3x0000\r\n"), pmb, ixBlock));
            (*pscn)[ixBlock] = NULL_BLOCK;
            MDFreeMemBlock (pmb);
        }
    }
    /* Note: since no pages are freed by this routine, no TLB flushing is needed. */
}
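
ReleaseRegion only tears down MEMBLOCK bookkeeping; its DEBUG loop asserts that every page was already decommitted before the region is released. The fragment below is a hypothetical caller sketching that ordering against the functions in this file: decommit the region's pages, return the collected pages to the free pool, then release the region. SketchReleaseWholeRegion and cBlocksInRegion are invented names, the exact block count is assumed to be known by the caller, and the real release path is not shown on this page.

/* Hypothetical caller (illustration only): tear down a whole region that
 * starts at block ixFirB and spans cBlocksInRegion blocks. */
static void SketchReleaseWholeRegion(PSECTION pscn, int ixFirB, int cBlocksInRegion, DWORD baseScn)
{
    LPDWORD pPageList;

    /* 1. Decommit every page of the region, ignoring lock counts, and collect
     *    the freed pages on the delayed-free list. */
    pPageList = DecommitPages(pscn, ixFirB, 0, cBlocksInRegion * PAGES_PER_BLOCK,
                              baseScn, TRUE, NULL);

    /* 2. Zero the collected pages and hand them back to the free pool. */
    FreePageList(pPageList);

    /* 3. Now that no committed pages remain, free the MEMBLOCK entries. */
    ReleaseRegion(pscn, ixFirB);
}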



//------------------------------------------------------------------------------
//  IsAccessOK() - check access permissions for an address
// 
//   This function checks the access permissions for an address. For user
//  virtual addresses, only the access key for the memory region is checked.
//  Kernel space addresses are always allowed because access to them is not
//  access key dependent.
// 
//  This function doesn't check that the page is either present or valid.
// 
//  Environment:
//    Kernel mode, preemptible, running on the thread's stack.
//------------------------------------------------------------------------------
BOOL 
IsAccessOK(
    void *addr,
    ACCESSKEY aky
    ) 
{
    if (!IsKernelVa (addr)) {

        PSECTION pscn = IsSecureVa (addr)? &NKSection : SectionTable[(ulong)addr >> VA_SECTION];
        MEMBLOCK *pmb;
        int ixBlock;

        ixBlock = ((ulong)addr>>VA_BLOCK) & BLOCK_MASK;

        if ((pmb = (*pscn)[ixBlock]) != NULL_BLOCK) {
            if (pmb == RESERVED_BLOCK)
                pmb = (*pscn)[FindFirstBlock(pscn, ixBlock)];
            if (!TestAccess(&pmb->alk, &aky)) {
                DEBUGMSG(1, (TEXT("IsAccessOK returning FALSE\r\n")));
                return FALSE;
            }
        }
    }
    return TRUE;
}
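
IsAccessOK finds the MEMBLOCK for an address by splitting the virtual address into a section index (shift by VA_SECTION) and a block index (shift by VA_BLOCK, masked with BLOCK_MASK); other routines in this file also derive a page index via VA_PAGE. The standalone sketch below shows that decomposition with assumed shift and mask values; the real constants live in headers that are not part of this listing.

/* Sketch only: decompose a 32-bit virtual address into section/block/page
 * indexes. The shifts and masks below are assumptions for illustration. */
#include <stdio.h>

#define VA_SECTION   25            /* assumed: 32 MB sections */
#define VA_BLOCK     16            /* assumed: 64 KB blocks  */
#define VA_PAGE      12            /* assumed: 4 KB pages    */
#define BLOCK_MASK   0x1FF         /* assumed: blocks per section - 1 */
#define PAGE_MASK_IX 0x0F          /* assumed: pages per block - 1 */

static void SplitVirtualAddress(unsigned long addr)
{
    unsigned long ixSection = addr >> VA_SECTION;
    unsigned long ixBlock   = (addr >> VA_BLOCK) & BLOCK_MASK;
    unsigned long ixPage    = (addr >> VA_PAGE) & PAGE_MASK_IX;

    printf("addr %08lx -> section %lu, block %lu, page %lu\n",
           addr, ixSection, ixBlock, ixPage);
}

int main(void)
{
    SplitVirtualAddress(0x04012345UL);
    return 0;
}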


BOOL g_fForcedPaging = TRUE; // This is global because DbgVerify is called from DBG_CallCheck too


/*++

Routine Name:

    DbgVerify

Routine Description:

    This function takes a void pointer to an address that the caller wants to
    read from or write to and determines whether that access is possible. In the
    process, addresses in pages that are not currently mapped (paged in) will be
    paged in if forced-paging mode is ON.

    Most of the work is done through calls to VerifyAccess(), which determines
    specific characteristics of the page in question and specifies whether access
    is allowed.

Argument(s):

    pvAddr - [IN] the address to be verified
    fProbeOnly - [IN] only test the presence of the VM; do not page in even if
                      forced paging is ON
    pfPageInFailed - [OUT] optional; set if a page needed to be paged in but was
                           not because forced paging was disabled

Return Value:

    Returns the same address if access is okay, a different address, or NULL.

--*/
PVOID
DbgVerify(
    PVOID pvAddr,
    BOOL fProbeOnly,
    BOOL* pfPageInFailed
)
{    
    BOOL fPageInFailed = FALSE;
    void *pvRet = NULL;
    int flVerify = VERIFY_KERNEL_OK;
    int flLock = LOCKFLAG_QUERY_ONLY | LOCKFLAG_READ;
    
    // Just verify that the Virtual address is valid (no page-in)
    if (!(pvRet = VerifyAccess (pvAddr, flVerify, (ACCESSKEY) -1)))
    { // VA invalid:
        if (g_fForcedPaging && !fProbeOnly)
        { // Forced Page-in active (intrusive debugger):
            if (!IsKernelVa (pvAddr) && !InSysCall () && 
