virtmem.c (C source, page 1 of 5)
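
/* NOTE: this page of the listing begins partway through the kernel's
 * VirtualAlloc implementation (presumably SC_VirtualAlloc, by analogy with
 * SC_VirtualFree below), inside its MEM_RESERVE path; the start of the
 * function, and of the block comment continued below, is cut off. */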
                 * region which must have all memblocks filled in so that
                 * pages can be committed without entering the virtual memory
                 * critical section. */
                if (!(pmb = AllocMem(HEAP_MEMBLOCK))) {
                    err = ERROR_NOT_ENOUGH_MEMORY;
                    goto setError;
                }
                memset(pmb,0,sizeof(MEMBLOCK));
                pmb->alk = (*pscn)[ixFirB]->alk;
                pmb->flags = (*pscn)[ixFirB]->flags;
                pmb->cUses = 1;
                pmb->ixBase = ixFirB;
                (*pscn)[ix] = pmb;
                DEBUGMSG(ZONE_VIRTMEM,
                        (TEXT("VirtualAlloc: created a tail block pmb=%8.8lx (%x)\r\n"),
                        pmb, ixBlock));
            } else
                (*pscn)[ix] = RESERVED_BLOCK;
        }
        DEBUGMSG(ZONE_VIRTMEM, (TEXT("Reserved %d pages @%8.8lx\r\n"), cPages, lpvAddress));
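        /* A nonzero cNeed here means the block-granular reservation covers
         * more page slots than were requested; the surplus entries at the end
         * of the tail MEMBLOCK are stamped BAD_PAGE below. */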
        if (cNeed) {
            /* Set unused entries to BAD_PAGE */
            DEBUGMSG(ZONE_VIRTMEM, (TEXT("Reserved %d extra pages.\r\n"), -cNeed));
            cNeed += PAGES_PER_BLOCK;
            for (ix = cNeed ; ix < PAGES_PER_BLOCK ; ++ix)
                pmb->aPages[ix] = BAD_PAGE;
        }
        /* If not committing pages, then return the address of the
         * reserved region */
        if (!(fdwAllocationType & MEM_COMMIT)) {
            LeaveCriticalSection(&VAcs);
            ERRORMSG(!lpvAddress,(L"Failed VirtualAlloc/reserve of %8.8lx bytes\r\n",cbSize));
            CELOG_VirtualAlloc((DWORD)lpvAddress, (DWORD)lpvAddress, cbSize, fdwAllocationType, fdwProtect);
            return lpvAddress;
        }
    } else {
        /* Not reserving memory, so must be committing. Verify that the
         * requested region is within range and within an existing reserved
         * memory region that the client is allowed to access.
         */
        if (cbSize > (1<<VA_SECTION))
            goto invalidParm;
        if (!(fdwAllocationType & MEM_COMMIT) || lpvAddress == 0)
            goto invalidParm;
        if ((ulong)lpvAddress>>VA_SECTION > SECTION_MASK)
            goto invalidParm;
        pscn = SectionTable[((ulong)lpvAddress>>VA_SECTION) & SECTION_MASK];
        if (pscn == NULL_SECTION)
            goto invalidParm;
        ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK;
        /* Adjust lpvAddress to PAGE boundary and calculate the number
         * of pages to commit. */
        lpvAddress = (LPVOID)((ulong)lpvAddress & ~(PAGE_SIZE-1));
        cPages = (ulong)(lpbEnd - (LPBYTE)lpvAddress + PAGE_SIZE-1) / PAGE_SIZE;
        /* locate the starting block of the region */
        if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE)
            goto invalidParm;
    }
    /* Verify that cPages of memory starting with the first page indicated by
     * lpvAddress can be committed within the region.
     *
     *  (pscn) = ptr to section array
     *  (ixFirB) = index of the first block in the region
     *  (ixBlock) = index of block containing the first page to commit
     *  (cPages) = # of pages to commit
     */
    ixPage = ((ulong)lpvAddress >> VA_PAGE) & PAGE_MASK;
    cpAlloc = ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, 0);
    if (cpAlloc == -1)
        goto cleanUp;
    /* Commit cPages of memory starting with the first page indicated by
     * lpvAddress. Allocate all required pages before any changes to the
     * virtual region are performed.
     *
     *  (pscn) = ptr to section array
     *  (ixBlock) = index of block containing the first page to commit
     *  (cPages) = # of pages to commit
     *  (cpAlloc) = # of physical pages required
     */
    DEBUGMSG(ZONE_VIRTMEM, (TEXT("Allocating %d pages.\r\n"), cpAlloc));
    if (!HoldPages(cpAlloc, FALSE)) {
        err = ERROR_NOT_ENOUGH_MEMORY;
        goto setError;
    }
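    /* Position ixBlock at the block containing the last page to commit and ix
     * just past that page's index within the block; the loop below then fills
     * pages from the end of the range back toward lpvAddress. */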
    ixBlock = (ixBlock*PAGES_PER_BLOCK+ixPage+cPages-1)/PAGES_PER_BLOCK;
    ix = ((ixPage + cPages - 1) % PAGES_PER_BLOCK) + 1;
    /* Walk the page tables to map in the physical pages. */
    for (; cPages ; --ixBlock) {
        pmb = (*pscn)[ixBlock];
        for (; cPages && ix-- > 0 ; --cPages) {
            if (pmb->aPages[ix] == 0) {
                DWORD dwRetries;
                for (dwRetries = 0; (dwRetries < 20) && !(ulPFN = GetHeldPage()); dwRetries++)
                    Sleep(100);
                if (ulPFN) {
                    DEBUGMSG(ZONE_VIRTMEM, (TEXT("Mapping %8.8lx @%3.3x%x000 perm=%x\r\n"),
                            ulPFN, ixBlock, ix, ulPgMask));
                    pmb->aPages[ix] = ulPFN | ulPgMask;
                } else {
                    InterlockedIncrement(&PageFreeCount);
                    RETAILMSG(1,(L"--->>> VirtualAlloc: FATAL ERROR!  COMPLETELY OUT OF MEMORY (%8.8lx)!\r\n",PageFreeCount));
                }
            } else
                pmb->aPages[ix] = (pmb->aPages[ix] & ~PG_PERMISSION_MASK) | ulPgMask;
        }
        ix = PAGES_PER_BLOCK;            /* start with last page of previous block */
    }
    InvalidateRange(lpvAddress, cbSize); // in case we changed permissions above
    LeaveCriticalSection(&VAcs);
    ERRORMSG(!lpvAddress,(L"Failed VirtualAlloc(%8.8lx) of %8.8lx bytes\r\n",fdwAllocationType,cbSize));
    
    CELOG_VirtualAlloc((DWORD)lpvAddress, (DWORD)lpvAddress, cbSize, fdwAllocationType, fdwProtect);
    return lpvAddress;
    /* There was an error reserving or committing a range of pages. If reserving
     * pages, release any pages which were reserved before the failure occurred.
     */
invalidParm:
    err = ERROR_INVALID_PARAMETER;
setError:
    DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualAlloc failed. error=%d\r\n"), err));
    KSetLastError(pCurThread, err);
cleanUp:
    if (fdwAllocationType & MEM_RESERVE && ixFirB != UNKNOWN_BASE) {
        /* Remove the reservation */
        ReleaseRegion(pscn, ixFirB);
    }
    LeaveCriticalSection(&VAcs);
    ERRORMSG(!lpvAddress,(L"Failed VirtualAlloc(%8.8lx) of %8.8lx bytes\r\n",fdwAllocationType,cbSize));
    return 0;
}
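
/* Illustrative usage sketch (not part of the original source): callers of the
 * exported VirtualAlloc normally reserve an address range first and then
 * commit pages inside it on demand, which exercises both branches above.
 * The sizes and protection flags are arbitrary examples.
 *
 *     LPVOID base = VirtualAlloc(0, 64*1024, MEM_RESERVE, PAGE_NOACCESS);
 *     if (base)
 *         VirtualAlloc(base, 0x1000, MEM_COMMIT, PAGE_READWRITE);
 *     ...
 *     VirtualFree(base, 0, MEM_RELEASE);
 */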

BOOL SC_VirtualFree(
LPVOID lpvAddress,  /* address of region of committed pages */
DWORD cbSize,       /* size of the region */
DWORD fdwFreeType)  /* type of free operation */
{
    int ixBlock;
    int ixPage;
    int ixFirB;         /* index of first block in region */
    PSECTION pscn;
    int cpReserved;     /* # of reserved (not committed) pages in region */
    int cpRegion;       /* total # of pages in region */
    int cPages;         /* # of pages to free */
    DWORD baseScn;		/* base address of section */
    BOOL bForceDecommit;
    /* Verify that the requested region is within range and within an
     * existing reserved memory region that the client is allowed to
     * access and locate the starting block of the region.
     */
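    /* The top bit of fdwFreeType is used as an internal "force decommit" flag;
     * it is peeled off here and is not one of the public MEM_* flags. */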
    bForceDecommit = (fdwFreeType & 0x80000000);
    fdwFreeType &= ~0x80000000;
    CELOG_VirtualFree((DWORD)lpvAddress, cbSize, fdwFreeType);
    DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree @%8.8lx size=%lx freetype=%lx\r\n"),
            lpvAddress, cbSize, fdwFreeType));
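    /* A zero-size MEM_RELEASE of an address at or above FIRST_MAPPER_ADDRESS
     * is a huge allocation and is handed straight to HugeVirtualRelease. */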
    if (((DWORD)lpvAddress >= FIRST_MAPPER_ADDRESS) && !cbSize && (fdwFreeType == MEM_RELEASE)) {
        DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree @%8.8lx size=%lx freetype=%lx : doing HugeVirtualRelease\r\n"),
                lpvAddress, cbSize, fdwFreeType));
        return HugeVirtualRelease(lpvAddress);
    }
    /* Lock out other changes to the virtual memory state. */
    EnterCriticalSection(&VAcs);
    if (lpvAddress == 0 || (ulong)lpvAddress>>VA_SECTION > SECTION_MASK)
        goto invalidParm;
    pscn = SectionTable[((ulong)lpvAddress>>VA_SECTION) & SECTION_MASK];
    if (pscn == NULL_SECTION)
        goto invalidParm;
    baseScn = (DWORD)lpvAddress & (SECTION_MASK<<VA_SECTION);
    ixBlock = ((ulong)lpvAddress >> VA_BLOCK) & BLOCK_MASK;
    if ((ixFirB = FindFirstBlock(pscn, ixBlock)) == UNKNOWN_BASE)
        goto invalidParm;
    /* Verify the status of the region based upon the type of free operation
     * being performed.
     *
     *  (pscn) = ptr to section array
     *  (ixBlock) = index of block containing the first page of lpvAddress
     *  (ixFirB) = index of first block in the region
     */
    ixPage = ((ulong)lpvAddress >> VA_PAGE) & PAGE_MASK;
    if (fdwFreeType == MEM_RELEASE) {
        DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree releasing region ixBlock=%x.\r\n"),ixBlock));
        if (cbSize != 0 || ixPage != 0 || ixBlock != ixFirB)
            goto invalidParm;
        cpReserved = ScanRegion(pscn, ixFirB, ixFirB, 0, (BLOCK_MASK+1)*PAGES_PER_BLOCK, &cpRegion);
        DEBUGCHK(cpReserved != -1);
        DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree: cpReserved=%d cpRegion=%d\r\n"), cpReserved, cpRegion));
        /* The entire region must be either reserved or committed. */
        if (cpReserved != cpRegion) {
            if (cpReserved != 0)
                goto invalidParm;
            DecommitPages(pscn, ixFirB, 0, cpRegion, baseScn, bForceDecommit);
        }
        ReleaseRegion(pscn, ixFirB);
        LeaveCriticalSection(&VAcs);
        return TRUE;
    } else if (fdwFreeType == MEM_DECOMMIT) {
        DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree decommitting block %x page %x.\r\n"), ixBlock, ixPage));
        if (cbSize == 0)
            goto invalidParm;
        cPages = (((ulong)lpvAddress + cbSize + PAGE_SIZE-1) / PAGE_SIZE)
                - ((ulong)lpvAddress / PAGE_SIZE);
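        /* cPages counts every page touched by the byte range
         * [lpvAddress, lpvAddress+cbSize), including partial pages at either end. */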
        cpReserved = ScanRegion(pscn, ixFirB, ixBlock, ixPage, cPages, &cpRegion);
        if (cpRegion < cPages)
            goto invalidParm;
        if (cpReserved != cPages)
            DecommitPages(pscn, ixBlock, ixPage, cPages, baseScn, bForceDecommit);
        LeaveCriticalSection(&VAcs);
        return TRUE;
    }
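    /* Any fdwFreeType other than MEM_RELEASE or MEM_DECOMMIT falls through and
     * fails with ERROR_INVALID_PARAMETER. */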
invalidParm:
    LeaveCriticalSection(&VAcs);
    DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualFree failed.\r\n")));
    KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
    return FALSE;
}

/*
	@doc INTERNAL
	@func BOOL | VirtualCopy | Duplicate a virtual memory range (Windows CE Only)
	@parm LPVOID | lpvDest | address of destination pages 
	@parm LPVOID | lpvSrc | address of source pages 
    @parm DWORD | cbSize | number of bytes to copy 
    @parm DWORD | fdwProtect | access protection for destination pages
	@comm Description unavailable at this time.
*/
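
/* Illustrative usage sketch (not part of the original source): a driver might
 * map a physical register block into a reserved virtual range. With
 * PAGE_PHYSICAL the source argument is taken as the physical address shifted
 * right by 8 (see PFNfrom256 below); physAddr and cbRegs are placeholders and
 * the protection flags shown are typical but illustrative.
 *
 *     LPVOID va = VirtualAlloc(0, cbRegs, MEM_RESERVE, PAGE_NOACCESS);
 *     if (va)
 *         VirtualCopy(va, (LPVOID)(physAddr >> 8), cbRegs,
 *                     PAGE_READWRITE | PAGE_NOCACHE | PAGE_PHYSICAL);
 */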

BOOL DoVirtualCopy(
LPVOID lpvDest,         /* address of destination pages */
LPVOID lpvSrc,          /* address of source pages */
DWORD cbSize,           /* # of bytes to copy */
DWORD fdwProtect)       /* access protection for destination pages */
{
    int ixDestBlk, ixSrcBlk;
    int ixPage;
    int ixDestFirB;     /* index of first block in destination region */
    int ixSrcFirB;      /* index of first block in source region */
    PSECTION pscnDest;
    PSECTION pscnSrc;
    int cpReserved;     /* # of reserved (not committed) pages in region */
    int cpRegion;       /* total # of pages in region */
    int cPages;         /* # of pages to copy */
    ulong ulPFN;        /* page physical frame number */
    ulong ulPgMask;
    BOOL bPhys = FALSE; /* TRUE if mapping physical pages */
    /* Verify that the requested regions are within range and within
     * existing reserved memory regions that the client is allowed to
     * access and locate the starting block of both regions.
     */
    CELOG_VirtualCopy((DWORD)lpvDest, (DWORD)lpvSrc, cbSize, fdwProtect);
    DEBUGMSG(ZONE_VIRTMEM, (TEXT("VirtualCopy %8.8lx <= %8.8lx size=%lx prot=%lx\r\n"),
            lpvDest, lpvSrc, cbSize, fdwProtect));
    if (fdwProtect & PAGE_PHYSICAL) {
        bPhys = TRUE;
        fdwProtect &= ~PAGE_PHYSICAL;
    }
    if ((ulPgMask = MakePagePerms(fdwProtect)) == 0)
        return FALSE;   /* invalid protection flags, error # already set */
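    /* MakePagePerms converts the PAGE_* protection flags into the hardware
     * permission bits (ulPgMask) that get OR'ed into each page entry; a
     * return of 0 indicates invalid flags. */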
    /* Lock out other changes to the virtual memory state. */
    EnterCriticalSection(&VAcs);
    /* Validate the destination parameters */
    if (!cbSize || !lpvDest || (ulong)lpvDest>>VA_SECTION > SECTION_MASK)
        goto invalidParm;
    pscnDest = SectionTable[((ulong)lpvDest>>VA_SECTION) & SECTION_MASK];
    if (pscnDest == NULL_SECTION)
        goto invalidParm;
    ixDestBlk = ((ulong)lpvDest >> VA_BLOCK) & BLOCK_MASK;
    ixDestFirB = FindFirstBlock(pscnDest, ixDestBlk);
    if (ixDestFirB == UNKNOWN_BASE)
        goto invalidParm;
    /* Verify that all of the destination pages are reserved (not committed). */
    ixPage = ((ulong)lpvDest >> VA_PAGE) & PAGE_MASK;
    cPages = (((ulong)lpvDest + cbSize + PAGE_SIZE-1) / PAGE_SIZE) - ((ulong)lpvDest / PAGE_SIZE);
    cpReserved = ScanRegion(pscnDest, ixDestFirB, ixDestBlk, ixPage, cPages, 0);
    if (cpReserved != cPages)
        goto invalidParm;
    /* Validate the source address parameters */
    if (bPhys) {
        ulPFN = PFNfrom256(lpvSrc);
        if (((ulong)lpvDest&(PAGE_SIZE-1)) != ((ulong)lpvSrc<<8 &(PAGE_SIZE-1)))
            goto invalidParm;
    } else if ((ulong)lpvSrc>>VA_SECTION > SECTION_MASK) {
        /* Mapping pages from a physical region. */
        bPhys = TRUE;
        ulPFN = GetPFN(lpvSrc);
        if (((ulong)lpvDest&(PAGE_SIZE-1)) != ((ulong)lpvSrc&(PAGE_SIZE-1)))
            goto invalidParm;
    } else {
        /* Mapping pages from another virtual region. */
        bPhys = FALSE;
        if (lpvSrc == 0 || ((ulong)lpvDest&0xFFFFL) != ((ulong)lpvSrc&0xFFFFL))
            goto invalidParm;
        pscnSrc = SectionTable[((ulong)lpvSrc>>VA_SECTION) & SECTION_MASK];
        if (pscnSrc == NULL_SECTION)
            goto invalidParm;
        ixSrcBlk = ((ulong)lpvSrc >> VA_BLOCK) & BLOCK_MASK;
        ixSrcFirB = FindFirstBlock(pscnSrc, ixSrcBlk);
        if (ixSrcFirB == UNKNOWN_BASE)
            goto invalidParm;
        /* Verify that all of the source pages are committed */
        cpReserved = ScanRegion(pscnSrc, ixSrcFirB, ixSrcBlk, ixPage, cPages, &cpRegion);
        if (cpReserved || cpRegion != cPages)
            goto invalidParm;
    }
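    /* At this point either ulPFN holds the starting physical frame number
     * (bPhys set) or pscnSrc/ixSrcBlk identify a fully committed source
     * region (bPhys clear). */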
    /* Walk the page tables mapping the pages in the source region into
     * the destination region. */
    for (; cPages > 0 ; ++ixDestBlk, ++ixSrcBlk) {
        MEMBLOCK *pmbSrc;
        MEMBLOCK *pmbDest = (*pscnDest)[ixDestBlk];
        if (!bPhys)
            pmbSrc = (*pscnSrc)[ixSrcBlk];
        if (!bPhys && ixDestFirB == ixSrcFirB && ixPage == 0
                && (cPages >= PAGES_PER_BLOCK
                || pmbSrc->aPages[cPages] == BAD_PAGE)
                && (pmbSrc->aPages[0]&PG_PERMISSION_MASK) == ulPgMask) {
            /* Copying an entire block with the same access permissions into
             * the same offset within the two sections.  Share the same MEMBLOCK
             * by bumping the use count on the MEMBLOCK. */
            DEBUGCHK(pmbDest != NULL_BLOCK && pmbDest != RESERVED_BLOCK);
            FreeMem(pmbDest,HEAP_MEMBLOCK);
            ++pmbSrc->cUses;
