//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
//
// This source code is licensed under Microsoft Shared Source License
// Version 1.0 for Windows CE.
// For a copy of the license visit http://go.microsoft.com/fwlink/?LinkId=3223.
//
/*
* NK Kernel loader code
*
*
* Module Name:
*
* mapfile.c
*
* Abstract:
*
* This file implements the NK kernel mapped file routines
*
*/
#include "kernel.h"
#ifndef _PREFAST_
#pragma warning(disable: 4068) // C4068 "unknown pragma": PREfast pragmas are unknown to the regular compiler
#endif
DWORD rbRes;                            // one bit per 32MB mapper section (1 = in use)
DWORD rbSubRes[NUM_MAPPER_SECTIONS];    // per section: one bit per 1MB sub-block
extern CRITICAL_SECTION VAcs, PagerCS, MapCS, MapNameCS, WriterCS;
HANDLE hMapList;
extern DWORD g_fSysFileReadable;
BOOL SC_MapCloseHandle(HANDLE hMap);
BOOL ValidateFile(LPFSMAP lpm);
BOOL FlushMapBuffersLogged(LPFSMAP lpm, DWORD dwOffset, DWORD dwLength, DWORD dwFlags);
// Method dispatch table for file-mapping (FMAP) handles: index 0 is the
// close handler, index 2 dispatches MapViewOfFile; slot 1 is unused.
const PFNVOID MapMthds[] = {
(PFNVOID)SC_MapCloseHandle,
(PFNVOID)0,
(PFNVOID)SC_MapViewOfFile,
};
const CINFO cinfMap = {
"FMAP",
DISPATCH_KERNEL_PSL,
HT_FSMAP,
sizeof(MapMthds)/sizeof(MapMthds[0]),
MapMthds
};
#define SUB_MAPPER_INCR (MAPPER_INCR/32)    // 1MB: each 32MB section is tracked as 32 sub-blocks
// MapBits(n): mask with the low n+1 bits set (n == 0x1f yields all 32 bits)
#define MapBits(n) ((n) == 0x1f ? 0xffffffff : (1<<((n)+1))-1)
// the filesys process is always process number 1
#define IsFromFilesys() (1 == pCurProc->procnum)
// The pDirty list is an array of BYTEs, one bit per page (1=dirty, 0=clean)
// DIRTY_INDEX is the index of the byte containing the bit for a given page number
#define DIRTY_INDEX(dwPage) ((dwPage) / 8)
// DIRTY_MASK is the mask for getting the bit for a given page number from its byte
// (page & (8-1)) is faster than (page % 8)
#define DIRTY_MASK(dwPage) (1 << ((dwPage) & (8-1)))
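//
// A minimal sketch of how the two macros above combine, assuming a pDirty
// array sized ceil(cPages/8). TestAndSetDirty is a hypothetical helper for
// illustration only and is not part of this file.
//
#if 0   // illustration only, not compiled
static BOOL TestAndSetDirty(LPBYTE pDirty, DWORD dwPage)
{
    // report whether the page was already dirty, then mark it dirty
    BOOL fWasDirty = (pDirty[DIRTY_INDEX(dwPage)] & DIRTY_MASK(dwPage)) != 0;
    pDirty[DIRTY_INDEX(dwPage)] |= DIRTY_MASK(dwPage);
    return fWasDirty;
}
#endif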
///////////////////////////////////////////
// Definitions for logged flush
//
// A set of flags and values are stored at the front of the file, on the volume
// log page, to track mapped flush progress. Only dwRestoreStart (which we do
// not modify) is used by filesys.
ERRFALSE(offsetof(fslog_t, dwRestoreStart) == offsetof(fslog_t, dwRestoreFlags) + 4);
ERRFALSE(offsetof(fslog_t, dwRestoreSize) == offsetof(fslog_t, dwRestoreFlags) + 8);
typedef struct {
DWORD dwRestoreFlags;
DWORD dwRestoreStart;
DWORD dwRestoreSize;
} FlushSettings;
#define RESTORE_FLAG_NONE 0
#define RESTORE_FLAG_UNFLUSHED 1
#define RESTORE_FLAG_FLUSHED 2
// All data is written to the end of the file and tagged with its proper offset
// inside the file. Once all pages are flushed, they are committed by copying
// them to their proper offsets.
typedef struct {
DWORD dataoffset;
BYTE restorepage[4096]; // must be last!
} RestoreInfo;
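//
// A hedged sketch of the replay half of this protocol: after a crash, a
// completed log (RESTORE_FLAG_FLUSHED) can be committed by copying each
// logged page back to its tagged offset. ReplayFlushLog, dwLogStart, and
// the packed record layout are assumptions for illustration; the real
// flush logic is FlushMapBuffersLogged later in this file.
//
#if 0   // illustration only, not compiled
static BOOL ReplayFlushLog(HANDLE hFile, DWORD dwLogStart, DWORD cRecords)
{
    RestoreInfo ri;
    DWORD i, cb;
    for (i = 0; i < cRecords; i++) {
        // read the i-th RestoreInfo record from the log area at the end of the file
        SetFilePointer(hFile, dwLogStart + i*sizeof(RestoreInfo), NULL, FILE_BEGIN);
        if (!ReadFile(hFile, &ri, sizeof(ri), &cb, NULL) || (cb != sizeof(ri)))
            return FALSE;
        // copy the page back to its proper offset inside the file
        SetFilePointer(hFile, ri.dataoffset, NULL, FILE_BEGIN);
        if (!WriteFile(hFile, ri.restorepage, sizeof(ri.restorepage), &cb, NULL))
            return FALSE;
    }
    return TRUE;    // caller then sets dwRestoreFlags back to RESTORE_FLAG_NONE
}
#endif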
///////////////////////////////////////////
// Wrapper around WriteFile that brackets the call with SetNKCallOut/
// ClearNKCallOut, marking the current thread as calling out of the kernel.
static BOOL FSWriteFile (HANDLE hFile, LPVOID lpBuffer, DWORD nBytes, LPDWORD pBytesWritten)
{
BOOL fRet;
SetNKCallOut (pCurThread);
fRet = WriteFile (hFile, lpBuffer, nBytes, pBytesWritten, 0);
ClearNKCallOut (pCurThread);
return fRet;
}
//------------------------------------------------------------------------------
// reserves a memory region of length len
// rbRes: each bit indicates one 32MB VM slot
// rbSubRes: each bit indicates 1MB within a 32MB slot
//------------------------------------------------------------------------------
LPVOID FSMapMemReserve(
DWORD len,
BOOL fAddProtect
)
{
DWORD secs;
DWORD first, curr, trav;
LPVOID retval = 0;
DEBUGCHK(len);
if (!len)
return NULL;
DEBUGMSG(ZONE_VIRTMEM || ZONE_MAPFILE,
(L"FSMapMemReserve: len = 0x%x, fAddProtect = 0x%x, pCurProc->aky = 0x%x\n",
len, fAddProtect, pCurProc->aky));
EnterCriticalSection(&VAcs);
if (len >= MAPPER_INCR) {
// if the requested size is >= 32MB, find consecutive free sections that
// can satisfy the request.
secs = (len + MAPPER_INCR - 1)/MAPPER_INCR; // # of 32 M sections required
first = 0;
// try to find consecutive 32MB slots that can hold the requested allocation
for (curr = 0; curr <= NUM_MAPPER_SECTIONS - secs; curr++) {
if (rbRes & (1<<curr))
first = curr+1;
else if (curr - first + 1 == secs)
break;
}
// no consecutive VM available, fail the call
if (curr > NUM_MAPPER_SECTIONS - secs)
goto exit;
// create the mapper section for each 32M slot
for (trav = first; trav <= curr; trav++) {
if (!CreateMapperSection(FIRST_MAPPER_ADDRESS+(MAPPER_INCR*trav), fAddProtect)) {
// if we fail to create all the mapper sections, undo the allocations
// already made and fail the call
while (trav-- != first) {
DeleteMapperSection(FIRST_MAPPER_ADDRESS+(MAPPER_INCR*trav));
rbRes &= ~(1<<trav);
}
goto exit;
}
// mark the 32M slot in use
rbRes |= (1<<trav);
// if this is the last 32MB slot we allocate, mark the rbSubRes bits only
// for the megabytes we actually use. Otherwise set it to -1 (all in use)
rbSubRes[trav] = (trav == curr
? MapBits(((len - 1) & (MAPPER_INCR-1))/SUB_MAPPER_INCR) : (DWORD)-1);
}
retval = (LPVOID)(FIRST_MAPPER_ADDRESS + (MAPPER_INCR*first));
} else {
// if the requested size is < 32MB, try to carve it out of an existing 32MB section
PSECTION pscn;
DWORD aky = fAddProtect? pCurProc->aky : ProcArray[0].aky;
secs = (len + SUB_MAPPER_INCR - 1) / SUB_MAPPER_INCR; // # of 1MB blocks needed
// look through all the existing mapper sections to see if there are any
// holes that could be used for this allocation.
for (curr = 0; curr < NUM_MAPPER_SECTIONS; curr++) {
if (rbRes & (1<<curr)) {
pscn = SectionTable[curr+(MAX_PROCESSES+RESERVED_SECTIONS)];
DEBUGCHK (pscn && (pscn != NULL_SECTION) && ((*pscn)[0] != RESERVED_BLOCK));
// check if we have the right to use this section
if (!TestAccess (&((*pscn)[0]->alk), &aky)) {
DEBUGMSG(ZONE_VIRTMEM || ZONE_MAPFILE,
(L"FSMapMemReserve: No Access to mapper section %d (%x, %x), try the next one\n",
curr, (*pscn)[0]->alk, aky));
continue;
}
first = 0;
for (trav = 0; trav <= 32-secs; trav++) {
if (rbSubRes[curr] & (1<<trav))
first = trav+1;
else if (trav - first + 1 == secs)
break; // found a 'hole' of exactly secs free 1MB blocks (same test as the 32MB search above)
}
if (trav <= 32-secs)
break;
}
}
// if there is no hole big enough, we need to create a new mapper section.
if (curr == NUM_MAPPER_SECTIONS) {
// find an unused 32M slot and create the section for it.
for (curr = 0; curr < NUM_MAPPER_SECTIONS; curr++) {
if (!(rbRes & (1<<curr))) {
if (!CreateMapperSection(FIRST_MAPPER_ADDRESS + (MAPPER_INCR*curr), fAddProtect))
goto exit;
// mark the section in use and set the proper sub-bits.
rbRes |= (1<<curr);
rbSubRes[curr] = MapBits(((len-1)/SUB_MAPPER_INCR));
retval = (LPVOID)(FIRST_MAPPER_ADDRESS + (MAPPER_INCR*curr));
break;
}
}
} else {
// we found a hole big enough to satisfy the allocation, just mark the
// sub-bits and we're done.
rbSubRes[curr] |= (MapBits(((len-1)/SUB_MAPPER_INCR)) << first);
retval = (LPVOID)(FIRST_MAPPER_ADDRESS + (MAPPER_INCR*curr)
+ (SUB_MAPPER_INCR*first));
}
}
exit:
LeaveCriticalSection(&VAcs);
DEBUGMSG(ZONE_VIRTMEM || ZONE_MAPFILE, (L"FSMapMemReserve: returns %8.8lx\n", retval));
return retval;
}
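//
// A hedged sketch of the inverse bookkeeping a free must perform (see
// FSMapMemFree below): map an address returned by FSMapMemReserve back to
// its rbRes slot and rbSubRes bits. SubAllocBits is a hypothetical helper
// for illustration, assuming a sub-32MB allocation within one section.
//
#if 0   // illustration only, not compiled
static void SubAllocBits(LPBYTE pBase, DWORD len, DWORD *pSec, DWORD *pSubMask)
{
    DWORD ofs = (DWORD)pBase - FIRST_MAPPER_ADDRESS;
    *pSec = ofs / MAPPER_INCR;                          // which 32MB slot (rbRes bit)
    // 1MB sub-bits covered by the allocation, shifted to its start offset
    *pSubMask = MapBits((len - 1) / SUB_MAPPER_INCR)
                << ((ofs & (MAPPER_INCR-1)) / SUB_MAPPER_INCR);
    // freeing would clear: rbSubRes[*pSec] &= ~*pSubMask, then clear the
    // rbRes bit once rbSubRes[*pSec] reaches 0
}
#endif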
CLEANEVENT *pHugeCleanList;
//------------------------------------------------------------------------------
// Reserves dwSize bytes in the shared mapper region and records the
// reservation on pHugeCleanList so it can be cleaned up at process exit.
//------------------------------------------------------------------------------
LPVOID
HugeVirtualReserve(
DWORD dwSize,
DWORD dwFlags // Flags from DoVirtualAlloc
)
{
LPCLEANEVENT lpce;
LPVOID pMem;
BOOL fAddProtect;
dwSize = PAGEALIGN_UP(dwSize);
if (!(lpce = AllocMem(HEAP_CLEANEVENT))) {
KSetLastError(pCurThread, ERROR_OUTOFMEMORY);
return 0;
}
fAddProtect = !(dwFlags & MEM_SHAREDONLY) && IsFromFilesys();
// special-case filesys: we add protection for filesys in the 2nd GB
// to prevent accidental filesys corruption.
if (!(pMem = FSMapMemReserve(dwSize, fAddProtect))) {
FreeMem(lpce, HEAP_CLEANEVENT);
KSetLastError(pCurThread, ERROR_OUTOFMEMORY);
return 0;
}
lpce->base = pMem;
lpce->op = dwSize;              // reservation length
lpce->size = (DWORD)pCurProc;   // owning process, used as the match key on cleanup
EnterCriticalSection(&MapNameCS);
lpce->ceptr = pHugeCleanList;
pHugeCleanList = lpce;
LeaveCriticalSection(&MapNameCS);
return pMem;
}
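//
// A hedged usage sketch: callers pair HugeVirtualReserve with
// HugeVirtualRelease (defined below), committing pages inside the
// reservation before use. The size and commit length here are illustrative.
//
#if 0   // illustration only, not compiled
static void ExampleHugeReserveUsage(void)
{
    LPVOID p = HugeVirtualReserve(64*1024*1024, 0);     // reserve 64MB
    if (p) {
        // commit a page inside the reservation before touching it
        if (VirtualAlloc(p, 0x1000, MEM_COMMIT, PAGE_READWRITE)) {
            // ... use the memory ...
        }
        HugeVirtualRelease(p);  // decommits and releases via CloseHugeMemoryAreas
    }
}
#endif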
BOOL FSMapMemFree(LPBYTE pBase, DWORD len, DWORD flags);
//------------------------------------------------------------------------------
// Frees huge-memory reservations owned by the current process. If pMem is
// non-NULL, frees only that reservation and returns TRUE on success; if pMem
// is NULL, frees all of the process's reservations and returns FALSE.
//------------------------------------------------------------------------------
BOOL
CloseHugeMemoryAreas(
LPVOID pMem
)
{
LPCLEANEVENT pce, pce2;
EnterCriticalSection(&MapNameCS);
pce = pHugeCleanList;
while (pce && (pce->size == (DWORD)pCurProc) && (!pMem || (pMem == pce->base))) {
pHugeCleanList = pce->ceptr;
VirtualFree(pce->base, pce->op, MEM_DECOMMIT);
FSMapMemFree(pce->base, pce->op, MEM_RELEASE);
FreeMem(pce, HEAP_CLEANEVENT);
pce = pHugeCleanList;
if (pMem) {
LeaveCriticalSection(&MapNameCS);
return TRUE;
}
}
if (pce) {
while (pce->ceptr) {
if ((pce->ceptr->size == (DWORD)pCurProc)
&& (!pMem || (pMem == pce->ceptr->base))) {
pce2 = pce->ceptr;
pce->ceptr = pce2->ceptr;
VirtualFree(pce2->base, pce2->op, MEM_DECOMMIT);
FSMapMemFree(pce2->base, pce2->op, MEM_RELEASE);
FreeMem(pce2, HEAP_CLEANEVENT);
if (pMem) {
LeaveCriticalSection(&MapNameCS);
return TRUE;
}
} else
pce = pce->ceptr;
}
}
LeaveCriticalSection(&MapNameCS);
return FALSE;
}
//------------------------------------------------------------------------------
// Releases a single reservation made by HugeVirtualReserve; fails with
// ERROR_INVALID_PARAMETER if pMem is not a live reservation of this process.
//------------------------------------------------------------------------------
BOOL
HugeVirtualRelease(
LPVOID pMem
)
{
if (!CloseHugeMemoryAreas(pMem)) {
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
return FALSE;
}
return TRUE;
}
//------------------------------------------------------------------------------
// Walks the range [pBase, pBase+len) and decommits any regions that are
// committed read-only.
//------------------------------------------------------------------------------
void
DecommitROPages(
LPBYTE pBase,
DWORD len
)
{
MEMORY_BASIC_INFORMATION mbi;
EnterCriticalSection(&VAcs);
while (len) {
if (!VirtualQuery(pBase, &mbi, sizeof(mbi))) {
DEBUGCHK(0);
break;
}
if ((mbi.State == MEM_COMMIT) && (mbi.Protect == PAGE_READONLY))
VirtualFree(pBase, mbi.RegionSize, MEM_DECOMMIT);
pBase += mbi.RegionSize;
if (len < mbi.RegionSize)
break;
len -= mbi.RegionSize;
}
LeaveCriticalSection(&VAcs);
}
//------------------------------------------------------------------------------
// Decommits or releases a MapMemReserve'd chunk of memory. Length must be passed in.
// Flags must be MEM_DECOMMIT *or* MEM_RELEASE, and to RELEASE the range must be