// mapfile.c
for (hMap = hMapList; hMap; hMap = lpm->hNext) {
lpm = HandleToMap(hMap);
DEBUGCHK(lpm);
if (lpm->name && !strcmpW(lpm->name->name, lpName)) {
// Found an existing map
IncRef(hMap, pCurProc);
if (hFile != INVALID_HANDLE_VALUE)
KernelCloseHandle(hFile);
KSetLastError(pCurThread, ERROR_ALREADY_EXISTS);
LeaveCriticalSection(&MapCS);
goto exit;
}
}
KSetLastError(pCurThread, 0);
LeaveCriticalSection(&MapCS);
}
LeaveCriticalSection(&MapNameCS);
//
// Not found, validate params outside critsecs to avoid deadlocks
//
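// NOTE: bPage is set before this excerpt begins. Judging from its use below,
// a nonzero bPage means the underlying file system supports
// ReadFileWithSeek/WriteFileWithSeek, so the mapping can be demand-paged from
// the file; when the probe below fails, bPage is cleared and the entire file
// is copied into committed RAM (pData) instead.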
if (hFile != INVALID_HANDLE_VALUE) {
// Verify that we can read from the file (??)
// As far as I can tell, ReadFileWithSeek(*,0,0,0,0,0,0) will always
// return true!!!
if (!ReadFileWithSeek(hFile, 0, 0, 0, 0, 0, 0)) {
SetFilePointer(hFile, 0, 0, FILE_BEGIN);
if (realfilelen && (!ReadFile(hFile, &testbyte, 1, &len, 0)
|| (len != 1))) {
KernelCloseHandle(hFile);
KSetLastError(pCurThread, ERROR_INVALID_HANDLE);
return 0;
}
bPage = 0;
}
// Verify that we can write to the file (??)
if (realfilelen && (flProtect == PAGE_READWRITE)) {
if ((bPage && (!ReadFileWithSeek(hFile, &testbyte, 1, &len, 0, 0, 0)
|| (len != 1)
|| !WriteFileWithSeek(hFile, &testbyte, 1, &len, 0, 0, 0)
|| (len != 1)))
|| (!bPage && (SetFilePointer(hFile, 0, 0, FILE_BEGIN)
|| !WriteFile(hFile, &testbyte, 1, &len, 0)
|| (len != 1)))) {
KernelCloseHandle(hFile);
KSetLastError(pCurThread, ERROR_ACCESS_DENIED);
return 0;
}
}
if (!bPage) {
// Reserve memory for mapping
if (!(pData = FSMapMemReserve(PAGEALIGN_UP(reallen)))) {
if (hFile != INVALID_HANDLE_VALUE)
KernelCloseHandle(hFile);
KSetLastError(pCurThread, ERROR_NOT_ENOUGH_MEMORY);
return 0;
}
// Commit
if (!FSMapMemCommit(pData, PAGEALIGN_UP(reallen), PAGE_READWRITE)) {
FSMapMemFree(pData, PAGEALIGN_UP(reallen), MEM_RELEASE);
if (hFile != INVALID_HANDLE_VALUE)
KernelCloseHandle(hFile);
KSetLastError(pCurThread, ERROR_NOT_ENOUGH_MEMORY);
return 0;
}
// Copy the file data into the mapped memory
SetFilePointer(hFile, 0, 0, FILE_BEGIN);
if (!ReadFile(hFile, pData, reallen, &len, 0) || (len != reallen)) {
// Free up the memory we've grabbed
FSMapMemFree(pData, PAGEALIGN_UP(reallen), MEM_DECOMMIT);
FSMapMemFree(pData, PAGEALIGN_UP(reallen), MEM_RELEASE);
if (hFile != INVALID_HANDLE_VALUE)
KernelCloseHandle(hFile);
KSetLastError(pCurThread, ERROR_INVALID_HANDLE);
return 0;
}
// Mark the memory as R/O, if necessary
if (flProtect != PAGE_READWRITE)
VirtualProtect(pData, PAGEALIGN_UP(reallen), PAGE_READONLY,&len);
}
}
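// At this point a non-pageable file has been copied in full into the private
// RAM block at pData; for a pageable file nothing has been reserved yet (that
// happens below, once the map control struct is set up).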
//
// Check again to make sure the mapping doesn't already exist,
// since not holding critsecs above
//
EnterCriticalSection(&MapNameCS);
if (lpName) {
// Is this check necessary??
if ((len = strlenW(lpName)) > MAX_PATH - 1) {
// Clear lpm/hMap (they may still point at an existing map from the scan
// above) so that the errexit path does not tear that map down.
lpm = 0;
hMap = 0;
KSetLastError(pCurThread,ERROR_INVALID_PARAMETER);
goto errexit;
}
EnterCriticalSection(&MapCS);
for (hMap = hMapList; hMap; hMap = lpm->hNext) {
lpm = HandleToMap(hMap);
DEBUGCHK(lpm);
if (lpm->name && !strcmpW(lpm->name->name, lpName)) {
// Found an existing map
IncRef(hMap, pCurProc);
if (hFile != INVALID_HANDLE_VALUE)
KernelCloseHandle(hFile);
KSetLastError(pCurThread, ERROR_ALREADY_EXISTS);
LeaveCriticalSection(&MapCS);
if (!bPage && pData) {
// Free up the memory we've grabbed
FSMapMemFree(pData, PAGEALIGN_UP(reallen), MEM_DECOMMIT);
FSMapMemFree(pData, PAGEALIGN_UP(reallen), MEM_RELEASE);
}
goto exit;
}
}
KSetLastError(pCurThread,0);
LeaveCriticalSection(&MapCS);
}
//
// Prepare the map control struct
//
lpm = 0;
hMap = 0;
// Validate args (??)
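// Only PAGE_READONLY/PAGE_READWRITE are supported, mappings of 4 GB or more
// (dwMaxSizeHigh != 0) are not, a mapping with no backing file must be given
// an explicit size, and a read-only mapping with no backing file is rejected
// (there would be nothing to populate it with).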
if (((flProtect != PAGE_READONLY) && (flProtect != PAGE_READWRITE))
|| dwMaxSizeHigh
|| ((hFile == INVALID_HANDLE_VALUE) && !dwMaxSizeLow)
|| ((flProtect == PAGE_READONLY) && (hFile == INVALID_HANDLE_VALUE))) {
KSetLastError(pCurThread,ERROR_INVALID_PARAMETER);
goto errexit;
}
// Allocate space for the map struct
if (!(lpm = (LPFSMAP)AllocMem(HEAP_FSMAP))) {
KSetLastError(pCurThread,ERROR_NOT_ENOUGH_MEMORY);
goto errexit;
}
lpm->lpmlist = 0;
lpm->bNoAutoFlush = bNoAutoFlush;
lpm->dwDirty = 0;
lpm->pBase = NULL;
// Copy the name
if (lpName) {
// Is this check necessary??
if (strlenW(lpName) > MAX_PATH - 1) {
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
goto errexit;
}
if (!(lpm->name = (Name *)AllocName((strlenW(lpName) + 1) * 2))) {
KSetLastError(pCurThread, ERROR_NOT_ENOUGH_MEMORY);
goto errexit;
}
kstrcpyW(lpm->name->name, lpName);
} else
lpm->name = 0;
if (!(hMap = AllocHandle(&cinfMap, lpm, pCurProc))) {
KSetLastError(pCurThread, ERROR_NOT_ENOUGH_MEMORY);
goto errexit;
}
lpm->length = reallen;
lpm->reslen = PAGEALIGN_UP(lpm->length);
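// Layout of the reserved region for a pageable read/write file mapping:
//   [ PAGEALIGN_UP(length) bytes of mapped data | dirty bitmap, 1 bit/page ]
// Example, assuming 4 KB pages: a 100 KB mapping spans 25 pages, so the
// bitmap needs ceil(25/8) = 4 bytes, which page-aligns to one extra page,
// giving reslen = 26 pages (106496 bytes).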
// Leave space for dirty bits
if ((flProtect == PAGE_READWRITE) && (hFile != INVALID_HANDLE_VALUE))
lpm->reslen += PAGEALIGN_UP(((lpm->reslen + PAGE_SIZE - 1) / PAGE_SIZE + 7) / 8); // one bit per page
if (!lpm->reslen)
lpm->reslen = PAGE_SIZE;
// If we haven't already reserved memory for mapping, do it now
if (bPage) {
// Reserve memory for mapping
if (!(lpm->pBase = FSMapMemReserve(lpm->reslen))) {
KSetLastError(pCurThread, ERROR_NOT_ENOUGH_MEMORY);
goto errexit;
}
// Commit
if ((flProtect == PAGE_READWRITE) && (hFile != INVALID_HANDLE_VALUE)) {
lpm->pDirty = lpm->pBase + PAGEALIGN_UP(lpm->length);
lpm->bRestart = 0;
if (!FSMapMemCommit(lpm->pDirty, lpm->reslen - PAGEALIGN_UP(lpm->length),
PAGE_READWRITE)) {
KSetLastError(pCurThread, ERROR_NOT_ENOUGH_MEMORY);
goto errexit;
}
} else
lpm->pDirty = 0;
} else {
// We already reserved memory, so fill in the struct
lpm->pBase = pData;
lpm->bRestart = 0;
lpm->pDirty = (flProtect == PAGE_READWRITE) ? (LPBYTE)1 : 0;
}
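// NOTE: for the pageable read/write case pDirty points at the real bitmap
// committed above; for the non-pageable RAM copy there is no per-page bitmap,
// so pDirty appears to serve only as a flag ((LPBYTE)1 = writable, 0 = read-only).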
// Final sanity checks on the file
if ((lpm->hFile = hFile) != INVALID_HANDLE_VALUE) {
phd = HandleToPointer(hFile);
if ((phd->lock != 1) || (phd->ref.count != 1)) {
RETAILMSG(1,(L"CreateFileMapping called with handle not created with " \
L"CreateFileForMapping!\r\n"));
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
goto errexit;
}
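// A length of 0xFFFFFFFF (INVALID_FILE_SIZE) means the size of the file could
// not be determined, so the mapping cannot be created.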
if ((lpm->filelen = realfilelen) == 0xFFFFFFFF) {
KSetLastError(pCurThread, ERROR_INVALID_PARAMETER);
goto errexit;
}
}
// Add to the list of mapped files
EnterCriticalSection(&MapCS);
lpm->hNext = hMapList;
hMapList = hMap;
LeaveCriticalSection(&MapCS);
exit:
LeaveCriticalSection(&MapNameCS);
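// Non-auto-flush maps (mapped database files, per the comment on ValidateFile
// below) may hold a partially completed flush from an earlier session;
// ValidateFile rolls that flush forward before the handle is returned.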
if (bNoAutoFlush) {
if (!ValidateFile(lpm)) {
// We cannot commit the file. Fail.
goto errexitnocs;
}
}
DEBUGMSG(ZONE_ENTRY,(L"SC_CreateFileMapping exit: %8.8lx\r\n", hMap));
return hMap;
errexit:
LeaveCriticalSection(&MapNameCS);
errexitnocs:
// Remove from the list of maps, if it's there
if (hMap && lpm) {
EnterCriticalSection(&MapNameCS);
EnterCriticalSection(&MapCS);
if (hMapList == hMap)
hMapList = HandleToMap(hMap)->hNext;
else {
HANDLE hmTemp = 0;
LPFSMAP lpmTemp = 0;
for (hmTemp = hMapList; hmTemp; hmTemp = lpmTemp->hNext) {
lpmTemp = HandleToMap(hmTemp);
DEBUGCHK(lpmTemp);
if (lpmTemp->hNext == hMap) {
lpmTemp->hNext = lpm->hNext;
break;
}
}
}
LeaveCriticalSection(&MapCS);
LeaveCriticalSection(&MapNameCS);
}
// Free up allocated memory
if (lpm) {
if (lpm->name)
FreeName(lpm->name);
if (hMap) {
FreeHandle(hMap);
}
if (lpm->pBase) {
FSMapMemFree(lpm->pBase, lpm->reslen, MEM_DECOMMIT);
FSMapMemFree(lpm->pBase, lpm->reslen, MEM_RELEASE);
}
FreeMem(lpm,HEAP_FSMAP);
}
if (!bPage && pData) {
FSMapMemFree(pData, PAGEALIGN_UP(reallen), MEM_DECOMMIT);
FSMapMemFree(pData, PAGEALIGN_UP(reallen), MEM_RELEASE);
}
if (hFile != INVALID_HANDLE_VALUE)
KernelCloseHandle(hFile);
DEBUGMSG(ZONE_ENTRY,(L"SC_CreateFileMapping exit: %8.8lx\r\n", 0));
return 0;
}
BOOL SC_MapCloseHandle(HANDLE hMap)
{
DEBUGMSG(ZONE_ENTRY,(L"SC_MapCloseHandle entry: %8.8lx\r\n", hMap));
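// DecRef appears to return TRUE only when the last reference to the map is
// gone, in which case FreeMap tears the mapping down. Note the asymmetry:
// FreeMap is presumably responsible for releasing MapNameCS itself, since
// only the still-referenced path leaves it here.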
EnterCriticalSection(&MapNameCS);
if (DecRef(hMap, pCurProc, FALSE))
FreeMap(hMap);
else
LeaveCriticalSection(&MapNameCS);
DEBUGMSG(ZONE_ENTRY,(L"SC_MapCloseHandle exit: %8.8lx -> %8.8lx\r\n", hMap, TRUE));
return TRUE;
}
ERRFALSE(offsetof(fslog_t,dwRestoreStart) == offsetof(fslog_t,dwRestoreFlags) + 4);
ERRFALSE(offsetof(fslog_t,dwRestoreSize) == offsetof(fslog_t,dwRestoreFlags) + 8);
// This function is only run on non-auto-flush maps, ie. mapped database files.
BOOL ValidateFile(LPFSMAP lpm)
{
// This struct contains the state for restoring the file
struct {
DWORD dwRestoreFlags;
DWORD dwRestoreStart;
DWORD dwRestoreSize;
} FlushStruct;
// This struct is used to copy pages of data
struct {
DWORD dataoffset;
BYTE restorepage[4096]; // must be last!
} RestStruct;
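// The ERRFALSE checks above guarantee that dwRestoreFlags, dwRestoreStart and
// dwRestoreSize are contiguous in fslog_t, so FlushStruct (the same three
// fields in the same order) can be filled by the single ReadFileWithSeek
// below. RestStruct mirrors one saved-page record on disk: the destination
// offset of the page followed by up to one page of data.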
DWORD bread, dwToWrite, offset, size, count;
// Read restore data
if (!ReadFileWithSeek(lpm->hFile, &FlushStruct, sizeof(FlushStruct), &bread, 0,
offsetof(fslog_t,dwRestoreFlags), 0)
|| (bread != sizeof(FlushStruct))) {
// This shouldn't happen.
DEBUGCHK(0);
return FALSE;
}
// Now act on the restore state
switch (FlushStruct.dwRestoreFlags) {
case RESTORE_FLAG_FLUSHED:
// The whole file has been flushed, but the dirty pages were flushed to
// the end of the file for safety. Now we must move all of those pages
// into their proper places within the file.
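// dwRestoreStart is the file offset of the first saved-page record.
// dwRestoreSize packs two values: the high word is the number of saved pages,
// the low word is the number of data bytes in each record. Each record is a
// dataoffset header followed by that many bytes of page data.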
offset = FlushStruct.dwRestoreStart;
count = FlushStruct.dwRestoreSize>>16;
FlushStruct.dwRestoreSize &= 0xffff;
size = (DWORD)&RestStruct.restorepage - (DWORD)&RestStruct + FlushStruct.dwRestoreSize;
// Move each dirty page from the end of the file to its proper place.
while (count--
&& ReadFileWithSeek(lpm->hFile, &RestStruct, size, &bread, 0, offset, 0)
&& (bread == size)) {
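// Clamp the write so it never reaches dwRestoreStart: the saved records
// themselves start there and must not be overwritten before they have all
// been replayed.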
dwToWrite = ((RestStruct.dataoffset + FlushStruct.dwRestoreSize <= FlushStruct.dwRestoreStart)
? FlushStruct.dwRestoreSize
: ((RestStruct.dataoffset < FlushStruct.dwRestoreStart)
? (FlushStruct.dwRestoreStart - RestStruct.dataoffset)
: 0));
if (!WriteFileWithSeek(lpm->hFile, &RestStruct.restorepage[0],
dwToWrite, &bread, 0, RestStruct.dataoffset, 0)
|| (bread != dwToWrite)) {
ERRORMSG(1,(L"Failed to commit page on ValidateFile!\r\n"));
if (offset == FlushStruct.dwRestoreStart) {
// We haven't actually flushed anything yet, so the state
// is still consistent.
return FALSE;
}
DEBUGCHK(0);
return FALSE;
}
offset += size;
}
FlushFileBuffers(lpm->hFile);