celog.c
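// (Fragment) Tail of the TLB-miss logging path: it appends the number of TLB
// misses accumulated since the previous pass (current counter minus dwTLBPrev)
// to the ring buffer, publishes the new write pointer in the shared map header,
// and remembers the counter value for the next delta.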
        INT_WriteRingBuffer(&RingBuf, NULL, sizeof(DWORD), &dwDestLen1, &dwDestLen2);
        *((PDWORD) RingBuf.pWrite) = *PUB_VAR(pdwCeLogTLBMiss) - dwTLBPrev;
        INT_WriteRingBuffer(&RingBuf, NULL, sizeof(DWORD), &dwDestLen1, &dwDestLen2);

        RingBuf.pHeader->pWrite = RingBuf.pWrite;  // Update map header

        dwTLBPrev = *PUB_VAR(pdwCeLogTLBMiss);
    }

    SETCURKEY(akyOld);
}
//------------------------------------------------------------------------------
// Used to allocate the small data buffer and interrupt buffer
//------------------------------------------------------------------------------
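// Returns a locked, physically contiguous buffer accessed through its
// statically-mapped virtual address, so the logging code can write to it from
// any context without taking page faults. If physically contiguous pages
// cannot be obtained, the allocation falls back to a single page (see below).
// *lpdwAllocSize receives the size actually obtained, which may be smaller
// than the request.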
#pragma prefast(disable: 262, "use 4K buffer")
static PBYTE
INT_AllocBuffer(
    DWORD  dwReqSize,       // Requested size
    DWORD* lpdwAllocSize    // Actual size allocated
    )
{
    DWORD  dwBufNumPages;
    LPBYTE pBuffer = NULL;
    BOOL   fRet;
    DWORD  i;
    DWORD  rgdwPageList[PAGES_IN_MAX_BUFFER];

    *lpdwAllocSize = 0;

    //
    // Allocate the buffer.
    //
    pBuffer = (LPBYTE) PUB_FUNC(VirtualAlloc, (NULL, dwReqSize, MEM_COMMIT, PAGE_READWRITE));
    if (pBuffer == NULL) {
        DEBUGMSG(1, (MODNAME TEXT(": Failed buffer VirtualAlloc (%u bytes, error=%u)\r\n"),
                     dwReqSize, (DWORD) PUB_FUNC(GetLastError, ())));
        return NULL;
    }

    //
    // Lock the pages and get the physical addresses.
    //
    fRet = (BOOL) PUB_FUNC(LockPages, (pBuffer, dwReqSize, rgdwPageList, LOCKFLAG_WRITE | LOCKFLAG_READ));
    if (fRet == FALSE) {
        DEBUGMSG(1, (MODNAME TEXT(": LockPages failed\r\n")));
    }

    //
    // Convert the physical addresses to statically-mapped virtual addresses
    //
    dwBufNumPages = (dwReqSize / PAGE_SIZE) + (dwReqSize % PAGE_SIZE ? 1 : 0);
    DEBUGCHK(dwBufNumPages);
    for (i = 0; i < dwBufNumPages; i++) {
        rgdwPageList[i] = (DWORD) PRIV_FUNC(Phys2Virt, (rgdwPageList[i]));
    }

    //
    // Make sure the virtual addresses are on contiguous pages
    //
    for (i = 0; i < dwBufNumPages - 1; i++) {
        if (rgdwPageList[i] != rgdwPageList[i+1] - PAGE_SIZE) {
            // The pages need to be together, so force a single-page buffer
            DEBUGMSG(1, (MODNAME TEXT(": Non-contiguous pages. Forcing single-page buffer.\r\n")));
            PUB_FUNC(VirtualFree, (pBuffer, 0, MEM_RELEASE));

            // Allocate a new, single-page buffer
            dwReqSize = PAGE_SIZE;
            dwBufNumPages = 1;
            pBuffer = (LPBYTE) PUB_FUNC(VirtualAlloc, (NULL, PAGE_SIZE, MEM_COMMIT, PAGE_READWRITE));
            if (pBuffer == NULL) {
                DEBUGMSG(1, (MODNAME TEXT(": Failed buffer VirtualAlloc\r\n")));
                return NULL;
            }

            // Lock the page and get the physical address.
            fRet = (BOOL) PUB_FUNC(LockPages, (pBuffer, PAGE_SIZE, rgdwPageList, LOCKFLAG_WRITE | LOCKFLAG_READ));
            if (fRet == FALSE) {
                DEBUGMSG(1, (MODNAME TEXT(": LockPages failed\r\n")));
            }

            // Convert the physical address to a statically-mapped virtual address
            rgdwPageList[0] = (DWORD) PRIV_FUNC(Phys2Virt, (rgdwPageList[0]));
            break;
        }
    }

    if (dwBufNumPages) {
        pBuffer = (LPBYTE) rgdwPageList[0];
    }

    DEBUGMSG(ZONE_VERBOSE, (MODNAME TEXT(": AllocBuffer allocated %d kB for Buffer (0x%08X)\r\n"),
                            (dwReqSize) >> 10, pBuffer));

    *lpdwAllocSize = dwReqSize;
    return pBuffer;
}
#pragma prefast(pop)
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
static BOOL
INT_Init()
{
    BOOL  fRet;
    DWORD dwBufSize;  // temp val

    DEBUGMSG(1, (MODNAME TEXT(": +Init\r\n")));

    RingBuf.hMap = 0;
    RingBuf.pHeader = NULL;
    IntBuf.pBuffer = NULL;

    //
    // Look for the buffer-size parameters in the local registry if the registry
    // is available. This won't be possible if CeLog is loaded on boot
    // (IMGCELOGENABLE=1) but will be if it's loaded later.
    //
    if (ISAPIREADY(SH_FILESYS_APIS)) {
        DWORD dwType, dwVal;

        dwBufSize = sizeof(DWORD);
        if ((PRIV_FUNC(RegQueryValueExW, (HKEY_LOCAL_MACHINE, TEXT("BufferSize"),
                                          TEXT("System\\CeLog"), &dwType,
                                          (LPBYTE)&dwVal,
                                          &dwBufSize)) == ERROR_SUCCESS)
            && (dwType == REG_DWORD)) {
            PUB_VAR(dwCeLogLargeBuf) = dwVal;
        }
    }
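    // In registry terms this corresponds to a DWORD value such as
    //   [HKEY_LOCAL_MACHINE\System\CeLog]  "BufferSize" = dword:<bytes>
    // The value is interpreted as a byte count; it is rounded down to a page
    // multiple and range-checked below.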
    //
    // Check the values of the exported variables before using them.
    //
    if (PUB_VAR(dwCeLogLargeBuf) & (PAGE_SIZE-1)) {
        DEBUGMSG(1, (MODNAME TEXT(": Large buffer size not page-aligned, rounding down.\r\n")));
        PUB_VAR(dwCeLogLargeBuf) &= ~(PAGE_SIZE-1);
    }
    if (PUB_VAR(dwCeLogLargeBuf) == 0) {
        DEBUGMSG(1, (MODNAME TEXT(": Large buffer size unspecified, using default size\r\n")));
        PUB_VAR(dwCeLogLargeBuf) = RINGBUF_SIZE;
    }
    if (PUB_VAR(dwCeLogLargeBuf) >= (DWORD) (UserKInfo[KINX_PAGEFREE] * PAGE_SIZE)) {
        // Try 3/4 of available RAM
        PUB_VAR(dwCeLogLargeBuf) = ((UserKInfo[KINX_PAGEFREE] * PAGE_SIZE * 3) / 4) & ~(PAGE_SIZE-1);
        DEBUGMSG(1, (MODNAME TEXT(": Only 0x%08X RAM available, using 0x%08x for large buffer.\r\n"),
                     (UserKInfo[KINX_PAGEFREE] * PAGE_SIZE), PUB_VAR(dwCeLogLargeBuf)));
    }
    if ((PUB_VAR(dwCeLogSmallBuf) > PUB_VAR(dwCeLogLargeBuf) / 2)
        || (PUB_VAR(dwCeLogSmallBuf) > SMALLBUF_MAX)
        || (PUB_VAR(dwCeLogSmallBuf) == 0)) {
        DEBUGMSG(1, (MODNAME TEXT(": Small buffer size invalid or unspecified, using default size\r\n")));
        PUB_VAR(dwCeLogSmallBuf) = SMALLBUF_SIZE;
    }

    //
    // Allocate the large ring buffer that will hold the logging data.
    //
    do {
        // CreateFileMapping will succeed as long as there's enough VM, but
        // LockPages will only succeed if there is enough physical memory.
        RingBuf.hMap = (HANDLE)PUB_FUNC(CreateFileMappingW,
                                        (INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE,
                                         0, PUB_VAR(dwCeLogLargeBuf), DATAMAP_NAME));
        if (RingBuf.hMap) {
            RingBuf.pHeader = (PMAPHEADER)PUB_FUNC(MapViewOfFile,
                                                   (RingBuf.hMap, FILE_MAP_ALL_ACCESS,
                                                    0, 0, PUB_VAR(dwCeLogLargeBuf)));
            if (RingBuf.pHeader) {
                // We can't take page faults during the logging so lock the pages.
                fRet = (BOOL) PUB_FUNC(LockPages,
                                       (RingBuf.pHeader, PUB_VAR(dwCeLogLargeBuf),
                                        NULL, LOCKFLAG_WRITE | LOCKFLAG_READ));
                if (fRet) {
                    // Success!
                    break;
                }
                PUB_FUNC(UnmapViewOfFile, (RingBuf.pHeader));
                RingBuf.pHeader = NULL;
            }
            PUB_FUNC(CloseHandle, (RingBuf.hMap));
            RingBuf.hMap = 0;
        }

        // Keep trying smaller buffer sizes until we succeed
        PUB_VAR(dwCeLogLargeBuf) /= 2;
        if (PUB_VAR(dwCeLogLargeBuf) < PAGE_SIZE) {
            RETAILMSG(1, (MODNAME TEXT(": Large Buffer alloc failed\r\n")));
            goto error;
        }
    } while (PUB_VAR(dwCeLogLargeBuf) >= PAGE_SIZE);
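    // For example (illustrative numbers): a 1 MB request that cannot be locked
    // falls back to 512 KB, then 256 KB, and so on; the loop only gives up once
    // the size would drop below a single page.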
    // Explicitly map the pointer to the kernel. Kernel isn't always the current
    // process during the logging, but will have permissions.
    RingBuf.pHeader = (PMAPHEADER)PUB_FUNC(MapPtrToProcess,
                                           ((PVOID) RingBuf.pHeader, GetCurrentProcess()));
    DEBUGMSG(ZONE_VERBOSE, (MODNAME TEXT(": RingBuf (VA) 0x%08X\r\n"), RingBuf.pHeader));

    RingBuf.dwSize = PUB_VAR(dwCeLogLargeBuf) - sizeof(MAPHEADER);
    RingBuf.pBuffer = RingBuf.pWrite = RingBuf.pRead
        = (LPBYTE)RingBuf.pHeader + sizeof(MAPHEADER);
    RingBuf.pWrap = (LPBYTE)RingBuf.pBuffer + RingBuf.dwSize;
    RingBuf.dwBytesLeft = 0;  // not used

    // Initialize the header on the map
    RingBuf.pHeader->dwBufSize = RingBuf.dwSize;
    RingBuf.pHeader->pWrite = RingBuf.pWrite;
    RingBuf.pHeader->pRead = RingBuf.pRead;
    RingBuf.pHeader->fSetEvent = TRUE;
    RingBuf.pHeader->dwLostBytes = 0;
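    // The MAPHEADER sits at the start of the named mapping, so whoever opens
    // DATAMAP_NAME sees the same dwBufSize/pRead/pWrite fields this code keeps
    // up to date. fSetEvent and dwLostBytes are presumably used by the flush
    // path to throttle event signaling and to count data dropped on overflow;
    // that code is not part of this excerpt.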
    //
    // Initialize the immediate buffer (small).
    //
    pCelBuf = &CelBuf;

    // Allocate the buffer
    pCelBuf->pBuffer = (PDWORD)INT_AllocBuffer(PUB_VAR(dwCeLogSmallBuf), &dwBufSize);
    if ((pCelBuf->pBuffer == NULL) || (dwBufSize < PUB_VAR(dwCeLogSmallBuf))) {
        DEBUGMSG(1, (MODNAME TEXT(": Small buffer alloc failed\r\n")));
        goto error;
    }
    pCelBuf->pWrite = pCelBuf->pBuffer;
    pCelBuf->dwSize = dwBufSize;
    pCelBuf->dwBytesLeft = pCelBuf->dwSize;

    //
    // Interrupts have a separate buffer.
    //
    // Allocate the buffer.
    IntBuf.pBuffer = INT_AllocBuffer(PUB_VAR(dwCeLogSmallBuf), &dwBufSize);
    if ((IntBuf.pBuffer == NULL) || (dwBufSize < PUB_VAR(dwCeLogSmallBuf))) {
        DEBUGMSG(1, (MODNAME TEXT(": Interrupt buffer alloc failed\r\n")));
        goto error;
    }
    IntBuf.pRead  = (LPBYTE) IntBuf.pBuffer;
    IntBuf.pWrite = (LPBYTE) IntBuf.pBuffer;
    IntBuf.pWrap  = (LPBYTE) ((DWORD) IntBuf.pBuffer + dwBufSize);
    IntBuf.dwSize = dwBufSize;
    IntBuf.dwBytesLeft = dwBufSize;

    RETAILMSG((PUB_VAR(dwCeLogLargeBuf) != RINGBUF_SIZE),
              (MODNAME TEXT(": Large buffer size = %u\r\n"), PUB_VAR(dwCeLogLargeBuf)));
    DEBUGMSG((pCelBuf->dwSize != SMALLBUF_SIZE),
             (MODNAME TEXT(": Small buffer size = %u\r\n"), pCelBuf->dwSize));

    //
    // Final init stuff
    //

    // Create the event to flag when the buffer is getting full
    // (Must be auto-reset so we can call SetEvent during a flush!)
    g_hFillEvent = (HANDLE)PUB_FUNC(CreateEventW, (NULL, FALSE, FALSE, FILLEVENT_NAME));
    if (g_hFillEvent == NULL) {
        DEBUGMSG(1, (MODNAME TEXT(": Fill event creation failed\r\n")));
        goto error;
    }

    DEBUGMSG(1, (MODNAME TEXT(": -Init\r\n")));
    return TRUE;

error:
    // Dealloc buffers
    if (RingBuf.pHeader) {
        PUB_FUNC(UnlockPages, (RingBuf.pHeader, PUB_VAR(dwCeLogLargeBuf)));
        PUB_FUNC(UnmapViewOfFile, (RingBuf.pHeader));
    }
    if (RingBuf.hMap) {
        PUB_FUNC(CloseHandle, (RingBuf.hMap));
    }
    if (pCelBuf) {
        if (pCelBuf->pBuffer) {
            PUB_FUNC(VirtualFree, (pCelBuf->pBuffer, 0, MEM_DECOMMIT));
        }
        pCelBuf = NULL;
    }
    if (IntBuf.pBuffer) {
        PUB_FUNC(VirtualFree, (IntBuf.pBuffer, 0, MEM_DECOMMIT));
    }
    return FALSE;
}
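// NOTE (illustrative sketch, not part of this file): a flush client on the
// other side of the named objects would typically attach with the same APIs
// used above, along the lines of:
//
//     HANDLE hMap = CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE,
//                                      0, 0, DATAMAP_NAME);           // opens the existing map
//     PMAPHEADER pHdr = (PMAPHEADER)MapViewOfFile(hMap, FILE_MAP_ALL_ACCESS, 0, 0, 0);
//     HANDLE hFill = CreateEventW(NULL, FALSE, FALSE, FILLEVENT_NAME); // same auto-reset event
//     // ...wait on hFill, then drain the bytes between pHdr->pRead and pHdr->pWrite...
//
// The exact drain protocol (who advances pRead, how wrap-around is handled) is
// implemented elsewhere and is only sketched here.
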
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
void
EXT_SetZones(
    DWORD dwZoneUser,
    DWORD dwZoneCE,
    DWORD dwZoneProcess
    )
{
    if (!g_fInit) {
        //
        // we either failed the init, or haven't been initialized yet!
        //
        return;
    }

    pCelBuf->dwMaskUser    = VALIDATE_ZONES(dwZoneUser);
    pCelBuf->dwMaskCE      = VALIDATE_ZONES(dwZoneCE);
    pCelBuf->dwMaskProcess = dwZoneProcess;
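    // Note: the user and CE masks go through VALIDATE_ZONES, while the process
    // mask is stored as given.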
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Called by CeLogGetZones to retrieve the current zone settings.
BOOL
EXT_QueryZones(
    LPDWORD lpdwZoneUser,
    LPDWORD lpdwZoneCE,
    LPDWORD lpdwZoneProcess
    )
{
    // Check whether the library has been initialized. Use pCelBuf instead of
    // g_fInit because CeLogQueryZones is called during IOCTL_CELOG_REGISTER
    // before g_fInit is set.
    if (!pCelBuf) {
        return FALSE;
    }

    if (lpdwZoneUser)
        *lpdwZoneUser = pCelBuf->dwMaskUser;
    if (lpdwZoneCE)
        *lpdwZoneCE = pCelBuf->dwMaskCE;
    if (lpdwZoneProcess)
        *lpdwZoneProcess = pCelBuf->dwMaskProcess;

    return TRUE;
}