📄 mmapmgr.cpp
字号:
}
}
}
/*
 * GetBlock: return, via pBuffer, an IHXBuffer windowing the memory-mapped
 * region of the file identified by pHandle that covers
 * [ulOffset, ulOffset + ulSize).
 *
 * Returns ulSize on success (pBuffer set and AddRef'ed), or
 * MMAP_EOF_EXCEPTION / MMAP_EXCEPTION on failure (pBuffer set to NULL so
 * the caller can fall back to ordinary read()).
 *
 * Takes and releases the manager mutex internally.
 */
UINT32
MemoryMapManager::GetBlock(REF(IHXBuffer*) pBuffer, void* pHandle,
UINT32 ulOffset, UINT32 ulSize)
{
LockMutex();
struct _FileInfo* pInfo = (struct _FileInfo*)pHandle;
/* Two-level page table lookup: ulNeededL1Entry selects the level-1 table, */
/* ulNeededL2Entry the PTE within it. Each PTE maps one m_ulChunkSize page. */
UINT32 ulNeededPageNumber = (ulOffset / m_ulChunkSize);
UINT32 ulNeededL1Entry = ulNeededPageNumber / NUM_PTES;
UINT32 ulNeededL2Entry = ulNeededPageNumber % NUM_PTES;
struct _PageTableEntry* pEntry = 0;
#ifdef MMAPMOREDEBUG
MMAPDPRINTF(("%p GetBlock %d %d, Page %d/%d\n", pHandle, ulOffset, ulSize,
ulNeededL1Entry, ulNeededL2Entry));
fflush(0);
#endif
if (ulOffset >= pInfo->ulSize)
{
/* Maybe the file has grown */
#ifdef _UNIX
struct stat s;
if (fstat(pInfo->Descriptor, &s) == 0)
{
pInfo->ulSize = (UINT32)s.st_size;
}
#elif defined(_WINDOWS)
BY_HANDLE_FILE_INFORMATION FileInformation;
if (GetFileInformationByHandle(pInfo->Descriptor, &FileInformation))
{
/* NOTE(review): only nFileSizeLow is used, so sizes are truncated to */
/* 32 bits - consistent with the UINT32 offsets used throughout. */
pInfo->ulSize = FileInformation.nFileSizeLow;
}
#endif
if (ulOffset >= pInfo->ulSize)
{
/* Still past EOF even after refreshing the cached size: report EOF. */
pBuffer = NULL;
UnlockMutex();
return (ULONG32) MMAP_EOF_EXCEPTION;
}
}
/* See whether the needed page is already mapped and live. */
if (pInfo->pPageTable[ulNeededL1Entry])
{
if (pInfo->pPageTable[ulNeededL1Entry]->pEntry[ulNeededL2Entry].bActive)
{
pEntry = &pInfo->pPageTable[ulNeededL1Entry]->
pEntry[ulNeededL2Entry];
}
}
UCHAR* ulDataPointer = 0;
if (!pEntry)
{
/* Page not mapped yet. First try to stay under the address-space cap */
/* by emptying up to a fixed number of reap buckets. */
UINT32 ulEmptyAttempts = 0;
while (g_ulAddressSpaceUsed > MAX_ADDRESS_SPACE_USED &&
ulEmptyAttempts++ <
NUMBER_OF_REAP_BUCKETS_TO_EMPTY_ON_ADDRESS_SPACE_EXHUSTED)
{
/*
* Attempt to unmap some old pages and hopefully free up
* some address space.
*/
m_ulActiveReapList = (m_ulActiveReapList + 1)
% NUMBER_OF_REAP_BUCKETS;
EmptyReapBuckets();
}
if (g_ulAddressSpaceUsed > MAX_ADDRESS_SPACE_USED)
{
/* Could not free enough address space: give up on mapping. */
MMAPDPRINTF((" Address Space Exceeded, Exception\n"));
UnlockMutex();
return MMAP_EXCEPTION;
}
/* Allocate the level-1 table on demand, zeroed, with a back-pointer */
/* to its own slot so it can clear that slot when it is deleted. */
if (pInfo->pPageTable[ulNeededL1Entry] == NULL)
{
pInfo->pPageTable[ulNeededL1Entry] =
new struct MemoryMapManager::_PageTableLevel1;
memset(pInfo->pPageTable[ulNeededL1Entry], 0, sizeof(
struct MemoryMapManager::_PageTableLevel1));
pInfo->pPageTable[ulNeededL1Entry]->
ulNumberOfPageTableEntriesInUse = 0;
pInfo->pPageTable[ulNeededL1Entry]->
pMyEntryInParentsPageTable =
&pInfo->pPageTable[ulNeededL1Entry];
#ifdef _WIN32
/* NOTE(review): this resets the FILE-wide PTE list every time a new */
/* L1 table is created, which would orphan any PTEs already linked on */
/* it - looks suspicious; confirm against the Win32 unlink logic in */
/* CheckAndReapPageTableEntry before relying on this list. */
pInfo->m_pPTEList = NULL;
#endif
}
pInfo->pPageTable[ulNeededL1Entry]->
ulNumberOfPageTableEntriesInUse++;
/* Initialize the fresh page table entry. */
pEntry = &pInfo->pPageTable[ulNeededL1Entry]->pEntry[ulNeededL2Entry];
pEntry->bActive = TRUE;
pEntry->bReapMe = FALSE;
pEntry->bDeadPage = FALSE;
pEntry->ulPageRefCount = 0;
pEntry->pParent = pInfo->pPageTable[ulNeededL1Entry];
pEntry->pInfo = pInfo;
/* Map a little extra (slop) beyond the chunk, clamped at end of file. */
UINT32 ulChunkSize = m_ulChunkSize + MMAP_EXTRA_SLOP_SIZE;
if (ulChunkSize + ulNeededPageNumber * m_ulChunkSize > pInfo->ulSize)
{
ulChunkSize = pInfo->ulSize - ulNeededPageNumber * m_ulChunkSize;
}
pEntry->ulSize = ulChunkSize;
pInfo->ulRefCount++;
#ifdef _BEOS // no mmap for BeOS yet...
pEntry->pPage = MAP_FAIL;
#else
#ifdef _UNIX
/* Read-only private mapping of this page's span of the file. */
pEntry->pPage =
mmap(0, ulChunkSize, PROT_READ, MAP_PRIVATE,
pInfo->Descriptor, ulNeededPageNumber * m_ulChunkSize);
#else
pEntry->pPage = MapViewOfFile(pInfo->Descriptor, FILE_MAP_READ, 0,
ulNeededPageNumber * m_ulChunkSize, ulChunkSize);
if (pEntry->pPage == 0)
{
/* Normalize the Win32 failure value (NULL) to the common MAP_FAIL. */
pEntry->pPage = MAP_FAIL;
}
else
{
#if !defined(HELIX_FEATURE_SERVER) && defined(_WINDOWS)
// When MapViewOfFile is called it returns a handle to a page of memory that
// the system no longer knows about so an exception occurs. It is usally a
// EXCEPTION_IN_PAGE_ERROR. Reading the documentation it appears that all access
// to handles returned from memory mapped I/O should be wrapped in try/catch blocks
// since the handles may be invalid. I added try/catch logic to test this out and
// it fixes the bug. The main problem I'm not sure about is that this code is also
// used by the server and IsBadReadPtr may iterate over the memory block which can
// be slow. Because this is the layer of code that "knows" it is using memory mapped
// I/O this is probably where the try/catch code belongs so that if an exception
// occurs then it can return an error and no buffer since the buffer wouldn't be
// accessible anyway.
BOOL bInvalid = TRUE;
try
{
bInvalid = ::IsBadReadPtr(pEntry->pPage, ulChunkSize);
}
catch (...)
{
/* Any fault while probing the mapping means the page is unusable. */
}
if (bInvalid)
{
pEntry->pPage = MAP_FAIL;
}
else
{
/* Link this PTE at the head of the file's list of live mappings. */
if (pInfo->m_pPTEList)
{
pInfo->m_pPTEList->m_pPrevPTE = pEntry;
}
pEntry->m_pNextPTE = pInfo->m_pPTEList;
pEntry->m_pPrevPTE = NULL;
pInfo->m_pPTEList = pEntry;
}
#else
/* Server build: link the PTE without the IsBadReadPtr probe. */
if (pInfo->m_pPTEList)
{
pInfo->m_pPTEList->m_pPrevPTE = pEntry;
}
pEntry->m_pNextPTE = pInfo->m_pPTEList;
pEntry->m_pPrevPTE = NULL;
pInfo->m_pPTEList = pEntry;
#endif /* !HELIX_FEATURE_SERVER && _WINDOWS */
}
#endif
#endif /* _BEOS */
MMAPDPRINTF(("MMAP from %d Size %ld Pos %ld = %p (entry %p)\n",
pInfo->Descriptor, ulChunkSize,
ulNeededPageNumber * m_ulChunkSize,
pEntry->pPage, pEntry));
/* Account the consumed address space (even for MAP_FAIL; the reap path */
/* below subtracts it again) and enroll the page in the active bucket. */
g_ulAddressSpaceUsed += ulChunkSize;
pEntry->usReapListNumber = m_ulActiveReapList;
pEntry->ReapListPosition = ReapBuckets[m_ulActiveReapList].
AddHead(pEntry);
if (pEntry->pPage == MAP_FAIL)
{
/* Mapping failed: undo the bookkeeping via the reap path and bail. */
pBuffer = NULL;
pEntry->bReapMe = TRUE;
CheckAndReapPageTableEntry(pEntry);
UnlockMutex();
return (UINT32) MMAP_EXCEPTION;
}
}
else
{
/* Page already mapped: refresh its reap-bucket position so recently */
/* used pages are the last to be unmapped. */
if (pEntry->bDeadPage ||
(m_ulActiveReapList != pEntry->usReapListNumber))
{
if (pEntry->bDeadPage)
{
/* Page was scheduled for death but is wanted again: resurrect it. */
MMAPDPRINTF(("UnDeadPage to %p!\n", pEntry));
pEntry->bDeadPage = FALSE;
}
else
{
ReapBuckets[pEntry->usReapListNumber].
RemoveAt(pEntry->ReapListPosition);
}
pEntry->usReapListNumber = m_ulActiveReapList;
pEntry->ReapListPosition = ReapBuckets[m_ulActiveReapList].
AddHead(pEntry);
}
pEntry->bReapMe = FALSE;
}
ulDataPointer = ((UCHAR *) pEntry->pPage) + ulOffset % m_ulChunkSize;
/*
* Go back to normal read() if we are asking for something past
* the mapped region.
*/
if ((ulOffset + ulSize > pInfo->ulSize) ||
(ulOffset % m_ulChunkSize + ulSize > pEntry->ulSize))
{
/*
* If the file has grown, then the region we mapped may not be
* large enough, but we can remap it. This is a rare optimization
* that we can do without.
*/
BOOL bEOF = (ulOffset + ulSize > pInfo->ulSize);
MMAPDPRINTF((" Memory Map Page Overrun, Exception\n"));
pBuffer = NULL;
if (!z_bWithinServer)
{
// Do not defer clean-up of unused pages when in client
pEntry->bReapMe = TRUE;
CheckAndReapPageTableEntry(pEntry);
}
UnlockMutex();
if (bEOF)
{
return MMAP_EOF_EXCEPTION;
}
else
{
return MMAP_EXCEPTION;
}
}
/* Wrap the mapped bytes: Buffer's ctor bumps the page's refcount, so the */
/* page cannot be unmapped while any returned buffer is alive. */
pBuffer = new(m_pFastAlloc) Buffer(pEntry, ulDataPointer, ulSize);
if(pBuffer)
{
pBuffer->AddRef();
}
else
{
/* Allocation failed: report zero bytes delivered. */
ulSize = 0;
}
UnlockMutex();
return ulSize;
}
/*
 * On _WIN32 you MUST have the mutex to call this!
 */
/*
 * CheckAndReapPageTableEntry: if the PTE has no outstanding Buffer
 * references and has been flagged for reaping, unmap its page and undo all
 * bookkeeping (address-space accounting, reap-bucket membership, level-1
 * table usage count, file refcount). When the file's last page goes away,
 * the whole _FileInfo is destroyed too.
 *
 * Returns TRUE if the entry was reaped, FALSE otherwise.
 */
BOOL
MemoryMapManager::CheckAndReapPageTableEntry(struct _PageTableEntry* pPTE)
{
#if defined _MMM_NEED_MUTEX && defined _DEBUG
HX_ASSERT(pPTE->pInfo->pMgr->m_bHaveMutex);
#endif
struct _FileInfo* pInfo = pPTE->pInfo;
/* Only reap unreferenced pages that were explicitly marked bReapMe. */
if (pPTE->ulPageRefCount == 0 && pPTE->bReapMe)
{
MMAPDPRINTF(("Unmap Chunk %p %ld ", pPTE->pPage, pPTE->ulSize));
if (pPTE->pPage != MAP_FAIL)
{
#ifdef _BEOS
// no memory mapped IO for BeOS yet!
#else
#ifdef _UNIX
#ifdef _SOLARIS
/* Solaris declares munmap() with a char* argument, hence the cast. */
munmap((char*)(pPTE->pPage), pPTE->ulSize);
#else
munmap(pPTE->pPage, pPTE->ulSize);
#endif
#else
UnmapViewOfFile(pPTE->pPage);
/* Unlink this PTE from the file's doubly-linked list of live views. */
if (pPTE->m_pPrevPTE)
{
pPTE->m_pPrevPTE->m_pNextPTE = pPTE->m_pNextPTE;
}
else
{
pInfo->m_pPTEList = pPTE->m_pNextPTE;
}
if (pPTE->m_pNextPTE)
{
pPTE->m_pNextPTE->m_pPrevPTE = pPTE->m_pPrevPTE;
}
#endif
#endif /* _BEOS */
}
/* Give back the file reference and address space held by this page. */
pInfo->ulRefCount--;
g_ulAddressSpaceUsed -= pPTE->ulSize;
MMAPDPRINTF((" (Down to %0.2f), Reap %u\n",
g_ulAddressSpaceUsed
/ (1.0 * pInfo->pMgr->m_ulChunkSize + MMAP_EXTRA_SLOP_SIZE),
pPTE->bReapMe));
pPTE->bActive = FALSE;
/* Dead pages were already removed from their reap bucket elsewhere. */
if (pPTE->bDeadPage == FALSE)
{
pInfo->pMgr->ReapBuckets[pPTE->usReapListNumber].
RemoveAt(pPTE->ReapListPosition);
}
/* Free the level-1 table when its last PTE is gone, clearing the */
/* parent's slot through the stored back-pointer first. */
if (--pPTE->pParent->ulNumberOfPageTableEntriesInUse == 0)
{
struct _PageTableLevel1** pPTL1 =
pPTE->pParent->pMyEntryInParentsPageTable;
*pPTL1 = 0;
delete pPTE->pParent;
}
/* That was the file's last mapped page: tear the file info down. */
if (pInfo->ulRefCount == 0)
DestroyFileInfo((void*)pInfo);
return TRUE;
}
return FALSE;
}
/*
 * On _WIN32 you MUST have the mutex to call this!
 */
/*
 * DestroyFileInfo: final teardown for a mapped file. Removes the entry
 * from the manager's dev/inode -> _FileInfo map, releases the descriptor
 * registration, closes the OS file handle, drops the reference this info
 * held on its manager, and frees the _FileInfo itself.
 */
void
MemoryMapManager::DestroyFileInfo(void* pHandle)
{
#if defined _MMM_NEED_MUTEX && defined _DEBUG
HX_ASSERT(((struct _FileInfo*)pHandle)->pMgr->m_bHaveMutex);
#endif
struct _FileInfo* pInfo = (struct _FileInfo*)pHandle;
MMAPDPRINTF(("Remove %s from %p\n", pInfo->pKey,
pInfo->pMgr->m_pDevINodeToFileInfoMap));
pInfo->pMgr->m_pDevINodeToFileInfoMap->RemoveKey
((const char *)pInfo->pKey);
/*
 * Don't use MemoryMapManager's context! You will credit the wrong
 * process.
 */
if (pInfo->pDescReg)
{
/* Return the file-descriptor quota taken when the file was opened. */
pInfo->pDescReg->UnRegisterDescriptors(1);
HX_RELEASE(pInfo->pDescReg);
}
#ifdef _UNIX
close(pInfo->Descriptor);
#else
CloseHandle(pInfo->Descriptor);
#endif
HX_RELEASE(pInfo->pMgr);
delete pInfo;
}
/*
 * Buffer: an IHXBuffer view onto a memory-mapped page. Construction pins
 * the page by bumping its refcount; the destructor drops that pin.
 */
MemoryMapManager::Buffer::Buffer(struct _PageTableEntry* pEntry, UCHAR* pData,
                                 ULONG32 ulLength)
    : m_lRefCount(0)
    , m_ulLength(ulLength)
    , m_pData(pData)
    , m_pPTE(pEntry)
{
    // A Buffer is meaningless without a backing page table entry.
    ASSERT(m_pPTE);
    // Pin the page: it must not be unmapped while this Buffer exists.
    ++m_pPTE->ulPageRefCount;
}
/*
 * ~Buffer: unpin the backing page under the manager mutex and give the
 * reap machinery a chance to unmap it.
 */
MemoryMapManager::Buffer::~Buffer()
{
    MemoryMapManager* pManager = m_pPTE->pInfo->pMgr;
    pManager->LockMutex();
    --m_pPTE->ulPageRefCount;
    // Clients reap unused pages eagerly instead of deferring to idle time.
    if (!z_bWithinServer && !m_pPTE->ulPageRefCount)
    {
        m_pPTE->bReapMe = TRUE;
    }
    MemoryMapManager::CheckAndReapPageTableEntry(m_pPTE);
    pManager->UnlockMutex();
}
/*
 * QueryInterface: expose IUnknown and IHXBuffer; QIFind performs the
 * lookup and AddRef.
 */
STDMETHODIMP
MemoryMapManager::Buffer::QueryInterface(REFIID riid, void** ppvObj)
{
    QInterfaceList qiList[] =
    {
        // Route IUnknown through the IHXBuffer base for a single, consistent
        // identity. (The original cast the pointer to IUnknown* twice in a
        // row - a copy/paste redundancy - where the second cast should have
        // gone through the implemented interface, as in the row below.)
        { GET_IIDHANDLE(IID_IUnknown), (IUnknown*)(IHXBuffer*)this },
        { GET_IIDHANDLE(IID_IHXBuffer), (IUnknown*)(IHXBuffer*)this },
    };
    return QIFind(qiList, QILISTSIZE(qiList), riid, ppvObj);
}
/*
 * AddRef: bump and return the COM-style reference count.
 * NOTE(review): plain (non-atomic) counter - assumes callers serialize.
 */
STDMETHODIMP_(ULONG32)
MemoryMapManager::Buffer::AddRef()
{
    return ++m_lRefCount;
}
/*
 * Release: drop one reference; self-destruct when the count reaches zero.
 */
STDMETHODIMP_(ULONG32)
MemoryMapManager::Buffer::Release()
{
    if (--m_lRefCount > 0)
    {
        return m_lRefCount;
    }
    delete this;
    return 0;
}
/*
 * Get: hand out the mapped data pointer and its length. No copy is made;
 * the page stays pinned for this Buffer's lifetime.
 */
STDMETHODIMP
MemoryMapManager::Buffer::Get(REF(UCHAR*) pData, REF(ULONG32) ulLength)
{
    ulLength = m_ulLength;
    pData = m_pData;
    return HXR_OK;
}
/*
 * Set: not supported - the buffer is a view onto a read-only mapped page,
 * so its contents cannot be replaced. Always panics.
 */
STDMETHODIMP
MemoryMapManager::Buffer::Set(const UCHAR* pData, ULONG32 ulLength)
{
/* XXXSMP We should support this. */
PANIC(("Internal Error mmgr/620"));
return HXR_UNEXPECTED;
}
/*
 * SetSize: shrink the visible window. Growing is impossible because the
 * buffer is a fixed view onto a mapped page, so larger requests panic.
 * Returns HXR_OK when ulLength <= current length, HXR_UNEXPECTED otherwise.
 */
STDMETHODIMP
MemoryMapManager::Buffer::SetSize(ULONG32 ulLength)
{
    /* XXXSMP We should support this. */
    // Fixed: use <= instead of < so that setting the size to the current
    // size (a no-op) succeeds rather than panicking.
    if (ulLength <= m_ulLength)
    {
        m_ulLength = ulLength;
        return HXR_OK;
    }
    else
    {
        PANIC(("Internal Error mmgr/635"));
        return HXR_UNEXPECTED;
    }
}
/*
 * GetSize: length of the window handed out by GetBlock (possibly reduced
 * afterwards by SetSize).
 */
STDMETHODIMP_(ULONG32)
MemoryMapManager::Buffer::GetSize()
{
return m_ulLength;
}
/*
 * GetBuffer: raw pointer into the memory-mapped page; valid only while
 * this Buffer is alive (the page is pinned by its refcount).
 */
STDMETHODIMP_(UCHAR*)
MemoryMapManager::Buffer::GetBuffer()
{
return m_pData;
}
// MemoryMapManager callback
/*
 * MMMCallback: scheduler callback that drives MemoryMapManager::ProcessIdle.
 * Stores the manager pointer without taking a reference here; references
 * are taken around use in Func().
 */
MMMCallback::MMMCallback(MemoryMapManager* pMMM)
:m_lRefCount(0)
,m_pMMM(pMMM)
,m_hPendingHandle(0)
{
}
/* Nothing to release: no reference on m_pMMM is held across the lifetime. */
MMMCallback::~MMMCallback()
{
}
/*
 * QueryInterface: expose IUnknown and IHXCallback via the shared QIFind
 * helper.
 */
STDMETHODIMP
MMMCallback::QueryInterface(REFIID riid, void**ppvObj)
{
    QInterfaceList qiList[] =
    {
        // Made the casts explicit (through IHXCallback) for consistency with
        // Buffer::QueryInterface in this file; the original relied on an
        // implicit conversion for the IUnknown row.
        { GET_IIDHANDLE(IID_IUnknown), (IUnknown*)(IHXCallback*)this },
        { GET_IIDHANDLE(IID_IHXCallback), (IUnknown*)(IHXCallback*)this },
    };
    return QIFind(qiList, QILISTSIZE(qiList), riid, ppvObj);
}
/////////////////////////////////////////////////////////////////////////
// Method:
//      IUnknown::AddRef
// Purpose:
//      Standard COM-style reference counting: bump the count and return
//      the new value. NOTE(review): non-atomic counter.
//
STDMETHODIMP_(ULONG32)
MMMCallback::AddRef()
{
    return ++m_lRefCount;
}
/////////////////////////////////////////////////////////////////////////
// Method:
//      IUnknown::Release
// Purpose:
//      Standard COM-style reference counting: drop one reference and
//      self-destruct when the count reaches zero.
//
STDMETHODIMP_(ULONG32)
MMMCallback::Release()
{
    if (--m_lRefCount > 0)
    {
        return m_lRefCount;
    }
    delete this;
    return 0;
}
/*
 * Func: scheduler entry point. Clears the pending-callback handle first
 * so ProcessIdle may schedule a fresh callback, then runs the manager's
 * idle processing while holding a reference to it.
 */
STDMETHODIMP
MMMCallback::Func(void)
{
    m_hPendingHandle = 0;
    if (!m_pMMM)
    {
        return HXR_OK;
    }
    // Keep the manager alive across ProcessIdle in case it drops references.
    m_pMMM->AddRef();
    m_pMMM->ProcessIdle();
    m_pMMM->Release();
    return HXR_OK;
}
#endif
#endif /* _VXWORKS */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -