/****************************************************************************
*																			*
*					sec_mem.c - Secure Memory Management					*
*																			*
****************************************************************************/
#endif /* Newer versions of VC++ */
/* Determine the start address of the page containing 'address', and the
   start address of the page containing the last byte of the block at
   'address' of length 'size'.  pageSize (a power of two) must be in scope
   at the point of use and PTR_TYPE must be an integer type wide enough to
   hold a pointer.  Note that getPageEndAddress() yields the START address
   of the last page that the block occupies, not one past the block's end.

   Fix: 'address' is now parenthesised in getPageEndAddress() as well, so
   that passing a pointer expression (e.g. 'ptr + 1') casts the whole
   expression rather than just its first operand */
#define getPageStartAddress( address ) \
		( ( PTR_TYPE ) ( address ) & ~( pageSize - 1 ) )
#define getPageEndAddress( address, size ) \
		getPageStartAddress( ( PTR_TYPE ) ( address ) + ( size ) - 1 )
/* A safe malloc function that performs page locking if possible.

   The memory is laid out as [ MEMLOCK_INFO header | caller data | canary ],
   zeroised on allocation, page-locked via VirtualLock() where possible and
   linked into the kernel's list of allocated blocks.

   pointer: Receives the address of the caller-usable portion of the block
			(just past the MEMLOCK_INFO header); cleared on entry.
   size:	Number of caller-usable bytes required.

   Returns CRYPT_OK on success, CRYPT_ERROR_MEMORY if the underlying
   allocation fails, or an error status from the parameter check */

int krnlMemalloc( void **pointer, int size )
	{
	MEMLOCK_INFO *memBlockPtr;
	BYTE *memPtr;
	int status;

	/* Validate the parameters passed by the caller */
	status = checkInitAlloc( pointer, size );
	if( cryptStatusError( status ) )
		return( status );

	/* Clear return values */
	*pointer = NULL;

	/* Try and allocate the memory.  adjustMemCanary() enlarges the
	   requested size to leave room for the trailing canary */
	adjustMemCanary( size );	/* For canary at end of block */
	if( ( memPtr = clAlloc( "krnlMemAlloc", \
							size + MEMLOCK_HEADERSIZE ) ) == NULL )
		return( CRYPT_ERROR_MEMORY );
	memset( memPtr, 0, size + MEMLOCK_HEADERSIZE );

	/* Set up the block header at the start of the allocation and hand the
	   caller the memory just past it */
	memBlockPtr = ( MEMLOCK_INFO * ) memPtr;
	memBlockPtr->isLocked = FALSE;
	memBlockPtr->size = size + MEMLOCK_HEADERSIZE;
	insertMemCanary( memBlockPtr, memPtr );
	*pointer = memPtr + MEMLOCK_HEADERSIZE;

	/* Try to lock the pages in memory */
#if !defined( NT_DRIVER )
	/* Under Win95 the VirtualLock() function is implemented as
	   `return( TRUE )' ("Thank Microsoft kids" - "Thaaaanks Bill").  Under
	   NT the function does actually work, but with a number of caveats.
	   The main one is that it has been claimed that VirtualLock() only
	   guarantees that the memory won't be paged while a thread in the
	   process is running, and when all threads are preempted the memory is
	   still a target for paging.  This would mean that on a loaded system a
	   process that was idle for some time could have the memory unlocked by
	   the system and swapped out to disk (actually with NT's somewhat
	   strange paging strategy and gradual creeping takeover of free memory
	   for disk buffers, it can get paged even on a completely unloaded
	   system).  However, attempts to force data to be paged under Win2K
	   and XP under various conditions have been unsuccessful so it may be
	   that the behaviour changed in post-NT versions of the OS.  In any
	   case VirtualLock() under these newer OSes seems to be fairly
	   effective in keeping data off disk.

	   An additional concern is that although VirtualLock() takes arbitrary
	   memory pointers and a size parameter, the locking is actually done on
	   a per-page basis, so that unlocking a region that shares a page with
	   another locked region means that both regions are unlocked.  Since
	   VirtualLock() doesn't do reference counting (emulating the underlying
	   MMU page locking even though it seems to implement an intermediate
	   layer above the MMU so it could in theory do this), the only way
	   around this is to walk the chain of allocated blocks and not unlock a
	   block if there's another block allocated on the same page.  Ick.

	   For the NT kernel driver, the memory is always allocated from the
	   non-paged pool so there's no need for these gyrations */
	if( VirtualLock( memPtr, memBlockPtr->size ) )
		memBlockPtr->isLocked = TRUE;
#endif /* !NT_DRIVER */

	/* Lock the memory list, insert the new block, and unlock it again */
	MUTEX_LOCK( allocation );
	insertMemBlock( &krnlData->allocatedListHead,
					&krnlData->allocatedListTail, memBlockPtr );
#ifdef USE_HEAP_CHECKING
	/* Sanity check to detect memory chain corruption */
	assert( _CrtIsValidHeapPointer( memBlockPtr ) );
	assert( memBlockPtr->next == NULL );
	assert( krnlData->allocatedListHead == krnlData->allocatedListTail || \
			_CrtIsValidHeapPointer( memBlockPtr->prev ) );
#endif /* USE_HEAP_CHECKING */
	MUTEX_UNLOCK( allocation );

	return( CRYPT_OK );
	}
/* A safe free function that scrubs memory and zeroes the pointer.

	"You will softly and suddenly vanish away
	 And never be met with again" - Lewis Carroll,
									"The Hunting of the Snark"

   The block is unlinked from the kernel's allocated-block list, unlocked
   (unless another locked block shares one of its pages), zeroised in its
   entirety (header, caller data, and canary), freed, and *pointer is set
   to NULL.  Invalid pointers are rejected by checkInitFree() and the call
   returns silently */

void krnlMemfree( void **pointer )
	{
	MEMLOCK_INFO *memBlockPtr;
	BYTE *memPtr;
	int status;

	/* Validate the pointer and recover the raw allocation address and the
	   block header from it */
	status = checkInitFree( pointer, &memPtr, &memBlockPtr );
	if( cryptStatusError( status ) )
		return;

	/* Lock the memory list, unlink the new block, and unlock it again */
	MUTEX_LOCK( allocation );
	checkMemCanary( memBlockPtr, memPtr );
#ifdef USE_HEAP_CHECKING
	/* Sanity check to detect memory chain corruption */
	assert( _CrtIsValidHeapPointer( memBlockPtr ) );
	assert( memBlockPtr->next == NULL || \
			_CrtIsValidHeapPointer( memBlockPtr->next ) );
	assert( memBlockPtr->prev == NULL || \
			_CrtIsValidHeapPointer( memBlockPtr->prev ) );
#endif /* USE_HEAP_CHECKING */
	unlinkMemBlock( &krnlData->allocatedListHead,
					&krnlData->allocatedListTail, memBlockPtr );
#if !defined( NT_DRIVER )
	/* Because VirtualLock() works on a per-page basis, we can't unlock a
	   memory block if there's another locked block on the same page.  The
	   only way to manage this is to walk the block list checking to see
	   whether there's another block allocated on the same page.  Although
	   in theory this could make freeing memory rather slow, in practice
	   there are only a small number of allocated blocks to check so it's
	   relatively quick, especially compared to the overhead imposed by the
	   lethargic VC++ allocator.  The only real disadvantage is that the
	   allocation objects remain locked while we do the free, but this
	   isn't any worse than the overhead of touchAllocatedPages().  Note
	   that the following code assumes that an allocated block will never
	   cover more than two pages, which is always the case */
	if( memBlockPtr->isLocked )
		{
		MEMLOCK_INFO *currentBlockPtr;
		PTR_TYPE block1PageAddress, block2PageAddress;
		const int pageSize = getSysVar( SYSVAR_PAGESIZE );

		/* Calculate the addresses of the page(s) in which the memory block
		   resides.  A zero page address is used below to mean "no page to
		   unlock" */
		block1PageAddress = getPageStartAddress( memBlockPtr );
		block2PageAddress = getPageEndAddress( memBlockPtr, memBlockPtr->size );
		if( block1PageAddress == block2PageAddress )
			block2PageAddress = 0;	/* Block fits within a single page */

		/* Walk down the block list checking whether the page(s) contain
		   another locked block.  This block has already been unlinked, so
		   it can't match itself */
		for( currentBlockPtr = krnlData->allocatedListHead; \
			 currentBlockPtr != NULL; currentBlockPtr = currentBlockPtr->next )
			{
			const PTR_TYPE currentPage1Address = \
							getPageStartAddress( currentBlockPtr );
			PTR_TYPE currentPage2Address = \
							getPageEndAddress( currentBlockPtr, currentBlockPtr->size );

			if( currentPage1Address == currentPage2Address )
				currentPage2Address = 0;

			/* There's another block allocated on either of the pages, don't
			   unlock it.  Once both pages are ruled out we can stop
			   walking the list */
			if( block1PageAddress == currentPage1Address || \
				block1PageAddress == currentPage2Address )
				{
				block1PageAddress = 0;
				if( !block2PageAddress )
					break;
				}
			if( block2PageAddress == currentPage1Address || \
				block2PageAddress == currentPage2Address )
				{
				block2PageAddress = 0;
				if( !block1PageAddress )
					break;
				}
			}

		/* Finally, if either page needs unlocking, do so.  The supplied
		   size is irrelevant since the entire page the memory is on is
		   unlocked */
		if( block1PageAddress )
			VirtualUnlock( ( void * ) block1PageAddress, 16 );
		if( block2PageAddress )
			VirtualUnlock( ( void * ) block2PageAddress, 16 );
		}
#endif /* !NT_DRIVER */
	MUTEX_UNLOCK( allocation );

	/* Zeroise the memory (including the memlock info), free it, and zero
	   the pointer */
	zeroise( memPtr, memBlockPtr->size );
	clFree( "krnlMemFree", memPtr );
	*pointer = NULL;
	}
/****************************************************************************
* *
* Unix/BeOS Secure Memory Allocation Functions *
* *
****************************************************************************/
#elif defined( __UNIX__ ) || defined( __BEOS__ )
/* Since the function prototypes for the SYSV/Posix mlock() call are stored
   all over the place depending on the Unix version, we usually have to
   prototype it ourselves here rather than trying to guess its location */

#if defined( __osf__ ) || defined( __alpha__ )
  #include <sys/mman.h>
#elif defined( sun )
  #include <sys/mman.h>
  #include <sys/types.h>
#else
  int mlock( void *address, size_t length );
  int munlock( void *address, size_t length );
#endif /* Unix-variant-specific includes */

/* Under many Unix variants the SYSV/Posix mlock() call can be used, but
   only by the superuser.  OSF/1 has mlock(), but this is defined to the
   nonexistent memlk() so we need to special-case it out.  QNX (depending
   on the version) either doesn't have mlock() at all or it's a dummy that
   just returns -1, so we no-op it out.  Aches, A/UX, PHUX,
   Linux < 1.3.something, and Ultrix don't even pretend to have mlock().
   Many systems also have plock(), but this is pretty crude since it locks
   all data, and also has various other shortcomings.  Finally, PHUX has
   datalock(), which is just a plock() variant.

   On the systems below we define mlock() to a dummy that evaluates to a
   nonzero (i.e. failure) result, so the allocator simply leaves the block
   unlocked, and munlock() to a no-op */

#if defined( _AIX ) || defined( __alpha__ ) || defined( __aux ) || \
	defined( _CRAY ) || defined( __CYGWIN__ ) || defined( __hpux ) || \
	( defined( __linux__ ) && OSVERSION < 2 ) || \
	defined( _M_XENIX ) || defined( __osf__ ) || \
	( defined( __QNX__ ) && OSVERSION <= 6 ) || \
	defined( __TANDEM_NSK__ ) || defined( __TANDEM_OSS__ ) || \
	defined( __ultrix )
  #define mlock( a, b )		1
  #define munlock( a, b )
#endif /* Unix OS-specific defines */
/* A safe malloc function that performs page locking if possible.  The
   block layout ([ MEMLOCK_INFO header | caller data | canary ]) and list
   handling are the same as in the Windows version; only the locking
   mechanism differs (mlock(), or a BeOS locked area).

   Returns CRYPT_OK on success, CRYPT_ERROR_MEMORY if the underlying
   allocation fails, or an error status from the parameter check */

int krnlMemalloc( void **pointer, int size )
	{
	MEMLOCK_INFO *memBlockPtr;
	BYTE *memPtr;
#if defined( __BEOS__ )
	area_id areaID;
#endif /* __BEOS__ && BeOS areas */
	int status;

	/* Validate the parameters passed by the caller */
	status = checkInitAlloc( pointer, size );
	if( cryptStatusError( status ) )
		return( status );

	/* Clear return values */
	*pointer = NULL;

	/* Try and allocate the memory.  Note that the two compile paths below
	   share the return statement that follows them, each path supplying
	   only the if() condition that guards it */
	adjustMemCanary( size );	/* For canary at end of block */
#if defined( __BEOS__ )
	/* Under BeOS we have to allocate a locked area, we can't lock it after
	   the event.  create_area(), like most of the low-level memory access
	   functions provided by different OSes, functions at the page level, so
	   we round the size up to the page size.  We can mitigate the
	   granularity somewhat by specifying lazy locking, which means that the
	   page isn't locked until it's committed.

	   In pre-open-source BeOS, areas were a bit of a security tradeoff
	   because they were globally visible(!!!) through the use of
	   find_area(), so that any other process in the system could find them.
	   An attacker could always find the app's malloc() arena anyway because
	   of this, but putting data directly into areas made the attacker's
	   task somewhat easier.  Open-source BeOS fixed this, mostly because it
	   would have taken extra work to make areas explicitly globally visible
	   and no-one could see a reason for this, so it's somewhat safer there.

	   However, the implementation of create_area() in the open-source BeOS
	   seems to be rather flaky (simply creating an area and then
	   immediately destroying it again causes a segmentation violation) so
	   it may be necessary to turn it off for some BeOS releases */
	areaID = create_area( "memory_block", ( void ** ) &memPtr, B_ANY_ADDRESS,
						  roundUp( size + MEMLOCK_HEADERSIZE, B_PAGE_SIZE ),
						  B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA );
	if( areaID < B_NO_ERROR )
#else
	if( ( memPtr = clAlloc( "krnlMemAlloc", \
							size + MEMLOCK_HEADERSIZE ) ) == NULL )
#endif /* __BEOS__ */
		return( CRYPT_ERROR_MEMORY );
	memset( memPtr, 0, size + MEMLOCK_HEADERSIZE );

	/* Set up the block header and hand the caller the memory past it */
	memBlockPtr = ( MEMLOCK_INFO * ) memPtr;
	memBlockPtr->isLocked = FALSE;
	memBlockPtr->size = size + MEMLOCK_HEADERSIZE;
#if defined( __BEOS__ )
	/* Save the area ID, presumably so that the free function can
	   delete_area() it - the free path isn't visible in this extract */
	memBlockPtr->areaID = areaID;
#endif /* __BEOS__ && BeOS areas */
	insertMemCanary( memBlockPtr, memPtr );
	*pointer = memPtr + MEMLOCK_HEADERSIZE;

	/* Try to lock the pages in memory.  On OSes where mlock() is defined
	   to a failure-returning dummy (see above) the block is simply left
	   unlocked, with isLocked remaining FALSE */
#if !defined( __BEOS__ )
	if( !mlock( memPtr, memBlockPtr->size ) )
		memBlockPtr->isLocked = TRUE;
#endif /* !__BEOS__ */

	/* Lock the memory list, insert the new block, and unlock it again */
	MUTEX_LOCK( allocation );
	insertMemBlock( &krnlData->allocatedListHead,
					&krnlData->allocatedListTail, memBlockPtr );
	MUTEX_UNLOCK( allocation );

	return( CRYPT_OK );
	}
/* A safe free function that scrubs memory and zeroes the pointer.
"You will softly and suddenly vanish away
And never be met with again" - Lewis Carroll,
"The Hunting of the Snark" */
void krnlMemfree( void **pointer )
{
	/* NOTE(review): the remainder of the Unix/BeOS krnlMemfree() body was
	   lost in extraction (replaced by unrelated page-viewer text) - restore
	   it from the original source file */