sbheap.c
bitvCommit <<= 1;
}
// test if group vector is valid
if (bitvEntryHi != pHeader->bitvEntryHi ||
bitvEntryLo != pHeader->bitvEntryLo)
return -17;
// adjust for next header in list
pHeader++;
}
return 0;
}
/***
* Old (VC++ 5.0) small-block heap data and code
***/
__old_sbh_region_t __old_small_block_heap = {
&__old_small_block_heap, /* p_next_region */
&__old_small_block_heap, /* p_prev_region */
&__old_small_block_heap.region_map[0], /* p_starting_region_map */
&__old_small_block_heap.region_map[0], /* p_first_uncommitted */
(__old_sbh_page_t *)_OLD_NO_PAGES, /* p_pages_begin */
(__old_sbh_page_t *)_OLD_NO_PAGES, /* p_pages_end */
{ _OLD_PARAS_PER_PAGE, _OLD_NO_FAILED_ALLOC } /* region_map[] */
};
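/*
 * Note that the initializer above points p_next_region and p_prev_region back
 * at __old_small_block_heap itself, so the region list starts out as a
 * one-element circular, doubly-linked list, and p_pages_begin == _OLD_NO_PAGES
 * marks the descriptor as not yet associated with any reserved pages.
 */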
static __old_sbh_region_t *__old_sbh_p_starting_region = &__old_small_block_heap;
static int __old_sbh_decommitable_pages = 0;
size_t __old_sbh_threshold = _OLD_PARASIZE * (_OLD_PARAS_PER_PAGE / 8);
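/* with the default above, only requests no larger than one eighth of a page's
   worth of paragraphs are served from the small-block heap */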
/*
* Prototypes for user functions.
*/
size_t __cdecl _get_old_sbh_threshold(void);
int __cdecl _set_old_sbh_threshold(size_t);
/***
*size_t _get_old_sbh_threshold() - return small-block threshold
*
*Purpose:
* Return the current value of __old_sbh_threshold
*
*Entry:
* None.
*
*Exit:
* See above.
*
*Exceptions:
*
*******************************************************************************/
size_t __cdecl _get_old_sbh_threshold (
void
)
{
return __old_sbh_threshold;
}
/***
*int _set_old_sbh_threshold(size_t threshold) - set small-block heap threshold
*
*Purpose:
* Set the upper limit for the size of an allocation which will be
* supported from the small-block heap. It is required that at least two
* allocations can come from a page. This imposes an upper limit on how
* big the new threshold can be.
*
*Entry:
* size_t threshold - proposed new value for __old_sbh_threshold
*
*Exit:
* Returns 1 if successful. Returns 0 if threshold was too big.
*
*Exceptions:
*
*******************************************************************************/
int __cdecl _set_old_sbh_threshold (
size_t threshold
)
{
/*
* Round up the proposed new value to the nearest paragraph
*/
threshold = (threshold + _OLD_PARASIZE - 1) & ~(_OLD_PARASIZE - 1);
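/* e.g., if _OLD_PARASIZE were 16, a proposed threshold of 100 bytes would
   round up to 112 */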
/*
* Require that at least two allocations can be made within a
* page.
*/
if ( threshold <= (_OLD_PARASIZE * (_OLD_PARAS_PER_PAGE / 2)) ) {
__old_sbh_threshold = threshold;
return 1;
}
else
return 0;
}
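/*
 * Illustrative usage sketch (not part of the original source): how a program
 * might adjust and query the old small-block heap threshold through the two
 * user functions above.  The function name example_adjust_threshold and the
 * 512-byte request are made up for illustration; the rounded result depends
 * on _OLD_PARASIZE.
 */
#if 0
#include <stdio.h>

void example_adjust_threshold(void)
{
    /* propose 512 bytes; _set_old_sbh_threshold rounds it up to a paragraph
       multiple and accepts it only if two such blocks still fit in a page */
    if ( _set_old_sbh_threshold(512) )
        printf("small-block threshold is now %u bytes\n",
               (unsigned)_get_old_sbh_threshold());
    else
        printf("512 rejected; threshold still %u bytes\n",
               (unsigned)_get_old_sbh_threshold());
}
#endif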
/***
*__old_sbh_region_t * __old_sbh_new_region() - get a region for the small-block heap
*
*Purpose:
* Creates and adds a new region for the small-block heap. First, a
* descriptor (__old_sbh_region_t) is obtained for the new region. Next,
* VirtualAlloc() is used to reserve an address space of size
* _OLD_PAGES_PER_REGION * _OLD_PAGESIZE, and the first _OLD_PAGES_PER_COMMITMENT
* pages are committed.
*
* Note that if __old_small_block_heap is available (i.e., the p_pages_begin
* field is _OLD_NO_PAGES), it becomes the descriptor for the new region. This is
* basically the small-block heap initialization.
*
*Entry:
* No arguments.
*
*Exit:
* If successful, a pointer to the descriptor for the new region is
* returned. Otherwise, NULL is returned.
*
*******************************************************************************/
__old_sbh_region_t * __cdecl __old_sbh_new_region(
void
)
{
__old_sbh_region_t * pregnew;
__old_sbh_page_t * ppage;
int i;
/*
* Get a region descriptor (__old_sbh_region_t). If __old_small_block_heap is
* available, always use it.
*/
if ( __old_small_block_heap.p_pages_begin == _OLD_NO_PAGES ) {
pregnew = &__old_small_block_heap;
}
else {
/*
* Allocate space for the new __old_sbh_region_t structure. Note that
* this allocation comes out of the 'big block' heap.
*/
if ( (pregnew = HeapAlloc( _crtheap, 0, sizeof(__old_sbh_region_t) ))
== NULL )
return NULL;
}
/*
* Reserve a new contiguous address range (i.e., a region).
*/
if ( (ppage = VirtualAlloc( NULL,
_OLD_PAGESIZE * _OLD_PAGES_PER_REGION,
MEM_RESERVE,
PAGE_READWRITE )) != NULL )
{
/*
* Commit the first _OLD_PAGES_PER_COMMITMENT of the new region.
*/
if ( VirtualAlloc( ppage,
_OLD_PAGESIZE * _OLD_PAGES_PER_COMMITMENT,
MEM_COMMIT,
PAGE_READWRITE ) != NULL )
{
/*
* Insert *pregnew into the linked list of regions (just
* before __old_small_block_heap)
*/
if ( pregnew == &__old_small_block_heap ) {
if ( __old_small_block_heap.p_next_region == NULL )
__old_small_block_heap.p_next_region =
&__old_small_block_heap;
if ( __old_small_block_heap.p_prev_region == NULL )
__old_small_block_heap.p_prev_region =
&__old_small_block_heap;
}
else {
pregnew->p_next_region = &__old_small_block_heap;
pregnew->p_prev_region = __old_small_block_heap.p_prev_region;
__old_small_block_heap.p_prev_region = pregnew;
pregnew->p_prev_region->p_next_region = pregnew;
}
/*
* Fill in the rest of *pregnew
*/
pregnew->p_pages_begin = ppage;
pregnew->p_pages_end = ppage + _OLD_PAGES_PER_REGION;
pregnew->p_starting_region_map = &(pregnew->region_map[0]);
pregnew->p_first_uncommitted =
&(pregnew->region_map[_OLD_PAGES_PER_COMMITMENT]);
/*
* Initialize pregnew->region_map[].
*/
for ( i = 0 ; i < _OLD_PAGES_PER_REGION ; i++ ) {
if ( i < _OLD_PAGES_PER_COMMITMENT )
pregnew->region_map[i].free_paras_in_page =
_OLD_PARAS_PER_PAGE;
else
pregnew->region_map[i].free_paras_in_page =
_OLD_UNCOMMITTED_PAGE;
pregnew->region_map[i].last_failed_alloc =
_OLD_NO_FAILED_ALLOC;
}
/*
* Initialize pages
*/
memset( ppage, 0, _OLD_PAGESIZE * _OLD_PAGES_PER_COMMITMENT );
while ( ppage < pregnew->p_pages_begin +
_OLD_PAGES_PER_COMMITMENT )
{
ppage->p_starting_alloc_map = &(ppage->alloc_map[0]);
ppage->free_paras_at_start = _OLD_PARAS_PER_PAGE;
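/* the extra alloc_map entry one past the last paragraph is set to -1 below,
   apparently as an end-of-page sentinel so scans of the map stop at the
   page boundary */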
(ppage++)->alloc_map[_OLD_PARAS_PER_PAGE] = (__old_page_map_t)-1;
}
/*
* Return success
*/
return pregnew;
}
else {
/*
* Couldn't commit the pages. Release the address space.
*/
VirtualFree( ppage, 0, MEM_RELEASE );
}
}
/*
* Unable to create the new region. Free the region descriptor, if necessary.
*/
if ( pregnew != &__old_small_block_heap )
HeapFree(_crtheap, 0, pregnew);
/*
* Return failure.
*/
return NULL;
}
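/*
 * The reserve-then-commit pattern used above, shown in isolation as a minimal
 * sketch (not part of the original source; example_reserve_then_commit and its
 * parameters are made-up names).  A large address range is reserved once, and
 * only the leading portion is committed; further pages can be committed or
 * decommitted later without the region ever moving.
 */
#if 0
#include <windows.h>

static void * example_reserve_then_commit(SIZE_T reserve_bytes, SIZE_T commit_bytes)
{
    /* reserve address space only; no physical storage is charged yet */
    void * base = VirtualAlloc(NULL, reserve_bytes, MEM_RESERVE, PAGE_READWRITE);
    if ( base == NULL )
        return NULL;

    /* commit the leading commit_bytes of the reservation */
    if ( VirtualAlloc(base, commit_bytes, MEM_COMMIT, PAGE_READWRITE) == NULL ) {
        /* commit failed; release the whole reservation */
        VirtualFree(base, 0, MEM_RELEASE);
        return NULL;
    }
    return base;
}
#endif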
/***
*void __old_sbh_release_region(preg) - release region
*
*Purpose:
* Release the address space associated with the specified region
* descriptor. Also, free the specified region descriptor and update
* the linked list of region descriptors if appropriate.
*
*Entry:
* __old_sbh_region_t * preg - pointer to descriptor for the region to
* be released.
*
*Exit:
* No return value.
*
*Exceptions:
*
*******************************************************************************/
void __cdecl __old_sbh_release_region(
__old_sbh_region_t * preg
)
{
/*
* Release the passed region
*/
VirtualFree( preg->p_pages_begin, 0, MEM_RELEASE);
/*
* Update __old_sbh_p_starting_region, if necessary
*/
if ( __old_sbh_p_starting_region == preg )
__old_sbh_p_starting_region = preg->p_prev_region;
if ( preg != &__old_small_block_heap ) {
/*
* Update linked list of region descriptors.
*/
preg->p_prev_region->p_next_region = preg->p_next_region;
preg->p_next_region->p_prev_region = preg->p_prev_region;
/*
* Free the region descriptor
*/
HeapFree(_crtheap, 0, preg);
}
else {
/*
* Mark p_pages_begin as _OLD_NO_PAGES to indicate __old_small_block_heap
* is not associated with any region (and can be reused). This is the
* only region descriptor for which this is supported.
*/
__old_small_block_heap.p_pages_begin = _OLD_NO_PAGES;
}
}
/***
*void __old_sbh_decommit_pages(count) - decommit specified number of pages
*
*Purpose:
* Decommit count pages, if possible, in reverse (i.e., last to
* first) order. If this results in all the pages in any region being
* uncommitted, the region is released.
*
*Entry:
* int count - number of pages to decommit
*
*Exit:
* No return value.
*
*Exceptions:
*
*******************************************************************************/
void __cdecl __old_sbh_decommit_pages(
int count
)
{
__old_sbh_region_t * preg1;
__old_sbh_region_t * preg2;
__old_region_map_t * pregmap;
int page_decommitted_flag;
int i;
/*
* Scan the regions of the small-block heap, in reverse order, looking
* for pages which can be decommitted.
*/
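/* start from __old_small_block_heap.p_prev_region, the most recently added
   region, and walk backwards around the circular list */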
preg1 = __old_small_block_heap.p_prev_region;
do {
if ( preg1->p_pages_begin != _OLD_NO_PAGES ) {
/*
* Scan the pages in *preg1, in reverse order, looking for
* pages which can be decommitted.
*/
for ( i = _OLD_PAGES_PER_REGION - 1, page_decommitted_flag = 0,
pregmap = &(preg1->region_map[i]) ;
i >= 0 ; i--, pregmap-- )
{
/*
* Check if the pool page is unused and, if so, decommit it.
*/
if ( pregmap->free_paras_in_page == _OLD_PARAS_PER_PAGE ) {
if ( VirtualFree((preg1->p_pages_begin) + i, _OLD_PAGESIZE,
MEM_DECOMMIT) )
{
/*
* Mark the page as uncommitted, update the count of
* (global) decommitable pages, update the
* p_first_uncommitted field of the region
* descriptor, set the flag indicating at least
* one page has been decommitted in the region,
* and decrement count.
*/