sepmetaimpl.inl
    freehead.next = freehead.prev = &memhead;

    // Since allocedhead and freehead are placeholders, not real blocks,
    // assign addresses which can't match list searches
    allocedhead.memnext = allocedhead.memprev = NULL;
    freehead.memnext = freehead.memprev = NULL;
    freehead.mem = allocedhead.mem = NULL;

    freemem = top - bottom;
}

// -------------------------------------------------------------------------

inline
Cyg_Mempool_Sepmeta_Implementation::~Cyg_Mempool_Sepmeta_Implementation()
{
}

// -------------------------------------------------------------------------
// allocation is mostly simple
// First we look down the free list for a large enough block
// If we find a block the right size, we unlink the block from
// the free list and return a pointer to it.
// If we find a larger block, we chop a piece off the end
// and return that
// Otherwise we reach the end of the list and return NULL

inline cyg_uint8 *
Cyg_Mempool_Sepmeta_Implementation::try_alloc( cyg_int32 size )
{
    struct memdq *alloced;

    CYG_REPORT_FUNCTION();

    // Allow uninitialised (zero sized) heaps because they could exist as a
    // quirk of the MLT setup where a dynamically sized heap is at the top of
    // memory.
    if (NULL == bottom || NULL == metabase)
        return NULL;

    size = (size + alignment - 1) & -alignment;

    struct memdq *dq = find_free_dq( size );
    if (NULL == dq)
        return NULL;

    cyg_int32 dqsize = dq->memnext->mem - dq->mem;

    if ( size == dqsize ) {
        // exact fit -- unlink from free list
        dq->prev->next = dq->next;
        dq->next->prev = dq->prev;

        // set up this block for insertion into alloced list
        dq->next = dq->memnext;     // since dq was free, dq->memnext must
                                    // be allocated otherwise it would have
                                    // been coalesced
        dq->prev = dq->next->prev;
        alloced = dq;
    } else {
        CYG_ASSERT( dqsize > size, "block found is too small");

        // Split into two memdq's, returning the second one

        // first get a memdq
        if ( NULL == freemetahead )     // out of metadata.
            return NULL;
        // FIXME: since we don't search all the way for an exact fit
        // first we may be able to find an exact fit later and therefore
        // not need more metadata. We don't do this yet though.

        alloced = freemetahead;
        freemetahead = alloced->next;

        // now set its values
        alloced->memnext = dq->memnext;
        alloced->next = dq->memnext;    // since dq was free, dq->memnext must
                                        // be allocated otherwise it would have
                                        // been coalesced
        alloced->memprev = dq;
        alloced->prev = alloced->next->prev;
        alloced->mem = alloced->next->mem - size;

        // now set up dq (the portion that remains a free block)
        // dq->next and dq->prev are unchanged as we still end up pointing
        // at the same adjacent free blocks
        // dq->memprev obviously doesn't change
        dq->memnext = alloced;

        // finish inserting into memory block list
        alloced->memnext->memprev = alloced;
        alloced->next->prev = alloced->prev->next = alloced;

        check_free_memdq(dq);
    }

    CYG_ASSERT( bottom <= alloced->mem && alloced->mem <= top,
                "alloced outside pool" );

    // Insert block into alloced list.
    alloced->next->prev = alloced->prev->next = alloced;

    check_alloced_memdq(alloced);

    freemem -= size;

    CYG_ASSERT( ((CYG_ADDRESS)alloced->mem & (alignment-1)) == 0,
                "returned memory not aligned" );

    return alloced->mem;
}
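// -------------------------------------------------------------------------
// Illustrative sketch only, not part of the eCos sources: the comments above
// describe a first-fit walk of the free list, where a free block's size is
// the gap to the next block in memory order (memnext->mem - mem), the same
// expression try_alloc() and get_status() use. A hypothetical free-standing
// helper with that behaviour could look like the code below; the real search
// lives in find_free_dq(), declared elsewhere in this package.
#if 0
static struct memdq *
first_fit_sketch( struct memdq *head, cyg_int32 size )
{
    // "head" is the freehead sentinel; its next/prev links form a circular
    // doubly linked list of the free blocks.
    for ( struct memdq *dq = head->next; dq != head; dq = dq->next ) {
        if ( (dq->memnext->mem - dq->mem) >= size )
            return dq;          // first block large enough wins
    }
    return NULL;                // no free block can satisfy the request
}
#endif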
// -------------------------------------------------------------------------
// resize existing allocation, if oldsize is non-NULL, previous
// allocation size is placed into it. If previous size not available,
// it is set to 0. NB previous allocation size may have been rounded up.
// Occasionally the allocation can be adjusted *backwards* as well as,
// or instead of forwards, therefore the address of the resized
// allocation is returned, or NULL if no resizing was possible.
// Note that this differs from ::realloc() in that no attempt is
// made to call malloc() if resizing is not possible - that is left
// to higher layers. The data is copied from old to new though.
// The effects of alloc_ptr==NULL or newsize==0 are undefined

inline cyg_uint8 *
Cyg_Mempool_Sepmeta_Implementation::resize_alloc( cyg_uint8 *alloc_ptr,
                                                  cyg_int32 newsize,
                                                  cyg_int32 *oldsize )
{
    cyg_int32 currsize, origsize;

    CYG_REPORT_FUNCTION();

    CYG_CHECK_DATA_PTRC( alloc_ptr );
    if ( NULL != oldsize )
        CYG_CHECK_DATA_PTRC( oldsize );

    CYG_ASSERT( (bottom <= alloc_ptr) && (alloc_ptr <= top),
                "alloc_ptr outside pool" );

    struct memdq *dq = find_alloced_dq( alloc_ptr );
    CYG_ASSERT( dq != NULL, "passed address not previously alloced");

    currsize = origsize = dq->memnext->mem - dq->mem;
    if ( NULL != oldsize )
        *oldsize = currsize;

    if ( newsize > currsize ) {
        cyg_int32 nextmemsize = 0, prevmemsize = 0;

        // see if we can increase the allocation size. Don't change anything
        // so we don't have to undo it later if it wouldn't fit
        if ( dq->next != dq->memnext ) {    // if not equal, memnext must
                                            // be on free list
            nextmemsize = dq->memnext->memnext->mem - dq->memnext->mem;
        }
        if ( dq->prev != dq->memprev ) {    // ditto
            prevmemsize = dq->mem - dq->memprev->mem;
        }
        if (nextmemsize + prevmemsize + currsize < newsize)
            return NULL; // can't fit it

        // expand forwards
        if ( nextmemsize != 0 ) {
            if (nextmemsize <= (newsize - currsize)) { // taking all of it
                struct memdq *fblk = dq->memnext;

                // fix up mem list ptrs
                dq->memnext = fblk->memnext;
                dq->memnext->memprev = dq;

                // fix up free list ptrs
                fblk->next->prev = fblk->prev;
                fblk->prev->next = fblk->next;

                // return to meta list
                fblk->next = freemetahead;
                freemetahead = fblk;

                currsize += nextmemsize;
            } else { // only needs some
                dq->memnext->mem += (newsize - currsize);
                currsize = newsize;
            }
        }

        // expand backwards
        if ( currsize < newsize && prevmemsize != 0 ) {
            cyg_uint8 *oldmem = dq->mem;

            CYG_ASSERT( prevmemsize >= newsize - currsize,
                        "miscalculated expansion" );

            if (prevmemsize == (newsize - currsize)) { // taking all of it
                struct memdq *fblk = dq->memprev;

                // fix up mem list ptrs
                dq->memprev = fblk->memprev;
                dq->memprev->memnext = dq;
                dq->mem = fblk->mem;

                // fix up free list ptrs
                fblk->next->prev = fblk->prev;
                fblk->prev->next = fblk->next;

                // return to meta list
                fblk->next = freemetahead;
                freemetahead = fblk;
            } else { // only needs some
                dq->mem -= (newsize - currsize);
            }

            // move data into place
            copy_data( dq->mem, oldmem, origsize );
        }
    }

    if (newsize < currsize) {
        // shrink allocation

        // easy if the next block is already a free block
        if ( dq->memnext != dq->next ) {
            dq->memnext->mem -= currsize - newsize;
            CYG_ASSERT( dq->memnext->mem > dq->mem,
                        "moving next block back corruption" );
        } else {
            // if it's already allocated we need to create a new free list
            // entry
            if (NULL == freemetahead)
                return NULL; // can't do it

            struct memdq *fdq = freemetahead;
            freemetahead = fdq->next;

            fdq->memprev = dq;
            fdq->memnext = dq->memnext;
            fdq->mem = dq->mem + newsize;
            insert_free_block( fdq );
        }
    }

    freemem += origsize - newsize;

    return dq->mem;
} // resize_alloc()
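// -------------------------------------------------------------------------
// Illustrative sketch only, not part of the eCos sources: resize_alloc()
// above deliberately never falls back to a fresh allocation - that work is
// left to higher layers. A hypothetical caller-side helper built only on the
// try_alloc(), resize_alloc() and free() members defined in this file could
// look like the code below; the name realloc_sketch and the byte copy loop
// are assumptions made for illustration.
#if 0
static cyg_uint8 *
realloc_sketch( Cyg_Mempool_Sepmeta_Implementation &pool,
                cyg_uint8 *p, cyg_int32 newsize )
{
    cyg_int32 oldsize = 0;

    // Try to grow or shrink in place first. The block may be moved
    // backwards, so always continue with the returned pointer.
    cyg_uint8 *q = pool.resize_alloc( p, newsize, &oldsize );
    if ( NULL != q )
        return q;

    // In-place resize failed: allocate a new block, copy, free the old one.
    q = pool.try_alloc( newsize );
    if ( NULL == q )
        return NULL;

    cyg_int32 n = (oldsize < newsize) ? oldsize : newsize;
    for ( cyg_int32 i = 0; i < n; i++ )
        q[i] = p[i];

    pool.free( p, 0 );          // size 0 => free the whole block
    return q;
}
#endif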
// -------------------------------------------------------------------------
// When no coalescing is done, free is simply a matter of using the
// freed memory as an element of the free list linking it in at the
// start. When coalescing, the free list is sorted

inline cyg_bool
Cyg_Mempool_Sepmeta_Implementation::free( cyg_uint8 *p, cyg_int32 size )
{
    CYG_REPORT_FUNCTION();

    CYG_CHECK_DATA_PTRC( p );

    if (!((bottom <= p) && (p <= top)))
        return false;

    struct memdq *dq = find_alloced_dq( p );
    if (NULL == dq)
        return false;

    if (0 == size)
        size = dq->memnext->mem - dq->mem;
    else {
        size = (size + alignment - 1) & -alignment;
        if ( (dq->memnext->mem - dq->mem) != size )
            return false;
    }

    check_alloced_memdq( dq );

    // Remove dq from alloced list
    dq->prev->next = dq->next;
    dq->next->prev = dq->prev;

    insert_free_block( dq );

    freemem += size;

    return true;
}

// -------------------------------------------------------------------------

inline void
Cyg_Mempool_Sepmeta_Implementation::get_status(
        cyg_mempool_status_flag_t flags, Cyg_Mempool_Status &status )
{
    CYG_REPORT_FUNCTION();

    // as quick or quicker to just set it, rather than test flag first
    status.arenabase = obase;
    if ( 0 != (flags & CYG_MEMPOOL_STAT_ARENASIZE) )
        status.arenasize = top - bottom;
    if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) )
        status.totalallocated = (top - bottom) - freemem;
    // as quick or quicker to just set it, rather than test flag first
    status.totalfree = freemem;

    if ( 0 != (flags & CYG_MEMPOOL_STAT_MAXFREE) ) {
        struct memdq *dq = &freehead;
        cyg_int32 mf = 0;

        do {
            CYG_ASSERT( dq->next->prev == dq, "Bad link in dq");

            dq = dq->next;
            if (dq == &freehead) // wrapped round
                break;
            if (dq->memnext->mem - dq->mem > mf)
                mf = dq->memnext->mem - dq->mem;
        } while (1);

        status.maxfree = mf;
    }

    // as quick or quicker to just set it, rather than test flag first
    status.origbase = obase;
    // as quick or quicker to just set it, rather than test flag first
    status.origsize = osize;

    CYG_REPORT_RETURN();
} // get_status()

// -------------------------------------------------------------------------
#endif // ifndef CYGONCE_MEMALLOC_SEPMETAIMPL_INL
// EOF sepmetaimpl.inl