return ptr;
}
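//----------------------------------------------------------------------------
// Illustrative sketch (not part of the build): the boundary-tag chunk
// layout that the macros used below (mem2chunk, chunk_at_offset,
// chunksize, set_head, set_foot, ...) assume. Names carry a sketch_
// prefix to avoid clashing with the real definitions earlier in this
// file; those real macros are authoritative.
#if 0
struct sketch_chunk {
    INTERNAL_SIZE_T prev_size; // size of previous chunk, valid only if
                               // that chunk is free (PREV_INUSE clear)
    INTERNAL_SIZE_T size;      // this chunk's size; low bit = PREV_INUSE
    struct sketch_chunk* fd;   // forward link - free chunks only
    struct sketch_chunk* bk;   // back link - free chunks only
};
// User memory begins just past the two size words, so converting
// between a user pointer and its chunk header is fixed-offset
// pointer arithmetic:
#define sketch_mem2chunk(mem) \
    ((struct sketch_chunk*)((uint8*)(mem) - 2 * SIZE_SZ))
#define sketch_chunk2mem(p)   ((void*)((uint8*)(p) + 2 * SIZE_SZ))
#endif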
//----------------------------------------------------------------------------
/*
free() algorithm:
cases:
1. free(NULL) has no effect.
2. Chunks are consolidated as they arrive, and
placed in corresponding bins. (This includes the case of
consolidating with the current `last_remainder').
*/
int
dlfree( void* _mem )
{
mchunkptr p; /* chunk corresponding to mem */
INTERNAL_SIZE_T hd; /* its head field */
INTERNAL_SIZE_T sz; /* its size */
int idx; /* its bin index */
mchunkptr next; /* next contiguous chunk */
INTERNAL_SIZE_T nextsz; /* its size */
INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
mchunkptr bck; /* misc temp for linking */
mchunkptr fwd; /* misc temp for linking */
int islr; /* track whether merging with last_remainder */
uint8* mem = (uint8*)_mem;
if (mem == NULL) /* free(NULL) has no effect */
return 0;
MALLOC_LOCK;
p = mem2chunk(mem);
hd = p->size;
check_inuse_chunk(p);
sz = hd & ~PREV_INUSE;
next = chunk_at_offset(p, sz);
nextsz = chunksize(next);
if (next == top) /* merge with top */
{
sz += nextsz;
if (!(hd & PREV_INUSE)) /* consolidate backward */
{
prevsz = p->prev_size;
p = chunk_at_offset(p, -((long) prevsz));
sz += prevsz;
unlink(p, bck, fwd);
}
set_head(p, sz | PREV_INUSE);
top = p;
MALLOC_UNLOCK;
return 1;
}
set_head(next, nextsz); /* clear inuse bit */
islr = 0;
if (!(hd & PREV_INUSE)) /* consolidate backward */
{
prevsz = p->prev_size;
p = chunk_at_offset(p, -((long) prevsz));
sz += prevsz;
if (p->fd == last_remainder) /* keep as last_remainder */
islr = 1;
else
unlink(p, bck, fwd);
}
if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */
{
sz += nextsz;
if (!islr && next->fd == last_remainder) /* re-insert last_remainder */
{
islr = 1;
link_last_remainder(p);
}
else
unlink(next, bck, fwd);
}
set_head(p, sz | PREV_INUSE);
set_foot(p, sz);
if (!islr)
frontlink(p, sz, idx, bck, fwd);
MALLOC_UNLOCK;
return 1;
} // free()
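//----------------------------------------------------------------------------
// Usage sketch (not part of the build): per the code above, dlfree()
// returns 0 for the NULL no-op case and 1 after a successful free.
// dlmalloc() is assumed to be the matching allocator defined earlier
// in this file.
#if 0
static void sketch_free_usage(void)
{
    void* buf = dlmalloc(128);
    if (buf != NULL)
        dlfree(buf);  // consolidates with free neighbours, returns 1
    dlfree(NULL);     // explicitly a no-op, returns 0
}
#endif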
//----------------------------------------------------------------------------
// Resize an existing allocation. If poldsize is non-NULL, the previous
// allocation size is placed into it; if the previous size is not
// available, it is set to 0. NB the previous allocation size may have
// been rounded up. Occasionally the allocation can be adjusted
// *backwards* as well as, or instead of, forwards, so the address of
// the resized allocation is returned, or NULL if no resizing was
// possible. Note that this differs from ::realloc() in that no attempt
// is made to call malloc() if resizing is not possible - that is left
// to higher layers. The data is copied from old to new, though.
// The effects of _oldmem==NULL or bytes==0 are undefined.
// DOCUMENTATION FROM ORIGINAL FILE:
// (some now irrelevant parts elided)
/*
Realloc algorithm:
If the reallocation is for additional space, and the
chunk can be extended, it is, else a malloc-copy-free sequence is
taken. There are several different ways that a chunk could be
extended. All are tried:
* Extending forward into following adjacent free chunk.
* Shifting backwards, joining preceding adjacent space
* Both shifting backwards and extending forward.
If the reallocation is for less space, and the new request is for
a `small' (<512 bytes) size, then the newly unused space is lopped
off and freed.
The old unix realloc convention of allowing the last-free'd chunk
to be used as an argument to realloc is no longer supported.
I don't know of any programs still relying on this feature,
and allowing it would also allow too many other incorrect
usages of realloc to be sensible.
*/
void*
dlresize_alloc( void* _oldmem, int32 bytes, int32 *poldsize )
{
INTERNAL_SIZE_T nb; /* padded request size */
mchunkptr oldp; /* chunk corresponding to oldmem */
INTERNAL_SIZE_T oldsize; /* its size */
mchunkptr newp; /* chunk to return */
INTERNAL_SIZE_T newsize; /* its size */
uint8* newmem; /* corresponding user mem */
mchunkptr next; /* next contiguous chunk after oldp */
INTERNAL_SIZE_T nextsize; /* its size */
mchunkptr prev; /* previous contiguous chunk before oldp */
INTERNAL_SIZE_T prevsize; /* its size */
mchunkptr remainder; /* holds split off extra space from newp */
INTERNAL_SIZE_T remainder_size; /* its size */
mchunkptr bck; /* misc temp for linking */
mchunkptr fwd; /* misc temp for linking */
uint8* oldmem = (uint8*)_oldmem;
MALLOC_LOCK;
newp = oldp = mem2chunk(oldmem);
newsize = oldsize = chunksize(oldp);
if (NULL != poldsize)
*poldsize = oldsize - SIZE_SZ;
nb = request2size(bytes);
check_inuse_chunk(oldp);
if ((long)(oldsize) < (long)(nb))
{
/* Try expanding forward */
next = chunk_at_offset(oldp, oldsize);
if (next == top || !inuse(next))
{
nextsize = chunksize(next);
/* Forward into top only if a remainder */
if (next == top)
{
if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
{
newsize += nextsize;
top = chunk_at_offset(oldp, nb);
set_head(top, (newsize - nb) | PREV_INUSE);
set_head_size(oldp, nb);
MALLOC_UNLOCK;
return (void*)chunk2mem(oldp);
}
}
/* Forward into next chunk */
else if (((long)(nextsize + newsize) >= (long)(nb)))
{
unlink(next, bck, fwd);
newsize += nextsize;
goto split;
}
}
else
{
next = 0;
nextsize = 0;
}
/* Try shifting backwards. */
if (!prev_inuse(oldp))
{
prev = prev_chunk(oldp);
prevsize = chunksize(prev);
/* try forward + backward first to save a later consolidation */
if (next != 0)
{
/* into top */
if (next == top)
{
if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
{
unlink(prev, bck, fwd);
newp = prev;
newsize += prevsize + nextsize;
newmem = chunk2mem(newp);
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
top = chunk_at_offset(newp, nb);
set_head(top, (newsize - nb) | PREV_INUSE);
set_head_size(newp, nb);
MALLOC_UNLOCK;
return (void*)newmem;
}
}
/* into next chunk */
else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
{
unlink(next, bck, fwd);
unlink(prev, bck, fwd);
newp = prev;
newsize += nextsize + prevsize;
newmem = chunk2mem(newp);
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
goto split;
}
}
/* backward only */
if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
{
unlink(prev, bck, fwd);
newp = prev;
newsize += prevsize;
newmem = chunk2mem(newp);
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
goto split;
}
}
// couldn't resize the allocation in any direction, so return failure
MALLOC_UNLOCK;
return NULL;
}
split: /* split off extra room in old or expanded chunk */
remainder_size = long_sub_size_t(newsize, nb);
if (remainder_size >= (long)MINSIZE) /* split off remainder */
{
remainder = chunk_at_offset(newp, nb);
set_head_size(newp, nb);
set_head(remainder, remainder_size | PREV_INUSE);
set_inuse_bit_at_offset(remainder, remainder_size);
/* let free() deal with it */
dlfree( chunk2mem(remainder) );
}
else
{
set_head_size(newp, newsize);
set_inuse_bit_at_offset(newp, newsize);
}
check_inuse_chunk(newp);
MALLOC_UNLOCK;
return (void*)chunk2mem(newp);
} // resize_alloc()
void*
dlrealloc(void* ptr, int32 size)
{
int32 oldsize;
return dlresize_alloc(ptr, size, &oldsize);
}
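//----------------------------------------------------------------------------
// Usage sketch (not part of the build): as documented above,
// dlresize_alloc() never falls back to malloc-copy-free, and dlrealloc()
// as written simply forwards to it, returning NULL when in-place
// resizing fails. A higher layer wanting full ::realloc() semantics
// could add the fallback itself, along these lines (hypothetical
// wrapper, using poldsize to bound the copy):
#if 0
static void* sketch_realloc(void* ptr, int32 size)
{
    int32 oldsize;
    void* p;
    if (ptr == NULL)
        return dlmalloc(size);  // dlresize_alloc(NULL, ...) is undefined
    p = dlresize_alloc(ptr, size, &oldsize);
    if (p != NULL)
        return p;               // resized in place (possibly moved)
    p = dlmalloc(size);
    if (p == NULL)
        return NULL;            // old block remains valid
    MALLOC_COPY(p, ptr, oldsize < size ? oldsize : size);
    dlfree(ptr);
    return p;
}
#endif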
//----------------------------------------------------------------------------
// Get memory pool status
// flags is a bitmask of requested fields to fill in. The flags are
// defined in common.hxx
void
dlmem_get_status(uint32 flags, struct mempool_status* status )
{
status->arenabase = (const uint8 *)-1;
status->arenasize = -1;
status->freeblocks = -1;
status->totalallocated = -1;
status->totalfree = -1;
status->blocksize = -1;
status->maxfree = -1;
status->waiting = (char)-1;
status->origbase = (const uint8 *)-1;
status->origsize = -1;
status->maxoverhead = (char)-1;
if (0 != (flags & (MEMPOOL_STAT_FREEBLOCKS | MEMPOOL_STAT_TOTALFREE |
MEMPOOL_STAT_TOTALALLOCATED | MEMPOOL_STAT_MAXFREE)))
{
int i;
mbinptr b;
mchunkptr p;
int32 chunksizep;
int32 maxfree;
#ifdef MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
mchunkptr q;
#endif
INTERNAL_SIZE_T avail = chunksize(top);
int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
maxfree = avail;
for (i = 1; i < MEMALLOC_ALLOCATOR_DLMALLOC_NAV; ++i) {
b = bin_at(i);
for (p = last(b); p != b; p = p->bk) {
#ifdef MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
check_free_chunk(p);
for (q = next_chunk(p);
(q < top) && inuse(q) &&
(long)(chunksize(q)) >= (long)MINSIZE;
q = next_chunk(q))
check_inuse_chunk(q);
#endif
chunksizep = chunksize(p);
avail += chunksizep;
if ( chunksizep > maxfree )
maxfree = chunksizep;
navail++;
}
}
if ( 0 != (flags & MEMPOOL_STAT_TOTALALLOCATED) )
status->totalallocated = arenasize - avail;
// as quick or quicker to just set most of these, rather than
// test flag first
status->totalfree = (avail & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
ASSERT( ((avail + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
>= MINSIZE); // free mem negative!
status->freeblocks = navail;
status->maxfree = (maxfree & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
//diag_printf("raw mf: %d, ret mf: %d\n\r", maxfree, status->maxfree);
ASSERT( ((maxfree + SIZE_SZ + MALLOC_ALIGN_MASK) &
~MALLOC_ALIGN_MASK) >= MINSIZE); // max free block size negative!
} // if
// as quick or quicker to just set most of these, rather than
// test flag first
status->arenabase = status->origbase = arenabase;
status->arenasize = status->origsize = arenasize;
status->maxoverhead = MINSIZE + MALLOC_ALIGNMENT;
} // mem_get_status()
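//----------------------------------------------------------------------------
// Usage sketch (not part of the build): querying the pool. The
// MEMPOOL_STAT_* flags and struct mempool_status come from common.hxx,
// as noted above; fields whose flags are not passed (and are not cheap
// to fill anyway) are left at -1.
#if 0
static void sketch_pool_status(void)
{
    struct mempool_status st;
    dlmem_get_status(MEMPOOL_STAT_TOTALFREE | MEMPOOL_STAT_MAXFREE, &st);
    if (st.maxfree >= 0)
        diag_printf("largest free block: %d bytes\n\r", (int)st.maxfree);
    if (st.totalfree >= 0)
        diag_printf("total free: %d bytes\n\r", (int)st.totalfree);
}
#endif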
/*
struct mallinfo
mallinfo( void )
{
struct mallinfo ret = { 0 }; // initialize to all zeros
struct mempool_status stat;
mem_get_status( MEMPOOL_STAT_ARENASIZE |
MEMPOOL_STAT_FREEBLOCKS |
MEMPOOL_STAT_TOTALALLOCATED |
MEMPOOL_STAT_TOTALFREE |
MEMPOOL_STAT_MAXFREE, &stat );
if ( stat.arenasize > 0 )
ret.arena = stat.arenasize;
if ( stat.freeblocks > 0 )
ret.ordblks = stat.freeblocks;
if ( stat.totalallocated > 0 )
ret.uordblks = stat.totalallocated;
if ( stat.totalfree > 0 )
ret.fordblks = stat.totalfree;
if ( stat.maxfree > 0 )
ret.maxfree = stat.maxfree;
return ret;
} // mallinfo()
*/
//----------------------------------------------------------------------------
// EOF dlmalloc.cxx