dlmalloc.cxx
} // Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk()

void
Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk(mchunkptr p,
                                                             INTERNAL_SIZE_T s)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = long_sub_size_t(sz, s);

  do_check_inuse_chunk(p);

  /* Legal size ... */
  ASSERT((long)sz >= (long)MINSIZE);
  ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
  ASSERT(room >= 0);
  ASSERT(room < (long)MINSIZE);

  /* ... and alignment */
  ASSERT(aligned_OK(chunk2mem(p)));

  /* ... and was allocated at front of an available chunk */
  ASSERT(prev_inuse(p));

} // Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk()

#define check_free_chunk(P)       do_check_free_chunk(P)
#define check_inuse_chunk(P)      do_check_inuse_chunk(P)
#define check_chunk(P)            do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif

//----------------------------------------------------------------------------

/*
  Macro-based internal utilities
*/

/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/

#define frontlink(P, S, IDX, BK, FD) \
{ \
  if (S < MAX_SMALLBIN_SIZE) \
  { \
    IDX = smallbin_index(S); \
    mark_binblock(IDX); \
    BK = bin_at(IDX); \
    FD = BK->fd; \
    P->bk = BK; \
    P->fd = FD; \
    FD->bk = BK->fd = P; \
  } \
  else \
  { \
    IDX = bin_index(S); \
    BK = bin_at(IDX); \
    FD = BK->fd; \
    if (FD == BK) mark_binblock(IDX); \
    else \
    { \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd; \
      BK = FD->bk; \
    } \
    P->bk = BK; \
    P->fd = FD; \
    FD->bk = BK->fd = P; \
  } \
}

/* take a chunk off a list */

#define unlink(P, BK, FD) \
{ \
  BK = P->bk; \
  FD = P->fd; \
  FD->bk = BK; \
  BK->fd = FD; \
}

/* Place p as the last remainder */

#define link_last_remainder(P) \
{ \
  last_remainder->fd = last_remainder->bk = P; \
  P->fd = P->bk = last_remainder; \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)

//----------------------------------------------------------------------------

Cyg_Mempool_dlmalloc_Implementation::Cyg_Mempool_dlmalloc_Implementation(
        cyg_uint8 *base, cyg_int32 size, CYG_ADDRWORD /* argthru */ )
{
    arenabase = base;
    arenasize = size;

    CYG_ADDRESS front_misalign;
    cyg_int32 correction;

#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE
    cyg_ucount16 i;
    av_[0] = av_[1] = 0;
    for (i=0; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; i++) {
        av_[ i*2+2 ] = av_[ i*2+3 ] = bin_at(i);
    } // for
#elif defined(CYGDBG_USE_ASSERTS)
    static int instances;
    if ( ++instances > 1 )
        CYG_FAIL( "Multiple dlmalloc instances but "
                  "CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE "
                  "not defined" );
#endif

    front_misalign = (CYG_ADDRESS)chunk2mem(base) & MALLOC_ALIGN_MASK;

    if ( front_misalign > 0 ) {
        correction = (MALLOC_ALIGNMENT) - front_misalign;
    } else {
        correction = 0;
    }

    // too small to be useful?
    if ( correction + 2*MALLOC_ALIGNMENT > (unsigned) size )
        // help catch errors. Don't fail now.
        arenabase = NULL;
    else {
        top = (mchunkptr)(base + correction);
        set_head(top, arenasize | PREV_INUSE);
    }
}
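/* A minimal standalone model (illustration only, not part of this
   allocator) of the pointer surgery done by the unlink() and
   frontlink() macros above: bins are circular doubly-linked lists
   whose list head is the bin itself. The demo_chunk type and helper
   names below are assumptions made for the sketch, not dlmalloc
   names. */
#if 0 /* reference sketch -- not compiled into the allocator */
struct demo_chunk { demo_chunk *fd; demo_chunk *bk; };

/* Model of unlink(P, BK, FD): splice p out of its bin list. */
static void demo_unlink(demo_chunk *p)
{
    demo_chunk *bk = p->bk;  /* predecessor */
    demo_chunk *fd = p->fd;  /* successor */
    fd->bk = bk;             /* successor now skips back over p */
    bk->fd = fd;             /* predecessor now skips forward over p */
}

/* Model of the small-bin arm of frontlink(): insert p directly after
   the bin head, ahead of any other chunks of the same size. */
static void demo_frontlink_small(demo_chunk *bin, demo_chunk *p)
{
    demo_chunk *fd = bin->fd;  /* current first chunk (or the bin itself) */
    p->bk = bin;
    p->fd = fd;
    fd->bk = p;
    bin->fd = p;
}
#endif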
//----------------------------------------------------------------------------

/* Main public routines */

/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first successful of the following steps is taken:

      1. The bin corresponding to the request size is scanned, and if
         a chunk of exactly the right size is found, it is taken.

      2. The most recently remaindered chunk is used if it is big
         enough. This is a form of (roving) first fit, used only in
         the absence of exact fits. Runs of consecutive requests use
         the remainder of the chunk used for the previous such request
         whenever possible. This limited use of a first-fit style
         allocation strategy tends to give contiguous chunks
         coextensive lifetimes, which improves locality and can reduce
         fragmentation in the long run.

      3. Other bins are scanned in increasing size order, using a
         chunk big enough to fulfill the request, and splitting off
         any remainder. This search is strictly by best-fit; i.e.,
         the smallest (with ties going to approximately the least
         recently used) chunk that fits is selected.

      4. If large enough, the chunk bordering the end of memory
         (`top') is split off. (This use of `top' is in accord with
         the best-fit search rule. In effect, `top' is treated as
         larger (and thus less well fitting) than any other available
         chunk since it can be extended to be as large as necessary,
         up to system limitations.)

    All allocations are made from the `lowest' part of any found
    chunk. (The implementation invariant is that prev_inuse is always
    true of any allocated chunk; i.e., that each allocated chunk
    borders either a previously allocated and still in-use chunk, or
    the base of its memory arena.)
*/
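/* Worked sizing example (a sketch based only on the numbers quoted
   above -- 4 bytes of overhead, 8-byte alignment, MINSIZE of 16; the
   actual constants are configuration-dependent):

     try_alloc(20):  20 + 4 = 24, already 8-byte aligned    -> nb = 24
     try_alloc(10):  10 + 4 = 14, below MINSIZE             -> nb = 16
     try_alloc(25):  25 + 4 = 29, rounded up to 8-multiple  -> nb = 32

   Step 1 then looks in the bin holding chunks of exactly nb bytes;
   steps 2-4 run only when no exact fit is available. */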
cyg_uint8 *
Cyg_Mempool_dlmalloc_Implementation::try_alloc( cyg_int32 bytes )
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

  /* Allow uninitialised (zero sized) heaps because they could exist as a
   * quirk of the MLT setup where a dynamically sized heap is at the top of
   * memory. */
  if (NULL==arenabase)
      return NULL;

  if ((long)bytes < 0)
      return 0;

  nb = request2size(bytes);  /* padded request size; */

  MALLOC_LOCK;

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins. */

    q = bin_at(idx);
    victim = last(q);

#if MALLOC_ALIGN != 16
    /* Also scan the next one, since it would have a remainder < MINSIZE */

    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
#endif
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = long_sub_size_t(victim_size, nb);

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
        --idx; /* adjust to rescan below after checking last remainder */
        break;
      }
      else if (remainder_size >= 0) /* exact fit */
      {
        unlink(victim, bck, fwd);
        set_inuse_bit_at_offset(victim, victim_size);
        check_malloced_chunk(victim, nb);
        MALLOC_UNLOCK;
        return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = long_sub_size_t(victim_size, nb);

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks)
  {
    /* Get to the first marked block */

    if ( (block & binblocks) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks) == 0)
      {
        idx += BINBLOCKWIDTH;
        block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
        /* Find and use first big enough chunk ... */

        for (victim = last(bin); victim != bin; victim = victim->bk)
        {
          victim_size = chunksize(victim);
          remainder_size = long_sub_size_t(victim_size, nb);

          if (remainder_size >= (long)MINSIZE) /* split */
          {
            remainder = chunk_at_offset(victim, nb);
            set_head(victim, nb | PREV_INUSE);
            unlink(victim, bck, fwd);
            link_last_remainder(remainder);
            set_head(remainder, remainder_size | PREV_INUSE);
            set_foot(remainder, remainder_size);
            check_malloced_chunk(victim, nb);
            MALLOC_UNLOCK;
            return chunk2mem(victim);
          }
          else if (remainder_size >= 0)  /* take */
          {
            set_inuse_bit_at_offset(victim, victim_size);
            unlink(victim, bck, fwd);
            check_malloced_chunk(victim, nb);
            MALLOC_UNLOCK;
            return chunk2mem(victim);
          }
        }

        bin = next_bin(bin);

#if MALLOC_ALIGN == 16
        if (idx < MAX_SMALLBIN)
        {
          bin = next_bin(bin);
          ++idx;
        }
#endif
      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
        if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
        {
          binblocks &= ~block;
          break;
        }
        --startidx;
        q = prev_bin(q);
      } while (first(q) == q);
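/* Sketch of the binblock bookkeeping used in the scan above
   (illustration only; the constants and names below are simplified
   stand-ins, not the allocator's own). binblocks is a bitmap with one
   bit per group of BINBLOCKWIDTH bins; a set bit means "this group
   may hold a nonempty bin", so whole groups can be skipped without
   touching any of their bins. */
#if 0 /* reference sketch -- not compiled into the allocator */
enum { DEMO_BLOCKWIDTH = 4 };         /* bins per block, cf. BINBLOCKWIDTH */
static unsigned long demo_binblocks; /* one bit per block of bins */

/* Map a bin index to its block bit, cf. idx2binblock(). */
static unsigned long demo_idx2block(int idx)
{
    return 1UL << (idx / DEMO_BLOCKWIDTH);
}

/* Advance idx to the first bin of the next marked block, returning -1
   when no marked block remains -- the same skip the scan above makes
   before walking individual bins. */
static int demo_first_marked(int idx)
{
    unsigned long block = demo_idx2block(idx);
    while (block != 0 && (block & demo_binblocks) == 0)
    {
        idx = (idx & ~(DEMO_BLOCKWIDTH - 1)) + DEMO_BLOCKWIDTH;
        block <<= 1;
    }
    return (block & demo_binblocks) ? idx : -1;
}
#endif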