📄 malloc.c

📁 Axis 221 camera embedded programming interface
💻 C
📖 Page 1 of 3
/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc to dl@cs.oswego.edu

  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  Check before installing!

  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
*/

#define _GNU_SOURCE
#include "malloc.h"

#ifdef __UCLIBC_HAS_THREADS__
pthread_mutex_t __malloc_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
#endif

/*
   There is exactly one instance of this struct in this malloc.
   If you are adapting this malloc in a way that does NOT use a static
   malloc_state, you MUST explicitly zero-fill it before using. This
   malloc relies on the property that malloc_state is initialized to
   all zeroes (as is true of C statics).
*/
struct malloc_state __malloc_state;  /* never directly referenced */

/* forward declaration */
static int __malloc_largebin_index(unsigned int sz);

#ifdef __MALLOC_DEBUGGING

/*
  Debugging support

  Because freed chunks may be overwritten with bookkeeping fields, this
  malloc will often die when freed memory is overwritten by user
  programs.  This can be very effective (albeit in an annoying way)
  in helping track down dangling pointers.

  If you compile with -D__MALLOC_DEBUGGING, a number of assertion checks
  are enabled that will catch more memory errors. You probably won't be
  able to make much sense of the actual assertion errors, but they
  should help you locate incorrectly overwritten memory.  The
  checking is fairly extensive, and will slow down execution
  noticeably. Calling malloc_stats or mallinfo with __MALLOC_DEBUGGING
  set will attempt to check every non-mmapped allocated and free chunk
  in the course of computing the summaries. (By nature, mmapped regions
  cannot be checked very much automatically.)

  Setting __MALLOC_DEBUGGING may also be helpful if you are trying to
  modify this code. The assertions in the check routines spell out in
  more detail the assumptions and invariants underlying the algorithms.

  Setting __MALLOC_DEBUGGING does NOT provide an automated mechanism for
  checking that all accesses to malloced memory stay within their
  bounds. However, there are several add-ons and adaptations of this
  or other mallocs available that do this.
*/

/* Properties of all chunks */
void __do_check_chunk(mchunkptr p)
{
    mstate av = get_malloc_state();
#ifdef __DOASSERTS__
    /* min and max possible addresses assuming contiguous allocation */
    char* max_address = (char*)(av->top) + chunksize(av->top);
    char* min_address = max_address - av->sbrked_mem;
    unsigned long sz = chunksize(p);
#endif

    if (!chunk_is_mmapped(p)) {
        /* Has legal address ... */
        if (p != av->top) {
            if (contiguous(av)) {
                assert(((char*)p) >= min_address);
                assert(((char*)p + sz) <= ((char*)(av->top)));
            }
        }
        else {
            /* top size is always at least MINSIZE */
            assert((unsigned long)(sz) >= MINSIZE);
            /* top predecessor always marked inuse */
            assert(prev_inuse(p));
        }
    }
    else {
        /* address is outside main heap */
        if (contiguous(av) && av->top != initial_top(av)) {
            assert(((char*)p) < min_address || ((char*)p) > max_address);
        }
        /* chunk is page-aligned */
        assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
        /* mem is aligned */
        assert(aligned_OK(chunk2mem(p)));
    }
}

/* Properties of free chunks */
void __do_check_free_chunk(mchunkptr p)
{
    size_t sz = p->size & ~PREV_INUSE;
#ifdef __DOASSERTS__
    mstate av = get_malloc_state();
    mchunkptr next = chunk_at_offset(p, sz);
#endif

    __do_check_chunk(p);

    /* Chunk must claim to be free ... */
    assert(!inuse(p));
    assert(!chunk_is_mmapped(p));

    /* Unless a special marker, must have OK fields */
    if ((unsigned long)(sz) >= MINSIZE)
    {
        assert((sz & MALLOC_ALIGN_MASK) == 0);
        assert(aligned_OK(chunk2mem(p)));
        /* ... matching footer field */
        assert(next->prev_size == sz);
        /* ... and is fully consolidated */
        assert(prev_inuse(p));
        assert(next == av->top || inuse(next));

        /* ... and has minimally sane links */
        assert(p->fd->bk == p);
        assert(p->bk->fd == p);
    }
    else /* markers are always of size (sizeof(size_t)) */
        assert(sz == (sizeof(size_t)));
}

/* Properties of inuse chunks */
void __do_check_inuse_chunk(mchunkptr p)
{
    mstate av = get_malloc_state();
    mchunkptr next;

    __do_check_chunk(p);

    if (chunk_is_mmapped(p))
        return; /* mmapped chunks have no next/prev */

    /* Check whether it claims to be in use ... */
    assert(inuse(p));

    next = next_chunk(p);

    /* ... and is surrounded by OK chunks.
       Since more things can be checked with free chunks than inuse ones,
       if an inuse chunk borders them and debug is on, it's worth doing them.
       */
    if (!prev_inuse(p)) {
        /* Note that we cannot even look at prev unless it is not inuse */
        mchunkptr prv = prev_chunk(p);
        assert(next_chunk(prv) == p);
        __do_check_free_chunk(prv);
    }

    if (next == av->top) {
        assert(prev_inuse(next));
        assert(chunksize(next) >= MINSIZE);
    }
    else if (!inuse(next))
        __do_check_free_chunk(next);
}

/* Properties of chunks recycled from fastbins */
void __do_check_remalloced_chunk(mchunkptr p, size_t s)
{
#ifdef __DOASSERTS__
    size_t sz = p->size & ~PREV_INUSE;
#endif

    __do_check_inuse_chunk(p);

    /* Legal size ... */
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert((unsigned long)(sz) >= MINSIZE);
    /* ... and alignment */
    assert(aligned_OK(chunk2mem(p)));
    /* chunk is less than MINSIZE more than request */
    assert((long)(sz) - (long)(s) >= 0);
    assert((long)(sz) - (long)(s + MINSIZE) < 0);
}

/* Properties of nonrecycled chunks at the point they are malloced */
void __do_check_malloced_chunk(mchunkptr p, size_t s)
{
    /* same as recycled case ... */
    __do_check_remalloced_chunk(p, s);

    /*
       ... plus, must obey implementation invariant that prev_inuse is
       always true of any allocated chunk; i.e., that each allocated
       chunk borders either a previously allocated and still in-use
       chunk, or the base of its memory arena. This is ensured
       by making all allocations from the `lowest' part of any found
       chunk.  This does not necessarily hold however for chunks
       recycled via fastbins.
       */
    assert(prev_inuse(p));
}

/*
  Properties of malloc_state.

  This may be useful for debugging malloc, as well as detecting user
  programmer errors that somehow write into malloc_state.

  If you are extending or experimenting with this malloc, you can
  probably figure out how to hack this routine to print out or
  display chunk addresses, sizes, bins, and other instrumentation.
*/
void __do_check_malloc_state(void)
{
    mstate av = get_malloc_state();
    int i;
    mchunkptr p;
    mchunkptr q;
    mbinptr b;
    unsigned int binbit;
    int empty;
    unsigned int idx;
    size_t size;
    unsigned long total = 0;
    int max_fast_bin;

    /* internal size_t must be no wider than pointer type */
    assert(sizeof(size_t) <= sizeof(char*));

    /* alignment is a power of 2 */
    assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);

    /* cannot run remaining checks until fully initialized */
    if (av->top == 0 || av->top == initial_top(av))
        return;

    /* pagesize is a power of 2 */
    assert((av->pagesize & (av->pagesize-1)) == 0);

    /* properties of fastbins */

    /* max_fast is in allowed range */
    assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));

    max_fast_bin = fastbin_index(av->max_fast);

    for (i = 0; i < NFASTBINS; ++i) {
        p = av->fastbins[i];

        /* all bins past max_fast are empty */
        if (i > max_fast_bin)
            assert(p == 0);

        while (p != 0) {
            /* each chunk claims to be inuse */
            __do_check_inuse_chunk(p);
            total += chunksize(p);
            /* chunk belongs in this bin */
            assert(fastbin_index(chunksize(p)) == i);
            p = p->fd;
        }
    }

    if (total != 0)
        assert(have_fastchunks(av));
    else if (!have_fastchunks(av))
        assert(total == 0);

    /* check normal bins */
    for (i = 1; i < NBINS; ++i) {
        b = bin_at(av,i);

        /* binmap is accurate (except for bin 1 == unsorted_chunks) */
        if (i >= 2) {
            binbit = get_binmap(av,i);
            empty = last(b) == b;
            if (!binbit)
                assert(empty);
            else if (!empty)
                assert(binbit);
        }

        for (p = last(b); p != b; p = p->bk) {
            /* each chunk claims to be free */
            __do_check_free_chunk(p);
            size = chunksize(p);
            total += size;
            if (i >= 2) {
                /* chunk belongs in bin */
                idx = bin_index(size);
                assert(idx == i);
                /* lists are sorted */
                if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
                    assert(p->bk == b ||
                            (unsigned long)chunksize(p->bk) >=
                            (unsigned long)chunksize(p));
                }
            }
            /* chunk is followed by a legal chain of inuse chunks */
            for (q = next_chunk(p);
                    (q != av->top && inuse(q) &&
                     (unsigned long)(chunksize(q)) >= MINSIZE);
                    q = next_chunk(q))
                __do_check_inuse_chunk(q);
        }
    }

    /* top chunk is OK */
    __do_check_chunk(av->top);

    /* sanity checks for statistics */
    assert(total <= (unsigned long)(av->max_total_mem));
    assert(av->n_mmaps >= 0);
    assert(av->n_mmaps <= av->max_n_mmaps);

    assert((unsigned long)(av->sbrked_mem) <=
            (unsigned long)(av->max_sbrked_mem));

    assert((unsigned long)(av->mmapped_mem) <=
            (unsigned long)(av->max_mmapped_mem));

    assert((unsigned long)(av->max_total_mem) >=
            (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
}
#endif

/* ----------- Routines dealing with system allocation -------------- */

/*
  sysmalloc handles malloc cases requiring more memory from the system.
  On entry, it is assumed that av->top does not have enough
  space to service request for nb bytes, thus requiring that av->top
  be extended or replaced.
*/
static void* __malloc_alloc(size_t nb, mstate av)
{
    mchunkptr       old_top;        /* incoming value of av->top */
    size_t          old_size;       /* its size */
    char*           old_end;        /* its end address */

    long            size;           /* arg to first MORECORE or mmap call */
    char*           brk;            /* return value from MORECORE */

    long            correction;     /* arg to 2nd MORECORE call */
    char*           snd_brk;        /* 2nd return val */

    size_t          front_misalign; /* unusable bytes at front of new space */
    size_t          end_misalign;   /* partial page left at end of new space */
    char*           aligned_brk;    /* aligned offset into brk */

    mchunkptr       p;              /* the allocated/returned chunk */
    mchunkptr       remainder;      /* remainder from allocation */
    unsigned long   remainder_size; /* its size */

    unsigned long   sum;            /* for updating stats */

    size_t          pagemask = av->pagesize - 1;

    /*
       If there is space available in fastbins, consolidate and retry
       malloc from scratch rather than getting memory from system.  This
       can occur only if nb is in smallbin range so we didn't consolidate
       upon entry to malloc. It is much easier to handle this case here
       than in malloc proper.
       */
    if (have_fastchunks(av)) {
        assert(in_smallbin_range(nb));
        __malloc_consolidate(av);
        return malloc(nb - MALLOC_ALIGN_MASK);
    }

    /*
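As the debugging-support comment near the top of the file explains, building this allocator with -D__MALLOC_DEBUGGING turns the __do_check_* routines into active assertion checks, and a later call to malloc_stats or mallinfo then walks every non-mmapped allocated and free chunk while computing its summary. The throwaway test program below is only a sketch of how an application might exercise those checks; it assumes a uClibc toolchain in which this malloc.c was compiled with __MALLOC_DEBUGGING defined, and the allocation sizes and counts are arbitrary.

/* Illustrative sketch, not part of malloc.c: exercise the heap and let
   mallinfo() trigger the allocator's internal consistency checks.
   Assumes this malloc was built with -D__MALLOC_DEBUGGING. */
#include <stdlib.h>
#include <malloc.h>   /* struct mallinfo, mallinfo() */

int main(void)
{
    void *blocks[32];
    int i;

    /* Create a mix of in-use and free chunks across several bins:
       alternate small and larger requests, then free every other one. */
    for (i = 0; i < 32; i++)
        blocks[i] = malloc((i % 2) ? 24 : 2048);
    for (i = 0; i < 32; i += 2)
        free(blocks[i]);

    /* Per the debugging comment above, with __MALLOC_DEBUGGING set this
       call re-checks every non-mmapped allocated and free chunk while
       computing the summary, asserting on the first inconsistency. */
    struct mallinfo mi = mallinfo();
    (void)mi;

    for (i = 1; i < 32; i += 2)
        free(blocks[i]);
    return 0;
}

Under a normal build the same program still runs; the checks simply compile away, which is why the comment recommends the flag mainly while tracking down heap corruption or modifying the allocator itself.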

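Several of the assertions in __do_check_free_chunk above encode the boundary-tag layout this allocator relies on: a free chunk's size is duplicated as the prev_size footer of the chunk that follows it, the PREV_INUSE bit of that following chunk is clear, and a free chunk's own PREV_INUSE bit is set because free neighbours are always consolidated. The standalone model below only illustrates that relationship with a simplified, hypothetical two-field header; it is not the real malloc_chunk definition from malloc.h.

/* Illustrative model only: a simplified header used to show the
   boundary-tag invariants that __do_check_free_chunk asserts.  The
   field names mirror malloc_chunk, but this layout is hypothetical. */
#include <assert.h>
#include <stddef.h>

#define PREV_INUSE 0x1u

struct mini_chunk {
    size_t prev_size; /* size of previous chunk, valid only when it is free */
    size_t size;      /* this chunk's size; low bit = previous chunk in use */
};

int main(void)
{
    size_t heap[32] = {0};  /* toy arena, suitably aligned for size_t */

    /* One "free" chunk of 64 bytes, followed immediately by its neighbour. */
    struct mini_chunk *p    = (struct mini_chunk *)heap;
    struct mini_chunk *next = (struct mini_chunk *)((char *)heap + 64);

    p->size         = 64 | PREV_INUSE; /* p's own predecessor is in use     */
    next->prev_size = 64;              /* footer: copy of p's size (p free) */
    next->size      = 128;             /* PREV_INUSE clear: records p free  */

    size_t sz = p->size & ~(size_t)PREV_INUSE;

    /* The same relationships checked in __do_check_free_chunk: */
    assert(next->prev_size == sz);          /* matching footer field    */
    assert(p->size & PREV_INUSE);           /* fully consolidated       */
    assert((next->size & PREV_INUSE) == 0); /* neighbour sees p as free */
    return 0;
}

The footer copy is what lets free() locate and merge an adjacent free predecessor in constant time, which is also why the check routine insists that a free chunk's predecessor is always in use.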