⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 dlmalloc.c

📁 -
💻 C
📖 第 1 页 / 共 5 页
字号:
#ifdef INTERNAL_LINUX_C_LIB#if __STD_CVoid_t * __default_morecore_init (ptrdiff_t);Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;#elseVoid_t * __default_morecore_init ();Void_t *(*__morecore)() = __default_morecore_init;#endif#define MORECORE (*__morecore)#define MORECORE_FAILURE 0#define MORECORE_CLEARS 1 #else /* INTERNAL_LINUX_C_LIB */#if __STD_Cextern Void_t*     sbrk(ptrdiff_t);#elseextern Void_t*     sbrk();#endif#ifndef MORECORE#define MORECORE sbrk#endif#ifndef MORECORE_FAILURE#define MORECORE_FAILURE -1#endif#ifndef MORECORE_CLEARS#define MORECORE_CLEARS 1#endif#endif /* INTERNAL_LINUX_C_LIB */#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)#define cALLOc		__libc_calloc#define fREe		__libc_free#define mALLOc		__libc_malloc#define mEMALIGn	__libc_memalign#define rEALLOc		__libc_realloc#define vALLOc		__libc_valloc#define pvALLOc		__libc_pvalloc#define mALLINFo	__libc_mallinfo#define mALLOPt		__libc_mallopt#pragma weak calloc = __libc_calloc#pragma weak free = __libc_free#pragma weak cfree = __libc_free#pragma weak malloc = __libc_malloc#pragma weak memalign = __libc_memalign#pragma weak realloc = __libc_realloc#pragma weak valloc = __libc_valloc#pragma weak pvalloc = __libc_pvalloc#pragma weak mallinfo = __libc_mallinfo#pragma weak mallopt = __libc_mallopt#else#define cALLOc		calloc#define fREe		free#define mALLOc		malloc#define mEMALIGn	memalign#define rEALLOc		realloc#define vALLOc		valloc#define pvALLOc		pvalloc#define mALLINFo	mallinfo#define mALLOPt		mallopt#endif/* Public routines */#if __STD_CVoid_t* mALLOc(size_t);void    fREe(Void_t*);Void_t* rEALLOc(Void_t*, size_t);Void_t* mEMALIGn(size_t, size_t);Void_t* vALLOc(size_t);Void_t* pvALLOc(size_t);Void_t* cALLOc(size_t, size_t);void    cfree(Void_t*);int     malloc_trim(size_t);size_t  malloc_usable_size(Void_t*);void    malloc_stats();int     mALLOPt(int, int);struct mallinfo mALLINFo(void);#elseVoid_t* mALLOc();void    fREe();Void_t* rEALLOc();Void_t* mEMALIGn();Void_t* 
vALLOc();Void_t* pvALLOc();Void_t* cALLOc();void    cfree();int     malloc_trim();size_t  malloc_usable_size();void    malloc_stats();int     mALLOPt();struct mallinfo mALLINFo();#endif#ifdef __cplusplus};  /* end of extern "C" */#endif/* ---------- To make a malloc.h, end cutting here ------------ *//*   Emulation of sbrk for WIN32  All code within the ifdef WIN32 is untested by me.*/#ifdef WIN32#define AlignPage(add) (((add) + (malloc_getpagesize-1)) &~(malloc_getpagesize-1))/* resrve 64MB to insure large contiguous space */ #define RESERVED_SIZE (1024*1024*64)#define NEXT_SIZE (2048*1024)#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)struct GmListElement;typedef struct GmListElement GmListElement;struct GmListElement {	GmListElement* next;	void* base;};static GmListElement* head = 0;static unsigned int gNextAddress = 0;static unsigned int gAddressBase = 0;static unsigned int gAllocatedSize = 0;staticGmListElement* makeGmListElement (void* bas){	GmListElement* this;	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));	ASSERT (this);	if (this)	{		this->base = bas;		this->next = head;		head = this;	}	return this;}void gcleanup (){	BOOL rval;	ASSERT ( (head == NULL) || (head->base == (void*)gAddressBase));	if (gAddressBase && (gNextAddress - gAddressBase))	{		rval = VirtualFree ((void*)gAddressBase, 							gNextAddress - gAddressBase, 							MEM_DECOMMIT);        ASSERT (rval);	}	while (head)	{		GmListElement* next = head->next;		rval = VirtualFree (head->base, 0, MEM_RELEASE);		ASSERT (rval);		LocalFree (head);		head = next;	}}		staticvoid* findRegion (void* start_address, unsigned long size){	MEMORY_BASIC_INFORMATION info;	while ((unsigned long)start_address < TOP_MEMORY)	{		VirtualQuery (start_address, &info, sizeof (info));		if (info.State != MEM_FREE)			start_address = (char*)info.BaseAddress + info.RegionSize;		else if (info.RegionSize >= size)			return start_address;		else			start_address = (char*)info.BaseAddress + info.RegionSize; 	}	
return NULL;	}void* wsbrk (long size){	void* tmp;	if (size > 0)	{		if (gAddressBase == 0)		{			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));			gNextAddress = gAddressBase = 				(unsigned int)VirtualAlloc (NULL, gAllocatedSize, 											MEM_RESERVE, PAGE_NOACCESS);		} else if (AlignPage (gNextAddress + size) > (gAddressBase +gAllocatedSize))		{			long new_size = max (NEXT_SIZE, AlignPage (size));			void* new_address = (void*)(gAddressBase+gAllocatedSize);			do 			{				new_address = findRegion (new_address, new_size);								if (new_address == 0)					return (void*)-1;				gAddressBase = gNextAddress =					(unsigned int)VirtualAlloc (new_address, new_size,												MEM_RESERVE, PAGE_NOACCESS);				// repeat in case of race condition				// The region that we found has been snagged 				// by another thread			}			while (gAddressBase == 0);			ASSERT (new_address == (void*)gAddressBase);			gAllocatedSize = new_size;			if (!makeGmListElement ((void*)gAddressBase))				return (void*)-1;		}		if ((size + gNextAddress) > AlignPage (gNextAddress))		{			void* res;			res = VirtualAlloc ((void*)AlignPage (gNextAddress),								(size + gNextAddress - 								 AlignPage (gNextAddress)), 								MEM_COMMIT, PAGE_READWRITE);			if (res == 0)				return (void*)-1;		}		tmp = (void*)gNextAddress;		gNextAddress = (unsigned int)tmp + size;		return tmp;	}	else if (size < 0)	{		unsigned int alignedGoal = AlignPage (gNextAddress + size);		/* Trim by releasing the virtual memory */		if (alignedGoal >= gAddressBase)		{			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,  						 MEM_DECOMMIT);			gNextAddress = gNextAddress + size;			return (void*)gNextAddress;		}		else 		{			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,						 MEM_DECOMMIT);			gNextAddress = gAddressBase;			return (void*)-1;		}	}	else	{		return (void*)gNextAddress;	}}#endif/*  Type declarations*/struct malloc_chunk{  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). 
*/
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;   /* back link (see fd). */
};

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk.  (This makes it easier to
    deal with alignments etc).

    The two exceptions to all this are

     1. The special chunk `top', which doesn't bother using the
        trailing size field since there is no
        next contiguous chunk that would have to index off it. (After
        initialization, `top' is forced to always exist.  If it would
        become less than MINSIZE bytes long, it is replenished via
        malloc_extend_top.)

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit (IS_MMAPPED) set in their size fields.  Because they are
        never merged or traversed from any other chunk, they have no
        foot size or inuse information.

    Available chunks are kept in any of several places (all declared below):

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -