
dlmalloc.src

F:worksip2440a board可启动u-boot-like.tar.gz
Page 1 of 5
/*
  USE_DL_PREFIX will prefix all public routines with the string 'dl'.
  Useful to quickly avoid procedure declaration conflicts and linker
  symbol conflicts with existing memory allocation routines.
*/

/* #define USE_DL_PREFIX */

/*
  Special defines for linux libc

  Except when compiled using these special defines for Linux libc
  using weak aliases, this malloc is NOT designed to work in
  multithreaded applications.  No semaphores or other concurrency
  control are provided to ensure that multiple malloc or free calls
  don't run at the same time, which could be disastrous. A single
  semaphore could be used across malloc, realloc, and free (which is
  essentially the effect of the linux weak alias approach). It would
  be hard to obtain finer granularity.
*/

#ifdef INTERNAL_LINUX_C_LIB

#if __STD_C

Void_t * __default_morecore_init (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;

#else

Void_t * __default_morecore_init ();
Void_t *(*__morecore)() = __default_morecore_init;

#endif

#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0
#define MORECORE_CLEARS 1

#else /* INTERNAL_LINUX_C_LIB */

#if __STD_C
extern Void_t*     sbrk(ptrdiff_t);
#else
extern Void_t*     sbrk();
#endif

#ifndef MORECORE
#define MORECORE sbrk
#endif

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE -1
#endif

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

#endif /* INTERNAL_LINUX_C_LIB */

#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)

#define cALLOc		__libc_calloc
#define fREe		__libc_free
#define mALLOc		__libc_malloc
#define mEMALIGn	__libc_memalign
#define rEALLOc		__libc_realloc
#define vALLOc		__libc_valloc
#define pvALLOc		__libc_pvalloc
#define mALLINFo	__libc_mallinfo
#define mALLOPt		__libc_mallopt

#pragma weak calloc = __libc_calloc
#pragma weak free = __libc_free
#pragma weak cfree = __libc_free
#pragma weak malloc = __libc_malloc
#pragma weak memalign = __libc_memalign
#pragma weak realloc = __libc_realloc
#pragma weak valloc = __libc_valloc
#pragma weak pvalloc = __libc_pvalloc
#pragma weak mallinfo = __libc_mallinfo
#pragma weak mallopt = __libc_mallopt

#else

#ifdef USE_DL_PREFIX
#define cALLOc		dlcalloc
#define fREe		dlfree
#define mALLOc		dlmalloc
#define mEMALIGn	dlmemalign
#define rEALLOc		dlrealloc
#define vALLOc		dlvalloc
#define pvALLOc		dlpvalloc
#define mALLINFo	dlmallinfo
#define mALLOPt		dlmallopt
#else /* USE_DL_PREFIX */
#define cALLOc		calloc
#define fREe		free
#define mALLOc		malloc
#define mEMALIGn	memalign
#define rEALLOc		realloc
#define vALLOc		valloc
#define pvALLOc		pvalloc
#define mALLINFo	mallinfo
#define mALLOPt		mallopt
#endif /* USE_DL_PREFIX */

#endif

/* Public routines */

#if __STD_C

Void_t* mALLOc(size_t);
void    fREe(Void_t*);
Void_t* rEALLOc(Void_t*, size_t);
Void_t* mEMALIGn(size_t, size_t);
Void_t* vALLOc(size_t);
Void_t* pvALLOc(size_t);
Void_t* cALLOc(size_t, size_t);
void    cfree(Void_t*);
int     malloc_trim(size_t);
size_t  malloc_usable_size(Void_t*);
void    malloc_stats();
int     mALLOPt(int, int);
struct mallinfo mALLINFo(void);
#else
Void_t* mALLOc();
void    fREe();
Void_t* rEALLOc();
Void_t* mEMALIGn();
Void_t* vALLOc();
Void_t* pvALLOc();
Void_t* cALLOc();
void    cfree();
int     malloc_trim();
size_t  malloc_usable_size();
void    malloc_stats();
int     mALLOPt();
struct mallinfo mALLINFo();
#endif

#ifdef __cplusplus
};  /* end of extern "C" */
#endif

/* ---------- To make a malloc.h, end cutting here ------------ */
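/*
  Editor's sketch: how the USE_DL_PREFIX scheme above is meant to be
  used.  Assuming this file is compiled with -DUSE_DL_PREFIX, the
  public routines are exported as dlmalloc(), dlfree(), etc., so they
  can sit beside the platform's own malloc()/free() without linker
  conflicts.  The guard macro DLMALLOC_PREFIX_EXAMPLE and the function
  prefix_example() are illustrative names, not part of the original
  source.
*/
#ifdef DLMALLOC_PREFIX_EXAMPLE
#include <stdlib.h>	/* the C library's own malloc/free */

extern void* dlmalloc(size_t);
extern void  dlfree(void*);

static int prefix_example (void)
{
	char* a = dlmalloc (64);	/* served by this allocator */
	char* b = malloc (64);		/* served by the C library */
	if (!a || !b)
		return -1;
	dlfree (a);	/* each pointer must go back to the heap it came from */
	free (b);
	return 0;
}
#endif /* DLMALLOC_PREFIX_EXAMPLE */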
/*
  Emulation of sbrk for WIN32
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/

#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))

#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup ()
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
							gNextAddress - gAddressBase,
							MEM_DECOMMIT);
		assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;
}
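/*
  Editor's sketch: the round-up arithmetic behind AlignPage() and
  AlignPage64K() above.  For a power-of-two boundary B, the expression
  (x + B - 1) & ~(B - 1) rounds x up to the next multiple of B, which
  is why findRegion() can step to the next 64K boundary without a
  division.  Guarded out of normal builds; the ex_* names below are
  illustrative, not part of the original source.
*/
#ifdef DLMALLOC_ALIGN_EXAMPLE
#include <assert.h>

static unsigned long ex_round_up (unsigned long x, unsigned long b)
{
	/* b must be a power of two, as 0x10000 is in AlignPage64K() */
	return (x + (b - 1)) & ~(b - 1);
}

static void align_example (void)
{
	assert (ex_round_up (0x12345, 0x10000) == 0x20000);
	assert (ex_round_up (0x20000, 0x10000) == 0x20000);	/* already aligned */
	assert (ex_round_up (1, 0x1000) == 0x1000);
}
#endif /* DLMALLOC_ALIGN_EXAMPLE */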
void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
											MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) >
				   (gAddressBase + gAllocatedSize))
		{
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (new_address == 0)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
												MEM_RESERVE, PAGE_NOACCESS);
				/* repeat in case of race condition */
				/* The region that we found has been snagged */
				/* by another thread */
			}
			while (gAddressBase == 0);

			assert (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
								(size + gNextAddress -
								 AlignPage (gNextAddress)),
								MEM_COMMIT, PAGE_READWRITE);
			if (res == 0)
				return (void*)-1;
		}
		tmp = (void*)gNextAddress;
		gNextAddress = (unsigned int)tmp + size;
		return tmp;
	}
	else if (size < 0)
	{
		unsigned int alignedGoal = AlignPage (gNextAddress + size);
		/* Trim by releasing the virtual memory */
		if (alignedGoal >= gAddressBase)
		{
			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
						 MEM_DECOMMIT);
			gNextAddress = gNextAddress + size;
			return (void*)gNextAddress;
		}
		else
		{
			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
						 MEM_DECOMMIT);
			gNextAddress = gAddressBase;
			return (void*)-1;
		}
	}
	else
	{
		return (void*)gNextAddress;
	}
}

#endif
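/*
  Editor's sketch: wsbrk() above follows the Unix sbrk() contract that
  MORECORE depends on: a positive argument extends the break and
  returns the old break address, a negative argument releases memory,
  and zero reports the current break.  The illustration below uses
  POSIX sbrk() itself; it is a hedged sketch, not part of the
  original source.
*/
#ifdef DLMALLOC_MORECORE_EXAMPLE
#include <unistd.h>
#include <assert.h>

static void morecore_example (void)
{
	void* old_brk = sbrk (0);	/* zero: query the current break */
	void* got     = sbrk (4096);	/* positive: grow by one page... */
	assert (got == old_brk);	/* ...and get the old break back */
	sbrk (-4096);			/* negative: return the page */
}
#endif /* DLMALLOC_MORECORE_EXAMPLE */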
/*
  Type declarations
*/

struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of previous chunk, if allocated            | |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             User data starts here...                          .
	    .                                                               .
	    .             (malloc_usable_space() bytes)                     .
	    .                                                               |
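/*
  Editor's sketch: the pointer arithmetic implied by struct
  malloc_chunk and the diagram above.  The user pointer ("mem") sits
  two size fields past the chunk header, and the low bit of `size'
  (the P bit in the diagram) records whether the previous chunk is in
  use.  The full source defines equivalent conversion macros
  (chunk2mem/mem2chunk) on a later page; the ex_* names here are
  illustrative only, and only the single status bit shown above is
  masked.
*/
#ifdef DLMALLOC_CHUNK_EXAMPLE
#define EX_PREV_INUSE 0x1UL

/* user memory begins after the prev_size and size fields */
static Void_t* ex_chunk2mem (mchunkptr p)
{
	return (Void_t*)((char*)p + 2*sizeof(INTERNAL_SIZE_T));
}

static mchunkptr ex_mem2chunk (Void_t* mem)
{
	return (mchunkptr)((char*)mem - 2*sizeof(INTERNAL_SIZE_T));
}

/* chunk size with the status bit masked off */
static INTERNAL_SIZE_T ex_chunksize (mchunkptr p)
{
	return p->size & ~EX_PREV_INUSE;
}
#endif /* DLMALLOC_CHUNK_EXAMPLE */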