
vxmalloc.c

This version of malloc for VxWorks contains two different algorithms. One is the BSD-based Kingsley allocator; the other, excerpted below, derives from Doug Lea's malloc.

Language: C
Page 1 of 5
#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (bin_at(0)->fd)   /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */

/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to in malloc_extend_top.
*/

#define initial_top    ((mchunkptr)(bin_at(0)))

/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

mbinptr av_[NAV * 2 + 2] = {
 0, 0,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};

/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                          \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
                                          126)

/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN         63
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
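/*
   Illustrative sketch, not part of the original vxmalloc.c: the function
   below mirrors the bin_index()/smallbin_index() arithmetic so the tiered
   bin spacing (8 bytes apart below 512, then 64, 512, 4096, ... bytes per
   bin) can be checked stand-alone.
*/
#include <stdio.h>

static unsigned demo_bin_index(unsigned long sz)
{
    unsigned long t = sz >> 9;
    if (t == 0)    return (unsigned)(sz >> 3);          /* small bins, 8 bytes apart */
    if (t <= 4)    return (unsigned)(56 + (sz >> 6));   /* 64-byte spacing           */
    if (t <= 20)   return (unsigned)(91 + (sz >> 9));   /* 512-byte spacing          */
    if (t <= 84)   return (unsigned)(110 + (sz >> 12));
    if (t <= 340)  return (unsigned)(119 + (sz >> 15));
    if (t <= 1364) return (unsigned)(124 + (sz >> 18));
    return 126;                                         /* everything larger         */
}

int main(void)
{
    unsigned long sizes[] = { 16, 64, 504, 512, 2048, 4096, 40960, 1048576 };
    size_t i;
    for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
        printf("size %8lu -> bin %3u\n", sizes[i], demo_bin_index(sizes[i]));
    return 0;  /* e.g. 16 -> 2, 504 -> 63 (MAX_SMALLBIN), 4096 -> 99, 1048576 -> 126 */
}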
/*
    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binblocks' is a
    one-word bitvector recording whether groups of BINBLOCKWIDTH bins
    have any (possibly) non-empty bins, so they can be skipped over
    all at once during traversals. The bits are NOT always
    cleared as soon as all bins in a block are empty, but instead only
    when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */

/* bin<->block macros */
#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
#define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
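/*
   Illustrative sketch, not part of the original vxmalloc.c: with
   BINBLOCKWIDTH == 4, bin index ix maps to bit ix/4 of the bitvector, so
   marking bins 0..10 sets bits 0..2. A stand-alone restatement of the
   idx2binblock()/mark_binblock() arithmetic:
*/
#include <stdio.h>

int main(void)
{
    unsigned blocks = 0;                      /* stands in for binblocks */
    unsigned ix;
    for (ix = 0; ix <= 10; ix++)
        blocks |= (unsigned)1 << (ix / 4);    /* mark_binblock(ix)       */
    printf("binblocks = 0x%x\n", blocks);     /* prints binblocks = 0x7  */
    return 0;
}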
/*  Other static bookkeeping data */

/* variables holding tunable values */

static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad          = DEFAULT_TOP_PAD;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem = 0;

/* The maximum via either sbrk or mmap */
static unsigned long max_total_mem = 0;

/* internal working copy of mallinfo */
static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* The total memory obtained from system via sbrk */
#define sbrked_mem  (current_mallinfo.arena)

/*
  Debugging support
*/

/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

static void do_check_chunk(mchunkptr p)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* Check for legal address ... */
  ASSERT((char*)p >= sbrk_base);
  if (p != top)
    ASSERT((char*)p + sz <= (char*)top);
  else
    ASSERT((char*)p + sz <= sbrk_base + sbrked_mem);
}

static void do_check_free_chunk(mchunkptr p)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  ASSERT(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
    ASSERT(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    ASSERT(next->prev_size == sz);
    /* ... and is fully consolidated */
    ASSERT(prev_inuse(p));
    ASSERT(next == top || inuse(next));

    /* ... and has minimally sane links */
    ASSERT(p->fd->bk == p);
    ASSERT(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    ASSERT(sz == SIZE_SZ);
}

static void do_check_inuse_chunk(mchunkptr p)
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  ASSERT(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    ASSERT(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    ASSERT(prev_inuse(next));
    ASSERT(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);
}

static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  ASSERT((long)sz >= (long)MINSIZE);
  ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
  ASSERT(room >= 0);
  ASSERT(room < (long)MINSIZE);

  /* ... and alignment */
  ASSERT(aligned_OK(chunk2mem(p)));

  /* ... and was allocated at front of an available chunk */
  ASSERT(prev_inuse(p));
}

#define check_free_chunk(P)       do_check_free_chunk(P)
#define check_inuse_chunk(P)      do_check_inuse_chunk(P)
#define check_chunk(P)            do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)

/*
  Macro-based internal utilities
*/

/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/

#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}

/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk = P;                                \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)
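/*
   Illustrative sketch, not part of the original vxmalloc.c: frontlink()
   and unlink() above are the classic circular doubly-linked list splice,
   with the bin header acting as a sentinel that points at itself when
   empty (cf. the IAV initializer). The same pattern on a simplified node
   type, stand-alone:
*/
#include <stdio.h>

struct node { struct node *fd, *bk; int tag; };

static void list_frontlink(struct node *bin, struct node *p)
{
    struct node *bk = bin;             /* insert at the front of the bin */
    struct node *fd = bin->fd;
    p->bk = bk;  p->fd = fd;
    fd->bk = bk->fd = p;
}

static void list_unlink(struct node *p)
{
    struct node *bk = p->bk, *fd = p->fd;
    fd->bk = bk;
    bk->fd = fd;
}

int main(void)
{
    struct node bin, a = {0, 0, 1}, b = {0, 0, 2};
    bin.fd = bin.bk = &bin;            /* empty bin points at itself */
    list_frontlink(&bin, &a);
    list_frontlink(&bin, &b);          /* bin -> b -> a -> bin */
    list_unlink(&a);
    printf("front: %d\n", bin.fd->tag);  /* prints front: 2 */
    return 0;
}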
/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

static void malloc_extend_top(INTERNAL_SIZE_T nb)
{
  char*     brk;                  /* return value from sbrk */
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
  char*     new_brk;              /* return of 2nd sbrk call */
  INTERNAL_SIZE_T top_size;       /* new size of top chunk */

  mchunkptr old_top = top;        /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  char*     old_end = (char*)(chunk_at_offset(old_top, old_top_size));

  /* Pad request with top_pad plus minimal overhead */

  INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
  unsigned long   pagesz    = malloc_getpagesize;

  /* If not the first time through, round to preserve page boundary */
  /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below if there was an intervening foreign sbrk call.) */

  if (sbrk_base != (char*)(-1))
    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);

  brk = (char*)(MORECORE(sbrk_size));

  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
  if (brk == (char*)(MORECORE_FAILURE) ||
      (brk < old_end && old_top != initial_top))
  {
    dbg_printf(" extend-top failed brk %x old_end %x old_top %x init_top %x sbrked_mem %x\n",
               brk, old_end, old_top, initial_top, sbrked_mem);
    return;
  }

  sbrked_mem += sbrk_size;

  if (brk == old_end) /* can just add bytes to current top */
  {
    top_size = sbrk_size + old_top_size;
    set_head(top, top_size | PREV_INUSE);
  }
  else
  {
    if (sbrk_base == (char*)(-1))  /* First time through. Record base */
      sbrk_base = brk;
    else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
      sbrked_mem += brk - (char*)old_end;

    /* Guarantee alignment of first new chunk made from this space */
    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
    if (front_misalign > 0)
    {
      correction = (MALLOC_ALIGNMENT) - front_misalign;
      brk += correction;
    }
    else
      correction = 0;

    /* Guarantee the next brk will be at a page boundary */
    correction += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));

    /* Allocate correction */
    new_brk = (char*)(MORECORE(correction));
    if (new_brk == (char*)(MORECORE_FAILURE))
    {
      dbg_printf("new_brk failed correction %x \n", correction);
      return;
    }

    sbrked_mem += correction;

    top = (mchunkptr)brk;
    top_size = new_brk - brk + correction;
    set_head(top, top_size | PREV_INUSE);

    if (old_top != initial_top)
    {
      /* There must have been an intervening foreign sbrk call. */
      /* A double fencepost is necessary to prevent consolidation */

      /* If not enough space to do this, then user did something very wrong */
      if (old_top_size < MINSIZE)
      {
        set_head(top, PREV_INUSE); /* will force null return from malloc */
        dbg_printf(" extend failed old_top %x != initial_top %x old_top_size %x \n",
                   old_top, initial_top, old_top_size);
        return;
      }

      /* Also keep size a multiple of MALLOC_ALIGNMENT */
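/*
   (The listing breaks off above at the end of page 1 of 5;
   malloc_extend_top() continues on the following pages.)

   Illustrative sketch, not part of the original vxmalloc.c: the round-up
   applied to sbrk_size above is the standard power-of-two trick
   (sz + pagesz - 1) & ~(pagesz - 1), valid whenever pagesz is a power of
   two. A stand-alone check, assuming a 4096-byte page:
*/
#include <stdio.h>

int main(void)
{
    unsigned long pagesz  = 4096;   /* assumed page size (power of two) */
    unsigned long sizes[] = { 1, 4095, 4096, 4097, 10000 };
    size_t i;
    for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    {
        unsigned long rounded = (sizes[i] + (pagesz - 1)) & ~(pagesz - 1);
        printf("%5lu -> %5lu\n", sizes[i], rounded);
    }
    return 0;   /* 1 -> 4096, 4096 -> 4096, 4097 -> 8192, 10000 -> 12288 */
}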
