
📄 mallocx.c

📁 mallocx.c is part of the Boehm-Demers-Weiser conservative garbage collector: extra allocation routines that are used less often than those in malloc.c, kept in a separate object file so statically linked executables can omit them.
💻 C
📖 Page 1 of 2
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();     /* in misc.c, behaves like identity */
void GC_extend_size_map();         /* in misc.c. */

GC_bool GC_alloc_reclaim_list();   /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

void * GC_generic_or_special_malloc(size_t lb, int knd)
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#     endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb, knd));
    }
}

/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.             */
/* Shrinking of large blocks is not implemented well.                 */
void * GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    size_t sz;       /* Current size in bytes  */
    size_t orig_sz;  /* Original sz in bytes   */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          register word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = sz;
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
#         ifdef MARK_BIT_PER_OBJ
            GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#         else
            GC_ASSERT(hhdr -> hb_large_block &&
                      hhdr -> hb_map[ANY_INDEX] == 1);
#         endif
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }

    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
              /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              void * result =
                        GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return original object.  But this     */
                  /* gives the client warning of imminent disaster.   */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          void * result =
                GC_generic_or_special_malloc((word)lb, obj_kind);
          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.       */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, RA "unknown", 0)

void * realloc(void * p, size_t lb)
  {
    return(REDIRECT_REALLOC(p, lb));
  }
# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */

/* Allocate memory such that only pointers to near the        */
/* beginning of the object are considered.                    */
/* We avoid holding allocation lock while we clear memory.    */
void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lw;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    lb_rounded = WORDS_TO_BYTES(lw);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_bytes_allocd += lb_rounded;
    UNLOCK();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

void * GC_malloc_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

void * GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}

/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                       */
void GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed.                */
void GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}

#if defined(THREADS)

extern signed_word GC_bytes_found;   /* Protected by GC lock. */
#ifdef PARALLEL_MARK
volatile signed_word GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
#endif /* PARALLEL_MARK */

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We assume that the size is a multiple of GRANULE_BYTES.              */
/* We return the free-list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                   */
/* Note that the client should usually clear the link field.            */
void GC_generic_malloc_many(size_t lb, int k, void **result)
{
void *op;
void *p;
void **opp;
size_t lw;      /* Length in words.     */
size_t lg;      /* Length in granules.  */
signed_word my_bytes_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

    GC_ASSERT((lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (GC_have_errors) GC_print_all_errors();
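
The comment above GC_realloc promises that the object may move and that its kind (atomic, normal, uncollectable) is preserved. A minimal client-side sketch through the public gc.h interface, assuming the collector is linked as libgc (GC_INIT, GC_MALLOC_ATOMIC, and GC_REALLOC are documented gc.h entry points; the 32- and 128-byte sizes are purely illustrative):

#include <string.h>
#include "gc.h"

int main(void)
{
    char *buf;

    GC_INIT();
    buf = GC_MALLOC_ATOMIC(32);   /* pointer-free (PTRFREE) kind        */
    if (buf == 0) return 1;
    strcpy(buf, "hello");
    /* May move the object; the new block keeps the atomic kind and    */
    /* the old contents, per GC_realloc in the listing above.          */
    buf = GC_REALLOC(buf, 128);
    if (buf == 0) return 1;       /* allocation failed                  */
    return 0;
}

Note that when the collector is built with REDIRECT_MALLOC and REDIRECT_REALLOC, the plain realloc defined in the listing forwards here, so ordinary C code gets the same behavior without calling GC_REALLOC explicitly.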
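GC_malloc_ignore_off_page trades precision for cheaper scanning of very large objects: the collector only promises to recognize pointers near the beginning of the object, so the caller must keep a pointer to (or near) the base alive for the object's lifetime. A hedged sketch of that contract (the 1 MiB size and the function name make_big_buffer are illustrative, not from the source):

#include "gc.h"

/* 'base' must stay reachable for as long as any interior pointer into */
/* the buffer is in use; only pointers near the start of the object    */
/* are guaranteed to keep it alive.                                    */
static char *base;

void make_big_buffer(void)
{
    base = GC_malloc_ignore_off_page(1 << 20);  /* large, so worth it */
}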
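The comment block before GC_generic_malloc_many describes the intended pattern: a thread refills a private free list with one lock acquisition, then hands out objects of exactly that size. In builds with THREADS defined, gc.h exposes this as GC_malloc_many together with the GC_NEXT macro for the link word. A sketch of a consumer, assuming NORMAL-kind 64-byte objects (64 is an illustrative size that is assumed to be a multiple of GRANULE_BYTES on the target; alloc_from_cache and tl_free_list are hypothetical names):

#include "gc.h"

static void *tl_free_list;   /* would live in thread-local storage */

void *alloc_from_cache(void)
{
    void *obj;

    if (tl_free_list == 0) {
        /* One lock acquisition refills the whole list. */
        tl_free_list = GC_malloc_many(64);
        if (tl_free_list == 0) return 0;
    }
    obj = tl_free_list;
    tl_free_list = GC_NEXT(obj);
    GC_NEXT(obj) = 0;   /* clear the link field, as the comments advise */
    return obj;
}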
