📄 mallocx.c
字号:
/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996 by Silicon Graphics. All rights reserved. * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. *//* * These are extra allocation routines which are likely to be less * frequently used than those in malloc.c. They are separate in the * hope that the .o file will be excluded from statically linked * executables. We should probably break this up further. */#include <stdio.h>#include "private/gc_priv.h"extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */void GC_extend_size_map(); /* in misc.c. */GC_bool GC_alloc_reclaim_list(); /* in malloc.c *//* Some externally visible but unadvertised variables to allow access to *//* free lists from inlined allocators without including gc_priv.h *//* or introducing dependencies on internal data structure layouts. 
*/

ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

/* Allocate lb bytes of an object of kind knd, dispatching to the	*/
/* dedicated entry point for each known kind; any other kind falls	*/
/* through to GC_generic_malloc.					*/
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
	  case STUBBORN:
	    return(GC_malloc_stubborn((size_t)lb));
#       endif
	case PTRFREE:
	    return(GC_malloc_atomic((size_t)lb));
	case NORMAL:
	    return(GC_malloc((size_t)lb));
	case UNCOLLECTABLE:
	    return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
	  case AUNCOLLECTABLE:
	    return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
	default:
	    return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least	*/
/* lb bytes.  The object may be (and quite likely will be) moved.	*/
/* The kind (e.g. atomic) is the same as that of the old.		*/
/* Shrinking of large blocks is not implemented well.			*/
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;	 /* Current size in bytes	*/
register word orig_sz;	 /* Original sz in bytes	*/
int obj_kind;

    if (p == 0) return(GC_malloc(lb));	/* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
	/* Round it up to the next whole heap block */
	  register word descr;

	  sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
	  hhdr -> hb_sz = BYTES_TO_WORDS(sz);
	  /* The block header now describes the rounded-up size, so the	*/
	  /* mark descriptor may have to be adjusted along with it.	*/
	  descr = GC_obj_kinds[obj_kind].ok_descriptor;
	  if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
	  hhdr -> hb_descr = descr;
	  /* Keep the uncollectable-bytes counter in step with the	*/
	  /* size change.						*/
	  if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
	  /* Extra area is already cleared by GC_alloc_large_and_clear.	*/
    }
    if (ADD_SLOP(lb) <= sz) {
	if (lb >= (sz >> 1)) {
	    /* New size still fits and uses at least half of the	*/
	    /* current block: keep the object in place.			*/
#	    ifdef STUBBORN_ALLOC
	        if (obj_kind == STUBBORN) GC_change_stubborn(p);
#	    endif
	    if (orig_sz > lb) {
	      /* Clear unneeded part of object to avoid bogus pointer	*/
	      /* tracing.						*/
	      /* Safe for stubborn objects.				*/
	        BZERO(((ptr_t)p) + lb, orig_sz - lb);
	    }
	    return(p);
	} else {
	    /* shrink */
	      GC_PTR result =
			GC_generic_or_special_malloc((word)lb, obj_kind);

	      if (result == 0) return(0);
	          /* Could also return original object.  But this	*/
	          /* gives the client warning of imminent disaster.	*/
	      BCOPY(p, result, lb);
#	      ifndef IGNORE_FREE
		GC_free(p);
#	      endif
	      return(result);
	}
    } else {
	/* grow */
	  GC_PTR result =
		GC_generic_or_special_malloc((word)lb, obj_kind);

	  if (result == 0) return(0);
	  /* Copy only the old contents (sz bytes); the tail of the new	*/
	  /* object is left as the allocator provided it.		*/
	  BCOPY(p, result, sz);
#	  ifndef IGNORE_FREE
	    GC_free(p);
#	  endif
	  return(result);
    }
}

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC
/* As with malloc, avoid two levels of extra calls here.	*/
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
	GC_debug_realloc(p, lb, RA "unknown", 0)

/* Replacement for the C library realloc when malloc redirection is	*/
/* enabled; forwards directly to REDIRECT_REALLOC.			*/
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
  {
    return(REDIRECT_REALLOC(p, lb));
  }

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */

/* Allocate memory such that only pointers to near the		*/
/* beginning of the object are considered.			*/
/* We avoid holding allocation lock while we clear memory.
*/
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    /* Small objects are allocated normally: the IGNORE_OFF_PAGE flag	*/
    /* is only passed for large blocks below.				*/
    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors	*/
              /* before we release the lock.				*/
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    /* NOTE(review): lw is added to GC_words_allocd even when the	*/
    /* allocation failed — confirm this matches the accounting done	*/
    /* by the other allocation entry points.				*/
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        /* Bulk clearing of the object happens here, after the		*/
        /* allocation lock has been released.				*/
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

/* Allocate lb bytes of NORMAL (pointer-containing) memory for which	*/
/* only pointers near the beginning of the object are considered.	*/
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

/* Same as above, but for PTRFREE (pointer-free) memory.		*/
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}

/* Increment GC_words_allocd from code that doesn't have direct access	*/
/* to GC_arrays.							*/
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.						*/
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */

/* Analogous to the above, but assumes a small object size, and		*/
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.
*/
/* Takes lw in words, not bytes; assumes the allocation lock is		*/
/* already held by the caller.						*/
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            /* NOTE(review): the lock is released here, yet the caller	*/
            /* GC_generic_malloc_words_small also executes UNLOCK()	*/
            /* after we return — verify that UNLOCK is safe to issue	*/
            /* twice on this out-of-memory path.			*/
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    /* Pop the object off the free list and detach its link word.	*/
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}

/* Analogous to the above, but assumes a small object size, and		*/
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.		*/
#ifdef __STDC__
     ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
     ptr_t GC_generic_malloc_words_small(lw, k)
     register word lw;
     register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}

#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since	*/
                        /* we released the GC lock.  Instead of		*/
                        /* reacquiring the GC lock just to add this in,	*/
                        /* we add it in the next time we reacquire	*/
                        /* the lock.  (Atomically adding it doesn't	*/
                        /* work, since we would have to atomically	*/
                        /* update it in GC_malloc, which is too		*/
                        /* expensive.)					*/
#endif /* PARALLEL_MARK */

/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();

/* Return a list of 1 or more objects of the indicated size, linked	*/
/* through the first word in the object.  This has the advantage that	*/
/* it acquires the allocation lock only once, and may greatly reduce	*/
/* time wasted contending for the allocation lock.  Typical usage would	*/
/* be in a thread that requires many items of the same size.
It would *//* keep its own free list in thread-local storage, and call *//* GC_malloc_many or friends to replenish it. (We do not round up *//* object sizes, since a call indicates the intention to consume many *//* objects of exactly this size.) *//* We return the free-list by assigning it to *result, since it is *//* not safe to return, e.g. a linked list of pointer-free objects, *//* since the collector would not retain the entire list if it were *//* invoked just as we were returning. *//* Note that the client should usually clear the link field. */void GC_generic_malloc_many(lb, k, result)register word lb;register int k;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -