/* mallocx.c */
/* Allocate a list of small objects of size lb bytes and kind k,	*/
/* linked through their first words, and store it in *result.		*/
/* Large requests fall back to a single object.				*/
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if (0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
    }
    /* First see if we can reclaim a page of objects waiting to be	*/
    /* reclaimed.							*/
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              {
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.	*/
                /* Thus we can't accidentally adjust it down in more	*/
                /* than one thread simultaneously.			*/
                if (my_words_allocd_tmp != 0) {
                  (void)GC_atomic_add(
                              (volatile GC_word *)(&GC_words_allocd_tmp),
                              (GC_word)(-my_words_allocd_tmp));
                  GC_words_allocd += my_words_allocd_tmp;
                }
              }
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              ENABLE_SIGNALS();
              GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#             ifdef NEED_TO_COUNT
                /* We are neither gathering statistics, nor marking in	*/
                /* parallel.  Thus GC_reclaim_generic doesn't count	*/
                /* for us.						*/
                for (p = op; p != 0; p = obj_link(p)) {
                  my_words_allocd += lw;
                }
#             endif
#             if defined(GATHERSTATS)
                /* We also reclaimed memory, so we need to adjust	*/
                /* that count.  This update ought to be atomic; since	*/
                /* it is not, the results may be inaccurate.		*/
                GC_mem_found += my_words_allocd;
#             endif
#             ifdef PARALLEL_MARK
                *result = op;
                (void)GC_atomic_add(
                              (volatile GC_word *)(&GC_words_allocd_tmp),
                              (GC_word)(my_words_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
#             else
                GC_words_allocd += my_words_allocd;
                goto out;
#             endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              DISABLE_SIGNALS();
              LOCK();
              /* GC lock is needed for reclaim list access.  We	*/
              /* must decrement fl_builder_count before reacquiring	*/
              /* the GC lock.  Hopefully this path is rare.		*/
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.	*/
    /* We don't refill it, but we need to use it up before allocating	*/
    /* a new block ourselves.						*/
    opp = &(GC_obj_kinds[k].ok_freelist[lw]);
    if ((op = *opp) != 0) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
    }
    /* Next try to allocate a new block worth of objects of this size.	*/
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                               - BYTES_TO_WORDS(HBLKSIZE) % lw;
#         ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#         endif

          op = GC_build_fl(h, lw, ok -> ok_init, 0);
#         ifdef PARALLEL_MARK
            *result = op;
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            (void) GC_clear_stack(0);
            return;
#         else
            goto out;
#         endif
        }
    }

    /* As a last attempt, try allocating a single object.  Note that	*/
    /* this may trigger a collection or expand the heap.		*/
    op = GC_generic_malloc_inner(lb, k);
    if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}

GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the	*/
/* links would not be seen by the collector.				*/
# endif
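/* A minimal usage sketch (illustrative, not from the original file):	*/
/* GC_malloc_many returns a whole list of lb-byte objects chained	*/
/* through their first words.  Using the GC_NEXT() macro from gc.h,	*/
/* a client can amortize allocation-lock overhead by caching the	*/
/* list and detaching one object per request:				*/
#if 0
#include "gc.h"

static void *next_small_object(void)
{
    static void *batch = 0;        /* leftover objects; links are traced */
    void *op;

    if (batch == 0) {
        batch = GC_malloc_many(16);    /* refill; returns 0 on exhaustion */
        if (batch == 0) return 0;
    }
    op = batch;
    batch = GC_NEXT(op);           /* unlink the head object */
    GC_NEXT(op) = 0;               /* clear the link word before use */
    return op;                     /* 16 bytes, collectable, traced */
}
#endif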
/* Allocate lb bytes of pointerful, traced, but not collectable data	*/
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
              /* We don't need the extra byte, since this won't be	*/
              /* collected anyway.					*/
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if (FASTLOCK_SUCCEEDED() && (op = *opp) != 0) {
            /* See above comment on signals.	*/
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be	*/
            /* cleared only temporarily during a collection, as a	*/
            /* result of the normal free list mark bit clearing.	*/
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised	*/
    /* pointer.  We do need to hold the lock while we adjust		*/
    /* mark bits.							*/
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
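/* A minimal usage sketch (illustrative, not from the original file):	*/
/* an uncollectable object is traced, so pointers stored in it keep	*/
/* ordinary collectable objects alive, but the collector never frees	*/
/* it; the caller must release it explicitly with GC_free().		*/
#if 0
#include "gc.h"

struct root_table {
    void *slots[64];    /* scanned by the collector */
};

static struct root_table *make_root_table(void)
{
    /* Behaves like a manually managed root set. */
    return (struct root_table *)
               GC_malloc_uncollectable(sizeof(struct root_table));
}

static void destroy_root_table(struct root_table *t)
{
    GC_free(t);         /* mandatory: the GC will not reclaim it */
}
#endif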
#ifdef __STDC__
/* Not well tested nor integrated.			*/
/* Debug version is tricky and currently missing.	*/
#include <limits.h>

GC_PTR GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
        if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned.	*/
    }
    /* We could also try to make sure that the real rounded-up object	*/
    /* size is a multiple of align.  That would be correct up to	*/
    /* HBLKSIZE.							*/
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
#endif

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data.	*/
/* This is normally roughly equivalent to the system malloc.		*/
/* But it may be useful if malloc is redefined.				*/
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
              /* We don't need the extra byte, since this won't be	*/
              /* collected anyway.					*/
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if (FASTLOCK_SUCCEEDED() && (op = *opp) != 0) {
            /* See above comment on signals.	*/
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list.	*/
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised	*/
    /* pointer.  We do need to hold the lock while we adjust		*/
    /* mark bits.							*/
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
#endif /* ATOMIC_UNCOLLECTABLE */
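/* A minimal usage sketch (illustrative, not from the original file):	*/
/* GC_memalign over-allocates by align - 1 bytes and rounds the	*/
/* result up, so the pointer it returns may point into the middle of	*/
/* the underlying object; the displacement registration above is what	*/
/* keeps such a pointer recognized by the collector.			*/
#if 0
#include <assert.h>
#include <stddef.h>
#include "gc.h"

static void *alloc_aligned_buffer(size_t len)
{
    void *p = GC_memalign(64, len);    /* e.g. cache-line alignment */
    assert(p == 0 || ((GC_word)p % 64) == 0);
    return p;                          /* collectable, pointerful */
}
#endif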