
mallocx.c

mallocx.c is part of the Boehm-Demers-Weiser conservative garbage collector. This excerpt covers the tail of GC_generic_malloc_many, plus GC_malloc_many, GC_malloc_uncollectable, GC_memalign, and GC_malloc_atomic_uncollectable.
    /* Body of GC_generic_malloc_many, continued from the part of the  */
    /* file not shown in this excerpt.                                 */
    GC_INVOKE_FINALIZERS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();

    /* Do our share of marking work. */
    if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
    }

    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lg;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              {
                signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;

                GC_ASSERT(my_bytes_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.   */
                /* Thus we can't accidentally adjust it down in more */
                /* than one thread simultaneously.                   */
                if (my_bytes_allocd_tmp != 0) {
                  (void)AO_fetch_and_add(
                              (volatile AO_t *)(&GC_bytes_allocd_tmp),
                              (AO_t)(-my_bytes_allocd_tmp));
                  GC_bytes_allocd += my_bytes_allocd_tmp;
                }
              }
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
              /* We also reclaimed memory, so we need to adjust that */
              /* count.  This should be atomic, so the results may   */
              /* be inaccurate.                                      */
              GC_bytes_found += my_bytes_allocd;
#             ifdef PARALLEL_MARK
                *result = op;
                (void)AO_fetch_and_add(
                              (volatile AO_t *)(&GC_bytes_allocd_tmp),
                              (AO_t)(my_bytes_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
#             else
                GC_bytes_allocd += my_bytes_allocd;
                goto out;
#             endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              LOCK();
              /* GC lock is needed for reclaim list access.  We must */
              /* decrement fl_builder_count before reacquiring the   */
              /* GC lock.  Hopefully this path is rare.              */
#           endif
        }
    }

    /* Next try to use prefix of global free list if there is one.    */
    /* We don't refill it, but we need to use it up before allocating */
    /* a new block ourselves.                                         */
    opp = &(GC_obj_kinds[k].ok_freelist[lg]);
    if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
            my_bytes_allocd += lb;
            if (my_bytes_allocd >= HBLKSIZE) {
                *opp = obj_link(p);
                obj_link(p) = 0;
                break;
            }
        }
        GC_bytes_allocd += my_bytes_allocd;
        goto out;
    }

    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lb, k, 0);
        if (h != 0) {
            if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
            GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              GC_release_mark_lock();
#           endif

            op = GC_build_fl(h, lw, ok -> ok_init, 0);
#           ifdef PARALLEL_MARK
              *result = op;
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
              return;
#           else
              goto out;
#           endif
        }
    }

    /* As a last attempt, try allocating a single object.  Note that */
    /* this may trigger a collection or expand the heap.             */
    op = GC_generic_malloc_inner(lb, k);
    if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}

void * GC_malloc_many(size_t lb)
{
    void *result;
    GC_generic_malloc_many(((lb + EXTRA_BYTES + GRANULE_BYTES-1)
                           & ~(GRANULE_BYTES-1)),
                           NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the */
/* links would not be seen by the collector.                         */
# endif

/* Allocate lb bytes of pointerful, traced, but not collectable data */
void * GC_malloc_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
            /* We don't need the extra byte, since this won't be */
            /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_uobjfreelist[lg]);
        LOCK();
        if( (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be */
            /* cleared only temporarily during a collection, as a */
            /* result of the normal free list mark bit clearing.  */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        hhdr = HDR((struct hblk *)op);
        /* We don't need the lock here, since we have an undisguised */
        /* pointer.  We do need to hold the lock while we adjust     */
        /* mark bits.                                                */
        lb = hhdr -> hb_sz;
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0);  /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
}

/* Not well tested nor integrated.                */
/* Debug version is tricky and currently missing. */
#include <limits.h>

void * GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data. */
/* This is normally roughly equivalent to the system malloc.       */
/* But it may be useful if malloc is redefined.                    */
void * GC_malloc_atomic_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
            /* We don't need the extra byte, since this won't be */
            /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_auobjfreelist[lg]);
        LOCK();
        if( (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
        hhdr = HDR((struct hblk *)op);
        lb = hhdr -> hb_sz;

        LOCK();
        set_mark_bit_from_hdr(hhdr, 0);  /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
}
#endif /* ATOMIC_UNCOLLECTABLE */
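To make the contract of these allocators concrete, here is a minimal caller-side sketch. It is not part of mallocx.c: it assumes the collector's public gc.h interface (GC_INIT, GC_malloc_many with its GC_NEXT link macro, GC_memalign, GC_malloc_uncollectable, GC_free), and the sizes and main function are purely illustrative.

/* Usage sketch, not from mallocx.c.  Assumes gc.h declares the    */
/* functions used below and that the library was built with        */
/* GC_memalign available.                                          */
#include <stdio.h>
#include <gc.h>

int main(void)
{
    GC_INIT();

    /* GC_malloc_many returns one or more objects of at least the  */
    /* requested size, linked through their first word; GC_NEXT    */
    /* reads (or writes) that link.  The batch amortizes the lock  */
    /* acquisition seen in GC_generic_malloc_many above.           */
    int count = 0;
    void *p = GC_malloc_many(16);
    while (p != NULL) {
        void *next = GC_NEXT(p);  /* save link before reusing the object */
        GC_NEXT(p) = NULL;        /* first word is ordinary storage now  */
        ++count;
        p = next;
    }
    printf("GC_malloc_many(16) returned %d objects\n", count);

    /* GC_memalign returns storage aligned to the requested boundary. */
    void *aligned = GC_memalign(64, 100);
    printf("64-byte aligned: %s\n",
           ((size_t)aligned % 64 == 0) ? "yes" : "no");

    /* Uncollectable memory is traced but never reclaimed by the */
    /* collector; release it explicitly with GC_free.            */
    void *u = GC_malloc_uncollectable(128);
    GC_free(u);
    return 0;
}

Clearing GC_NEXT before reuse is only hygiene: the link occupies the object's first word, which belongs to the caller once the object leaves the list.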
