⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 gc_priv.h

📁 著名的boost库
💻 H
📖 第 1 页 / 共 5 页
字号:
#       define GETENV(name) fixed_getenv(name)
#   else
#       define GETENV(name) getenv(name)
#   endif
#else
#   define GETENV(name) 0
#endif

#if defined(DARWIN)
#	if defined(POWERPC)
#		if CPP_WORDSZ == 32
#                 define GC_THREAD_STATE_T ppc_thread_state_t
#		  define GC_MACH_THREAD_STATE PPC_THREAD_STATE
#		  define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE_COUNT
#		  define GC_MACH_HEADER mach_header
#		  define GC_MACH_SECTION section
#	        else
#                 define GC_THREAD_STATE_T ppc_thread_state64_t
#		  define GC_MACH_THREAD_STATE PPC_THREAD_STATE64
#		  define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
#		  define GC_MACH_HEADER mach_header_64
#		  define GC_MACH_SECTION section_64
#		endif
#	elif defined(I386) || defined(X86_64)
#               if CPP_WORDSZ == 32
#		  define GC_THREAD_STATE_T x86_thread_state32_t
#		  define GC_MACH_THREAD_STATE x86_THREAD_STATE32
#		  define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
#		  define GC_MACH_HEADER mach_header
#		  define GC_MACH_SECTION section
#               else
#		  define GC_THREAD_STATE_T x86_thread_state64_t
#		  define GC_MACH_THREAD_STATE x86_THREAD_STATE64
#		  define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
#		  define GC_MACH_HEADER mach_header_64
#		  define GC_MACH_SECTION section_64
#               endif
#	else
#		error define GC_THREAD_STATE_T
#		define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE
#		define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT
#	endif
/* Try to work out the right way to access thread state structure members.
   The structure has changed its definition in different Darwin versions.
   This now defaults to the (older) names without __, thus hopefully,
   not breaking any existing Makefile.direct builds.  */
#       if defined (HAS_PPC_THREAD_STATE___R0) \
	  || defined (HAS_PPC_THREAD_STATE64___R0) \
	  || defined (HAS_X86_THREAD_STATE32___EAX) \
	  || defined (HAS_X86_THREAD_STATE64___RAX)
#         define THREAD_FLD(x) __ ## x
#       else
#         define THREAD_FLD(x) x
#       endif
#endif

/*********************************/
/*                               */
/* Word-size-dependent defines   */
/*                               */
/*********************************/

/* Word-size-specific conversion shifts.  A word is 4 bytes when	*/
/* CPP_WORDSZ is 32, and 8 bytes when it is 64; UNALIGNED is set if	*/
/* pointers may be stored at addresses not word-aligned.		*/
#if CPP_WORDSZ == 32
#  define WORDS_TO_BYTES(x) ((x) << 2)
#  define BYTES_TO_WORDS(x) ((x) >> 2)
#  define LOGWL ((word)5)		   /* log[2] of CPP_WORDSZ  */
#  define modWORDSZ(n) ((n) & 0x1f)	   /* n mod size of word    */
#  if ALIGNMENT != 4
#    define UNALIGNED
#  endif
#elif CPP_WORDSZ == 64
#  define WORDS_TO_BYTES(x) ((x) << 3)
#  define BYTES_TO_WORDS(x) ((x) >> 3)
#  define LOGWL ((word)6)		   /* log[2] of CPP_WORDSZ  */
#  define modWORDSZ(n) ((n) & 0x3f)	   /* n mod size of word    */
#  if ALIGNMENT != 8
#    define UNALIGNED
#  endif
#endif

/* The first TINY_FREELISTS free lists correspond to the first	*/
/* TINY_FREELISTS multiples of GRANULE_BYTES, i.e. we keep 	*/
/* separate free lists for each multiple of GRANULE_BYTES	*/
/* up to (TINY_FREELISTS-1) * GRANULE_BYTES.  After that they	*/
/* may be spread out further. 					*/
#include "../gc_tiny_fl.h"
/* Short internal aliases for the client-visible tuning constants	*/
/* declared in gc_tiny_fl.h.						*/
#define GRANULE_BYTES GC_GRANULE_BYTES
#define TINY_FREELISTS GC_TINY_FREELISTS

/* Basic word-level constants and helpers.			    */
#define WORDSZ ((word)CPP_WORDSZ)	   /* word size in bits	    */
#define SIGNB  ((word)1 << (WORDSZ-1))	   /* top (sign) bit mask   */
#define BYTES_PER_WORD      ((word)(sizeof (word)))
#define ONES                ((word)(signed_word)(-1))	/* all bits set */
#define divWORDSZ(n) ((n) >> LOGWL)	   /* divide n by size of word      */

/* Conversions between allocation granules, bytes and words.  The	*/
/* word-size-specific cases reduce the conversion to a single shift;	*/
/* the fallback composes the byte conversions instead.			*/
#if GRANULE_BYTES == 8
# define BYTES_TO_GRANULES(n) ((n)>>3)
# define GRANULES_TO_BYTES(n) ((n)<<3)
# if CPP_WORDSZ == 64
#   define GRANULES_TO_WORDS(n) (n)
# elif CPP_WORDSZ == 32
#   define GRANULES_TO_WORDS(n) ((n)<<1)
# else
#   define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
# endif
#elif GRANULE_BYTES == 16
# define BYTES_TO_GRANULES(n) ((n)>>4)
# define GRANULES_TO_BYTES(n) ((n)<<4)
# if CPP_WORDSZ == 64
#   define GRANULES_TO_WORDS(n) ((n)<<1)
# elif CPP_WORDSZ == 32
#   define GRANULES_TO_WORDS(n) ((n)<<2)
# else
#   define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
# endif
#else
# error Bad GRANULE_BYTES value
#endif

/*********************/
/*                   */
/*  Size Parameters  */
/*                   */
/*********************/

/*  heap block size, bytes. Should be power of 2 */

#ifndef HBLKSIZE
  /* No block size requested: choose a default log2 block size for the	*/
  /* configuration and target word size.				*/
# ifdef SMALL_CONFIG
#   define CPP_LOG_HBLKSIZE 10
# else
#   if (CPP_WORDSZ == 32) || (defined(HPUX) && defined(HP_PA))
      /* HPUX/PA seems to use 4K pages with the 64 bit ABI */
#     define CPP_LOG_HBLKSIZE 12
#   else
#     define CPP_LOG_HBLKSIZE 13
#   endif
# endif
#else
  /* Map the user-supplied HBLKSIZE to its log2.  Any value that is	*/
  /* not a supported power of two leaves CPP_LOG_HBLKSIZE undefined	*/
  /* and triggers a deliberate syntax error below.			*/
# if HBLKSIZE == 512
#   define CPP_LOG_HBLKSIZE 9
# elif HBLKSIZE == 1024
#   define CPP_LOG_HBLKSIZE 10
# elif HBLKSIZE == 2048
#   define CPP_LOG_HBLKSIZE 11
# elif HBLKSIZE == 4096
#   define CPP_LOG_HBLKSIZE 12
# elif HBLKSIZE == 8192
#   define CPP_LOG_HBLKSIZE 13
# elif HBLKSIZE == 16384
#   define CPP_LOG_HBLKSIZE 14
# else
    --> fix HBLKSIZE
# endif
# undef HBLKSIZE
#endif
# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
# define LOG_HBLKSIZE   ((size_t)CPP_LOG_HBLKSIZE)
# define HBLKSIZE ((size_t)CPP_HBLKSIZE)


/*  max size objects supported by freelist (larger objects are	*/
/*  allocated directly with allchblk(), by rounding to the next */
/*  multiple of HBLKSIZE.					*/

/* Largest object size served from the free lists: half a heap block.	*/
#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
#define MAXOBJBYTES ((size_t)CPP_MAXOBJBYTES)
#define CPP_MAXOBJWORDS BYTES_TO_WORDS(CPP_MAXOBJBYTES)
#define MAXOBJWORDS ((size_t)CPP_MAXOBJWORDS)
#define CPP_MAXOBJGRANULES BYTES_TO_GRANULES(CPP_MAXOBJBYTES)
#define MAXOBJGRANULES ((size_t)CPP_MAXOBJGRANULES)
		
# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)

# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
	/* Equivalent to subtracting 2 hblk pointers.	*/
	/* We do it this way because a compiler should	*/
	/* find it hard to use an integer division	*/
	/* instead of a shift.  The bundled SunOS 4.1	*/
	/* o.w. sometimes pessimizes the subtraction to	*/
	/* involve a call to .div.			*/
 
# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))
 
# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))
	/* Round objptr down to the start of its heap block.	*/

# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
	/* Byte displacement of objptr within its heap block.	*/

/* Round up byte allocation requests to integral number of words, etc. */
# define ROUNDED_UP_WORDS(n) \
	BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
# define ROUNDED_UP_GRANULES(n) \
	BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
# if MAX_EXTRA_BYTES == 0
#  define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), 1)
# else
#  define SMALL_OBJ(bytes) \
	    (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), 1) || \
	     (bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
    	/* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES.	*/
    	/* But we try to avoid looking up EXTRA_BYTES.			*/
# endif
# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
	/* Add the per-object overhead to a request size.	*/
# ifndef MIN_WORDS
#  define MIN_WORDS 2	/* FIXME: obsolete */
# endif


/*
 * Hash table representation of sets of pages.
 * Implements a map from aligned HBLKSIZE chunks of the address space to one
 * bit each.
 * This assumes it is OK to spuriously set bits, e.g. because multiple
 * addresses are represented by a single location.
 * Used by black-listing code, and perhaps by dirty bit maintenance code.
 */
 
/* Pick the table size (log2 of the number of one-bit entries) by	*/
/* configuration: bigger tables reduce collisions for larger heaps at	*/
/* the cost of memory.							*/
# ifdef LARGE_CONFIG
#   define LOG_PHT_ENTRIES  20  /* Collisions likely at 1M blocks,	*/
				/* which is >= 4GB.  Each table takes	*/
				/* 128KB, some of which may never be	*/
				/* touched.				*/
# else
#   ifdef SMALL_CONFIG
#     define LOG_PHT_ENTRIES  14 /* Collisions are likely if heap grows	*/
				 /* to more than 16K hblks = 64MB.	*/
				 /* Each hash table occupies 2K bytes.   */
#   else /* default "medium" configuration */
#     define LOG_PHT_ENTRIES  16 /* Collisions are likely if heap grows	*/
				 /* to more than 64K hblks >= 256MB.	*/
				 /* Each hash table occupies 8K bytes.  */
				 /* Even for somewhat smaller heaps, 	*/
				 /* say half that, collisions may be an	*/
				 /* issue because we blacklist 		*/
				 /* addresses outside the heap.		*/
#   endif
# endif
# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
# define PHT_SIZE (PHT_ENTRIES >> LOGWL)	/* table length in words */
typedef word page_hash_table[PHT_SIZE];

# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))
	/* Hash a page address: its block number modulo the table size.	*/

# define get_pht_entry_from_index(bl, index) \
		(((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
# define set_pht_entry_from_index(bl, index) \
		(bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
# define clear_pht_entry_from_index(bl, index) \
		(bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
/* And a dumb but thread-safe version of set_pht_entry_from_index.	*/
/* This sets (many) extra bits.						*/
# define set_pht_entry_from_index_safe(bl, index) \
		(bl)[divWORDSZ(index)] = ONES
	


/********************************************/
/*                                          */
/*    H e a p   B l o c k s                 */
/*                                          */
/********************************************/

/*  heap block header */
#define HBLKMASK   (HBLKSIZE-1)	/* mask for the offset within a block */

#define MARK_BITS_PER_HBLK (HBLKSIZE/GRANULE_BYTES)
	   /* upper bound                                    */
	   /* We allocate 1 bit per allocation granule.	     */
	   /* If MARK_BIT_PER_GRANULE is defined, we use     */
	   /* every nth bit, where n is the number of 	     */
	   /* allocation granules per object.  If	     */
	   /* MARK_BIT_PER_OBJ is defined, we only use the   */
   	   /* initial group of mark bits, and it is safe     */
	   /* to allocate smaller header for large objects.  */

# ifdef USE_MARK_BYTES
#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1)
	/* Unlike the other case, this is in units of bytes.		*/
	/* Since we force doubleword alignment, we need at most one	*/
	/* mark bit per 2 words.  But we do allocate and set one	*/
	/* extra mark bit to avoid an explicit check for the 		*/
	/* partial object at the end of each block.			*/
# else
	/* Packed bit case: round the bit count up to whole words,	*/
	/* plus one word of slack for the trailing sentinel bit.	*/
#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ + 1)
# endif

/* Type of the header's mark counter: an atomic type when marking may	*/
/* proceed concurrently (PARALLEL_MARK), a plain size_t otherwise.	*/
#ifdef PARALLEL_MARK
# include <atomic_ops.h>
  typedef AO_t counter_t;
#else
  typedef size_t counter_t;
#endif

/* We maintain layout maps for heap blocks containing objects of a given */
/* size.  Each entry in this map describes a byte offset and has the	 */
/* following type.							 */
struct hblkhdr {
    struct hblk * hb_next; 	/* Link field for hblk free list	 */
    				/* and for lists of chunks waiting to be */
    				/* reclaimed.				 */
    struct hblk * hb_prev;	/* Backwards link for free list.	*/
    struct hblk * hb_block;	/* The corresponding block.		*/
    unsigned char hb_obj_kind;
    			 /* Kind of objects in the block.  Each kind 	*/
    			 /* identifies a mark procedure and a set of 	*/
    			 /* list headers.  Sometimes called regions.	*/
    unsigned char hb_flags;
#	define IGNORE_OFF_PAGE	1	/* Ignore pointers that do not	*/
					/* point to the first page of 	*/
					/* this object.			*/
#	define WAS_UNMAPPED 2	/* This is a free block, which has	*/
				/* been unmapped from the address 	*/
				/* space.				*/
				/* GC_remap must be invoked on it	*/
				/* before it can be reallocated.	*/
				/* Only set with USE_MUNMAP.		*/
#	define FREE_BLK 4	/* Block is free, i.e. not in use.	*/
    unsigned short hb_last_reclaimed;
    				/* Value of GC_gc_no when block was	*/
    				/* last allocated or swept. May wrap.   */
				/* For a free block, this is maintained */
				/* only for USE_MUNMAP, and indicates	*/
				/* when the header was allocated, or	*/
				/* when the size of the block last	*/
				/* changed.				*/
    size_t hb_sz;  /* If in use, size in bytes, of objects in the block. */
		   /* if free, the size in bytes of the whole block      */
    word hb_descr;   		/* object descriptor for marking.  See	*/
    				/* mark.h.				*/
#   ifdef MARK_BIT_PER_OBJ
      unsigned32 hb_inv_sz;	/* A good upper bound for 2**32/hb_sz.	*/
    				/* For large objects, we use		*/
    				/* LARGE_INV_SZ.			*/
#     define LARGE_INV_SZ (1 << 16)
#   else
      unsigned char hb_large_block;
      short * hb_map;		/* Essentially a table of remainders	*/
      				/* mod BYTES_TO_GRANULES(hb_sz), except	*/
      				/* for large blocks.  See GC_obj_map.	*/
#   endif
    counter_t hb_n_marks;	/* Number of set mark bits, excluding 	*/
    				/* the one always set at the end.	*/
    				/* Currently it is concurrently 	*/
    				/* updated and hence only approximate.  */
    				/* But a zero value does guarantee that	*/
    				/* the block contains no marked		*/
    				/* objects.				*/
    				/* Ensuring this property means that we	*/
    				/* never decrement it to zero during a	*/
    				/* collection, and hence the count may 	*/
    				/* be one too high.  Due to concurrent	*/
    				/* updates, an arbitrary number of	*/
    				/* increments, but not all of them (!)	*/

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -