⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 gc_priv.h

📁 gc_priv.h — internal (private) header of the Boehm-Demers-Weiser conservative garbage collector: declarations for black-listing, heap-block allocation, reclamation, finalization and dirty-bit tracking.
💻 H
📖 第 1 页 / 共 5 页
字号:
/*
 * Internal collector declarations: black-listing, scratch allocation,
 * heap-block layout maps, hblk allocation, reclamation, object
 * allocation, finalization hooks, virtual-dirty-bit interface, and
 * debugging print routines.
 *
 * NOTE(review): this excerpt opens inside an #if/#else whose condition
 * is outside the visible chunk, and is truncated inside a trailing
 * "#   ifdef" — both fragments are preserved as-is.
 */
# else
    void GC_add_to_black_list_stack(word p);
    /* In this configuration the "source" argument is discarded.	*/
#   define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
	    GC_add_to_black_list_stack((word)(bits))
# endif

struct hblk * GC_is_black_listed(struct hblk * h, word len);
			/* If there are likely to be false references	*/
			/* to a block starting at h of the indicated    */
			/* length, then return the next plausible	*/
			/* starting location for h that might avoid	*/
			/* these false references.			*/
void GC_promote_black_lists(void);
			/* Declare an end to a black listing phase.	*/
void GC_unpromote_black_lists(void);
			/* Approximately undo the effect of the above.	*/
			/* This actually loses some information, but	*/
			/* only in a reasonably safe way.		*/
word GC_number_stack_black_listed(struct hblk *start, struct hblk *endp1);
			/* Return the number of (stack) blacklisted	*/
			/* blocks in the range for statistical		*/
			/* purposes.					*/

ptr_t GC_scratch_alloc(size_t bytes);
				/* GC internal memory allocation for	*/
				/* small objects.  Deallocation is not  */
				/* possible.				*/

/* Heap block layout maps: */
GC_bool GC_add_map_entry(size_t sz);
				/* Add a heap block map for objects of	*/
				/* size sz to obj_map.			*/
				/* Return FALSE on failure.		*/
void GC_register_displacement_inner(size_t offset);
				/* Version of GC_register_displacement	*/
				/* that assumes lock is already held	*/
				/* and signals are already disabled.	*/
void GC_initialize_offsets(void);
				/* Initialize GC_valid_offsets,		*/
				/* depending on current 		*/
				/* GC_all_interior_pointers settings.	*/

/*  hblk allocation: */
void GC_new_hblk(size_t size_in_granules, int kind);
				/* Allocate a new heap block, and build */
				/* a free list in it.			*/

ptr_t GC_build_fl(struct hblk *h, size_t words, GC_bool clear, ptr_t list);
				/* Build a free list for objects of 	*/
				/* size sz in block h.  Append list to	*/
				/* end of the free lists.  Possibly	*/
				/* clear objects on the list.  Normally	*/
				/* called by GC_new_hblk, but also	*/
				/* called explicitly without GC lock.	*/

struct hblk * GC_allochblk (size_t size_in_bytes, int kind,
		            unsigned flags);
				/* Allocate a heap block, inform	*/
				/* the marker that block is valid	*/
				/* for objects of indicated size.	*/

ptr_t GC_alloc_large (size_t lb, int k, unsigned flags);
			/* Allocate a large block of size lb bytes.	*/
			/* The block is not cleared.			*/
			/* Flags is 0 or IGNORE_OFF_PAGE.		*/
			/* Calls GC_allochblk to do the actual 		*/
			/* allocation, but also triggers GC and/or	*/
			/* heap expansion as appropriate.		*/
			/* Does not update GC_bytes_allocd, but does	*/
			/* other accounting.				*/

ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags);
			/* As above, but clear block if appropriate	*/
			/* for kind k.					*/

void GC_freehblk(struct hblk * p);
				/* Deallocate a heap block and mark it  */
				/* as invalid.				*/

/*  Misc GC: */
void GC_init_inner(void);
GC_bool GC_expand_hp_inner(word n);
void GC_start_reclaim(int abort_if_found);
				/* Restore unmarked objects to free	*/
				/* lists, or (if abort_if_found is	*/
				/* TRUE) report them.			*/
				/* Sweeping of small object pages is	*/
				/* largely deferred.			*/
void GC_continue_reclaim(size_t sz, int kind);
				/* Sweep pages of the given size and	*/
				/* kind, as long as possible, and	*/
				/* as long as the corr. free list is    */
				/* empty.  Sz is in granules.		*/
void GC_reclaim_or_delete_all(void);
				/* Arrange for all reclaim lists to be	*/
				/* empty.  Judiciously choose between	*/
				/* sweeping and discarding each page.	*/
GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old);
				/* Reclaim all blocks.  Abort (in a	*/
				/* consistent state) if f returns TRUE. */
ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
			 GC_bool init, ptr_t list, signed_word *count);
				/* Rebuild free list in hbp with 	*/
				/* header hhdr, with objects of size sz */
				/* bytes.  Add list to the end of the	*/
				/* free list.  Add the number of	*/
				/* reclaimed bytes to *count.		*/
GC_bool GC_block_empty(hdr * hhdr);
				/* Block completely unmarked? 	*/
GC_bool GC_never_stop_func(void);
				/* Returns FALSE.		*/
GC_bool GC_try_to_collect_inner(GC_stop_func f);
				/* Collect; caller must have acquired	*/
				/* lock and disabled signals.		*/
				/* Collection is aborted if f returns	*/
				/* TRUE.  Returns TRUE if it completes	*/
				/* successfully.			*/
# define GC_gcollect_inner() \
	(void) GC_try_to_collect_inner(GC_never_stop_func)
void GC_finish_collection(void);
				/* Finish collection.  Mark bits are	*/
				/* consistent and lock is still held.	*/
GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page);
				/* Collect or expand heap in an attempt */
				/* to make the indicated number of free	*/
				/* blocks available.  Should be called	*/
				/* until the blocks are available or	*/
				/* until it fails by returning FALSE.	*/

extern GC_bool GC_is_initialized;	/* GC_init() has been run.	*/

#if defined(MSWIN32) || defined(MSWINCE)
  void GC_deinit(void);
                                /* Free any resources allocated by      */
                                /* GC_init                              */
#endif

void GC_collect_a_little_inner(int n);
				/* Do n units worth of garbage 		*/
				/* collection work, if appropriate.	*/
				/* A unit is an amount appropriate for  */
				/* HBLKSIZE bytes of allocation.	*/
/* void * GC_generic_malloc(size_t lb, int k); */
				/* Allocate an object of the given	*/
				/* kind.  By default, there are only	*/
				/* a few kinds: composite(pointerfree), */
				/* atomic, uncollectable, etc.		*/
				/* We claim it's possible for clever	*/
				/* client code that understands GC	*/
				/* internals to add more, e.g. to	*/
				/* communicate object layout info	*/
				/* to the collector.			*/
				/* The actual decl is in gc_mark.h.	*/
void * GC_generic_malloc_ignore_off_page(size_t b, int k);
				/* As above, but pointers past the 	*/
				/* first page of the resulting object	*/
				/* are ignored.				*/
void * GC_generic_malloc_inner(size_t lb, int k);
				/* Ditto, but I already hold lock, etc.	*/
void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k);
				/* Allocate an object, where		*/
				/* the client guarantees that there	*/
				/* will always be a pointer to the 	*/
				/* beginning of the object while the	*/
				/* object is live.			*/
void GC_generic_malloc_many(size_t lb, int k, void **result);
				/* Store a pointer to a list of newly	*/
				/* allocated objects of kind k and size */
				/* lb in *result.			*/
				/* Caller must make sure that *result is */
				/* traced even if objects are ptrfree.	*/
ptr_t GC_allocobj(size_t sz, int kind);
				/* Make the indicated 			*/
				/* free list nonempty, and return its	*/
				/* head.  Sz is in granules.		*/

/* Allocation routines that bypass the thread local cache.	*/
/* Used internally.						*/
#ifdef THREAD_LOCAL_ALLOC
  void * GC_core_malloc(size_t);
  void * GC_core_malloc_atomic(size_t);
# ifdef GC_GCJ_SUPPORT
    void *GC_core_gcj_malloc(size_t, void *);
# endif
#endif /* THREAD_LOCAL_ALLOC */

void GC_free_inner(void * p);
void GC_debug_free_inner(void * p);

void GC_init_headers(void);
struct hblkhdr * GC_install_header(struct hblk *h);
				/* Install a header for block h.	*/
				/* Return 0 on failure, or the header	*/
				/* otherwise.				*/
GC_bool GC_install_counts(struct hblk * h, size_t sz);
				/* Set up forwarding counts for block	*/
				/* h of size sz.			*/
				/* Return FALSE on failure.		*/
void GC_remove_header(struct hblk * h);
				/* Remove the header for block h.	*/
void GC_remove_counts(struct hblk * h, size_t sz);
				/* Remove forwarding counts for h.	*/
hdr * GC_find_header(ptr_t h); /* Debugging only.		*/

void GC_finalize(void);
			/* Perform all indicated finalization actions	*/
			/* on unmarked objects.				*/
			/* Unreachable finalizable objects are enqueued	*/
			/* for processing by GC_invoke_finalizers.	*/
			/* Invoked with lock.				*/
void GC_notify_or_invoke_finalizers(void);
			/* If GC_finalize_on_demand is not set, invoke	*/
			/* eligible finalizers. Otherwise:		*/
			/* Call *GC_finalizer_notifier if there are	*/
			/* finalizers to be run, and we haven't called	*/
			/* this procedure yet this GC cycle.		*/

GC_API void * GC_make_closure(GC_finalization_proc fn, void * data);
GC_API void GC_debug_invoke_finalizer(void * obj, void * data);
			/* Auxiliary fns to make finalization work	*/
			/* correctly with displaced pointers introduced	*/
			/* by the debugging allocators.			*/

void GC_add_to_heap(struct hblk *p, size_t bytes);
			/* Add a HBLKSIZE aligned chunk to the heap.	*/

void GC_print_obj(ptr_t p);
			/* P points to somewhere inside an object with	*/
			/* debugging info.  Print a human readable	*/
			/* description of the object to stderr.		*/
extern void (*GC_check_heap)(void);
			/* Check that all objects in the heap with 	*/
			/* debugging info are intact.  			*/
			/* Add any that are not to GC_smashed list.	*/
extern void (*GC_print_all_smashed) (void);
			/* Print GC_smashed if it's not empty.		*/
			/* Clear GC_smashed list.			*/
extern void GC_print_all_errors (void);
			/* Print smashed and leaked objects, if any.	*/
			/* Clear the lists of such objects.		*/
extern void (*GC_print_heap_obj) (ptr_t p);
			/* If possible print s followed by a more	*/
			/* detailed description of the object 		*/
			/* referred to by p.				*/
#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
  void GC_print_address_map (void);
			/* Print an address map of the process.		*/
#endif

extern GC_bool GC_have_errors;  /* We saw a smashed or leaked object.	*/
				/* Call error printing routine 		*/
				/* occasionally.			*/

#ifndef SMALL_CONFIG
  extern int GC_print_stats;	/* Nonzero generates basic GC log.	*/
				/* VERBOSE generates add'l messages.	*/
#else
# define GC_print_stats 0
  	/* Will this keep the message character strings from the executable? */
  	/* It should ...						     */
#endif
#define VERBOSE 2

#ifndef NO_DEBUGGING
  extern GC_bool GC_dump_regularly;  /* Generate regular debugging dumps. */
# define COND_DUMP if (GC_dump_regularly) GC_dump();
#else
# define COND_DUMP
#endif

#ifdef KEEP_BACK_PTRS
  extern long GC_backtraces;
  void GC_generate_random_backtrace_no_gc(void);
#endif

extern GC_bool GC_print_back_height;

#ifdef MAKE_BACK_GRAPH
  void GC_print_back_graph_stats(void);
#endif

/* Macros used for collector internal allocation.	*/
/* These assume the collector lock is held.		*/
#ifdef DBG_HDRS_ALL
    extern void * GC_debug_generic_malloc_inner(size_t lb, int k);
    extern void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
								int k);
#   define GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner
#   define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
		 GC_debug_generic_malloc_inner_ignore_off_page
#   ifdef THREADS
#       define GC_INTERNAL_FREE GC_debug_free_inner
#   else
#       define GC_INTERNAL_FREE GC_debug_free
#   endif
#else
#   define GC_INTERNAL_MALLOC GC_generic_malloc_inner
#   define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
		 GC_generic_malloc_inner_ignore_off_page
#   ifdef THREADS
#       define GC_INTERNAL_FREE GC_free_inner
#   else
#       define GC_INTERNAL_FREE GC_free
#   endif
#endif

/* Memory unmapping: */
#ifdef USE_MUNMAP
  void GC_unmap_old(void);
  void GC_merge_unmapped(void);
  void GC_unmap(ptr_t start, size_t bytes);
  void GC_remap(ptr_t start, size_t bytes);
  void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2);
#endif

/* Virtual dirty bit implementation:		*/
/* Each implementation exports the following:	*/
void GC_read_dirty(void);
			/* Retrieve dirty bits.	*/
GC_bool GC_page_was_dirty(struct hblk *h);
			/* Read retrieved dirty bits.	*/
GC_bool GC_page_was_ever_dirty(struct hblk *h);
			/* Could the page contain valid heap pointers?	*/
void GC_remove_protection(struct hblk *h, word nblocks,
			  GC_bool pointerfree);
			/* h is about to be written or allocated.  Ensure   */
			/* that it's not write protected by the virtual	    */
			/* dirty bit implementation.			    */
void GC_dirty_init(void);

/* Slow/general mark bit manipulation: */
GC_API GC_bool GC_is_marked(ptr_t p);
void GC_clear_mark_bit(ptr_t p);
void GC_set_mark_bit(ptr_t p);

/* Stubborn objects: */
void GC_read_changed(void);	/* Analogous to GC_read_dirty */
GC_bool GC_page_was_changed(struct hblk * h);
				/* Analogous to GC_page_was_dirty */
void GC_clean_changing_list(void);
				/* Collect obsolete changing list entries */
void GC_stubborn_init(void);

/* Debugging print routines: */
void GC_print_block_list(void);
void GC_print_hblkfreelist(void);
void GC_print_heap_sects(void);
void GC_print_static_roots(void);
void GC_print_finalization_stats(void);
/* void GC_dump(void); - declared in gc.h */

#ifdef KEEP_BACK_PTRS
   void GC_store_back_pointer(ptr_t source, ptr_t dest);
   void GC_marked_for_finalization(ptr_t dest);
#  define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
#  define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
#else
#  define GC_STORE_BACK_PTR(source, dest) 
#  define GC_MARKED_FOR_FINALIZATION(dest)
#endif

/* Make arguments appear live to compiler */
# ifdef __WATCOMC__
    void GC_noop(void*, ...);
# else
#   ifdef 

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -