📄 finalize.c
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
}

# if defined(__STDC__)
    void GC_register_finalizer(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                                GC_normal_finalize_mark_proc);
}

# if defined(__STDC__)
    void GC_register_finalizer_ignore_self(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_ignore_self(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                                GC_ignore_self_finalize_mark_proc);
}

# if defined(__STDC__)
    void GC_register_finalizer_no_order(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_no_order(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                                GC_null_finalize_mark_proc);
}

#ifndef NO_DEBUGGING
void GC_dump_finalization()
{
    struct disappearing_link * curr_dl;
    struct finalizable_object * curr_fo;
    ptr_t real_ptr, real_link;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    int i;

    GC_printf0("Disappearing links:\n");
    for (i = 0; i < dl_size; i++) {
      for (curr_dl = dl_head[i]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        GC_printf2("Object: 0x%lx, Link:0x%lx\n", real_ptr, real_link);
      }
    }
    GC_printf0("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        GC_printf1("Finalizable object: 0x%lx\n", real_ptr);
      }
    }
}
#endif

/* Called with world stopped.  Cause disappearing links to disappear,  */
/* and invoke finalizers.                                               */
void GC_finalize()
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    register int i;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);

  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
  /* Mark all objects reachable via chains of 1 or more pointers       */
  /* from finalizable objects.                                          */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %lx\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still               */
  /* unreachable.                                                      */
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (prev_fo == 0) {
                fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_finalize_now);
              GC_finalize_now = curr_fo;
              /* unhide object pointer so any future collections will  */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_words_finalized +=
                        ALIGNED_WORDS(curr_fo -> fo_object_size)
                        + ALIGNED_WORDS(sizeof(struct finalizable_object));
            GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            GC_set_mark_bit(real_ptr);
        }
      }
  }

  /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
}

#ifndef JAVA_FINALIZATION_NOT_NEEDED

/* Enqueue all remaining finalizers to be run - Assumes lock is
 * held, and signals are disabled */
void GC_enqueue_all_finalizers()
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    register int i;
    int fo_size;

    fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
        while (curr_fo != 0) {
            real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            GC_set_mark_bit(real_ptr);

            /* Delete from hash table */
            next_fo = fo_next(curr_fo);
            if (prev_fo == 0) {
                fo_head[i] = next_fo;
            } else {
                fo_set_next(prev_fo, next_fo);
            }
            GC_fo_entries--;

            /* Add to list of objects awaiting finalization.    */
            fo_set_next(curr_fo, GC_finalize_now);
            GC_finalize_now = curr_fo;

            /* unhide object pointer so any future collections will    */
            /* see it.                                                  */
            curr_fo -> fo_hidden_base =
                        (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);

            GC_words_finalized +=
                        ALIGNED_WORDS(curr_fo -> fo_object_size)
                        + ALIGNED_WORDS(sizeof(struct finalizable_object));
            curr_fo = next_fo;
        }
    }

    return;
}

/* Invoke all remaining finalizers that haven't yet been run.
 * This is needed for strict compliance with the Java standard,
 * which can make the runtime guarantee that all finalizers are run.
 * Unfortunately, the Java standard implies we have to keep running
 * finalizers until there are no more left, a potential infinite loop.
 * YUCK.
 * Note that this is even more dangerous than the usual Java
 * finalizers, in that objects reachable from static variables
 * may have been finalized when these finalizers are run.
 * Finalizers run at this point must be prepared to deal with a
 * mostly broken world.
 * This routine is externally callable, so is called without
 * the allocation lock.
 */
GC_API void GC_finalize_all()
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      UNLOCK();
      ENABLE_SIGNALS();
      GC_INVOKE_FINALIZERS();
      DISABLE_SIGNALS();
      LOCK();
    }
    UNLOCK();
    ENABLE_SIGNALS();
}
#endif

/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of `safe state' and     */
/* getting into that safe state is expensive.)                           */
int GC_should_invoke_finalizers GC_PROTO((void))
{
    return GC_finalize_now != 0;
}

/* Invoke finalizers for all objects that are ready to be finalized.   */
/* Should be called without allocation lock.                           */
int GC_invoke_finalizers()
{
    struct finalizable_object * curr_fo;
    int count = 0;
    word mem_freed_before;
    DCL_LOCK_STATE;

    while (GC_finalize_now != 0) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        if (count == 0) {
            mem_freed_before = GC_mem_freed;
        }
        curr_fo = GC_finalize_now;
#       ifdef THREADS
            if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
            UNLOCK();
            ENABLE_SIGNALS();
            if (curr_fo == 0) break;
#       else
            GC_finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
#       ifdef UNDEFINED
            /* This is probably a bad idea.  It throws off accounting if */
            /* nearly all objects are finalizable.  O.w. it shouldn't    */
            /* matter.                                                   */
            GC_free((GC_PTR)curr_fo);
#       endif
    }
    if (count != 0 && mem_freed_before != GC_mem_freed) {
        LOCK();
        GC_finalizer_mem_freed += (GC_mem_freed - mem_freed_before);
        UNLOCK();
    }
    return count;
}

void (* GC_finalizer_notifier)() = (void (*) GC_PROTO((void)))0;

static GC_word last_finalizer_notification = 0;

void GC_notify_or_invoke_finalizers GC_PROTO((void))
{
    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.         */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1;    /* Skip first one. */

      if (GC_gc_no > last_back_trace_gc_no) {
        word i;

#       ifdef KEEP_BACK_PTRS
          LOCK();
          /* Stops when GC_gc_no wraps; that's OK.      */
          last_back_trace_gc_no = (word)(-1);  /* disable others. */
          for (i = 0; i < GC_backtraces; ++i) {
              /* FIXME: This tolerates concurrent heap mutation,        */
              /* which may cause occasional mysterious results.         */
              /* We need to release the GC lock, since GC_print_callers */
              /* acquires it.  It probably shouldn't.                   */
              UNLOCK();
              GC_generate_random_backtrace_no_gc();
              LOCK();
          }
          last_back_trace_gc_no = GC_gc_no;
          UNLOCK();
#       endif
#       ifdef MAKE_BACK_GRAPH
          if (GC_print_back_height)
            GC_print_back_graph_stats();
#       endif
      }
#   endif
    if (GC_finalize_now == 0) return;
    if (!GC_finalize_on_demand) {
        (void) GC_invoke_finalizers();
#       ifndef THREADS
          GC_ASSERT(GC_finalize_now == 0);
#       endif  /* Otherwise GC can run concurrently and add more */
        return;
    }
    if (GC_finalizer_notifier != (void (*) GC_PROTO((void)))0
        && last_finalizer_notification != GC_gc_no) {
        last_finalizer_notification = GC_gc_no;
        GC_finalizer_notifier();
    }
}

# ifdef __STDC__
    GC_PTR GC_call_with_alloc_lock(GC_fn_type fn, GC_PTR client_data)
# else
    GC_PTR GC_call_with_alloc_lock(fn, client_data)
    GC_fn_type fn;
    GC_PTR client_data;
# endif
{
    GC_PTR result;
    DCL_LOCK_STATE;

#   ifdef THREADS
      DISABLE_SIGNALS();
      LOCK();
      SET_LOCK_HOLDER();
#   endif
    result = (*fn)(client_data);
#   ifdef THREADS
#     ifndef GC_ASSERTIONS
        UNSET_LOCK_HOLDER();
#     endif /* o.w. UNLOCK() does it implicitly */
      UNLOCK();
      ENABLE_SIGNALS();
#   endif
    return(result);
}

#if !defined(NO_DEBUGGING)

void GC_print_finalization_stats()
{
    struct finalizable_object *fo = GC_finalize_now;
    size_t ready = 0;

    GC_printf2("%lu finalization table entries; %lu disappearing links\n",
               GC_fo_entries, GC_dl_entries);
    for (; 0 != fo; fo = fo_next(fo)) ++ready;
    GC_printf1("%lu objects are eligible for immediate finalization\n", ready);
}

#endif /* NO_DEBUGGING */
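The listing above covers only the library side of finalization. As a rough illustration of how the public entry points defined here (GC_register_finalizer and GC_invoke_finalizers, declared in gc.h) are typically driven from client code, a minimal caller-side sketch follows. The node type, the finalizer body, and the explicit GC_gcollect() call are illustrative assumptions, not part of finalize.c.

/* Hypothetical usage sketch -- NOT part of finalize.c.                */
/* Assumes the public Boehm GC interface from <gc.h>.                  */
#include <stdio.h>
#include <gc.h>

struct node {
    struct node *next;
    int id;
};

/* Finalizer: run once the node is unreachable and a collection has    */
/* queued it (see GC_finalize / GC_invoke_finalizers above).           */
static void node_finalizer(void *obj, void *client_data)
{
    struct node *n = (struct node *)obj;
    printf("finalizing node %d (client_data=%p)\n", n->id, client_data);
}

int main(void)
{
    struct node *n;

    GC_INIT();
    n = (struct node *)GC_MALLOC(sizeof(struct node));
    n->id = 42;

    /* Register node_finalizer for n; the old finalizer and client     */
    /* data would be returned through the last two arguments (NULL     */
    /* here: discard them).                                            */
    GC_register_finalizer(n, node_finalizer, NULL, NULL, NULL);

    n = NULL;                  /* drop the only reference; a stale      */
                               /* register may still keep it alive      */
    GC_gcollect();             /* a full collection queues the finalizer */
    (void) GC_invoke_finalizers();  /* run queued finalizers now        */
    return 0;
}

By default (GC_finalize_on_demand == 0), GC_notify_or_invoke_finalizers runs the queue automatically after a collection, so the explicit GC_invoke_finalizers() call in the sketch mainly matters when finalization on demand is enabled.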