📄 ggc.c
Font size:
/*
 * NOTE(review): this chunk begins mid-way through erts_shrink_new_heap();
 * the lines below are the tail of that function (the end of a heap
 * reallocation call whose opening is outside this view), kept verbatim.
 */
                                   sizeof(Eterm)*(HEAP_SIZE(p)),
                                   sizeof(Eterm)*new_sz);
    p->hend = new_heap + new_sz;
    p->stop = p->hend - stack_size;

    VERBOSE(DEBUG_PRIVATE_GC,
            ("shrink_new_heap: FROM %d DOWNTO %d (used %d)\n",
             HEAP_SIZE(p), new_sz, heap_size));

    /* If the reallocated heap moved, every pointer into it must be rewritten. */
    if ((offs = new_heap - HEAP_START(p)) != 0) {

        /*
         * Normally, we don't expect a shrunk heap to move, but you never
         * know on some strange embedded systems... Or when using purify.
         */

        offset_heap(new_heap, heap_size, offs, HEAP_START(p), HEAP_TOP(p));
        HIGH_WATER(p) = new_heap + (HIGH_WATER(p) - HEAP_START(p));
#ifdef INCREMENTAL
        p->scan_top = new_heap + (p->scan_top - HEAP_START(p));
#endif
        offset_rootset(p, offs, HEAP_START(p), HEAP_TOP(p), objv, nobj);
        HEAP_TOP(p) = new_heap + heap_size;
        HEAP_START(p) = new_heap;
    }
    HEAP_SIZE(p) = new_sz;
}

/*
 * Resize the young heap after a fullsweep so it fits the live data plus
 * the caller's remaining 'need' words, using the grow/shrink heuristics
 * below.
 *
 * p           - process that was just collected
 * size_before - heap usage (in words) before the GC
 * need        - words the caller still needs on the heap
 * objv/nobj   - extra root terms, passed through to grow/shrink
 *
 * Returns the number of words reclaimed (size_before - size_after).
 */
static Uint
adjust_after_fullsweep(Process *p, int size_before, int need, Eterm *objv, int nobj)
{
    int wanted, sz, size_after, need_after;
    int stack_size = p->hend - p->stop;
    Uint reclaimed_now;

    size_after = (HEAP_TOP(p) - HEAP_START(p));
    reclaimed_now = (size_before - size_after);

    /*
     * Resize the heap if needed.
     */
    need_after = size_after + need + stack_size;
    if (HEAP_SIZE(p) < need_after) {
        /* Too small - grow to match requested need */
        sz = next_heap_size(p, need_after, 0);
        grow_new_heap(p, sz, objv, nobj);
    } else if (3 * HEAP_SIZE(p) < 4 * need_after) {
        /* Need more than 75% of current, postpone to next GC.*/
        FLAGS(p) |= F_HEAP_GROW;
    } else if (4 * need_after < HEAP_SIZE(p) && HEAP_SIZE(p) > H_MIN_SIZE) {
        /* We need less than 25% of the current heap, shrink.*/
        /* XXX - This is how it was done in the old GC:
           wanted = 4 * need_after;
           I think this is better as fullsweep is used mainly on
           small memory systems, but I could be wrong... */
        wanted = 2 * need_after;
        if (wanted < p->min_heap_size) {
            sz = p->min_heap_size;
        } else {
            sz = next_heap_size(p, wanted, 0);
        }
        if (sz < HEAP_SIZE(p)) {
            erts_shrink_new_heap(p, sz, objv, nobj);
        }
    }
    return reclaimed_now;
}

/*
 * Garbage collect a process.
 *
 * p: Pointer to the process structure.
 * need: Number of (erlang) words needed on the heap.
 * objv: Array of terms to add to rootset, that is to preserve.
 * nobj: Number of objects in objv.
 *
 * NOTE(review): returns (heap usage after GC) / 10 — presumably a
 * GC-cost estimate charged as reductions by the caller; confirm.
 */
int
erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
    Uint reclaimed_now = 0;
    int size_before;
    int size_after;
    int need_after;
    Uint saved_status;
    int wanted;
    int stack_size;             /* Size of stack ON HEAP. */
    int sz;
#ifdef __BENCHMARK__
    uint this_was_major = 0;
#endif
    Uint ms1, s1, us1;

    BM_STOP_TIMER(system);

    VERBOSE(DEBUG_PRIVATE_GC, ("Heap GC START Proc: %T\n", p->id));

#ifdef BM_HEAP_SIZES
    /* Benchmark bookkeeping: track the peak total heap usage over all
     * processes. */
    {
        double total_used_heap = 0;
        int i;
        for (i = 0; i < erts_max_processes; i++) {
            Process *cp = process_tab[i];
            if (cp == NULL)
                continue;
            total_used_heap += (cp->htop - cp->heap) + cp->mbuf_sz +
                (cp->old_htop - cp->old_heap);
        }
        if (total_used_heap > max_used_heap)
            max_used_heap = total_used_heap;
    }
#endif /* BM_HEAP_SIZES */

    BM_RESET_TIMER(gc);
    BM_START_TIMER(gc);

#ifdef HEAP_FRAG_ELIM_TEST
    /* Restore a heap top saved elsewhere before collecting. */
    if (SAVED_HEAP_TOP(p) != NULL) {
        HEAP_TOP(p) = SAVED_HEAP_TOP(p);
        SAVED_HEAP_TOP(p) = NULL;
    }
#endif

/* Sanity check: abort the emulator if the heap top has passed the heap
 * limit.  Defined here and #undef'd at the end of this function. */
#define OverRunCheck() \
    if (HEAP_LIMIT(p) < HEAP_TOP(p)) { \
        erl_exit(1, "%T: Heap-top passed heap limit at line %d\n", \
                 p->id, __LINE__); \
    }

    if (IS_TRACED_FL(p, F_TRACE_GC)) {
        trace_gc(p, am_gc_start);
    }

    /* Publish P_GARBING while collecting; the previous status is saved
     * and restored at 'done' below. */
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (erts_system_monitor_long_gc != 0) {
        get_now(&ms1, &s1, &us1);
    }
    saved_status = p->status;
    p->status = P_GARBING;
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);

    CHECK(p);
    OverRunCheck();

    /* Force a fullsweep once enough generational GCs have accumulated. */
    if (GEN_GCS(p) >= MAX_GEN_GCS(p)) {
        FLAGS(p) |= F_NEED_FULLSWEEP;
    }

#ifdef HYBRID
    /* Lazily allocate the remembered-set arrays (rrma/rrsrc) used by the
     * hybrid heap to record pointers into the shared heap. */
    if (p->rrma == NULL) {
        p->nrr = 0;
        p->rrsz = RRMA_DEFAULT_SIZE;
        p->rrma = erts_alloc(ERTS_ALC_T_ROOTSET,
                             RRMA_DEFAULT_SIZE * sizeof(Eterm));
        p->rrsrc = erts_alloc(ERTS_ALC_T_ROOTSET,
                              RRMA_DEFAULT_SIZE * sizeof(Eterm));
        ERTS_PROC_MORE_MEM(sizeof(Eterm) * p->rrsz * 2);
    }
#endif

    stack_size = p->hend - p->stop;
    MSO(p).overhead = 0;

    /* Size of heap before first GC */
    size_before = MBUF_SIZE(p) + (HEAP_TOP(p) - HEAP_START(p));

    /*
     * Generational GC from here on. We need an old heap.
     */

    if (OLD_HEAP(p) == NULL && HIGH_WATER(p) != HEAP_START(p) &&
        (FLAGS(p) & F_NEED_FULLSWEEP) == 0) {
        Eterm* n_old;
        /* Note: We choose a larger heap size than strictly needed,
         * which seems to reduce the number of fullsweeps.
         * This improved Estone by more than 1200 estones on my computer
         * (Ultra Sparc 10).
         */
        size_t new_sz = next_heap_size(p, HIGH_WATER(p) - HEAP_START(p), 1);

        /* Create new, empty old_heap */
        n_old = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_OLD_HEAP,
                                          sizeof(Eterm)*new_sz);

        OLD_HEND(p) = n_old + new_sz;
        OLD_HEAP(p) = OLD_HTOP(p) = n_old;
        VERBOSE(DEBUG_PRIVATE_GC,("Created an old_heap\n"));
    }

    /*
     * Try a generational GC if the old heap is large enough.
     */

    if ((FLAGS(p) & F_NEED_FULLSWEEP) == 0 &&
        HIGH_WATER(p) - HEAP_START(p) <= OLD_HEND(p) - OLD_HTOP(p)) {

        /*
         * There is space enough in old_heap for everything
         * below the high water mark. Do a generational GC.
         */

        gen_gc(p, next_heap_size(p, HEAP_SIZE(p) + MBUF_SIZE(p), 0),
               objv, nobj);
        GEN_GCS(p)++;
        size_after = HEAP_TOP(p) - HEAP_START(p);
        need_after = size_after + need + stack_size;
        reclaimed_now += (size_before - size_after);

        /*
         * Excessively large heaps should be shrunk, but
         * don't even bother on reasonable small heaps.
         *
         * The reason for this is that after tenuring, we often
         * use a really small portion of new heap, therefore, unless
         * the heap size is substantial, we don't want to shrink.
         */

        if ((HEAP_SIZE(p) > 300) && (4 * need_after < HEAP_SIZE(p)) &&
            ((HEAP_SIZE(p) > 8000) ||
             (HEAP_SIZE(p) > (OLD_HEND(p) - OLD_HEAP(p))))) {
            wanted = 3 * need_after;
            if (wanted < p->min_heap_size) {
                wanted = p->min_heap_size;
            } else {
                wanted = next_heap_size(p, wanted, 0);
            }
            if (wanted < HEAP_SIZE(p)) {
                erts_shrink_new_heap(p, wanted, objv, nobj);
            }
            ASSERT(HEAP_SIZE(p) == p->min_heap_size ||
                   HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
            goto done;
        }

        /*
         * The heap size turned out to be just right. We are done.
         */

        if (HEAP_SIZE(p) >= need_after) {
            ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
            goto done;
        }
        VERBOSE(DEBUG_PRIVATE_GC, ("Did a gen_gc, still not enough room\n"));
    }

    /*
     * The previous generational GC did not leave enough free heap space.
     * We must do a fullsweep GC. First figure out the size of the heap
     * to receive all live data.
     */

    sz = HEAP_SIZE(p) + MBUF_SIZE(p) + (OLD_HTOP(p) - OLD_HEAP(p));
    sz += p->hend - p->stop;
    sz = next_heap_size(p, sz, 0);

    /*
     * Should we grow although we don't actually need to?
     */

    if (sz == HEAP_SIZE(p) && FLAGS(p) & F_HEAP_GROW) {
        sz = next_heap_size(p, HEAP_SIZE(p), 1);
    }
    FLAGS(p) &= ~F_HEAP_GROW;

    fullsweep_heap(p, sz, objv, nobj);
    CHECK(p);
    reclaimed_now += adjust_after_fullsweep(p, size_before, need,
                                            objv, nobj);

#ifdef __BENCHMARK__
    this_was_major = 1;
#endif

 done:
    /* Update global GC statistics under the info spinlock. */
    erts_smp_spin_lock(&info_lck);
    garbage_cols++;
    reclaimed += reclaimed_now;
    erts_smp_spin_unlock(&info_lck);

    CHECK(p);
    OverRunCheck();

    /* Restore the process status saved before collection started. */
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    p->status = saved_status;
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);

    if (IS_TRACED_FL(p, F_TRACE_GC)) {
        trace_gc(p, am_gc_end);
    }

    BM_STOP_TIMER(gc);

    erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);

    /* System monitor: report GCs that took longer than the configured
     * threshold (time computed in milliseconds from the two get_now()
     * samples). */
    if (erts_system_monitor_long_gc != 0) {
        Uint ms2, s2, us2;
        Sint t;
        get_now(&ms2, &s2, &us2);
        t = ms2 - ms1;
        t = t*1000000 + s2 - s1;
        t = t*1000 + ((Sint)(us2 - us1))/1000;
        if (t > 0 && (Uint)t >= erts_system_monitor_long_gc) {
            monitor_long_gc(p, t);
        }
    }
    if (erts_system_monitor_large_heap != 0 &&
        HEAP_SIZE(p) >= erts_system_monitor_large_heap) {
        monitor_large_heap(p);
    }

#ifdef __BENCHMARK__
#ifdef BM_TIMERS
    local_pause_times[(((gc_time * 1000) < MAX_PAUSE_TIME) ?
                       (int)(gc_time * 1000) : MAX_PAUSE_TIME - 1)]++;
#endif
    if (this_was_major == 1) {
        BM_COUNT(major_gc);
#ifdef BM_TIMERS
        major_gc_time += gc_time;
        if (gc_time > max_major_time)
            max_major_time = gc_time;
#endif
    } else {
        BM_COUNT(minor_gc);
#ifdef BM_TIMERS
        minor_gc_time += gc_time;
        if (gc_time > max_minor_time)
            max_minor_time = gc_time;
#endif
    }

#ifdef BM_HEAP_SIZES
    {
        double total_used_heap = 0;
        double total_allocated_heap = 0;
        int i;
        for (i = 0; i < erts_max_processes; i++) {
            Process *cp = process_tab[i];
            if (cp == NULL)
                continue;
            total_used_heap += (cp->htop - cp->heap) +
                (cp->old_htop - cp->old_heap);
            total_allocated_heap += cp->heap_sz +
                (cp->old_hend - cp->old_heap);
        }
        if (total_used_heap > max_used_heap)
            max_used_heap = total_used_heap;
        if (total_allocated_heap > max_allocated_heap)
            max_allocated_heap = total_allocated_heap;
#ifdef BM_TIMERS_MAXALLOC
        {
            /* Split the TSC-derived timestamp into min:sec.milli micro
             * for the footprint trace line. */
            int min,sec,milli,micro;
            BM_TIMER_T tmp = ((BM_TIMER_T)vperfctr_read_tsc(system_clock) /
                              cpu_khz) * 1000;
            micro = (uint)(tmp - ((int)(tmp / 1000)) * 1000);
            tmp /= 1000;
            milli = (uint)(tmp - ((int)(tmp / 1000)) * 1000);
            tmp /= 1000;
            sec = (uint)(tmp - ((int)(tmp / 60)) * 60);
            min = (uint)tmp / 60;
            erts_fprintf(stderr,"%4d:%02d.%03d %03d : Footprint: %d\n",
                         min,sec,milli,micro,(ulong)total_used_heap);
        }
#endif
    }
#endif /* BM_HEAP_SIZES */
    BM_START_TIMER(system);
#endif /* __BENCHMARK__ */

    /* Invalidate the per-process arithmetic heap cache. */
    ARITH_AVAIL(p) = 0;
    ARITH_HEAP(p) = NULL;
#ifdef DEBUG
    ARITH_CHECK_ME(p) = NULL;
#endif

#ifdef CHECK_FOR_HOLES
    /*
     * We intentionally do not rescan the areas copied by the GC.
     * We trust the GC not to leave any holes.
     */
    /* Fill the free area between heap top and stack with hole markers so
     * a later scan can detect uninitialized words. */
    {
        Eterm* start = p->htop;
        Eterm* stop = p->stop;
        p->last_htop = p->htop;
        p->last_mbuf = 0;
        while (start < stop) {
            *start++ = ERTS_HOLE_MARKER;
        }
    }
#endif

#ifdef HYBRID
#ifdef DEBUG
    /* Check that every remembered source pointer still points into this
     * process's young or old heap. */
    {
        int i;
        for (i = 0; i < p->nrr; i++) {
            Eterm *ptr = p->rrsrc[i];
            ASSERT(ptr_within(ptr,HEAP_START(p),HEAP_TOP(p)) ||
                   ptr_within(ptr,OLD_HEAP(p),OLD_HTOP(p)));
        }
    }
#endif
#endif

    VERBOSE(DEBUG_PRIVATE_GC,("Heap GC END\n"));

    return ((int) (HEAP_TOP(p) - HEAP_START(p)) / 10);
#undef OverRunCheck
}

/*
 * Place all living data on a the new heap; deallocate any old heap.
 * Meant to be used by hibernate/3.
 */
void
erts_garbage_collect_hibernate(Process* p)
{
    Uint new_sz;
    /* Force a fullsweep, then shrink the heap to exactly the live size
     * (minimum one word).  The argument registers are the only roots. */
    FLAGS(p) |= F_NEED_FULLSWEEP;
    erts_garbage_collect(p, 0, p->arg_reg, p->arity);
    new_sz = HEAP_TOP(p) - HEAP_START(p);
    if (new_sz == 0) {
        new_sz = 1; /* We want a heap... */
    }
    erts_shrink_new_heap(p, new_sz, p->arg_reg, p->arity);
}

/*
 * Cheney-style scan of the region [n_hp, n_htop): for each term, objects
 * still inside the "water" area [low, high) are evacuated to n_htop (or
 * their forwarding pointer followed if already moved).
 *
 * NOTE(review): this function continues beyond the end of this chunk;
 * the code below is the visible prefix, kept verbatim and truncated
 * where the source listing is cut off.
 */
static Eterm*
gen_cheney(Process *p, Eterm* low, Eterm* high, Eterm* n_hp, Eterm* n_htop)
{
    Eterm* ptr;
    Eterm val;
    Eterm gval;
    char* water_start = (char *)low;
    Uint water_size = (char *)high - water_start;
#ifdef HYBRID
    Eterm *g_start = global_heap;
    Eterm *g_end = global_htop;
    Eterm *go_start = global_old_heap;
    Eterm *go_end = global_old_hend;
#endif
#ifdef INCREMENTAL
    Eterm *i_heap = inc_fromspc;
    Eterm *i_hend = inc_fromend;
#endif

    while (n_hp != n_htop) {
        gval = *n_hp;

        switch (primary_tag(gval)) {
        case TAG_PRIMARY_BOXED: {
            ptr = boxed_val(gval);
            val = *ptr;
            if (in_area(ptr, water_start, water_size)) {
                /* Already-moved objects carry a forwarding pointer in
                 * their header word. */
                if (MY_IS_MOVED(val)) {
                    ASSERT(is_boxed(val));
                    *n_hp++ = val;
                } else {
                    MOVE_BOXED(ptr,val,n_htop,n_hp++);
                }
#ifdef HYBRID
            } else if (ptr_within(ptr,g_start,g_end) ||
#ifdef INCREMENTAL
                       ptr_within(ptr, i_heap, i_hend) ||
#endif
                       ptr_within(ptr,go_start,go_end)) {
                /* Pointer into the shared (message-area) heap: remember
                 * it rather than copying. */
                RRMA_STORE(p,gval,n_hp);
                ++n_hp;
#endif
            } else {
                ASSERT(within(ptr, p));
                ++n_hp;
            }
            continue;
        }
        case TAG_PRIMARY_LIST: {
            ptr = list_val(gval);
            val = *ptr;
            if (in_area(ptr, water_start, water_size)) {
                /* A non-value head marks a moved cons cell; ptr[1] holds
                 * the forwarding pointer. */
                if (is_non_value(val)) {
                    *n_hp++ = ptr[1];
                } else {
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -