/* gc-incremental.c
 * The garbage collector.
 * The name is misleading.  GC is non-incremental at this point.
 *
 * Copyright (c) 1996, 1997
 *	Transvirtual Technologies, Inc.  All rights reserved.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file.
 */

/* XXX this should be controllable, somehow. */
#define	SUPPORT_VERBOSEMEM

#include "config.h"
#include "debug.h"
#include "config-std.h"
#include "config-mem.h"
#include "gtypes.h"
#include "gc.h"
#include "gc-mem.h"
#include "locks.h"
#include "thread.h"
#include "jthread.h"
#include "errors.h"
#include "md.h"
#include "stats.h"
#include "classMethod.h"

/* Avoid recursively allocating OutOfMemoryError */
#define	OOM_ALLOCATING		((void *) -1)

#define	GCSTACKSIZE		16384
#define	FINALIZERSTACKSIZE	THREADSTACKSIZE

/*
 * Object implementing the collector interface.
 */
static struct CollectorImpl {
	Collector	collector;
	/* XXX include static below here for encapsulation */
} gc_obj;

/* XXX don't use these types ! */
Hjava_lang_Thread* garbageman;
static Hjava_lang_Thread* finalman;

static gcList gclists[5];
static const int mustfree = 4;		/* temporary list */
static const int white = 3;
static const int grey = 2;
static const int black = 1;
static const int finalise = 0;

static int gc_init = 0;
static volatile int gcRunning = 0;
static volatile bool finalRunning = false;
static void (*walkRootSet)(Collector*);

#if defined(KAFFE_STATS)
static timespent gc_time;
static timespent sweep_time;
static counter gcgcablemem;
static counter gcfixedmem;
#endif /* KAFFE_STATS */

/* Is this pointer within our managed heap? */
#define	IS_A_HEAP_POINTER(from) \
	((uintp) (from) >= gc_heap_base && \
	 (uintp) (from) < gc_heap_base + gc_heap_range)

static void *reserve;
static void *outOfMem;
static void *outOfMem_allocator;

#if defined(SUPPORT_VERBOSEMEM)

static void objectStatsChange(gc_unit*, int);
static void objectStatsPrint(void);

#define	OBJECTSTATSADD(M)	objectStatsChange(M, 1)
#define	OBJECTSTATSREMOVE(M)	objectStatsChange(M, -1)
#define	OBJECTSTATSPRINT()	objectStatsPrint()

#else

#define	OBJECTSTATSADD(M)
#define	OBJECTSTATSREMOVE(M)
#define	OBJECTSTATSPRINT()

#endif

/* For statistics gathering, record how many objects and how
 * much memory was marked.
 */
static inline
void record_marked(int nr_of_objects, uint32 size)
{
	gcStats.markedobj += nr_of_objects;
	gcStats.markedmem += size;
}

iLock* gcman;
iLock* finman;
iLock* gc_lock;			/* allocator mutex */

static void gcFree(Collector* gcif, void* mem);

/* Standard GC function sets.  We call them "allocation types" now */
static gcFuncs gcFunctions[GC_ALLOC_MAX_INDEX];

/* Number of registered allocation types */
static int nrTypes;

/*
 * register an allocation type under a certain index
 * NB: we could instead return a pointer to the record describing the
 * allocation type.  This would give us more flexibility, but it wouldn't
 * allow us to use compile-time constants.
 */
static void
registerTypeByIndex(int index, walk_func_t walk, final_func_t final,
	destroy_func_t destroy, const char *description)
{
	/* once only, please */
	assert (gcFunctions[index].description == 0);
	/* don't exceed bounds */
	assert (index >= 0 &&
		index < sizeof(gcFunctions)/sizeof(gcFunctions[0]));

	gcFunctions[index].walk = walk;
	gcFunctions[index].final = final;
	gcFunctions[index].destroy = destroy;
	gcFunctions[index].description = description;
	if (index >= nrTypes) {
		nrTypes = index + 1;
	}
}
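
/*
 * For illustration (a sketch, not part of the original file): each
 * allocation type is registered exactly once under its compile-time
 * index, conceptually something like
 *
 *	gcRegisterGcTypeByIndex(gcif, GC_ALLOC_NORMALOBJECT,
 *	    walkObject, 0, destroyObject, "normal-object");
 *
 * where walkObject and destroyObject stand for hypothetical hooks.
 * Because the index is a compile-time constant, looking up a type
 * later is a plain array access into gcFunctions[].
 */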

/*
 * Register a fixed allocation type.  The only reason we tell them apart
 * is for statistical purposes.
 */
static void
gcRegisterFixedTypeByIndex(Collector* gcif, int index,
	const char *description)
{
	registerTypeByIndex(index, 0, GC_OBJECT_FIXED, 0, description);
}

/*
 * Register an allocation type that is subject to gc.
 */
static void
gcRegisterGcTypeByIndex(Collector* gcif, int index,
	walk_func_t walk, final_func_t final, destroy_func_t destroy,
	const char *description)
{
	registerTypeByIndex(index, walk, final, destroy, description);
}

struct _gcStats gcStats;

static void startGC(Collector *gcif);
static void finishGC(Collector *gcif);
static void startFinalizer(void);
static void markObjectDontCheck(gc_unit *unit, gc_block *info, int idx);

/* Return true if gc_unit is pointer to an allocated object */
static inline int
gc_heap_isobject(gc_block *info, gc_unit *unit)
{
	uintp p = (uintp) UTOMEM(unit) - gc_heap_base;
	int idx;

	if (!(p & (MEMALIGN - 1)) && p < gc_heap_range && info->inuse) {
		/* Make sure 'unit' refers to the beginning of an
		 * object.  We do this by making sure it is correctly
		 * aligned within the block.
		 */
		idx = GCMEM2IDX(info, unit);
		if (idx < info->nr && GCBLOCK2MEM(info, idx) == unit &&
		    (GC_GET_COLOUR(info, idx) & GC_COLOUR_INUSE) ==
		    GC_COLOUR_INUSE) {
			return 1;
		}
	}
	return 0;
}

static inline void
markObjectDontCheck(gc_unit *unit, gc_block *info, int idx)
{
	/* If object's been traced before, don't do it again */
	if (GC_GET_COLOUR(info, idx) != GC_COLOUR_WHITE) {
		return;
	}

DBG(GCWALK,
	dprintf("  marking @%p: %s\n", UTOMEM(unit),
		describeObject(UTOMEM(unit)));
    )

	DBG(GCSTAT,
	    switch (GC_GET_FUNCS(info, idx)) {
	    case GC_ALLOC_NORMALOBJECT:
	    case GC_ALLOC_FINALIZEOBJECT:
	    case GC_ALLOC_PRIMARRAY:
	    case GC_ALLOC_REFARRAY: {
		Hjava_lang_Object *obj;
		obj = (Hjava_lang_Object *)(unit+1);
		if (obj->dtable != NULL) {
			Hjava_lang_Class *c;
			c = OBJECT_CLASS(obj);
			if (c)
				c->live_count++;
		}
	    }});

	/* If we found a new white object, mark it as grey and
	 * move it into the grey list.
	 */
	GC_SET_COLOUR(info, idx, GC_COLOUR_GREY);
	UREMOVELIST(unit);
	UAPPENDLIST(gclists[grey], unit);
}

/*
 * Mark the memory given by an address if it really is an object.
 */
static void
gcMarkAddress(Collector* gcif, const void* mem)
{
	gc_block* info;
	gc_unit* unit;

	/*
	 * First we check to see if the memory 'mem' is in fact the
	 * beginning of an object.  If not we just return.
	 */

	/* Get block info for this memory - if it exists */
	info = GCMEM2BLOCK(mem);
	unit = UTOUNIT(mem);
	if (gc_heap_isobject(info, unit)) {
		markObjectDontCheck(unit, info, GCMEM2IDX(info, unit));
	}
}

/*
 * Mark an object.  Argument is assumed to point to a valid object,
 * and never, ever, be null.
 */
static void
gcMarkObject(Collector* gcif, const void* objp)
{
	gc_unit *unit = UTOUNIT(objp);
	gc_block *info = GCMEM2BLOCK(unit);

	DBG(GCDIAG, assert(gc_heap_isobject(info, unit)));
	markObjectDontCheck(unit, info, GCMEM2IDX(info, unit));
}

static void
gcWalkConservative(Collector* gcif, const void* base, uint32 size)
{
	int8* mem;

DBG(GCWALK,
	dprintf("scanning %d bytes conservatively from %p-%p\n",
		size, base, base+size);
    )

	record_marked(1, size);

	if (size > 0) {
		for (mem = ((int8*)base) + (size & -ALIGNMENTOF_VOIDP)
			 - sizeof(void*);
		     (void*)mem >= base; mem -= ALIGNMENTOF_VOIDP) {
			void *p = *(void **)mem;
			if (p) {
				gcMarkAddress(gcif, p);
			}
		}
	}
}
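
/*
 * Worked example for the loop above (a sketch, assuming 32-bit pointers
 * and ALIGNMENTOF_VOIDP == 4): scanning base = 0x1000 with size = 22
 * starts at 0x1000 + (22 & -4) - 4 = 0x1010 and visits 0x1010, 0x100c,
 * 0x1008, 0x1004 and 0x1000 -- every aligned word that can hold a whole
 * pointer -- while the trailing 2 bytes are ignored.  Any word whose
 * value happens to look like a heap address keeps the corresponding
 * object alive; that imprecision is what makes the scan "conservative".
 */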

/*
 * Like walkConservative, except that the length is computed from the
 * block size of the object.  Must be called with a pointer to an object
 * allocated by the gc.
 */
static
uint32
gcGetObjectSize(Collector* gcif, const void* mem)
{
	return (GCBLOCKSIZE(GCMEM2BLOCK(UTOUNIT(mem))));
}

static
int
gcGetObjectIndex(Collector* gcif, const void* mem)
{
	gc_unit* unit = UTOUNIT(mem);
	gc_block* info = GCMEM2BLOCK(unit);

	if (!gc_heap_isobject(info, unit)) {
		return (-1);
	}
	else {
		return (GC_GET_FUNCS(info, GCMEM2IDX(info, unit)));
	}
}

/*
 * Given a pointer within an object, find the base of the object.
 * This works for both gcable and fixed object types.
 *
 * This method uses many details of the allocator implementation.
 * Specifically, it relies on the contiguous layout of block infos
 * and the way GCMEM2BLOCK and GCMEM2IDX are implemented.
 */
static
void*
gcGetObjectBase(Collector *gcif, const void* mem)
{
	int idx;
	gc_block* info;

	/* quickly reject pointers that are not part of this heap */
	if (!IS_A_HEAP_POINTER(mem)) {
		return (0);
	}

	info = GCMEM2BLOCK(mem);
	if (!info->inuse) {
		/* go down block list to find out whether we were hitting
		 * in a large object
		 */
		while (!info->inuse && (uintp)info > (uintp)gc_block_base) {
			info--;
		}
		/* must be a large block, hence nr must be 1 */
		if (!info->inuse || info->nr != 1) {
			return (0);
		}
	}

	idx = GCMEM2IDX(info, mem);

	/* report fixed objects as well */
	if (idx < info->nr &&
	    ((GC_GET_COLOUR(info, idx) & GC_COLOUR_INUSE) ||
	     (GC_GET_COLOUR(info, idx) & GC_COLOUR_FIXED))) {
		return (UTOMEM(GCBLOCK2MEM(info, idx)));
	}
	return (0);
}

static
const char*
gcGetObjectDescription(Collector* gcif, const void* mem)
{
	return (gcFunctions[gcGetObjectIndex(gcif, mem)].description);
}

/*
 * Walk a bit of memory.
 */
static
void
gcWalkMemory(Collector* gcif, void* mem)
{
	gc_block* info;
	int idx;
	gc_unit* unit;
	uint32 size;
	walk_func_t walkf;

	unit = UTOUNIT(mem);
	info = GCMEM2BLOCK(unit);
	idx = GCMEM2IDX(info, unit);

	UREMOVELIST(unit);
	UAPPENDLIST(gclists[black], unit);
	GC_SET_COLOUR(info, idx, GC_COLOUR_BLACK);

	assert(GC_GET_FUNCS(info, idx) <
	       sizeof(gcFunctions)/sizeof(gcFunctions[0]));
	size = GCBLOCKSIZE(info);
	record_marked(1, size);
	walkf = gcFunctions[GC_GET_FUNCS(info, idx)].walk;
	if (walkf != 0) {
DBG(GCWALK,
		dprintf("walking %d bytes @%p: %s\n", size, mem,
			describeObject(mem));
    )
		walkf(gcif, mem, size);
	}
}
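
/*
 * The colour discipline tying the routines above together: objects begin
 * white; markObjectDontCheck() moves a newly reached white object onto
 * the grey list; gcWalkMemory() blackens a grey object once the
 * references inside it have been walked.  When the grey list drains,
 * anything still white is unreachable and may be swept (or queued for
 * finalization first).
 */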

#ifdef DEBUG
static int
gcClearCounts(Hjava_lang_Class *c, void *_)
{
	c->live_count = 0;
	return 0;
}

static int
gcDumpCounts(Hjava_lang_Class *c, void *_)
{
	if (c->live_count)
		dprintf("%7d %s\n", c->live_count, c->name->data);
	return 0;
}
#endif

/*
 * The Garbage Collector sits in a loop starting a collection, waiting
 * until it's finished incrementally, then tidying up before starting
 * another one.
 */
static void
gcMan(void* arg)
{
	gc_unit* unit;
	gc_unit* nunit;
	gc_block* info;
	int idx;
	Collector *gcif = (Collector*)arg;
	int iLockRoot;

	/* Wake up anyone waiting for the GC to finish every time we're done */
	for (;;) {
		lockStaticMutex(&gcman);

		while (gcRunning == 0) {
			waitStaticCond(&gcman, 0);
		}

		/* We have observed that gcRunning went from 0 to 1 or 2.
		 * One thread requested a gc.  We will decide whether to gc
		 * or not, and then we will set gcRunning back to 0 and
		 * inform the calling thread of the change.
		 */
		assert(gcRunning > 0);

		/*
		 * gcRunning will either be 1 or 2.  If it's 1, we can apply
		 * some heuristics for when we skip a collection.
		 * If it's 2, we must collect.  See gcInvokeGC.
		 */

		/* First, since multiple threads can wake us up without
		 * coordinating with each other, we must make sure that we
		 * don't collect multiple times in a row.
		 */
		if (gcRunning == 1 && gcStats.allocmem == 0) {
			/* XXX: If an application runs out of memory, it may
			 * be possible that an outofmemory error was raised
			 * and the application in turn dropped some
			 * references.  Then allocmem will be 0, yet a gc
			 * would be in order.  Once we implement OOM Errors
			 * properly, we will fix this; for now, this guards
			 * against wakeups by multiple threads.
			 */
DBG(GCSTAT,
			dprintf("skipping collection because allocmem==0...\n");
    )
			goto gcend;
		}

		/*
		 * Now try to decide whether we should postpone the gc and
		 * get some memory from the system instead.
		 *
		 * If we already use the maximum amount of memory, we must gc.
		 *
		 * Otherwise, wait until the newly allocated memory is at
		 * least 1/4 of the total memory in use.  Assuming that the
		 * gc will collect all newly allocated memory, this would
		 * asymptotically converge to a memory usage of approximately
		 * 4/3 the amount of long-lived and fixed data combined.
		 *
		 * Feel free to tweak this parameter.
		 * NB: Boehm calls this the liveness factor, we stole the
		 * default 1/4 setting from there.
		 *
		 * XXX: make this a run-time configurable parameter.
		 */
		if (gcRunning == 1
		    && gc_heap_total < gc_heap_limit
		    && gcStats.allocmem * 4 < gcStats.totalmem * 1) {
DBG(GCSTAT,
			dprintf("skipping collection since alloc/total "
				"%dK/%dK = %.2f < 1/4\n",
				gcStats.allocmem/1024,
				gcStats.totalmem/1024,
				gcStats.allocmem/(double)gcStats.totalmem);
    )
			goto gcend;
		}

		lockStaticMutex(&gc_lock);

		DBG(GCSTAT, walkClassPool(gcClearCounts, 0));

		startGC(gcif);

		for (unit = gclists[grey].cnext; unit != &gclists[grey];
		     unit = gclists[grey].cnext) {
			gcWalkMemory(gcif, UTOMEM(unit));
		}

		/* Now walk any white objects which will be finalized.  They
		 * may get reattached, so anything they reference must also
		 * be live just in case.
		 */
		for (unit = gclists[white].cnext; unit != &gclists[white];
		     unit = nunit) {
			nunit = unit->cnext;
			info = GCMEM2BLOCK(unit);
			idx = GCMEM2IDX(info, unit);
			if (GC_GET_STATE(info, idx) == GC_STATE_NEEDFINALIZE) {
				/* this assert is somewhat expensive */
				DBG(GCDIAG,
				    assert(gc_heap_isobject(info, unit)));
				GC_SET_STATE(info, idx, GC_STATE_INFINALIZE);
				markObjectDontCheck(unit, info, idx);
			}
		}

		/* We may now have more grey objects, so walk them */
		for (unit = gclists[grey].cnext; unit != &gclists[grey];
		     unit = gclists[grey].cnext) {
			gcWalkMemory(gcif, UTOMEM(unit));
		}

		finishGC(gcif);

		DBG(GCSTAT,
		    dprintf("REACHABLE OBJECT HISTOGRAM\n");
		    dprintf("%-7s %s\n", "COUNT", "CLASS");
		    dprintf("%-7s %s\n", "-------",
			    "-----------------------------------"
			    "-----------------------------------");
		    walkClassPool(gcDumpCounts, 0));

		unlockStaticMutex(&gc_lock);

		startFinalizer();

		if (Kaffe_JavaVMArgs[0].enableVerboseGC > 0) {
			/* print out all the info you ever wanted to know */
			dprintf(
			    "<GC: heap %dK, total before %dK,"
			    " after %dK (%d/%d objs)\n %2.1f%% free,"
			    " alloced %dK (#%d), marked %dK, "
			    "swept %dK (#%d)\n"
			    " %d objs (%dK) awaiting finalization>\n",
			    (int)(gc_heap_total/1024),
			    gcStats.totalmem/1024,
			    (gcStats.totalmem-gcStats.freedmem)/1024,
			    gcStats.totalobj,
			    gcStats.totalobj-gcStats.freedobj,
			    (1.0 - ((gcStats.totalmem-gcStats.freedmem)/
				(double)gc_heap_total)) * 100.0,
			    gcStats.allocmem/1024,
			    gcStats.allocobj,
			    gcStats.markedmem/1024,
			    gcStats.freedmem/1024,
			    gcStats.freedobj,
			    gcStats.finalobj,
			    gcStats.finalmem/1024);
		}
		if (Kaffe_JavaVMArgs[0].enableVerboseGC > 1) {
			OBJECTSTATSPRINT();
		}
		gcStats.totalmem -= gcStats.freedmem;
		gcStats.totalobj -= gcStats.freedobj;
		gcStats.allocobj = 0;
		gcStats.allocmem = 0;

gcend:;
		/* now signal any waiters */
		gcRunning = 0;
		broadcastStaticCond(&gcman);

		unlockStaticMutex(&gcman);
	}
}
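
/*
 * Worked example of the 1/4 liveness heuristic in gcMan() above, with
 * illustrative numbers: given 12MB of long-lived data, a collection is
 * postponed until roughly 4MB of new allocation has accumulated
 * (4MB * 4 >= 16MB in use), so steady-state heap usage peaks near
 * 12MB + 4MB, i.e. 4/3 of the live data, as the comment predicts.
 */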

/*
 * Start the GC process by scanning the root and thread stack objects.
 */
static
void
startGC(Collector *gcif)
{
	gc_unit* unit;
	gc_unit* nunit;

	gcStats.freedmem = 0;
	gcStats.freedobj = 0;
	gcStats.markedobj = 0;
	gcStats.markedmem = 0;

	/* disable the mutator to protect colour lists */