gc-incremental.c
static
void
gcInvokeFinalizer(Collector* gcif)
{
  while (!finaliserStarted)
    KTHREAD(yield)();

  /* First invoke the GC */
  KGC_invoke(gcif, 1);

  /* Run the finalizer (it might already be running as a result of
   * the GC)
   */
  lockStaticMutex(&finman);
  if (finalRunning == false) {
    finalRunning = true;
    signalStaticCond(&finman);
  }
  lockStaticMutex(&finmanend);
  unlockStaticMutex(&finman);
  waitStaticCond(&finmanend, (jlong)0);
  unlockStaticMutex(&finmanend);
}

/*
 * Allocate a new object.  The object is attached to the white queue.
 * After allocation, if incremental collection is active we perform
 * a little garbage collection.  If we finish it, we wake up the garbage
 * collector.
 */

void throwOutOfMemory(void) NONRETURNING;

static
void*
gcMalloc(Collector* gcif, size_t size, gc_alloc_type_t fidx)
{
  gc_block* info;
  gc_unit* unit;
  void * volatile mem;  /* needed on SGI, see comment below */
  int i;
  size_t bsz;
  int times = 0;

  assert(gc_init != 0);
  assert(gcFunctions[fidx].description != NULL);
  assert(size != 0);
  assert(size > 0);

  size += sizeof(gc_unit);

  lockStaticMutex(&gc_lock);

  for (unit = NULL; unit == NULL;) {
    times++;
    unit = gc_heap_malloc(size);

    /* keep pointer to object */
    mem = UTOMEM(unit);
    if (unit == 0) {
      switch (times) {
      case 1:
        /* Try invoking GC if it is available */
        if (garbageman != 0) {
          unlockStaticMutex(&gc_lock);
          KGC_invoke(gcif, 0);
          lockStaticMutex(&gc_lock);
        }
        break;

      case 2:
        /* Grow the heap */
        DBG(GCSYSALLOC,
            dprintf("growing heap by %u bytes of type %s (%2.1f%% free)\n",
                    (unsigned int)size, gcFunctions[fidx].description,
                    (1.0 - ((double)gcStats.totalmem / gc_get_heap_total())) * 100.0); );
        gc_heap_grow(size);
        break;

      default:
        if (DBGEXPR(CATCHOUTOFMEM, true, false)) {
          /*
           * If we ran out of memory, an OutOfMemoryError is
           * thrown.  If we fail to allocate memory for it, all
           * is lost.
           */
          assert (!!!"Ran out of memory!");
        }
        /* Guess we've really run out */
        unlockStaticMutex(&gc_lock);
        return (NULL);
      }
    }
  }

  info = gc_mem2block(mem);
  i = GCMEM2IDX(info, unit);

  bsz = GCBLOCKSIZE(info);
  gcStats.totalmem += bsz;
  gcStats.totalobj += 1;
  gcStats.allocmem += bsz;
  gcStats.allocobj += 1;

  KGC_SET_FUNCS(info, i, fidx);

  OBJECTSTATSADD(unit);
  OBJECTSIZESADD(size);

  /* Determine whether we need to finalise or not */
  if (gcFunctions[fidx].final == KGC_OBJECT_NORMAL
      || gcFunctions[fidx].final == KGC_OBJECT_FIXED) {
    KGC_SET_STATE(info, i, KGC_STATE_NORMAL);
  }
  else {
    KGC_SET_STATE(info, i, KGC_STATE_NEEDFINALIZE);
  }

  /* If object is fixed, we give it the fixed colour and do not
   * attach it to any lists.  This object is not part of the GC
   * regime and must be freed explicitly.
   */
  if (gcFunctions[fidx].final == KGC_OBJECT_FIXED) {
    addToCounter(&gcfixedmem, "gcmem-fixed objects", 1, bsz);
    KGC_SET_COLOUR(info, i, KGC_COLOUR_FIXED);
  }
  else {
    addToCounter(&gcgcablemem, "gcmem-gcable objects", 1, bsz);
    /*
     * Note that as soon as we put the object on the white list,
     * the gc might come along and free the object if it can't
     * find any references to it.  This is why we need to keep
     * a reference in `mem'.  Note that keeping a reference in
     * `unit' will not do because markObject performs a UTOUNIT()!
     * In addition, on some architectures (SGI), we must tell the
     * compiler to not delay computing mem by defining it volatile.
     */
    KGC_SET_COLOUR(info, i, KGC_COLOUR_WHITE);
    if (KGC_GET_STATE(info, i) == KGC_STATE_NEEDFINALIZE) {
      UAPPENDLIST(gclists[fin_white], unit);
    }
    else {
      UAPPENDLIST(gclists[nofin_white], unit);
    }
  }

  /* It is not safe to allocate java objects the first time
   * gcMalloc is called, but it should be safe after gcEnable
   * has been called.
   */
  if (garbageman && !outOfMem && !outOfMem_allocator) {
    outOfMem_allocator = KTHREAD(current)();
  }

  unlockStaticMutex(&gc_lock);

  /* KTHREAD(current)() will be null in some window before we
   * should try allocating java objects
   */
  if (!outOfMem
      && outOfMem_allocator
      && outOfMem_allocator == KTHREAD(current)()) {
    outOfMem = OOM_ALLOCATING;
    outOfMem = OutOfMemoryError;  /* implicit allocation */
    outOfMem_allocator = NULL;
    gc_add_ref(outOfMem);
  }

  return (mem);
}

static
struct Hjava_lang_Throwable *
gcThrowOOM(Collector *gcif UNUSED)
{
  Hjava_lang_Throwable *ret = NULL;
  int reffed;

  /*
   * Make sure we are the only thread to use this exception
   * object.
   */
  lockStaticMutex(&gc_lock);
  ret = outOfMem;
  reffed = outOfMem != 0;
  outOfMem = NULL;

  /* We try allocating reserved pages before we allocate the
   * OutOfMemory error.  We can use some or all of the reserved
   * pages to actually grab an error.
   */
  if (gc_primitive_use_reserve()) {
    if (!ret || ret == OOM_ALLOCATING) {
      unlockStaticMutex(&gc_lock);
      ret = OutOfMemoryError;  /* implicit allocation */
      lockStaticMutex(&gc_lock);
    }
  }
  if (ret == OOM_ALLOCATING || ret == NULL) {
    /* die now */
    unlockStaticMutex(&gc_lock);
    dprintf("Not enough memory to throw OutOfMemoryError!\n");
    KAFFEVM_ABORT();
  }
  unlockStaticMutex(&gc_lock);

  if (reffed)
    gc_rm_ref(ret);

  return ret;
}

/*
 * Reallocate an object.
 */
static
void*
gcRealloc(Collector* gcif, void* mem, size_t size, gc_alloc_type_t fidx)
{
  gc_block* info;
  int idx;
  void* newmem;
  gc_unit* unit;
  size_t osize;

  assert(gcFunctions[fidx].final == KGC_OBJECT_FIXED);

  /* If nothing to realloc from, just allocate */
  if (mem == NULL) {
    return (gcMalloc(gcif, size, fidx));
  }

  lockStaticMutex(&gc_lock);
  unit = UTOUNIT(mem);
  info = gc_mem2block(unit);
  idx = GCMEM2IDX(info, unit);
  osize = GCBLOCKSIZE(info) - sizeof(gc_unit);

  assert(KGC_GET_FUNCS(info, idx) == fidx);
  /* Can only handle fixed objects at the moment */
  assert(KGC_GET_COLOUR(info, idx) == KGC_COLOUR_FIXED);
  info = NULL;
  unlockStaticMutex(&gc_lock);

  /* If we'll fit into the current space, just send it back */
  if (osize >= size) {
    return (mem);
  }

  /* Allocate new memory, copy data, and free the old */
  newmem = gcMalloc(gcif, size, fidx);
  memcpy(newmem, mem, (size_t)osize);
  gcFree(gcif, mem);

  return (newmem);
}

/*
 * Explicitly free an object.
 */
static
void
gcFree(Collector* gcif UNUSED, void* mem)
{
  gc_block* info;
  int idx;
  gc_unit* unit;

  if (mem != NULL) {
    lockStaticMutex(&gc_lock);
    unit = UTOUNIT(mem);
    info = gc_mem2block(unit);
    idx = GCMEM2IDX(info, unit);
    if (KGC_GET_COLOUR(info, idx) == KGC_COLOUR_FIXED) {
      size_t sz = GCBLOCKSIZE(info);

      OBJECTSTATSREMOVE(unit);

      /* Keep the stats correct */
      gcStats.totalmem -= sz;
      gcStats.totalobj -= 1;
      addToCounter(&gcfixedmem, "gcmem-fixed objects", 1, -(jlong)sz);

      gc_heap_free(unit);
    }
    else {
      assert(!!!"Attempt to explicitly free nonfixed object");
    }
    unlockStaticMutex(&gc_lock);
  }
}

static
void
gcInit(Collector *collector UNUSED)
{
  gc_init = 1;
}

/*
 * Start gc threads, which enable collection
 */
static void
/* ARGSUSED */
gcEnable(Collector* collector)
{
  errorInfo info;

  if (DBGEXPR(NOGC, false, true)) {
    /* Start the GC daemons we need */
    finalman = createDaemon(&finaliserMan, "finaliser",
                            collector, THREAD_MAXPRIO,
                            FINALIZERSTACKSIZE, &info);
    garbageman = createDaemon(&gcMan, "gc",
                              collector, THREAD_MAXPRIO,
                              GCSTACKSIZE, &info);
    assert(finalman && garbageman);
  }
}

#if defined(SUPPORT_VERBOSEMEM)

/* --------------------------------------------------------------------- */
/* The following functions are strictly for statistics gathering          */

static
void
objectStatsChange(gc_unit* unit, int diff)
{
  gc_block* info;
  int idx;

  info = gc_mem2block(unit);
  idx = KGC_GET_FUNCS(info, GCMEM2IDX(info, unit));

  assert(idx >= 0 && gcFunctions[idx].description != NULL);

  gcFunctions[idx].nr += diff * 1;
  gcFunctions[idx].mem += diff * GCBLOCKSIZE(info);
}

static
void
objectStatsPrint(void)
{
  int cnt = 0;

  dprintf("Memory statistics:\n");
  dprintf("------------------\n");

  while (cnt < KGC_ALLOC_MAX_INDEX) {
    dprintf("%14.14s: Nr %6d Mem %6dK",
            gcFunctions[cnt].description,
            gcFunctions[cnt].nr, gcFunctions[cnt].mem/1024);
    if (++cnt % 2 != 0) {
      dprintf(" ");
    } else {
      dprintf("\n");
    }
  }

  if (cnt % 2 != 0) {
    dprintf("\n");
  }

  objectSizesPrint();
}

static
void
objectSizesAdd(size_t sz)
{
  int i;

  for (i = 0; objectSizes[i].size > 0 && sz > (size_t)objectSizes[i].size; i++)
    ;
  objectSizes[i].count++;

  /* This might be slightly misleading as it
   * doesn't take into account the block overhead
   * for small allocations
   */
  if (objectSizes[i].size > MAX_SMALL_OBJECT_SIZE) {
    objectSizes[i].total += sz;
  } else {
    objectSizes[i].total += objectSizes[i].size;
  }
}

static
void
objectSizesPrint(void)
{
  int i;
  uint64 total;
  int count;
  int nr;
  int j;

  total = 0;
  count = 0;
  for (i = 0; objectSizes[i].size != -1; i++) {
    count += objectSizes[i].count;
    total += objectSizes[i].total;
  }
  nr = i;

  if (total == 0) {
    return;
  }

  dprintf("Percentage size allocations: %% of allocation counts / %% of memory\n");
  dprintf("-----------------------------------------------------------------\n");

  for (i = 0; i < nr; ) {
    for (j = 0; j < 3 && i < nr; j++, i++) {
      dprintf("%10zd :%5.1f /%5.1f ",
              objectSizes[i].size,
              (float)(objectSizes[i].count * 100 / (float)count),
              (float)(objectSizes[i].total * 100 / (float)total));
    }
    dprintf("\n");
  }
}

#endif

static uintp
gcGetHeapLimit(Collector *gcif UNUSED)
{
  return gc_get_heap_limit();
}

static uintp
gcGetHeapTotal(Collector *gcif UNUSED)
{
  return gc_get_heap_total();
}

static uintp
gcGetHeapFree(Collector *gcif)
{
  return gcGetHeapTotal(gcif) - gcStats.totalmem;
}

static const char *
gcGetName(UNUSED Collector *gcif)
{
  return "kaffe-gc";
}

/*
 * vtable for object implementing the collector interface.
 */
static struct GarbageCollectorInterface_Ops KGC_Ops = {
  gcGetName,              /* reserved */
  NULL,                   /* reserved */
  NULL,                   /* reserved */
  gcMalloc,
  gcRealloc,
  gcFree,
  gcInvokeGC,
  gcInvokeFinalizer,
  gcInit,
  gcEnable,
  gcMarkAddress,
  gcMarkObject,
  gcGetObjectSize,
  gcGetObjectDescription,
  gcGetObjectIndex,
  gcGetObjectBase,
  gcRegisterFixedTypeByIndex,
  gcRegisterGcTypeByIndex,
  gcThrowOOM,
  gcEnableGC,
  gcDisableGC,
  gcGetHeapFree,
  gcGetHeapLimit,
  gcGetHeapTotal,
  KaffeGC_addRef,
  KaffeGC_rmRef,
  KaffeGC_addWeakRef,
  KaffeGC_rmWeakRef
};

/*
 * Initialise the Garbage Collection system.
 */
Collector*
createGC(void)
{
  initStaticLock(&gcman);
  initStaticLock(&gcmanend);
  initStaticLock(&finman);
  initStaticLock(&finmanend);
  initStaticLock(&gc_lock);

  KaffeGC_initRefs();

  URESETLIST(gclists[nofin_white]);
  URESETLIST(gclists[fin_white]);
  URESETLIST(gclists[grey]);
  URESETLIST(gclists[nofin_black]);
  URESETLIST(gclists[fin_black]);
  URESETLIST(gclists[finalise]);

  gc_obj.collector.ops = &KGC_Ops;

  gc_heap_initialise();

  gc_primitive_reserve(KGC_NUMBER_PAGE_IN_RESERVE);

  return (&gc_obj.collector);
}
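
/*
 * Illustrative only: a minimal sketch (compiled out via #if 0) of how the
 * entry points above fit together.  The real VM reaches these static
 * functions through the KGC_Ops vtable installed by createGC(); the direct
 * calls below, and the allocation type index KGC_ALLOC_EXAMPLE, are
 * hypothetical and shown only to make the allocate/free contract concrete.
 * Note that gcFree() only accepts objects of a fixed (KGC_OBJECT_FIXED)
 * type; white-listed objects are reclaimed by the collector itself.
 */
#if 0
static void
example_usage(void)
{
  Collector *collector = createGC();   /* build collector + KGC_Ops vtable */
  void *buf;

  gcInit(collector);                    /* allocator is now usable          */
  gcEnable(collector);                  /* start gc and finaliser daemons   */

  /* Assumes KGC_ALLOC_EXAMPLE was registered as a fixed type beforehand
   * (e.g. via gcRegisterFixedTypeByIndex), so the object bypasses the
   * white/grey/black lists and must be freed explicitly.
   */
  buf = gcMalloc(collector, 128, KGC_ALLOC_EXAMPLE);
  if (buf == NULL)
    throwOutOfMemory();

  gcFree(collector, buf);
}
#endif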