📄 gc-incremental.c
字号:
lockStaticMutex(&gcman);

	/* Tail of the GC manager thread loop (function header is above this
	 * chunk).  gcRunning acts as the request flag: 0 = idle, 1 = gc
	 * requested (may be skipped by heuristics), 2 = gc mandatory. */
	gcRunning = 0;	/* Wake up anyone waiting for the GC to finish
			   every time we're done */

	for (;;) {
		/* Sleep on the gcman condvar until some thread requests a
		 * collection via gcInvokeGC (which sets gcRunning to 1 or 2). */
		while (gcRunning == 0) {
			waitStaticCond(&gcman, (jlong)0);
		}
		/* We have observed that gcRunning went from 0 to 1 or 2
		 * One thread requested a gc.  We will decide whether to gc
		 * or not, and then we will set gcRunning back to 0 and
		 * inform the calling thread of the change */
		assert(gcRunning > 0);

		/*
		 * gcRunning will either be 1 or 2.  If it's 1, we can apply
		 * some heuristics for when we skip a collection.
		 * If it's 2, we must collect.  See gcInvokeGC.
		 */

		/* First, since multiple thread can wake us up without
		 * coordinating with each other, we must make sure that we
		 * don't collect multiple times in a row.
		 */
		if (gcRunning == 1 && gcStats.allocmem == 0) {
			/* XXX: If an application runs out of memory, it may be
			 * possible that an outofmemory error was raised and the
			 * application in turn dropped some references.  Then
			 * allocmem will be 0, yet a gc would be in order.
			 * Once we implement OOM Errors properly, we will fix
			 * this; for now, this guards against wakeups by
			 * multiple threads.
			 */
			DBG(GCSTAT, dprintf("skipping collection cause allocmem==0...\n"); );
			goto gcend;
		}

		/*
		 * Now try to decide whether we should postpone the gc and get
		 * some memory from the system instead.
		 *
		 * If we already use the maximum amount of memory, we must gc.
		 *
		 * Otherwise, wait until the newly allocated memory is at
		 * least 1/4 of the total memory in use.  Assuming that the
		 * gc will collect all newly allocated memory, this would
		 * asymptotically converge to a memory usage of approximately
		 * 4/3 the amount of long-lived and fixed data combined.
		 *
		 * Feel free to tweak this parameter.
		 * NB: Boehm calls this the liveness factor, we stole the
		 * default 1/4 setting from there.
		 *
		 * XXX: make this a run-time configurable parameter.
		 */
		if (gcRunning == 1
		    && gc_get_heap_total() < gc_get_heap_limit()
		    && gcStats.allocmem * 4 < gcStats.totalmem * 1) {
			/* NOTE(review): the message below says "< 1/3" but the
			 * test above is allocmem*4 < totalmem, i.e. 1/4 — the
			 * debug string looks stale; confirm before trusting it. */
			DBG(GCSTAT, dprintf("skipping collection since alloc/total "
					    "%dK/%dK = %.2f < 1/3\n",
					    gcStats.allocmem/1024,
					    gcStats.totalmem/1024,
					    gcStats.allocmem/(double)gcStats.totalmem); );
			goto gcend;
		}

		DBG(GCSTAT, walkClassPool(gcClearCounts, NULL));

		/* Mark phase: scan roots and thread stacks, stop the world. */
		startGC(gcif);

		/* process any objects found by walking the root references */
		while (gclists[grey].cnext != &gclists[grey]) {
			unit = gclists[grey].cnext;
			KaffeGC_WalkMemory(gcif, UTOMEM(unit));
		}

		/* Now walk any white objects which will be finalized.  They
		 * may get reattached, so anything they reference must also
		 * be live just in case.
		 */
		while (gclists[fin_white].cnext != &gclists[fin_white]) {
			unit = gclists[fin_white].cnext;
			info = gc_mem2block(unit);
			idx = GCMEM2IDX(info, unit);
			assert (KGC_GET_STATE(info, idx) == KGC_STATE_NEEDFINALIZE);
			/* this assert is somewhat expensive */
			DBG(GCDIAG, assert(gc_heap_isobject(info, unit)));
			KGC_SET_STATE(info, idx, KGC_STATE_INFINALIZE);
			markObjectDontCheck(unit, info, idx);
		}

		/* now process the objects that are referenced by objects
		 * to be finalized */
		while (gclists[grey].cnext != &gclists[grey]) {
			unit = gclists[grey].cnext;
			KaffeGC_WalkMemory(gcif, UTOMEM(unit));
		}

		/* Sweep phase: recolour survivors, free the dead. */
		finishGC(gcif);

		DBG(GCSTAT,
		    dprintf("REACHABLE OBJECT HISTOGRAM\n");
		    dprintf("%-7s %s\n", "COUNT", "CLASS");
		    dprintf("%-7s %s\n", "-------",
			    "-----------------------------------"
			    "-----------------------------------");
		    walkClassPool(gcDumpCounts, NULL));

		/* Kick the finaliser thread if the gc queued anything. */
		startFinalizer();

		if (Kaffe_JavaVMArgs.enableVerboseGC > 0) {
			/* print out all the info you ever wanted to know */
			dprintf( "<GC: heap %dK, total before %dK,"
				" after %dK (%d/%d objs)\n %2.1f%% free,"
				" allocated %dK (#%d), marked %dK, "
				"swept %dK (#%d)\n"
				" %d objs (%dK) awaiting finalization>\n",
				(int)(gc_get_heap_total()/1024),
				gcStats.totalmem/1024,
				(gcStats.totalmem-gcStats.freedmem)/1024,
				gcStats.totalobj,
				gcStats.totalobj-gcStats.freedobj,
				(1.0 - (((float)gcStats.totalmem-gcStats.freedmem)/
					gc_get_heap_total())) * 100.0,
				gcStats.allocmem/1024,
				gcStats.allocobj,
				gcStats.markedmem/1024,
				gcStats.freedmem/1024,
				gcStats.freedobj,
				gcStats.finalobj,
				gcStats.finalmem/1024);
		}
		if (Kaffe_JavaVMArgs.enableVerboseGC > 1) {
			OBJECTSTATSPRINT();
		}

		/* Roll the per-cycle counters into the running totals. */
		gcStats.totalmem -= gcStats.freedmem;
		gcStats.totalobj -= gcStats.freedobj;
		gcStats.allocobj = 0;
		gcStats.allocmem = 0;

	gcend:;
		/* now signal any waiters (gcInvokeGC blocks on gcmanend
		 * until gcRunning drops back to 0) */
		lockStaticMutex(&gcmanend);
		gcRunning = 0;
		broadcastStaticCond(&gcmanend);
		unlockStaticMutex(&gcmanend);
	}
	unlockStaticMutex(&gcman);
}

/*
 * Start the GC process by scanning the root and thread stack objects.
 * Resets the per-cycle free/mark counters, stops the world, and marks
 * every object awaiting finalisation (plus everything it references)
 * so finalisers never see freed memory.
 */
static
void
startGC(Collector *gcif)
{
	gc_unit* unit;
	gc_block* info;
	uintp idx;

	gcStats.freedmem = 0;
	gcStats.freedobj = 0;
	gcStats.markedobj = 0;
	gcStats.markedmem = 0;

#if defined(ENABLE_JVMPI)
	/* Notify profilers that a collection cycle is starting. */
	if( JVMPI_EVENT_ISENABLED(JVMPI_EVENT_GC_START) )
	{
		JVMPI_Event ev;

		ev.event_type = JVMPI_EVENT_GC_START;
		jvmpiPostEvent(&ev);
	}
#endif

	KTHREAD(lockGC)();
	lockStaticMutex(&gc_lock);

	/* disable the mutator to protect colour lists */
	STOPWORLD();

	/* measure time */
	startTiming(&gc_time, "gctime-scan");

	/*
	 * Since objects whose finaliser has to be run need to
	 * be kept alive, we have to mark them here.  They will
	 * be put back into the finalise list later on during
	 * the gc pass.
	 *
	 * Since these objects are treated like garbage, we have
	 * to set their colour to white before marking them.
	 */
	while (gclists[finalise].cnext != &gclists[finalise]) {
		unit = gclists[finalise].cnext;
		info = gc_mem2block(unit);
		idx = GCMEM2IDX(info, unit);
		KGC_SET_COLOUR (info, idx, KGC_COLOUR_WHITE);
		gcStats.finalobj -= 1;
		gcStats.finalmem -= GCBLOCKSIZE(info);
		markObjectDontCheck(unit, info, idx);
	}

	/*
	 * Now we may walk static strong references.
	 */
	KaffeGC_walkRefs(gcif);
}

/*
 * Finish off the GC process.  Any unreached (white) objects are moved
 * for finalising and the finaliser woken.
 * The reached (black) objects are moved onto the now empty white list
 * and recoloured white.
 */
static
void
finishGC(Collector *gcif)
{
	gc_unit* unit;
	gc_block* info;
	int idx;
	gcList toRemove;
	int i;

	/* There shouldn't be any grey objects at this point */
	assert(gclists[grey].cnext == &gclists[grey]);

	/* Detach the unreached no-finaliser objects onto a private list so
	 * they can be swept after the world is resumed and locks dropped. */
	if (gclists[nofin_white].cnext != &gclists[nofin_white]) {
		toRemove.cnext = gclists[nofin_white].cnext;
		toRemove.cprev = gclists[nofin_white].cprev;
		toRemove.cnext->cprev = &toRemove;
		toRemove.cprev->cnext = &toRemove;
		URESETLIST(gclists[nofin_white]);
	}
	else {
		URESETLIST(toRemove);
	}

	stopTiming(&gc_time);
	RESUMEWORLD();

	/*
	 * Now move the black objects back to the white queue for next time.
	 * NOTE(review): the loop walks list indices 1 and 2 and appends to
	 * i+3 — presumably the black lists map to their white counterparts
	 * by a fixed +3 offset in the gclists enum; confirm against the
	 * list-index definitions (not visible in this chunk).
	 */
	for (i=1; i<3; i++) {
		while (gclists[i].cnext != &gclists[i]) {
			unit = gclists[i].cnext;
			UREMOVELIST(unit);
			info = gc_mem2block(unit);
			idx = GCMEM2IDX(info, unit);
			assert(KGC_GET_COLOUR(info, idx) == KGC_COLOUR_BLACK);
			UAPPENDLIST(gclists[i+3], unit);
			KGC_SET_COLOUR(info, idx, KGC_COLOUR_WHITE);
		}
	}

	KTHREAD(unlockGC)();
	unlockStaticMutex(&gc_lock);

	startTiming(&sweep_time, "gctime-sweep");

	/* Sweep: destroy and free every object on the private dead list. */
	while (toRemove.cnext != &toRemove) {
		destroy_func_t destroy;

		unit = toRemove.cnext;
		info = gc_mem2block(unit);
		idx = GCMEM2IDX(info, unit);
		gcStats.freedmem += GCBLOCKSIZE(info);
		gcStats.freedobj += 1;
		OBJECTSTATSREMOVE(unit);

#if defined(ENABLE_JVMPI)
		if( JVMPI_EVENT_ISENABLED(JVMPI_EVENT_OBJECT_FREE) )
		{
			JVMPI_Event ev;

			ev.event_type = JVMPI_EVENT_OBJECT_FREE;
			ev.u.obj_free.obj_id = UTOMEM(unit);
			jvmpiPostEvent(&ev);
		}
#endif

		/* clear all weak references to the object if it has not
		 * already been during the finalisation mark phase.
		 */
		KaffeGC_clearWeakRef(gcif, UTOMEM(unit));

		/* invoke destroy function before freeing the object */
		info = gc_mem2block(unit);
		idx = GCMEM2IDX(info, unit);
		destroy = gcFunctions[KGC_GET_FUNCS(info,idx)].destroy;
		if (destroy != NULL) {
			destroy(gcif, UTOMEM(unit));
		}

		UREMOVELIST(unit);
		addToCounter(&gcgcablemem, "gcmem-gcable objects", 1,
			     -((jlong)GCBLOCKSIZE(info)));
		gc_heap_free(unit);
	}

#if defined(ENABLE_JVMPI)
	if( JVMPI_EVENT_ISENABLED(JVMPI_EVENT_GC_FINISH) )
	{
		JVMPI_Event ev;

		ev.event_type = JVMPI_EVENT_GC_FINISH;
		ev.u.gc_info.used_objects = (jlong)gcStats.markedobj;
		ev.u.gc_info.used_object_space = (jlong)gcStats.markedmem;
		ev.u.gc_info.total_object_space = (jlong)gcStats.totalmem;
		jvmpiPostEvent(&ev);
	}
#endif

	stopTiming(&sweep_time);
}

/*
 * Wake the finaliser thread if the finalise list is non-empty and the
 * finaliser is not already running.  Peeks the list under gc_lock, then
 * signals under finman — the two locks are never held together here.
 */
static
void
startFinalizer(void)
{
	int start;

	start = 0;

	lockStaticMutex(&gc_lock);
	/* If there's stuff to be finalised then we'd better do it */
	if (gclists[finalise].cnext != &gclists[finalise]) {
		start = 1;
	}
	unlockStaticMutex(&gc_lock);

	lockStaticMutex(&finman);
	if (start != 0 && finalRunning == false) {
		finalRunning = true;
		signalStaticCond(&finman);
	}
	unlockStaticMutex(&finman);
}

/*
 * The finaliser sits in a loop waiting to finalise objects.  When a
 * new finalised list is available, it is woken by the GC and finalises
 * the objects in turn.  An object is only finalised once after which
 * it is deleted.
 *
 * Called from finaliserMan with the finman mutex held; that lock is
 * temporarily dropped around each finaliser invocation (see below).
 */
static void
finaliserJob(Collector *gcif)
{
	gc_block* info = NULL;
	gc_unit* unit = NULL;
	int idx = 0;
	int func = 0;

	/*
	 * Loop until the list of objects whose finaliser needs to be run is empty
	 * [ checking the condition without holding a lock is ok, since we're the only
	 * thread removing elements from the list (the list can never shrink during
	 * a gc pass) ].
	 *
	 * According to the spec, the finalisers have to be run without any user
	 * visible locks held.  Therefore, we must temporarily release the finman
	 * lock and may not hold the gc_lock while running the finalisers as they
	 * are exposed to the user by java.lang.Runtime.
	 *
	 * In addition, we must prevent an object and everything it references from
	 * being collected while the finaliser is run (since we can't hold the gc_lock,
	 * there may be several gc passes in the meantime).  To do so, we keep the
	 * object in the finalise list and only remove it from there when its
	 * finaliser is done (simply adding the object to the grey list while its
	 * finaliser is run only works as long as there's at most one gc pass).
	 *
	 * In order to determine the finaliser of an object, we have to access the
	 * gc_block that contains it and its index.  Doing this without holding a
	 * lock only works as long as both, the gc_blocks and the indices of the
	 * objects in a gc_block, are constant.
	 */
	while (gclists[finalise].cnext != &gclists[finalise]) {
		unit = gclists[finalise].cnext;

		lockStaticMutex(&gc_lock);
		info = gc_mem2block(unit);
		idx = GCMEM2IDX(info, unit);
		func = KGC_GET_FUNCS(info, idx);
		unlockStaticMutex(&gc_lock);

		/* Clear weak references to this object.  Because according to the Java API spec.
		 * "Suppose that the garbage collector determines at a certain point in time
		 * that an object is weakly reachable.  At that time it will atomically clear
		 * all weak references to that object and all weak references to any other
		 * weakly-reachable objects from which that object is reachable through a chain
		 * of strong and soft references."
		 */
		KaffeGC_clearWeakRef(gcif, UTOMEM(unit));

		/* Call finaliser */
		unlockStaticMutex(&finman);
		(*gcFunctions[func].final)(gcif, UTOMEM(unit));
		lockStaticMutex(&finman);

		/* and remove unit from the finaliser list */
		lockStaticMutex(&gc_lock);
		info = gc_mem2block(unit);
		UREMOVELIST(unit);
		UAPPENDLIST(gclists[nofin_white], unit);
		gcStats.finalmem -= GCBLOCKSIZE(info);
		gcStats.finalobj -= 1;

		assert(KGC_GET_STATE(info,idx) == KGC_STATE_INFINALIZE);
		/* Objects are only finalised once */
		KGC_SET_STATE(info, idx, KGC_STATE_FINALIZED);
		KGC_SET_COLOUR(info, idx, KGC_COLOUR_WHITE);
		unlockStaticMutex(&gc_lock);
	}
	info = NULL;
	unit = NULL;
	idx = 0;
}

/*
 * Body of the finaliser thread: sleeps on finman until startFinalizer
 * signals work, runs finaliserJob, then broadcasts on finmanend so
 * waiters (e.g. invokeFinalizer) know the pass is complete.  Never
 * returns; the trailing unlock is unreachable.
 */
static void NONRETURNING
finaliserMan(void* arg)
{
	Collector *gcif = (Collector*)arg;

	lockStaticMutex(&finman);

	finaliserStarted = true;

	for (;;) {
		while (finalRunning == false) {
			waitStaticCond(&finman, (jlong)0);
		}
		assert(finalRunning == true);

		finaliserJob(gcif);

		/* Wake up anyone waiting for the finalizer to finish */
		lockStaticMutex(&finmanend);
		finalRunning = false;
		broadcastStaticCond(&finmanend);
		unlockStaticMutex(&finmanend);
	}
	unlockStaticMutex(&finman);
}

/*
 * Re-enable garbage collection; wakes the GC manager when the disable
 * count drops back to zero.  Pairs with gcDisableGC (nestable).
 */
static
void
gcEnableGC(Collector* gcif UNUSED)
{
	lockStaticMutex(&gcman);
	gcDisabled -= 1;
	if( gcDisabled == 0 )
		broadcastStaticCond(&gcman);
	unlockStaticMutex(&gcman);
}

/*
 * Disable garbage collection.  Calls nest: each gcDisableGC must be
 * matched by a gcEnableGC before collection resumes.
 */
static
void
gcDisableGC(Collector* gcif UNUSED)
{
	lockStaticMutex(&gcman);
	gcDisabled += 1;
	unlockStaticMutex(&gcman);
}

/*
 * Explicity invoke the garbage collector and wait for it to complete.
 *
 * mustgc != 0 forces a collection (gcRunning = 2); otherwise the GC
 * manager may skip it per its heuristics (gcRunning = 1).
 * NOTE(review): the initial spin on gcRunning < 0 suggests a sentinel
 * used before the GC thread is initialised — confirm where gcRunning
 * is set negative (not visible in this chunk).  gcmanend is acquired
 * while gcman is still held; the manager takes gcmanend without gcman,
 * so the ordering appears deadlock-free, but verify against all users.
 */
static
void
gcInvokeGC(Collector* gcif UNUSED, int mustgc)
{
	while (gcRunning < 0)
		KTHREAD(yield)();

	lockStaticMutex(&gcman);

	if (gcRunning == 0) {
		gcRunning = mustgc ? 2 : 1;
		if (!gcDisabled)
			signalStaticCond(&gcman);
	}

	/* Take gcmanend before releasing gcman so the manager's completion
	 * broadcast cannot be missed between the signal and the wait. */
	lockStaticMutex(&gcmanend);
	unlockStaticMutex(&gcman);

	while (gcRunning != 0) {
		waitStaticCond(&gcmanend, (jlong)0);
	}
	unlockStaticMutex(&gcmanend);
}

/*
 * GC and invoke the finalizer.  Used to run finalizers on exit.
 */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -