gc_common.c

来自「This is a resource based on j2me embedde」· C语言 代码 · 共 1,731 行 · 第 1/4 页

C
1,731
字号
    }

    /* Do callback for the postAction: */
    if (postActionCallback != NULL) {
        postActionCallback(ee, data, success, preActionStatus);
    }

    /* Bring the stackmap memory usage back in line: */
    CVMgcTrimStackmapsMemUsage(ee);

    /* Allow threads to become GC-unsafe again. */
    CVMD_gcAllowUnsafeAll(ee);

    /* Relinquish all GC locks except for the heapLock. */
    CVMlocksForGCRelease(ee);

    return success;
}

/* Purpose: Do pre-GC actions. */
/* NOTE: Returns 0 if successful, else returns error code.  A non-zero
         status aborts the GC cycle; the post-action below only resets
         its per-cycle flags when preActionStatus == 0. */
static CVMUint32
CVMgcStopTheWorldAndGCSafePreAction(CVMExecEnv *ee, void *data)
{
#ifdef CVM_JVMPI
    /* A JVMPI agent may have disabled GC via its gcLocker; if so, refuse
       to run this GC cycle: */
    if (CVMgcLockerIsActive(&CVMjvmpiRec()->gcLocker)) {
        return 1;
    }
#endif
#ifdef CVM_INSPECTOR
    while (CVMgcLockerIsActive(&CVMglobals.inspectorGCLocker)) {
        /* Wait until GC is re-enabled i.e. the GCLocker is deactivated: */
        CVMinspectorGCLockerWait(&CVMglobals.inspectorGCLocker, ee);
    }
#endif
    /* By default, we need to scan for class-unloading activity.  The
       specific GC implementation can choose to override this if
       appropriate: */
    CVMglobals.gcCommon.doClassCleanup = CVM_TRUE;
    CVMgcStartGC(ee);
    return 0;
}

/* Purpose: Calls the implementation specific GC to do GC work. */
/* NOTE: 'data' carries the allocation request size in bytes, smuggled
         through the generic void* action argument. */
static CVMBool
CVMgcStopTheWorldAndGCSafeAction(CVMExecEnv *ee, void *data)
{
    /*
     * The following cast might be problematic in a 64-bit port.
     * Assume that there will be no single allocation > 4GB and do only
     * an intermediate cast to CVMAddr to make a 64-bit compiler happy.
     * This is harmless on a 32-bit platform.
     */
    CVMUint32 numBytes = (CVMUint32)(CVMAddr) data;

    /* Starting point of calculating GC pause time */
    CVMgcstatStartGCMeasurement();

    CVMgcimplDoGC(ee, numBytes);

    /* End point of calculating GC pause time */
    CVMgcstatEndGCMeasurement();

    return CVM_TRUE;
}

/* Purpose: Do post-GC actions. */
static void
CVMgcStopTheWorldAndGCSafePostAction(CVMExecEnv *ee, void *data,
                                     CVMBool actionSuccess,
                                     CVMUint32 preActionStatus)
{
#ifdef CVM_JVMPI
    /* Only post GC_FINISH if the matching GC_START was actually posted
       (tracked by the GCWasStarted flag): */
    if (CVMjvmpiEventGCFinishIsEnabled() && CVMjvmpiGCWasStarted()) {
        CVMUint32 objCount = CVMgcimplGetObjectCount(ee);
        CVMUint32 freeMem = CVMgcimplFreeMemory(ee);
        CVMUint32 totalMem = CVMgcimplTotalMemory(ee);
        CVMjvmpiPostGCFinishEvent(objCount, totalMem - freeMem, totalMem);
    }
    CVMjvmpiResetGCWasStarted();
#endif
#ifdef CVM_JVMTI
    if (CVMjvmtiShouldPostGarbageCollectionFinish()) {
        CVMjvmtiPostGCFinishEvent();
    }
    CVMjvmtiTagRehash();
#endif
    /* After GC is done and before we allow all threads to become unsafe
       again, i.e. here, take this opportunity to scavenge and deflate
       monitors: */
    CVMsyncGCSafeAllMonitorScavenge(ee);

    /* Now that we're done with a GC cycle, reset these runtime flags for
       the next GC cycle.  But only do this if we actually did run the
       GC cycle.  If the GC cycle was not run, don't reset the flags yet. */
    if (actionSuccess && (preActionStatus == 0)) {
        CVMglobals.gcCommon.stringInternedSinceLastGC = CVM_FALSE;
        CVMglobals.gcCommon.classCreatedSinceLastGC = CVM_FALSE;
        CVMglobals.gcCommon.loaderCreatedSinceLastGC = CVM_FALSE;
    }
}

/*
 * Initiate a GC. Acquire all GC locks, stop all threads, and then
 * call back to the particular GC to do the work. When the particular
 * GC is done, resume.
 *
 * If we can't execute the GC action successfully, return CVM_FALSE.
 * Otherwise, return CVM_TRUE.
*/static CVMBoolstopTheWorldAndGCSafe(CVMExecEnv* ee, CVMUint32 numBytes,		      void (*retryAfterActionCallback)(CVMExecEnv *ee, 						       void *data),		      void* data){    CVMObjectICell* referenceLockCell =	&CVMglobals.java_lang_ref_Reference_lock->r;    CVMBool weakrefsInitialized;    CVMBool gcOccurred;    CVMassert(CVMD_isgcSafe(ee));    /* We should not own the jitlock or heaplock already, or we risk       a deadlock with the referenceLockCell */#ifdef CVM_JIT    CVMassert(CVMsysMutexOwner(&CVMglobals.jitLock) != ee);#endif    CVMassert(CVMsysMutexOwner(&CVMglobals.heapLock) != ee);    weakrefsInitialized = !CVMID_icellIsNull(referenceLockCell);    /*     * First lock the reference lock.     * %comment: f004     */    if (weakrefsInitialized) {	CVMBool success;	CVMD_gcUnsafeExec(ee, {            /* %comment l002 */            success = 		CVMobjectTryLock(ee, CVMID_icellDirect(ee, referenceLockCell));	    if (!success) {                success = CVMobjectLock(ee, referenceLockCell);	    }	});	if (!success) {	    return CVM_FALSE;	} else {	    CVMglobals.referenceWorkTODO = CVM_FALSE;	}    }    /* We must grab the jitLock and heapLock after the referenceLockCell */#ifdef CVM_JIT    CVMsysMutexLock(ee, &CVMglobals.jitLock);#endif    CVMsysMutexLock(ee, &CVMglobals.heapLock);#ifdef CVM_JVMPI    /* NOTE: The GCStart event must be sent before we actually force every       thread to become GC safe.  This is necessary because the GCStart event       is meant to give the JVMPI agent the opportunity to acquire locks and       allocate memory buffer.  Hence, it is possible for us to have sent the       GCStart event and discover after the fact that GC was disabled.  If       the GCStart event was posted, then we should set a flag indicating       so that we will have the opportunity to send the corresponding GCFinish       event later on.  If GC gets disabled after we sent this event, it will       appear as if we have a GC cycle that does nothing.    
*/    if (!CVMgcLockerIsActive(&CVMjvmpiRec()->gcLocker)) {        if (CVMjvmpiEventGCStartIsEnabled()) {            CVMjvmpiPostGCStartEvent();        }        CVMjvmpiSetGCWasStarted();    }#endif#ifdef CVM_JVMTI    /* Ditto above JVMPI comment, except for JVMTI  */    if (CVMjvmtiShouldPostGarbageCollectionStart()) {	CVMjvmtiPostGCStartEvent();    }#endif    /* Go stop the world and do GC: */    /*      * The numBytes cast might be problematic in a 64-bit port.     * Assume that there will be no single allocation > 4GB and do only     * an intermediate cast to CVMAddr to make a 64-bit compiler happy.     * This is harmless on a 32-bit platform.     */    gcOccurred = stopTheWorldAndDoAction(ee, (void *)(CVMAddr)numBytes,                        CVMgcStopTheWorldAndGCSafePreAction,                        CVMgcStopTheWorldAndGCSafeAction,                        CVMgcStopTheWorldAndGCSafePostAction,	                retryAfterActionCallback, data);    /*      * Do post-GC actions     */    CVMgcEndGC(ee);    CVMsysMutexUnlock(ee, &CVMglobals.heapLock);#ifdef CVM_JIT    CVMsysMutexUnlock(ee, &CVMglobals.jitLock);#endif    /*     * Now notify the reference lock waiters, and unlock.     */    if (weakrefsInitialized) {	CVMD_gcUnsafeExec(ee, {	    /*	     * Wake the reference handler thread so it enqueues its weak	     * references in the right ReferenceQueue's.	     * 	     * We're still holding the reference lock here, so another thread	     * could not have changed CVMglobals.referenceWorkTODO from	     * under us.	     
*/	    if (CVMglobals.referenceWorkTODO) {		CVMobjectNotifyAll(ee, referenceLockCell);		CVMglobals.referenceWorkTODO = CVM_FALSE;	    }            /* %comment l003 */            if (!CVMobjectTryUnlock(                    ee, CVMID_icellDirect(ee, referenceLockCell))) {                if (!CVMobjectUnlock(ee, referenceLockCell)) {		    CVMassert(CVM_FALSE); /* should never happen */		}            }	});    }    return gcOccurred;}static CVMBoolstopTheWorldAndGCWithRetry(CVMExecEnv* ee, CVMUint32 numBytes,			   void (*retryAfterActionCallback)(CVMExecEnv *ee, 							    void *data),			   void* retryData){    CVMBool retVal;    CVMD_gcSafeExec(ee, {	retVal = stopTheWorldAndGCSafe(ee, numBytes,				       retryAfterActionCallback,				       retryData);    });    return retVal;}CVMBoolCVMgcStopTheWorldAndGC(CVMExecEnv* ee, CVMUint32 numBytes){    return stopTheWorldAndGCWithRetry(ee, numBytes, NULL, NULL);}/* Purpose: Do a synchronous GC cycle. */voidCVMgcRunGC(CVMExecEnv* ee){    /* Do a full-scale GC */    (void)stopTheWorldAndGCSafe(ee, ~0, NULL, NULL); }/* * Return the number of bytes free in the heap.  */CVMJavaLongCVMgcFreeMemory(CVMExecEnv* ee){    CVMUint32 freeMem = CVMgcimplFreeMemory(ee);    /* %comment f005 */    return CVMint2Long(freeMem);}/* * Return the amount of total memory in the heap, in bytes. */CVMJavaLongCVMgcTotalMemory(CVMExecEnv* ee){    CVMUint32 totalMem = CVMgcimplTotalMemory(ee);    /* %comment f005 */    return CVMint2Long(totalMem);}/* * Destroy heap */CVMBool CVMgcDestroyHeap(){    CVMBool result;    CVMdestroyParsedSubOptions(&CVMglobals.gcCommon.gcOptions);    result = CVMgcimplDestroyHeap(&CVMglobals.gc);#ifdef CVM_JVMPI    if (CVMjvmpiEventArenaDeleteIsEnabled()) {        CVMjvmpiPostArenaDeleteEvent(CVM_GC_ARENA_PRELOADED);    }#endif    return result;}/*================================================== CVMGCLocker mechanism ==*/#if defined(CVM_INSPECTOR) || defined(CVM_JVMPI)/* Purpose: Constuctor. 
 */
void CVMgcLockerInit(CVMGCLocker *self)
{
    /* Start with the lock inactive and no recorded contention. */
    self->lockCount = 0;
    self->wasContended = CVM_FALSE;
}

/* Purpose: Activates the GC lock. */
/* NOTE: Calls to CVMgcLockerLock() & CVMgcLockerUnlock() can be nested.
         Can be called while GC safe or unsafe. */
void CVMgcLockerLock(CVMGCLocker *self, CVMExecEnv *current_ee)
{
    /* Only a GC-safe thread may block on the gcLockerLock mutex; the
       microlock alone guards the count update itself. */
    if (CVMD_isgcSafe(current_ee)) {
        CVMsysMutexLock(current_ee, &CVMglobals.gcLockerLock);
    }
    CVMsysMicroLock(current_ee, CVM_GC_LOCKER_MICROLOCK);
    self->lockCount++;
    CVMsysMicroUnlock(current_ee, CVM_GC_LOCKER_MICROLOCK);
    if (CVMD_isgcSafe(current_ee)) {
        CVMsysMutexUnlock(current_ee, &CVMglobals.gcLockerLock);
    }
}

/* Purpose: Deactivates the GC lock. */
/* NOTE: Calls to CVMgcLockerLock() & CVMgcLockerUnlock() can be nested.
         Can be called while GC safe or unsafe. */
void CVMgcLockerUnlock(CVMGCLocker *self, CVMExecEnv *current_ee)
{
    if (CVMD_isgcSafe(current_ee)) {
        CVMsysMutexLock(current_ee, &CVMglobals.gcLockerLock);
    }
    CVMsysMicroLock(current_ee, CVM_GC_LOCKER_MICROLOCK);
    /* Guard against unbalanced unlock calls: never go negative. */
    if (self->lockCount > 0) {
        self->lockCount--;
    }
    CVMsysMicroUnlock(current_ee, CVM_GC_LOCKER_MICROLOCK);
    if (CVMD_isgcSafe(current_ee)) {
        CVMsysMutexUnlock(current_ee, &CVMglobals.gcLockerLock);
    }
}

#endif /* defined(CVM_INSPECTOR) || defined(CVM_JVMPI) */
/* NOTE(review): this #endif comment previously also named CVM_JVMTI, but the
   matching #if above tests only CVM_INSPECTOR || CVM_JVMPI.  Confirm whether
   the #if itself should include CVM_JVMTI. */

/*===========================================================================*/

#if defined(CVM_INSPECTOR) || defined(CVM_JVMPI) || defined(CVM_JVMTI)

/*
 * Scan objects in contiguous range, and do per-object callback in support
 * of heap dump.
 */
/* Returns: CVM_TRUE if exiting due to completion of scan i.e. every object
            in the specified region has been scanned.
            CVM_FALSE if exiting due to an abortion i.e. the callback function
            has returned a CVM_FALSE status indicating a need to abort.
*/CVMBoolCVMgcScanObjectRange(CVMExecEnv* ee, CVMUint32* base, CVMUint32* top,		     CVMObjectCallbackFunc callback, void* callbackData){    CVMUint32* curr = base;    while (curr < top) {	CVMObject* currObj = (CVMObject*)curr;	CVMClassBlock* currCb = CVMobjectGetClass(currObj);	CVMUint32  objSize    = CVMobjectSizeGivenClass(currObj, currCb);	CVMBool completeScanDone;        completeScanDone = (*callback)(currObj, currCb, objSize, callbackData);        if (!completeScanDone) {            return CVM_FALSE; /* Abort i.e. complete scan NOT done. */        }	/* iterate */	curr += objSize / 4;    }    CVMassert(curr == top); /* This had better be exact */    return CVM_TRUE; /* Complete scan DONE. */}#endif /* defined(CVM_INSPECTOR) || defined(CVM_JVMPI) || defined(CVM_JVMTI)*/#ifdef CVM_JVMPI/* Purpose: Posts the JVMPI_EVENT_ARENA_NEW events. *//* NOTE: This function is necessary to compensate for the fact that         this event has already transpired by the time that JVMPI is         initialized. */void CVMgcPostJVMPIArenaNewEvent(void){    if (CVMjvmpiEventArenaNewIsEnabled()) {        CVMjvmpiPostArenaNewEvent(CVM_GC_ARENA_PRELOADED, "Preloaded");        CVMgcimplPostJVMPIArenaNewEvent();    }}/* Purpose: Gets the last shared arena ID that is used by shared code.  GC            specific implementations should start their arena ID after the last            shared arena ID returned by this function. */CVMUint32 CVMgcGetLastSharedArenaID(void){    /* NOTE: See the enum list at the top of this file for a list of shared       (i.e. not GC implementation specific) arena IDs. */    return (CVM_GC_TOTAL_SHARED_ARENA_IDS - 1);}/* Purpose: Gets the arena ID for the specified object. 
*/CVMUint32 CVMgcGetArenaID(CVMObject *obj){    /* First check to see if the object is located in the GC specific heap       implementation: */    CVMUint32 arenaID = CVMgcimplGetArenaID(obj);    if (arenaID == CVM_GC_ARENA_UNKNOWN) {        /* If the object isn't in the GC specific heap implementation, check           to see if it is a preloaded object: */        if (CVMpreloaderIsPreloadedObject(obj)) {            arenaID = CVM_GC_ARENA_PRELOADED;        }    }    return arenaID;}#endif /* CVM_JVMPI */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?