jsgc.c
     * protect against GC that the hook can trigger.
     */
    fp = cx->fp;
    if (fp) {
        JS_ASSERT(!fp->dormantNext);
        fp->dormantNext = cx->dormantFrameChain;
        cx->dormantFrameChain = fp;
    }
    cx->fp = NULL;

    genp = &tempList.head;
    ok = JS_TRUE;
    while ((gen = *genp) != NULL) {
        ok = ShouldDeferCloseHook(cx, gen, &defer);
        if (!ok) {
            /* Quit ASAP discarding the hook. */
            *genp = gen->next;
            break;
        }
        if (defer) {
            genp = &gen->next;
            METER(deferCount++);
            continue;
        }
        ok = js_CloseGeneratorObject(cx, gen);

        /*
         * Unlink the generator after closing it to make sure it always stays
         * rooted through tempList.
         */
        *genp = gen->next;

        if (cx->throwing) {
            /*
             * Report the exception thrown by the close hook and continue to
             * execute the rest of the hooks.
             */
            if (!js_ReportUncaughtException(cx))
                JS_ClearPendingException(cx);
            ok = JS_TRUE;
        } else if (!ok) {
            /*
             * Assume this is a stop signal from the branch callback or
             * other quit ASAP condition. Break execution until the next
             * invocation of js_RunCloseHooks.
             */
            break;
        }
    }

    cx->fp = fp;
    if (fp) {
        JS_ASSERT(cx->dormantFrameChain == fp);
        cx->dormantFrameChain = fp->dormantNext;
        fp->dormantNext = NULL;
    }

    if (tempList.head) {
        /*
         * Some close hooks were not yet executed, put them back into the
         * scheduled list.
         */
        while ((gen = *genp) != NULL) {
            genp = &gen->next;
            METER(deferCount++);
        }

        /* Now genp is a pointer to the tail of tempList. */
        JS_LOCK_GC(rt);
        *genp = rt->gcCloseState.todoQueue;
        rt->gcCloseState.todoQueue = tempList.head;
        METER(rt->gcStats.closelater += deferCount);
        METER(rt->gcStats.maxcloselater =
              JS_MAX(rt->gcStats.maxcloselater, rt->gcStats.closelater));
        JS_UNLOCK_GC(rt);
    }

    JS_POP_TEMP_CLOSE_LIST(cx, &tempList);
    *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_FALSE;

    return ok;
}

#endif /* JS_HAS_GENERATORS */

#if defined(DEBUG_brendan) || defined(DEBUG_timeless)
#define DEBUG_gchist
#endif

#ifdef DEBUG_gchist
#define NGCHIST 64

static struct GCHist {
    JSBool      lastDitch;
    JSGCThing   *freeList;
} gchist[NGCHIST];

unsigned gchpos;
#endif

void *
js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
{
    JSRuntime *rt;
    uintN flindex;
    JSBool doGC;
    JSGCThing *thing;
    uint8 *flagp, *firstPage;
    JSGCArenaList *arenaList;
    jsuword offset;
    JSGCArena *a;
    JSLocalRootStack *lrs;
#ifdef JS_THREADSAFE
    JSBool gcLocked;
    uintN localMallocBytes;
    JSGCThing **flbase, **lastptr;
    JSGCThing *tmpthing;
    uint8 *tmpflagp;
    uintN maxFreeThings;      /* max to take from the global free list */
    METER(size_t nfree);
#endif

    rt = cx->runtime;
    METER(rt->gcStats.alloc++);        /* this is not thread-safe */
    nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
    flindex = GC_FREELIST_INDEX(nbytes);

#ifdef JS_THREADSAFE
    gcLocked = JS_FALSE;
    JS_ASSERT(cx->thread);
    flbase = cx->thread->gcFreeLists;
    JS_ASSERT(flbase);
    thing = flbase[flindex];
    localMallocBytes = cx->thread->gcMallocBytes;
    if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
        flagp = thing->flagp;
        flbase[flindex] = thing->next;
        METER(rt->gcStats.localalloc++);   /* this is not thread-safe */
        goto success;
    }

    JS_LOCK_GC(rt);
    gcLocked = JS_TRUE;

    /* Transfer thread-local counter to global one. */
    if (localMallocBytes != 0) {
        cx->thread->gcMallocBytes = 0;
        if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
            rt->gcMallocBytes = rt->gcMaxMallocBytes;
        else
            rt->gcMallocBytes += localMallocBytes;
    }
#endif
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }

#ifdef TOO_MUCH_GC
#ifdef WAY_TOO_MUCH_GC
    rt->gcPoke = JS_TRUE;
#endif
    doGC = JS_TRUE;
#else
    doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes);
#endif

    arenaList = &rt->gcArenaList[flindex];
    for (;;) {
        if (doGC) {
            /*
             * Keep rt->gcLock across the call into js_GC so we don't starve
             * and lose to racing threads who deplete the heap just after
             * js_GC has replenished it (or has synchronized with a racing
             * GC that collected a bunch of garbage). This unfair scheduling
             * can happen on certain operating systems. For the gory details,
             * see bug 162779 at https://bugzilla.mozilla.org/.
             */
            js_GC(cx, GC_LAST_DITCH);
            METER(rt->gcStats.retry++);
        }

        /* Try to get thing from the free list. */
        thing = arenaList->freeList;
        if (thing) {
            arenaList->freeList = thing->next;
            flagp = thing->flagp;
            JS_ASSERT(*flagp & GCF_FINAL);
            METER(arenaList->stats.freelen--);
            METER(arenaList->stats.recycle++);

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking several things from the
             * global free list unless we are still at rt->gcMaxMallocBytes
             * barrier or the free list is already populated. The former
             * happens when GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
             * or no gcPoke. The latter is caused via allocating new things
             * in gcCallback(cx, JSGC_END).
             */
            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
                break;
            tmpthing = arenaList->freeList;
            if (tmpthing) {
                maxFreeThings = MAX_THREAD_LOCAL_THINGS;
                do {
                    if (!tmpthing->next)
                        break;
                    tmpthing = tmpthing->next;
                } while (--maxFreeThings != 0);

                flbase[flindex] = arenaList->freeList;
                arenaList->freeList = tmpthing->next;
                tmpthing->next = NULL;
            }
#endif
            break;
        }

        /* Allocate from the tail of last arena or from new arena if we can. */
        if ((arenaList->last && arenaList->lastLimit != GC_THINGS_SIZE) ||
            NewGCArena(rt, arenaList)) {

            offset = arenaList->lastLimit;
            if ((offset & GC_PAGE_MASK) == 0) {
                /*
                 * Skip JSGCPageInfo record located at GC_PAGE_SIZE boundary.
                 */
                offset += PAGE_THING_GAP(nbytes);
            }
            JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
            arenaList->lastLimit = (uint16)(offset + nbytes);
            a = arenaList->last;
            firstPage = (uint8 *)FIRST_THING_PAGE(a);
            thing = (JSGCThing *)(firstPage + offset);
            flagp = a->base + offset / sizeof(JSGCThing);
            if (flagp >= firstPage)
                flagp += GC_THINGS_SIZE;
            METER(++arenaList->stats.nthings);
            METER(arenaList->stats.maxthings =
                  JS_MAX(arenaList->stats.nthings,
                         arenaList->stats.maxthings));

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking free things from the last
             * arena. Prefer to order free things by ascending address in the
             * (unscientific) hope of better cache locality.
             */
            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
                break;
            METER(nfree = 0);
            lastptr = &flbase[flindex];
            maxFreeThings = MAX_THREAD_LOCAL_THINGS;
            for (offset = arenaList->lastLimit;
                 offset != GC_THINGS_SIZE && maxFreeThings-- != 0;
                 offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0)
                    offset += PAGE_THING_GAP(nbytes);
                JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
                tmpflagp = a->base + offset / sizeof(JSGCThing);
                if (tmpflagp >= firstPage)
                    tmpflagp += GC_THINGS_SIZE;

                tmpthing = (JSGCThing *)(firstPage + offset);
                tmpthing->flagp = tmpflagp;
                *tmpflagp = GCF_FINAL;    /* signifying that thing is free */

                *lastptr = tmpthing;
                lastptr = &tmpthing->next;
                METER(++nfree);
            }
            arenaList->lastLimit = offset;
            *lastptr = NULL;
            METER(arenaList->stats.freelen += nfree);
#endif
            break;
        }

        /* Consider doing a "last ditch" GC unless already tried. */
        if (doGC)
            goto fail;
        rt->gcPoke = JS_TRUE;
        doGC = JS_TRUE;
    }

    /* We successfully allocated the thing. */
#ifdef JS_THREADSAFE
  success:
#endif
    lrs = cx->localRootStack;
    if (lrs) {
        /*
         * If we're in a local root scope, don't set newborn[type] at all, to
         * avoid entraining garbage from it for an unbounded amount of time
         * on this context. A caller will leave the local root scope and pop
         * this reference, allowing thing to be GC'd if it has no other refs.
         * See JS_EnterLocalRootScope and related APIs.
         */
        if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
            /*
             * When we fail for a thing allocated through the tail of the last
             * arena, thing's flag byte is not initialized. So to prevent GC
             * accessing the uninitialized flags during the finalization, we
             * always mark the thing as final. See bug 337407.
             */
            *flagp = GCF_FINAL;
            goto fail;
        }
    } else {
        /*
         * No local root scope, so we're stuck with the old, fragile model of
         * depending on a pigeon-hole newborn per type per context.
         */
        cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
    }

    /* We can't fail now, so update flags and rt->gc{,Private}Bytes. */
    *flagp = (uint8)flags;

    /*
     * Clear thing before unlocking in case a GC run is about to scan it,
     * finding it via newborn[].
     */
    thing->next = NULL;
    thing->flagp = NULL;
#ifdef DEBUG_gchist
    gchist[gchpos].lastDitch = doGC;
    gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
    if (++gchpos == NGCHIST)
        gchpos = 0;
#endif
    METER(if (flags & GCF_LOCK) rt->gcStats.lockborn++);
    METER(++rt->gcArenaList[flindex].stats.totalnew);
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    return thing;

  fail:
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    METER(rt->gcStats.fail++);
    JS_ReportOutOfMemory(cx);
    return NULL;
}

JSBool
js_LockGCThing(JSContext *cx, void *thing)
{
    JSBool ok = js_LockGCThingRT(cx->runtime, thing);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

/*
 * Deep GC-things can't be locked just by setting the GCF_LOCK bit, because
 * their descendants must be marked by the GC. To find them during the mark
 * phase, they are added to rt->gcLocksHash, which is created lazily.
 *
 * NB: we depend on the order of GC-thing type indexes here!
 */
#define GC_TYPE_IS_STRING(t)    ((t) == GCX_STRING ||                         \
                                 (t) >= GCX_EXTERNAL_STRING)
#define GC_TYPE_IS_XML(t)       ((unsigned)((t) - GCX_NAMESPACE) <=           \
                                 (unsigned)(GCX_XML - GCX_NAMESPACE))
#define GC_TYPE_IS_DEEP(t)      ((t) == GCX_OBJECT || GC_TYPE_IS_XML(t))

#define IS_DEEP_STRING(t,o)     (GC_TYPE_IS_STRING(t) &&                      \
                                 JSSTRING_IS_DEPENDENT((JSString *)(o)))

#define GC_THING_IS_DEEP(t,o)   (GC_TYPE_IS_DEEP(t) || IS_DEEP_STRING(t, o))

/* This is compatible with JSDHashEntryStub. */
typedef struct JSGCLockHashEntry {
    JSDHashEntryHdr hdr;
    const JSGCThing *thing;
    uint32          count;
} JSGCLockHashEntry;

JSBool
js_LockGCThingRT(JSRuntime *rt, void *thing)
{
    JSBool ok, deep;
    uint8 *flagp;
    uintN flags, lock, type;
    JSGCLockHashEntry *lhe;

    ok = JS_TRUE;
    if (!thing)
        return ok;

    flagp = js_GetGCThingFlags(thing);

    JS_LOCK_GC(rt);
    flags = *flagp;
    lock = (flags & GCF_LOCK);
    type = (flags & GCF_TYPEMASK);
    deep = GC_THING_IS_DEEP(type, thing);

    /*
     * Avoid adding a rt->gcLocksHash entry for shallow things until someone
     * nests a lock -- then start such an entry with a count of 2, not 1.
     */
    if (lock || deep) {
        if (!rt->gcLocksHash) {
            rt->gcLocksHash =
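
The bulk of the listing above is js_NewGCThing, which layers three allocation paths: a per-thread free-list cache (cx->thread->gcFreeLists), the per-size-class global free list in rt->gcArenaList, and the tail of the most recently added arena, with a "last ditch" js_GC retried once when all three fail. The following stand-alone sketch illustrates that layering under heavily simplified assumptions; every name in it (Cell, Arena, gc_alloc, refill_local, the sizes) is hypothetical and is not part of the SpiderMonkey API.

/*
 * Minimal sketch of the layered allocation strategy: try a local cache
 * first, refill it in batches from a global free list, and fall back to
 * carving new cells out of the arena tail. All names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

#define ARENA_CELLS 16
#define LOCAL_BATCH 4      /* cells moved to the cache per refill, like
                              MAX_THREAD_LOCAL_THINGS */

typedef union Cell {
    union Cell *next;      /* link while the cell sits on a free list */
    double payload;        /* user data once allocated */
} Cell;

typedef struct Arena {
    Cell cells[ARENA_CELLS];
    size_t used;           /* cells handed out from the arena tail */
} Arena;

static Arena global_arena;   /* stands in for rt->gcArenaList[flindex] */
static Cell *global_free;    /* global free list, lock-protected in real code */
static Cell *local_free;     /* conceptually per-thread, like gcFreeLists */

/* Refill the local cache from the global free list or the arena tail. */
static void refill_local(void)
{
    /* In jsgc.c this whole step runs with the GC lock held. */
    for (int i = 0; i < LOCAL_BATCH; i++) {
        Cell *c;
        if (global_free) {                          /* recycle a freed cell */
            c = global_free;
            global_free = c->next;
        } else if (global_arena.used < ARENA_CELLS) {
            c = &global_arena.cells[global_arena.used++];  /* arena tail */
        } else {
            return;   /* real code would attempt a last-ditch GC here */
        }
        c->next = local_free;
        local_free = c;
    }
}

static Cell *gc_alloc(void)
{
    if (!local_free)
        refill_local();
    if (!local_free)
        return NULL;          /* still empty: out of memory */
    Cell *c = local_free;
    local_free = c->next;
    return c;
}

int main(void)
{
    int allocated = 0;
    while (gc_alloc())
        allocated++;
    printf("allocated %d cells before exhaustion\n", allocated);
    return 0;
}

The point of the two-level list is the same as in js_NewGCThing: the common case takes a cell from the local cache without touching the GC lock, and the lock is only acquired when the cache has to be refilled or a collection is needed.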