📄 collector.cpp
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif

void Heap::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside mark() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}

void Heap::setGCProtectNeedsLocking()
{
    // Most clients do not need to call this, with the notable exception of WebCore.
    // Clients that use shared heap have JSLock protection, while others are supposed
    // to do explicit locking. WebCore violates this contract in Database code,
    // which calls gcUnprotect from a secondary thread.
    if (!m_protectedValuesMutex)
        m_protectedValuesMutex.set(new Mutex);
}

void Heap::protect(JSValuePtr k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.add(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}

void Heap::unprotect(JSValuePtr k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.remove(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}

Heap* Heap::heap(JSValuePtr v)
{
    if (!v.isCell())
        return 0;
    return Heap::cellBlock(v.asCell())->heap;
}

void Heap::markProtectedObjects()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        JSCell* val = it->first;
        if (!val->marked())
            val->mark();
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}

template <HeapType heapType> size_t Heap::sweep()
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.usedBlocks; block++) {
        Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);

        size_t usedCells = curBlock->usedCells;
        Cell* freeList = curBlock->freeList;

        if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
                if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                    Cell* cell = curBlock->cells + i;

                    if (heapType != NumberHeap) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        // special case for allocated but uninitialized object
                        // (We don't need this check earlier because nothing prior this point
                        // assumes the object has a valid vptr.)
                        if (cell->u.freeCell.zeroIfFree == 0)
                            continue;

                        imp->~JSCell();
                    }

                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = freeList - (cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
                Cell* cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                        if (heapType != NumberHeap) {
                            JSCell* imp = reinterpret_cast<JSCell*>(cell);
                            imp->~JSCell();
                        }
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = freeList - (cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->usedCells = static_cast<uint32_t>(usedCells);
        curBlock->freeList = freeList;
        curBlock->marked.clearAll();

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(reinterpret_cast<CollectorBlock*>(curBlock));
#endif
                // swap with the last block so we compact as we go
                heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
                heap.usedBlocks--;
                block--; // Don't move forward a step in this case

                if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
                    heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
                    heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));
                }
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;
    return numLiveObjects;
}

bool Heap::collect()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
        CRASH();

    JAVASCRIPTCORE_GC_BEGIN();
    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

    markStackObjectsConservatively();
    markProtectedObjects();
    if (m_markListSet && m_markListSet->size())
        ArgList::markLists(*m_markListSet);
    if (m_globalData->exception && !m_globalData->exception.marked())
        m_globalData->exception.mark();
    m_globalData->interpreter->registerFile().markCallFrames(this);
    m_globalData->smallStrings.mark();
    if (m_globalData->scopeNodeBeingReparsed)
        m_globalData->scopeNodeBeingReparsed->mark();

    JAVASCRIPTCORE_GC_MARKED();

    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>();
    numLiveObjects += sweep<NumberHeap>();

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END(originalLiveObjects, numLiveObjects);

    return numLiveObjects < originalLiveObjects;
}

size_t Heap::objectCount()
{
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects - m_globalData->smallStrings.count();
}

template <HeapType heapType>
static void addToStatistics(Heap::Statistics& statistics, const CollectorHeap& heap)
{
    typedef HeapConstants<heapType> HC;
    for (size_t i = 0; i < heap.usedBlocks; ++i) {
        if (heap.blocks[i]) {
            statistics.size += BLOCK_SIZE;
            statistics.free += (HC::cellsPerBlock - heap.blocks[i]->usedCells) * HC::cellSize;
        }
    }
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    JSC::addToStatistics<PrimaryHeap>(statistics, primaryHeap);
    JSC::addToStatistics<NumberHeap>(statistics, numberHeap);
    return statistics;
}

size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return count;
}

size_t Heap::protectedObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t result = m_protectedValues.size();

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return result;
}

static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
    if (cell->isNumber())
        return "number";
    if (cell->isGetterSetter())
        return "gettersetter";
    ASSERT(cell->isObject());
    const ClassInfo* info = static_cast<JSObject*>(cell)->classInfo();
    return info ? info->className : "Object";
}

HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return counts;
}

bool Heap::isBusy()
{
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
}

Heap::iterator Heap::primaryHeapBegin()
{
    return iterator(primaryHeap.blocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

Heap::iterator Heap::primaryHeapEnd()
{
    return iterator(primaryHeap.blocks + primaryHeap.usedBlocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

} // namespace JSC
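A note on the protect/unprotect API above: the conservative collector only scans thread stacks and registers, so a JSValuePtr stored in a C++-heap data structure is invisible to it and must be pinned explicitly via Heap::protect, which adds the cell to m_protectedValues so markProtectedObjects() reaches it on every collect(). The sketch below is illustrative only and is not part of collector.cpp: the ValueCache class and the globalData parameter are hypothetical, and it assumes the era-appropriate setup in which JSGlobalData exposes its Heap as a public `heap` member; only Heap::protect and Heap::unprotect come from the listing.

// Illustrative sketch (not part of collector.cpp): pin a JSValuePtr that lives
// on the C++ heap, where the conservative stack/register scan cannot see it.
// ValueCache is a hypothetical embedder class; globalData->heap is assumed to
// be the usual Heap member of JSGlobalData in this era of JavaScriptCore.
class ValueCache {
public:
    ValueCache(JSGlobalData* globalData, JSValuePtr value)
        : m_globalData(globalData)
        , m_value(value)
    {
        // Adds the cell to m_protectedValues, so markProtectedObjects()
        // marks it on every collection while this cache is alive.
        m_globalData->heap.protect(m_value);
    }

    ~ValueCache()
    {
        // Balance the protect; otherwise the cell can never be collected.
        m_globalData->heap.unprotect(m_value);
    }

    JSValuePtr value() const { return m_value; }

private:
    JSGlobalData* m_globalData;
    JSValuePtr m_value;
};

Since m_protectedValues appears to be a counted set (its iterators yield key/count pairs), nested protects of the same cell should be safe as long as each protect is balanced by a matching unprotect.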