heap.cc.svn-base
void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  mc_count_++;
  tracer->set_full_gc_count(mc_count_);
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactPrologue();

  MarkCompactCollector::CollectGarbage(tracer);

  MarkCompactEpilogue();

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);
}


void Heap::MarkCompactPrologue() {
  CompilationCache::MarkCompactPrologue();
  RegExpImpl::OldSpaceCollectionPrologue();
  Top::MarkCompactPrologue();
  ThreadManager::MarkCompactPrologue();
}


void Heap::MarkCompactEpilogue() {
  Top::MarkCompactEpilogue();
  ThreadManager::MarkCompactEpilogue();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = code_space_->FindObject(a);
  if (obj->IsFailure()) {
    obj = lo_space_->FindObject(a);
  }
  ASSERT(!obj->IsFailure());
  return obj;
}


// Helper class for copying HeapObjects.
class CopyVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    CopyObject(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) CopyObject(p);
  }

 private:
  void CopyObject(Object** p) {
    if (!Heap::InFromSpace(*p)) return;
    Heap::CopyObject(reinterpret_cast<HeapObject**>(p));
  }
};


// Shared state read by the scavenge collector and set by CopyObject.
static Address promoted_top = NULL;


#ifdef DEBUG
// Visitor class to verify that pointers in code or data space do not point
// into new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};
#endif


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) {
    VerifyNonPointerSpacePointersVisitor v;
    HeapObjectIterator it(code_space_);
    while (it.has_next()) {
      HeapObject* object = it.next();
      if (object->IsCode()) {
        Code::cast(object)->ConvertICTargetsFromAddressToObject();
      }
      object->Iterate(&v);
      if (object->IsCode()) {
        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
      }
    }
  }
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm.
  LOG(ResourceEvent("scavenge", "begin"));

  scavenge_count_++;
  if (new_space_->Capacity() < new_space_->MaximumCapacity() &&
      scavenge_count_ > new_space_growth_limit_) {
    // Double the size of the new space, and double the limit.  The next
    // doubling attempt will occur after the current new_space_growth_limit_
    // more collections.
    // TODO(1240712): NewSpace::Double has a return value which is
    // ignored here.
    new_space_->Double();
    new_space_growth_limit_ *= 2;
  }

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_->Flip();
  new_space_->ResetAllocationInfo();

  // We need to sweep newly copied objects, which can be in either the to
  // space or the old space.  For to space objects, we use a mark: newly
  // copied objects lie between the mark and the allocation top.  For objects
  // promoted to old space, we write their addresses downward from the top of
  // the new space, so sweeping newly promoted objects likewise requires an
  // allocation pointer and a mark.  Note that this allocation pointer 'top'
  // moves downward from the high address in the to space.
  //
  // There is guaranteed to be enough room at the top of the to space for the
  // addresses of promoted objects: every promoted object frees up its size
  // in bytes from the top of the new space, and objects are at least one
  // pointer in size.  Using the new space to record promoted addresses makes
  // the scavenge collector agnostic to the allocation strategy (e.g., linear
  // or free-list) used in old space.
  Address new_mark = new_space_->ToSpaceLow();
  Address promoted_mark = new_space_->ToSpaceHigh();
  promoted_top = new_space_->ToSpaceHigh();

  CopyVisitor copy_visitor;
  // Copy roots.
  IterateRoots(&copy_visitor);

  // Copy objects reachable from the old generation.  By definition, there
  // are no intergenerational pointers in code or data spaces.
  IterateRSet(old_pointer_space_, &CopyObject);
  IterateRSet(map_space_, &CopyObject);
  lo_space_->IterateRSet(&CopyObject);

  bool has_processed_weak_pointers = false;

  while (true) {
    ASSERT(new_mark <= new_space_->top());
    ASSERT(promoted_mark >= promoted_top);

    // Copy objects reachable from newly copied objects.
    while (new_mark < new_space_->top() || promoted_mark > promoted_top) {
      // Sweep newly copied objects in the to space.  The allocation pointer
      // can change during sweeping.
      Address previous_top = new_space_->top();
      SemiSpaceIterator new_it(new_space_, new_mark);
      while (new_it.has_next()) {
        new_it.next()->Iterate(&copy_visitor);
      }
      new_mark = previous_top;

      // Sweep newly promoted objects in the old space.  The promotion 'top'
      // pointer could change during sweeping.
      previous_top = promoted_top;
      for (Address current = promoted_mark - kPointerSize;
           current >= previous_top;
           current -= kPointerSize) {
        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
        object->Iterate(&copy_visitor);
        UpdateRSet(object);
      }
      promoted_mark = previous_top;
    }

    if (has_processed_weak_pointers) break;  // We are done.
    // Copy objects reachable from weak pointers.
    GlobalHandles::IterateWeakRoots(&copy_visitor);
    has_processed_weak_pointers = true;
  }

  // Set age mark.
  new_space_->set_age_mark(new_mark);

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}
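The downward-growing record of promoted addresses is the subtle part of the scavenge loop above: survivors are bump-allocated upward from the low end of to space while promoted objects' addresses are pushed downward from the high end. The following standalone sketch models just that bookkeeping; all names here (ToSpaceSketch, PushPromotedAddress) are hypothetical illustrations, not part of this file.

// --- Illustrative sketch (hypothetical, not from heap.cc) ---
#include <cassert>
#include <cstring>

static const int kWordSize = sizeof(void*);

// A toy semispace: survivors are bump-allocated upward from 'low', while
// the addresses of promoted objects are pushed downward from 'high'.
struct ToSpaceSketch {
  char* low;   // next allocation address (grows upward)
  char* high;  // promoted-address stack top (grows downward)

  char* Allocate(int size) {
    assert(low + size <= high);  // the two fronts must never collide
    char* result = low;
    low += size;
    return result;
  }

  void PushPromotedAddress(void* object_in_old_space) {
    // Every promoted object freed at least kWordSize bytes of to space
    // (objects are at least one pointer in size), so there is always room
    // for one more address here.
    high -= kWordSize;
    assert(high >= low);
    memcpy(high, &object_in_old_space, kWordSize);
  }
};

Because each promotion frees at least one word of to space, the allocation front and the address stack can never collide; the asserts make that invariant explicit.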
void Heap::ClearRSetRange(Address start, int size_in_bytes) {
  uint32_t start_bit;
  Address start_word_address =
      Page::ComputeRSetBitPosition(start, 0, &start_bit);
  uint32_t end_bit;
  Address end_word_address =
      Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
                                   0,
                                   &end_bit);

  // We want to clear the bits in the starting word starting with the
  // first bit, and in the ending word up to and including the last
  // bit.  Build a pair of bitmasks to do that.
  uint32_t start_bitmask = start_bit - 1;
  uint32_t end_bitmask = ~((end_bit << 1) - 1);

  // If the start address and end address are the same, we mask that
  // word once, otherwise mask the starting and ending word
  // separately and all the ones in between.
  if (start_word_address == end_word_address) {
    Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
  } else {
    Memory::uint32_at(start_word_address) &= start_bitmask;
    Memory::uint32_at(end_word_address) &= end_bitmask;
    start_word_address += kIntSize;
    memset(start_word_address, 0, end_word_address - start_word_address);
  }
}
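The mask arithmetic above only works if start_bit and end_bit are single-bit masks (powers of two) rather than bit indices; `start_bit - 1` implies that, though the function does not state it. A minimal sketch of the same computation on one 32-bit word, with a worked example in the comments (ClearBitRangeInWord is a hypothetical name):

// --- Illustrative sketch (hypothetical, not from heap.cc) ---
#include <stdint.h>

// With start_bit = 1 << 3 and end_bit = 1 << 7:
//   start_bitmask = 0x00000007   (keeps bits 0..2)
//   end_bitmask   = 0xFFFFFF00   (keeps bits 8..31)
// so word &= (start_bitmask | end_bitmask) clears exactly bits 3..7,
// i.e. word & 0xFFFFFF07.
static uint32_t ClearBitRangeInWord(uint32_t word,
                                    uint32_t start_bit,
                                    uint32_t end_bit) {
  uint32_t start_bitmask = start_bit - 1;       // bits strictly below start
  uint32_t end_bitmask = ~((end_bit << 1) - 1); // bits strictly above end
  return word & (start_bitmask | end_bitmask);
}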
class UpdateRSetVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdateRSet(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update a store into slots [start, end), used (a) to update the
    // remembered set when promoting a young object to old space or (b) to
    // rebuild remembered sets after a mark-compact collection.
    for (Object** p = start; p < end; p++) UpdateRSet(p);
  }

 private:
  void UpdateRSet(Object** p) {
    // The remembered set bit should not be set.  It should be clear for
    // objects newly copied to old space, and it is cleared before rebuilding
    // in the mark-compact collector.
    ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
    if (Heap::InNewSpace(*p)) {
      Page::SetRSet(reinterpret_cast<Address>(p), 0);
    }
  }
};


int Heap::UpdateRSet(HeapObject* obj) {
  ASSERT(!InNewSpace(obj));
  // Special handling of fixed arrays to iterate the body based on the start
  // address and offset.  Just iterating the pointers as in UpdateRSetVisitor
  // will not work because Page::SetRSet needs to have the start of the
  // object.
  if (obj->IsFixedArray()) {
    FixedArray* array = FixedArray::cast(obj);
    int length = array->length();
    for (int i = 0; i < length; i++) {
      int offset = FixedArray::kHeaderSize + i * kPointerSize;
      ASSERT(!Page::IsRSetSet(obj->address(), offset));
      if (Heap::InNewSpace(array->get(i))) {
        Page::SetRSet(obj->address(), offset);
      }
    }
  } else if (!obj->IsCode()) {
    // Skip code objects; we know they do not contain inter-generational
    // pointers.
    UpdateRSetVisitor v;
    obj->Iterate(&v);
  }
  return obj->Size();
}


void Heap::RebuildRSets() {
  // By definition, we do not care about remembered set bits in code or data
  // spaces.
  map_space_->ClearRSet();
  RebuildRSets(map_space_);

  old_pointer_space_->ClearRSet();
  RebuildRSets(old_pointer_space_);

  Heap::lo_space_->ClearRSet();
  RebuildRSets(lo_space_);
}


void Heap::RebuildRSets(PagedSpace* space) {
  HeapObjectIterator it(space);
  while (it.has_next()) Heap::UpdateRSet(it.next());
}


void Heap::RebuildRSets(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  while (it.has_next()) Heap::UpdateRSet(it.next());
}


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::RecordCopiedObject(HeapObject* obj) {
  bool should_record = false;
#ifdef DEBUG
  should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  should_record = should_record || FLAG_log_gc;
#endif
  if (should_record) {
    if (new_space_->Contains(obj)) {
      new_space_->RecordAllocation(obj);
    } else {
      new_space_->RecordPromotion(obj);
    }
  }
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


HeapObject* Heap::MigrateObject(HeapObject** source_p,
                                HeapObject* target,
                                int size) {
  // Copy the object's body to the target location, one pointer-sized word
  // at a time.
  void** src = reinterpret_cast<void**>((*source_p)->address());
  void** dst = reinterpret_cast<void**>(target->address());
  int counter = size / kPointerSize - 1;
  do {
    *dst++ = *src++;
  } while (counter-- > 0);

  // Set the forwarding address.
  (*source_p)->set_map_word(MapWord::FromForwardingAddress(target));

  // Update NewSpace stats if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  RecordCopiedObject(target);
#endif

  return target;
}
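The do/while loop in MigrateObject is easy to misread: counter starts at size / kPointerSize - 1, but because the test comes after the body, the body executes exactly size / kPointerSize times, so the whole object (including its first word) is copied. A hypothetical helper, CopyWords, replaying that counting on plain buffers:

// --- Illustrative sketch (hypothetical, not from heap.cc) ---
#include <cassert>

// Copies size_in_bytes / sizeof(void*) words: the post-test do/while runs
// the body once for counter = n - 1 down through counter = 0.
static void CopyWords(void** dst, void** src, int size_in_bytes) {
  int counter = size_in_bytes / static_cast<int>(sizeof(void*)) - 1;
  do {
    *dst++ = *src++;
  } while (counter-- > 0);
}

// With four words, counter takes the values 3, 2, 1, 0 and the body runs
// four times, copying everything including the first word.
static void CopyWordsSelfTest() {
  void* src[4] = { 0, src, src + 1, src + 2 };
  void* dst[4] = { 0, 0, 0, 0 };
  CopyWords(dst, src, static_cast<int>(sizeof(src)));
  for (int i = 0; i < 4; i++) assert(dst[i] == src[i]);
}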
void Heap::CopyObject(HeapObject** p) {
  ASSERT(InFromSpace(*p));

  HeapObject* object = *p;

  // We use the first word (where the map pointer usually is) of a heap
  // object to record the forwarding pointer.  A forwarding pointer can
  // point to an old space, the code space, or the to space of the new
  // generation.
  MapWord first_word = object->map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    *p = first_word.ToForwardingAddress();
    return;
  }

  // Optimization: Bypass ConsString objects where the right-hand side is
  // Heap::empty_string().  We do not use object->IsConsString because we
  // already know that object has the heap object tag.
  InstanceType type = first_word.ToMap()->instance_type();
  if (type < FIRST_NONSTRING_TYPE &&
      String::cast(object)->representation_tag() == kConsStringTag &&
      ConsString::cast(object)->second() == Heap::empty_string()) {
    object = HeapObject::cast(ConsString::cast(object)->first());
    *p = object;
    // After patching *p we have to repeat the checks that object is in the
    // active semispace of the young generation and not already copied.
    if (!InFromSpace(object)) return;
    first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      *p = first_word.ToForwardingAddress();
      return;
    }
    type = first_word.ToMap()->instance_type();
  }

  int object_size = object->SizeFromMap(first_word.ToMap());
  Object* result;
  // If the object should be promoted, we try to copy it to old space.
  if (ShouldBePromoted(object->address(), object_size)) {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space_ ||
           target_space == Heap::old_data_space_);
    result = target_space->AllocateRaw(object_size);

    if (!result->IsFailure()) {
      *p = MigrateObject(p, HeapObject::cast(result), object_size);
      if (target_space == Heap::old_pointer_space_) {
        // Record the object's address at the top of the to space, so the
        // promoted object will be swept by the scavenge loop above.
        promoted_top -= kPointerSize;
        Memory::Object_at(promoted_top) = *p;
      }
      return;
    }
  }

  // The object stays in new space: promotion was not wanted, or the old
  // space allocation failed.
  result = new_space_->AllocateRaw(object_size);
  ASSERT(!result->IsFailure());
  *p = MigrateObject(p, HeapObject::cast(result), object_size);
}
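CopyObject relies on the object's first word doing double duty: it normally holds the map pointer, but once the object has been evacuated it holds the forwarding address instead. The sketch below shows one way such an encoding can work; the tag choice (low bit set means "forwarded") is illustrative only and not necessarily V8's actual MapWord scheme.

// --- Illustrative sketch (hypothetical, not from heap.cc) ---
#include <stdint.h>

struct SketchObject {
  uintptr_t first_word;  // map pointer, or tagged forwarding address

  bool IsForwarded() const { return (first_word & 1) != 0; }

  SketchObject* ForwardingAddress() const {
    return reinterpret_cast<SketchObject*>(first_word & ~uintptr_t(1));
  }

  void SetForwardingAddress(SketchObject* target) {
    // Overwrites the map pointer; the object's identity is now 'target'.
    first_word = reinterpret_cast<uintptr_t>(target) | 1;
  }
};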