// mark-compact.cc (.svn-base snapshot — the working-copy base revision of
// this file as captured by a code-viewer page).
}


// Fill the marking stack with overflowed objects returned by the given
// iterator.  Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_stack.is_full());
  while (it->has_next()) {
    HeapObject* object = it->next();
    if (object->IsOverflowed()) {
      // Clear the overflow bit before pushing: the object becomes gray
      // (on the stack) again instead of overflowed in the heap.
      object->ClearOverflow();
      ASSERT(object->IsMarked());
      ASSERT(Heap::Contains(object));
      marking_stack.Push(object);
      if (marking_stack.is_full()) return;
    }
  }
}


// Predicate for weak-root handling: returns true when *p points to a heap
// object that has not been marked yet.
bool MarkCompactCollector::MustBeMarked(Object** p) {
  // Check whether *p is a HeapObject pointer.
  if (!(*p)->IsHeapObject()) return false;
  return !HeapObject::cast(*p)->IsMarked();
}


// Mark the strong roots gray, handle the symbol table specially, and drain
// any marking-stack overflow produced in the process.
void MarkCompactCollector::ProcessRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots gray, including global variables, stack variables,
  // etc.
  Heap::IterateStrongRoots(visitor);

  // Take care of the symbol table specially.
  SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table());
  // 1. Mark the prefix of the symbol table gray.
  symbol_table->IteratePrefix(visitor);
#ifdef DEBUG
  UpdateLiveObjectCount(symbol_table);
#endif
  // 2. Mark the symbol table black (ie, do not push it on the marking stack
  // or mark it overflowed).
  symbol_table->SetMark();
  tracer_->increment_marked_count();

  // There may be overflowed objects in the heap.  Visit them now.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor->stack_visitor());
  }
}


// For every object group with at least one marked member, mark all of its
// (still white) heap-object members gray; processed groups are deleted and
// their slots set to NULL so they are not visited again.
void MarkCompactCollector::MarkObjectGroups() {
  List<ObjectGroup*>& object_groups = GlobalHandles::ObjectGroups();
  for (int i = 0; i < object_groups.length(); i++) {
    ObjectGroup* entry = object_groups[i];
    if (entry == NULL) continue;  // Group already processed and cleared.
    List<Object**>& objects = entry->objects_;
    bool group_marked = false;
    // Check whether any member of the group is already marked.
    for (int j = 0; j < objects.length(); j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
        group_marked = true;
        break;
      }
    }
    if (!group_marked) continue;
    // An object in the group is marked, so mark as gray all white heap
    // objects in the group.
    for (int j = 0; j < objects.length(); ++j) {
      if ((*objects[j])->IsHeapObject()) {
        MarkObject(HeapObject::cast(*objects[j]));
      }
    }
    // Once the entire group has been colored gray, set the object group
    // to NULL so it won't be processed again.
    delete object_groups[i];
    object_groups[i] = NULL;
  }
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
  while (!marking_stack.is_empty()) {
    HeapObject* object = marking_stack.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(Heap::Contains(object));
    ASSERT(object->IsMarked());
    ASSERT(!object->IsOverflowed());
    // Because the object is marked, we have to recover the original map
    // pointer and use it to mark the object's body.
    MapWord map_word = object->map_word();
    map_word.ClearMark();
    Map* map = map_word.ToMap();
    MarkObject(map);
    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                        visitor);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack.
// Stop early if the marking stack fills
// before sweeping completes.  If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  // Scan each space in turn; bail out as soon as the stack is full again.
  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  // Every space was swept without refilling the stack, so no overflowed
  // objects remain in the heap.
  marking_stack.clear_overflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack.  Before: the marking stack contains zero or more heap object
// pointers.
// After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) {
  EmptyMarkingStack(visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor);
  }
}


// Alternate marking object groups and draining the marking stack until a
// round of MarkObjectGroups pushes nothing new.
void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
  bool work_to_do = true;
  ASSERT(marking_stack.is_empty());
  while (work_to_do) {
    MarkObjectGroups();
    work_to_do = !marking_stack.is_empty();
    ProcessMarkingStack(visitor);
  }
}


// Phase 1 driver: mark everything reachable from the strong roots, object
// groups and weak global handles, then prune dead symbols from the symbol
// table.
void MarkCompactCollector::MarkLiveObjects() {
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  RootMarkingVisitor root_visitor;
  ProcessRoots(&root_visitor);

  // The objects reachable from the roots are marked black, unreachable
  // objects are white.  Mark objects reachable from object groups with at
  // least one marked object, and continue until no new objects are
  // reachable from the object groups.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // The objects reachable from the roots or object groups are marked black,
  // unreachable objects are white.  Process objects reachable only from
  // weak global handles.
  //
  // First we mark weak pointers not yet reachable.
  GlobalHandles::MarkWeakRoots(&MustBeMarked);
  // Then we process weak pointers and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(root_visitor.stack_visitor());
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table.  Cannot use SymbolTable::cast here because the symbol
  // table is marked.
  SymbolTable* symbol_table =
      reinterpret_cast<SymbolTable*>(Heap::symbol_table());
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());

#ifdef DEBUG
  if (FLAG_verify_global_gc) VerifyHeapAfterMarkingPhase();
#endif

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}


#ifdef DEBUG
// Debug-only bookkeeping: tally the object's size and bump the per-space
// live-object counter for whichever space contains it.
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_++;
  } else if (Heap::map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_++;
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_++;
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_++;
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_++;
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_++;
  } else {
    UNREACHABLE();
  }
}


// Size function for heap iteration during verification: the map word may
// carry the mark bit, so clear it before recovering the map and the size.
static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


// Debug-only check that the number of marked objects found in each space
// matches the live-object counters accumulated during marking.
void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
  Heap::new_space()->Verify();
  Heap::old_pointer_space()->Verify();
  Heap::old_data_space()->Verify();
  Heap::code_space()->Verify();
  Heap::map_space()->Verify();

  int live_objects;
#define CHECK_LIVE_OBJECTS(it, expected)            \
  live_objects = 0;                                 \
  while (it.has_next()) {                           \
    HeapObject* obj = HeapObject::cast(it.next());  \
    if (obj->IsMarked()) live_objects++;            \
  }                                                 \
  ASSERT(live_objects == expected);

  SemiSpaceIterator new_it(Heap::new_space(), &CountMarkedCallback);
  CHECK_LIVE_OBJECTS(new_it, live_young_objects_);
  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &CountMarkedCallback);
  CHECK_LIVE_OBJECTS(old_pointer_it, live_old_pointer_objects_);
  HeapObjectIterator old_data_it(Heap::old_data_space(), &CountMarkedCallback);
  CHECK_LIVE_OBJECTS(old_data_it, live_old_data_objects_);
  HeapObjectIterator code_it(Heap::code_space(), &CountMarkedCallback);
  CHECK_LIVE_OBJECTS(code_it, live_code_objects_);
  HeapObjectIterator map_it(Heap::map_space(), &CountMarkedCallback);
  CHECK_LIVE_OBJECTS(map_it, live_map_objects_);
  LargeObjectIterator lo_it(Heap::lo_space(), &CountMarkedCallback);
  CHECK_LIVE_OBJECTS(lo_it, live_lo_objects_);
#undef CHECK_LIVE_OBJECTS
}
#endif  // DEBUG


void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}


// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
//  31              21 20                 10 9                 0
// +-----------------+--------------------+-------------------+
// |forwarding offset| page offset of map | page index of map |
// +-----------------+--------------------+-------------------+
//  11 bits           11 bits              10 bits
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object).  A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid.
// We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;


// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    // One-word region: the single-free marker alone identifies it.
    Memory::uint32_at(free_start) = kSingleFreeEncoding;
  } else {
    // Multi-word region: marker in the first word, total size in the second.
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }
#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}


// Try to promote all objects in new space.
// Each object is promoted to one of the old
// spaces (old pointer space or old data space, chosen by Heap::TargetSpace);
// if that allocation fails, the object is kept in new space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  OldSpace* target_space = Heap::TargetSpace(object);
  ASSERT(target_space == Heap::old_pointer_space() ||
         target_space == Heap::old_data_space());
  Object* forwarded = target_space->MCAllocateRaw(object_size);
  if (forwarded->IsFailure()) {
    // Promotion failed; fall back to relocating within new space.
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}


// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* object,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromOldDataSpace(HeapObject* object,
                                          int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCodeSpace(HeapObject* object, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromMapSpace(HeapObject* object, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}


// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}


// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }
  // Pack the map address and the live-byte offset into the map word.
  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}


// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object) {}


// A code deletion event is logged for non-live code objects.
inline void LogNonLiveCodeObject(HeapObject* object) {
// NOTE(review): the lines below are code-viewer UI chrome (keyboard-shortcut
// help) captured along with the source; they are not part of mark-compact.cc.
// Translated from Chinese for reference:
//   Keyboard shortcuts —
//     Copy code:          Ctrl + C
//     Search code:        Ctrl + F
//     Full-screen mode:   F11
//     Toggle theme:       Ctrl + Shift + D
//     Show shortcuts:     ?
//     Increase font size: Ctrl + =
//     Decrease font size: Ctrl + -