spaces.cc.svn-base
#endif

  start_ = NULL;
  capacity_ = 0;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  if (to_space_ != NULL) {
    to_space_->TearDown();
    delete to_space_;
    to_space_ = NULL;
  }

  if (from_space_ != NULL) {
    from_space_->TearDown();
    delete from_space_;
    from_space_ = NULL;
  }
}


void NewSpace::Flip() {
  SemiSpace* tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


bool NewSpace::Double() {
  ASSERT(capacity_ <= maximum_capacity_ / 2);
  // TODO(1240712): Failure to double the from space can result in
  // semispaces of different sizes.  In the event of that failure, the
  // to space doubling should be rolled back before returning false.
  if (!to_space_->Double() || !from_space_->Double()) return false;
  capacity_ *= 2;
  allocation_info_.limit = to_space_->high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_->low();
  allocation_info_.limit = to_space_->high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_->low();
  mc_forwarding_info_.limit = from_space_->high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_->high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_->low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif
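// ---------------------------------------------------------------------------
// Illustrative sketch (compiled out; the Toy* names are hypothetical, not
// part of this file's API): a minimal standalone model of the semispace
// scheme above.  Allocation bumps a top pointer through to-space; a flip
// swaps the two spaces and resets the allocation info, mirroring what
// NewSpace::Flip() followed by NewSpace::ResetAllocationInfo() do.
#if 0
#include <cstddef>
#include <cstdint>

struct ToySemiSpace {
  uint8_t* low;   // first byte of the space
  uint8_t* high;  // one past the last byte
};

struct ToyNewSpace {
  ToySemiSpace* to_space;
  ToySemiSpace* from_space;
  uint8_t* top;    // next free byte in to-space
  uint8_t* limit;  // allocation limit (== to_space->high)

  // Bump allocation: the fast path is one bounds check and a pointer add.
  void* Allocate(size_t size) {
    if (size > static_cast<size_t>(limit - top)) return NULL;
    void* result = top;
    top += size;
    return result;
  }

  // Swap the spaces and resume allocating at the bottom of the new
  // to-space.  In the real collector, survivors are copied into to-space
  // between the swap and the reset.
  void FlipAndReset() {
    ToySemiSpace* tmp = from_space;
    from_space = to_space;
    to_space = tmp;
    top = to_space->low;
    limit = to_space->high;
  }
};
#endif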
// -----------------------------------------------------------------------------
// SemiSpace implementation

SemiSpace::SemiSpace(int initial_capacity,
                     int maximum_capacity,
                     AllocationSpace id)
    : Space(id, NOT_EXECUTABLE),
      capacity_(initial_capacity),
      maximum_capacity_(maximum_capacity),
      start_(NULL),
      age_mark_(NULL) {
}


bool SemiSpace::Setup(Address start, int size) {
  ASSERT(size == maximum_capacity_);
  if (!MemoryAllocator::CommitBlock(start, capacity_, executable())) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;
  age_mark_ = start_;
  return true;
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Double() {
  if (!MemoryAllocator::CommitBlock(high(), capacity_, executable())) {
    return false;
  }
  capacity_ *= 2;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif
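// ---------------------------------------------------------------------------
// Illustrative sketch (compiled out; the Toy* names are hypothetical): how
// masks like those computed in SemiSpace::Setup() above give a constant-time
// containment test.  This assumes the space is size-aligned and `size` is a
// power of two, which is what the ~(size - 1) mask computation relies on:
// clearing the low bits of any address inside the space recovers `start`.
#if 0
#include <cassert>
#include <cstdint>

// Matches the tagging scheme assumed above: an object reference is a
// word-aligned pointer with the low bit set.
static const uintptr_t kToyHeapObjectTag = 1;

struct ToyMasks {
  uintptr_t address_mask;     // ~(size - 1)
  uintptr_t object_mask;      // address_mask | tag
  uintptr_t object_expected;  // start | tag
};

ToyMasks ComputeMasks(uintptr_t start, uintptr_t size) {
  assert(size != 0 && (size & (size - 1)) == 0);  // power of two
  assert((start & (size - 1)) == 0);              // start aligned to size
  ToyMasks m;
  m.address_mask = ~(size - 1);
  m.object_mask = m.address_mask | kToyHeapObjectTag;
  m.object_expected = start | kToyHeapObjectTag;
  return m;
}

// A tagged pointer is in the space iff masking away the offset bits (while
// keeping the tag bit) yields exactly start|tag: one AND and one compare.
bool ToyContains(const ToyMasks& m, uintptr_t tagged_ptr) {
  return (tagged_ptr & m.object_mask) == m.object_expected;
}
#endif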
// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.

SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end && end <= space->ToSpaceHigh());
  space_ = space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// A static array of histogram info for each type.
static HistogramInfo heap_histograms[LAST_TYPE+1];
static JSObject::SpillInformation js_spill_information;


// heap_histograms is shared; always clear it before using it.
static void ClearHistograms() {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  js_spill_information.Clear();
}


static int code_kind_statistics[Code::NUMBER_OF_KINDS];


static void ClearCodeKindStatistics() {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  const char* table[Code::NUMBER_OF_KINDS];

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
    }
  }

#undef CASE

  PrintF("\n Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(heap_histograms[type].name() != NULL);
  heap_histograms[type].increment_number(1);
  heap_histograms[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
  }

  return obj->Size();
}


static void ReportHistogram(bool print_spill) {
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (heap_histograms[i].number() > 0) {
      PrintF(" %-33s%10d (%10d bytes)\n",
             heap_histograms[i].name(),
             heap_histograms[i].number(),
             heap_histograms[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name)                \
  string_number += heap_histograms[type].number(); \
  string_bytes += heap_histograms[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-33s%10d (%10d bytes)\n\n", "STRING_TYPE",
           string_number, string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    js_spill_information.Print();
  }
}
#endif  // DEBUG
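// ---------------------------------------------------------------------------
// Illustrative sketch (compiled out; the TOY_* names are hypothetical): the
// X-macro pattern behind the INSTANCE_TYPE_LIST / STRING_TYPE_LIST
// expansions above.  One list macro is instantiated with different
// per-entry macros, so the set of types is written once and reused for
// defining, naming, clearing, and summing histogram buckets.
#if 0
#define TOY_TYPE_LIST(V) \
  V(TOY_STRING)          \
  V(TOY_ARRAY)           \
  V(TOY_CODE)

// First expansion: build the enum from the list.
enum ToyType {
#define TOY_ENUM(name) name,
  TOY_TYPE_LIST(TOY_ENUM)
#undef TOY_ENUM
  TOY_TYPE_COUNT
};

static int toy_counts[TOY_TYPE_COUNT];

// Second expansion: sum every bucket.  This is the same trick
// ReportHistogram uses with STRING_TYPE_LIST(INCREMENT) to lump all the
// string types into a single total.
int ToyTotal() {
  int total = 0;
#define TOY_ADD(name) total += toy_counts[name];
  TOY_TYPE_LIST(TOY_ADD)
#undef TOY_ADD
  return total;
}
#endif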
// Support for statistics gathering for --heap-stats and --log-gc.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}


// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens (1) when compiled with DEBUG and the --heap-stats flag
// is set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the
// --log-gc flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  while (it.has_next()) RecordAllocation(it.next());
}


#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(HistogramInfo* info, const char* description) {
  LOG(HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name)     \
  string_number += info[type].number(); \
  string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(HeapSampleEndEvent("NewSpace", description));
}
#endif  // ENABLE_LOGGING_AND_PROFILING


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF(" capacity: %d, available: %d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-33s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) {
    DoReportStatistics(allocated_histogram_, "allocated");
    DoReportStatistics(promoted_histogram_, "promoted");
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block.  If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > Array::kHeaderSize) {
    set_map(Heap::byte_array_map());
    ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(Heap::one_word_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(Heap::two_word_filler_map());
  } else {
    UNREACHABLE();
  }
}
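// ---------------------------------------------------------------------------
// Illustrative sketch (compiled out; the Toy* names and constants are
// hypothetical, assuming a 32-bit heap with 4-byte pointers and an 8-byte
// array header, consistent with the uint32_t cast in SemiSpace::Setup()):
// the size encoding used by FreeListNode::set_size() above.  Free blocks
// must look like real heap objects so heap iteration can step over them: a
// block big enough for a byte-array header stores its size as an array
// length, while one- and two-word blocks get dedicated filler maps whose
// size is implied by the map itself.
#if 0
#include <cassert>

static const int kToyPointerSize = 4;      // 4-byte words
static const int kToyArrayHeaderSize = 8;  // map word + length word

// Inverse pair: a byte array of length L occupies header + L bytes, so a
// free block of size S is encoded as length S - header.
inline int ToyLengthFor(int size_in_bytes) {
  return size_in_bytes - kToyArrayHeaderSize;
}
inline int ToySizeFor(int length) {
  return kToyArrayHeaderSize + length;
}

enum ToyEncoding { TOY_BYTE_ARRAY, TOY_ONE_WORD_FILLER, TOY_TWO_WORD_FILLER };

ToyEncoding ToyEncodeFreeBlock(int size_in_bytes) {
  assert(size_in_bytes > 0 && size_in_bytes % kToyPointerSize == 0);
  if (size_in_bytes > kToyArrayHeaderSize) {
    // Room for the header plus at least one payload word, enough to also
    // hold the free-list next pointer.
    assert(ToySizeFor(ToyLengthFor(size_in_bytes)) == size_in_bytes);
    return TOY_BYTE_ARRAY;
  } else if (size_in_bytes == kToyPointerSize) {
    return TOY_ONE_WORD_FILLER;
  } else if (size_in_bytes == 2 * kToyPointerSize) {
    return TOY_TWO_WORD_FILLER;
  }
  assert(false);  // unreachable for aligned, positive sizes
  return TOY_ONE_WORD_FILLER;
}
#endif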