
spaces.cc.svn-base

Google Chrome V8 engine source
Page 1 of 5
};

// must be small, since an iteration is used for lookup
const int kMaxComments = 64;
static CommentStatistic comments_statistics[kMaxComments+1];


void PagedSpace::ReportCodeStatistics() {
  ReportCodeKindStatistics();
  PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
         "count  (average)\"):\n");
  for (int i = 0; i <= kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}


void PagedSpace::ResetCodeStatistics() {
  ClearCodeKindStatistics();
  for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
  comments_statistics[kMaxComments].comment = "Unknown";
  comments_statistics[kMaxComments].size = 0;
  comments_statistics[kMaxComments].count = 0;
}


// Adds comment to 'comment_statistics' table. Performance OK as long as
// 'kMaxComments' is small
static void EnterComment(const char* comment, int delta) {
  // Do not count empty comments
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to result.
  for (int i = 0; i < kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update entry for 'comment'
  cs->size += delta;
  cs->count += 1;
}
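// Illustrative walk-through of EnterComment (hypothetical inputs, assuming
// the table starts out cleared): EnterComment("CallIC", 8) claims the first
// NULL slot and records size 8, count 1; a later EnterComment("CallIC", 4)
// matches that slot via strcmp and leaves it at size 12, count 2. Comments
// arriving after all kMaxComments slots are taken fall through to the
// reserved "Unknown" entry at index kMaxComments.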
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']').  RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip
    return;
  }

  // Search for end of nested comment or a new nested comment
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // from loop.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += it->rinfo()->pc() - prev_pc;
      if (txt[0] == ']') break;  // End of nested comment
      // A new comment
      CollectCommentStatistics(it);
      // Skip code that was covered with previous comment
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(comment_txt, flat_delta);
}


// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  HeapObjectIterator obj_it(this);
  while (obj_it.has_next()) {
    HeapObject* obj = obj_it.next();
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += it.rinfo()->pc() - prev_pc;
          CollectCommentStatistics(&it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }
      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->relocation_start());
      delta += code->relocation_start() - prev_pc;
      EnterComment("NoComment", delta);
    }
  }
}
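// Note: taken together, the two functions above attribute every instruction
// byte to its innermost enclosing '[ ...' comment: the recursive
// CollectCommentStatistics call charges a nested comment's span to that
// comment, and advancing prev_pc past that span keeps the outer comment from
// counting the same bytes again; bytes outside any comment are charged to
// "NoComment".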
void OldSpace::ReportStatistics() {
  int pct = Available() * 100 / Capacity();
  PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  // Report remembered set statistics.
  int rset_marked_pointers = 0;
  int rset_marked_arrays = 0;
  int rset_marked_array_elements = 0;
  int cross_gen_pointers = 0;
  int cross_gen_array_elements = 0;

  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
  while (page_it.has_next()) {
    Page* p = page_it.next();

    for (Address rset_addr = p->RSetStart();
         rset_addr < p->RSetEnd();
         rset_addr += kIntSize) {
      int rset = Memory::int_at(rset_addr);
      if (rset != 0) {
        // Bits were set
        int intoff = rset_addr - p->address();
        int bitoff = 0;
        for (; bitoff < kBitsPerInt; ++bitoff) {
          if ((rset & (1 << bitoff)) != 0) {
            int bitpos = intoff*kBitsPerByte + bitoff;
            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
            Object** obj = reinterpret_cast<Object**>(slot);
            if (*obj == Heap::fixed_array_map()) {
              rset_marked_arrays++;
              FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
              rset_marked_array_elements += fa->length();
              // Manually inline FixedArray::IterateBody
              Address elm_start = slot + FixedArray::kHeaderSize;
              Address elm_stop = elm_start + fa->length() * kPointerSize;
              for (Address elm_addr = elm_start;
                   elm_addr < elm_stop; elm_addr += kPointerSize) {
                // Filter non-heap-object pointers
                Object** elm_p = reinterpret_cast<Object**>(elm_addr);
                if (Heap::InNewSpace(*elm_p))
                  cross_gen_array_elements++;
              }
            } else {
              rset_marked_pointers++;
              if (Heap::InNewSpace(*obj))
                cross_gen_pointers++;
            }
          }
        }
      }
    }
  }

  pct = rset_marked_pointers == 0 ?
        0 : cross_gen_pointers * 100 / rset_marked_pointers;
  PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
         rset_marked_pointers, cross_gen_pointers, pct);
  PrintF("  rset_marked arrays %d, ", rset_marked_arrays);
  PrintF("  elements %d, ", rset_marked_array_elements);
  pct = rset_marked_array_elements == 0 ? 0
        : cross_gen_array_elements * 100 / rset_marked_array_elements;
  PrintF("  pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
  PrintF("  total rset-marked bits %d\n",
         (rset_marked_pointers + rset_marked_arrays));
  pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
        : (cross_gen_pointers + cross_gen_array_elements) * 100 /
          (rset_marked_pointers + rset_marked_array_elements);
  PrintF("  total rset pointers %d, true cross generation ones %d (%%%d)\n",
         (rset_marked_pointers + rset_marked_array_elements),
         (cross_gen_pointers + cross_gen_array_elements),
         pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
  ReportHistogram(true);
}
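// Worked example of the slot computation in the loop above, assuming a
// 32-bit build (kIntSize == 4, kBitsPerByte == 8, kObjectAlignmentBits == 2,
// i.e. one remembered set bit per 4-byte word): for the rset word at page
// offset intoff == 8 with bit bitoff == 5 set,
//   bitpos      = 8 * 8 + 5 = 69
//   slot offset = 69 << 2   = 276
// so the marked slot is the pointer 276 bytes from the start of the page.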
// Dump the range of remembered set words between [start, end) corresponding
// to the pointers starting at object_p.  The allocation_top is an object
// pointer which should not be read past.  This is important for large object
// pages, where some bits in the remembered set range do not correspond to
// allocated addresses.
static void PrintRSetRange(Address start, Address end, Object** object_p,
                           Address allocation_top) {
  Address rset_address = start;

  // If the range starts on an odd-numbered word (e.g., for large object
  // extra remembered set ranges), print some spaces.
  if ((reinterpret_cast<uint32_t>(start) / kIntSize) % 2 == 1) {
    PrintF("                                    ");
  }

  // Loop over all the words in the range.
  while (rset_address < end) {
    uint32_t rset_word = Memory::uint32_at(rset_address);
    int bit_position = 0;

    // Loop over all the bits in the word.
    while (bit_position < kBitsPerInt) {
      if (object_p == reinterpret_cast<Object**>(allocation_top)) {
        // Print a bar at the allocation pointer.
        PrintF("|");
      } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
        // Do not dereference object_p past the allocation pointer.
        PrintF("#");
      } else if ((rset_word & (1 << bit_position)) == 0) {
        // Print a dot for zero bits.
        PrintF(".");
      } else if (Heap::InNewSpace(*object_p)) {
        // Print an X for one bits for pointers to new space.
        PrintF("X");
      } else {
        // Print a circle for one bits for pointers to old space.
        PrintF("o");
      }

      // Print a space after every 8th bit except the last.
      if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
        PrintF(" ");
      }

      // Advance to next bit.
      bit_position++;
      object_p++;
    }

    // Print a newline after every odd numbered word, otherwise a space.
    if ((reinterpret_cast<uint32_t>(rset_address) / kIntSize) % 2 == 1) {
      PrintF("\n");
    } else {
      PrintF(" ");
    }

    // Advance to next remembered set word.
    rset_address += kIntSize;
  }
}


void PagedSpace::DoPrintRSet(const char* space_name) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    PrintF("%s page 0x%x:\n", space_name, p);
    PrintRSetRange(p->RSetStart(), p->RSetEnd(),
                   reinterpret_cast<Object**>(p->ObjectAreaStart()),
                   p->AllocationTop());
    PrintF("\n");
  }
}


void OldSpace::PrintRSet() { DoPrintRSet("old"); }
#endif


// -----------------------------------------------------------------------------
// MapSpace implementation

void MapSpace::PrepareForMarkCompact(bool will_compact) {
  if (will_compact) {
    // Reset relocation info.
    MCResetRelocationInfo();

    // Initialize map index entry.
    int page_count = 0;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (it.has_next()) {
      ASSERT_MAP_PAGE_INDEX(page_count);

      Page* p = it.next();
      ASSERT(p->mc_page_index == page_count);

      page_addresses_[page_count++] = p->address();
    }

    // During a compacting collection, everything in the space is considered
    // 'available' (set by the call to MCResetRelocationInfo) and we will
    // rediscover live and wasted bytes during the collection.
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer except wasted top-of-page blocks is considered
    // allocated and we will rediscover available bytes during the
    // collection.
    accounting_stats_.AllocateBytes(free_list_.available());
  }

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


void MapSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet wasted any space.
  ASSERT(Waste() == 0);

  // Update allocation_top of each page in use and compute waste.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* page = it.next();
    Address page_top = page->AllocationTop();
    computed_size += page_top - page->ObjectAreaStart();
    if (it.has_next()) {
      accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
    }
  }

  // Make sure the computed size - based on the used portion of the
  // pages in use - matches the size we adjust during allocation.
  ASSERT(computed_size == Size());
}
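// For context on the slow path below: allocation first tries the inline
// bump-pointer fast path (AllocateLinearly against allocation_info_.top and
// limit) and only reaches SlowAllocateRaw when the current page's linear
// area cannot fit size_in_bytes.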
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
  // Linear allocation in this space has failed.  If there is another page
  // in the space, move to that page and allocate there.  This allocation
  // should succeed.
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space.  Try free list allocation.  The
  // map space free list implicitly assumes that all free blocks are map
  // sized.
  if (size_in_bytes == Map::kSize) {
    Object* result = free_list_.Allocate();
    if (!result->IsFailure()) {
      accounting_stats_.AllocateBytes(size_in_bytes);
      return HeapObject::cast(result);
    }
  }

  // Free list allocation failed and there is no next page.  Try to expand
  // the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}


// Move to the next page (there is assumed to be one) and allocate there.
// The top of page block is always wasted, because it is too small to hold a
// map.
HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
  accounting_stats_.WasteBytes(kPageExtra);
  SetAllocationInfo(&allocation_info_, current_page->next_page());
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void MapSpace::Verify() {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* curr
