
spaces.cc.svn-base

Google Chrome V8 engine source code (SVN base file)
Page 1 of 5
  }
  ASSERT(Size() == size_in_bytes);
}


Address FreeListNode::next() {
  ASSERT(map() == Heap::byte_array_map());
  ASSERT(Size() >= kNextOffset + kPointerSize);
  return Memory::Address_at(address() + kNextOffset);
}


void FreeListNode::set_next(Address next) {
  ASSERT(map() == Heap::byte_array_map());
  ASSERT(Size() >= kNextOffset + kPointerSize);
  Memory::Address_at(address() + kNextOffset) = next;
}


OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
  Reset();
}


void OldSpaceFreeList::Reset() {
  available_ = 0;
  for (int i = 0; i < kFreeListsLength; i++) {
    free_[i].head_node_ = NULL;
  }
  needs_rebuild_ = false;
  finger_ = kHead;
  free_[kHead].next_size_ = kEnd;
}


void OldSpaceFreeList::RebuildSizeList() {
  ASSERT(needs_rebuild_);
  int cur = kHead;
  for (int i = cur + 1; i < kFreeListsLength; i++) {
    if (free_[i].head_node_ != NULL) {
      free_[cur].next_size_ = i;
      cur = i;
    }
  }
  free_[cur].next_size_ = kEnd;
  needs_rebuild_ = false;
}


int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
  for (int i = 0; i < size_in_bytes; i += kPointerSize) {
    Memory::Address_at(start + i) = kZapValue;
  }
#endif
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(size_in_bytes);

  // Early return to drop too-small blocks on the floor (one or two word
  // blocks cannot hold a map pointer, a size field, and a pointer to the
  // next block in the free list).
  if (size_in_bytes < kMinBlockSize) {
    return size_in_bytes;
  }

  // Insert other blocks at the head of an exact free list.
  int index = size_in_bytes >> kPointerSizeLog2;
  node->set_next(free_[index].head_node_);
  free_[index].head_node_ = node->address();
  available_ += size_in_bytes;
  needs_rebuild_ = true;
  return 0;
}
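
Two details of Free() above are worth calling out: a block is filed under its exact size (size_in_bytes >> kPointerSizeLog2 indexes one list per word count), and anything below kMinBlockSize is returned to the caller as waste because it cannot carry a free-list node. The self-contained sketch below reproduces just that bucketing; every name in it, and the 32-bit word size, is an assumption for illustration, not V8's actual definition.

// Standalone sketch of exact-size bucketing, as in OldSpaceFreeList::Free.
// All names here (MiniFreeList, kWord sizes, ...) are illustrative only.
#include <cassert>
#include <cstdio>

static const int kPointerSize = 4;                   // assume a 32-bit heap
static const int kPointerSizeLog2 = 2;
static const int kMinBlockSize = 3 * kPointerSize;   // map + size + next

struct MiniFreeList {
  static const int kLength = 64;      // one bucket per block size in words
  void* heads[kLength];
  int available;

  MiniFreeList() : available(0) {
    for (int i = 0; i < kLength; i++) heads[i] = 0;
  }

  // Returns the wasted byte count: the whole block if it is too small to
  // hold a free-list node, zero otherwise (mirroring Free() above).
  int Free(void* block, int size_in_bytes) {
    if (size_in_bytes < kMinBlockSize) return size_in_bytes;
    int index = size_in_bytes >> kPointerSizeLog2;    // exact size class
    *reinterpret_cast<void**>(block) = heads[index];  // link at list head
    heads[index] = block;
    available += size_in_bytes;
    return 0;
  }
};

int main() {
  MiniFreeList list;
  void* block[8];                      // pointer-aligned backing storage
  assert(list.Free(block, 8) == 8);    // two words: dropped on the floor
  assert(list.Free(block, 32) == 0);   // eight words: linked into bucket 8
  std::printf("available: %d bytes\n", list.available);
  return 0;
}
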
Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  if (needs_rebuild_) RebuildSizeList();
  int index = size_in_bytes >> kPointerSizeLog2;
  // Check for a perfect fit.
  if (free_[index].head_node_ != NULL) {
    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
    // If this was the last block of its size, remove the size.
    if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
    available_ -= size_in_bytes;
    *wasted_bytes = 0;
    return node;
  }
  // Search the size list for the best fit.
  int prev = finger_ < index ? finger_ : kHead;
  int cur = FindSize(index, &prev);
  ASSERT(index < cur);
  if (cur == kEnd) {
    // No large enough size in list.
    *wasted_bytes = 0;
    return Failure::RetryAfterGC(size_in_bytes, owner_);
  }
  int rem = cur - index;
  int rem_bytes = rem << kPointerSizeLog2;
  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
                                                     size_in_bytes);
  // Distinguish the cases prev < rem < cur and rem <= prev < cur
  // to avoid many redundant tests and calls to Insert/RemoveSize.
  if (prev < rem) {
    // Simple case: insert rem between prev and cur.
    finger_ = prev;
    free_[prev].next_size_ = rem;
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
      free_[rem].next_size_ = free_[cur].next_size_;
    } else {
      free_[rem].next_size_ = cur;
    }
    // Add the remainder block.
    rem_node->set_size(rem_bytes);
    rem_node->set_next(free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
  } else {
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
      finger_ = prev;
      free_[prev].next_size_ = free_[cur].next_size_;
    }
    if (rem_bytes < kMinBlockSize) {
      // Too-small remainder is wasted.
      rem_node->set_size(rem_bytes);
      available_ -= size_in_bytes + rem_bytes;
      *wasted_bytes = rem_bytes;
      return cur_node;
    }
    // Add the remainder block and, if needed, insert its size.
    rem_node->set_size(rem_bytes);
    rem_node->set_next(free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
    if (rem_node->next() == NULL) InsertSize(rem);
  }
  available_ -= size_in_bytes;
  *wasted_bytes = 0;
  return cur_node;
}


#ifdef DEBUG
bool OldSpaceFreeList::Contains(FreeListNode* node) {
  for (int i = 0; i < kFreeListsLength; i++) {
    Address cur_addr = free_[i].head_node_;
    while (cur_addr != NULL) {
      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
      if (cur_node == node) return true;
      cur_addr = cur_node->next();
    }
  }
  return false;
}
#endif


MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
  owner_ = owner;
  Reset();
}


void MapSpaceFreeList::Reset() {
  available_ = 0;
  head_ = NULL;
}


void MapSpaceFreeList::Free(Address start) {
#ifdef DEBUG
  for (int i = 0; i < Map::kSize; i += kPointerSize) {
    Memory::Address_at(start + i) = kZapValue;
  }
#endif
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(Map::kSize);
  node->set_next(head_);
  head_ = node->address();
  available_ += Map::kSize;
}


Object* MapSpaceFreeList::Allocate() {
  if (head_ == NULL) {
    return Failure::RetryAfterGC(Map::kSize, owner_);
  }

  FreeListNode* node = FreeListNode::FromAddress(head_);
  head_ = node->next();
  available_ -= Map::kSize;
  return node;
}
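
Allocate() above first tries a perfect-fit bucket, then follows the threaded size list (the next_size_ chain, with finger_ caching the last search position) to the smallest block that is large enough and splits off the remainder, wasting it when it falls below kMinBlockSize. A minimal sketch of that best-fit-with-split policy, scanning buckets directly instead of using V8's threaded list or search finger, with invented names throughout:

// Sketch: best fit over exact-size buckets, splitting the remainder back in.
// Names and geometry are illustrative, not V8's.
#include <cassert>
#include <cstdlib>
#include <cstring>

static const int kWord = 4;
static const int kBuckets = 64;
static const int kMinBlockWords = 3;

struct Node { Node* next; int words; };

struct Buckets {
  Node* heads[kBuckets];
  Buckets() { std::memset(heads, 0, sizeof(heads)); }

  void Put(Node* n, int words) {
    n->words = words;
    n->next = heads[words];
    heads[words] = n;
  }

  // Best fit: exact bucket first, then the next non-empty larger one.
  Node* Take(int words, int* wasted_words) {
    *wasted_words = 0;
    for (int cur = words; cur < kBuckets; cur++) {
      if (heads[cur] == 0) continue;
      Node* n = heads[cur];
      heads[cur] = n->next;
      int rem = cur - words;
      if (rem == 0) return n;            // perfect fit
      Node* rem_node = reinterpret_cast<Node*>(
          reinterpret_cast<char*>(n) + words * kWord);
      if (rem < kMinBlockWords) {
        *wasted_words = rem;             // too small to relink: waste it
      } else {
        Put(rem_node, rem);              // split remainder into its bucket
      }
      return n;
    }
    return 0;                            // V8 would retry after GC here
  }
};

int main() {
  Buckets b;
  void* mem = std::malloc(40 * kWord);
  b.Put(static_cast<Node*>(mem), 40);
  int wasted = 0;
  Node* n = b.Take(8, &wasted);          // splits 40 words into 8 + 32
  assert(n != 0 && wasted == 0 && b.heads[32] != 0);
  std::free(mem);
  return 0;
}
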
// -----------------------------------------------------------------------------
// OldSpace implementation

void OldSpace::PrepareForMarkCompact(bool will_compact) {
  if (will_compact) {
    // Reset relocation info.  During a compacting collection, everything in
    // the space is considered 'available' and we will rediscover live data
    // and waste during the collection.
    MCResetRelocationInfo();
    mc_end_of_relocation_ = bottom();
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer is considered allocated (everything above is
    // available) and we will rediscover available and wasted bytes during
    // the collection.
    accounting_stats_.AllocateBytes(free_list_.available());
    accounting_stats_.FillWastedBytes(Waste());
  }

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
  ASSERT(Contains(address));
  Address current_top = mc_end_of_relocation_;
  Page* current_page = Page::FromAllocationTop(current_top);

  // No more objects relocated to this page?  Move to the next.
  ASSERT(current_top <= current_page->mc_relocation_top);
  if (current_top == current_page->mc_relocation_top) {
    // The space should already be properly expanded.
    Page* next_page = current_page->next_page();
    CHECK(next_page->is_valid());
    mc_end_of_relocation_ = next_page->ObjectAreaStart();
  }
  ASSERT(mc_end_of_relocation_ == address);
  mc_end_of_relocation_ += size_in_bytes;
}


void OldSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet built free lists or
  // wasted any space.
  ASSERT(Waste() == 0);
  ASSERT(AvailableFree() == 0);

  // Build the free list for the space.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* p = it.next();
    // Space below the relocation pointer is allocated.
    computed_size += p->mc_relocation_top - p->ObjectAreaStart();
    if (it.has_next()) {
      // Free the space at the top of the page.  We cannot use
      // p->mc_relocation_top after the call to Free (because Free will clear
      // remembered set bits).
      int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
      if (extra_size > 0) {
        int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
        // The bytes we have just "freed" to add to the free list were
        // already accounted as available.
        accounting_stats_.WasteBytes(wasted_bytes);
      }
    }
  }

  // Make sure the computed size - based on the used portion of the pages in
  // use - matches the size obtained while computing forwarding addresses.
  ASSERT(computed_size == Size());
}


// Slow case for normal allocation.  Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
  // Linear allocation in this space has failed.  If there is another page
  // in the space, move to that page and allocate there.  This allocation
  // should succeed (size_in_bytes should not be greater than a page's
  // object area size).
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space.  Try free list allocation.
  int wasted_bytes;
  Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
  accounting_stats_.WasteBytes(wasted_bytes);
  if (!result->IsFailure()) {
    accounting_stats_.AllocateBytes(size_in_bytes);
    return HeapObject::cast(result);
  }

  // Free list allocation failed and there is no next page.  Try to expand
  // the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}
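
The comment on SlowAllocateRaw gives its fallback order outright: next page, then free list, then expansion, then failure. As a toy rendering of just that control flow (every type below is invented; the real function returns a HeapObject* or NULL and delegates to the machinery above):

// Toy version of the SlowAllocateRaw fallback chain; ToySpace and its
// fields stand in for V8's Page/FreeList plumbing and are not V8 API.
#include <cstdio>

struct ToySpace {
  bool has_next_page;
  bool free_list_has_fit;
  bool can_expand;

  // Returns a description of which step satisfied the request, in the
  // same order as OldSpace::SlowAllocateRaw.
  const char* SlowAllocate() {
    if (has_next_page) return "bump-allocated in the next page";
    if (free_list_has_fit) return "carved from the free list";
    if (can_expand) {
      has_next_page = true;              // expansion adds a fresh page
      return "expanded, then bump-allocated";
    }
    return "failed: caller must retry after GC";
  }
};

int main() {
  ToySpace space = { false, false, true };
  std::printf("%s\n", space.SlowAllocate());  // takes the expansion path
  return 0;
}
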
// Add the block at the top of the page to the space's free list, set the
// allocation info to the next page (assumed to be one), and allocate
// linearly there.
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  // Add the block at the top of this page to the free list.
  int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
  if (free_size > 0) {
    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
    accounting_stats_.WasteBytes(wasted_bytes);
  }
  SetAllocationInfo(&allocation_info_, current_page->next_page());
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void OldSpace::Verify() {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      // Unless this is the last page in the space containing allocated
      // objects, the allocation top should be at the object area end.
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      } else {
        ASSERT(top == current_page->ObjectAreaEnd());
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // The object should not be a map.
        ASSERT(!object->IsMap());

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap and have
        // their remembered set bits set if they point to new space.  Code
        // objects do not have remembered set bits that we care about.
        VerifyPointersAndRSetVisitor rset_visitor;
        VerifyPointersVisitor no_rset_visitor;
        int size = object->Size();
        if (object->IsCode()) {
          Code::cast(object)->ConvertICTargetsFromAddressToObject();
          object->IterateBody(map->instance_type(), size, &no_rset_visitor);
          Code::cast(object)->ConvertICTargetsFromObjectToAddress();
        } else {
          object->IterateBody(map->instance_type(), size, &rset_visitor);
        }

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}


struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
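
The listing breaks off above, one page into five, inside the CommentStatistic helper. Before that, Verify() rests on a single invariant: each page is packed with objects back to back from ObjectAreaStart() up to its allocation top, so a walker that advances by object->Size() must land exactly on top. A stripped-down walker over length-prefixed records, showing only that stepping pattern and nothing of V8's object layout:

// Stripped-down heap walk: records are length-prefixed, packed back to
// back, and the walk must end exactly at `top` (cf. ASSERT(current == top)).
#include <cassert>

int main() {
  // A fake page: each record is one size word followed by size - 1 payload
  // words.  Record sizes 3, 2, and 4 words; 9 words used in total.
  int page[16] = { 3, 0, 0,  2, 0,  4, 0, 0, 0 };
  int* current = page;
  int* top = page + 9;            // the page's allocation top
  int objects = 0;
  while (current < top) {
    int size = *current;          // per-record size, like object->Size()
    assert(size > 0);             // a corrupted heap would fail here
    current += size;
    objects++;
  }
  assert(current == top);         // the walk may not end mid-object
  assert(objects == 3);
  return 0;
}
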
