📄 spaces.cc.svn-base

📁 Google Chrome V8 engine source code
💻 SVN-BASE
📖 Page 1 of 5
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 { namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space)->low() <= (info).top                 \
         && (info).top <= (space)->high()             \
         && (info).limit == (space)->high())

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}

HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}

HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}

HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}

void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}

bool HeapObjectIterator::HasNextInNextPage() {
  if (cur_addr_ == end_addr_) return false;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return true;
}

#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif

// ----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) {
  cur_page_ = space->first_page_;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage()->next_page();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage()->next_page();
      break;
    case ALL_PAGES:
      stop_page_ = Page::FromAddress(NULL);
      break;
    default:
      UNREACHABLE();
  }
}

// ----------------------------------------------------------------------------
// Page

#ifdef DEBUG
Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif

// ----------------------------------------------------------------------------
// MemoryAllocator
//
int MemoryAllocator::capacity_   = 0;
int MemoryAllocator::size_       = 0;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;

void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}

int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}

bool MemoryAllocator::Setup(int capacity) {
  capacity_ = RoundUp(capacity, Page::kPageSize);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}

void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();
  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}

void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<int>(requested) > capacity_) return NULL;
  void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
  int alloced = *allocated;
  size_ += alloced;
  Counters::memory_allocated.Increment(alloced);
  return mem;
}

void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
  OS::Free(mem, length);
  Counters::memory_allocated.Decrement(length);
  size_ -= length;
  ASSERT(size_ >= 0);
}

void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += requested;
  return initial_chunk_->address();
}

static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return (RoundDown(start + size, Page::kPageSize)
          - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
}

Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee the desired number of pages can be
  // allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = chunk_size >> Page::kPageSizeBits;

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size);
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}

Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(initial_chunk_->address() <= start);
  ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
                             + initial_chunk_->size());
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
  Counters::memory_allocated.Increment(size);

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}

bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(initial_chunk_->address() <= start);
  ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
                             + initial_chunk_->size());

  if (!initial_chunk_->Commit(start, size, executable)) return false;
  Counters::memory_allocated.Increment(size);
  return true;
}

Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->is_normal_page = 1;
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}

Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'prev'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}

void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  bool in_initial_chunk = false;
  if (initial_chunk_ != NULL) {
    Address start = static_cast<Address>(initial_chunk_->address());
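MemoryAllocator manages chunk ids with a plain fixed-capacity stack: Setup seeds free_chunk_ids_ in reverse order so that Pop hands out id 0 first, AllocatePages and CommitPages Pop an id for each new chunk, and DeleteChunk presumably returns ids via Push (its body is cut off at this page boundary). Below is a minimal standalone sketch of that pattern; ChunkIdPool, Acquire, and Release are hypothetical names for illustration, not V8's API, which keeps this state in static members.

// A minimal sketch of the free-chunk-id stack pattern used by
// MemoryAllocator above.  ChunkIdPool/Acquire/Release are hypothetical
// stand-in names; V8 stores the same state in static members.
#include <cassert>
#include <vector>

class ChunkIdPool {
 public:
  explicit ChunkIdPool(int max_chunks) : top_(max_chunks) {
    // Seed in reverse so ids pop in ascending order (0 first), mirroring
    // the reverse loop in MemoryAllocator::Setup.
    for (int i = max_chunks - 1; i >= 0; i--) ids_.push_back(i);
  }

  // Pop: hand out the next free chunk id.
  int Acquire() {
    assert(top_ > 0);
    return ids_[--top_];
  }

  // Push: return an id once its chunk has been deleted.
  void Release(int id) {
    assert(top_ < static_cast<int>(ids_.size()));
    ids_[top_++] = id;
  }

  // Analogue of OutOfChunkIds(): no free ids remain.
  bool OutOfIds() const { return top_ == 0; }

 private:
  std::vector<int> ids_;  // elements [0, top_) are free ids
  int top_;               // stack pointer into ids_
};

int main() {
  ChunkIdPool pool(4);
  int a = pool.Acquire();       // yields 0
  int b = pool.Acquire();       // yields 1
  pool.Release(a);              // 0 becomes reusable
  assert(pool.Acquire() == a);  // and is handed out again
  (void)b;
  return 0;
}

Over-estimating max_nof_chunks_ up front is what lets CommitPages simply CHECK(!OutOfChunkIds()) instead of ever growing the pool.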

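The page-count arithmetic in PagesInChunk rewards checking with concrete numbers: a chunk that is not page-aligned loses the partial page at each end. Here is a self-contained version under an assumed 8 KB page size; kPageSizeBits, RoundUpTo, and RoundDownTo are illustrative stand-ins for the Page constants and RoundUp/RoundDown helpers V8 defines elsewhere.

// Standalone sketch of the PagesInChunk arithmetic, with an assumed
// 8 KB page size; all names here are local stand-ins, not V8's.
#include <cassert>
#include <cstddef>
#include <cstdint>

const uintptr_t kPageSizeBits = 13;                         // assumed value
const uintptr_t kPageSize = uintptr_t(1) << kPageSizeBits;  // 8 KB

// Round x down/up to a power-of-two alignment.
uintptr_t RoundDownTo(uintptr_t x, uintptr_t align) { return x & ~(align - 1); }
uintptr_t RoundUpTo(uintptr_t x, uintptr_t align) {
  return RoundDownTo(x + align - 1, align);
}

// Whole pages between the first page boundary at or after start and the
// last page boundary at or before start + size.  kPageSize is a power of
// two, so the division is a shift, as in the V8 code above.
int PagesInChunk(uintptr_t start, size_t size) {
  return static_cast<int>(
      (RoundDownTo(start + size, kPageSize) - RoundUpTo(start, kPageSize)) >>
      kPageSizeBits);
}

int main() {
  // Misaligned by one byte: of 3 pages' worth of bytes, only 2 whole
  // pages fit between the surrounding page boundaries.
  assert(PagesInChunk(1, 3 * kPageSize) == 2);
  // Page-aligned: all 3 pages are usable.
  assert(PagesInChunk(4 * kPageSize, 3 * kPageSize) == 3);
  return 0;
}

This is also why AllocatePages treats *allocated_pages == 0 as a failure: a sufficiently small or badly aligned raw allocation can contain no whole page at all.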