📄 spaces.h.svn-base
字号:
// Blocks are put on exact free lists in an array, indexed by size in words. // The available sizes are kept in an increasingly ordered list. Entries // corresponding to sizes < kMinBlockSize always have an empty free list // (but index kHead is used for the head of the size list). struct SizeNode { // Address of the head FreeListNode of the implied block size or NULL. Address head_node_; // Size (words) of the next larger available size if head_node_ != NULL. int next_size_; }; static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1; SizeNode free_[kFreeListsLength]; // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[. static const int kHead = kMinBlockSize / kPointerSize - 1; static const int kEnd = kMaxInt; // We keep a "finger" in the size list to speed up a common pattern: // repeated requests for the same or increasing sizes. int finger_; // Starting from *prev, find and return the smallest size >= index (words), // or kEnd. Update *prev to be the largest size < index, or kHead. int FindSize(int index, int* prev) { int cur = free_[*prev].next_size_; while (cur < index) { *prev = cur; cur = free_[cur].next_size_; } return cur; } // Remove an existing element from the size list. void RemoveSize(int index) { int prev = kHead; int cur = FindSize(index, &prev); ASSERT(cur == index); free_[prev].next_size_ = free_[cur].next_size_; finger_ = prev; } // Insert a new element into the size list. void InsertSize(int index) { int prev = kHead; int cur = FindSize(index, &prev); ASSERT(cur != index); free_[prev].next_size_ = index; free_[index].next_size_ = cur; } // The size list is not updated during a sequence of calls to Free, but is // rebuilt before the next allocation. void RebuildSizeList(); bool needs_rebuild_;#ifdef DEBUG // Does this free list contain a free block located at the address of 'node'? 
bool Contains(FreeListNode* node);
#endif

  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
};


// The free list for the map space.
class MapSpaceFreeList BASE_EMBEDDED {
 public:
  explicit MapSpaceFreeList(AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list. The block starting at 'start' (assumed to
  // have size Map::kSize) is placed on the free list. Bookkeeping
  // information will be written to the block, ie, its contents will be
  // destroyed. The start address should be word aligned.
  void Free(Address start);

  // Allocate a map-sized block from the free list. The block is uninitialized.
  // A failure is returned if no block is available.
  Object* Allocate();

 private:
  // Available bytes on the free list.
  int available_;

  // The head of the free list.
  Address head_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  DISALLOW_COPY_AND_ASSIGN(MapSpaceFreeList);
};


// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from OS.
  explicit OldSpace(int max_capacity,
                    AllocationSpace id,
                    Executability executable)
      : PagedSpace(max_capacity, id, executable), free_list_(id) {
  }

  // The bytes available on the free list (ie, not above the linear allocation
  // pointer).
  int AvailableFree() { return free_list_.available(); }

  // The top of allocation in a page in this space. Undefined if page is unused.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
  }

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste.
void Free(Address start, int size_in_bytes) {
    // Bytes the free list could not use (e.g. fragments too small to link)
    // are charged to the space as waste rather than available memory.
    int wasted_bytes = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    accounting_stats_.WasteBytes(wasted_bytes);
  }

  // Prepare for full garbage collection. Resets the relocation pointer and
  // clears the free list.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Adjust the top of relocation pointer to point to the end of the object
  // given by 'address' and 'size_in_bytes'. Move it to the next page if
  // necessary, ensure that it points to the address, then increment it by the
  // size.
  void MCAdjustRelocationEnd(Address address, int size_in_bytes);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

#ifdef DEBUG
  // Verify integrity of this space.
  virtual void Verify();

  // Reports statistics for the space.
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass. Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass. Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  OldSpaceFreeList free_list_;

  // During relocation, we keep a pointer to the most recently relocated
  // object in order to know when to move to the next page.
  Address mc_end_of_relocation_;

 public:
  TRACK_MEMORY("OldSpace")
};


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  explicit MapSpace(int max_capacity, AllocationSpace id)
      : PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) {
  }

  // The top of allocation in a page in this space. Undefined if page is unused.
virtual Address PageAllocationTop(Page* page) {
    // Map pages end kPageExtra bytes early so the page holds a whole number
    // of map-sized blocks.
    return page == TopPageOf(allocation_info_)
        ? top()
        : page->ObjectAreaEnd() - kPageExtra;
  }

  // Give a map-sized block of memory to the space's free list.
  void Free(Address start) {
    free_list_.Free(start);
    accounting_stats_.DeallocateBytes(Map::kSize);
  }

  // Given an index, returns the page address.
  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

#ifdef DEBUG
  // Verify integrity of this space.
  virtual void Verify();

  // Reports statistic info of the space.
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

  // Constants.
  static const int kMapPageIndexBits = 10;
  static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;

  // Unusable tail of each page: the remainder after packing map-sized blocks.
  static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;

 protected:
  // Virtual function in the superclass. Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass. Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  MapSpaceFreeList free_list_;

  // An array of page start address in a map space.
  // NOTE(review): PageAddress() appears to accept indices up to
  // kMaxMapPageIndex inclusive; if so this array is one element short
  // (kMaxMapPageIndex + 1 entries would be needed) — confirm against callers.
  Address page_addresses_[kMaxMapPageIndex];

 public:
  TRACK_MEMORY("MapSpace")
};


// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space.
// A large object is allocated from OS heap with extra padding bytes
// (Page::kPageSize + Page::kObjectStartOffset). A large object always
// starts at Page::kObjectStartOffset to a page. Large objects do not move
// during garbage collections.

// A LargeObjectChunk holds exactly one large object page with exactly one
// large object.
class LargeObjectChunk {
 public:
  // Allocates a new LargeObjectChunk that contains a large object page
  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
  // object and possibly extra remembered set words) bytes after the object
  // area start of that page. The allocated chunk size is set in the output
  // parameter chunk_size.
  static LargeObjectChunk* New(int size_in_bytes,
                               size_t* chunk_size,
                               Executability executable);

  // Interpret a raw address as a large object chunk.
  static LargeObjectChunk* FromAddress(Address address) {
    return reinterpret_cast<LargeObjectChunk*>(address);
  }

  // Returns the address of this chunk.
  Address address() { return reinterpret_cast<Address>(this); }

  // Accessors for the fields of the chunk.
  LargeObjectChunk* next() { return next_; }
  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }

  size_t size() { return size_; }
  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }

  // Returns the object in this chunk.
  inline HeapObject* GetObject();

  // Given a requested size (including any extra remembered set words),
  // returns the physical size of a chunk to be allocated.
  static int ChunkSizeFor(int size_in_bytes);

  // Given a chunk size, returns the object size it can accommodate (not
  // including any extra remembered set words). Used by
  // LargeObjectSpace::Available. Note that this can overestimate the size
  // of object that will fit in a chunk---if the object requires extra
  // remembered set words (eg, for large fixed arrays), the actual object
  // size for the chunk will be smaller than reported by this function.
static int ObjectSizeFor(int chunk_size) {
    // A chunk smaller than the fixed page overhead holds no object at all.
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }

 private:
  // A pointer to the next large object chunk in the space or NULL.
  LargeObjectChunk* next_;

  // The size of this chunk.
  size_t size_;

 public:
  TRACK_MEMORY("LargeObjectChunk")
};


class LargeObjectSpace : public Space {
  friend class LargeObjectIterator;

 public:
  explicit LargeObjectSpace(AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool Setup();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Allocates a (non-FixedArray, non-Code) large object.
  Object* AllocateRaw(int size_in_bytes);
  // Allocates a large Code object.
  Object* AllocateRawCode(int size_in_bytes);
  // Allocates a large FixedArray.
  Object* AllocateRawFixedArray(int size_in_bytes);

  // Available bytes for objects in this space, not including any extra
  // remembered set words.
  int Available() {
    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
  }

  virtual int Size() { return size_; }

  int PageCount() { return page_count_; }

  // Finds an object for a given address, returns Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space, may be slow.
  Object* FindObject(Address a);

  // Clears remembered sets.
  void ClearRSet();

  // Iterates objects whose remembered set bits are set.
  void IterateRSet(ObjectSlotCallback func);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_chunk_ == NULL; }

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

  // Checks whether an address is in the object area in this space.
// It iterates all objects in the space. May be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  // The head of the linked list of large object chunks.
  LargeObjectChunk* first_chunk_;
  int size_;        // allocated bytes
  int page_count_;  // number of chunks

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  Object* AllocateRawInternal(int requested_size,
                              int object_size,
                              Executability executable);

  // Returns the number of extra bytes (rounded up to the nearest full word)
  // required for extra_object_bytes of extra pointers (in bytes).
  static inline int ExtraRSetBytesFor(int extra_object_bytes);

 public:
  TRACK_MEMORY("LargeObjectSpace")
};


// Iterates over the objects of a LargeObjectSpace, optionally applying a
// size function to each object visited.
class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  bool has_next() { return current_ != NULL; }
  HeapObject* next();

  // Implementation of ObjectIterator.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  // The chunk holding the next object to return, or NULL when exhausted.
  LargeObjectChunk* current_;
  HeapObjectCallback size_func_;
};

} }  // namespace v8::internal

#endif  // V8_SPACES_H_
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -