fastmalloc.cpp
  if (result != NULL) {
    metadata_system_bytes += bytes;
  }
  return result;
}

template <class T>
class PageHeapAllocator {
 private:
  // How much to allocate from system at a time
  static const size_t kAllocIncrement = 32 << 10;

  // Aligned size of T
  static const size_t kAlignedSize
    = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);

  // Free area from which to carve new objects
  char* free_area_;
  size_t free_avail_;

  // Linked list of all regions allocated by this allocator
  void* allocated_regions_;

  // Free list of already carved objects
  void* free_list_;

  // Number of allocated but unfreed objects
  int inuse_;

 public:
  void Init() {
    ASSERT(kAlignedSize <= kAllocIncrement);
    inuse_ = 0;
    allocated_regions_ = 0;
    free_area_ = NULL;
    free_avail_ = 0;
    free_list_ = NULL;
  }

  T* New() {
    // Consult free list
    void* result;
    if (free_list_ != NULL) {
      result = free_list_;
      free_list_ = *(reinterpret_cast<void**>(result));
    } else {
      if (free_avail_ < kAlignedSize) {
        // Need more room
        char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
        if (!new_allocation)
          CRASH();

        // Chain the new region onto allocated_regions_; its first word
        // holds the link, so carving starts at offset kAlignedSize.
        *(void**)new_allocation = allocated_regions_;
        allocated_regions_ = new_allocation;
        free_area_ = new_allocation + kAlignedSize;
        free_avail_ = kAllocIncrement - kAlignedSize;
      }
      result = free_area_;
      free_area_ += kAlignedSize;
      free_avail_ -= kAlignedSize;
    }
    inuse_++;
    return reinterpret_cast<T*>(result);
  }

  void Delete(T* p) {
    // Thread the object onto the intrusive free list; its first word
    // is reused as the link.
    *(reinterpret_cast<void**>(p)) = free_list_;
    free_list_ = p;
    inuse_--;
  }

  int inuse() const { return inuse_; }

#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
  template <class Recorder>
  void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
  {
    vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
    while (adminAllocation) {
      recorder.recordRegion(adminAllocation, kAllocIncrement);
      adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
    }
  }
#endif
};
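// ---------------------------------------------------------------------------
// Illustrative sketch (not in the original file): how PageHeapAllocator
// carves and recycles fixed-size metadata objects. ExampleNode,
// example_allocator, and ExampleAllocatorUsage are hypothetical names; the
// real payloads are the Span and StackTrace types defined below.
struct ExampleNode { void* link; int payload; };
static PageHeapAllocator<ExampleNode> example_allocator;

static void ExampleAllocatorUsage() {
  example_allocator.Init();

  // The first New() grabs a fresh 32 KiB region; the object lands at offset
  // kAlignedSize because offset 0 stores the region-chain pointer.
  ExampleNode* x = example_allocator.New();
  ExampleNode* y = example_allocator.New();   // the next aligned slot
  ASSERT(example_allocator.inuse() == 2);

  example_allocator.Delete(x);                // pushed onto free_list_
  ExampleNode* z = example_allocator.New();   // free-list hit: z == x
  ASSERT(z == x);
  (void) y;
  (void) z;
}
// ---------------------------------------------------------------------------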
// -------------------------------------------------------------------------
// Span - a contiguous run of pages
// -------------------------------------------------------------------------

// Type that can hold a page number
typedef uintptr_t PageID;

// Type that can hold the length of a run of pages
typedef uintptr_t Length;

static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;

// Convert byte size into pages.  This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
static inline Length pages(size_t bytes) {
  return (bytes >> kPageShift) +
      ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}

// Convert a user size into the number of bytes that will actually be
// allocated
static size_t AllocationSize(size_t bytes) {
  if (bytes > kMaxSize) {
    // Large object: we allocate an integral number of pages
    ASSERT(bytes <= (kMaxValidPages << kPageShift));
    return pages(bytes) << kPageShift;
  } else {
    // Small object: find the size class to which it belongs
    return ByteSizeForClass(SizeClass(bytes));
  }
}

// Information kept for a span (a contiguous run of pages).
struct Span {
  PageID        start;          // Starting page number
  Length        length;         // Number of pages in span
  Span*         next;           // Used when in linked list
  Span*         prev;           // Used when in linked list
  void*         objects;        // Linked list of free objects
  unsigned int  free : 1;       // Is the span free
#ifndef NO_TCMALLOC_SAMPLES
  unsigned int  sample : 1;     // Sampled object?
#endif
  unsigned int  sizeclass : 8;  // Size-class for small objects (or 0)
  unsigned int  refcount : 11;  // Number of non-free objects
  bool decommitted : 1;

#undef SPAN_HISTORY
#ifdef SPAN_HISTORY
  // For debugging, we can keep a log of events per span
  int nexthistory;
  char history[64];
  int value[64];
#endif
};

#if TCMALLOC_TRACK_DECOMMITED_SPANS
#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
#else
#define ASSERT_SPAN_COMMITTED(span)
#endif

#ifdef SPAN_HISTORY
void Event(Span* span, char op, int v = 0) {
  span->history[span->nexthistory] = op;
  span->value[span->nexthistory] = v;
  span->nexthistory++;
  if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
#define Event(s,o,v) ((void) 0)
#endif

// Allocator/deallocator for spans
static PageHeapAllocator<Span> span_allocator;

static Span* NewSpan(PageID p, Length len) {
  Span* result = span_allocator.New();
  memset(result, 0, sizeof(*result));
  result->start = p;
  result->length = len;
#ifdef SPAN_HISTORY
  result->nexthistory = 0;
#endif
  return result;
}

static inline void DeleteSpan(Span* span) {
#ifndef NDEBUG
  // In debug mode, trash the contents of deleted Spans
  memset(span, 0x3f, sizeof(*span));
#endif
  span_allocator.Delete(span);
}

// -------------------------------------------------------------------------
// Doubly linked list of spans.
// -------------------------------------------------------------------------

static inline void DLL_Init(Span* list) {
  list->next = list;
  list->prev = list;
}

static inline void DLL_Remove(Span* span) {
  span->prev->next = span->next;
  span->next->prev = span->prev;
  span->prev = NULL;
  span->next = NULL;
}

static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
  return list->next == list;
}

static int DLL_Length(const Span* list) {
  int result = 0;
  for (Span* s = list->next; s != list; s = s->next) {
    result++;
  }
  return result;
}

#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
static void DLL_Print(const char* label, const Span* list) {
  MESSAGE("%-10s %p:", label, list);
  for (const Span* s = list->next; s != list; s = s->next) {
    MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
  }
  MESSAGE("\n");
}
#endif

static inline void DLL_Prepend(Span* list, Span* span) {
  ASSERT(span->next == NULL);
  ASSERT(span->prev == NULL);
  span->next = list->next;
  span->prev = list;
  list->next->prev = span;
  list->next = span;
}
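// ---------------------------------------------------------------------------
// Illustrative sketch (not in the original file): how the DLL_* helpers and
// NewSpan/DeleteSpan compose. ExampleListUsage is a hypothetical name, and it
// assumes span_allocator.Init() has already run, as it does during startup.
static void ExampleListUsage() {
  Span header;                        // sentinel node: the list head
  DLL_Init(&header);                  // empty list: next == prev == &header
  ASSERT(DLL_IsEmpty(&header));

  Span* s = NewSpan(0, 1);            // memset to zero, so next/prev are NULL,
  DLL_Prepend(&header, s);            // satisfying DLL_Prepend's ASSERTs
  ASSERT(DLL_Length(&header) == 1);

  DLL_Remove(s);                      // unlinks s and re-NULLs its pointers
  ASSERT(DLL_IsEmpty(&header));
  DeleteSpan(s);
}
// ---------------------------------------------------------------------------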
// -------------------------------------------------------------------------
// Stack traces kept for sampled allocations
//   The following state is protected by pageheap_lock_.
// -------------------------------------------------------------------------

// size/depth are made the same size as a pointer so that some generic
// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;

struct StackTrace {
  uintptr_t size;          // Size of object
  uintptr_t depth;         // Number of PC values stored in array below
  void*     stack[kMaxStackDepth];
};

static PageHeapAllocator<StackTrace> stacktrace_allocator;
static Span sampled_objects;

// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS, uint64_t> CacheType;
};

#if defined(WTF_CHANGES)
#if PLATFORM(X86_64)
// On all known X86-64 platforms, the upper 16 bits are always unused and
// therefore can be excluded from the PageMap key.
// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
static const size_t kBitsUnusedOn64Bit = 16;
#else
static const size_t kBitsUnusedOn64Bit = 0;
#endif

// A three-level map for 64-bit machines
template <> class MapSelector<64> {
 public:
  typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
  typedef PackedCache<64, uint64_t> CacheType;
};
#endif

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32 - kPageShift> Type;
  typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
};
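// ---------------------------------------------------------------------------
// Illustrative sketch (not in the original file): the same compile-time
// dispatch idiom with stand-in map types, to make the selection visible.
// ExampleSelector and the stub structs are hypothetical; the real code
// instantiates MapSelector<8*sizeof(uintptr_t)> below.
struct ExampleTwoLevelMap { };        // stand-in for TCMalloc_PageMap2<...>
struct ExampleThreeLevelMap { };      // stand-in for TCMalloc_PageMap3<...>

template <int BITS> struct ExampleSelector {   // general case: three levels
  typedef ExampleThreeLevelMap Type;
};
template <> struct ExampleSelector<32> {       // 32-bit pointers: two levels
  typedef ExampleTwoLevelMap Type;
};

// On a 32-bit build this is ExampleTwoLevelMap; on 64-bit, ExampleThreeLevelMap.
typedef ExampleSelector<8 * sizeof(uintptr_t)>::Type ExamplePageMap;
// ---------------------------------------------------------------------------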
// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation.  We allow allocating and freeing
// contiguous runs of pages (called a "span").
// -------------------------------------------------------------------------

class TCMalloc_PageHeap {
 public:
  void init();

  // Allocate a run of "n" pages.  Returns zero if out of memory.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: !span->free
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

  // Return the descriptor for the specified page.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

#ifdef WTF_CHANGES
  inline Span* GetDescriptorEnsureSafe(PageID p)
  {
    pagemap_.Ensure(p, 1);
    return GetDescriptor(p);
  }

  size_t ReturnedBytes() const;
#endif

  // Dump state to stderr
#ifndef WTF_CHANGES
  void Dump(TCMalloc_Printer* out);
#endif

  // Return number of bytes allocated from system
  inline uint64_t SystemBytes() const { return system_bytes_; }

  // Return number of free bytes in heap
  uint64_t FreeBytes() const {
    return (static_cast<uint64_t>(free_pages_) << kPageShift);
  }

  bool Check();
  bool CheckList(Span* list, Length min_pages, Length max_pages);

  // Release all pages on the free list for reuse by the OS:
  void ReleaseFreePages();

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }

 private:
  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
  typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span        normal;
    Span        returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Number of pages kept in free lists
  uintptr_t free_pages_;

  // Bytes allocated from system
  uint64_t system_bytes_;

  bool GrowHeap(Length n);

  // REQUIRES: span->length >= n
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.
  //
  // "released" is true iff "span" was found on a "returned" list.
  void Carve(Span* span, Length n, bool released);

  // Record the span in the pagemap under its first and last page; the
  // boundary pages are what neighbor lookups during coalescing consult.
  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }
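  // -------------------------------------------------------------------------
  // Illustrative sketch (not in the original file): how a client drives this
  // API once the heap is initialized. AllocatePages/FreePages and pageheap
  // are hypothetical names; the real callers hold pageheap_lock_ around
  // these operations.
  //
  //   void* AllocatePages(size_t bytes) {
  //     Span* span = pageheap->New(pages(bytes));     // run of whole pages
  //     if (span == NULL)
  //       return NULL;                                // out of memory
  //     // span->start is a page number; shift left to recover the address.
  //     return reinterpret_cast<void*>(span->start << kPageShift);
  //   }
  //
  //   void FreePages(void* ptr) {
  //     PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  //     pageheap->Delete(pageheap->GetDescriptor(p)); // span lookup + free
  //   }
  // -------------------------------------------------------------------------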