
fastmalloc.cpp

Source code of WebKit, the open-source browser engine on Linux; many of the commercial browsers on the market are ports of WebKit.

Language: C++
Page 1 of 5
      // Try growing just "n" pages
      ask = n;
      ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;

  uint64_t old_system_bytes = system_bytes_;
  system_bytes_ += (ask << kPageShift);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // If we have already a lot of pages allocated, just pre allocate a bunch of
  // memory for the page map. This prevents fragmentation by pagemap metadata
  // when a program keeps allocating and freeing large blocks.
  if (old_system_bytes < kPageMapBigAllocationThreshold
      && system_bytes_ >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to
    // cause any necessary coalescing to occur.
    //
    // We do not adjust free_pages_ here since Delete() will do it for us.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(Check());
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}

bool TCMalloc_PageHeap::Check() {
  ASSERT(free_[0].normal.next == &free_[0].normal);
  ASSERT(free_[0].returned.next == &free_[0].returned);
  CheckList(&large_.normal, kMaxPages, 1000000000);
  CheckList(&large_.returned, kMaxPages, 1000000000);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s].normal, s, s);
    CheckList(&free_[s].returned, s, s);
  }
  return true;
}

#if ASSERT_DISABLED
bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
  return true;
}
#else
bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->free);
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}
#endif

static void ReleaseFreeList(Span* list, Span* returned) {
  // Walk backwards through list so that when we push these
  // spans on the "returned" list, we preserve the order.
  while (!DLL_IsEmpty(list)) {
    Span* s = list->prev;
    DLL_Remove(s);
    DLL_Prepend(returned, s);
    TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                           static_cast<size_t>(s->length << kPageShift));
  }
}

void TCMalloc_PageHeap::ReleaseFreePages() {
  for (Length s = 0; s < kMaxPages; s++) {
    ReleaseFreeList(&free_[s].normal, &free_[s].returned);
  }
  ReleaseFreeList(&large_.normal, &large_.returned);
  ASSERT(Check());
}
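The DLL_IsEmpty/DLL_Remove/DLL_Prepend helpers used by ReleaseFreeList() are defined earlier in FastMalloc.cpp and are not shown in this excerpt. As rough orientation, a minimal, self-contained sketch of the dummy-header circular list convention those names imply (the Sketch_* names and SketchSpan type are illustrative, not the file's own):

struct SketchSpan {
  SketchSpan* next;
  SketchSpan* prev;
};

// The header node doubles as the "empty" sentinel: an empty list points at itself.
static void Sketch_DLL_Init(SketchSpan* list) {
  list->next = list;
  list->prev = list;
}

static bool Sketch_DLL_IsEmpty(const SketchSpan* list) {
  return list->next == list;
}

static void Sketch_DLL_Remove(SketchSpan* span) {
  span->prev->next = span->next;
  span->next->prev = span->prev;
  span->prev = 0;
  span->next = 0;
}

// Insert right after the header, i.e. at the front of the list.  Repeatedly
// prepending while walking the source list backwards (as ReleaseFreeList does)
// therefore preserves the original order on the "returned" list.
static void Sketch_DLL_Prepend(SketchSpan* list, SketchSpan* span) {
  span->next = list->next;
  span->prev = list;
  list->next->prev = span;
  list->next = span;
}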
//-------------------------------------------------------------------
// Free list
//-------------------------------------------------------------------

class TCMalloc_ThreadCache_FreeList {
 private:
  void*    list_;       // Linked list of nodes
  uint16_t length_;     // Current length
  uint16_t lowater_;    // Low water mark for list length

 public:
  void Init() {
    list_ = NULL;
    length_ = 0;
    lowater_ = 0;
  }

  // Return current length of list
  int length() const {
    return length_;
  }

  // Is list empty?
  bool empty() const {
    return list_ == NULL;
  }

  // Low-water mark management
  int lowwatermark() const { return lowater_; }
  void clear_lowwatermark() { lowater_ = length_; }

  ALWAYS_INLINE void Push(void* ptr) {
    SLL_Push(&list_, ptr);
    length_++;
  }

  void PushRange(int N, void *start, void *end) {
    SLL_PushRange(&list_, start, end);
    length_ = length_ + static_cast<uint16_t>(N);
  }

  void PopRange(int N, void **start, void **end) {
    SLL_PopRange(&list_, N, start, end);
    ASSERT(length_ >= N);
    length_ = length_ - static_cast<uint16_t>(N);
    if (length_ < lowater_) lowater_ = length_;
  }

  ALWAYS_INLINE void* Pop() {
    ASSERT(list_ != NULL);
    length_--;
    if (length_ < lowater_) lowater_ = length_;
    return SLL_Pop(&list_);
  }

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
          finder.visit(nextObject);
  }
#endif
};
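The lowater_ field tracks the smallest length the list has had since clear_lowwatermark() was last called, so a periodic pass can tell how many objects were never touched in the last interval. The real consumer is Scavenge(), defined later in the file; the following is only a hedged sketch of that idea with made-up names (SimpleList, SketchObjectsToRelease) and an assumed release policy:

// Illustrative sketch only; SimpleList stands in for TCMalloc_ThreadCache_FreeList.
struct SimpleList {
  int length_;
  int lowater_;
  int lowwatermark() const { return lowater_; }
  void clear_lowwatermark() { lowater_ = length_; }
};

// If the list never dropped below lowater_ entries, that many objects sat idle
// for the whole interval; releasing about half of them back to the central
// cache is one plausible policy (assumed here, not taken from this page).
static int SketchObjectsToRelease(SimpleList& list) {
  int idle = list.lowwatermark();
  int to_release = idle / 2;
  list.clear_lowwatermark();   // start a fresh observation window
  return to_release;
}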
//-------------------------------------------------------------------
// Data kept per thread
//-------------------------------------------------------------------

class TCMalloc_ThreadCache {
 private:
  typedef TCMalloc_ThreadCache_FreeList FreeList;
#if COMPILER(MSVC)
  typedef DWORD ThreadIdentifier;
#else
  typedef pthread_t ThreadIdentifier;
#endif

  size_t        size_;                  // Combined size of data
  ThreadIdentifier tid_;                // Which thread owns it
  bool          in_setspecific_;        // Called pthread_setspecific?
  FreeList      list_[kNumClasses];     // Array indexed by size-class

  // We sample allocations, biased by the size of the allocation
  uint32_t      rnd_;                   // Cheap random number generator
  size_t        bytes_until_sample_;    // Bytes until we sample next

  // Allocate a new heap. REQUIRES: pageheap_lock is held.
  static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);

  // Use only as pthread thread-specific destructor function.
  static void DestroyThreadCache(void* ptr);

 public:
  // All ThreadCache objects are kept in a linked list (for stats collection)
  TCMalloc_ThreadCache* next_;
  TCMalloc_ThreadCache* prev_;

  void Init(ThreadIdentifier tid);
  void Cleanup();

  // Accessors (mostly just for printing stats)
  int freelist_length(size_t cl) const { return list_[cl].length(); }

  // Total byte size in cache
  size_t Size() const { return size_; }

  void* Allocate(size_t size);
  void Deallocate(void* ptr, size_t size_class);

  void FetchFromCentralCache(size_t cl, size_t allocationSize);
  void ReleaseToCentralCache(size_t cl, int N);
  void Scavenge();
  void Print() const;

  // Record allocation of "k" bytes.  Return true iff allocation
  // should be sampled
  bool SampleAllocation(size_t k);

  // Pick next sampling point
  void PickNextSample(size_t k);

  static void                  InitModule();
  static void                  InitTSD();
  static TCMalloc_ThreadCache* GetThreadHeap();
  static TCMalloc_ThreadCache* GetCache();
  static TCMalloc_ThreadCache* GetCacheIfPresent();
  static TCMalloc_ThreadCache* CreateCacheIfNecessary();
  static void                  DeleteCache(TCMalloc_ThreadCache* heap);
  static void                  BecomeIdle();
  static void                  RecomputeThreadCacheSize();

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
          list_[sizeClass].enumerateFreeObjects(finder, reader);
  }
#endif
};
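SampleAllocation() and PickNextSample() are only declared here; their definitions appear later in the file. A hedged sketch of the size-biased countdown that the rnd_ and bytes_until_sample_ fields suggest, using assumed names and a fixed interval in place of the real pseudo-random draw:

// Sketch only: SketchSampler and kSketchMeanSampleInterval are illustrative.
static const size_t kSketchMeanSampleInterval = 256 * 1024;

struct SketchSampler {
  size_t bytes_until_sample_;

  // Subtract each allocation's size from a running countdown; a request large
  // enough to cross zero gets sampled, so bigger allocations are sampled
  // proportionally more often.
  bool SampleAllocation(size_t k) {
    if (bytes_until_sample_ > k) {
      bytes_until_sample_ -= k;
      return false;
    }
    PickNextSample();
    return true;   // caller records this allocation
  }

  // The real code derives the next sampling point from rnd_; a constant keeps
  // the sketch self-contained.
  void PickNextSample() {
    bytes_until_sample_ = kSketchMeanSampleInterval;
  }
};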
//-------------------------------------------------------------------
// Data kept per size-class in central cache
//-------------------------------------------------------------------

class TCMalloc_Central_FreeList {
 public:
  void Init(size_t cl);

  // These methods all do internal locking.

  // Insert the specified range into the central freelist.  N is the number of
  // elements in the range.
  void InsertRange(void *start, void *end, int N);

  // Returns the actual number of fetched elements into N.
  void RemoveRange(void **start, void **end, int *N);

  // Returns the number of free objects in cache.
  size_t length() {
    SpinLockHolder h(&lock_);
    return counter_;
  }

  // Returns the number of free objects in the transfer cache.
  int tc_length() {
    SpinLockHolder h(&lock_);
    return used_slots_ * num_objects_to_move[size_class_];
  }

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
  {
    for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
      ASSERT(!span->objects);

    ASSERT(!nonempty_.objects);
    static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);

    Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
    Span* remoteSpan = nonempty_.next;

    for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
      for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
        finder.visit(nextObject);
    }
  }
#endif

 private:
  // REQUIRES: lock_ is held
  // Remove object from cache and return.
  // Return NULL if no free entries in cache.
  void* FetchFromSpans();

  // REQUIRES: lock_ is held
  // Remove object from cache and return.  Fetches
  // from pageheap if cache is empty.  Only returns
  // NULL on allocation failure.
  void* FetchFromSpansSafe();

  // REQUIRES: lock_ is held
  // Release a linked list of objects to spans.
  // May temporarily release lock_.
  void ReleaseListToSpans(void *start);

  // REQUIRES: lock_ is held
  // Release an object to spans.
  // May temporarily release lock_.
  void ReleaseToSpans(void* object);

  // REQUIRES: lock_ is held
  // Populate cache by fetching from the page heap.
  // May temporarily release lock_.
  void Populate();

  // REQUIRES: lock is held.
  // Tries to make room for a TCEntry.  If the cache is full it will try to
  // expand it at the cost of some other cache size.  Return false if there is
  // no space.
  bool MakeCacheSpace();

  // REQUIRES: lock_ for locked_size_class is held.
  // Picks a "random" size class to steal TCEntry slot from.  In reality it
  // just iterates over the sizeclasses but does so without taking a lock.
  // Returns true on success.
  // May temporarily lock a "random" size class.
  static bool EvictRandomSizeClass(size_t locked_size_class, bool force);

  // REQUIRES: lock_ is *not* held.
  // Tries to shrink the Cache.  If force is true it will release objects to
  // spans if it allows it to shrink the cache.  Return false if it failed to
  // shrink the cache.  Decrements cache_size_ on success.
  // May temporarily take lock_.  If it takes lock_, the locked_size_class
  // lock is released to keep the thread from holding two size class locks
  // concurrently which could lead to a deadlock.
  bool ShrinkCache(int locked_size_class, bool force);

  // This lock protects all the data members.  cached_entries and cache_size_
  // may be looked at without holding the lock.
  SpinLock lock_;

  // We keep linked lists of empty and non-empty spans.
  size_t   size_class_;     // My size class
  Span     empty_;          // Dummy header for list of empty spans
  Span     nonempty_;       // Dummy header for list of non-empty spans
  size_t   counter_;        // Number of free objects in cache entry

  // Here we reserve space for TCEntry cache slots.  Since one size class can
  // end up getting all the TCEntries quota in the system we just preallocate
  // sufficient number of entries here.
  TCEntry tc_slots_[kNumTransferEntries];

  // Number of currently used cached entries in tc_slots_.  This variable is
  // updated under a lock but can be read without one.
  int32_t used_slots_;
  // The current number of slots for this size class.  This is an
  // adaptive value that is increased if there is lots of traffic
  // on a given size class.
  int32_t cache_size_;
};

// Pad each CentralCache object to multiple of 64 bytes
class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
 private:
  char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
};

//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------

// Central cache -- a collection of free-lists, one per size-class.
// We have a separate lock per free-list to reduce contention.
static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];

// Page-level allocator
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
static bool phinited = false;

// Avoid extra level of indirection by making "pageheap" be just an alias
// of pageheap_memory.
typedef union {
    void* m_memory;
    TCMalloc_PageHeap* m_pageHeap;
} PageHeapUnion;

static inline TCMalloc_PageHeap* getPageHeap()
{
    PageHeapUnion u = { &pageheap_memory[0] };
    return u.m_pageHeap;
}

#define pageheap getPageHeap()

// If TLS is available, we also store a copy
// of the per-thread object in a __thread variable
// since __thread variables are faster to read
// than pthread_getspecific().  We still need
// pthread_setspecific() because __thread
// variables provide no way to run cleanup
// code when a thread is destroyed.
#ifdef HAVE_TLS
static __thread TCMalloc_ThreadCache *threadlocal_heap;
#endif
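A few lines up, TCMalloc_Central_FreeListPadded rounds each central free list (which carries its own SpinLock) up to a multiple of 64 bytes, presumably so that adjacent elements of the central_cache array do not share a cache line. A self-contained sketch of the same idiom with illustrative types, plus the arithmetic worked out:

// Sketch only: SketchCentralList stands in for TCMalloc_Central_FreeList.
struct SketchCentralList {
  long  lock_;
  long  counter_;
  void* slots_[4];
};

// Same padding formula as above.  On a typical LP64 target sizeof(SketchCentralList)
// is 48, so the pad is (64 - 48 % 64) % 64 = 16 bytes and the padded object is 64
// bytes.  When the base size is already a multiple of 64 the pad evaluates to 0,
// which (like the real code) relies on the zero-length-array compiler extension.
struct SketchCentralListPadded : public SketchCentralList {
  char pad_[(64 - (sizeof(SketchCentralList) % 64)) % 64];
};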
// Thread-specific key.  Initialization here is somewhat tricky
// because some Linux startup code invokes malloc() before it
// is in a good enough state to handle pthread_keycreate().
// Therefore, we use TSD keys only after tsd_inited is set to true.
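The two comments above describe one pattern: a __thread pointer gives a cheap per-thread lookup, pthread_setspecific() is still used so a destructor can run at thread exit, and a tsd_inited flag guards against malloc() calls that arrive before the pthread key exists. The real GetThreadHeap()/InitTSD()/DestroyThreadCache() are defined later in the file; what follows is only a minimal sketch of the pattern with made-up Sketch* names (and plain malloc/free standing in for the allocator's own metadata arena):

#include <pthread.h>
#include <stdlib.h>

struct SketchThreadCache { int dummy_; /* per-thread allocator state */ };

static bool sketch_tsd_inited = false;
static pthread_key_t sketch_heap_key;
#ifdef HAVE_TLS
static __thread SketchThreadCache* sketch_threadlocal_heap;  // fast-path copy
#endif

// Registered as the pthread key destructor: runs automatically at thread exit,
// which a __thread variable alone cannot arrange.
static void SketchDestroyThreadCache(void* ptr) {
  free(ptr);
}

static void SketchInitTSD() {
  pthread_key_create(&sketch_heap_key, SketchDestroyThreadCache);
  sketch_tsd_inited = true;   // only from here on is pthread TSD safe to use
}

static SketchThreadCache* SketchGetCacheIfPresent() {
#ifdef HAVE_TLS
  if (sketch_threadlocal_heap)
    return sketch_threadlocal_heap;   // plain load, no pthread_getspecific() call
#endif
  if (!sketch_tsd_inited)
    return NULL;   // malloc() ran before the key existed; caller takes a slow path
  return static_cast<SketchThreadCache*>(pthread_getspecific(sketch_heap_key));
}

static SketchThreadCache* SketchCreateCache() {
  SketchThreadCache* heap =
      static_cast<SketchThreadCache*>(malloc(sizeof(SketchThreadCache)));
  pthread_setspecific(sketch_heap_key, heap);   // arms the exit-time destructor
#ifdef HAVE_TLS
  sketch_threadlocal_heap = heap;               // cache for the fast path
#endif
  return heap;
}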
