
stl_alloc.h

From the complete source code of Pocket gcc, a gcc compiler that runs on the Pocket PC, by the Russian developer Mamaich.
Page 1 of 3
   *  approach.  If you do not wish to share the free lists with the main
   *  default_alloc instance, instantiate this with a non-zero __inst.
   *
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<bool __threads, int __inst>
    class __default_alloc_template
    {
    private:
      enum {_ALIGN = 8};
      enum {_MAX_BYTES = 128};
      enum {_NFREELISTS = _MAX_BYTES / _ALIGN};

      union _Obj
      {
        union _Obj* _M_free_list_link;
        char        _M_client_data[1];    // The client sees this.
      };

      static _Obj* volatile         _S_free_list[_NFREELISTS];

      // Chunk allocation state.
      static char*                  _S_start_free;
      static char*                  _S_end_free;
      static size_t                 _S_heap_size;

      static _STL_mutex_lock        _S_node_allocator_lock;

      static size_t
      _S_round_up(size_t __bytes)
      { return (((__bytes) + (size_t) _ALIGN - 1) & ~((size_t) _ALIGN - 1)); }

      static size_t
      _S_freelist_index(size_t __bytes)
      { return (((__bytes) + (size_t) _ALIGN - 1) / (size_t) _ALIGN - 1); }

      // Returns an object of size __n, and optionally adds to size __n
      // free list.
      static void*
      _S_refill(size_t __n);

      // Allocates a chunk for nobjs of size size.  nobjs may be reduced
      // if it is inconvenient to allocate the requested number.
      static char*
      _S_chunk_alloc(size_t __size, int& __nobjs);

      // It would be nice to use _STL_auto_lock here.  But we need a
      // test whether threads are in use.
      struct _Lock
      {
        _Lock() { if (__threads) _S_node_allocator_lock._M_acquire_lock(); }
        ~_Lock() { if (__threads) _S_node_allocator_lock._M_release_lock(); }
      } __attribute__ ((__unused__));
      friend struct _Lock;

      static _Atomic_word _S_force_new;

    public:
      // __n must be > 0
      static void*
      allocate(size_t __n)
      {
        void* __ret = 0;

        // If there is a race through here, assume answer from getenv
        // will resolve in same direction.  Inspired by techniques
        // to efficiently support threading found in basic_string.h.
        if (_S_force_new == 0)
          {
            if (getenv("GLIBCPP_FORCE_NEW"))
              __atomic_add(&_S_force_new, 1);
            else
              __atomic_add(&_S_force_new, -1);
          }

        if ((__n > (size_t) _MAX_BYTES) || (_S_force_new > 0))
          __ret = __new_alloc::allocate(__n);
        else
          {
            _Obj* volatile* __my_free_list = _S_free_list
              + _S_freelist_index(__n);
            // Acquire the lock here with a constructor call.  This
            // ensures that it is released in exit or during stack
            // unwinding.
            _Lock __lock_instance;
            _Obj* __restrict__ __result = *__my_free_list;
            if (__builtin_expect(__result == 0, 0))
              __ret = _S_refill(_S_round_up(__n));
            else
              {
                *__my_free_list = __result->_M_free_list_link;
                __ret = __result;
              }
            if (__builtin_expect(__ret == 0, 0))
              __throw_bad_alloc();
          }
        return __ret;
      }

      // __p may not be 0
      static void
      deallocate(void* __p, size_t __n)
      {
        if ((__n > (size_t) _MAX_BYTES) || (_S_force_new > 0))
          __new_alloc::deallocate(__p, __n);
        else
          {
            _Obj* volatile* __my_free_list = _S_free_list
              + _S_freelist_index(__n);
            _Obj* __q = (_Obj*)__p;

            // Acquire the lock here with a constructor call.  This
            // ensures that it is released in exit or during stack
            // unwinding.
            _Lock __lock_instance;
            __q->_M_free_list_link = *__my_free_list;
            *__my_free_list = __q;
          }
      }

      static void*
      reallocate(void* __p, size_t __old_sz, size_t __new_sz);
    };

  template<bool __threads, int __inst> _Atomic_word
  __default_alloc_template<__threads, __inst>::_S_force_new = 0;

  template<bool __threads, int __inst>
    inline bool
    operator==(const __default_alloc_template<__threads,__inst>&,
               const __default_alloc_template<__threads,__inst>&)
    { return true; }

  template<bool __threads, int __inst>
    inline bool
    operator!=(const __default_alloc_template<__threads,__inst>&,
               const __default_alloc_template<__threads,__inst>&)
    { return false; }
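  // Worked example of the size-class arithmetic above: with _ALIGN == 8
  // and _MAX_BYTES == 128, the _NFREELISTS == 16 free lists hold blocks
  // of 8, 16, ..., 128 bytes.  A 13-byte request is rounded up by
  // _S_round_up to (13 + 7) & ~7 == 16 and mapped by _S_freelist_index
  // to (13 + 7)/8 - 1 == 1, i.e. the 16-byte list.  Requests larger
  // than _MAX_BYTES bypass the free lists and go straight to
  // __new_alloc.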
  // We allocate memory in large chunks in order to avoid fragmenting the
  // heap too much.  We assume that __size is properly aligned.  We hold
  // the allocation lock.
  template<bool __threads, int __inst>
    char*
    __default_alloc_template<__threads, __inst>::
    _S_chunk_alloc(size_t __size, int& __nobjs)
    {
      char* __result;
      size_t __total_bytes = __size * __nobjs;
      size_t __bytes_left = _S_end_free - _S_start_free;

      if (__bytes_left >= __total_bytes)
        {
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else if (__bytes_left >= __size)
        {
          __nobjs = (int)(__bytes_left / __size);
          __total_bytes = __size * __nobjs;
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else
        {
          size_t __bytes_to_get =
            2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
          // Try to make use of the left-over piece.
          if (__bytes_left > 0)
            {
              _Obj* volatile* __my_free_list =
                _S_free_list + _S_freelist_index(__bytes_left);

              ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__my_free_list;
              *__my_free_list = (_Obj*)(void*)_S_start_free;
            }
          _S_start_free = (char*) __new_alloc::allocate(__bytes_to_get);
          if (_S_start_free == 0)
            {
              size_t __i;
              _Obj* volatile* __my_free_list;
              _Obj* __p;

              // Try to make do with what we have.  That can't hurt.  We
              // do not try smaller requests, since that tends to result
              // in disaster on multi-process machines.
              __i = __size;
              for (; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN)
                {
                  __my_free_list = _S_free_list + _S_freelist_index(__i);
                  __p = *__my_free_list;
                  if (__p != 0)
                    {
                      *__my_free_list = __p->_M_free_list_link;
                      _S_start_free = (char*)__p;
                      _S_end_free = _S_start_free + __i;
                      return _S_chunk_alloc(__size, __nobjs);
                      // Any leftover piece will eventually make it to the
                      // right free list.
                    }
                }
              _S_end_free = 0;        // In case of exception.
              _S_start_free = (char*)__new_alloc::allocate(__bytes_to_get);
              // This should either throw an exception or remedy the situation.
              // Thus we assume it succeeded.
            }
          _S_heap_size += __bytes_to_get;
          _S_end_free = _S_start_free + __bytes_to_get;
          return _S_chunk_alloc(__size, __nobjs);
        }
    }
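  // Worked example of the growth policy in _S_chunk_alloc: the first
  // refill of the 16-byte list asks for __nobjs == 20 objects, so
  // __total_bytes is 320 and, with _S_heap_size still 0, __bytes_to_get
  // is 2*320 + 0 == 640.  320 bytes satisfy the refill and the other
  // 320 remain in [_S_start_free, _S_end_free) for later requests.
  // Since each acquisition also adds _S_round_up(_S_heap_size >> 4),
  // chunks grow as the total heap grows.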
  // Returns an object of size __n, and optionally adds to "size
  // __n"'s free list.  We assume that __n is properly aligned.  We
  // hold the allocation lock.
  template<bool __threads, int __inst>
    void*
    __default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
    {
      int __nobjs = 20;
      char* __chunk = _S_chunk_alloc(__n, __nobjs);
      _Obj* volatile* __my_free_list;
      _Obj* __result;
      _Obj* __current_obj;
      _Obj* __next_obj;
      int __i;

      if (1 == __nobjs)
        return __chunk;
      __my_free_list = _S_free_list + _S_freelist_index(__n);

      // Build free list in chunk.
      __result = (_Obj*)(void*)__chunk;
      *__my_free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
      for (__i = 1; ; __i++)
        {
          __current_obj = __next_obj;
          __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
          if (__nobjs - 1 == __i)
            {
              __current_obj->_M_free_list_link = 0;
              break;
            }
          else
            __current_obj->_M_free_list_link = __next_obj;
        }
      return __result;
    }

  template<bool __threads, int __inst>
    void*
    __default_alloc_template<__threads, __inst>::
    reallocate(void* __p, size_t __old_sz, size_t __new_sz)
    {
      void* __result;
      size_t __copy_sz;

      if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES)
        return realloc(__p, __new_sz);
      if (_S_round_up(__old_sz) == _S_round_up(__new_sz))
        return __p;
      __result = allocate(__new_sz);
      __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
      memcpy(__result, __p, __copy_sz);
      deallocate(__p, __old_sz);
      return __result;
    }

  template<bool __threads, int __inst>
    _STL_mutex_lock
    __default_alloc_template<__threads,__inst>::_S_node_allocator_lock
    __STL_MUTEX_INITIALIZER;

  template<bool __threads, int __inst>
    char* __default_alloc_template<__threads,__inst>::_S_start_free = 0;

  template<bool __threads, int __inst>
    char* __default_alloc_template<__threads,__inst>::_S_end_free = 0;

  template<bool __threads, int __inst>
    size_t __default_alloc_template<__threads,__inst>::_S_heap_size = 0;

  template<bool __threads, int __inst>
    typename __default_alloc_template<__threads,__inst>::_Obj* volatile
    __default_alloc_template<__threads,__inst>::_S_free_list[_NFREELISTS];

  typedef __default_alloc_template<true,0>    __alloc;
  typedef __default_alloc_template<false,0>   __single_client_alloc;
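  // Usage sketch (illustrative, not part of the original header): library
  // internals can call through the two typedefs directly, e.g.
  //
  //   void* __p = __alloc::allocate(32);   // served from the 32-byte list
  //   __alloc::deallocate(__p, 32);        // pushed back onto that list
  //
  // __single_client_alloc performs no locking, so it is only safe when a
  // single thread ever touches the allocator.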
  /**
   *  @brief  The "standard" allocator, as per [20.4].
   *
   *  The private _Alloc is "SGI" style.  (See comments at the top
   *  of stl_alloc.h.)
   *
   *  The underlying allocator behaves as follows.
   *    - __default_alloc_template is used via two typedefs
   *    - "__single_client_alloc" typedef does no locking for threads
   *    - "__alloc" typedef is threadsafe via the locks
   *    - __new_alloc is used for memory requests
   *
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<typename _Tp>
    class allocator
    {
      typedef __alloc _Alloc;          // The underlying allocator.

    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef allocator<_Tp1> other; };
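The listing breaks off here; the rest of class allocator continues on the following pages. As a quick check of the size-class arithmetic, the following minimal standalone sketch reproduces the two helpers. The names ALIGN, round_up, and freelist_index are ours, chosen to avoid the reserved __ prefix; the formulas are copied verbatim from _S_round_up and _S_freelist_index above.

    #include <cstddef>
    #include <cstdio>

    // Stand-alone copies of the two size-class helpers from stl_alloc.h.
    const std::size_t ALIGN = 8;

    // Same formula as _S_round_up: round up to a multiple of ALIGN.
    std::size_t round_up(std::size_t bytes)
    { return (bytes + ALIGN - 1) & ~(ALIGN - 1); }

    // Same formula as _S_freelist_index: which free list serves this size.
    std::size_t freelist_index(std::size_t bytes)
    { return (bytes + ALIGN - 1) / ALIGN - 1; }

    int main()
    {
      const std::size_t sizes[] = { 1, 8, 13, 64, 128 };
      for (int i = 0; i < 5; ++i)
        std::printf("%3lu bytes -> rounded to %3lu, free list #%lu\n",
                    (unsigned long) sizes[i],
                    (unsigned long) round_up(sizes[i]),
                    (unsigned long) freelist_index(sizes[i]));
      return 0;
    }

Running it prints 1 -> 8 (list 0), 8 -> 8 (list 0), 13 -> 16 (list 1), 64 -> 64 (list 7), and 128 -> 128 (list 15), matching the 16 lists of sizes 8 through 128.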
