mt_allocator.h (from mingw32.rar)

		  __bin._M_first[__thread_id] = __bin._M_first[0];
		  if (__block_count >= __bin._M_free[0])
		    {
		      __bin._M_free[__thread_id] = __bin._M_free[0];
		      __bin._M_free[0] = 0;
		      __bin._M_first[0] = NULL;
		    }
		  else
		    {
		      __bin._M_free[__thread_id] = __block_count;
		      __bin._M_free[0] -= __block_count;
		      --__block_count;
		      __block = __bin._M_first[0];
		      while (__block_count-- > 0)
			__block = __block->_M_next;
		      __bin._M_first[0] = __block->_M_next;
		      __block->_M_next = NULL;
		    }
		  __gthread_mutex_unlock(__bin._M_mutex);
		}
	    }
	  else
#endif
	    {
	      void* __v = ::operator new(_S_options._M_chunk_size);
	      __bin._M_first[0] = static_cast<_Block_record*>(__v);
	      
	      --__block_count;
	      __block = __bin._M_first[0];
	      while (__block_count-- > 0)
		{
		  char* __c = reinterpret_cast<char*>(__block) + __bin_size;
		  __block->_M_next = reinterpret_cast<_Block_record*>(__c);
		  __block = __block->_M_next;
		}
	      __block->_M_next = NULL;
	    }
	}

      __block = __bin._M_first[__thread_id];
      __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
#ifdef __GTHREADS
      if (__gthread_active_p())
	{
	  __block->_M_thread_id = __thread_id;
	  --__bin._M_free[__thread_id];
	  ++__bin._M_used[__thread_id];
	}
#endif
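      // The _Block_record bookkeeping data lives in the first _M_align
      // bytes of the block; the caller is handed the address just past
      // it, and deallocate() subtracts the same offset to recover the
      // record.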

      char* __c = reinterpret_cast<char*>(__block) + _S_options._M_align;
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
  
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    deallocate(pointer __p, size_type __n)
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
	{
	  ::operator delete(__p);
	  return;
	}
      
      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const _Bin_record& __bin = _S_bin[__which];

      char* __c = reinterpret_cast<char*>(__p) - _S_options._M_align;
      _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
      
#ifdef __GTHREADS
      if (__gthread_active_p())
	{
	  // Calculate the number of records to remove from our freelist:
	  // in order to avoid too much contention we wait until the
	  // number of records is "high enough".
	  const size_t __thread_id = _S_get_thread_id();

	  long __remove = ((__bin._M_free[__thread_id]
			    * _S_options._M_freelist_headroom)
			   - __bin._M_used[__thread_id]);
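	  // E.g. with a freelist headroom of 10, 100 free blocks on
	  // this thread's list and 5 blocks in use give
	  // __remove == 100 * 10 - 5 == 995; if the two thresholds in
	  // the check below are also met, 995 / 10 == 99 blocks are
	  // handed back to the global freelist.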
	  if (__remove > static_cast<long>(100 * (_S_bin_size - __which)
					   * _S_options._M_freelist_headroom)
	      && __remove > static_cast<long>(__bin._M_free[__thread_id]))
	    {
	      _Block_record* __tmp = __bin._M_first[__thread_id];
	      _Block_record* __first = __tmp;
	      __remove /= _S_options._M_freelist_headroom;
	      const long __removed = __remove;
	      --__remove;
	      while (__remove-- > 0)
		__tmp = __tmp->_M_next;
	      __bin._M_first[__thread_id] = __tmp->_M_next;
	      __bin._M_free[__thread_id] -= __removed;

	      __gthread_mutex_lock(__bin._M_mutex);
	      __tmp->_M_next = __bin._M_first[0];
	      __bin._M_first[0] = __first;
	      __bin._M_free[0] += __removed;
	      __gthread_mutex_unlock(__bin._M_mutex);
	    }
	  
	  // Return this block to our list and update counters and
	  // owner id as needed.
	  --__bin._M_used[__block->_M_thread_id];

	  __block->_M_next = __bin._M_first[__thread_id];
	  __bin._M_first[__thread_id] = __block;
	  
	  ++__bin._M_free[__thread_id];
	}
      else
#endif
	{
	  // Single-threaded application: return to the global pool.
	  __block->_M_next = __bin._M_first[0];
	  __bin._M_first[0] = __block;
	}
    }
  
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_initialize()
    {
      // This method is called on the first allocation (when _S_init is still
      // false) to create the bins.
      
      // Ensure that the static initialization of _S_options has
      // happened.  This depends on (a) _M_align == 0 being an invalid
      // value that is only present at startup, and (b) the real
      // static initialization that happens later not actually
      // changing anything.
      if (_S_options._M_align == 0) 
        new (&_S_options) _Tune;
  
      // _M_force_new must not change after the first allocate(),
      // which in turn calls this method, so if it's false, it's false
      // forever and we don't need to return here ever again.
      if (_S_options._M_force_new) 
	{
	  _S_init = true;
	  return;
	}

      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically-initialized to one.
      size_t __bin_size = _S_options._M_min_bin;
      while (_S_options._M_max_bytes > __bin_size)
	{
	  __bin_size <<= 1;
	  ++_S_bin_size;
	}
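      // E.g. with _M_max_bytes == 128 and _M_min_bin == 8, this yields
      // five bins holding blocks of 8, 16, 32, 64 and 128 bytes.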

      // Setup the bin map for quick lookup of the relevant bin.
      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
      _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));

      _Binmap_type* __bp = _S_binmap;
      _Binmap_type __bin_max = _S_options._M_min_bin;
      _Binmap_type __bint = 0;
      for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
        {
          if (__ct > __bin_max)
            {
              __bin_max <<= 1;
              ++__bint;
            }
          *__bp++ = __bint;
        }
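      // E.g. with _M_min_bin == 8, request sizes of up to 8 bytes map
      // to bin 0, 9-16 bytes to bin 1, 17-32 bytes to bin 2, and so on
      // up to _M_max_bytes.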

      // Initialize _S_bin and its members.
      void* __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
      _S_bin = static_cast<_Bin_record*>(__v);

      // If __gthread_active_p(), create and initialize the list of
      // free thread ids. Single-threaded applications use thread id 0
      // directly and have no need for this.
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
	  const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
	  __v = ::operator new(__k);
          _S_thread_freelist_first = static_cast<_Thread_record*>(__v);

	  // NOTE! The first assignable thread id is 1, since the
	  // global pool uses id 0.
          size_t __i;
          for (__i = 1; __i < _S_options._M_max_threads; ++__i)
            {
	      _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
              __tr._M_next = &_S_thread_freelist_first[__i];
              __tr._M_id = __i;
            }

          // Set last record.
          _S_thread_freelist_first[__i - 1]._M_next = NULL;
          _S_thread_freelist_first[__i - 1]._M_id = __i;

	  // Make sure this is initialized.
#ifndef __GTHREAD_MUTEX_INIT
	  __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
#endif
          // Initialize the per-thread key used to hold each thread's
          // pointer into the thread freelist.
          __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);

	  const size_t __max_threads = _S_options._M_max_threads + 1;
	  for (size_t __n = 0; __n < _S_bin_size; ++__n)
	    {
	      _Bin_record& __bin = _S_bin[__n];
	      __v = ::operator new(sizeof(_Block_record*) * __max_threads);
	      __bin._M_first = static_cast<_Block_record**>(__v);

	      __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_free = static_cast<size_t*>(__v);

	      __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_used = static_cast<size_t*>(__v);

	      __v = ::operator new(sizeof(__gthread_mutex_t));
              __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
                *__bin._M_mutex = __tmp;
              }
#else
              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif

	      for (size_t __threadn = 0; __threadn < __max_threads;
		   ++__threadn)
		{
		  __bin._M_first[__threadn] = NULL;
		  __bin._M_free[__threadn] = 0;
		  __bin._M_used[__threadn] = 0;
		}
	    }
	}
      else
#endif	
	for (size_t __n = 0; __n < _S_bin_size; ++__n)
	  {
	    _Bin_record& __bin = _S_bin[__n];
	    __v = ::operator new(sizeof(_Block_record*));
	    __bin._M_first = static_cast<_Block_record**>(__v);
	    __bin._M_first[0] = NULL;
	  }

      _S_init = true;
    }

  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
#ifdef __GTHREADS
      // If we have thread support and it is active, check the thread
      // key value: if it is set, return its id; otherwise take the
      // first record from _S_thread_freelist, set the key, and return
      // that record's id.
      if (__gthread_active_p())
        {
          _Thread_record* __freelist_pos =
	    static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key)); 
	  if (__freelist_pos == NULL)
            {
	      // Since _S_options._M_max_threads must be larger than
	      // the theoretical maximum number of threads of the OS,
	      // the list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);

              __gthread_setspecific(_S_thread_key, 
				    static_cast<void*>(__freelist_pos));
            }
          return __freelist_pos->_M_id;
        }
#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }

#ifdef __GTHREADS
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_destroy_thread_key(void* __freelist_pos)
    {
      // Return this thread id record to the front of the thread freelist.
      __gthread_mutex_lock(&_S_thread_freelist_mutex);
      _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
      __tr->_M_next = _S_thread_freelist_first;
      _S_thread_freelist_first = __tr;
      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
    }
#endif

  template<typename _Tp>
    inline bool
    operator==(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return true; }
  
  template<typename _Tp>
    inline bool
    operator!=(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return false; }

  template<typename _Tp> 
    bool __mt_alloc<_Tp>::_S_init = false;

  template<typename _Tp> 
    typename __mt_alloc<_Tp>::_Tune __mt_alloc<_Tp>::_S_options;

  template<typename _Tp> 
    typename __mt_alloc<_Tp>::_Binmap_type* __mt_alloc<_Tp>::_S_binmap;

  template<typename _Tp> 
    typename __mt_alloc<_Tp>::_Bin_record* volatile __mt_alloc<_Tp>::_S_bin;

  template<typename _Tp> 
    size_t __mt_alloc<_Tp>::_S_bin_size = 1;

  // Actual initialization in _S_initialize().
#ifdef __GTHREADS
  template<typename _Tp> 
    __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;

  template<typename _Tp> 
    typename __mt_alloc<_Tp>::_Thread_record*
    volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;

  template<typename _Tp> 
    __gthread_key_t __mt_alloc<_Tp>::_S_thread_key;

  template<typename _Tp> 
    __gthread_mutex_t
#ifdef __GTHREAD_MUTEX_INIT
    __mt_alloc<_Tp>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
#else
    __mt_alloc<_Tp>::_S_thread_freelist_mutex;
#endif
#endif
} // namespace __gnu_cxx

#endif
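
// A minimal usage sketch, assuming the header is installed as
// <ext/mt_allocator.h>: __mt_alloc satisfies the standard Allocator
// requirements, so it can be supplied as a container's allocator
// argument (the local names below are purely illustrative).
//
//   #include <list>
//   #include <ext/mt_allocator.h>
//
//   int main()
//   {
//     typedef __gnu_cxx::__mt_alloc<int> int_alloc;
//     std::list<int, int_alloc> l;
//     for (int i = 0; i < 1000; ++i)
//       l.push_back(i);   // blocks are served from per-thread bins
//     return 0;
//   }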
