
allocators.cpp

From: a new GUI for mplayer under Linux, written with Qt (C++ source).
    // Try to make do with what we have.  That can't hurt.
    // We do not try smaller requests, since that tends
    // to result in disaster on multi-process machines.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
      __p = *__my_free_list;
      if (0 != __p) {
        *__my_free_list = __p->_M_next;
        _S_start_free = __REINTERPRET_CAST(char*, __p);
        _S_end_free = _S_start_free + __i;
        return _S_chunk_alloc(_p_size, __nobjs);
        // Any leftover piece will eventually make it to the
        // right free list.
      }
    }
    _S_end_free = 0;    // In case of exception.
    _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    /*
    (char*)malloc_alloc::allocate(__bytes_to_get);
    */

    // This should either throw an exception or remedy the
    // situation.  Thus we assume it succeeded.
  }

  _S_heap_size += __bytes_to_get;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = _S_chunks;
  _S_chunks = __REINTERPRET_CAST(_Obj*, _S_start_free);
#  endif
  _S_end_free = _S_start_free + __bytes_to_get;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_start_free += sizeof(_Obj);
#  endif
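  // The pool [_S_start_free, _S_end_free) now holds the fresh chunk, so
  // the recursive call below can carve the requested objects out of it.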
  return _S_chunk_alloc(_p_size, __nobjs);
}

/* Returns an object of size __n, and optionally adds to the size __n free list. */
/* We assume that __n is properly aligned.                                  */
/* We hold the allocation lock.                                             */
_Node_alloc_obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (1 == __nobjs) return __REINTERPRET_CAST(_Obj*, __chunk);

  _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj* __result;
  _Obj* __current_obj;
  _Obj* __next_obj;

  /* Build free list in chunk */
  __result = __REINTERPRET_CAST(_Obj*, __chunk);
  *__my_free_list = __next_obj = __REINTERPRET_CAST(_Obj*, __chunk + __n);
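  /* The first object goes to the caller; the remaining __nobjs - 1 are  */
  /* chained through _M_next.  The double-decrement loop below runs      */
  /* __nobjs - 2 times; the last link is terminated after the loop.      */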
  for (--__nobjs; --__nobjs; ) {
    __current_obj = __next_obj;
    __next_obj = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __next_obj) + __n);
    __current_obj->_M_next = __next_obj;
  }
  __next_obj->_M_next = 0;
  return __result;
}
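
/* Illustration (assuming the usual _ALIGN of 8): _S_refill(16) asks     */
/* _S_chunk_alloc for 20 objects, i.e. a 320-byte chunk.  The first      */
/* 16 bytes are returned to the caller; the other 19 objects end up      */
/* linked on _S_free_list[_S_FREELIST_INDEX(16)].                        */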

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ ++_S_alloc_counter(); }

void __node_alloc_impl::_S_dealloc_call() {
  __stl_atomic_t &counter = _S_alloc_counter();
  if (--counter == 0)
  { _S_chunk_dealloc(); }
}

/* We deallocate all the memory chunks      */
void __node_alloc_impl::_S_chunk_dealloc() {
  _Obj *__pcur = _S_chunks, *__pnext;
  while (__pcur != 0) {
    __pnext = __pcur->_M_next;
    __stlp_chunck_free(__pcur);
    __pcur = __pnext;
  }
  _S_chunks = 0;
  _S_start_free = _S_end_free = 0;
  _S_heap_size = 0;
  memset(__REINTERPRET_CAST(char*, __CONST_CAST(_Obj**, &_S_free_list[0])), 0, _STLP_NFREELISTS * sizeof(_Obj*));
}
#  endif /* _STLP_DO_CLEAN_NODE_ALLOC */

#else /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */

void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj* __r = _S_free_list[_S_FREELIST_INDEX(__n)].pop();
  if (__r  == 0)
  { __r = _S_refill(__n); }

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _S_free_list[_S_FREELIST_INDEX(__n)].push(__STATIC_CAST(_Obj*, __p));

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
}
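
// Usage sketch (illustrative only, not part of the original source):
// _M_allocate takes its size argument by reference and rounds it up in
// place; callers must report the same size back when deallocating, e.g.
//   size_t __sz = 20;                      // becomes 24 when _ALIGN == 8
//   void*  __p  = __node_alloc_impl::_M_allocate(__sz);
//   /* ... use the node ... */
//   __node_alloc_impl::_M_deallocate(__p, __sz);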

/* Returns an object of size __n, and optionally adds additional ones to    */
/* the freelist of objects of size __n.                                     */
/* We assume that __n is properly aligned.                                  */
__node_alloc_impl::_Obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (__nobjs <= 1)
    return __REINTERPRET_CAST(_Obj*, __chunk);

  // Push all new nodes (minus first one) onto freelist
  _Obj* __result   = __REINTERPRET_CAST(_Obj*, __chunk);
  _Obj* __cur_item = __result;
  _Freelist* __my_freelist = _S_free_list + _S_FREELIST_INDEX(__n);
  for (--__nobjs; __nobjs != 0; --__nobjs) {
    __cur_item  = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __cur_item) + __n);
    __my_freelist->push(__cur_item);
  }
  return __result;
}
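
// Illustration: if _S_chunk_alloc can only deliver, say, 7 objects of
// size __n, the first one is returned to the caller and the remaining 6
// are pushed one at a time onto the lock-free freelist for size __n.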

/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  //We are going to add a small memory block to keep track of all the
  //allocated blocks' addresses; we need to do so while respecting the
  //memory alignment.  The following static assertion checks that the
  //reserved block is big enough to store a pointer.
  _STLP_STATIC_ASSERT(sizeof(_Obj) <= _ALIGN)
#  endif
  char*  __result       = 0;
  __stl_atomic_t __total_bytes  = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;

  _FreeBlockHeader* __block = __STATIC_CAST(_FreeBlockHeader*, _S_free_mem_blocks.pop());
  if (__block != 0) {
    // We checked a block out and can now mess with it with impunity.
    // We'll put the remainder back into the list if we're done with it below.
    char*  __buf_start  = __REINTERPRET_CAST(char*, __block);
    __stl_atomic_t __bytes_left = __block->_M_end - __buf_start;

    if ((__bytes_left < __total_bytes) && (__bytes_left >= __STATIC_CAST(__stl_atomic_t, _p_size))) {
      // There's enough left for at least one object, but not as much as we wanted
      __result      = __buf_start;
      __nobjs       = (int)(__bytes_left/_p_size);
      __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }
    else if (__bytes_left >= __total_bytes) {
      // The block has enough left to satisfy all that was asked for
      __result      = __buf_start;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }
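    // At this point __result is either 0 (the block cannot hold even one
    // object) or points at __nobjs objects; __buf_start and __bytes_left
    // describe whatever tail of the block remains.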

    if (__bytes_left != 0) {
      // There is still some memory left over in block after we satisfied our request.
      if ((__result != 0) && (__bytes_left >= sizeof(_FreeBlockHeader))) {
        // We were able to allocate at least one object and there is still enough
        // left to put remainder back into list.
        _FreeBlockHeader* __newblock = __REINTERPRET_CAST(_FreeBlockHeader*, __buf_start);
        __newblock->_M_end  = __block->_M_end;
        _S_free_mem_blocks.push(__newblock);
      }
      else {
        // We were not able to allocate enough for at least one object.
        // Shove into freelist of nearest (rounded-down!) size.
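        // e.g. with _ALIGN == 8: 13 bytes left gives
        // _S_round_up(14) - 8 == 8, so the piece lands on the 8-byte
        // freelist and the trailing 5 bytes are simply wasted.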
        size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
        if (__rounded_down > 0)
          _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push((_Obj*)__buf_start);
      }
    }
    if (__result != 0)
      return __result;
  }

  // We couldn't satisfy it from the list of free blocks, get new memory.
  __stl_atomic_t __bytes_to_get = 2 * __total_bytes + __STATIC_CAST(__stl_atomic_t, _S_round_up(_S_heap_size >> 4))
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + _ALIGN
#  endif
    ;
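  // Illustration (assuming _ALIGN == 8): the very first request for 20
  // objects of 16 bytes has __total_bytes == 320 and _S_heap_size == 0,
  // so __bytes_to_get == 640 (plus _ALIGN under _STLP_DO_CLEAN_NODE_ALLOC).
  // The _S_heap_size >> 4 term makes chunks grow with the heap, amortizing
  // the number of calls into __stlp_chunk_malloc.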

  __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  // Alignment check
  _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

  if (0 == __result) {
    // Allocation failed; try to cannibalize from the freelist of a larger object size.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      _Obj* __p  = _S_free_list[_S_FREELIST_INDEX(__i)].pop();
      if (0 != __p) {
        if (__i < sizeof(_FreeBlockHeader)) {
          // Not enough to put into list of free blocks, divvy it up here.
          // Use as much as possible for this request and shove remainder into freelist.
          __nobjs = (int)(__i/_p_size);
          __total_bytes = __nobjs * __STATIC_CAST(__stl_atomic_t, _p_size);
          size_t __bytes_left = __i - __total_bytes;
          size_t __rounded_down = _S_round_up(__bytes_left+1) - (size_t)_ALIGN;
          if (__rounded_down > 0) {
            _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push(__REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __p) + __total_bytes));
          }
          return __REINTERPRET_CAST(char*, __p);
        }
        else {
          // Add node to list of available blocks and recursively allocate from it.
          _FreeBlockHeader* __newblock = (_FreeBlockHeader*)__p;
          __newblock->_M_end  = __REINTERPRET_CAST(char*, __p) + __i;
          _S_free_mem_blocks.push(__newblock);
          return _S_chunk_alloc(_p_size, __nobjs);
        }
      }
    }

    // We were not able to find something in a freelist, try to allocate a smaller amount.
    __bytes_to_get  = __total_bytes
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
      + _ALIGN
#  endif
      ;
    __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    // Alignment check
    _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

    // This should either throw an exception or remedy the situation.
    // Thus we assume it succeeded.
  }

  _STLP_ATOMIC_ADD(&_S_heap_size, __bytes_to_get);

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We have to track the allocated memory chunks for release on exit.
  _S_chunks.push(__REINTERPRET_CAST(_Obj*, __result));
  __result       += _ALIGN;
  __bytes_to_get -= _ALIGN;
#  endif

  if (__bytes_to_get > __total_bytes) {
    // Push excess memory allocated in this chunk into list of free memory blocks
    _FreeBlockHeader* __freeblock = __REINTERPRET_CAST(_FreeBlockHeader*, __result + __total_bytes);
    __freeblock->_M_end  = __result + __bytes_to_get;
    _S_free_mem_blocks.push(__freeblock);
  }
  return __result;
}

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ _STLP_ATOMIC_INCREMENT(&_S_alloc_counter()); }

void __node_alloc_impl::_S_dealloc_call() {
  _STLP_VOLATILE __stl_atomic_t *pcounter = &_S_alloc_counter();
  if (_STLP_ATOMIC_DECREMENT(pcounter) == 0)
    _S_chunk_dealloc();
}

/* We deallocate all the memory chunks      */
void __node_alloc_impl::_S_chunk_dealloc() {
  // Note: The _Node_alloc_helper class ensures that this function
  // will only be called when the (shared) library is unloaded or the
  // process is shut down.  It's thus not possible that another thread
  // is currently trying to allocate a node (we're not thread-safe here).
  //

  // Clear the free blocks and all freelists.  This makes sure that, if
  // for some reason more memory is allocated again during shutdown, it
  // is not served from chunks that are about to be freed (it would also
  // be really nasty to leave references to deallocated memory).
  _S_free_mem_blocks.clear();
  _S_heap_size      = 0;

  for (size_t __i = 0; __i < _STLP_NFREELISTS; ++__i) {
    _S_free_list[__i].clear();
  }

  // Detach list of chunks and free them all
  _Obj* __chunk = _S_chunks.clear();
  while (__chunk != 0) {
    _Obj* __next = __chunk->_M_next;
    __stlp_chunck_free(__chunk);
    __chunk  = __next;
  }
}
#  endif /* _STLP_DO_CLEAN_NODE_ALLOC */

#endif /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
struct __node_alloc_cleaner {
  ~__node_alloc_cleaner()
  { __node_alloc_impl::_S_dealloc_call(); }
};

#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t& _STLP_CALL
#  else
__stl_atomic_t& _STLP_CALL
#  endif
__node_alloc_impl::_S_alloc_counter() {
  static _AllocCounter _S_counter = 1;
  static __node_alloc_cleaner _S_node_alloc_cleaner;
  return _S_counter;
}
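
// Note: _S_counter starts at 1, so matched allocate/deallocate pairs can
// never drive it to 0 on their own; the final decrement comes from
// ~__node_alloc_cleaner at static-destruction time, and only that last
// call can trigger _S_chunk_dealloc().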
#endif

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_Node_alloc_obj * _STLP_VOLATILE
__node_alloc_impl::_S_free_list[_STLP_NFREELISTS]
= {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy.  Otherwise it appears to allocate too little
// space for the array.
#else
_STLP_atomic_freelist __node_alloc_impl::_S_free_list[_STLP_NFREELISTS];
_STLP_atomic_freelist __node_alloc_impl::_S_free_mem_blocks;
#endif

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
char *__node_alloc_impl::_S_start_free = 0;
char *__node_alloc_impl::_S_end_free = 0;
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t
#else
size_t
#endif
__node_alloc_impl::_S_heap_size = 0;

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_atomic_freelist __node_alloc_impl::_S_chunks;
#  else
_Node_alloc_obj* __node_alloc_impl::_S_chunks  = 0;
#  endif
#endif

void * _STLP_CALL __node_alloc::_M_allocate(size_t& __n)
{ return __node_alloc_impl::_M_allocate(__n); }

void _STLP_CALL __node_alloc::_M_deallocate(void *__p, size_t __n)
{ __node_alloc_impl::_M_deallocate(__p, __n); }

#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)

#  define _STLP_DATA_ALIGNMENT 8

_STLP_MOVE_TO_PRIV_NAMESPACE

// *******************************************************
// __perthread_alloc implementation
union _Pthread_alloc_obj {
  union _Pthread_alloc_obj * __free_list_link;
  char __client_data[_STLP_DATA_ALIGNMENT];    /* The client sees this.    */
};
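
// The union overlays the free-list link on the client data: while a block
// sits on the free list, its bytes hold __free_list_link; once handed out,
// the same bytes become client data, so list bookkeeping costs no extra
// space per object.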
