
📄 allocators.cpp

📁 From: a new GUI for mplayer on Linux, written with Qt
💻 CPP
📖 Page 1 of 3
/*
 *
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1997
 * Moscow Center for SPARC Technology
 *
 * Copyright (c) 1999
 * Boris Fomitchev
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

#include "stlport_prefix.h"

#include <memory>

#if defined (__GNUC__) && (defined (__CYGWIN__) || defined (__MINGW32__))
#  include <malloc.h>
//#  define _STLP_MALLOC_USABLE_SIZE(__buf) malloc_usable_size(__buf)
#endif

#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
#  include <pthread_alloc>
#  include <cerrno>
#endif

#include <stl/_threads.h>

#include "lock_free_slist.h"

#if defined (__WATCOMC__)
#  pragma warning 13 9
#  pragma warning 367 9
#  pragma warning 368 9
#endif

#if defined (_STLP_SGI_THREADS)
  // We test whether threads are in use before locking.
  // Perhaps this should be moved into stl_threads.h, but that
  // probably makes it harder to avoid the procedure call when
  // it isn't needed.
extern "C" {
  extern int __us_rsthread_malloc;
}
#endif

// Specialised debug form of malloc which does not produce "false"
// memory-leak reports when run with debug CRT libraries.
#if defined (_STLP_MSVC) && (_STLP_MSVC >= 1020 && defined (_STLP_DEBUG_ALLOC)) && !defined (_STLP_WCE)
#  include <crtdbg.h>
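// Note: _STLP_CHECK_NULL_ALLOC expands to code that returns the checked
// pointer (or throws bad_alloc on failure), which is why the wrappers
// below have no explicit return statement.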
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
inline void __stlp_chunck_free(void* __p) { _free_dbg(__p, _CRT_BLOCK); }
#else  // !_DEBUG
#  ifdef _STLP_NODE_ALLOC_USE_MALLOC
#    include <cstdlib>
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
inline void __stlp_chunck_free(void* __p) { _STLP_VENDOR_CSTD::free(__p); }
#  else
inline void* __stlp_chunk_malloc(size_t __bytes) { return _STLP_STD::__stl_new(__bytes); }
inline void __stlp_chunck_free(void* __p) { _STLP_STD::__stl_delete(__p); }
#  endif
#endif  // !_DEBUG

#define _S_FREELIST_INDEX(__bytes) ((__bytes - size_t(1)) >> (int)_ALIGN_SHIFT)
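// Illustrative arithmetic (assuming the usual STLport values _ALIGN == 8
// and _ALIGN_SHIFT == 3, defined elsewhere):
//   _S_FREELIST_INDEX(1)   == 0   // requests of 1..8 bytes share list 0
//   _S_FREELIST_INDEX(9)   == 1   // requests of 9..16 bytes share list 1
//   _S_FREELIST_INDEX(128) == 15  // last of the _STLP_NFREELISTS lists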

_STLP_BEGIN_NAMESPACE

class __malloc_alloc_impl {
private:
  static void* _S_oom_malloc(size_t __n) {
    __oom_handler_type __my_malloc_handler;
    void * __result;

    for (;;) {
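      // Let the user-installed handler try to release some memory, then
      // retry the allocation; throw bad_alloc only when no handler is
      // installed.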
      __my_malloc_handler = __oom_handler;
      if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
      (*__my_malloc_handler)();
      __result = malloc(__n);
      if (__result) return(__result);
    }
#if defined (_STLP_NEED_UNREACHABLE_RETURN)
    return 0;
#endif
  }
  static __oom_handler_type __oom_handler;
public:
  // this one is needed for proper simple_alloc wrapping
  typedef char value_type;
  static void* allocate(size_t& __n) {
    void* __result = malloc(__n);
    if (0 == __result) {
      __result = _S_oom_malloc(__n);
    }
#if defined (_STLP_MALLOC_USABLE_SIZE)
    else {
      size_t __new_n = _STLP_MALLOC_USABLE_SIZE(__result);
      /*
      if (__n != __new_n) {
        printf("requested size %d, usable %d\n", __n, __new_n);
      }
      */
      __n = __new_n;
    }
#endif
    return __result;
  }
  static void deallocate(void* __p, size_t /* __n */) { free((char*)__p); }
  static __oom_handler_type set_malloc_handler(__oom_handler_type __f) {
    __oom_handler_type __old = __oom_handler;
    __oom_handler = __f;
    return __old;
  }
};

// malloc_alloc out-of-memory handling
__oom_handler_type __malloc_alloc_impl::__oom_handler = __STATIC_CAST(__oom_handler_type, 0);

void* _STLP_CALL __malloc_alloc::allocate(size_t& __n)
{ return __malloc_alloc_impl::allocate(__n); }
__oom_handler_type _STLP_CALL __malloc_alloc::set_malloc_handler(__oom_handler_type __f)
{ return __malloc_alloc_impl::set_malloc_handler(__f); }
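
// Usage sketch (not part of STLport): __malloc_alloc retries failed
// allocations through the installed handler, mirroring the standard
// set_new_handler protocol.  A handler typically releases a reserve so
// the retried malloc can succeed; the reserve below is hypothetical.
#if 0
static char* __reserve = new char[1024 * 1024];
static void __release_reserve() {
  delete[] __reserve;                     // return the reserve to the heap
  __reserve = 0;
  __malloc_alloc::set_malloc_handler(0);  // a second failure will throw
}
// Install it before allocation-heavy code:
//   __malloc_alloc::set_malloc_handler(__release_reserve);
#endif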

// *******************************************************
// Default node allocator.
// With a reasonable compiler, this should be roughly as fast as the
// original STL class-specific allocators, but with less fragmentation.
//
// Important implementation properties:
// 1. If the client requests an object of size > _MAX_BYTES, the resulting
//    object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
//    _S_round_up(requested_size).  Thus the client has enough size
//    information that we can return the object to the proper free list
//    without permanently losing part of the object.
//
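// Concretely (assuming the usual _ALIGN == 8, which with the 16 free
// lists below gives _MAX_BYTES == 128): a 25-byte request is rounded up
// to 32 and served from free list _S_FREELIST_INDEX(32) == 3, while a
// 200-byte request bypasses the free lists and goes straight to malloc.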

#define _STLP_NFREELISTS 16

#if defined (_STLP_LEAKS_PEDANTIC) && defined (_STLP_USE_DYNAMIC_LIB)
/*
 * We can only do cleanup of the node allocator memory pool if we are
 * sure that the STLport library is used as a shared one, as that guarantees
 * the uniqueness of the node allocator instance. Without that guarantee,
 * node allocator instances might exchange memory blocks, making the
 * implementation of a cleanup process much more complicated.
 */
#  define _STLP_DO_CLEAN_NODE_ALLOC
#endif

/* When STLport is used without multi-threaded safety we use the node allocator
 * implementation with locks, as the locks become no-ops. The lock-free
 * implementation always uses system-specific atomic operations, which are
 * slower than 'normal' ones.
 */
#if defined (_STLP_THREADS) && \
    defined (_STLP_HAS_ATOMIC_FREELIST) && defined (_STLP_ATOMIC_ADD)
/*
 * We have an implementation of the atomic freelist (_STLP_atomic_freelist)
 * for this architecture and compiler.  That means we can use the non-blocking
 * implementation of the node-allocation engine. */
#  define _STLP_USE_LOCK_FREE_IMPLEMENTATION
#endif
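/* The non-blocking engine rests on compare-and-swap push/pop of the free
 * lists.  A minimal sketch of the push side, written against C++11
 * std::atomic purely for illustration (STLport itself uses the platform
 * primitives wrapped by _STLP_atomic_freelist):
 */
#if 0
#include <atomic>
struct __item { __item* _M_next; };
inline void __push(std::atomic<__item*>& __head, __item* __it) {
  __item* __old = __head.load(std::memory_order_relaxed);
  do {
    __it->_M_next = __old;  // link the node on top of the current head
  } while (!__head.compare_exchange_weak(__old, __it,
                                         std::memory_order_release,
                                         std::memory_order_relaxed));
}
#endif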

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
#  if defined (_STLP_THREADS)

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() {
#  if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#  endif
      _S_lock._M_acquire_lock();
  }

  ~_Node_Alloc_Lock() {
#  if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#  endif
        _S_lock._M_release_lock();
  }

  static _STLP_STATIC_MUTEX _S_lock;
};

_STLP_STATIC_MUTEX _Node_Alloc_Lock::_S_lock _STLP_MUTEX_INITIALIZER;
#  else

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() { }
  ~_Node_Alloc_Lock() { }
};

#  endif

struct _Node_alloc_obj {
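  // While an object sits on a free list, its own first bytes hold the
  // link, so the free lists cost no memory beyond the chunks themselves.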
  _Node_alloc_obj * _M_next;
};
#endif

class __node_alloc_impl {
_STLP_PRIVATE:
  static inline size_t _STLP_CALL _S_round_up(size_t __bytes)
  { return (((__bytes) + (size_t)_ALIGN-1) & ~((size_t)_ALIGN - 1)); }

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_atomic_freelist::item   _Obj;
  typedef _STLP_atomic_freelist         _Freelist;
  typedef _STLP_atomic_freelist         _ChunkList;

  // Header of blocks of memory that have been allocated as part of
  // a larger chunk but have not yet been chopped up into nodes.
  struct _FreeBlockHeader : public _STLP_atomic_freelist::item {
    char* _M_end;     // pointer to end of free memory
  };
#else
  typedef _Node_alloc_obj       _Obj;
  typedef _Obj* _STLP_VOLATILE  _Freelist;
  typedef _Obj*                 _ChunkList;
#endif

private:
  // Returns an object of size __n, and optionally adds extra objects of
  // size __n to the free list.
  static _Obj* _S_refill(size_t __n);
  // Allocates a chunk for __nobjs objects of size __p_size.  __nobjs may be
  // reduced if it is inconvenient to allocate the requested number.
  static char* _S_chunk_alloc(size_t __p_size, int& __nobjs);
  // Chunk allocation state.
  static _Freelist _S_free_list[_STLP_NFREELISTS];
  // Amount of total allocated memory
#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  static _STLP_VOLATILE __stl_atomic_t _S_heap_size;
#else
  static size_t _S_heap_size;
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  // List of blocks of free memory
  static _STLP_atomic_freelist  _S_free_mem_blocks;
#else
  // Start of the current free memory buffer
  static char* _S_start_free;
  // End of the current free memory buffer
  static char* _S_end_free;
#endif

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
public:
  // Methods to report alloc/dealloc calls to the counter system.
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_VOLATILE __stl_atomic_t _AllocCounter;
#  else
  typedef __stl_atomic_t _AllocCounter;
#  endif
  static _AllocCounter& _STLP_CALL _S_alloc_counter();
  static void _S_alloc_call();
  static void _S_dealloc_call();

private:
  // Free all the allocated chunks of memory
  static void _S_chunk_dealloc();
  // Beginning of the linked list of allocated chunks of memory
  static _ChunkList _S_chunks;
#endif /* _STLP_DO_CLEAN_NODE_ALLOC */

public:
  /* __n must be > 0      */
  static void* _M_allocate(size_t& __n);
  /* __p may not be 0 */
  static void _M_deallocate(void *__p, size_t __n);
};

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj *__r;

  // Acquire the lock here with a constructor call.
  // This ensures that it is released on exit or during stack
  // unwinding.
  _Node_Alloc_Lock __lock_instance;

  if ( (__r  = *__my_free_list) != 0 ) {
    *__my_free_list = __r->_M_next;
  } else {
    __r = _S_refill(__n);
  }
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  // lock is released here
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj * __pobj = __STATIC_CAST(_Obj*, __p);

  // acquire lock
  _Node_Alloc_Lock __lock_instance;
  __pobj->_M_next = *__my_free_list;
  *__my_free_list = __pobj;

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
  // lock is released here
}

/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
/* We hold the allocation lock.                                         */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
  char* __result;
  size_t __total_bytes = _p_size * __nobjs;
  size_t __bytes_left = _S_end_free - _S_start_free;

  if (__bytes_left > 0) {
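    // Case 1: the buffer can satisfy the whole request.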
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    // Case 2: at least one object fits; shrink __nobjs to what the buffer holds.
    if (__bytes_left >= _p_size) {
      __nobjs = (int)(__bytes_left / _p_size);
      __total_bytes = _p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    // Try to make use of the left-over piece.
    _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__bytes_left);
    __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = *__my_free_list;
    *__my_free_list = __REINTERPRET_CAST(_Obj*, _S_start_free);
  }

  size_t __bytes_to_get =
    2 * __total_bytes + _S_round_up(_S_heap_size >> 4)
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + sizeof(_Obj)
#  endif
    ;
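  // Worked example: on the first call (_S_heap_size == 0) a refill of,
  // say, 20 objects of 32 bytes asks malloc for 2 * 640 == 1280 bytes;
  // half serves the current refill and the rest stays buffered for later
  // requests.  As _S_heap_size grows, the _S_heap_size >> 4 term makes
  // successive chunks larger, amortizing the underlying malloc calls.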

  _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  if (0 == _S_start_free) {
    _Obj* _STLP_VOLATILE* __my_free_list;
    _Obj* __p;
    // Try to make do with what we have.  That can't hurt.
    // We do not try smaller requests, since that tends
    // to result in disaster on multi-process machines.
