/*
 *
 * Copyright (c) 1994
 * Hewlett-Packard Company
 *
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1997
 * Moscow Center for SPARC Technology
 *
 * Copyright (c) 1999 
 * Boris Fomitchev
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted 
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

#ifndef __SGI_STL_PTHREAD_ALLOC
#define __SGI_STL_PTHREAD_ALLOC

// Pthread-specific node allocator.
// This is similar to the default allocator, except that free-list
// information is kept separately for each thread, avoiding locking.
// This should be reasonably fast even in the presence of threads.
// The down side is that storage may not be well-utilized.
// It is not an error to allocate memory in thread A and deallocate
// it in thread B.  But this effectively transfers ownership of the memory,
// so that it can only be reallocated by thread B.  Thus this can effectively
// result in a storage leak if it's done on a regular basis.
// It can also result in frequent sharing of
// cache lines among processors, with potentially serious performance
// consequences.

#ifndef __SGI_STL_INTERNAL_ALLOC_H
#include <stl_alloc.h>
#endif
#ifndef __RESTRICT
#  define __RESTRICT
#endif

__STL_BEGIN_NAMESPACE

#define __STL_DATA_ALIGNMENT 8

union _Pthread_alloc_obj {
    union _Pthread_alloc_obj * __free_list_link;
    char __client_data[__STL_DATA_ALIGNMENT];    /* The client sees this.    */
};
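
// Illustration (added; not in the original source): the union lets one
// 8-byte block play two roles.  While a block sits on a free list, its first
// word is read as __free_list_link; once handed to the client, the same
// bytes become __client_data, so free-list bookkeeping costs no extra
// memory.  A free list of three blocks therefore looks like:
//
//   __free_list[__i] --> [link|...] --> [link|...] --> [link|...] --> 0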

// Pthread allocators don't appear to the client to have meaningful
// instances.  We do in fact need to associate some state with each
// thread.  That state is represented by
// _Pthread_alloc_per_thread_state<_Max_size>.

template<size_t _Max_size>
struct _Pthread_alloc_per_thread_state {
  typedef _Pthread_alloc_obj __obj;
  enum { _S_NFREELISTS = _Max_size/__STL_DATA_ALIGNMENT };
  _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS]; 
  _Pthread_alloc_per_thread_state<_Max_size> * __next; 
        // Free list link for list of available per thread structures.
        // When one of these becomes available for reuse due to thread
        // termination, any objects in its free list remain associated
        // with it.  The whole structure may then be used by a newly
        // created thread.
  _Pthread_alloc_per_thread_state() : __next(0)
  {
    memset((void *)__free_list, 0, (size_t)_S_NFREELISTS * sizeof(__obj *));
  }
  // Returns an object of size __n, and possibly adds objects of size __n
  // to this thread's free list.
  void *_M_refill(size_t __n);
};
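
// An illustrative sketch of _M_refill (hypothetical: its definition is not
// shown on this page).  It follows the strategy the comment above implies:
// fetch a chunk of __nobjs objects from the shared chunk allocator
// (_Pthread_alloc_template<_Max_size>::_S_chunk_alloc, declared below),
// return the first object to the caller, and thread the remainder onto this
// thread's free list for size __n.  Local names and the initial __nobjs
// value are illustrative.
#if 0   /* sketch only; not compiled */
template <size_t _Max_size>
void *_Pthread_alloc_per_thread_state<_Max_size>::_M_refill(size_t __n)
{
    int __nobjs = 128;     // ask for many objects; _S_chunk_alloc may shrink this
    char *__chunk =
        _Pthread_alloc_template<_Max_size>::_S_chunk_alloc(__n, __nobjs);
    if (1 == __nobjs) return __chunk;           // nothing left over to cache
    __obj * volatile * __my_free_list = __free_list
        + _Pthread_alloc_template<_Max_size>::_S_freelist_index(__n);
    __obj *__next = (__obj *)(__chunk + __n);   // objects 1 .. __nobjs-1
    *__my_free_list = __next;
    for (int __i = 1; __i < __nobjs - 1; ++__i) {
        __obj *__cur = __next;
        __next = (__obj *)((char *)__next + __n);
        __cur->__free_list_link = __next;       // link object __i to __i+1
    }
    __next->__free_list_link = 0;               // terminate the list
    return __chunk;                             // object 0 goes to the caller
}
#endif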

// Pthread-specific allocator.
// The argument specifies the largest object size allocated from per-thread
// free lists.  Larger objects are allocated using malloc_alloc.
// _Max_size must be a power of 2.
template <size_t _Max_size = 128>
class _Pthread_alloc_template {

public: // but only for internal use:

  typedef _Pthread_alloc_obj __obj;

  // Allocates a chunk for __nobjs objects of size __size.  __nobjs may be
  // reduced if it is inconvenient to allocate the requested number.
  static char *_S_chunk_alloc(size_t __size, int &__nobjs);

  enum {_S_ALIGN = __STL_DATA_ALIGNMENT};

  static size_t _S_round_up(size_t __bytes) {
        return (((__bytes) + (int)_S_ALIGN-1) & ~((int)_S_ALIGN - 1));
  }
  static size_t _S_freelist_index(size_t __bytes) {
        return (((__bytes) + (int)_S_ALIGN-1)/(int)_S_ALIGN - 1);
  }
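
  // Worked example (added for illustration) with _S_ALIGN == 8; the values
  // follow directly from the two formulas above:
  //   _S_round_up(13)       == (13 + 7) & ~7   == 16
  //   _S_freelist_index(13) == (13 + 7)/8 - 1  == 1   // the 16-byte list
  //   _S_round_up(64)       == 64
  //   _S_freelist_index(64) == (64 + 7)/8 - 1  == 7   // the 64-byte list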

private:
  // Chunk allocation state. And other shared state.
  // Protected by _S_chunk_allocator_lock.
  static pthread_mutex_t _S_chunk_allocator_lock;
  static char *_S_start_free;
  static char *_S_end_free;
  static size_t _S_heap_size;
  static _Pthread_alloc_per_thread_state<_Max_size>* _S_free_per_thread_states;
        // Allocator instances that are currently unclaimed by any thread.
  static pthread_key_t _S_key;
        // Pthread key under which per thread state is stored.
  static bool _S_key_initialized;
  static void _S_destructor(void *instance);
        // Function to be called on thread exit to reclaim per thread
        // state.
  static _Pthread_alloc_per_thread_state<_Max_size> *_S_new_per_thread_state();
        // Return a recycled or new per thread state.
  static _Pthread_alloc_per_thread_state<_Max_size> *_S_get_per_thread_state();
        // ensure that the current thread has an associated
        // per thread state.
  class _M_lock;
  friend class _M_lock;
  class _M_lock {
      public:
        _M_lock () { pthread_mutex_lock(&_S_chunk_allocator_lock); }
        ~_M_lock () { pthread_mutex_unlock(&_S_chunk_allocator_lock); }
  };
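
  // Design note (added): _M_lock is a scoped (RAII) guard, so the mutex is
  // released when the guard goes out of scope even if an intervening
  // statement throws (e.g. __THROW_BAD_ALLOC in _S_get_per_thread_state
  // below).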

public:

  /* __n must be > 0      */
  static void * allocate(size_t __n)
  {
    __obj * volatile * __my_free_list;
    __obj * __RESTRICT __result;
    _Pthread_alloc_per_thread_state<_Max_size>* __a;

    if (__n > _Max_size) {
        return(__malloc_alloc<0>::allocate(__n));
    }
    if (!_S_key_initialized ||
        !(__a = (_Pthread_alloc_per_thread_state<_Max_size>*)
                                 pthread_getspecific(_S_key))) {
        __a = _S_get_per_thread_state();
    }
    __my_free_list = __a -> __free_list + _S_freelist_index(__n);
    __result = *__my_free_list;
    if (__result == 0) {
        void *__r = __a -> _M_refill(_S_round_up(__n));
        return __r;
    }
    *__my_free_list = __result -> __free_list_link;
    return (__result);
  }
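
  // Design note (added): the fast path above touches only the calling
  // thread's _Pthread_alloc_per_thread_state, so no mutex is taken.  The
  // shared _S_chunk_allocator_lock is acquired only on the slow paths:
  // creating per-thread state (_S_get_per_thread_state) and refilling an
  // empty list via _S_chunk_alloc.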

  /* __p may not be 0 */
  static void deallocate(void *__p, size_t __n)
  {
    __obj *__q = (__obj *)__p;
    __obj * volatile * __my_free_list;
    _Pthread_alloc_per_thread_state<_Max_size>* __a;

    if (__n > _Max_size) {
        __malloc_alloc<0>::deallocate(__p, __n);
        return;
    }
    if (!_S_key_initialized ||
        !(__a = (_Pthread_alloc_per_thread_state<_Max_size> *)
                pthread_getspecific(_S_key))) {
        __a = _S_get_per_thread_state();
    }
    __my_free_list = __a->__free_list + _S_freelist_index(__n);
    __q -> __free_list_link = *__my_free_list;
    *__my_free_list = __q;
  }
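
  // Example of the cross-thread caveat from the header comment (hypothetical
  // driver code, added for illustration; pthread_alloc is the typedef below):
  //
  //   void *__p = pthread_alloc::allocate(32);   // thread A: from A's lists
  //   /* ... hand __p to thread B ... */
  //   pthread_alloc::deallocate(__p, 32);        // thread B: onto B's lists
  //
  // Afterwards only thread B can reuse those 32 bytes, so doing this
  // routinely in one direction behaves like a slow storage leak.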

  static void * reallocate(void *__p, size_t __old_sz, size_t __new_sz);

} ;

typedef _Pthread_alloc_template<> pthread_alloc;
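
// Minimal usage sketch (added; hypothetical driver, not part of this
// header).  Requests of at most _Max_size bytes (128 by default) are served
// from the calling thread's free lists; larger requests fall through to
// __malloc_alloc:
//
//   void *__small = pthread_alloc::allocate(64);    // per-thread free list
//   void *__large = pthread_alloc::allocate(4096);  // forwarded to malloc
//   pthread_alloc::deallocate(__small, 64);
//   pthread_alloc::deallocate(__large, 4096);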


template <size_t _Max_size>
void _Pthread_alloc_template<_Max_size>::_S_destructor(void * __instance)
{
    _M_lock __lock_instance;	// Need to acquire lock here.
    _Pthread_alloc_per_thread_state<_Max_size>* __s =
        (_Pthread_alloc_per_thread_state<_Max_size> *)__instance;
    __s -> __next = _S_free_per_thread_states;
    _S_free_per_thread_states = __s;
}

template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc_template<_Max_size>::_S_new_per_thread_state()
{    
    /* lock already held here.	*/
    if (0 != _S_free_per_thread_states) {
        _Pthread_alloc_per_thread_state<_Max_size> *__result =
            _S_free_per_thread_states;
        _S_free_per_thread_states = _S_free_per_thread_states->__next;
        return __result;
    } else {
        return __STL_NEW _Pthread_alloc_per_thread_state<_Max_size>;
    }
}

template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc_template<_Max_size>::_S_get_per_thread_state()
{
    /*REFERENCED*/
    _M_lock __lock_instance;	// Need to acquire lock here.
    int __ret_code;
    _Pthread_alloc_per_thread_state<_Max_size> * __result;
    if (!_S_key_initialized) {
        if (pthread_key_create(&_S_key, _S_destructor)) {
            __THROW_BAD_ALLOC;  // failed
        }
        _S_key_initialized = true;
    }
    __result = _S_new_per_thread_state();
    __ret_code = pthread_setspecific(_S_key, __result);
    if (__ret_code) {
        if (__ret_code == ENOMEM) {
            __THROW_BAD_ALLOC;
        } else {
            // EINVAL
            abort();
        }
    }
    return __result;
}
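
// Lifecycle note (added): pthread_key_create registers _S_destructor, so
// when a thread exits with a non-null value stored under _S_key, pthreads
// invokes _S_destructor on it and the per-thread state (free lists included)
// is pushed onto _S_free_per_thread_states, to be reclaimed by a future
// thread via _S_new_per_thread_state.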

/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
template <size_t _Max_size>
char *_Pthread_alloc_template<_Max_size>
::_S_chunk_alloc(size_t __p_size, int &__nobjs)
{
  {
    char * __result;
    size_t __total_bytes;
    size_t __bytes_left;
    /*REFERENCED*/
    _M_lock __lock_instance;         // Acquire lock for this routine

    __total_bytes = __p_size * __nobjs;
    __bytes_left = _S_end_free - _S_start_free;
    if (__bytes_left >= __total_bytes) {
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return(__result);

    // ... (listing truncated: the original posting shows page 1 of 2; the
    // remainder of _S_chunk_alloc and of the file continues on page 2)