stl_alloc.h

From the archive "c++编程宝典源码及Quincy99编译器": the companion CD of 《标准C++编程宝典》 (Standard C++ Programming Guide), Publishing House of Electronics Industry, containing the book's source code and the Quincy99 compiler.
  static char* _S_chunk_alloc(size_t __size, int& __nobjs);

  // Chunk allocation state.
  static char* _S_start_free;
  static char* _S_end_free;
  static size_t _S_heap_size;

# ifdef __STL_SGI_THREADS
    static volatile unsigned long _S_node_allocator_lock;
    static void _S_lock(volatile unsigned long*);
    static inline void _S_unlock(volatile unsigned long*);
# endif

# ifdef __STL_PTHREADS
    static pthread_mutex_t _S_node_allocator_lock;
# endif

# ifdef __STL_SOLTHREADS
    static mutex_t _S_node_allocator_lock;
# endif

# ifdef __STL_WIN32THREADS
    static CRITICAL_SECTION _S_node_allocator_lock;
    static bool _S_node_allocator_lock_initialized;

  public:
    __default_alloc_template() {
        // This assumes the first constructor is called before threads
        // are started.
        if (!_S_node_allocator_lock_initialized) {
            InitializeCriticalSection(&_S_node_allocator_lock);
            _S_node_allocator_lock_initialized = true;
        }
    }
  private:
# endif

    class _Lock {
    public:
        _Lock() { __NODE_ALLOCATOR_LOCK; }
        ~_Lock() { __NODE_ALLOCATOR_UNLOCK; }
    };
    friend class _Lock;

public:

  /* __n must be > 0 */
  static void* allocate(size_t __n)
  {
    _Obj* __VOLATILE* __my_free_list;
    _Obj* __RESTRICT __result;

    if (__n > (size_t) _MAX_BYTES) {
        return(malloc_alloc::allocate(__n));
    }
    __my_free_list = _S_free_list + _S_freelist_index(__n);
    // Acquire the lock here with a constructor call.
    // This ensures that it is released in exit or during stack
    // unwinding.
#       ifndef _NOTHREADS
        /*REFERENCED*/
        _Lock __lock_instance;
#       endif
    __result = *__my_free_list;
    if (__result == 0) {
        void* __r = _S_refill(_S_round_up(__n));
        return __r;
    }
    *__my_free_list = __result->_M_free_list_link;
    return (__result);
  }

  /* __p may not be 0 */
  static void deallocate(void* __p, size_t __n)
  {
    _Obj* __q = (_Obj*)__p;
    _Obj* __VOLATILE* __my_free_list;

    if (__n > (size_t) _MAX_BYTES) {
        malloc_alloc::deallocate(__p, __n);
        return;
    }
    __my_free_list = _S_free_list + _S_freelist_index(__n);
    // acquire lock
#       ifndef _NOTHREADS
        /*REFERENCED*/
        _Lock __lock_instance;
#       endif /* _NOTHREADS */
    __q->_M_free_list_link = *__my_free_list;
    *__my_free_list = __q;
    // lock is released here
  }

  static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz);
};

typedef __default_alloc_template<__NODE_ALLOCATOR_THREADS, 0> alloc;
typedef __default_alloc_template<false, 0> single_client_alloc;
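// Note: allocate() and deallocate() above rely on _S_round_up() and
// _S_freelist_index(), which are declared earlier in this header and are
// not reproduced in this excerpt.  As a rough sketch, assuming the
// conventional SGI values _ALIGN == 8 and _MAX_BYTES == 128, they behave
// like:
//
//   static size_t _S_round_up(size_t __bytes)
//     { return (__bytes + (size_t)_ALIGN - 1) & ~((size_t)_ALIGN - 1); }
//   static size_t _S_freelist_index(size_t __bytes)
//     { return (__bytes + (size_t)_ALIGN - 1) / (size_t)_ALIGN - 1; }
//
// For example, a 20-byte request rounds up to 24 bytes and is served from
// free list index 2 (the 24-byte list).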
/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
/* We hold the allocation lock.                                         */
template <bool __threads, int __inst>
char*
__default_alloc_template<__threads, __inst>::_S_chunk_alloc(size_t __size,
                                                            int& __nobjs)
{
    char* __result;
    size_t __total_bytes = __size * __nobjs;
    size_t __bytes_left = _S_end_free - _S_start_free;

    if (__bytes_left >= __total_bytes) {
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return(__result);
    } else if (__bytes_left >= __size) {
        __nobjs = (int)(__bytes_left/__size);
        __total_bytes = __size * __nobjs;
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return(__result);
    } else {
        size_t __bytes_to_get =
          2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
        // Try to make use of the left-over piece.
        if (__bytes_left > 0) {
            _Obj* __VOLATILE* __my_free_list =
                        _S_free_list + _S_freelist_index(__bytes_left);

            ((_Obj*)_S_start_free)->_M_free_list_link = *__my_free_list;
            *__my_free_list = (_Obj*)_S_start_free;
        }
        _S_start_free = (char*)malloc(__bytes_to_get);
        if (0 == _S_start_free) {
            size_t __i;
            _Obj* __VOLATILE* __my_free_list;
            _Obj* __p;
            // Try to make do with what we have.  That can't
            // hurt.  We do not try smaller requests, since that tends
            // to result in disaster on multi-process machines.
            for (__i = __size; __i <= _MAX_BYTES; __i += _ALIGN) {
                __my_free_list = _S_free_list + _S_freelist_index(__i);
                __p = *__my_free_list;
                if (0 != __p) {
                    *__my_free_list = __p->_M_free_list_link;
                    _S_start_free = (char*)__p;
                    _S_end_free = _S_start_free + __i;
                    return(_S_chunk_alloc(__size, __nobjs));
                    // Any leftover piece will eventually make it to the
                    // right free list.
                }
            }
            _S_end_free = 0;    // In case of exception.
            _S_start_free = (char*)malloc_alloc::allocate(__bytes_to_get);
            // This should either throw an
            // exception or remedy the situation.  Thus we assume it
            // succeeded.
        }
        _S_heap_size += __bytes_to_get;
        _S_end_free = _S_start_free + __bytes_to_get;
        return(_S_chunk_alloc(__size, __nobjs));
    }
}

/* Returns an object of size __n, and optionally adds to size __n free list.*/
/* We assume that __n is properly aligned.                               */
/* We hold the allocation lock.                                          */
template <bool __threads, int __inst>
void*
__default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
{
    int __nobjs = 20;
    char* __chunk = _S_chunk_alloc(__n, __nobjs);
    _Obj* __VOLATILE* __my_free_list;
    _Obj* __result;
    _Obj* __current_obj;
    _Obj* __next_obj;
    int __i;

    if (1 == __nobjs) return(__chunk);
    __my_free_list = _S_free_list + _S_freelist_index(__n);

    /* Build free list in chunk */
    __result = (_Obj*)__chunk;
    *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
    for (__i = 1; ; __i++) {
        __current_obj = __next_obj;
        __next_obj = (_Obj*)((char*)__next_obj + __n);
        if (__nobjs - 1 == __i) {
            __current_obj->_M_free_list_link = 0;
            break;
        } else {
            __current_obj->_M_free_list_link = __next_obj;
        }
    }
    return(__result);
}

template <bool threads, int inst>
void*
__default_alloc_template<threads, inst>::reallocate(void* __p,
                                                    size_t __old_sz,
                                                    size_t __new_sz)
{
    void* __result;
    size_t __copy_sz;

    if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES) {
        return(realloc(__p, __new_sz));
    }
    if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
    __result = allocate(__new_sz);
    __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
    memcpy(__result, __p, __copy_sz);
    deallocate(__p, __old_sz);
    return(__result);
}

#ifdef __STL_PTHREADS
    template <bool __threads, int __inst>
    pthread_mutex_t
    __default_alloc_template<__threads, __inst>::_S_node_allocator_lock
        = PTHREAD_MUTEX_INITIALIZER;
#endif

#ifdef __STL_SOLTHREADS
    template <bool __threads, int __inst>
    mutex_t
    __default_alloc_template<__threads, __inst>::_S_node_allocator_lock
        = DEFAULTMUTEX;
#endif

#ifdef __STL_WIN32THREADS
    template <bool __threads, int __inst>
    CRITICAL_SECTION
    __default_alloc_template<__threads, __inst>::_S_node_allocator_lock;

    template <bool __threads, int __inst>
    bool
    __default_alloc_template<__threads, __inst>::
      _S_node_allocator_lock_initialized = false;
#endif

#ifdef __STL_SGI_THREADS
__STL_END_NAMESPACE
#include <mutex.h>
#include <time.h>  /* XXX should use <ctime> */
__STL_BEGIN_NAMESPACE
// Somewhat generic lock implementations.  We need only test-and-set
// and some way to sleep.  These should work with both SGI pthreads
// and sproc threads.  They may be useful on other systems.
template <bool __threads, int __inst>
volatile unsigned long
__default_alloc_template<__threads, __inst>::_S_node_allocator_lock = 0;

#if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) || defined(__GNUC__)
#   define __test_and_set(l,v) test_and_set(l,v)
#endif

template <bool __threads, int __inst>
void
__default_alloc_template<__threads, __inst>::
  _S_lock(volatile unsigned long* __lock)
{
    const unsigned __low_spin_max = 30;    // spins if we suspect uniprocessor
    const unsigned __high_spin_max = 1000; // spins for multiprocessor
    static unsigned __spin_max = __low_spin_max;
    unsigned __my_spin_max;
    static unsigned __last_spins = 0;
    unsigned __my_last_spins;
    unsigned __junk;
#   define __ALLOC_PAUSE \
      __junk *= __junk; __junk *= __junk; __junk *= __junk; __junk *= __junk
    int __i;

    if (!__test_and_set((unsigned long*)__lock, 1)) {
        return;
    }
    __my_spin_max = __spin_max;
    __my_last_spins = __last_spins;
    for (__i = 0; __i < __my_spin_max; __i++) {
        if (__i < __my_last_spins/2 || *__lock) {
            __ALLOC_PAUSE;
            continue;
        }
        if (!__test_and_set((unsigned long*)__lock, 1)) {
            // got it!
            // Spinning worked.  Thus we're probably not being scheduled
            // against the other process with which we were contending.
            // Thus it makes sense to spin longer the next time.
            __last_spins = __i;
            __spin_max = __high_spin_max;
            return;
        }
    }
    // We are probably being scheduled against the other process.  Sleep.
    __spin_max = __low_spin_max;
    for (__i = 0 ;; ++__i) {
        struct timespec __ts;
        int __log_nsec = __i + 6;

        if (!__test_and_set((unsigned long *)__lock, 1)) {
            return;
        }
        if (__log_nsec > 27) __log_nsec = 27;
        /* Max sleep is 2**27 nsec ~ 60 msec */
        __ts.tv_sec = 0;
        __ts.tv_nsec = 1 << __log_nsec;
        nanosleep(&__ts, 0);
    }
}

template <bool __threads, int __inst>
inline void
__default_alloc_template<__threads, __inst>::_S_unlock(
  volatile unsigned long* __lock)
{
#   if defined(__GNUC__) && __mips >= 3
        asm("sync");
        *__lock = 0;
#   elif __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
        __lock_release(__lock);
#   else
        *__lock = 0;
        // This is not sufficient on many multiprocessors, since
        // writes to protected variables and the lock may be reordered.
#   endif
}
#endif

template <bool __threads, int __inst>
char* __default_alloc_template<__threads, __inst>::_S_start_free = 0;

template <bool __threads, int __inst>
char* __default_alloc_template<__threads, __inst>::_S_end_free = 0;

template <bool __threads, int __inst>
size_t __default_alloc_template<__threads, __inst>::_S_heap_size = 0;

template <bool __threads, int __inst>
__default_alloc_template<__threads, __inst>::_Obj* __VOLATILE
__default_alloc_template<__threads, __inst>::_S_free_list[_NFREELISTS]
    = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy.  Otherwise it appears to allocate too little
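As a quick orientation, here is a minimal usage sketch of the interface defined above. It assumes the full SGI stl_alloc.h has been included (providing the malloc_alloc fallback and the thread-model macros this excerpt relies on) and that the `alloc` typedef is visible in the current namespace; the sizes and behavior described in the comments follow the code shown on this page.

    #include <cstring>   // std::memset, used only by this sketch
    // assumed: the full stl_alloc.h from this archive has been included

    int main()
    {
        // Small request (<= _MAX_BYTES, i.e. 128 bytes): rounded up to a
        // multiple of 8 and handed out from the matching free list, which
        // _S_refill()/_S_chunk_alloc() populate on first use.
        void* __small = alloc::allocate(32);
        std::memset(__small, 0, 32);

        // Large request: forwarded directly to malloc_alloc::allocate,
        // i.e. plain malloc.
        void* __big = alloc::allocate(4096);

        // reallocate() copies min(old, new) bytes unless both sizes round
        // up to the same free-list slot, in which case the pointer is
        // returned unchanged.
        __small = alloc::reallocate(__small, 32, 64);

        // The caller must pass the current requested size back to
        // deallocate(); the allocator stores no per-block size itself.
        alloc::deallocate(__small, 64);
        alloc::deallocate(__big, 4096);
        return 0;
    }

Note that single_client_alloc is the same template instantiated with threads == false, so it skips the _Lock guard entirely and is only safe when a single thread uses the allocator.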
