📄 allocators
字号:
// header for extended allocators library
#pragma once
#ifndef _ALLOCATORS_
#define _ALLOCATORS_
#ifndef RC_INVOKED
#include <new>
#include <stddef.h>
#pragma pack(push,_CRT_PACKING)
#pragma warning(push,3)
#pragma push_macro("new")
#undef new
namespace stdext {
namespace threads { // MINITHREADS
// Low-level mutex primitives exported from the CRT.  The mutex is kept
// behind an opaque void * handle so this header need not expose its layout.
_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Mtx_new(void *&);	// allocate a mutex, store its handle
_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Mtx_delete(void *);	// destroy a mutex handle
_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Mtx_lock(void *);	// acquire the mutex
_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Mtx_unlock(void *);	// release the mutex
class mutex
	{	// RAII owner of an opaque CRT mutex handle
public:
	friend class _Scoped_lock;
	typedef _Scoped_lock scoped_lock;

	mutex()
		: _Ptr(0)
		{	// allocate the underlying mutex
		_Mtx_new(_Ptr);
		}

	~mutex()
		{	// destroy the underlying mutex
		_Mtx_delete(_Ptr);
		}

private:
	mutex(const mutex&);			// declared but not defined: non-copyable
	mutex& operator=(const mutex&);	// declared but not defined: non-assignable
	void *_Ptr;	// opaque handle filled in by _Mtx_new, owned by this object
	};
class _Scoped_lock
	{	// RAII lock: holds the mutex for this object's lifetime
public:
	explicit _Scoped_lock(mutex& _Mut)
		: _Ptr(_Mut._Ptr)
		{	// acquire on construction
		_Mtx_lock(_Ptr);
		}

	~_Scoped_lock()
		{	// release on destruction
		_Mtx_unlock(_Ptr);
		}

private:
	_Scoped_lock(const _Scoped_lock&);				// declared but not defined: non-copyable
	_Scoped_lock& operator=(const _Scoped_lock&);	// declared but not defined
	void *_Ptr;	// handle borrowed from the mutex; not owned here
	};
} // namespace threads
namespace allocators {
// SYNCHRONIZATION FILTERS
template<class _Cache>
	class sync_none
	{	// synchronization filter that does no locking at all;
		// every instance forwards to one shared static cache
public:
	void *allocate(size_t _Count)
		{	// take a block from the shared cache, unsynchronized
		return _Mycache.allocate(_Count);
		}

	void deallocate(void *_Ptr, size_t _Count)
		{	// give a block back to the shared cache, unsynchronized
		_Mycache.deallocate(_Ptr, _Count);
		}

	bool equals(const sync_none<_Cache>& _Other) const
		{	// equal exactly when the underlying caches can share blocks
		return _Mycache.equals(_Other._Mycache);
		}

private:
	static _Cache _Mycache;	// the one cache shared by every instance
	};

template<class _Cache>
	_Cache sync_none<_Cache>::_Mycache;	// definition of the shared cache
template<class _Cache>
	class sync_per_container
	: public _Cache
	{	// synchronization filter giving each container its own private
		// cache (inherited from _Cache), so no locking is required
public:
	bool equals(const sync_per_container<_Cache>&) const
		{	// distinct containers never share memory blocks
		return false;
		}
	};
#define SYNC_DEFAULT stdext::allocators::sync_shared
template<class _Cache>
	class sync_shared
	{	// synchronization filter serializing access to one cache
		// shared by every instance, via a shared static mutex
public:
	void *allocate(size_t _Count)
		{	// lock, then take a block from the shared cache
		threads::mutex::scoped_lock lock(_Mymtx);
		return _Mycache.allocate(_Count);
		}

	void deallocate(void *_Ptr, size_t _Count)
		{	// lock, then give a block back to the shared cache
		threads::mutex::scoped_lock lock(_Mymtx);
		_Mycache.deallocate(_Ptr, _Count);
		}

	bool equals(const sync_shared<_Cache>& _Other) const
		{	// equal exactly when the underlying caches can share blocks
			// NOTE(review): reads _Mycache without taking _Mymtx, as the
			// original did — presumably equals() is const-safe; confirm
		return _Mycache.equals(_Other._Mycache);
		}

private:
	static _Cache _Mycache;			// the cache shared by every instance
	static threads::mutex _Mymtx;	// guards _Mycache in allocate/deallocate
	};

template<class _Cache>
	_Cache sync_shared<_Cache>::_Mycache;	// definition of the shared cache

template<class _Cache>
	threads::mutex sync_shared<_Cache>::_Mymtx;	// definition of the shared mutex
// BEFORE MINITHREADS
//
// template<class _Cache>
// class sync_per_thread
// { // cache with per-thread synchronization
// public:
// void *allocate(size_t _Count)
// { // allocate from cache
// _Cache *_Cp = _Mycache_ptr.get();
// if (_Cp == 0)
// _Mycache_ptr.reset(_Cp = new _Cache());
// return (_Cp != 0 ? _Cp->allocate(_Count) : 0);
// }
//
// void deallocate(void *_Ptr, size_t _Count)
// { // deallocate through cache
// _Cache *_Cp = _Mycache_ptr.get();
// if (_Cp != 0)
// _Cp->deallocate(_Ptr, _Count);
// }
//
// bool equals(const sync_per_thread<_Cache>& _Other) const
// { // compare two caches for equality
// _Cache *_Cp0, *_Cp1;
// return ((_Cp0 = _Mycache_ptr.get()) != 0
// && (_Cp1 = _Other._Mycache_ptr.get()) != 0
// && _Cp0->equals(*_Cp1));
// }
//
// private:
// static threads::thread_specific_ptr<_Cache> _Mycache_ptr;
// };
//
// template<class _Cache>
// threads::thread_specific_ptr<_Cache>
// sync_per_thread<_Cache>::_Mycache_ptr;
// Synchronization filter keeping one private cache per thread in a
// __declspec(thread) (thread-local) pointer, so no locking is needed.
template<class _Cache>
class sync_per_thread
{ // cache with per-thread synchronization
public:
	void *allocate(size_t _Count)
	{ // allocate from cache
		// first use on this thread: lazily create the thread's cache
		if (_Mycache_ptr == 0)
			_Mycache_ptr = new _Cache();
		// the post-check guards against a null result from new;
		// NOTE(review): with standard throwing operator new this branch
		// looks unreachable — presumably kept for non-throwing-new builds
		return (_Mycache_ptr != 0 ? _Mycache_ptr->allocate(_Count) : 0);
	}
	void deallocate(void *_Ptr, size_t _Count)
	{ // deallocate through cache
		// if this thread never allocated, there is no cache to return to
		if (_Mycache_ptr != 0)
			_Mycache_ptr->deallocate(_Ptr, _Count);
	}
	bool equals(const sync_per_thread<_Cache>& _Other) const
	{ // compare two caches for equality
		// both threads' caches must exist and agree that blocks are shareable
		return (_Mycache_ptr != 0
			&& _Other._Mycache_ptr != 0
			&& _Mycache_ptr->equals(*_Other._Mycache_ptr));
	}
private:
	// per-thread cache, created on first allocate()
	// NOTE(review): never deleted anywhere in this block — the per-thread
	// cache appears to live for the remainder of the thread/process; confirm
	// that this is the intended trade-off
	static __declspec(thread) _Cache * _Mycache_ptr;
};
// definition of the thread-local cache pointer (zero-initialized)
template<class _Cache>
__declspec(thread) _Cache *
sync_per_thread<_Cache>::_Mycache_ptr;
// MAX CLASSES AND TEMPLATES
class max_none
	{	// max policy that forbids caching: the free list always reports
		// full, so every block goes straight back to the free store
public:
	bool full() const
		{	// always full, so nothing is ever saved
		return true;
		}

	void saved()
		{	// no statistics to keep
		}

	void released()
		{	// no statistics to keep
		}

	void allocated(size_t = 1)
		{	// no statistics to keep
		}

	void deallocated(size_t = 1)
		{	// no statistics to keep
		}
	};
class max_unbounded
	{	// max policy with no limit: the free list never reports full,
		// so every freed block is cached
public:
	bool full() const
		{	// never full, so every block is saved
		return false;
		}

	void saved()
		{	// no statistics to keep
		}

	void released()
		{	// no statistics to keep
		}

	void allocated(size_t = 1)
		{	// no statistics to keep
		}

	void deallocated(size_t = 1)
		{	// no statistics to keep
		}
	};
template<size_t _Max>
	class max_fixed_size
	{	// max policy capping the free list at a fixed _Max blocks
public:
	max_fixed_size()
		: _Nblocks(0)
		{	// start with an empty free list
		}

	bool full() const
		{	// full once _Max blocks are cached
		return _Max <= _Nblocks;
		}

	void saved()
		{	// one more block on the free list
		++_Nblocks;
		}

	void released()
		{	// one block taken off the free list
		--_Nblocks;
		}

	void allocated(size_t = 1)
		{	// allocation counts are irrelevant to a fixed cap
		}

	void deallocated(size_t = 1)
		{	// allocation counts are irrelevant to a fixed cap
		}

private:
	unsigned long _Nblocks;	// number of blocks currently cached
	};
class max_variable_size
	{	// max policy scaling the free-list cap with outstanding
		// allocations: full when _Nallocs / 16 + 16 <= _Nblocks
public:
	max_variable_size()
		: _Nblocks(0), _Nallocs(0)
		{	// start with nothing cached and nothing allocated
		}

	bool full() const
		{	// allow about 1/16 of outstanding allocations, plus 16 slack
		return _Nallocs / 16 + 16 <= _Nblocks;
		}

	void saved()
		{	// one more block on the free list
		++_Nblocks;
		}

	void released()
		{	// one block taken off the free list
		--_Nblocks;
		}

	void allocated(size_t _Nx = 1)
		{	// record _Nx new allocations
		_Nallocs = (unsigned long)(_Nallocs + _Nx);
		}

	void deallocated(size_t _Nx = 1)
		{	// record _Nx blocks returned to the free store
		_Nallocs = (unsigned long)(_Nallocs - _Nx);
		}

private:
	unsigned long _Nblocks;	// blocks currently on the free list
	unsigned long _Nallocs;	// allocations still outstanding
	};
// CACHES
template<size_t _Sz,
	class _Max>
	class freelist
	: public _Max
	{	// LIFO free list threaded through the cached blocks themselves;
		// capacity is governed by the inherited _Max policy
public:
	freelist()
		: _Head(0)
		{	// start with an empty list
		}

	bool push(void *_Ptr)
		{	// cache the block unless the policy says the list is full
		if (this->full())
			return false;
		((node *)_Ptr)->_Next = _Head;	// link in at the head
		_Head = (node *)_Ptr;
		this->saved();
		return true;
		}

	void *pop()
		{	// hand back the most recently pushed block, or 0 if empty
		void *_Ptr = _Head;
		if (_Ptr != 0)
			{	// unlink the head node and tell the policy
			_Head = _Head->_Next;
			this->released();
			}
		return _Ptr;
		}

private:
	struct node
		{	// link stored in the first pointer-sized bytes of a cached block
		node *_Next;
		};

	node *_Head;	// top of the free list, 0 when empty
	};
template<size_t _Sz,
	class _Max>
	class cache_freelist
	{	// block cache: recycle freed blocks through a freelist,
		// falling back to the free store when the list is empty/full
public:
	~cache_freelist()
		{	// return every cached block to the free store
		for (void *_Ptr = _Fl.pop(); _Ptr != 0; _Ptr = _Fl.pop())
			::operator delete(_Ptr);
		}

	void *allocate(size_t _Count)
		{	// reuse a cached block if one exists, else go to the free store
		void *_Res = _Fl.pop();
		if (_Res != 0)
			return _Res;
		// a block must be at least pointer-sized to hold the free-list link
		size_t _Bytes = _Count < sizeof (void *) ? sizeof (void *) : _Count;
		_Res = ::operator new(_Bytes);
		_Fl.allocated();
		return _Res;
		}

	void deallocate(void *_Ptr, size_t)
		{	// cache the block if the list will take it, else free it
		if (!_Fl.push(_Ptr))
			{	// list is full: give the block back to the free store
			::operator delete(_Ptr);
			_Fl.deallocated();
			}
		}

	bool equals(const cache_freelist<_Sz, _Max>&) const
		{	// all caches of this type can share memory blocks
		return true;
		}

private:
	freelist<_Sz, _Max> _Fl;	// the recycled-block list
	};
template<size_t _Sz,
size_t _Nelts = 20>
class cache_suballoc
{ // suballocate without reblocking
public:
cache_suballoc()
: _Begin(0), _End(0)
{ // construct with empty list
}
void *allocate(size_t _Count)
{ // pop from free list or suballocate
void *_Res = _Helper.pop();
if (_Res == 0)
{ // free list empty, suballocate
if (_Begin == _End)
{ // no room in block, allocate new block
if (_Count * _Nelts < sizeof (void *))
_Begin = (char*)::operator new(sizeof (void *));
else
_Begin = (char*)::operator new(_Count * _Nelts);
_End = _Begin + _Count * _Nelts;
_Helper.allocated(_Nelts);
}
_Res = _Begin;
_Begin += _Count;
}
return (_Res);
}
void deallocate(void *_Ptr, size_t)
{ // push onto free list
_Helper.push(_Ptr);
}
bool equals(const cache_suballoc<_Sz, _Nelts>&) const
{ // report that caches can share data
return (true);
}
private:
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -