📄 bitmap_allocator.h
  class bitmap_allocator : private free_list
  {
  public:
    typedef std::size_t     size_type;
    typedef std::ptrdiff_t  difference_type;
    typedef _Tp*            pointer;
    typedef const _Tp*      const_pointer;
    typedef _Tp&            reference;
    typedef const _Tp&      const_reference;
    typedef _Tp             value_type;

    template<typename _Tp1>
      struct rebind
      {
        typedef bitmap_allocator<_Tp1> other;
      };

  private:
    template<size_t _BSize, size_t _AlignSize>
      struct aligned_size
      {
        enum
          {
            modulus = _BSize % _AlignSize,
            value = _BSize + (modulus ? _AlignSize - (modulus) : 0)
          };
      };

    struct _Alloc_block
    {
      char __M_unused[aligned_size<sizeof(value_type),
                        _BALLOC_ALIGN_BYTES>::value];
    };

    typedef typename std::pair<_Alloc_block*, _Alloc_block*> _Block_pair;

    typedef typename balloc::__mini_vector<_Block_pair> _BPVector;

#if defined _BALLOC_SANITY_CHECK
    // Complexity: O(lg(N)), where N is the number of blocks of size
    // sizeof(value_type).
    void
    _S_check_for_free_blocks() throw()
    {
      typedef typename __gnu_cxx::balloc::_Ffit_finder<_Alloc_block*> _FFF;
      _FFF __fff;
      typedef typename _BPVector::iterator _BPiter;
      _BPiter __bpi =
        __gnu_cxx::balloc::__find_if(_S_mem_blocks.begin(),
                                     _S_mem_blocks.end(),
                                     __gnu_cxx::balloc::_Functor_Ref<_FFF>(__fff));

      _BALLOC_ASSERT(__bpi == _S_mem_blocks.end());
    }
#endif

    /** @brief  Responsible for exponentially growing the internal
     *  memory pool.
     *
     *  @throw  std::bad_alloc. If memory can not be allocated.
     *
     *  @detail  Complexity: O(1), but internally depends upon the
     *  complexity of the function free_list::_M_get. The part where
     *  the bitmap headers are written has complexity: O(X), where X
     *  is the number of blocks of size sizeof(value_type) within
     *  the newly acquired block. Having a tight bound.
     */
    void
    _S_refill_pool() throw(std::bad_alloc)
    {
#if defined _BALLOC_SANITY_CHECK
      _S_check_for_free_blocks();
#endif

      const size_t __num_bitmaps = (_S_block_size
                                    / size_t(balloc::bits_per_block));
      const size_t __size_to_allocate = sizeof(size_t)
        + _S_block_size * sizeof(_Alloc_block)
        + __num_bitmaps * sizeof(size_t);

      size_t* __temp =
        reinterpret_cast<size_t*>(this->_M_get(__size_to_allocate));
      *__temp = 0;
      ++__temp;

      // The Header information goes at the Beginning of the Block.
      _Block_pair __bp =
        std::make_pair(reinterpret_cast<_Alloc_block*>
                       (__temp + __num_bitmaps),
                       reinterpret_cast<_Alloc_block*>
                       (__temp + __num_bitmaps)
                       + _S_block_size - 1);

      // Fill the Vector with this information.
      _S_mem_blocks.push_back(__bp);

      size_t __bit_mask = 0;    // 0 Indicates all Allocated.
      __bit_mask = ~__bit_mask; // 1 Indicates all Free.

      for (size_t __i = 0; __i < __num_bitmaps; ++__i)
        __temp[__i] = __bit_mask;

      _S_block_size *= 2;
    }

    static _BPVector _S_mem_blocks;
    static size_t _S_block_size;
    static __gnu_cxx::balloc::
           _Bitmap_counter<_Alloc_block*> _S_last_request;
    static typename _BPVector::size_type _S_last_dealloc_index;

#if defined __GTHREADS
    static _Mutex _S_mut;
#endif

  public:

    /** @brief  Allocates memory for a single object of size
     *  sizeof(_Tp).
     *
     *  @throw  std::bad_alloc. If memory can not be allocated.
     *
     *  @detail  Complexity: Worst case complexity is O(N), but that
     *  is hardly ever hit. If and when this particular case is
     *  encountered, the next few cases are guaranteed to have a
     *  worst case complexity of O(1)! That's why this function
     *  performs very well on average. You can consider this
     *  function to have a complexity referred to commonly as:
     *  Amortized Constant time.
     */
    pointer
    _M_allocate_single_object() throw(std::bad_alloc)
    {
#if defined __GTHREADS
      _Auto_Lock __bit_lock(&_S_mut);
#endif

      // The algorithm is something like this: The last_request
      // variable points to the last accessed Bit Map. When such a
      // condition occurs, we try to find a free block in the
      // current bitmap, or succeeding bitmaps until the last bitmap
      // is reached. If no free block turns up, we resort to the
      // First Fit method.

      // WARNING: Do not re-order the conditions in the while
      // statement below, because it relies on C++'s short-circuit
      // evaluation. The return from _S_last_request->_M_get() will
      // NOT be dereferenceable if _S_last_request->_M_finished()
      // returns true. This would inevitably lead to a NULL pointer
      // dereference if tinkered with.
      while (_S_last_request._M_finished() == false
             && (*(_S_last_request._M_get()) == 0))
        {
          _S_last_request.operator++();
        }

      if (__builtin_expect(_S_last_request._M_finished() == true, false))
        {
          // Fall Back to First Fit algorithm.
          typedef typename __gnu_cxx::balloc::_Ffit_finder<_Alloc_block*> _FFF;
          _FFF __fff;
          typedef typename _BPVector::iterator _BPiter;
          _BPiter __bpi =
            __gnu_cxx::balloc::__find_if(_S_mem_blocks.begin(),
                                         _S_mem_blocks.end(),
                                         __gnu_cxx::balloc::_Functor_Ref<_FFF>(__fff));

          if (__bpi != _S_mem_blocks.end())
            {
              // Search was successful. Ok, now mark the first bit from
              // the right as 0, meaning Allocated. This bit is obtained
              // by calling _M_get() on __fff.
              size_t __nz_bit = _Bit_scan_forward(*__fff._M_get());
              balloc::__bit_allocate(__fff._M_get(), __nz_bit);

              _S_last_request._M_reset(__bpi - _S_mem_blocks.begin());

              // Now, get the address of the bit we marked as allocated.
              pointer __ret = reinterpret_cast<pointer>
                (__bpi->first + __fff._M_offset() + __nz_bit);
              size_t* __puse_count =
                reinterpret_cast<size_t*>(__bpi->first)
                - (__gnu_cxx::balloc::__num_bitmaps(*__bpi) + 1);
              ++(*__puse_count);
              return __ret;
            }
          else
            {
              // Search was unsuccessful. We Add more memory to the
              // pool by calling _S_refill_pool().
              _S_refill_pool();

              // _M_Reset the _S_last_request structure to the first
              // free block's bit map.
              _S_last_request._M_reset(_S_mem_blocks.size() - 1);

              // Now, mark that bit as allocated.
            }
        }

      // _S_last_request holds a pointer to a valid bit map, that
      // points to a free block in memory.
      size_t __nz_bit = _Bit_scan_forward(*_S_last_request._M_get());
      balloc::__bit_allocate(_S_last_request._M_get(), __nz_bit);

      pointer __ret = reinterpret_cast<pointer>
        (_S_last_request._M_base() + _S_last_request._M_offset() + __nz_bit);

      size_t* __puse_count = reinterpret_cast<size_t*>
        (_S_mem_blocks[_S_last_request._M_where()].first)
        - (__gnu_cxx::balloc::
           __num_bitmaps(_S_mem_blocks[_S_last_request._M_where()]) + 1);
      ++(*__puse_count);
      return __ret;
    }

    /** @brief  Deallocates memory that belongs to a single object of
     *  size sizeof(_Tp).
     *
     *  @detail  Complexity: O(lg(N)), but the worst case is not hit
     *  often! This is because containers usually deallocate memory
     *  close to each other and this case is handled in O(1) time by
     *  the deallocate function.
     */
    void
    _M_deallocate_single_object(pointer __p) throw()
    {
#if defined __GTHREADS
      _Auto_Lock __bit_lock(&_S_mut);
#endif
      _Alloc_block* __real_p = reinterpret_cast<_Alloc_block*>(__p);

      typedef typename _BPVector::iterator _Iterator;
      typedef typename _BPVector::difference_type _Difference_type;

      _Difference_type __diff;
      long __displacement;

      _BALLOC_ASSERT(_S_last_dealloc_index >= 0);

      if (__gnu_cxx::balloc::_Inclusive_between<_Alloc_block*>
          (__real_p)(_S_mem_blocks[_S_last_dealloc_index]))
        {
          _BALLOC_ASSERT(_S_last_dealloc_index <= _S_mem_blocks.size() - 1);

          // Initial Assumption was correct!
          __diff = _S_last_dealloc_index;
          __displacement = __real_p - _S_mem_blocks[__diff].first;
        }
      else
        {
          _Iterator _iter =
            __gnu_cxx::balloc::__find_if(_S_mem_blocks.begin(),
                                         _S_mem_blocks.end(),
                                         __gnu_cxx::balloc::
                                         _Inclusive_between<_Alloc_block*>(__real_p));

          _BALLOC_ASSERT(_iter != _S_mem_blocks.end());

          __diff = _iter - _S_mem_blocks.begin();
          __displacement = __real_p - _S_mem_blocks[__diff].first;
          _S_last_dealloc_index = __diff;
        }

      // Get the position of the iterator that has been found.
      const size_t __rotate = (__displacement
                               % size_t(balloc::bits_per_block));
      size_t* __bitmapC =
        reinterpret_cast<size_t*>(_S_mem_blocks[__diff].first) - 1;
      __bitmapC -= (__displacement / size_t(balloc::bits_per_block));

      balloc::__bit_free(__bitmapC, __rotate);

      size_t* __puse_count = reinterpret_cast<size_t*>
        (_S_mem_blocks[__diff].first)
        - (__gnu_cxx::balloc::__num_bitmaps(_S_mem_blocks[__diff]) + 1);

      _BALLOC_ASSERT(*__puse_count != 0);

      --(*__puse_count);

      if (__builtin_expect(*__puse_count == 0, false))
        {
          _S_block_size /= 2;

          // We can safely remove this block.
          // _Block_pair __bp = _S_mem_blocks[__diff];
          this->_M_insert(__puse_count);
          _S_mem_blocks.erase(_S_mem_blocks.begin() + __diff);

          // Reset the _S_last_request variable to reflect the
          // erased block. We do this to protect future requests
          // after the last block has been removed from a particular
          // memory Chunk, which in turn has been returned to the
          // free list, and hence had been erased from the vector,
          // so the size of the vector gets reduced by 1.
          if ((_Difference_type)_S_last_request._M_where() >= __diff--)
            _S_last_request._M_reset(__diff);

          // The index into the vector of the region of memory that
          // might hold the next address passed to deallocate may
          // have been invalidated by the above erase procedure being
          // called on the vector, hence we try to restore this
          // invariant too.
          if (_S_last_dealloc_index >= _S_mem_blocks.size())
            {
              _S_last_dealloc_index = (__diff != -1 ? __diff : 0);
              _BALLOC_ASSERT(_S_last_dealloc_index >= 0);
            }
        }
    }

  public:
    bitmap_allocator() throw()
    { }

    bitmap_allocator(const bitmap_allocator&)
    { }

    template<typename _Tp1>
      bitmap_allocator(const bitmap_allocator<_Tp1>&) throw()
      { }

    ~bitmap_allocator() throw()
    { }

    pointer
    allocate(size_type __n)
    {
      if (__builtin_expect(__n > this->max_size(), false))
        std::__throw_bad_alloc();

      if (__builtin_expect(__n == 1, true))
        return this->_M_allocate_single_object();
      else
        {
          const size_type __b = __n * sizeof(value_type);
          return reinterpret_cast<pointer>(::operator new(__b));
        }
    }

    pointer
    allocate(size_type __n, typename bitmap_allocator<void>::const_pointer)
    { return allocate(__n); }

    void
    deallocate(pointer __p, size_type __n) throw()
    {
      if (__builtin_expect(__p != 0, true))
        {
          if (__builtin_expect(__n == 1, true))
            this->_M_deallocate_single_object(__p);
          else
            ::operator delete(__p);
        }
    }

    pointer
    address(reference __r) const
    { return &__r; }

    const_pointer
    address(const_reference __r) const
    { return &__r; }

    size_type
    max_size() const throw()
    { return size_type(-1) / sizeof(value_type); }

    void
    construct(pointer __p, const_reference __data)
    { ::new(__p) value_type(__data); }

    void
    destroy(pointer __p)
    { __p->~value_type(); }
  };

  template<typename _Tp1, typename _Tp2>
    bool
    operator==(const bitmap_allocator<_Tp1>&,
               const bitmap_allocator<_Tp2>&) throw()
    { return true; }

  template<typename _Tp1, typename _Tp2>
    bool
    operator!=(const bitmap_allocator<_Tp1>&,
               const bitmap_allocator<_Tp2>&) throw()
    { return false; }

  // Static member definitions.
  template<typename _Tp>
    typename bitmap_allocator<_Tp>::_BPVector
    bitmap_allocator<_Tp>::_S_mem_blocks;

  template<typename _Tp>
    size_t bitmap_allocator<_Tp>::_S_block_size =
      2 * size_t(balloc::bits_per_block);

  template<typename _Tp>
    typename __gnu_cxx::bitmap_allocator<_Tp>::_BPVector::size_type
    bitmap_allocator<_Tp>::_S_last_dealloc_index = 0;

  template<typename _Tp>
    __gnu_cxx::balloc::_Bitmap_counter
    <typename bitmap_allocator<_Tp>::_Alloc_block*>
    bitmap_allocator<_Tp>::_S_last_request(_S_mem_blocks);

#if defined __GTHREADS
  template<typename _Tp>
    __gnu_cxx::_Mutex bitmap_allocator<_Tp>::_S_mut;
#endif

} // namespace __gnu_cxx

#endif

//  LocalWords:  namespace GTHREADS bool const gthread endif Mutex mutex
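
At its core, the allocate/deallocate pair above is plain bitmap bookkeeping: a word of all 1-bits marks every block in a segment as free, _Bit_scan_forward finds the lowest set bit, __bit_allocate clears it to mark the block allocated, and __bit_free sets it again. The standalone sketch below is not part of the header; lowest_set_bit is a hypothetical stand-in for _Bit_scan_forward, shown only to illustrate the idea on a single bitmap word.

#include <cassert>
#include <cstddef>
#include <iostream>

// Hypothetical stand-in for _Bit_scan_forward: index of the lowest
// set (free) bit. Precondition: word != 0.
std::size_t lowest_set_bit(std::size_t word)
{
  std::size_t bit = 0;
  while (((word >> bit) & 1) == 0)
    ++bit;
  return bit;
}

int main()
{
  std::size_t bitmap = ~std::size_t(0);        // all 1s: every block free

  // "Allocate": pick the lowest free bit and clear it (0 == allocated).
  std::size_t idx = lowest_set_bit(bitmap);
  bitmap &= ~(std::size_t(1) << idx);
  std::cout << "allocated block " << idx << "\n";

  // "Free": set the bit again so the block becomes reusable.
  bitmap |= (std::size_t(1) << idx);
  assert(bitmap == ~std::size_t(0));
  return 0;
}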
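
Each call to _S_refill_pool asks the free list for one contiguous chunk laid out as a size_t use-count, then __num_bitmaps bitmap words, then _S_block_size raw _Alloc_blocks, and doubles _S_block_size for next time. A rough sketch of that size arithmetic follows; the 8-byte block size is an assumption for illustration, not a value taken from the header.

#include <cstddef>
#include <iostream>

int main()
{
  const std::size_t bits_per_block = sizeof(std::size_t) * 8; // blocks tracked per bitmap word
  const std::size_t block_count    = 2 * bits_per_block;      // initial _S_block_size
  const std::size_t block_bytes    = 8;                       // assumed sizeof(_Alloc_block)

  const std::size_t num_bitmaps = block_count / bits_per_block;
  const std::size_t total = sizeof(std::size_t)                // use-count header
                          + block_count * block_bytes          // the blocks themselves
                          + num_bitmaps * sizeof(std::size_t); // the bitmap words
  std::cout << "first refill requests " << total << " bytes\n";
  return 0;
}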
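
Since bitmap_allocator models the standard allocator interface (rebind, allocate, deallocate, construct, destroy), it can be dropped into any standard container. A minimal usage sketch, assuming the usual libstdc++ location of this header:

#include <ext/bitmap_allocator.h>
#include <list>

int main()
{
  // Node-based containers request one object at a time, so every node
  // goes through _M_allocate_single_object / _M_deallocate_single_object.
  std::list<int, __gnu_cxx::bitmap_allocator<int> > l;
  for (int i = 0; i < 1000; ++i)
    l.push_back(i);
  l.clear();   // blocks are handed back to the allocator's bitmaps
  return 0;
}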