bitmap_allocator.h
// Bitmapped Allocator. -*- C++ -*-

// Copyright (C) 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

#if !defined _BITMAP_ALLOCATOR_H
#define _BITMAP_ALLOCATOR_H 1

#include <cstddef>
//For std::size_t, and ptrdiff_t.
#include <utility>
//For std::pair.
#include <algorithm>
//std::find_if, and std::lower_bound.
#include <vector>
//For the free list of exponentially growing memory blocks. At max,
//size of the vector should be not more than the number of bits in an
//integer or an unsigned integer.
#include <functional>
//For greater_equal, and less_equal.
#include <new>
//For operator new.
#include <bits/gthr.h>
//For __gthread_mutex_t, __gthread_mutex_lock and __gthread_mutex_unlock.
#include <ext/new_allocator.h>
//For __gnu_cxx::new_allocator for std::vector.
#include <cassert>

#define NDEBUG

//#define CHECK_FOR_ERRORS
//#define __CPU_HAS_BACKWARD_BRANCH_PREDICTION

namespace __gnu_cxx
{
  namespace
  {
#if defined __GTHREADS
    bool const __threads_enabled = __gthread_active_p();
#endif
  }

#if defined __GTHREADS
  class _Mutex
  {
    __gthread_mutex_t _M_mut;

    //Prevent Copying and assignment.
    _Mutex (_Mutex const&);
    _Mutex& operator= (_Mutex const&);

  public:
    _Mutex ()
    {
      if (__threads_enabled)
        {
#if !defined __GTHREAD_MUTEX_INIT
          __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mut);
#else
          __gthread_mutex_t __mtemp = __GTHREAD_MUTEX_INIT;
          _M_mut = __mtemp;
#endif
        }
    }

    ~_Mutex ()
    {
      //Gthreads does not define a Mutex Destruction Function.
    }

    __gthread_mutex_t *_M_get() { return &_M_mut; }
  };

  class _Lock
  {
    _Mutex* _M_pmt;
    bool _M_locked;

    //Prevent Copying and assignment.
    _Lock (_Lock const&);
    _Lock& operator= (_Lock const&);

  public:
    _Lock (_Mutex* __mptr) : _M_pmt(__mptr), _M_locked(false)
    { this->_M_lock(); }

    void _M_lock()
    {
      if (__threads_enabled)
        {
          _M_locked = true;
          __gthread_mutex_lock(_M_pmt->_M_get());
        }
    }

    void _M_unlock()
    {
      if (__threads_enabled)
        {
          if (__builtin_expect(_M_locked, true))
            {
              __gthread_mutex_unlock(_M_pmt->_M_get());
              _M_locked = false;
            }
        }
    }

    ~_Lock() { this->_M_unlock(); }
  };
#endif

  namespace __aux_balloc
  {
    static const unsigned int _Bits_Per_Byte = 8;
    static const unsigned int _Bits_Per_Block = sizeof(unsigned int) * _Bits_Per_Byte;

    template <typename _Addr_Pair_t>
    inline size_t __balloc_num_blocks (_Addr_Pair_t __ap)
    { return (__ap.second - __ap.first) + 1; }

    template <typename _Addr_Pair_t>
    inline size_t __balloc_num_bit_maps (_Addr_Pair_t __ap)
    { return __balloc_num_blocks(__ap) / _Bits_Per_Block; }

    //T should be a pointer type.
    template <typename _Tp>
    class _Inclusive_between
      : public std::unary_function<typename std::pair<_Tp, _Tp>, bool>
    {
      typedef _Tp pointer;
      pointer _M_ptr_value;
      typedef typename std::pair<_Tp, _Tp> _Block_pair;

    public:
      _Inclusive_between (pointer __ptr) : _M_ptr_value(__ptr) { }

      bool operator() (_Block_pair __bp) const throw()
      {
        if (std::less_equal<pointer> ()(_M_ptr_value, __bp.second) &&
            std::greater_equal<pointer> ()(_M_ptr_value, __bp.first))
          return true;
        else
          return false;
      }
    };

    //Used to pass a Functor to functions by reference.
    template <typename _Functor>
    class _Functor_Ref
      : public std::unary_function<typename _Functor::argument_type,
                                   typename _Functor::result_type>
    {
      _Functor& _M_fref;

    public:
      typedef typename _Functor::argument_type argument_type;
      typedef typename _Functor::result_type result_type;

      _Functor_Ref (_Functor& __fref) : _M_fref(__fref) { }

      result_type operator() (argument_type __arg)
      { return _M_fref (__arg); }
    };

    //T should be a pointer type, and A is the Allocator for the vector.
    template <typename _Tp, typename _Alloc>
    class _Ffit_finder
      : public std::unary_function<typename std::pair<_Tp, _Tp>, bool>
    {
      typedef typename std::vector<std::pair<_Tp, _Tp>, _Alloc> _BPVector;
      typedef typename _BPVector::difference_type _Counter_type;
      typedef typename std::pair<_Tp, _Tp> _Block_pair;

      unsigned int *_M_pbitmap;
      unsigned int _M_data_offset;

    public:
      _Ffit_finder () : _M_pbitmap (0), _M_data_offset (0) { }

      bool operator() (_Block_pair __bp) throw()
      {
        //Set the __rover to the last unsigned integer, which is the
        //bitmap to the first free block. Thus, the bitmaps are in exact
        //reverse order of the actual memory layout. So, we count down
        //the bitmaps, which is the same as moving up the memory.

        //If the used count stored at the start of the Bit Map headers
        //is equal to the number of Objects that the current Block can
        //store, then there is definitely no space for another single
        //object, so just return false.
        _Counter_type __diff = __gnu_cxx::__aux_balloc::__balloc_num_bit_maps (__bp);

        assert (*(reinterpret_cast<unsigned int*>(__bp.first) - (__diff + 1)) <=
                __gnu_cxx::__aux_balloc::__balloc_num_blocks (__bp));

        if (*(reinterpret_cast<unsigned int*>(__bp.first) - (__diff + 1)) ==
            __gnu_cxx::__aux_balloc::__balloc_num_blocks (__bp))
          return false;

        unsigned int *__rover = reinterpret_cast<unsigned int*>(__bp.first) - 1;
        for (_Counter_type __i = 0; __i < __diff; ++__i)
          {
            _M_data_offset = __i;
            if (*__rover)
              {
                _M_pbitmap = __rover;
                return true;
              }
            --__rover;
          }
        return false;
      }

      unsigned int *_M_get () { return _M_pbitmap; }
      unsigned int _M_offset () { return _M_data_offset * _Bits_Per_Block; }
    };

    //T should be a pointer type.
    template <typename _Tp, typename _Alloc>
    class _Bit_map_counter
    {
      typedef typename std::vector<std::pair<_Tp, _Tp>, _Alloc> _BPVector;
      typedef typename _BPVector::size_type _Index_type;
      typedef _Tp pointer;

      _BPVector& _M_vbp;
      unsigned int *_M_curr_bmap;
      unsigned int *_M_last_bmap_in_block;
      _Index_type _M_curr_index;

    public:
      //Use the 2nd parameter with care. Make sure that such an entry
      //exists in the vector before passing that particular index to
      //this ctor.
      _Bit_map_counter (_BPVector& Rvbp, int __index = -1) : _M_vbp(Rvbp)
      { this->_M_reset(__index); }

      void _M_reset (int __index = -1) throw()
      {
        if (__index == -1)
          {
            _M_curr_bmap = 0;
            _M_curr_index = (_Index_type)-1;
            return;
          }

        _M_curr_index = __index;
        _M_curr_bmap = reinterpret_cast<unsigned int*>(_M_vbp[_M_curr_index].first) - 1;

        assert (__index <= (int)_M_vbp.size() - 1);

        _M_last_bmap_in_block = _M_curr_bmap -
          ((_M_vbp[_M_curr_index].second - _M_vbp[_M_curr_index].first + 1) / _Bits_Per_Block - 1);
      }

      //Dangerous Function! Use with extreme care. Pass to this
      //function ONLY those values that are known to be correct,
      //otherwise this will mess up big time.
      void _M_set_internal_bit_map (unsigned int *__new_internal_marker) throw()
      { _M_curr_bmap = __new_internal_marker; }

      bool _M_finished () const throw()
      { return (_M_curr_bmap == 0); }

      _Bit_map_counter& operator++ () throw()
      {
        if (_M_curr_bmap == _M_last_bmap_in_block)
          {
            if (++_M_curr_index == _M_vbp.size())
              _M_curr_bmap = 0;
            else
              this->_M_reset (_M_curr_index);
          }
        else
          --_M_curr_bmap;
        return *this;
      }

      unsigned int *_M_get () { return _M_curr_bmap; }
      pointer _M_base () { return _M_vbp[_M_curr_index].first; }
      unsigned int _M_offset ()
      { return _Bits_Per_Block * ((reinterpret_cast<unsigned int*>(this->_M_base()) - _M_curr_bmap) - 1); }
      unsigned int _M_where () { return _M_curr_index; }
    };
  }
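  //A worked illustration (the concrete numbers are only for exposition) of
  //the super-block layout that the __aux_balloc helpers above assume. For a
  //_Block_pair __bp managing 64 objects with 32-bit bitmap words, memory
  //reads, from low to high address:
  //
  //  [used count][bitmap 1][bitmap 0][object 0][object 1] ... [object 63]
  //                                  ^__bp.first               ^__bp.second
  //
  //The bitmap word covering objects 0-31 sits one unsigned int below
  //__bp.first, the word covering objects 32-63 one word below that, and the
  //used count sits (number of bitmaps + 1) words below __bp.first; this is
  //exactly the pointer arithmetic performed in _Ffit_finder::operator() and
  //_Bit_map_counter::_M_reset.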
  //Generic Version of the bsf instruction.
  typedef unsigned int _Bit_map_type;
  static inline unsigned int _Bit_scan_forward (register _Bit_map_type __num)
  { return static_cast<unsigned int>(__builtin_ctz(__num)); }

  struct _OOM_handler
  {
    static std::new_handler _S_old_handler;
    static bool _S_handled_oom;
    typedef void (*_FL_clear_proc)(void);
    static _FL_clear_proc _S_oom_fcp;

    _OOM_handler (_FL_clear_proc __fcp)
    {
      _S_oom_fcp = __fcp;
      _S_old_handler = std::set_new_handler (_S_handle_oom_proc);
      _S_handled_oom = false;
    }

    static void _S_handle_oom_proc()
    {
      _S_oom_fcp();
      std::set_new_handler (_S_old_handler);
      _S_handled_oom = true;
    }

    ~_OOM_handler ()
    {
      if (!_S_handled_oom)
        std::set_new_handler (_S_old_handler);
    }
  };

  std::new_handler _OOM_handler::_S_old_handler;
  bool _OOM_handler::_S_handled_oom = false;
  _OOM_handler::_FL_clear_proc _OOM_handler::_S_oom_fcp = 0;

  class _BA_free_list_store
  {
    struct _LT_pointer_compare
    {
      template <typename _Tp>
      bool operator() (_Tp* __pt, _Tp const& __crt) const throw()
      { return *__pt < __crt; }
    };

#if defined __GTHREADS
    static _Mutex _S_bfl_mutex;
#endif
    static std::vector<unsigned int*> _S_free_list;
    typedef std::vector<unsigned int*>::iterator _FLIter;

    static void _S_validate_free_list(unsigned int *__addr) throw()
    {
      const unsigned int __max_size = 64;
      if (_S_free_list.size() >= __max_size)
        {
          //Ok, the threshold value has been reached.
          //We determine which block to remove from the list of free
          //blocks.
          if (*__addr >= *_S_free_list.back())
            {
              //Ok, the new block is greater than or equal to the last
              //block in the list of free blocks. We just free the new
              //block.
              operator delete((void*)__addr);
              return;
            }
          else
            {
              //Deallocate the last block in the list of free lists, and
              //insert the new one in its correct position.
              operator delete((void*)_S_free_list.back());
              _S_free_list.pop_back();
            }
        }

      //Just add the block to the list of free lists
      //unconditionally.
      _FLIter __temp = std::lower_bound(_S_free_list.begin(), _S_free_list.end(),
                                        *__addr, _LT_pointer_compare ());
      //We may insert the new free list before __temp;
      _S_free_list.insert(__temp, __addr);
    }

    static bool _S_should_i_give(unsigned int __block_size,
                                 unsigned int __required_size) throw()
    {
      const unsigned int __max_wastage_percentage = 36;
      if (__block_size >= __required_size &&
          (((__block_size - __required_size) * 100 / __block_size) < __max_wastage_percentage))
        return true;
      else
        return false;
    }

  public:
    typedef _BA_free_list_store _BFL_type;

    static inline void _S_insert_free_list(unsigned int *__addr) throw()
    {
#if defined __GTHREADS
      _Lock __bfl_lock(&_S_bfl_mutex);
#endif
      //Call _S_validate_free_list to decide what should be done with
      //this particular free list.
      _S_validate_free_list(--__addr);
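Taken together, the pieces above implement the word-level bookkeeping of the bitmap allocator: each unsigned int tracks _Bits_Per_Block object slots, a set bit marks a free slot (which is why _Ffit_finder only needs to test whether a bitmap word is nonzero), and _Bit_scan_forward uses __builtin_ctz to return the index of the lowest free slot in a word. The following is a minimal standalone sketch of that bit manipulation; the helper names first_free_slot, mark_used and mark_free are illustrative and not part of this header.

#include <cassert>

//Sketch of the bitmap convention used above: a set bit marks a free slot.
inline unsigned int first_free_slot(unsigned int word)
{
  //Same operation as _Bit_scan_forward; requires at least one set bit,
  //which is what the "if (*__rover)" test in _Ffit_finder guarantees.
  return static_cast<unsigned int>(__builtin_ctz(word));
}

inline void mark_used(unsigned int& word, unsigned int slot)
{ word &= ~(1u << slot); }  //clear the bit: the slot is now allocated

inline void mark_free(unsigned int& word, unsigned int slot)
{ word |= (1u << slot); }   //set the bit: the slot is free again

int main()
{
  unsigned int bmap = ~0u;              //all 32 slots free
  unsigned int slot = first_free_slot(bmap);
  assert(slot == 0);
  mark_used(bmap, slot);                //allocate slot 0
  assert(first_free_slot(bmap) == 1);   //the next allocation takes slot 1
  mark_free(bmap, 0);                   //return slot 0 to the bitmap
  assert(first_free_slot(bmap) == 0);
  return 0;
}

The allocator this header ultimately exposes, __gnu_cxx::bitmap_allocator<_Tp>, is used like any other standard allocator, for example std::vector<int, __gnu_cxx::bitmap_allocator<int> > __v;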