rbtree_best_fit.hpp
来自「Boost provides free peer-reviewed portable C++ source libraries」· HPP 代码 · 共 1,348 行 · 第 1/4 页
HPP
1,348 行
//==== Layout constants (class scope) ========================================
//Sizes are kept both in bytes and in "units", where one unit == Alignment bytes.
static const std::size_t AlignmentMask = (Alignment - 1);
//Free-block control structure size, rounded up to the alignment
static const std::size_t BlockCtrlBytes = detail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
static const std::size_t BlockCtrlUnits = BlockCtrlBytes/Alignment;
//Header kept in front of an allocated buffer (just the size holder)
static const std::size_t AllocatedCtrlBytes = detail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
static const std::size_t AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment;
//Sentinel "end" node placed after the last block of the segment
static const std::size_t EndCtrlBlockBytes = detail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
static const std::size_t EndCtrlBlockUnits = EndCtrlBlockBytes/Alignment;
//Minimum size (in units) of a manageable block
static const std::size_t MinBlockUnits = BlockCtrlUnits;
//Bytes of this block's header that the previous chunk may still use
static const std::size_t UsableByPreviousChunk = sizeof(std::size_t);

//Make sure the maximum alignment is power of two
BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - std::size_t(1u)))));
/// @endcond

public:
//Per-allocation bookkeeping overhead, in bytes
static const std::size_t PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
};

/// @cond

//Returns the byte offset, from this_ptr, of the first block of the segment:
//just past the rbtree_best_fit header plus extra_hdr_bytes, rounded up to
//the next Alignment boundary.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_first_block_offset(const void *this_ptr, std::size_t extra_hdr_bytes)
{
   std::size_t uint_this            = (std::size_t)this_ptr;
   std::size_t main_hdr_end         = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
   std::size_t aligned_main_hdr_end = detail::get_rounded_size(main_hdr_end, Alignment);
   std::size_t block1_off           = aligned_main_hdr_end - uint_this;
   algo_impl_t::assert_alignment(aligned_main_hdr_end);
   algo_impl_t::assert_alignment(uint_this + block1_off);
   return block1_off;
}

//Constructor: initializes the bookkeeping header and turns everything past
//the header (and the user's extra header bytes) into one big free segment.
//Precondition: size >= get_min_size(extra_hdr_bytes).
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   rbtree_best_fit(std::size_t size, std::size_t extra_hdr_bytes)
{
   //Initialize the header
   m_header.m_allocated = 0;
   m_header.m_size      = size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Now calculate the offset of the first big block that will
   //cover the whole segment
   assert(get_min_size(extra_hdr_bytes) <= size);
   std::size_t block1_off = priv_first_block_offset(this, extra_hdr_bytes);
   priv_add_segment(reinterpret_cast<char*>(this) + block1_off, size - block1_off);
}

//Destructor: intentionally empty. The commented-out checks would detect
//outstanding allocations, but they are disabled, so destroying the segment
//with live allocations is tolerated.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //There is a memory leak!
//   assert(m_header.m_allocated == 0);
//   assert(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}

//Extends the managed memory by extra_size bytes: the old end sentinel
//becomes an ordinary block covering the new space, a fresh end sentinel is
//written at the new border, and the recycled block is released through
//priv_deallocate so it enters the free tree.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(std::size_t extra_size)
{
   //Get the address of the first block
   std::size_t block1_off =
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);
   block_ctrl *first_block = reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(this) + block1_off);
   block_ctrl *old_end_block = priv_end_block(first_block);
   assert(priv_is_allocated_block(old_end_block));
   std::size_t old_border_offset = (reinterpret_cast<char*>(old_end_block) -
      reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockUnits blocks to create a new block
//   assert((m_header.m_size - old_end) >= MinBlockUnits);
   //NOTE(review): the left-hand side is a byte count while MinBlockUnits is
   //a unit count (bytes/Alignment); the check therefore admits growths
   //smaller than MinBlockUnits*Alignment bytes — confirm against upstream
   //Boost sources before changing anything here.
   if((m_header.m_size - old_border_offset) < MinBlockUnits){
      return;
   }

   //Now create a new block between the old end and the new end
   std::size_t align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);
   //The sentinel's m_size holds the pointer distance back to the first
   //block in units (a negative difference — presumably relying on the size
   //field's representation; see priv_end_block/priv_next_block)
   new_end_block->m_size = (reinterpret_cast<char*>(first_block) -
      reinterpret_cast<char*>(new_end_block))/Alignment;
   first_block->m_prev_size = new_end_block->m_size;
   assert(first_block == priv_next_block(new_end_block));
   priv_mark_new_allocated_block(new_end_block);
   assert(new_end_block == priv_end_block(first_block));

   //The old end block is the new block
   block_ctrl *new_block = old_end_block;
   new_block->m_size = (reinterpret_cast<char*>(new_end_block) -
      reinterpret_cast<char*>(new_block))/Alignment;
   assert(new_block->m_size >= BlockCtrlUnits);
   priv_mark_new_allocated_block(new_block);
   assert(priv_next_block(new_block) == new_end_block);

   //Account for it as allocated first, so the deallocation below balances
   m_header.m_allocated += new_block->m_size*Alignment;

   //Now deallocate the newly created block
   this->priv_deallocate(priv_get_user_buffer(new_block));
}

//Shrinks the managed memory so the trailing free block (the one adjacent
//to the end sentinel) is given back to the underlying buffer. If the whole
//segment is a single free block, a temporary zero-byte allocation pins it
//first (freed at the end); if the block before the sentinel is in use,
//nothing can be shrunk and the function returns unchanged.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   std::size_t block1_off =
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);

   block_ctrl *first_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(this) + block1_off);
   algo_impl_t::assert_alignment(first_block);

   block_ctrl *old_end_block = priv_end_block(first_block);
   algo_impl_t::assert_alignment(old_end_block);
   assert(priv_is_allocated_block(old_end_block));
   algo_impl_t::assert_alignment(old_end_block);

   std::size_t old_end_block_size = old_end_block->m_size;

   void *unique_buffer = 0;
   block_ctrl *last_block;
   if(priv_next_block(first_block) == old_end_block){
      //The whole segment is one free block: make a zero-byte allocation so
      //a free block still exists to become the new end sentinel
      std::size_t ignore;
      unique_buffer = priv_allocate(allocate_new, 0, 0, ignore).first;
      if(!unique_buffer) return;
      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      assert(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      assert(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If the block adjacent to the sentinel is allocated we can't shrink
      if(priv_is_prev_allocated(old_end_block)) return;
      last_block = priv_prev_block(old_end_block);
   }

   std::size_t last_block_size = last_block->m_size;

   //Erase block from the free tree, since it will become the new end sentinel
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   std::size_t shrunk_border_offset = (reinterpret_cast<char*>(last_block) -
      reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);
   //Combined size keeps the stored distance back to the first block valid
   new_end_block->m_size = old_end_block_size + last_block_size;
   priv_mark_as_allocated_block(new_end_block);

   //Although the first block might be allocated, we can still store the
   //offset to the end block in its m_prev_size field, since that field
   //can't be overwritten by the previous block
   first_block->m_prev_size = new_end_block->m_size;
   assert(priv_end_block(first_block) == new_end_block);

   //Update managed buffer's size
   m_header.m_size = shrunk_border_offset;
   if(unique_buffer) priv_deallocate(unique_buffer);
}

//Formats a raw aligned buffer as one big free block followed by the "end"
//sentinel node, links the two into a cycle, and inserts the free block in
//the intrusive free-block multiset.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_add_segment(void *addr, std::size_t size)
{
   //Check alignment
   algo_impl_t::check_alignment(addr);
   //Check size
   assert(size >= (BlockCtrlBytes + EndCtrlBlockBytes));

   //Initialize the first big block and the "end" node
   block_ctrl *first_big_block = new(addr)block_ctrl;
   first_big_block->m_size = size/Alignment - EndCtrlBlockUnits;
   assert(first_big_block->m_size >= BlockCtrlUnits);

   //The "end" node is just a node of size 0 with the "end" bit set
   block_ctrl *end_block = static_cast<block_ctrl*>
      (new (reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment)SizeHolder);

   //This will overwrite the prev part of the "end" node
   priv_mark_as_free_block (first_big_block);
   //Store in both nodes the unit distance from the end node back to the
   //first block (a negative pointer difference), closing the segment cycle
   first_big_block->m_prev_size = end_block->m_size =
      (reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment;
   priv_mark_as_allocated_block(end_block);

   assert(priv_next_block(first_big_block) == end_block);
   assert(priv_next_block(end_block) == first_big_block);
   assert(priv_end_block(first_big_block) == end_block);
   assert(priv_prev_block(end_block) == first_big_block);

   //Some check to validate the algorithm, since it makes some assumptions
   //to optimize the space wasted in bookkeeping:

   //Check that the sizes of the header are placed before the rbtree
   assert(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
          < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
   //Check that the alignment is power of two (we use some optimizations based on this)
   //assert((Alignment % 2) == 0);
   //Insert it in the intrusive containers
   m_header.m_imultiset.insert(*first_big_block);
}

//Marks a freshly carved block as allocated (currently just sets the
//allocated bit; kept as a separate hook)
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_mark_new_allocated_block(block_ctrl *new_block)
{  priv_mark_as_allocated_block(new_block); }

//Returns the total size, in bytes, of the managed memory segment
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
{  return m_header.m_size;  }

//Returns free bytes: total size minus allocated bytes minus the header
//area that precedes the first block
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
{
   return m_header.m_size - m_header.m_allocated -
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);
}

//Minimum buffer size (bytes) needed to construct an allocator with
//extra_hdr_bytes of user header: main header + extra header + one minimal
//block + the end sentinel, each rounded to whole alignment units
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   get_min_size (std::size_t extra_hdr_bytes)
{
   return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
           algo_impl_t::ceil_units(extra_hdr_bytes) +
           MinBlockUnits + EndCtrlBlockUnits)*Alignment;
}

//Returns true if nothing is allocated: the free multiset must contain
//exactly one block whose size equals the whole usable area. Takes the
//header mutex for the duration.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   std::size_t block1_off =
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);
   return m_header.m_allocated == 0 &&
      m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
      (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end() &&
      m_header.m_imultiset.begin()->m_size ==
         (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}

//Consistency check: every free block must be properly aligned, the
//allocated byte count must not exceed the segment size, and total free
//bytes must fit in the usable area. Returns false on the first violation.
//Takes the header mutex for the duration.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()),
                      ie(m_header.m_imultiset.end());

   std::size_t free_memory = 0;

   //Iterate through all blocks obtaining their size
   for(; ib != ie; ++ib){
      free_memory += ib->m_size*Alignment;
      algo_impl_t::assert_alignment(&*ib);
      if(!algo_impl_t::check_alignment(&*ib))
         return false;
   }

   //Check allocated bytes are less than size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   std::size_t block1_off =
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);

   //Check free bytes are less than size
   if(free_memory > (m_header.m_size - block1_off)){
      return false;
   }
   return true;
}

//Allocates nbytes of raw memory; priv_allocate's null .first indicates
//failure (see the null check in shrink_to_fit). Takes the header mutex.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate(std::size_t nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   std::size_t ignore;
   void * ret = priv_allocate(allocate_new, nbytes, nbytes, ignore).first;
   return ret;
}

//Allocates nbytes with the requested alignment, delegating to the shared
//algorithm implementation. Takes the header mutex.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate_aligned(std::size_t nbytes, std::size_t alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}

//Typed allocation_command: forwards to priv_allocation_command with sizes
//expressed in objects of type T, then checks the returned pointer is
//suitably aligned for T.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
template<class T>
inline std::pair<T*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocation_command  (allocation_type command,    std::size_t limit_size,
                        std::size_t preferred_size, std::size_t &received_size,
                        T *reuse_ptr)
{
   std::pair<void*, bool> ret = priv_allocation_command
      (command, limit_size, preferred_size, received_size,
       static_cast<void*>(reuse_ptr), sizeof(T));
   BOOST_ASSERT(0 == ((std::size_t)ret.first % detail::alignment_of<T>::value));
   return std::pair<T *, bool>(static_cast<T*>(ret.first), ret.second);
}

//Untyped allocation_command working in objects of sizeof_object bytes.
//NOTE(review): this definition is cut off below by the extraction page
//break — the remainder continues on the next page of the original header.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::pair<void*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   raw_allocation_command  (allocation_type command,       std::size_t limit_objects,
                            std::size_t preferred_objects, std::size_t &received_objects,
                            void *reuse_ptr, std::size_t sizeof_object)
{
   //A zero object size is invalid: report failure immediately
   if(!sizeof_object)
      return std::pair<void *, bool>(static_cast<void*>(0), 0);
   if(command & try_shrink_in_place){
      bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object
         , preferred_objects*sizeof_object, received_objects);
      //Translate received bytes back into object counts
      received_objects /= sizeof_object;
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?