// rbtree_best_fit.hpp — excerpt (part 1 of 4, file is 1,348 lines) of a header
// from Boost ("Boost provides free peer-reviewed portable C++ source libraries"):
// the red-black-tree best-fit shared-memory allocation algorithm
// (boost/interprocess/mem_algo/rbtree_best_fit.hpp).
block_ctrl *block = priv_get_block(ptr); std::size_t old_block_units = block->m_size; //The block must be marked as allocated and the sizes must be equal assert(priv_is_allocated_block(block)); //assert(old_block_units == priv_tail_size(block)); //Put this to a safe value received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk; if(received_size >= preferred_size || received_size >= min_size) return true; //Now translate it to Alignment units const std::size_t min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk); const std::size_t preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk); //Some parameter checks assert(min_user_units <= preferred_user_units); block_ctrl *next_block; if(priv_is_allocated_block(next_block = priv_next_block(block))){ return received_size >= min_size ? true : false; } algo_impl_t::assert_alignment(next_block); //Is "block" + "next_block" big enough? const std::size_t merged_units = old_block_units + next_block->m_size; //Now get the expansion size const std::size_t merged_user_units = merged_units - AllocatedCtrlUnits; if(merged_user_units < min_user_units){ received_size = merged_units*Alignment - UsableByPreviousChunk; return false; } //Now get the maximum size the user can allocate std::size_t intended_user_units = (merged_user_units < preferred_user_units) ? 
merged_user_units : preferred_user_units; //These are total units of the merged block (supposing the next block can be split) const std::size_t intended_units = AllocatedCtrlUnits + intended_user_units; //Check if we can split the next one in two parts if((merged_units - intended_units) >= BlockCtrlUnits){ //This block is bigger than needed, split it in //two blocks, the first one will be merged and //the second's size will be the remaining space assert(next_block->m_size == priv_next_block(next_block)->m_prev_size); const std::size_t rem_units = merged_units - intended_units; //Check if we we need to update the old next block in the free blocks tree //If the new size fulfills tree invariants, we just need to replace the node //(the block start has been displaced), otherwise erase() + insert(). // //This fixup must be done in two parts, because the new next block might //overwrite the tree hook of the old next block. So we first erase the //old if needed and we'll insert the new one after creating the new next imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block)); const bool size_invariants_broken = (next_block->m_size - rem_units ) < BlockCtrlUnits || (old_next_block_it != m_header.m_imultiset.begin() && (--imultiset_iterator(old_next_block_it))->m_size > rem_units); if(size_invariants_broken){ m_header.m_imultiset.erase(old_next_block_it); } //This is the remaining block block_ctrl *rem_block = new(reinterpret_cast<block_ctrl*> (reinterpret_cast<char*>(block) + intended_units*Alignment))block_ctrl; rem_block->m_size = rem_units; algo_impl_t::assert_alignment(rem_block); assert(rem_block->m_size >= BlockCtrlUnits); priv_mark_as_free_block(rem_block); //Now the second part of the fixup if(size_invariants_broken) m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block); else m_header.m_imultiset.replace_node(old_next_block_it, *rem_block); //Write the new length block->m_size = intended_user_units + AllocatedCtrlUnits; 
//NOTE(review): the lines below up to the first "return true;}" are the TAIL of a
//member function (the in-place expansion path) whose signature lies outside this
//excerpt. Reproduced verbatim; do not treat as a free-standing statement list.
   assert(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //There is no free space to create a new node: just merge both blocks
   else{
      //Now we have to update the data in the tree
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
      //Write the new length
      block->m_size = merged_units;
      assert(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   priv_mark_as_allocated_block(block);
   //Report back the usable bytes of the (possibly grown) block
   received_size = (block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}

//!Returns the address of the block physically preceding ptr.
//!Only valid when the previous block is free: m_prev_size (in Alignment
//!units) is only maintained for free predecessors, hence the assert.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
  (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   assert(!ptr->m_prev_allocated);
   return reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
}

//!Returns true if the block physically preceding ptr is allocated.
//!Fast path reads the cached m_prev_allocated bit; the slow path only
//!cross-checks (in debug builds) that the cached bit agrees with the
//!previous block's own allocation state.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
  (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   if(ptr->m_prev_allocated){
      return true;
   }
   else{
      block_ctrl *prev = priv_prev_block(ptr);
      (void)prev;   //only used by the debug assertion below
      assert(!priv_is_allocated_block(prev));
      return false;
   }
}

//!Returns the end-of-segment sentinel block of the segment that starts with
//!first_segment_block. The first block of a segment stores, in m_prev_size,
//!the backwards distance to the end block; note the assert(end_block >
//!first_segment_block): the subtraction is expected to wrap around the
//!segment so the sentinel lies at a HIGHER address (upstream Boost layout
//!invariant — confirm against the full header if this looks surprising).
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
  (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
{
   assert(first_segment_block->m_prev_allocated);
   block_ctrl *end_block = reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(first_segment_block) - first_segment_block->m_prev_size*Alignment);
   (void)end_block;
   assert(priv_is_allocated_block(end_block));
   assert(end_block > first_segment_block);
   return end_block;
}

//!Returns the address of the block physically following ptr
//!(ptr plus its own size, measured in Alignment units).
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
  (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   return reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
}

//!Returns true if block is marked allocated. In debug builds also verifies
//!the redundant encoding: the next block's m_prev_allocated bit must agree
//!with this block's m_allocated bit.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
  (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   bool allocated = block->m_allocated != 0;
   block_ctrl *next_block = reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment);
   bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
   (void)next_block_prev_allocated;   //only used by the debug assertion below
   assert(allocated == next_block_prev_allocated);
   return allocated;
}

//!Marks block as allocated, updating both copies of the state:
//!the block's own m_allocated bit and the successor's m_prev_allocated bit.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
  (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   //assert(!priv_is_allocated_block(block));
   block->m_allocated = 1;
   reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(block)+ block->m_size*Alignment)->m_prev_allocated = 1;
}

//!Marks block as free: clears both allocation bits (own m_allocated and the
//!successor's m_prev_allocated) and publishes this block's size into the
//!successor's m_prev_size so that priv_prev_block can navigate backwards.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
  (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 0;
   reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment)->m_prev_allocated = 0;
   //assert(!priv_is_allocated_block(ptr));
   priv_next_block(block)->m_prev_size = block->m_size;
}

//!Carves nunits (Alignment units, control overhead included) out of the free
//!block 'block', splitting off the remainder as a new free block when it is
//!large enough to hold a block_ctrl. Removes/repositions nodes in the free
//!multiset, marks the result allocated, reports the usable byte count in
//!received_size and returns the user buffer. Returns 0 (after assert(0))
//!only if the block is smaller than requested — a caller logic error.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (std::size_t nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,std::size_t &received_size)
{
   std::size_t upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks, the first's size will be "units" and
      //the second's size "block->m_size-units"
      std::size_t block_old_size = block->m_size;
      block->m_size = nunits;
      assert(block->m_size >= BlockCtrlUnits);

      //This is the remaining block (placement-new a block_ctrl header
      //at the split point)
      block_ctrl *rem_block = new(reinterpret_cast<block_ctrl*>
                     (reinterpret_cast<char*>(block) + Alignment*nunits))block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = block_old_size - nunits;
      assert(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      imultiset_iterator it_hint;   //NOTE(review): unused here, kept verbatim
      //If the shrunken remainder still sorts at (or after) the old node's
      //position, an O(1) in-place node replacement keeps the multiset valid;
      //otherwise fall back to erase + insert-at-begin.
      if(it_old == m_header.m_imultiset.begin()
         || (--imultiset_iterator(it_old))->m_size < rem_block->m_size){
         //option a: slow but secure
         //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
         //option b: Construct an empty node and swap
         //Imultiset::init_node(*rem_block);
         //block->swap_nodes(*rem_block);
         //option c: replace the node directly
         m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block);
      }
      else{
         //Now we have to update the data in the tree
         m_header.m_imultiset.erase(it_old);
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
      }
   }
   else if (block->m_size >= nunits){
      //Exact-ish fit: no room to split, hand out the whole block
      m_header.m_imultiset.erase(it_old);
   }
   else{
      assert(0);
      return 0;
   }
   //We need block_ctrl for deallocation stuff, so
   //return memory user can overwrite
   m_header.m_allocated += block->m_size*Alignment;
   received_size = (block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

   //Mark the block as allocated
   priv_mark_as_allocated_block(block);

   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   std::memset(t, 0, sizeof(*t));
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}

//!Public deallocation entry point: takes the segment mutex and
//!delegates to priv_deallocate. Null is a no-op.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
{
   if(!addr)   return;
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}

//!Frees the block owning addr: coalesces with free physical neighbours,
//!fixes up the free-block multiset (using an O(1) replace_node shortcut
//!when the merged size does not change the node's tree position), and
//!finally marks the resulting block free. Caller must hold the lock.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   block_ctrl *block = priv_get_block(addr);

   //The blocks must be marked as allocated and the sizes must be equal
   assert(priv_is_allocated_block(block));
//   assert(block->m_size == priv_tail_size(block));

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   std::size_t block_old_size = Alignment*block->m_size;
   assert(m_header.m_allocated >= block_old_size);

   //Update used memory count
   m_header.m_allocated -= block_old_size;

   //The block to insert in the tree
   block_ctrl *block_to_insert = block;

   //Get the next block
   block_ctrl *next_block  = priv_next_block(block);
   bool merge_with_prev    = !priv_is_prev_allocated(block);
   bool merge_with_next    = !priv_is_allocated_block(next_block);

   //Merge logic. First just update block sizes, then fix free blocks tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous is free
      if(merge_with_prev){
         //Get the previous block
         block_ctrl *prev_block = priv_prev_block(block);
         prev_block->m_size += block->m_size;
         assert(prev_block->m_size >= BlockCtrlUnits);
         block_to_insert = prev_block;
      }
      //Merge if the next is free
      if(merge_with_next){
         block_to_insert->m_size += next_block->m_size;
         assert(block_to_insert->m_size >= BlockCtrlUnits);
         //When merging with prev too, next_block's node becomes redundant:
         //prev_block's node (already in the tree) represents the merge.
         if(merge_with_prev)
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
      }

      bool only_merge_next = !merge_with_prev && merge_with_next;
      //The tree node that currently represents the (pre-merge) free space:
      //next_block's node when only merging forward, else prev_block's node.
      imultiset_iterator free_block_to_check_it
         (Imultiset::s_iterator_to(only_merge_next ? *next_block : *block_to_insert));
      imultiset_iterator was_bigger_it(free_block_to_check_it);

      //Now try to shortcut erasure + insertion (O(log(N))) with
      //a O(1) operation if merging does not alter tree positions
      if(++was_bigger_it != m_header.m_imultiset.end()   &&
         block_to_insert->m_size > was_bigger_it->m_size ){
         //Grown block would sort past its successor: full reposition needed
         m_header.m_imultiset.erase(free_block_to_check_it);
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
      }
      else if(only_merge_next){
         //Same tree position, but the node header moved to block's address:
         //replace the node in place without rebalancing
         m_header.m_imultiset.replace_node(free_block_to_check_it, *block_to_insert);
      }
      //else: prev_block's existing node already sits at a valid position
   }
   else{
      //No neighbours to merge with: plain insert of the freed block
      m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
   }
   //Clear allocation bits and publish size to the successor — done last,
   //after all tree surgery on the involved nodes
   priv_mark_as_free_block(block_to_insert);
}

/// @endcond

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
// [Viewer UI text, not source — translated from Chinese]
// Keyboard shortcuts: Copy code Ctrl+C · Search code Ctrl+F · Fullscreen F11 ·
// Increase font size Ctrl+= · Decrease font size Ctrl+- · Show shortcuts ?