rbtree_best_fit.hpp
来自「Boost provides free peer-reviewed portab」· HPP 代码 · 共 1,348 行 · 第 1/4 页
HPP
1,348 行
return std::pair<void *, bool> ((success ? reuse_ptr : 0), true); } return priv_allocation_command (command, limit_objects, preferred_objects, received_objects, reuse_ptr, sizeof_object);}template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>inline std::pair<void*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>:: priv_allocation_command (allocation_type command, std::size_t limit_size, std::size_t preferred_size,std::size_t &received_size, void *reuse_ptr, std::size_t sizeof_object){ std::pair<void*, bool> ret; std::size_t max_count = m_header.m_size/sizeof_object; if(limit_size > max_count || preferred_size > max_count){ ret.first = 0; return ret; } std::size_t l_size = limit_size*sizeof_object; std::size_t p_size = preferred_size*sizeof_object; std::size_t r_size; { //----------------------- boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header); //----------------------- ret = priv_allocate(command, l_size, p_size, r_size, reuse_ptr, sizeof_object); } received_size = r_size/sizeof_object; return ret;}template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>:: size(const void *ptr) const{ //We need no synchronization since this block's size is not going //to be modified by anyone else //Obtain the real size of the block return (priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;}template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory(){ //----------------------- boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header); //----------------------- imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end()); //Iterate through all blocks obtaining their size for(; ib != ie; ++ib){ //Just clear user the memory part reserved for the user std::memset( 
reinterpret_cast<char*>(&*ib) + BlockCtrlBytes , 0 , ib->m_size*Alignment - BlockCtrlBytes); }}template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>:: priv_expand_both_sides(allocation_type command ,std::size_t min_size ,std::size_t preferred_size ,std::size_t &received_size ,void *reuse_ptr ,bool only_preferred_backwards ,std::size_t backwards_multiple){ algo_impl_t::assert_alignment(reuse_ptr); if(command & expand_fwd){ if(priv_expand(reuse_ptr, min_size, preferred_size, received_size)) return reuse_ptr; } else{ received_size = this->size(reuse_ptr); if(received_size >= preferred_size || received_size >= min_size) return reuse_ptr; } if(backwards_multiple){ BOOST_ASSERT(0 == (min_size % backwards_multiple)); BOOST_ASSERT(0 == (preferred_size % backwards_multiple)); } if(command & expand_bwd){ //Obtain the real size of the block block_ctrl *reuse = priv_get_block(reuse_ptr); //Sanity check //assert(reuse->m_size == priv_tail_size(reuse)); algo_impl_t::assert_alignment(reuse); block_ctrl *prev_block; //If the previous block is not free, there is nothing to do if(priv_is_prev_allocated(reuse)){ return 0; } prev_block = priv_prev_block(reuse); assert(!priv_is_allocated_block(prev_block)); //Some sanity checks assert(prev_block->m_size == reuse->m_prev_size); algo_impl_t::assert_alignment(prev_block); std::size_t needs_backwards_aligned; std::size_t lcm; if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed ( backwards_multiple , received_size , only_preferred_backwards ? preferred_size : min_size , lcm, needs_backwards_aligned)){ return 0; } //Check if previous block has enough size if(std::size_t(prev_block->m_size*Alignment) >= needs_backwards_aligned){ //Now take all next space. 
This will succeed if(command & expand_fwd){ std::size_t received_size2; if(!priv_expand(reuse_ptr, received_size, received_size, received_size2)){ assert(0); } assert(received_size = received_size2); } //We need a minimum size to split the previous one if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){ block_ctrl *new_block = reinterpret_cast<block_ctrl *> (reinterpret_cast<char*>(reuse) - needs_backwards_aligned); //Free old previous buffer new_block->m_size = AllocatedCtrlUnits + (needs_backwards_aligned + (received_size - UsableByPreviousChunk))/Alignment; assert(new_block->m_size >= BlockCtrlUnits); priv_mark_new_allocated_block(new_block); prev_block->m_size = (reinterpret_cast<char*>(new_block) - reinterpret_cast<char*>(prev_block))/Alignment; assert(prev_block->m_size >= BlockCtrlUnits); priv_mark_as_free_block(prev_block); //Update the old previous block in the free blocks tree //If the new size fulfills tree invariants do nothing, //otherwise erase() + insert() { imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block)); imultiset_iterator was_smaller_it(prev_block_it); if(prev_block_it != m_header.m_imultiset.begin() && (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){ m_header.m_imultiset.erase(prev_block_it); m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block); } } received_size = needs_backwards_aligned + received_size; m_header.m_allocated += needs_backwards_aligned; //Check alignment algo_impl_t::assert_alignment(new_block); //If the backwards expansion has remaining bytes in the //first bytes, fill them with a pattern void *p = priv_get_user_buffer(new_block); void *user_ptr = reinterpret_cast<char*>(p); assert((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0); algo_impl_t::assert_alignment(user_ptr); return user_ptr; } //Check if there is no place to create a new block and //the whole new block is multiple of the backwards 
expansion multiple else if(prev_block->m_size >= needs_backwards_aligned/Alignment && 0 == ((prev_block->m_size*Alignment) % lcm)) { //Erase old previous block, since we will change it m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block)); //Just merge the whole previous block //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple) received_size = received_size + prev_block->m_size*Alignment; m_header.m_allocated += prev_block->m_size*Alignment; //Now update sizes prev_block->m_size = prev_block->m_size + reuse->m_size; assert(prev_block->m_size >= BlockCtrlUnits); priv_mark_new_allocated_block(prev_block); //If the backwards expansion has remaining bytes in the //first bytes, fill them with a pattern void *user_ptr = priv_get_user_buffer(prev_block); assert((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0); algo_impl_t::assert_alignment(user_ptr); return user_ptr; } else{ //Alignment issues } } } return 0;}template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_iterator rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>:: allocate_many(std::size_t elem_bytes, std::size_t num_elements){ //----------------------- boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header); //----------------------- return algo_impl_t::allocate_many(this, elem_bytes, num_elements);}template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>:: deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_iterator it){ //----------------------- boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header); //----------------------- while(it){ void *addr = &*it; ++it; this->priv_deallocate(addr); }}template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>inline 
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_iterator
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   //Allocates n_elements buffers with per-element sizes taken from
   //elem_sizes (scaled by sizeof_element), under a single lock acquisition.
   allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element);
}

//Core allocation dispatcher (byte-based). Handles shrink_in_place,
//in-place expansion (forward/backward) and fresh best-fit allocation from
//the size-ordered free-block multiset. Caller must already hold the
//segment lock. Returns {buffer, reused-flag}; received_size is in bytes.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
std::pair<void *, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(allocation_type command
                ,std::size_t limit_size
                ,std::size_t preferred_size
                ,std::size_t &received_size
                ,void *reuse_ptr
                ,std::size_t backwards_multiple)
{
   //Remove me. Forbid backwards allocation
   //command &= (~expand_bwd);

   if(command & shrink_in_place){
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, preferred_size, received_size);
      //second == true marks the pointer as reused even when shrink failed
      return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
   }

   typedef std::pair<void *, bool> return_type;
   received_size = 0;

   if(limit_size > preferred_size)
      return return_type(static_cast<void*>(0), false);

   //Number of units to request (including block_ctrl header)
   std::size_t preferred_units = priv_get_total_units(preferred_size);

   //Number of units to request (including block_ctrl header)
   std::size_t limit_units = priv_get_total_units(limit_size);

   //Expand in place, first trying to satisfy preferred_size
   //(only_preferred_backwards == true)
   if(reuse_ptr && (command & (expand_fwd | expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, preferred_size, received_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return return_type(ret, true);
   }

   if(command & allocate_new){
      size_block_ctrl_compare comp;
      //Best fit: smallest free block whose size is >= preferred_units
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));

      if(it != m_header.m_imultiset.end()){
         return return_type(this->priv_check_and_allocate
            (preferred_units, detail::get_pointer(&*it), received_size), false);
      }

      //No block can hold preferred_units (it == end()); fall back to the
      //largest free block if it still satisfies limit_units
      if(it != m_header.m_imultiset.begin()&&
         (--it)->m_size >= limit_units){
         return return_type(this->priv_check_and_allocate
            (it->m_size, detail::get_pointer(&*it), received_size), false);
      }
   }

   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (expand_fwd | expand_bwd))){
      return return_type(priv_expand_both_sides
         (command, limit_size, preferred_size, received_size, reuse_ptr, false, backwards_multiple), true);
   }
   return return_type(static_cast<void*>(0), false);
}

//Maps a user pointer back to its control header, AllocatedCtrlBytes
//before the user buffer.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
{
   return const_cast<block_ctrl*>
      (reinterpret_cast<const block_ctrl*>
         (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
}

//Inverse of priv_get_block: returns the user buffer that starts
//AllocatedCtrlBytes past the control header.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);
}

//Converts a requested user byte count into allocation units (Alignment-sized),
//adding header overhead and clamping to the minimum block size.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_total_units(std::size_t userbytes)
{
   //UsableByPreviousChunk bytes come for free from the previous block's tail
   if(userbytes < UsableByPreviousChunk)
      userbytes = UsableByPreviousChunk;
   std::size_t units = detail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
   if(units < BlockCtrlUnits) units = BlockCtrlUnits;
   return units;
}

//In-place forward expansion of the buffer at ptr (definition continues
//past this excerpt).
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand (void *ptr
               ,const std::size_t min_size
               ,const std::size_t preferred_size
               ,std::size_t &received_size)
{
   //Obtain the real size of the block
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?