multi_simple_seq_fit_impl.hpp

来自「Boost provides free peer-reviewed portable C++ source libraries」· HPP 代码 · 共 972 行 · 第 1/3 页

HPP
972
字号
   //NOTE(review): this do/while is the tail of a function whose signature is on an
   //earlier page of this scrape — presumably the routine that zeroes all free
   //blocks' user memory; confirm against the full file.
   do{
      //Just clear the memory part reserved for the user
      std::memset( reinterpret_cast<char*>(block) + BlockCtrlBytes
                 , 0
                 , block->m_size*Alignment - BlockCtrlBytes);
      block = detail::get_pointer(block->m_next);
   }
   while(block != &m_header.m_root);
}

//Consistency check of the allocator's internal state.
//Walks the circular free list rooted at m_header.m_root and verifies that:
// - every free block has a non-null m_next link,
// - total accounted-allocated bytes do not exceed the segment size,
// - total free bytes found on the list do not exceed the segment size.
//Returns true when all checks pass, false otherwise.
//The whole walk runs under the header mutex.
template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   block_ctrl *block = detail::get_pointer(m_header.m_root.m_next);
   std::size_t free_memory = 0;
   //Iterate through all blocks obtaining their size
   do{
      //A free block's next pointer must always be valid
      block_ctrl *next = detail::get_pointer(block->m_next);
      if(!next){
         return false;
      }
      free_memory += block->m_size*Alignment;
      block = next;
   }
   while(block != &m_header.m_root);
   //Check allocated bytes are less than size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }
   //Check free bytes are less than size
   if(free_memory > m_header.m_size){
      return false;
   }
   return true;
}

//Allocates nbytes from the segment, or returns 0 on failure.
//Takes the header mutex and delegates to priv_allocate with
//min_size == preferred_size == nbytes; the received-size output is discarded.
template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   allocate(std::size_t nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   std::size_t ignore;
   return priv_allocate(allocate_new, nbytes, nbytes, ignore).first;
}

//Allocates nbytes aligned to "alignment" bytes, or returns 0 on failure.
//Takes the header mutex and delegates to priv_allocate_aligned.
template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   allocate_aligned(std::size_t nbytes, std::size_t alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return priv_allocate_aligned(nbytes, alignment);
}

//General-purpose allocation/expansion entry point (continues on the next page
//of this scrape with the qualified name and parameter list).
template<class MutexFamily, class VoidPointer>
inline std::pair<void *, bool>
simple_seq_fit_impl<MutexFamily, VoidPointer>::
   //Dispatches an allocation request described by "command"
   //(allocate_new / expand_fwd / shrink_in_place flags).
   //Backward expansion is not supported by this algorithm: the expand_bwd bit
   //is masked out before delegating, and backwards_multiple is ignored.
   //Returns {pointer, reuse-flag}; {0, false} when no usable command remains.
   allocation_command  (allocation_type command,   std::size_t min_size,
                        std::size_t preferred_size, std::size_t &received_size,
                        void *reuse_ptr, std::size_t backwards_multiple)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   (void)backwards_multiple;
   //This algorithm cannot expand backwards; strip the flag
   command &= ~expand_bwd;
   if(!command)
      return std::pair<void *, bool>(0, false);
   return priv_allocate(command, min_size, preferred_size, received_size, reuse_ptr);
}

//Returns the number of user-usable bytes of the allocated buffer "ptr":
//the block's total size minus the control-header bytes.
//Lock-free by design: the block's header is assumed stable while allocated.
template<class MutexFamily, class VoidPointer>
inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>::
   size(void *ptr) const
{
   //We need no synchronization since this block is not going
   //to be modified
   //Obtain the real size of the block from the control header that
   //sits immediately before the user pointer
   block_ctrl *block = reinterpret_cast<block_ctrl*>
                        (reinterpret_cast<char*>(ptr) - BlockCtrlBytes);
   return block->m_size*Alignment - BlockCtrlBytes;
}

//Multi-segment allocation: first tries a normal allocation in the current
//segments; if that fails, asks the process-wide multi_segment_services
//functor to create a new segment (at least MinBlockSize bytes), adds it,
//and retries. Returns 0 when the group is invalid or allocation still fails.
template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   multi_allocate(std::size_t nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   //Multisegment pointer. Let's try first the normal allocation
   //since it's faster.
   std::size_t ignore;
   void *addr = this->priv_allocate(allocate_new, nbytes, nbytes, ignore).first;
   if(!addr){
      //If this fails we will try the allocation through the segment
      //creator.
      std::size_t group, id;
      //Obtain the segment group of this segment
      void_pointer::get_group_and_id(this, group, id);
      if(group == 0){
         //Ooops, group 0 is not valid.
         return 0;
      }
      //Now obtain the polymorphic functor that creates
      //new segments and try to allocate again.
      boost::interprocess::multi_segment_services *p_services =
         static_cast<boost::interprocess::multi_segment_services*>
                     (void_pointer::find_group_data(group));
      assert(p_services);
      std::pair<void *, std::size_t> ret =
         p_services->create_new_segment(MinBlockSize > nbytes ? MinBlockSize : nbytes);
      if(ret.first){
         priv_add_segment(ret.first, ret.second);
         addr = this->priv_allocate(allocate_new, nbytes, nbytes, ignore).first;
      }
   }
   return addr;
}

//Tries to grow the buffer "reuse_ptr" in place, forward (via priv_expand)
//and/or backward (by absorbing part or all of a free block physically
//preceding it). On success, received_size is set to the new user size.
//When only_preferred_backwards is true, backward expansion is attempted
//only for the full preferred size; otherwise the minimum acceptable
//size is also considered. Returns the (possibly moved) user pointer,
//or 0 on failure. Caller must hold the header mutex.
template<class MutexFamily, class VoidPointer>
void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_expand_both_sides(allocation_type command
                         ,std::size_t min_size
                         ,std::size_t preferred_size
                         ,std::size_t &received_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards)
{
   typedef std::pair<block_ctrl *, block_ctrl *> prev_block_t;
   block_ctrl *reuse = block_ctrl::get_block_from_addr(reuse_ptr);
   received_size = 0;
   //Already big enough: nothing to do
   if(this->size(reuse_ptr) > min_size){
      received_size = this->size(reuse_ptr);
      return reuse_ptr;
   }
   if(command & expand_fwd){
      if(priv_expand(reuse_ptr, min_size, preferred_size, received_size))
         return reuse_ptr;
   }
   else{
      received_size = this->size(reuse_ptr);
   }
   if(command & expand_bwd){
      //Bytes already owned forward of the new start (0 when fwd expansion
      //contributed nothing); expression continues on the next page
      std::size_t extra_forward = !received_size ?
0 : received_size + BlockCtrlBytes;
      prev_block_t prev_pair = priv_prev_block_if_free(reuse);
      block_ctrl *prev = prev_pair.second;
      if(!prev){
         return 0;
      }
      //How many extra bytes we must take from the previous free block
      std::size_t needs_backwards =
         detail::get_rounded_size(preferred_size - extra_forward, Alignment);
      if(!only_preferred_backwards){
         //Settle for the minimum size, capped by what the previous block offers
         needs_backwards =
            max_value(detail::get_rounded_size(min_size - extra_forward, Alignment)
                     ,min_value(prev->get_user_bytes(), needs_backwards));
      }
      //Check if previous block has enough size
      if((prev->get_user_bytes()) >=  needs_backwards){
         //Now take all next space. This will succeed
         if(!priv_expand(reuse_ptr, received_size, received_size, received_size)){
            assert(0);
         }
         //We need a minimum size to split the previous one
         if((prev->get_user_bytes() - needs_backwards) > 2*BlockCtrlBytes){
            //Split: carve a new control header just before the grown buffer
            block_ctrl *new_block = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(reuse) - needs_backwards - BlockCtrlBytes);
            new_block->m_next = 0;
            new_block->m_size =
               BlockCtrlSize + (needs_backwards + extra_forward)/Alignment;
            prev->m_size =
               (prev->get_total_bytes() - needs_backwards)/Alignment - BlockCtrlSize;
            received_size = needs_backwards + extra_forward;
            m_header.m_allocated += needs_backwards + BlockCtrlBytes;
            return new_block->get_addr();
         }
         else{
            //Just merge the whole previous block
            block_ctrl *prev_2_block = prev_pair.first;
            //Update received size and allocation
            received_size = extra_forward + prev->get_user_bytes();
            m_header.m_allocated += prev->get_total_bytes();
            //Now unlink it from the free list
            prev_2_block->m_next = prev->m_next;
            prev->m_size = reuse->m_size + prev->m_size;
            prev->m_next = 0;
            return prev->get_addr();
         }
      }
   }
   return 0;
}

//Core allocation routine. Handles, in order:
// - shrink_in_place requests (delegated to priv_shrink),
// - in-place expansion of reuse_ptr at the preferred size,
// - a first-fit walk of the free list for a fresh block of preferred_size
//   (tracking the biggest block seen as a fallback for limit_size),
// - a last-chance in-place expansion at the minimum size.
//Returns {pointer, reused-flag}; {0, false} on failure or when
//limit_size > preferred_size (invalid request).
//Caller must hold the header mutex.
template<class MutexFamily, class VoidPointer>
std::pair<void *, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_allocate(allocation_type command
                ,std::size_t limit_size
                ,std::size_t preferred_size
                ,std::size_t &received_size
                ,void *reuse_ptr)
{
   if(command & shrink_in_place){
      bool success =
         this->priv_shrink(reuse_ptr, limit_size, preferred_size, received_size);
      return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
   }
   typedef std::pair<void *, bool> return_type;
   received_size = 0;
   if(limit_size > preferred_size)
      return return_type(0, false);
   //Number of units to request (including block_ctrl header)
   std::size_t nunits = detail::get_rounded_size(preferred_size, Alignment)/Alignment + BlockCtrlSize;
   //Get the root and the first memory block
   block_ctrl *prev                 = &m_header.m_root;
   block_ctrl *block                = detail::get_pointer(prev->m_next);
   block_ctrl *root                 = &m_header.m_root;
   block_ctrl *biggest_block        = 0;
   block_ctrl *prev_biggest_block   = 0;
   std::size_t biggest_size         = limit_size;
   //Expand in place
   //reuse_ptr, limit_size, preferred_size, received_size
   //
   if(reuse_ptr && (command & (expand_fwd | expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, preferred_size, received_size, reuse_ptr, true);
      if(ret)
         return return_type(ret, true);
   }
   if(command & allocate_new){
      received_size = 0;
      //First-fit walk of the circular free list
      while(block != root){
         //Update biggest block pointers
         if(block->m_size > biggest_size){
            prev_biggest_block = prev;
            biggest_size  = block->m_size;
            biggest_block = block;
         }
         void *addr = this->priv_check_and_allocate(nunits, prev, block, received_size);
         if(addr) return return_type(addr, false);
         //Bad luck, let's check next block
         prev  = block;
         block = detail::get_pointer(block->m_next);
      }
      //Bad luck finding preferred_size, now if we have any biggest_block
      //try with this block
      if(biggest_block){
         received_size = biggest_block->m_size*Alignment - BlockCtrlSize;
         nunits = detail::get_rounded_size(limit_size, Alignment)/Alignment + BlockCtrlSize;
         void *ret = this->priv_check_and_allocate
                        (nunits, prev_biggest_block, biggest_block, received_size);
         if(ret)
            return return_type(ret, false);
      }
   }
   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (expand_fwd | expand_bwd))){
      return return_type(priv_expand_both_sides
         (command, limit_size, preferred_size, received_size, reuse_ptr, false), true);
   }
   return return_type(0, false);
}

//Returns the block physically following "ptr" if it lies inside the managed
//segment and is free (its m_next link is non-null); otherwise returns 0.
template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
   simple_seq_fit_impl<MutexFamily, VoidPointer>::
      priv_next_block_if_free
         (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
{
   //Take the address where the next block should go
   block_ctrl *next_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
   //Check if the adjacent block is in the managed segment
   std::size_t distance = (reinterpret_cast<char*>(next_block) - reinterpret_cast<char*>(this))/Alignment;
   if(distance >= (m_header.m_size/Alignment)){
      //"next_block" does not exist so we can't expand "block"
      return 0;
   }
   //A null m_next marks an allocated block — not usable for expansion
   if(!next_block->m_next)
      return 0;
   return next_block;
}

//Searches the free list for the free block physically preceding "ptr".
//Returns {block-before-prev-on-the-list, prev}; body continues on the
//next page of this scrape.
template<class MutexFamily, class VoidPointer>
inline
   std::pair<typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
            ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *>
   simple_seq_fit_impl<MutexFamily, VoidPointer>::
      priv_prev_block_if_free
         (typename
simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr){   typedef std::pair<block_ctrl *, block_ctrl *> prev_pair_t;   //Take the address where the previous block should go   block_ctrl *root           = &m_header.m_root;   block_ctrl *prev_2_block   = root;   block_ctrl *prev_block = detail::get_pointer(root->m_next);   while((reinterpret_cast<char*>(prev_block) + prev_block->m_size*Alignment)            != (reinterpret_cast<char*>(ptr))         && prev_block != root){

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?