/***
* ==++==
*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Microsoft would like to acknowledge that this concurrency data structure implementation
* is based on Intel's implementation in its Threading Building Blocks ("Intel Material").
*
* ==--==
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* concurrent_vector.cpp
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/
/*
Intel Material Copyright 2005-2008 Intel Corporation. All Rights Reserved.
*/
#include "concrtinternal.h"
#include "concurrent_vector.h"
#include "cds_cache_aligned_allocator.h"
#include <stdexcept>
#if defined(_MSC_VER) && defined(_Wp64)
// Workaround for compiler warnings in /Wp64 mode
#pragma warning (disable: 4267)
#endif /* _MSC_VER && _Wp64 */
using namespace std;
namespace Concurrency
{
namespace details
{
class _Concurrent_vector_base_v4::_Helper
{
public:
// memory page size
static const _Size_type page_size = 4096;
inline static bool incompact_predicate(_Size_type size)
{
return size < page_size || ((size-1)%page_size < page_size/2 && size < page_size * 128);
}
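// Return the number of segments that currently have storage published, i.e. the index of the first empty slot.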
inline static _Size_type find_segment_end(const _Concurrent_vector_base_v4 &v)
{
_Segment_index_t u = v._My_segment==(&(v._My_storage[0])) ? _Pointers_per_short_table
: _Pointers_per_long_table;
_Segment_index_t k = 0;
while( k < u && v._My_segment[k]._My_array )
++k;
return k;
}
// Assign the first segment size. k is the index of the last segment to be allocated, not a count of segments.
static void assign_first_segment_if_necessary(_Concurrent_vector_base_v4 &v, _Segment_index_t k)
{
if( !v._My_first_block )
{
v._My_first_block._CompareAndSwap(k+1, 0); // store number of segments
}
}
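// Allocate raw storage for n elements through the vector's allocator callback; throws bad_alloc on failure.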
inline static void *allocate_segment(_Concurrent_vector_base_v4 &v, _Size_type n)
{
void *_Ptr = v._My_vector_allocator_ptr(v, n);
if(!_Ptr)
throw bad_alloc(); // check for bad allocation, throw exception
return _Ptr;
}
// Publish segment so other threads can see it.
inline static void publish_segment( _Segment_t& s, void* rhs )
{
_Subatomic_impl<sizeof s._My_array>::_StoreWithRelease(s._My_array, rhs);
}
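// Allocate segment k (or, for segments inside the first block, map it onto the block's storage),
// publish the pointer for other threads, and return the segment's capacity in elements.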
inline static _Size_type enable_segment(_Concurrent_vector_base_v4 &v, _Size_type k, _Size_type element_size)
{
_ASSERTE( !v._My_segment[k]._My_array ); // concurrent operation during growth?
if( !k )
{
assign_first_segment_if_necessary(v, _Default_initial_segments-1);
try
{
publish_segment(v._My_segment[0], allocate_segment(v, _Segment_size(v._My_first_block) ) );
}
catch(...)
{
// Intercept the exception, mark the segment with _BAD_ALLOC_MARKER, then re-throw.
publish_segment(v._My_segment[0], _BAD_ALLOC_MARKER);
throw;
}
return 2;
}
_Size_type m = _Segment_size(k);
if( !v._My_first_block )
SpinwaitWhileEq( v._My_first_block, _Segment_index_t(0) );
if( k < v._My_first_block )
{
_Segment_t* s = v._My_segment;
// s[0]._My_array is changed only once ( 0 -> !0 ) and points to uninitialized memory
void *array0 = _Subatomic_impl<sizeof s[0]._My_array>::_LoadWithAquire(s[0]._My_array);
if( !array0 )
{
details::SpinwaitWhileEq( s[0]._My_array, (void*)0 );
array0 = _Subatomic_impl<sizeof s[0]._My_array>::_LoadWithAquire(s[0]._My_array);
}
if( array0 <= _BAD_ALLOC_MARKER ) // check for _BAD_ALLOC_MARKER of initial segment
{
publish_segment(s[k], _BAD_ALLOC_MARKER); // and assign _BAD_ALLOC_MARKER here
throw std::bad_alloc();
}
publish_segment(s[k],
static_cast<void*>( static_cast<char*>(array0) + _Segment_base(k)*element_size ) );
}
else
{
try
{
publish_segment(v._My_segment[k], allocate_segment(v, m));
}
catch(...)
{
// Intercept the exception, mark the segment with _BAD_ALLOC_MARKER, then re-throw.
publish_segment(v._My_segment[k], _BAD_ALLOC_MARKER);
throw;
}
}
return m;
}
inline static void extend_table_if_necessary(_Concurrent_vector_base_v4 &v, _Size_type k)
{
if(k >= _Pointers_per_short_table && v._My_segment == v._My_storage)
extend_segment_table(v);
}
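// Replace the embedded short table with a zero-initialized long table, copying over the short-table
// entries once their publishers have finished; if another thread won the race, free our copy.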
static void extend_segment_table(_Concurrent_vector_base_v4 &v)
{
_Segment_t* s = (_Segment_t*)NFS_Allocate( _Pointers_per_long_table, sizeof(_Segment_t), NULL );
// if( !s ) throw bad_alloc() -- implemented in NFS_Allocate
memset( s, 0, _Pointers_per_long_table*sizeof(_Segment_t) );
// If other threads are trying to set pointers in the short segment, wait for them to finish their
// assignments before we copy the short segment to the long segment. Note: grow_to_at_least depends on it.
for(_Segment_index_t i = 0; i < _Pointers_per_short_table; i++)
{
if(!v._My_storage[i]._My_array)
SpinwaitWhileEq(v._My_storage[i]._My_array, (void*)0);
}
for( _Segment_index_t i = 0; i < _Pointers_per_short_table; i++)
s[i] = v._My_storage[i];
if( v._My_segment._CompareAndSwap( s, v._My_storage ) != v._My_storage )
NFS_Free( s );
}
};
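// The destructor releases only the long segment table (if one was allocated); the segments' element
// storage must already have been released by the derived container.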
_CRTIMP2 _Concurrent_vector_base_v4::~_Concurrent_vector_base_v4()
{
_Segment_t* s = _My_segment;
if( s != _My_storage )
{
// Clear short segment.
for( _Segment_index_t i = 0; i < _Pointers_per_short_table; i++)
_My_storage[i]._My_array = NULL;
_My_segment = _My_storage;
NFS_Free( s );
}
}
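// Map an element index to the index of the segment that holds it: floor(log2(index | 1)).
// For example, indices 0-1 fall in segment 0, 2-3 in segment 1, 4-7 in segment 2, and so on.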
_CRTIMP2 _Concurrent_vector_base_v4::_Segment_index_t __cdecl _Concurrent_vector_base_v4::_Segment_index_of( _Size_type _Index )
{
return _Segment_index_t( Log2( _Index|1 ) );
}
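// Capacity is the total number of elements covered by the segments allocated so far,
// i.e. the base index of the first unallocated segment.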
_CRTIMP2 _Concurrent_vector_base_v4::_Size_type _Concurrent_vector_base_v4::_Internal_capacity() const
{
return _Segment_base( _Helper::find_segment_end(*this) );
}
_CRTIMP2 void _Concurrent_vector_base_v4::_Internal_throw_exception(_Size_type t) const
{
switch(t)
{
case 0: throw out_of_range("Index out of range");
case 1: throw out_of_range("Index out of segments table range");
case 2: throw range_error ("Index is inside segment which failed to be allocated");
}
}
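// Grow capacity to at least n elements by enabling every segment up to the one containing index n-1.
// On failure, the partially enabled segment slot is cleared before the exception is re-thrown.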
_CRTIMP2 void _Concurrent_vector_base_v4::_Internal_reserve( _Size_type n, _Size_type element_size, _Size_type max_size )
{
if( n>max_size )
throw length_error("argument to concurrent_vector::reserve() exceeds concurrent_vector::max_size()");
_Helper::assign_first_segment_if_necessary(*this, _Segment_index_of(n-1));
_Segment_index_t k = _Helper::find_segment_end(*this);
try
{
for (; _Segment_base(k) < n; ++k)
{
_Helper::extend_table_if_necessary(*this, k);
_Helper::enable_segment(*this, k, element_size);
}
}
catch (...)
{
// repair and rethrow
_My_segment[k]._My_array = NULL;
throw;
}
}
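// Copy-construct the contents of src into this freshly constructed vector, one segment at a time.
// If a source segment is missing or marked broken, the copy stops there and the size is truncated to that point.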
_CRTIMP2 void _Concurrent_vector_base_v4::_Internal_copy( const _Concurrent_vector_base_v4& src, _Size_type element_size, _My_internal_array_op2 copy )
{
_Size_type n = src._My_early_size;
_My_early_size = n;
_My_segment = _My_storage;
if( n )
{
_Helper::assign_first_segment_if_necessary(*this, _Segment_index_of(n));
_Size_type b;
for( _Segment_index_t k=0; (b=_Segment_base(k))<n; ++k )
{
if( (src._My_segment == (_Segment_t*)src._My_storage && k >= _Pointers_per_short_table)
|| src._My_segment[k]._My_array <= _BAD_ALLOC_MARKER )
{
_My_early_size = b;
break;
}
_Helper::extend_table_if_necessary(*this, k);
_Size_type m = _Helper::enable_segment(*this, k, element_size);
if( m > n-b )
m = n-b;
copy( _My_segment[k]._My_array, src._My_segment[k]._My_array, m );
}
}
}
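// Assignment helper: first destroy any surplus elements beyond src's size, then per segment assign over
// the elements that already exist in the destination and copy-construct the rest from src.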
_CRTIMP2 void _Concurrent_vector_base_v4::_Internal_assign( const _Concurrent_vector_base_v4& src, _Size_type element_size, _My_internal_array_op1 destroy, _My_internal_array_op2 assign, _My_internal_array_op2 copy )
{
_Size_type n = src._My_early_size;
while( _My_early_size>n )
{
_Segment_index_t k = _Segment_index_of( _My_early_size-1 );
_Size_type b=_Segment_base(k);
_Size_type new_end = b>=n ? b : n;
_ASSERTE( _My_early_size>new_end );
if( _My_segment[k]._My_array <= _BAD_ALLOC_MARKER) // check whether the vector was broken earlier
throw std::bad_alloc();
// destructors are supposed to not throw any exceptions
destroy( (char*)_My_segment[k]._My_array+element_size*(new_end-b), _My_early_size-new_end );
_My_early_size = new_end;
}
_Size_type dst_initialized_size = _My_early_size;
_My_early_size = n;
_Helper::assign_first_segment_if_necessary(*this, _Segment_index_of(n));
_Size_type b;
for( _Segment_index_t k=0; (b=_Segment_base(k))<n; ++k )
{
_Helper::extend_table_if_necessary(*this, k);
if(!_My_segment[k]._My_array)
_Helper::enable_segment(*this, k, element_size);
if( (src._My_segment == (_Segment_t*)src._My_storage && k >= _Pointers_per_short_table)
|| src._My_segment[k]._My_array <= _BAD_ALLOC_MARKER ) // if source is damaged
{
_My_early_size = b;
break;
}
_Size_type m = k? _Segment_size(k) : 2;
if( m > n-b ) m = n-b;
_Size_type a = 0;
if( dst_initialized_size>b )
{
a = dst_initialized_size-b;
if( a>m )
a = m;
assign( _My_segment[k]._My_array, src._My_segment[k]._My_array, a );
m -= a;
a *= element_size;
}
if( m>0 )
copy( (char*)_My_segment[k]._My_array+a, (char*)src._My_segment[k]._My_array+a, m );
}
_ASSERTE( src._My_early_size==n ); // detected use of concurrent_vector::operator= with a right-hand side that was concurrently modified
}
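// Atomically claim the next index, make sure the segment that holds it is allocated (or wait for the
// thread that is allocating it), and return a pointer to the raw slot for in-place construction.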
_CRTIMP2 void* _Concurrent_vector_base_v4::_Internal_push_back( _Size_type element_size, _Size_type& index )
{
_Size_type tmp = _My_early_size++;
index = tmp;
_Segment_index_t k_old = _Segment_index_of( tmp );
_Size_type base = _Segment_base(k_old);
_Helper::extend_table_if_necessary(*this, k_old);
_Segment_t& s = _My_segment[k_old];
if ( !_Subatomic_impl<sizeof s._My_array>::_LoadWithAquire(s._My_array) )
{
// do not check for _BAD_ALLOC_MARKER because it's hard to recover after _BAD_ALLOC_MARKER correctly
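// --- The listing breaks off at this point. What follows is a hedged sketch of the remainder of
// --- _Internal_push_back, modeled on the equivalent Intel TBB internal_push_back logic and reusing
// --- only names that already appear above; it is not the verbatim Microsoft source.
if( base == tmp )
{
// This thread claimed the first slot of the segment, so it is responsible for enabling it.
_Helper::enable_segment(*this, k_old, element_size);
}
else
{
// Another thread is enabling the segment; wait until it publishes the array pointer.
SpinwaitWhileEq( s._My_array, (void*)0 );
}
}
if( s._My_array <= _BAD_ALLOC_MARKER ) // give up if the segment allocation failed earlier
throw bad_alloc();
// Return the raw slot; the caller constructs the element in place.
return static_cast<void*>( static_cast<char*>(s._My_array) + element_size*(tmp-base) );
}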