// concurrent_vector.h
}
/// <summary>
/// Erases the elements of the concurrent vector and assigns to it either <paramref name="_N"/> copies of <paramref name="_Item"/>,
/// or values specified by the iterator range [<paramref name="_Begin"/>, <paramref name="_End"/>).
/// This method is not concurrency-safe.
/// </summary>
/// <typeparam name="_InputIterator">
/// The type of the specified iterator.
/// </typeparam>
/// <param name="_Begin">
/// An iterator to the first element of the source range.
/// </param>
/// <param name="_End">
/// An iterator to one past the last element of the source range.
/// </param>
/// <remarks>
/// <c>assign</c> is not concurrency-safe. You must ensure that no other threads are invoking methods
/// on the concurrent vector when you call this method.
/// </remarks>
/**/
template<class _InputIterator>
void assign(_InputIterator _Begin, _InputIterator _End)
{
clear();
_Internal_assign( _Begin, _End, static_cast<_Is_integer_tag<std::numeric_limits<_InputIterator>::is_integer> *>(0) );
}
/// <summary>
/// Swaps the contents of two concurrent vectors. This method is not concurrency-safe.
/// </summary>
/// <param name="_Vector">
/// The <c>concurrent_vector</c> object to swap contents with.
/// </param>
/**/
void swap(concurrent_vector &_Vector)
{
    // Self-swap is a no-op.
    if (this == &_Vector)
        return;
    // Exchange segment tables and bookkeeping via the runtime base class,
    // then exchange the allocators so each vector keeps a matching allocator.
    _Concurrent_vector_base_v4::_Internal_swap(static_cast<_Concurrent_vector_base_v4&>(_Vector));
    std::swap(this->_My_allocator, _Vector._My_allocator);
}
/// <summary>
/// Erases all elements in the concurrent vector. This method is not concurrency-safe.
/// </summary>
/// <remarks>
/// <c>clear</c> is not concurrency-safe. You must ensure that no other threads are invoking methods
/// on the concurrent vector when you call this method. <c>clear</c> does not free internal arrays. To free internal arrays,
/// call the function <c>shrink_to_fit</c> after <c>clear</c>.
/// </remarks>
/**/
void clear()
{
_Internal_clear(&_Destroy_array);
}
/// <summary>
/// Erases all elements and destroys this concurrent vector.
/// </summary>
/**/
~concurrent_vector()
{
    // Capture the segment table before clearing, then destroy the elements.
    // _Internal_clear reports how many segments are in use; hand that count,
    // together with the raw table, to the memory-releasing helper.
    _Segment_t *_Table = _My_segment;
    _Segment_index_t _Segment_count = _Internal_clear(&_Destroy_array);
    _Internal_free_segments(reinterpret_cast<void**>(_Table), _Segment_count, _My_first_block);
    // The base class destructor runs after this body.
}
// Expose this vector as its runtime base type (read-only view for the
// Concurrency::details layer).
const ::Concurrency::details::_Concurrent_vector_base_v4 &_Internal_vector_base() const
{
    return *this;
}
private:
// Allocate raw storage for _K elements with the vector's own allocator.
// The runtime calls back through the base-class reference, so downcast it
// to recover the allocator-aware derived type first.
static void *_Internal_allocator(::Concurrency::details::_Concurrent_vector_base_v4 &_Vb, size_t _K)
{
    concurrent_vector<_Ty, _Ax> &_Vec = static_cast<concurrent_vector<_Ty, _Ax>&>(_Vb);
    return _Vec._My_allocator.allocate(_K);
}
// Release the raw memory of _K segments from the given segment _Table (defined out of line below).
void _Internal_free_segments(void *_Table[], _Segment_index_t _K, _Segment_index_t _First_block);
// Get reference to element at given _Index; bounds are checked by debug assertions only.
_Ty& _Internal_subscript( size_type _Index ) const;
// Get reference to element at given _Index, throwing via _Internal_throw_exception on error.
_Ty& _Internal_subscript_with_exceptions( size_type _Index ) const;
// Assign _N copies of _Item into an (assumed empty) vector.
void _Internal_assign(size_type _N, const_reference _Item);
// Helper tag class for compile-time overload dispatch between the
// (count, value) and (iterator, iterator) forms of assign(); only null
// pointers to its specializations are formed here.
template<bool B> class _Is_integer_tag;
// assign() called with two integral arguments must behave as assign(count, value).
// See C++ Standard 2003 23.1.1p9.
template<class _I>
void _Internal_assign(_I _First, _I _Last, _Is_integer_tag<true> *)
{
_Internal_assign(static_cast<size_type>(_First), static_cast<_Ty>(_Last));
}
// assign() called with genuine (non-integral) iterators: forward to the iterator implementation.
template<class _I>
void _Internal_assign(_I _First, _I _Last, _Is_integer_tag<false> *) {
internal_assign_iterators(_First, _Last);
}
// Assign the contents of the range [_First, _Last) into an (assumed empty) vector.
template<class _I>
void internal_assign_iterators(_I _First, _I _Last);
// Default-construct _N instances of _Ty, starting at _Begin (second parameter is unused).
static void _Initialize_array( void* _Begin, const void*, size_type _N );
// Copy-construct _N instances of _Ty from the single element *_Src, starting at _Begin.
static void _Initialize_array_by( void* _Begin, const void* _Src, size_type _N );
// Copy-construct _N instances of _Ty from the array _Src, starting at _Dst.
static void _Copy_array( void* _Dst, const void* _Src, size_type _N );
// Copy-assign _N instances of _Ty from the array _Src onto existing elements at _Dst.
static void _Assign_array( void* _Dst, const void* _Src, size_type _N );
// Destroy _N instances of _Ty, starting at _Begin.
static void _Destroy_array( void* _Begin, size_type _N );
// Exception-aware helper for filling one segment element by element.
// _I tracks how many elements have been successfully constructed/assigned;
// if a user constructor throws partway through, the destructor zeroes the
// storage of the remaining (never-constructed) slots so later cleanup can
// distinguish them. NOTE(review): zeroed slots appear to act as the
// "broken segment" marker checked elsewhere via _BAD_ALLOC_MARKER — confirm.
class _Internal_loop_guide
{
public:
// Destination segment (typed view of the raw storage handed in).
const pointer _My_array;
// Total number of elements this guard is responsible for.
const size_type _N;
// Number of elements completed so far; advances only after each success.
size_type _I;
_Internal_loop_guide(size_type _NTrials, void *_Ptr)
: _My_array(static_cast<pointer>(_Ptr)), _N(_NTrials), _I(0)
{
}
// Default-construct all _N elements in place.
void _Init()
{
for(; _I < _N; ++_I)
new( &_My_array[_I] ) _Ty();
}
// Copy-construct all _N elements from the single source element *_Src.
void _Init(const void *_Src)
{
for(; _I < _N; ++_I)
new( &_My_array[_I] ) _Ty(*static_cast<const _Ty*>(_Src));
}
// Copy-construct element i from _Src[i] for all _N elements.
void _Copy(const void *_Src)
{
for(; _I < _N; ++_I)
new( &_My_array[_I] ) _Ty(static_cast<const _Ty*>(_Src)[_I]);
}
// Copy-assign element i from _Src[i] onto already-constructed elements.
void _Assign(const void *_Src)
{
for(; _I < _N; ++_I)
_My_array[_I] = static_cast<const _Ty*>(_Src)[_I];
}
// Copy-construct all _N elements from an input iterator, advancing it in step.
template<class _It> void _Iterate(_It &_Src)
{
for(; _I < _N; ++_I, ++_Src)
new( &_My_array[_I] ) _Ty( *_Src );
}
~_Internal_loop_guide()
{
if(_I < _N) // if exception raised, do zeroing on the rest of items
std::memset(_My_array+_I, 0, (_N-_I)*sizeof(value_type));
}
private:
void operator=(const _Internal_loop_guide&); // prevent warning: assign operator can't be generated
};
};
/// <summary>
/// Compacts the internal representation of the concurrent vector to reduce fragmentation and optimize memory usage.
/// </summary>
/// <remarks>
/// This method will internally re-allocate memory and move elements around, invalidating all the iterators.
/// <c>shrink_to_fit</c> is not concurrency-safe. You must ensure that no other threads are invoking methods
/// on the concurrent vector when you call this function.
/// </remarks>
/**/
template<typename _Ty, class _Ax>
void concurrent_vector<_Ty, _Ax>::shrink_to_fit()
{
// _Internal_compact fills _Old with the segments it replaced; on success
// those old segments must be freed here.
_Internal_segments_table _Old;
try
{
if( _Internal_compact( sizeof(_Ty), &_Old, &_Destroy_array, &_Copy_array ) )
_Internal_free_segments( _Old._Table, _Pointers_per_long_table, _Old._First_block ); // free joined and unnecessary segments
}
catch(...)
{
if( _Old._First_block ) // free segment allocated for compacting. Only for support of exceptions in ctor of user _Ty[pe]
_Internal_free_segments( _Old._Table, 1, _Old._First_block );
throw;
}
}
// Release the raw memory of up to _K segments recorded in _Table.
// Segments [_First_block, _K) were allocated individually and are freed one
// by one; segments [0, _First_block) share a single allocation of
// _Segment_size(_First_block) elements anchored at _Table[0], so they are
// freed together in a single deallocate call. Entries equal to or below
// _BAD_ALLOC_MARKER mark segments whose allocation failed and must be skipped.
template<typename _Ty, class _Ax>
void concurrent_vector<_Ty, _Ax>::_Internal_free_segments(void *_Table[], _Segment_index_t _K, _Segment_index_t _First_block)
{
// Free the arrays
while( _K > _First_block )
{
--_K;
_Ty* _Array = static_cast<_Ty*>(_Table[_K]);
_Table[_K] = NULL;
if( _Array > _BAD_ALLOC_MARKER ) // check for correct segment pointer
this->_My_allocator.deallocate( _Array, _Segment_size(_K) );
}
// The first _First_block segments live in one joined allocation at _Table[0].
_Ty* _Array = static_cast<_Ty*>(_Table[0]);
if( _Array > _BAD_ALLOC_MARKER )
{
_ASSERTE( _First_block > 0 );
while(_K > 0)
_Table[--_K] = NULL;
this->_My_allocator.deallocate( _Array, _Segment_size(_First_block) );
}
}
// Unchecked element access: returns a reference to the element at _Index.
// All validity checks are debug-only assertions; use at() / the checked
// variant for runtime errors.
template<typename _Ty, class _Ax>
_Ty& concurrent_vector<_Ty, _Ax>::_Internal_subscript( size_type _Index ) const
{
_ASSERTE( _Index<_My_early_size ); // index out of bounds
size_type _J = _Index;
// NOTE(review): _Segment_base_index_of appears to take _J by reference and
// rewrite it to the offset within segment _K — confirm against the base class.
_Segment_index_t _K = _Segment_base_index_of( _J );
_ASSERTE( _My_segment != (_Segment_t*)_My_storage || _K < _Pointers_per_short_table ); // index is under construction
// No load-with-acquire needed here: the caller either works in its own
// space or has already synchronized with the publishing thread.
_Ty* _Array = static_cast<_Ty*>(_My_segment[_K]._My_array);
_ASSERTE( _Array != _BAD_ALLOC_MARKER ); // instance may be broken by bad allocation; use at() instead
_ASSERTE( _Array != NULL ); // index is being allocated
return _Array[_J];
}
// Checked element access: returns a reference to the element at _Index,
// reporting errors through _Internal_throw_exception instead of assertions.
// Error codes: 0/1 -> std::out_of_range, 2 -> std::range_error (broken segment).
template<typename _Ty, class _Ax>
_Ty& concurrent_vector<_Ty, _Ax>::_Internal_subscript_with_exceptions( size_type _Index ) const
{
if( _Index >= _My_early_size )
_Internal_throw_exception(0); // throw std::out_of_range
size_type _J = _Index;
// _J becomes the offset within segment _K (see note on _Internal_subscript).
_Segment_index_t _K = _Segment_base_index_of( _J );
// While the short in-place table is in use, only its first few slots are valid.
if( _My_segment == (_Segment_t*)_My_storage && _K >= _Pointers_per_short_table )
_Internal_throw_exception(1); // throw std::out_of_range
void *_Array = _My_segment[_K]._My_array; // no need in load_with_acquire
if( _Array <= _BAD_ALLOC_MARKER ) // check for correct segment pointer
_Internal_throw_exception(2); // throw std::range_error
return static_cast<_Ty*>(_Array)[_J];
}
// Fill an empty vector with _N copies of _Item.
// Reserves space up front, then walks the segment table: the first pass
// covers the joined first block of _Segment_size(_My_first_block) elements,
// after which _K jumps to _My_first_block and each subsequent segment doubles
// in size (_Sz <<= 1). The final partial segment is filled outside the loop.
template<typename _Ty, class _Ax>
void concurrent_vector<_Ty, _Ax>::_Internal_assign(size_type _N, const_reference _Item)
{
_ASSERTE( _My_early_size == 0 ); // caller must have cleared the vector first
if( !_N )
return;
_Internal_reserve(_N, sizeof(_Ty), max_size());
_My_early_size = _N;
_Segment_index_t _K = 0;
_Size_type _Sz = _Segment_size( _My_first_block );
while (_Sz < _N)
{
// Copy-construct a full segment of _Sz elements from _Item.
_Initialize_array_by(static_cast<_Ty*>(_My_segment[_K]._My_array), static_cast<const void*>(&_Item), _Sz);
_N -= _Sz;
if (!_K)
{
// Segments [0, _My_first_block) were covered by the joined first block.
_K = _My_first_block;
}
else {
++_K;
_Sz <<= 1; // each later segment is twice the size of its predecessor
}
}
// Remaining _N elements go into segment _K (possibly partially filling it).
_Initialize_array_by(static_cast<_Ty*>(_My_segment[_K]._My_array), static_cast<const void*>(&_Item), _N);
}
// Fill an empty vector from the iterator range [_First, _Last).
// Mirrors _Internal_assign's segment walk, but constructs each element from
// the advancing iterator via an _Internal_loop_guide so that a throwing
// element constructor leaves the unfinished tail of the segment zeroed.
template<typename _Ty, class _Ax> template<class _I>
void concurrent_vector<_Ty, _Ax>::internal_assign_iterators(_I _First, _I _Last)
{
_ASSERTE(_My_early_size == 0); // caller must have cleared the vector first
size_type _N = std::distance(_First, _Last);
if( !_N ) return;
_Internal_reserve(_N, sizeof(_Ty), max_size());
_My_early_size = _N;
_Segment_index_t _K = 0;
_Size_type _Sz = _Segment_size( _My_first_block );
while (_Sz < _N)
{
// Construct a full segment of _Sz elements; _Loop advances _First in step.
_Internal_loop_guide _Loop(_Sz, _My_segment[_K]._My_array);
_Loop._Iterate(_First);
_N -= _Sz;
if (!_K)
{
// Segments [0, _My_first_block) were covered by the joined first block.
_K = _My_first_block;
}
else {
++_K;
_Sz <<= 1; // each later segment doubles in size
}
}
// Construct the remaining _N elements into segment _K.
_Internal_loop_guide _Loop(_N, _My_segment[_K]._My_array);
_Loop._Iterate(_First);
}
// Default-construct _N instances of _Ty in the raw storage at _Begin.
// The unused const void* parameter keeps the signature uniform with the
// other element-wise helpers handed to the runtime as function pointers.
template<typename _Ty, class _Ax>
void concurrent_vector<_Ty, _Ax>::_Initialize_array( void* _Begin, const void *, size_type _N )
{
    _Internal_loop_guide _Guard(_N, _Begin);
    _Guard._Init();
}
// Copy-construct _N instances of _Ty in the raw storage at _Begin, each one
// copied from the single source element *_Src.
template<typename _Ty, class _Ax>
void concurrent_vector<_Ty, _Ax>::_Initialize_array_by( void* _Begin, const void *_Src, size_type _N )
{
    _Internal_loop_guide _Guard(_N, _Begin);
    _Guard._Init(_Src);
}
template<typename _Ty, class _Ax>
void concurrent_vector<_Ty, _Ax>::_Copy_array( void* _Dst, const void* _Src, size_type _N ) {
_Internal_loop_guide _Loop
// NOTE(review): file truncated here by web-extraction — the body of
// _Copy_array (started above) and any remaining definitions are missing.
// Non-source page chrome (keyboard-shortcut help) removed.