// concrt.h -- spin lock / spin wait support section (extracted fragment)
return _Rhs;
}
_Ty _CompareAndSwap(_Ty _NewValue, _Ty _Comperand) {
    // Atomically store _NewValue iff the current value equals _Comperand,
    // delegating to the size-selected _Subatomic_impl specialization.
    // Returns the value observed before the operation; callers compare it
    // against _Comperand to detect whether the swap took effect.
    _Ty _Observed = _Subatomic_impl<sizeof(_Ty)>::_CompareAndSwap(_M_value, _NewValue, _Comperand);
    return _Observed;
}
_Ty _FetchAndAdd(_Ty _Addend) {
    // Atomically add _Addend to the stored value and return the value that
    // was held *before* the addition (fetch-and-add semantics).
    _Ty _Original = _Subatomic_impl<sizeof(_Ty)>::_FetchAndAdd(_M_value, _Addend);
    return _Original;
}
_Ty operator++() {
    // Pre-increment: atomically bump the stored value.
    _Ty _Incremented = _Subatomic_impl<sizeof(_Ty)>::_Increment(_M_value);
    // _Increment reports the new value, matching built-in ++x semantics.
    return _Incremented;
}
_Ty operator++(int) {
    // Post-increment: _Increment returns the *new* value, so back out one
    // to yield the value the caller saw before the bump (built-in x++).
    _Ty _Bumped = _Subatomic_impl<sizeof(_Ty)>::_Increment(_M_value);
    return _Bumped - 1;
}
_Ty operator--() {
    // Pre-decrement: atomically lower the stored value.
    _Ty _Decremented = _Subatomic_impl<sizeof(_Ty)>::_Decrement(_M_value);
    // _Decrement reports the new value, matching built-in --x semantics.
    return _Decremented;
}
_Ty operator--(int) {
    // Post-decrement: _Decrement returns the *new* value, so add one back
    // to yield the pre-decrement value (built-in x-- semantics).
    _Ty _Lowered = _Subatomic_impl<sizeof(_Ty)>::_Decrement(_M_value);
    return _Lowered + 1;
}
_Ty operator+=(_Ty _Addend) {
    // _FetchAndAdd yields the pre-add value; add _Addend once more to
    // report the post-add value, matching built-in += semantics.
    _Ty _Previous = _FetchAndAdd(_Addend);
    return _Previous + _Addend;
}
};
//
// An RAII class that spin-waits on a "rented" flag.
//
class _SpinLock
{
private:
// Reference to a caller-owned ("rented") flag encoding the lock state;
// volatile because it is polled/updated across threads.
volatile long& _M_flag;
public:
// Acquires the flag, spin-waiting as needed (implementation out of line).
_CRTIMP _SpinLock(volatile long& _Flag);
// Releases the flag on scope exit (RAII).
_CRTIMP ~_SpinLock();
private:
// Non-copyable: copying a held lock would release the flag twice.
_SpinLock(const _SpinLock&);
void operator=(const _SpinLock&);
};
//
// A class that holds the count used for spinning and is dependent
// on the number of hardware threads
//
// Holds the process-wide spin-iteration count, tuned to the number of
// hardware threads (0 on a single-processor machine, so spinners skip
// straight to yielding/blocking instead of burning cycles).
struct _SpinCount
{
// Initializes the spinCount to either 0 or SPIN_COUNT, depending on
// the number of hardware threads.
static void __cdecl _Initialize();
// Returns the current value of s_spinCount
_CRTIMP static unsigned int __cdecl _Value();
// The number of iterations used for spinning
static unsigned int _S_spinCount;
};
/// <summary>
/// Default method for yielding during a spin wait
/// </summary>
/**/
void _CRTIMP __cdecl _UnderlyingYield();
/// <summary>
/// Implements busy wait with no backoff
/// </summary>
/**/
// _YieldCount is the number of times the spinner will yield its time slice
// (via the supplied yield function) after the pure-spin phase completes and
// before _SpinOnce() tells the caller to block. A count of 0 produces a
// spinner that never yields its time slice (see _SpinWaitNoYield below).
template<unsigned int _YieldCount = 1>
class _CRTIMP _SpinWait
{
public:
// Signature of the function invoked when the spinner yields its time slice.
typedef void (__cdecl *_YieldFunction)();
/// <summary>
/// Construct a spin wait object
/// </summary>
/**/
_SpinWait(_YieldFunction _YieldMethod = _UnderlyingYield)
: _M_yieldFunction(_YieldMethod), _M_state(_StateInitial)
{
// Defer initialization of other fields to _SpinOnce().
}
/// <summary>
/// Set a dynamic spin count.
/// </summary>
/**/
void _SetSpinCount(unsigned int _Count)
{
// Only legal before the first spin (or right after a _Reset()).
_ASSERTE(_M_state == _StateInitial);
if (_Count == 0)
{
// Specify a count of 0 if we are on a single proc.
_M_state = _StateSingle;
}
else
{
_M_currentSpin = _Count;
_M_currentYield = _YieldCount;
_M_state = _StateSpin;
}
}
/// <summary>
/// Spins for one time quantum, until a maximum spin is reached.
/// </summary>
/// <returns>
/// false if spin count has reached steady state, true otherwise.
/// </returns>
/// <remarks>
/// If the spin count is not changing that means that it is probably not a good idea to spin again
/// because there is either only one processor, or maximum spin has been reached and blocking is
/// probably a better solution. However, if called again, SpinOnce will spin for a maximum spin count.
/// State machine: _StateInitial -> _StateSpin -> (_StateYield ->) _StateBlock,
/// with _StateSingle as a dedicated single-processor path.
/// </remarks>
/**/
bool _SpinOnce()
{
switch (_M_state)
{
case _StateSpin:
{
// Busy-spin phase: issue pause instructions without giving up the slice.
unsigned long _Count = _NumberOfSpins();
for (unsigned long _I = 0; _I < _Count; _I++)
{
_YieldProcessor();
}
if (!_ShouldSpinAgain())
{
// Spin budget exhausted; move to yielding (or straight to block if
// this instantiation never yields).
_M_state = (_M_currentYield == 0) ? _StateBlock : _StateYield;
}
return true;
}
case _StateYield:
_ASSERTE(_M_currentYield > 0);
if (--_M_currentYield == 0)
{
_M_state = _StateBlock;
}
// Execute the yield
_DoYield();
return true;
case _StateBlock:
// Reset to defaults if client does not block
_Reset();
return false;
case _StateSingle:
// No need to spin on a single processor: just execute the yield
_DoYield();
return false;
case _StateInitial:
// Reset counters to their default value and Spin once.
_Reset();
return _SpinOnce();
default:
// Unreached
return false;
};
}
protected:
/// <summary>
/// State of the spin wait class.
/// </summary>
/**/
enum _SpinState
{
_StateInitial,
_StateSpin,
_StateYield,
_StateBlock,
_StateSingle
};
/// <summary>
/// Yields its time slice using the specified yield function
/// </summary>
/**/
void _DoYield()
{
// _YieldCount is a compile-time constant, so this branch folds away.
bool _ShouldYield = (_YieldCount != 0);
if (_ShouldYield)
{
_ASSERTE(_M_yieldFunction != NULL);
_M_yieldFunction();
}
else
{
// Never give up the time slice: just issue a pause instruction.
_YieldProcessor();
}
}
/// <summary>
/// Resets the counts and state to the default.
/// </summary>
/**/
void _Reset()
{
_M_state = _StateInitial;
// Reset to the default spin value. The value specified
// by the client is ignored on a reset.
_SetSpinCount(_SpinCount::_Value());
// _SetSpinCount must have moved us to _StateSpin or _StateSingle.
_ASSERTE(_M_state != _StateInitial);
}
/// <summary>
/// Determines the current spin count
/// </summary>
/// <returns>
/// The number of spins to execute for this iteration
/// </returns>
/// <remarks>
/// Always 1 here: this class implements busy wait with no backoff.
/// </remarks>
/**/
unsigned long _NumberOfSpins()
{
return 1;
}
/// <summary>
/// Determines whether maximum spin has been reached
/// </summary>
/// <returns>
/// false if spin count has reached steady state, true otherwise.
/// </returns>
/**/
bool _ShouldSpinAgain()
{
// Pre-decrement: each call consumes one unit of the spin budget.
return (--_M_currentSpin > 0);
}
// Remaining busy-spin iterations before moving to the yield/block phase.
unsigned long _M_currentSpin;
// Remaining yields before _SpinOnce() reports "block" (seeded from _YieldCount).
unsigned long _M_currentYield;
// Current position in the spin state machine.
_SpinState _M_state;
// Function called to give up the time slice (defaults to _UnderlyingYield).
_YieldFunction _M_yieldFunction;
};
// Default spin wait: yields the time slice once after spinning, no backoff.
typedef _SpinWait<> _SpinWaitBackoffNone;
// Spin wait that never yields its time slice (_YieldCount == 0); it only
// issues processor pause instructions while spinning.
typedef _SpinWait<0> _SpinWaitNoYield;
//
// This reentrant lock uses CRITICAL_SECTION and is intended for use when kernel blocking
// is desirable and where it is either known that the lock will be taken recursively in
// the same thread, or when it is just not known that a non-reentrant lock can be used safely.
//
class _ReentrantBlockingLock
{
public:
// Constructor for _ReentrantBlockingLock
_CRTIMP _ReentrantBlockingLock();
// Destructor for _ReentrantBlockingLock
_CRTIMP ~_ReentrantBlockingLock();
// Acquire the lock, spin if necessary
_CRTIMP void _Acquire();
// Tries to acquire the lock, does not spin.
// Returns true if the acquisition worked, false otherwise
_CRTIMP bool _TryAcquire();
// Releases the lock
_CRTIMP void _Release();
// An exception safe RAII wrapper.
class _Scoped_lock
{
public:
// Constructs a holder and acquires the specified lock
explicit _Scoped_lock(_ReentrantBlockingLock& _Lock) : _M_lock(_Lock)
{
_M_lock._Acquire();
}
// Destroys the holder and releases the lock
~_Scoped_lock()
{
_M_lock._Release();
}
private:
// Borrowed reference: the lock must outlive the holder.
_ReentrantBlockingLock& _M_lock;
_Scoped_lock(const _Scoped_lock&); // no copy constructor
_Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
};
private:
// Critical section requires windows.h. Hide the implementation so that
// user code need not include windows.
// Raw storage sized to hold a CRITICAL_SECTION, rounded up to whole
// _CONCRT_BUFFER units.
_CONCRT_BUFFER _M_criticalSection[(4 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
};
//
// This reentrant lock is a pure spin lock and is intended for use when kernel blocking
// is undesirable and where it is either known that the lock will be taken recursively in
// the same thread, or when it is just not known that a non-reentrant lock can be used safely.
//
class _ReentrantLock
{
public:
// Constructor for _ReentrantLock
_CRTIMP _ReentrantLock();
// Acquire the lock, spin if necessary
_CRTIMP void _Acquire();
// Tries to acquire the lock, does not spin
// Returns true if the acquisition worked, false otherwise
_CRTIMP bool _TryAcquire();
// Releases the lock
_CRTIMP void _Release();
// An exception safe RAII wrapper.
class _Scoped_lock
{
public:
// Constructs a holder and acquires the specified lock
explicit _Scoped_lock(_ReentrantLock& _Lock) : _M_lock(_Lock)
{
_M_lock._Acquire();
}
// Destroys the holder and releases the lock
~_Scoped_lock()
{
_M_lock._Release();
}
private:
// Borrowed reference: the lock must outlive the holder.
_ReentrantLock& _M_lock;
_Scoped_lock(const _Scoped_lock&); // no copy constructor
_Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
};
private:
// Recursion depth of the current owner; meaningful only while held.
long _M_recursionCount;
// Presumably the owning thread's id (or a sentinel when free) -- volatile
// because it is polled by contending threads; TODO confirm against the
// out-of-line implementation.
volatile long _M_owner;
};