rtlocks.cpp
    {
        _M_recursionCount++;
    }
    else
    {
        _M_criticalSection._Acquire_lock(_Lock_node, true);
        _M_owner = id;
        _M_recursionCount = 1;
    }
}
void _ReentrantPPLLock::_Release()
{
    ASSERT(_M_owner == (LONG) GetCurrentThreadId());
    ASSERT(_M_recursionCount >= 1);

    _M_recursionCount--;

    if ( _M_recursionCount == 0 )
    {
        _M_owner = NULL_THREAD_ID;
        _M_criticalSection.unlock();
    }
}
//
// A non-reentrant reader-writer spin lock, designed for rare writers.
//
// A writer request immediately blocks future readers and then waits until all current
// readers drain. A reader request does not block future writers and must wait until
// all writers are done, even those that cut in front of it. In any race between a
// requesting reader and a writer, the writer always wins.
//
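// A minimal usage sketch of the intended pairing (illustrative only; this is an internal
// runtime lock, so the scoped-guard helpers used elsewhere in the runtime are omitted here):
//
//     _ReaderWriterLock lock;
//
//     lock._AcquireRead();     // shared: waits for any interested writers to drain first
//     /* ... read shared state ... */
//     lock._ReleaseRead();
//
//     lock._AcquireWrite();    // exclusive: announces the writer, then waits for readers to leave
//     /* ... mutate shared state ... */
//     lock._ReleaseWrite();
//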
_ReaderWriterLock::_ReaderWriterLock()
    : _M_state(_ReaderWriterLock::_Free), _M_numberOfWriters(0)
{
}
//
// Acquires the RWLock for reading. Waits for the number of writers to drain.
//
void _ReaderWriterLock::_AcquireRead()
{
#if defined(_DEBUG)
    LONG dbgBits = GetDebugBits();
    LONG val = _M_numberOfWriters;

    for (;;)
    {
        LONG xchgVal = InterlockedCompareExchange(&_M_numberOfWriters, val | dbgBits, val);
        if (xchgVal == val)
            break;
        val = xchgVal;
    }
#endif // _DEBUG

    for (;;)
    {
        if (_M_numberOfWriters > 0)
        {
#if defined(_DEBUG)
            _WaitEquals(_M_numberOfWriters, 0, ~DebugBitsMask);
#else // !_DEBUG
            _WaitEquals(_M_numberOfWriters, 0);
#endif // _DEBUG
        }

        int currentState = _M_state;

        // Try to acquire the read lock by incrementing the current state.
        if (currentState != _Write &&
            InterlockedCompareExchange(&_M_state, currentState + 1, currentState) == currentState)
        {
#if defined(_DEBUG)
            ValidateDebugBits(_M_numberOfWriters);
#endif // _DEBUG
            return;
        }
    }
}
//
// Release read lock -- the last reader will decrement _M_state to _Free
//
void _ReaderWriterLock::_ReleaseRead()
{
    ASSERT(_M_state >= _Read);
    InterlockedDecrement(&_M_state);
}
//
// Acquire write lock -- spin until there are no existing readers, no new readers will
// be added
//
void _ReaderWriterLock::_AcquireWrite()
{
    InterlockedIncrement(&_M_numberOfWriters);

    for (;;)
    {
        if (InterlockedCompareExchange(&_M_state, _Write, _Free) == _Free)
        {
#if defined(_DEBUG)
            ValidateDebugBits(_M_numberOfWriters);
#endif // _DEBUG
            return;
        }

        _WaitEquals(_M_state, _Free);
    }
}
//
// Release writer lock -- there can only be one active, but a bunch might be pending
//
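// Note the ordering below: _M_state returns to _Free before _M_numberOfWriters is decremented,
// so a thread waiting in _FlushWriteOwners (which keys off _M_numberOfWriters reaching zero)
// only proceeds once the releasing writer has completely left the lock, including this function.
//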
void _ReaderWriterLock::_ReleaseWrite()
{
    ASSERT(_M_state == _Write);
#if defined(_DEBUG)
    ASSERT((_M_numberOfWriters & ~DebugBitsMask) > 0);
#else // !_DEBUG
    ASSERT(_M_numberOfWriters > 0);
#endif // _DEBUG

    // The following assignment does not need to be interlocked, as the interlocked
    // decrement can take care of the fence.
    _M_state = _Free;
    InterlockedDecrement(&_M_numberOfWriters);
}
//
// Tries to acquire the write lock. Returns true if the lock was acquired.
//
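// Note that, unlike _AcquireWrite, the writer count here is incremented only after the state CAS
// succeeds, so a failed try leaves _M_numberOfWriters untouched and does not hold off readers.
//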
bool _ReaderWriterLock::_TryAcquireWrite()
{
    if (InterlockedCompareExchange(&_M_state, _Write, _Free) == _Free)
    {
        InterlockedIncrement(&_M_numberOfWriters);
#if defined(_DEBUG)
        ValidateDebugBits(_M_numberOfWriters);
#endif // _DEBUG
        return true;
    }

    return false;
}
// Spin-Wait-Until variant -- spin for _SpinCount::_S_spinCount iterations, then Sleep(0) and repeat
// up to 10 times (tunable); thereafter spin and Sleep(1).
void _ReaderWriterLock::_WaitEquals(volatile const LONG& location, LONG value, LONG mask)
{
    unsigned int retries = 0;
    int spinInterval = 10; // tuning

    for (;;)
    {
        if ((location & mask) == value)
            return;

        YieldProcessor();

        if (++retries >= _SpinCount::_S_spinCount)
        {
            if (spinInterval > 0)
            {
                --spinInterval;
                Sleep(0);
            }
            else
            {
                Sleep(1);
            }

            retries = 0;
        }
    }
}
// Guarantees that all writers are out of the lock. This does nothing if there are no pending writers.
void _ReaderWriterLock::_FlushWriteOwners()
{
    //
    // Ideally, if the read lock is held and we have pending writers, this would not need to grab the lock and release
    // it; however -- we must guarantee that any writer which was in the lock as of this call is completely out
    // of everything including _ReleaseWrite. Since the last thing which happens there is the decrement of _M_numberOfWriters,
    // that is *currently* what we must key off. It's possible that after _M_state is set to _Free there, a reader
    // gets the lock because it was preempted after its initial check of _M_numberOfWriters saw 0. Hence, we cannot
    // rely on _M_state.
    //
    if (_M_numberOfWriters > 0)
    {
#if defined(_DEBUG)
        _WaitEquals(_M_numberOfWriters, 0, ~DebugBitsMask);
#else // !_DEBUG
        _WaitEquals(_M_numberOfWriters, 0);
#endif // _DEBUG
    }
}
//***************************************************************************
// Locking primitives and structures:
//***************************************************************************
// Reader-writer lock constants
static const long RWLockWriterInterested = 0x1; // Writer interested or active
static const long RWLockWriterExclusive = 0x2; // Writer active, no reader entry
static const long RWLockReaderInterested = 0x4; // Reader interested but not active
static const long RWLockReaderCountIncrement = 0x8; // Reader count step (reader counter is scaled by it)
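//
// Illustrative reading of a lock word built from these constants (an assumption based on the
// scaling comment above; the field that holds this value is not shown in this excerpt):
// e.g. 0x19 = 3 * RWLockReaderCountIncrement (three readers counted) | RWLockWriterInterested
// (a writer has announced itself and will gate further reader entry).
//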
/// <summary>
/// Node element used in the lock queues.
/// </summary>
class LockQueueNode
{
public:

    /// <summary>
    /// Constructor for a queue node. It keeps the context pointer in order
    /// to block in a fashion visible to ConcRT.
    /// </summary>
    LockQueueNode() : m_pNextNode(NULL), m_ticketState(StateIsBlocked)
    {
        m_pContext = SchedulerBase::CurrentContext();
    }
    /// <summary>
    /// Constructor for a queue node. It keeps the context pointer in order
    /// to block in a fashion visible to ConcRT.
    /// </summary>
    LockQueueNode(Context * pContext, unsigned int ticket) : m_pNextNode(NULL), m_pContext(pContext), m_ticketState(ticket)
    {
    }
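
    //
    // How m_ticketState is read below (inferred from this excerpt; the State*/Ticket* constants
    // themselves are declared elsewhere): the low bits are boolean flags -- StateIsBlocked,
    // StateIsTicketValid and StateIsPreviousBlocked -- and the remaining upper bits, reached by
    // shifting right by NumberOfBooleanStates and advanced by TicketIncrement, form a ticket
    // number used to estimate a node's distance from the current lock owner.
    //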
    /// <summary>
    /// Waits until the lock is available.
    /// </summary>
    /// <param name="currentTicketState">
    /// The number of the node that currently owns the lock, or last owned it.
    /// </param>
    void Block(unsigned int currentTicketState = 0)
    {
        // Get the number of physical processors to determine the best spin times
        unsigned int numberOfProcessors = Concurrency::GetProcessorCount();
        _ASSERTE(numberOfProcessors > 0);

        // If the previous node is blocked then there is no need to spin and waste cycles
        if (!IsPreviousBlocked())
        {
            // If there is a race and the ticket is not valid then use the default spin
            unsigned int placeInLine = IsTicketValid() ? ((m_ticketState >> NumberOfBooleanStates) - (currentTicketState >> NumberOfBooleanStates)) : 1;
            _ASSERTE(placeInLine > 0);

            //
            // If the node is back in line by more than a processor count plus a threshold
            // then simply don't spin and block immediately. Otherwise, progressively increase the
            // amount of spin for the subsequent nodes until a double default spin count is reached.
            //
            if (placeInLine <= numberOfProcessors + TicketThreshold)
            {
                const unsigned int defaultSpin = _SpinCount::_Value();
                unsigned int totalSpin = defaultSpin + (defaultSpin * (placeInLine - 1)) / (numberOfProcessors + TicketThreshold);
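
                //
                // A worked example of the scaling above, with purely hypothetical numbers
                // (defaultSpin = 4000, numberOfProcessors = 8, TicketThreshold = 4):
                // placeInLine = 1 spins the default 4000 times, placeInLine = 7 spins
                // 4000 + 4000 * 6 / 12 = 6000 times, and the spin approaches 2 * defaultSpin
                // as placeInLine nears numberOfProcessors + TicketThreshold.
                //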
                _SpinWaitNoYield spinWait;
                spinWait._SetSpinCount(totalSpin);

                while (IsBlocked() && spinWait._SpinOnce())
                {
                    // _YieldProcessor is called inside _SpinOnce
                }
            }
        }

        //
        // After spin waiting for a while use the ConcRT blocking mechanism. It will return
        // immediately if the unblock already happened.
        //
        m_pContext->Block();
    }
    /// <summary>
    /// Notifies that the lock is available, without unblocking the context.
    /// </summary>
    void UnblockWithoutContext()
    {
        m_ticketState &= ~StateIsBlocked;
    }
    /// <summary>
    /// Notifies that the lock is available.
    /// </summary>
    void Unblock()
    {
        UnblockWithoutContext();

        //
        // This call implies a fence which serves two purposes:
        // a) it makes the cleared StateIsBlocked bit (from UnblockWithoutContext) visible sooner
        // b) it makes sure that we never block a context without unblocking it
        //
        m_pContext->Unblock();
    }
    /// <summary>
    /// Waits until the next node is set.
    /// </summary>
    /// <returns>
    /// The next node.
    /// </returns>
    LockQueueNode * WaitForNextNode()
    {
        LockQueueNode * volatile pNextNode = m_pNextNode;
        _SpinWaitBackoffNone spinWait;

        while (pNextNode == NULL)
        {
            //
            // There is no context blocking here, so continue to spin even if the maximum
            // spin is already reached. Since setting the tail and setting the next pointer
            // are back-to-back operations, it is very likely that the while loop will not take
            // a long time.
            //
            spinWait._SpinOnce();
            pNextNode = m_pNextNode;
        }

        return pNextNode;
    }
    /// <summary>
    /// Copies the contents of the passed-in node to this node.
    /// </summary>
    /// <param name="pCopyFromNode">
    /// The node to copy from.
    /// </param>
    /// <remarks>
    /// Used only to transfer data to the internally allocated node.
    /// </remarks>
    void Copy(LockQueueNode * pCopyFromNode)
    {
        _ASSERTE(pCopyFromNode->IsTicketValid());
        _ASSERTE(!pCopyFromNode->IsBlocked());

        m_ticketState = pCopyFromNode->m_ticketState;
        m_pNextNode = pCopyFromNode->m_pNextNode;
        m_pContext = pCopyFromNode->m_pContext;
    }
    /// <summary>
    /// Estimates the position of this node in the node queue based on the previous node.
    /// </summary>
    /// <param name="pPreviousNode">
    /// The node to get the base number from, if available.
    /// </param>
    /// <remarks>
    /// Used only as a heuristic for the critical section and for writers in the reader-writer lock.
    /// </remarks>
    void UpdateQueuePosition(LockQueueNode * pPreviousNode)
    {
        if (!IsTicketValid())
        {
            // If the previous node has a valid ticket then this one will have it as well
            if (pPreviousNode->IsTicketValid())
            {
                unsigned int newState = (pPreviousNode->m_ticketState + TicketIncrement) & MaskBlockedStates;
                _ASSERTE((newState & StateIsTicketValid) != 0);

                // If the previous node is blocked then set this information on the current node to save the spin
                if (pPreviousNode->IsBlocked() && (pPreviousNode->IsPreviousBlocked() || pPreviousNode->m_pContext->IsSynchronouslyBlocked()))
                {
                    newState |= StateIsPreviousBlocked;
                }

                m_ticketState |= newState;
            }
        }
    }
    /// <summary>
    /// Estimates the state of this node based on the state of the previous node.
    /// </summary>
    /// <param name="pPreviousNode">
    /// The node to get the base from, if available.
    /// </param>
    /// <remarks>
    /// Used only as a heuristic for readers in the reader-writer lock.
    /// </remarks>
    void UpdateBlockingState(LockQueueNode * pPreviousNode)
    {
        // If the previous node is blocked then set this information on the current node to save the spin
        if (pPreviousNode->IsBlocked() && (pPreviousNode->IsPreviousBlocked() || pPreviousNode->m_pContext->IsSynchronouslyBlocked()))
        {
            m_ticketState |= StateIsPreviousBlocked;
        }
    }
private:

    friend class critical_section;
    friend class reader_writer_lock;

    bool IsBlocked()