rtlocks.cpp
/// A reader blocks by spinning on a local variable. Each reader caches the previous reader (if
/// there is one) locally, so they can all be unblocked once the lock becomes available.
/// </remarks>
_CRTIMP void reader_writer_lock::lock_read()
{
LockQueueNode newReaderNode;
LockQueueNode * pNewReader = &newReaderNode;
// Locks are non-reentrant, so throw if this condition is detected.
if (pNewReader->m_pContext == reinterpret_cast<LockQueueNode *>(_M_activeWriter)->m_pContext)
{
throw improper_lock("Lock already taken as a writer");
}
LockQueueNode * pNextReader = reinterpret_cast<LockQueueNode *>(InterlockedExchangePointer(&_M_pReaderHead, pNewReader));
//
// If this is the only reader that currently exists and there are no interested writers,
// then unblock this reader.
//
if (pNextReader == NULL)
{
if ((InterlockedOr(&_M_lockState, RWLockReaderInterested) & (RWLockWriterInterested | RWLockWriterExclusive)) == 0)
{
LockQueueNode * pHeadReader = reinterpret_cast<LockQueueNode *>(_Get_reader_convoy());
//
// If the new reader is still the head of the reader list, it is unblocking itself, in
// which case UnblockWithoutContext suffices because no context needs to be unblocked.
// Otherwise, the full block/unblock mechanism is needed.
//
if (pHeadReader == pNewReader)
{
pHeadReader->UnblockWithoutContext();
return;
}
_ASSERTE(pHeadReader != pNewReader);
pHeadReader->Unblock();
}
}
else
{
pNewReader->UpdateBlockingState(pNextReader);
}
pNewReader->Block();
// Unblock the reader that preceded this one as the head of the list
if (pNextReader != NULL)
{
InterlockedExchangeAdd(&_M_lockState, RWLockReaderCountIncrement);
pNextReader->Unblock();
}
}
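// ---------------------------------------------------------------------------
// Illustrative sketch (an assumption, not the runtime's actual LockQueueNode):
// blocking by spinning on a node-local flag, the technique described in the
// remarks on lock_read above. Because each waiter spins on its own node, a
// convoy of readers can be released with one store per node instead of all
// waiters hammering a single shared cache line.
// ---------------------------------------------------------------------------
#if 0
#include <atomic>
#include <thread>

struct SpinNode
{
    std::atomic<bool> m_unblocked{false};

    // Spin until another thread flips this node's private flag.
    void Block()
    {
        while (!m_unblocked.load(std::memory_order_acquire))
        {
            std::this_thread::yield();
        }
    }

    // Release the waiter spinning on this node.
    void Unblock()
    {
        m_unblocked.store(true, std::memory_order_release);
    }
};
#endif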
/// <summary>
/// Try to take a reader lock.
/// </summary>
/// <returns>
/// true if the lock is immediately available and lock succeeded; false otherwise.
/// </returns>
_CRTIMP bool reader_writer_lock::try_lock_read()
{
long oldState = _M_lockState;
//
// Try to increment the reader count while no writer is interested.
//
while ((oldState & (RWLockWriterInterested | RWLockWriterExclusive)) == 0)
{
if (InterlockedCompareExchange(&_M_lockState, oldState + RWLockReaderCountIncrement, oldState) == oldState)
{
return true;
}
oldState = _M_lockState;
}
return false;
}
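// ---------------------------------------------------------------------------
// Hypothetical caller of try_lock_read, shown for illustration only
// ('SharedData' and 'consume' are placeholders, not part of this file):
// try_lock_read never blocks, so a false return simply means a writer is
// interested or active and the caller should do something else.
// ---------------------------------------------------------------------------
#if 0
bool read_if_available(reader_writer_lock& lock, const SharedData& data)
{
    if (!lock.try_lock_read())
    {
        return false;      // a writer is interested or holds the lock
    }
    consume(data);         // read under the shared lock
    lock.unlock();
    return true;
}
#endif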
/// <summary>
/// Unlock the lock based on who locked it, reader or writer.
/// </summary>
_CRTIMP void reader_writer_lock::unlock()
{
if (_M_lockState >= RWLockReaderCountIncrement)
{
_Unlock_reader();
}
else if ((_M_lockState & RWLockWriterExclusive) != 0)
{
_Unlock_writer();
}
else
{
_ASSERT_EXPR(false, L"Lock not being held");
}
}
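// ---------------------------------------------------------------------------
// Assumed bit layout of _M_lockState, for illustration only; the real
// constants are defined elsewhere in the runtime and may differ.
// ---------------------------------------------------------------------------
#if 0
static const long RWLockWriterInterested     = 1;  // a writer is queued
static const long RWLockWriterExclusive      = 2;  // a writer owns the lock
static const long RWLockReaderInterested     = 4;  // a reader convoy is forming
static const long RWLockReaderCountIncrement = 8;  // active readers counted from bit 3 up
// Under this layout, _M_lockState >= RWLockReaderCountIncrement in unlock()
// above means "at least one reader is active".
#endif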
/// <summary>
/// Called for the first context in the writer queue. It sets the queue head and tries to
/// claim the lock if no readers are active.
/// </summary>
/// <param name="_PWriter">
/// The first writer in the queue.
/// </param>
bool reader_writer_lock::_Set_next_writer(void * _PWriter)
{
_M_pWriterHead = _PWriter;
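// Note (illustrative, in terms of the layout sketched after unlock() above):
// the two ORs are deliberately separate. The first loses to any reader that
// has already registered interest, letting that reader batch drain first;
// the second claims exclusivity only if no reader is currently counted as
// active, otherwise _Unlock_reader releases this writer later.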
if (((InterlockedOr(&_M_lockState, RWLockWriterInterested) & RWLockReaderInterested) == 0) &&
(InterlockedOr(&_M_lockState, RWLockWriterExclusive) < RWLockReaderCountIncrement))
{
return true;
}
return false;
}
/// <summary>
/// Called when writers are done with the lock, or when the lock was free for claiming by
/// the first incoming reader. If more writers have become interested in the meantime,
/// the list of readers is finalized and convoyed, while the head of the list
/// is reset to NULL.
/// </summary>
/// <returns>
/// Pointer to the head of the reader list.
/// </returns>
void * reader_writer_lock::_Get_reader_convoy()
{
// In one interlocked step, clear reader interested flag and increment the reader count.
long prevLockState = InterlockedExchangeAdd(&_M_lockState, RWLockReaderCountIncrement - RWLockReaderInterested);
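// (Illustrative, using the layout sketched after unlock() above: Interested == 4,
// Increment == 8.) A prior state of e.g. 0b0100 becomes 0b1000: one atomic add
// clears the reader-interested flag and counts the head reader as active. The
// subtraction cannot borrow into the count because the interested flag is set
// whenever this function is reached.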
//
// If the lock is being raced for by a reader and a writer, allow this last batch of
// readers to go through and then close the lock to newly incoming readers, granting
// exclusive access to the writer.
//
if ((prevLockState & RWLockWriterInterested) != 0 && (prevLockState & RWLockWriterExclusive) == 0)
{
InterlockedOr(&_M_lockState, RWLockWriterExclusive);
}
// Return the batch of readers to be unblocked
return reinterpret_cast<void *>(InterlockedExchangePointer(&_M_pReaderHead, NULL));
}
/// <summary>
/// Called from unlock() when a writer is holding the lock. The writer unblocks the next writer
/// in the list and is retired. If there are no more writers, but there are interested readers,
/// then the readers are unblocked.
/// </summary>
/// <remarks>
/// Were it not for the race where a new writer is added while the last writer is unlocking the
/// lock, there would be no need for the writer structure in unlock(). Because of this race there
/// is an ABA problem, and the writer's information has to be passed to a scratch writer
/// (_M_activeWriter) internal to the lock.
/// </remarks>
void reader_writer_lock::_Unlock_writer()
{
_ASSERTE((_M_lockState & RWLockWriterExclusive) != 0);
_ASSERTE(_M_pWriterHead != NULL);
LockQueueNode * pCurrentNode = reinterpret_cast<LockQueueNode *>(_M_pWriterHead);
_ASSERT_EXPR(pCurrentNode->m_pContext == SchedulerBase::SafeFastCurrentContext(), L"Lock being held by different writer");
LockQueueNode * pNextNode = pCurrentNode->m_pNextNode;
_M_pWriterHead = pNextNode;
// Reset context on the active writer to ensure that it is possible to detect the error case
// where the same writer tries to enter the lock twice.
reinterpret_cast<LockQueueNode *>(&_M_activeWriter)->m_pContext = NULL;
if (pNextNode != NULL)
{
pNextNode->Unblock();
}
else
{
// If there are readers lined up, then unblock them
if ((InterlockedAnd(&_M_lockState, ~(RWLockWriterInterested | RWLockWriterExclusive)) & RWLockReaderInterested) != 0)
{
LockQueueNode * pHeadNode = reinterpret_cast<LockQueueNode *>(_Get_reader_convoy());
pHeadNode->Unblock();
}
// Safely remove this writer, keeping in mind there might be a race for the queue tail.
_Remove_last_writer(pCurrentNode);
}
}
/// <summary>
/// When the last writer leaves the lock it needs to reset the tail to NULL so that the next
/// incoming writer knows to try to grab the lock. If the CAS to NULL fails, then some other
/// writer managed to grab the tail before the reset, so this writer needs to wait until the
/// link to the next writer is complete before trying to release the next writer.
/// </summary>
/// <param name="_PWriter">
/// Last writer in the queue.
/// </param>
void reader_writer_lock::_Remove_last_writer(void * _PWriter)
{
// If someone is adding a writer, wait until the next node pointer is populated.
if (reinterpret_cast<LockQueueNode *>(InterlockedCompareExchangePointer(&_M_pWriterTail, NULL, _PWriter)) != _PWriter)
{
LockQueueNode * pWriter = reinterpret_cast<LockQueueNode *>(_PWriter);
LockQueueNode * pNextWriter = pWriter->WaitForNextNode();
if (_Set_next_writer(pNextWriter))
{
pNextWriter->Unblock();
}
}
}
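// ---------------------------------------------------------------------------
// Minimal sketch (assumed toy types, not the runtime's code) of the tail-reset
// race handled above: the departing node CASes the shared tail from itself to
// NULL; if the CAS fails, another thread has already swapped itself in as the
// new tail, and the departing node must wait for its 'next' link to appear.
// ---------------------------------------------------------------------------
#if 0
#include <atomic>

struct QNode { std::atomic<QNode*> next{nullptr}; };

// Returns true if 'self' was the last queued node and the queue is now empty;
// false means another thread enqueued behind 'self' and must be waited for.
bool try_retire_tail(std::atomic<QNode*>& tail, QNode* self)
{
    QNode* expected = self;
    return tail.compare_exchange_strong(expected, nullptr);
}
#endif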
/// <summary>
/// Acquires a write lock given a specific write node to lock.
/// </summary>
/// <param name="_PLockingNode">
/// The node that needs to own the lock.
/// </param>
/// <param name="_FHasExternalNode">
/// Whether the node being locked is external to the reader_writer_lock.
/// </param>
/// <remarks>
/// Throws an improper_lock exception if the lock is acquired recursively.
/// </remarks>
void reader_writer_lock::_Acquire_lock(void * _PLockingNode, bool _FHasExternalNode)
{
LockQueueNode * pNewWriter = reinterpret_cast<LockQueueNode *>(_PLockingNode);
LockQueueNode * pActiveWriter = reinterpret_cast<LockQueueNode *>(_M_activeWriter);
// Locks are non-reentrant, so throw if this condition is detected.
if (pNewWriter->m_pContext == pActiveWriter->m_pContext)
{
throw improper_lock("Lock already taken");
}
LockQueueNode * pPreviousWriter = reinterpret_cast<LockQueueNode *>(InterlockedExchangePointer(&_M_pWriterTail, pNewWriter));
bool doNeedBlock = true;
if (pPreviousWriter == NULL)
{
pNewWriter->UpdateQueuePosition(pActiveWriter);
// This is the only writer that currently exists
if (_Set_next_writer(pNewWriter))
{
doNeedBlock = false;
pNewWriter->UnblockWithoutContext();
}
}
else
{
pNewWriter->UpdateQueuePosition(pPreviousWriter);
pPreviousWriter->m_pNextNode = pNewWriter;
// Note: pPreviousWriter is *unsafe* after the assignment above!
}
// Don't block if the context unblocked itself already
if (doNeedBlock)
{
pNewWriter->Block(pActiveWriter->m_ticketState);
// Do another position estimation in case we missed the previous number due to a race
pNewWriter->UpdateQueuePosition(pActiveWriter);
}
// Since calls with external nodes will not call _Switch_to_active, make
// sure that we are setting the head and the active node properly.
if (_FHasExternalNode)
{
pActiveWriter->Copy(pNewWriter);
_M_pWriterHead = pNewWriter;
}
}
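// ---------------------------------------------------------------------------
// Sketch of the enqueue step above on a toy queue (assumed types, not the
// runtime's code): an atomic exchange publishes the new node as the tail and
// returns the previous tail, mirroring the InterlockedExchangePointer on
// _M_pWriterTail in _Acquire_lock.
// ---------------------------------------------------------------------------
#if 0
#include <atomic>

struct QNode { std::atomic<QNode*> next{nullptr}; };

QNode* enqueue_writer(std::atomic<QNode*>& tail, QNode* self)
{
    QNode* prev = tail.exchange(self);
    if (prev != nullptr)
    {
        // Link the predecessor to us. As the comment in _Acquire_lock warns,
        // 'prev' may be dequeued and unsafe to touch after this store.
        prev->next.store(self);
    }
    return prev;  // NULL means the queue was empty and the lock may be claimed
}
#endif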
/// <summary>
/// The writer node allocated on the stack never really owns the lock, because it would go out of
/// scope and its contents would not be visible in unlock(), where the next writer in the queue
/// might need to be unblocked. Instead, its state is transferred to the internal writer node,
/// which is used as a scratch node.
/// </summary>
/// <param name="_PWriter">
/// The writer that needs to own the lock.
/// </param>
void reader_writer_lock::_Switch_to_active(void * _PWriter)
{
_ASSERTE((_M_lockState & RWLockWriterExclusive) != 0);
LockQueueNode * pWriter = reinterpret_cast<LockQueueNode *>(_PWriter);
LockQueueNode * pActiveWriter = reinterpret_cast<LockQueueNode *>(_M_activeWriter);
//
// Copy the contents of the writer allocated on the stack, which now owns the lock, so that its
// information is available during unlock().
//
pActiveWriter->Copy(pWriter);
//
// If someone is adding a writer, then wait until the next node pointer is populated. Otherwise,
// there will be no way to unblock the next writer after newWriterNode goes out of scope.
//
if (pActiveWriter->m_pNextNode == NULL)
{
//
// If the compare-and-swap to active writer succeeds that means that a new writer coming in will call _Set_next_writer, which
// will properly set the _M_pWriterHead. Otherwise, it has to be set manually when next node is done.
//
if (reinterpret_cast<LockQueueNode *>(InterlockedCompareExchangePointer(&_M_pWriterTail, pActiveWriter, pWriter)) != pWriter)
{
pWriter->WaitForNextNode();
//
// During the initial copy the next pointer was not copied over and it has been populated in the meantime.
// This copy can now be safely performed because tail has moved, so next will point to the second element.
//
pActiveWriter->Copy(pWriter);
}
}
_ASSERTE(_PWriter != _M_pWriterTail);
_M_pWriterHead = pActiveWriter;
}
/// <summary>
/// Called from unlock() when a reader is holding the lock. The reader count is decremented, and
/// if this is the last reader, it checks whether there are interested writers that need to be unblocked.
/// </summary>
void reader_writer_lock::_Unlock_reader()
{
long resultState = InterlockedExchangeAdd(&_M_lockState, -RWLockReaderCountIncrement);
//
// If this is the last reader and there are writers lined up, then unblock them. However,
// if the exclusive writer flag is not set, the writers will take care of themselves.
//
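// Worked example (layout sketched after unlock() above: Increment == 8,
// WriterInterested == 1, WriterExclusive == 2): ignoring the reader-interested
// flag, a prior state of 8|1|2 == 11 means exactly one reader was active and a
// writer has already claimed exclusivity, so that writer is blocked on this
// reader and must be unblocked here. A higher reader count fails the test.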
if ((resultState & (~RWLockReaderInterested)) == (RWLockReaderCountIncrement | RWLockWriterInterested | RWLockWriterExclusive))
{
_ASSERTE(_M_pWriterTail != NULL);
reinterpret_cast<LockQueueNode *>(_M_pWriterHead)->Unblock();
}
}
/// <summary>
/// Constructs a holder object and acquires the reader_writer_lock passed to it.
/// If the reader_writer_lock is held by another thread, this call will block.
/// </summary>
/// <param name="_Reader_writer_lock">
/// The reader_writer_lock to lock.
/// </param>
reader_writer_lock::scoped_lock::scoped_lock(reader_writer_lock& _Reader_writer_lock) : _M_reader_writer_lock(_Reader_writer_lock)
{
static_assert(sizeof(LockQueueNode) <= sizeof(_M_writerNode), "_M_writerNode buffer too small");
LockQueueNode * pNewWriterNode = reinterpret_cast<LockQueueNode *>(_M_writerNode);
new(pNewWriterNode) LockQueueNode;
_M_reader_writer_lock._Acquire_lock(pNewWriterNode, true);
}
/// <summary>
/// Destructs a holder object and releases the reader_writer_lock.
/// </summary>
reader_writer_lock::scoped_lock::~scoped_lock()
{
_M_reader_writer_lock.unlock();
}
/// <summary>
/// Constructs a holder object and acquires the reader_writer_lock passed to it.
/// If the reader_writer_lock is held by another thread, this call will block.
/// </summary>
/// <param name="_Reader_writer_lock">
/// The reader_writer_lock to lock.
/// </param>
reader_writer_lock::scoped_lock_read::scoped_lock_read(reader_writer_lock& _Reader_writer_lock) : _M_reader_writer_lock(_Reader_writer_lock)
{
_M_reader_writer_lock.lock_read();
}
/// <summary>
/// Destructs a holder object and releases the reader_writer_lock.
/// </summary>
reader_writer_lock::scoped_lock_read::~scoped_lock_read()
{
_M_reader_writer_lock.unlock();
}
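// ---------------------------------------------------------------------------
// Hypothetical usage of the two RAII holders above ('g_lock' and 'g_value'
// are placeholders, not part of this file): unlock() runs in the destructor
// even if the guarded body throws.
// ---------------------------------------------------------------------------
#if 0
void increment()
{
    reader_writer_lock::scoped_lock guard(g_lock);       // exclusive writer access
    ++g_value;
}

int snapshot()
{
    reader_writer_lock::scoped_lock_read guard(g_lock);  // shared reader access
    return g_value;
}
#endif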
} // namespace Concurrency