// event.cpp
// Do *NOT* touch the this pointer after this marker:
//
//
// wait_for_multiple() or wait() may be in the process of chaining the context onto the wait
// chain of the event. Before chaining, it takes a lock on the event. It is possible that the
// context being unblocked here (in this SingleSatisfy()) is the very one blocked on that lock.
// In that case the m_fOkToUnblock flag was set to FALSE in wait_for_multiple() or wait(), so
// here (in set()) we must not Unblock() the context; we also set a flag (m_fDoNotBlock) telling
// wait_for_multiple() not to Block() the context. The skipped Block() and Unblock() cancel
// each other out.
// If we did not take this measure and Unblocked anyway, the context blocked on the lock above
// would start running and the critical region would execute concurrently, which would be
// disastrous. Unblocking here could also produce an Unblock/Unblock sequence on a context,
// which is illegal.
//
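//
// Sketch of the handshake on m_smSkip_BlockUnblock (illustration only; the waiter-side code
// lives in wait_for_multiple()/wait() and is not shown in this file, so its exact shape is
// presumed from the description above):
//
//     // setter side (the code below):
//     if (InterlockedCompareExchange(&m_smSkip_BlockUnblock, SKIP, UNDECIDED) != DONT_SKIP)
//         ;                    // waiter has not committed to blocking; it will observe SKIP
//     else
//         pContext->Unblock(); // waiter has committed to blocking; wake it
//
//     // waiter side (presumed counterpart):
//     if (InterlockedCompareExchange(&m_smSkip_BlockUnblock, DONT_SKIP, UNDECIDED) != SKIP)
//         Block();             // setter has not decided yet; it will Unblock() us
//     // otherwise the event was already set and chose SKIP, so do not Block()
//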
bool bSkip = !(pNode->m_pWaitBlock->m_smSkip_BlockUnblock == WaitBlock::DONT_SKIP // Avoid an unnecessary InterlockedCompareExchange as an optimization.
|| InterlockedCompareExchange(&pNode->m_pWaitBlock->m_smSkip_BlockUnblock, WaitBlock::SKIP, WaitBlock::UNDECIDED) == WaitBlock::DONT_SKIP);
if(bSkip)
{
if(pContextOut != NULL)
*pContextOut = NULL; // No context in list, hence no Unblocking in set()
}
else if (pContextOut != NULL)
*pContextOut = pContext;
else
pContext->Unblock();
}
}
/// <summary>
/// Called when a timer on an event is signaled.
/// </summary>
void MultiWaitBlock::DispatchEventTimer(LPVOID pContext, BOOLEAN)
{
MultiWaitBlock *pWaitBlock = reinterpret_cast<MultiWaitBlock *> (pContext);
Context *pUnblockContext = NULL;
if (InterlockedIncrement(&pWaitBlock->m_finalTrigger) == 1)
{
pUnblockContext = pWaitBlock->m_pContext;
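//
// Delete the timer. DeleteTimerQueueTimer() with a NULL completion event may fail with
// ERROR_IO_PENDING, which means the deletion will complete once outstanding callbacks
// finish; that is acceptable here. Any other failure is retried.
//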
for(;;)
{
if (!DeleteTimerQueueTimer(GetSharedTimerQueue(), pWaitBlock->m_hTimer, NULL))
{
if (GetLastError() == ERROR_IO_PENDING)
break;
}
else
break;
}
//
// Note that after this point, m_hTimer is invalid. Only the entity that transitions m_finalTrigger
// to 1 is allowed to play with deleting the timer.
//
}
if (pUnblockContext != NULL)
{
pWaitBlock->m_pSatisfiedBy = NULL;
pUnblockContext->Unblock();
}
pWaitBlock->NotifyCompletedNode();
}
/// <summary>
/// Called to indicate that the event wait has been satisfied.
/// </summary>
bool SingleWaitBlock::Satisfy(Context **pContextOut, EventWaitNode *pNode)
{
//
// For explanation of skipping Block/Unblock please see the comments in MultiWaitBlock::SingleSatisfy() method.
//
bool bSkip = !(pNode->m_pWaitBlock->m_smSkip_BlockUnblock == WaitBlock::DONT_SKIP // Avoid an unnecessary InterlockedCompareExchange as an optimization.
|| InterlockedCompareExchange(&pNode->m_pWaitBlock->m_smSkip_BlockUnblock, WaitBlock::SKIP, WaitBlock::UNDECIDED) == WaitBlock::DONT_SKIP );
if( bSkip )
{
if(pContextOut)
*pContextOut = NULL; // No context in list, hence no Unblocking in set()
}
else if (pContextOut != NULL)
*pContextOut = m_pContext;
else
m_pContext->Unblock();
return false;
}
#pragma warning(disable : 4702)
/// <summary>
/// Called to indicate that the event for a single wait has been reset.
/// </summary>
bool SingleWaitBlock::Reset()
{
ASSERT(false);
return false;
}
/// <summary>
/// Called to indicate that the event node was on the rundown list at event destruction.
/// </summary>
void SingleWaitBlock::Destroy()
{
ASSERT(false);
}
#pragma warning(default : 4702)
/// <summary>
/// Called during a sweep to check whether this node still needs to be alive.
/// </summary>
bool SingleWaitBlock::Sweep()
{
return true;
}
void MultiWaitBlock::NotifyCompletedNode()
{
size_t waiters = m_waiters;
//
// Once satisfied, we are responsible for incrementing the completion counter. When it reaches
// one more than the number of waiters, we can destroy the shared wait block.
//
if (InterlockedIncrementSizeT(&m_completions) == waiters + 1)
delete[] (reinterpret_cast <BYTE *> (this));
}
/// <summary>
/// Called to indicate that an event for the wait-any has triggered and we should satisfy this
/// wait block.
/// </summary>
bool WaitAnyBlock::Satisfy(Context **pContextOut, EventWaitNode *pNode)
{
if (pContextOut != NULL)
*pContextOut = NULL;
//
// NOTE: m_pWaitBlock is unsafe as soon as we increment the counter if we are not the entity
// to increment the counter to the wait limit. Cache everything up front!
//
ASSERT(m_triggerLimit == 1);
size_t triggerCount = InterlockedIncrementSizeT(&m_count);
if (triggerCount == m_triggerLimit)
SingleSatisfy(pContextOut, pNode);
NotifyCompletedNode();
//
// On a wait-any, we no longer need the wait node. The single wait block containing the node is
// freed by the last satisfied waiter.
//
return false;
}
/// <summary>
/// Called to indicate that an event in the wait-any has reset. This is irrelevant to us.
/// </summary>
bool WaitAnyBlock::Reset()
{
return false;
}
/// <summary>
/// Called to indicate that an event with the node present on the rundown list is being
/// destroyed. This should never be called for a wait any.
/// </summary>
void WaitAnyBlock::Destroy()
{
}
/// <summary>
/// Called during a sweep to check whether this node still needs to be alive.
/// </summary>
bool WaitAnyBlock::Sweep()
{
if (m_count >= m_triggerLimit)
{
Context *pContext;
Satisfy(&pContext, NULL);
ASSERT(pContext == NULL);
return false;
}
return true;
}
/// <summary>
/// Called to indicate that an event for the wait-all has triggered and we should satisfy this
/// wait node. Note that this does *NOT* indicate that the wait should be satisfied yet.
/// </summary>
bool WaitAllBlock::Satisfy(Context **pContextOut, EventWaitNode *pNode)
{
if (pContextOut != NULL)
*pContextOut = NULL;
ASSERT(m_triggerLimit >= 1);
size_t triggerCount = InterlockedIncrementSizeT(&m_count);
if (triggerCount == m_triggerLimit)
{
SingleSatisfy(pContextOut, pNode);
NotifyCompletedNode();
return false;
}
return true;
}
/// <summary>
/// Called to indicate that an event which was previously signaled and counting towards a satisfied
/// wait all block has reset.
/// </summary>
bool WaitAllBlock::Reset()
{
size_t triggerLimit = m_triggerLimit;
//
// Ensure that we never decrement once the wait is satisfied. We need to make sure that a
// subsequent reset just gets rid of the wait block.
//
size_t previousTriggerCount = m_count;
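//
// Decrement the count with a compare-exchange loop, bailing out without decrementing if the
// count has already reached the trigger limit (i.e., the wait was satisfied in the meantime).
//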
for(;;)
{
if (previousTriggerCount == triggerLimit)
break;
size_t xchgCount = InterlockedCompareExchangeSizeT(&m_count, previousTriggerCount - 1, previousTriggerCount);
if (xchgCount == previousTriggerCount)
break;
previousTriggerCount = xchgCount;
}
if (previousTriggerCount == triggerLimit)
{
NotifyCompletedNode();
return false;
}
return true;
}
/// <summary>
/// Called during a sweep to check whether this node still needs to be alive.
/// </summary>
bool WaitAllBlock::Sweep()
{
ASSERT(m_count <= m_triggerLimit);
if (m_count >= m_triggerLimit)
{
//
// The reset will clear us out.
//
Reset();
return false;
}
return true;
}
/// <summary>
/// Called when an event with an all-node is destroyed with the event present on a rundown list, this
/// destroys the wait node and releases its shared reference on the wait block.
/// </summary>
void WaitAllBlock::Destroy()
{
NotifyCompletedNode();
}
/// <summary>
/// Called in order to sweep out unused entries from a given node list. This clears dead wait-for-all nodes
/// on a reset-list or dead wait-for-any nodes on the wait-list.
/// </summary>
EventWaitNode * Sweep(EventWaitNode *pNode)
{
EventWaitNode *pRoot = NULL;
EventWaitNode *pNext = NULL;
for (; pNode != NULL; pNode = pNext)
{
pNext = pNode->m_pNext;
if (pNode->Sweep())
{
pNode->m_pNext = pRoot;
pRoot = pNode;
}
}
return pRoot;
}
//
// A StructuredEvent is simply a pointer with a few distinguished values. A newly
// initialized StructuredEvent is set to 0. A StructuredEvent that has one or more waiters
// on it (that is, contexts which called StructuredEvent::Wait before StructuredEvent::Set
// signaled the event) points to a linked list of those waiters, built from stack-based
// blocks so that no heap allocation is required. A StructuredEvent that is signaled is set
// to 1. Once an event is signaled, it can safely be deallocated, even if
// StructuredEvent::Set is still running.
//
//
// StructuredEvent - Synchronization object mediating access to the low-level context
// Block and Unblock APIs.
//
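//
// For reference, the states m_ptr can take (EVENT_SIGNALED being the distinguished value 1,
// per the description above):
//
//     m_ptr == 0               : newly initialized, no waiters, not signaled
//     m_ptr == EVENT_SIGNALED  : signaled; any waiters have been (or are being) released
//     m_ptr == anything else   : head of a stack-allocated StructuredEventWaitNode list
//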
struct StructuredEventWaitNode
{
StructuredEventWaitNode *m_next;
::Concurrency::Context *m_context;
};
//
// Wait until the event is signaled (via some other context calling Set())
//
void StructuredEvent::Wait()
{
//
// Spin a short time waiting to be signaled before we block
//
void *oldPtr = m_ptr;
if (oldPtr == EVENT_SIGNALED)
return;
_SpinWaitBackoffNone spinWait;
for (;;)
{
oldPtr = m_ptr;
if (oldPtr == EVENT_SIGNALED)
return;
if ( !spinWait._SpinOnce())
break;
}
//
// Give up and block, first putting our context on a stack-based
// list of waiting contexts for this event
//
::Concurrency::Context *context = SchedulerBase::FastCurrentContext();
StructuredEventWaitNode node;
node.m_context = context;
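//
// Push our stack node onto the head of the waiter list with a compare-exchange loop; if the
// event becomes signaled while we race, return without blocking.
//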
for (;;)
{
node.m_next = (StructuredEventWaitNode*)oldPtr;
void *xchgPtr = InterlockedCompareExchangePointer(&m_ptr, &node, oldPtr);
if (xchgPtr == oldPtr)
break;
oldPtr = xchgPtr;
if (oldPtr == EVENT_SIGNALED)
{
//
// Event was signaled before we could add ourself to the wait
// list, so no need to block any longer
//
return;
}
}
context->Block();
}
//
// Set the event as signaled, and unblock any other contexts waiting
// on the event.
//
void StructuredEvent::Set()
{
void *oldPtr = m_ptr;
//
// Mark the event signaled, and get the waiters list, if any
//
for (;;)
{
void *xchgPtr = InterlockedCompareExchangePointer(&m_ptr, EVENT_SIGNALED, oldPtr);
if (xchgPtr == oldPtr)
break;
oldPtr = xchgPtr;
}
//
// If the event had any waiters, then unblock them
//
if (oldPtr > EVENT_SIGNALED)
{
for (StructuredEventWaitNode *node = (StructuredEventWaitNode *)oldPtr, *next; node != NULL; node = next)
{
//
// Need to cache the next pointer, since as soon as we unblock,
// the stack-based StructuredEventWaitNode may be deallocated.
//
// Technically, there should be a memory fence after retrieving
// the next pointer, but practically it's unnecessary, as long
// as there is a locked operation inside the call to Unblock
// before the blocked context starts running. I don't think
// it's possible to write a scheduler unblock operation without
// needing a locked op, so I'm avoiding the extra cost per
// waiter here.
//
next = node->m_next;
node->m_context->Unblock();
}
}
}
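//
// Typical usage, as a minimal sketch (hypothetical caller; real callers are internal to the
// scheduler, and a newly initialized event must start at 0):
//
//     StructuredEvent ev;   // no waiters, not signaled
//     // consumer context:
//     ev.Wait();            // spins briefly, then Block()s until the event is Set()
//     // producer context:
//     ev.Set();             // marks the event signaled and Unblock()s any waiters
//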
} // namespace details
} // namespace Concurrency