// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// event.cpp
//
// The core implementation of events which understand the cooperative nature of the scheduler and are designed
// to be scalable.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#include "concrtinternal.h"
//
// NOTE: The design of the wait-for-multiple semantic tries to keep the following goals:
//
// - Single event waits (create/wait/set) are very efficient requiring few memory barriers and
// absolutely no shared locking. This is necessary to support utilizing the event for stolen chore
// signaling. Currently, there are N*2+1 memory barriers (where N is the number of barriers
// needed to grab/release a spin-lock).
//
// - Multiple event wait can be supported on the same Event type. There is (as yet) no bifurcation between
// an event that can be used in a WaitForMultiple and one that can't.
//
// - There are (few) shared locks between multiple events.
//
// This leads to a few unfortunate side effects in the implementation described below:
//
// - Each event has a spinlock which now guards its wait chains. No longer do we have a simple CAS loop
// for light-weight events. This will add extra memory barriers in the fast path (5 or 3 depending
// on the lock versus 2 in the prototype implementation).
//
// - Each wait-for-multiple requires a single heap allocation. With normal sized wait lists, this should
// come from the concurrent suballocator.
//
// Wait-for-multiple on N events requires N spinlock acquisitions, although the code is left open to the
// possibility of lock pooling on a granularity to be decided by the scheduler.
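//
// For illustration only (not part of the compiled code): a minimal sketch of the two usage
// patterns the goals above optimize for, using the public Concurrency::event API implemented
// in this file (e1/e2 are hypothetical events):
//
//     Concurrency::event e;
//     e.set();                 // signal: the single-event fast path takes no shared lock
//     e.wait();                // single-event wait: spin briefly, then block cooperatively
//
//     Concurrency::event *events[2] = { &e1, &e2 };
//     // Block until both events are set, or until 100 milliseconds have elapsed; a timed-out
//     // wait returns Concurrency::COOPERATIVE_WAIT_TIMEOUT.
//     size_t result = Concurrency::event::wait_for_multiple(events, 2, true, 100);
//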
namespace Concurrency
{
namespace details
{
//**************************************************************************
// Shared Functionality:
//
// This functionality is shared between the agents infrastructure in msvcp (which uses it for
// timeouts) and the eventing infrastructure here in msvcr (which also uses it for timeouts).
// This is an msvcr export to msvcp to keep a shared timer queue throughout ConcRT.
//
//**************************************************************************
volatile LONG g_TimerQueueDemandInit = 0;
volatile HANDLE g_hTimerQueue = NULL;
/// <summary>
/// Returns the single, demand-initialized timer queue used for event timeouts, timer agents, etc.
/// </summary>
HANDLE GetSharedTimerQueue()
{
if (g_hTimerQueue == NULL)
{
if (InterlockedCompareExchange(&g_TimerQueueDemandInit, 1, 0) == 0)
{
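// This context won the demand-init race (flag 0 -> 1) and is responsible for creating
// the shared timer queue; on failure the flag is cleared so a later caller can retry.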
g_hTimerQueue = CreateTimerQueue();
if (g_hTimerQueue == NULL)
{
InterlockedExchange(&g_TimerQueueDemandInit, 0);
}
}
else
{
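// Another context is performing the initialization: spin until it either publishes the
// queue or fails and resets the flag.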
_SpinWaitBackoffNone spinWait;
while(g_hTimerQueue == NULL && g_TimerQueueDemandInit == 1)
{
spinWait._SpinOnce();
}
}
if (!g_hTimerQueue)
{
throw std::bad_alloc();
}
}
return g_hTimerQueue;
}
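//
// Illustrative only: a caller needing an eventing timeout would typically arm a one-shot
// timer against this shared queue with the Win32 timer-queue API, roughly as follows
// (pWaitBlock and timeoutInMilliseconds are hypothetical; DispatchEventTimer is declared below):
//
//     HANDLE hTimer = NULL;
//     CreateTimerQueueTimer(&hTimer, GetSharedTimerQueue(),
//                           MultiWaitBlock::DispatchEventTimer, pWaitBlock,
//                           timeoutInMilliseconds, 0 /* one shot */, WT_EXECUTEDEFAULT);
//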
//**************************************************************************
// Internal Prototypes and Definitions:
//
// These are purely internal to the event implementation and are placed here in lieu
// of generally visible headers.
//**************************************************************************
class EventWaitNode;
/// <summary>
/// Represents a wait block. It is indirectly chained to an event via a Wait*Node.
/// </summary>
class WaitBlock
{
public:
enum STATE {UNDECIDED, SKIP, DONT_SKIP};
/// <summary>
/// Wait block constructor
/// </summary>
WaitBlock() : m_pContext(NULL), m_smSkip_BlockUnblock(UNDECIDED)
{
m_pContext = Context::CurrentContext();
}
/// <summary>
/// Called when the wait is satisfied (the event is signaled). Note that the derived class may or may
/// not unblock depending on the exact wait semantics.
/// </summary>
/// <returns>
/// An indication of whether the event needs to track this node after a signal due to the potential
/// for a reset to impact the overall wait.
/// </returns>
virtual bool Satisfy(Context **pContextOut, EventWaitNode *pNode) = 0;
/// <summary>
/// Called when the event is reset. A wait-all may need to adjust counters to prevent the wait from being
/// satisfied.
/// </summary>
/// <returns>
/// An indication of whether the wait node is still valid
/// </returns>
virtual bool Reset() = 0;
/// <summary>
/// Called when the underlying event is being destroyed / rundown. Allows cleaning up of wait blocks.
/// </summary>
virtual void Destroy() = 0;
/// <summary>
/// Called in order to check whether a node is still alive or dead during a list sweep.
/// </summary>
virtual bool Sweep() = 0;
// The context which this wait must block/unblock.
Context *m_pContext;
// Flag that decides whether to skip a block/unblock pair, so that a context blocked on a scoped
// lock is not unblocked by the event's set operation, which would be the wrong/mismatched reason for unblocking.
// Further comments in the MultiWaitBlock::SingleSatisfy() method.
volatile long m_smSkip_BlockUnblock;
};
/// <summary>
/// Represents a wait on a single object (with or without a timer).
/// </summary>
class SingleWaitBlock : public WaitBlock
{
public:
virtual bool Satisfy(Context **pContextOut, EventWaitNode *pNode);
virtual bool Reset();
virtual void Destroy();
virtual bool Sweep();
};
class MultiWaitBlock : public WaitBlock
{
public:
// An indication of which object caused the wait to be satisfied.
EventWaitNode *m_pSatisfiedBy;
// Timer queue timer.
HANDLE m_hTimer;
// The final trigger count.
volatile long m_finalTrigger;
// The number of things pointing at the wait block (wait nodes or timers).
size_t m_waiters;
// When the count reaches the trigger limit, the wait block is satisfied.
volatile size_t m_triggerLimit;
// The number of signaled objects
volatile size_t m_count;
// The number of completed waiters (master counter of when the block can be freed)
volatile size_t m_completions;
// An indication of whether this wait has a timeout or not. Timeouts are handled by a two stage
// wait (m_count -> m_finalTrigger).
bool m_fTimeout;
/// <summary>
/// MultiWaitBlock constructor.
/// </summary>
MultiWaitBlock(size_t waitObjects, bool timeout, bool timer) :
m_count(0), m_completions(0), m_fTimeout(timeout), m_waiters(waitObjects + (timer ? 1 : 0)), m_pSatisfiedBy(NULL), m_hTimer(NULL), m_finalTrigger(0)
{
}
/// <summary>
/// Called when a node (or something masquerading as such) is done with its reference on the block.
/// </summary>
void NotifyCompletedNode();
/// <summary>
/// Called when a timer on the wait block fires.
/// </summary>
static void CALLBACK DispatchEventTimer(PVOID pContext, BOOLEAN timerOrWaitFired);
protected:
virtual void SingleSatisfy(Context **pContextOut, EventWaitNode *pNode);
};
class MultiWaitBlockHolder
{
public:
MultiWaitBlockHolder(bool fWaitAll, size_t count, bool timeout, bool timer);
~MultiWaitBlockHolder();
void Release()
{
m_count++;
}
MultiWaitBlock *GetWaitBlock() const
{
return m_pWaitBlock;
}
EventWaitNode *GetWaitNode(size_t i) const
{
return reinterpret_cast <EventWaitNode *> (m_pMemBlock + m_blockSize + m_nodeSize * i);
}
size_t GetIndexOfNode(EventWaitNode *pNode) const
{
return (size_t) (reinterpret_cast <PBYTE> (pNode) - (m_pMemBlock + m_blockSize)) / m_nodeSize;
}
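// Layout of the single heap allocation managed by the holder (cf. the single-allocation goal
// in the file comments above):
//
//     m_pMemBlock --> [ MultiWaitBlock : m_blockSize bytes ][ EventWaitNode 0 ][ EventWaitNode 1 ] ...
//
// so wait node i lives at m_pMemBlock + m_blockSize + i * m_nodeSize.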
private:
size_t m_blockSize;
size_t m_nodeSize;
size_t m_totalBlockSize;
BYTE *m_pMemBlock;
MultiWaitBlock *m_pWaitBlock;
size_t m_count;
size_t m_refs;
};
class WaitAllBlock : public MultiWaitBlock
{
public:
WaitAllBlock(size_t waitObjects, bool timeout, bool timer) : MultiWaitBlock(waitObjects, timeout, timer)
{
m_triggerLimit = waitObjects;
}
virtual bool Satisfy(Context **pContextOut, EventWaitNode *pNode);
virtual bool Reset();
virtual void Destroy();
virtual bool Sweep();
};
class WaitAnyBlock : public MultiWaitBlock
{
public:
WaitAnyBlock(size_t waitObjects, bool timeout, bool timer) : MultiWaitBlock(waitObjects, timeout, timer)
{
m_triggerLimit = 1;
}
virtual bool Satisfy(Context **pContextOut, EventWaitNode *pNode);
virtual bool Reset();
virtual void Destroy();
virtual bool Sweep();
};
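// For example, per the constructors above: a wait-all over 3 events leaves m_triggerLimit == 3,
// so the waiting context is released only once Satisfy calls have driven m_count to 3; a
// wait-any leaves m_triggerLimit == 1, so the first satisfied event releases it.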
/// <summary>
/// An event wait node represents an abstract wait block which is chained to each event such that when the
/// event is signaled, the wait block is notified and performs the appropriate unblocking (or additional
/// waiting) required.
/// </summary>
class EventWaitNode
{
public:
EventWaitNode* m_pNext;
WaitBlock *m_pWaitBlock;
__declspec(nothrow) EventWaitNode(WaitBlock *pWaitBlock) : m_pWaitBlock(pWaitBlock)
{
}
bool Satisfy(Context **pContextOut)
{
return m_pWaitBlock->Satisfy(pContextOut, this);
}
bool Reset()
{
return m_pWaitBlock->Reset();
}
void Destroy()
{
m_pWaitBlock->Destroy();
}
bool Sweep()
{
return m_pWaitBlock->Sweep();
}
};
EventWaitNode * Sweep(EventWaitNode *pNode);
} // namespace details
/// <summary>
/// Constructs an event.
/// </summary>
event::event() :
_M_pWaitChain(EVENT_UNSIGNALED),
_M_pResetChain(NULL)
{
}
/// <summary>
/// Destroys an event.
/// </summary>
event::~event()
{
//
// It's entirely possible that some other thread is currently executing inside ::set, and is currently holding the lock.
// Since the waiter that was woken up could destroy the event, either by deleting a heap-allocated event
// or by unwinding the stack, we need to let that other thread (that invoked ::set) get out of the lock before we proceed.
//
_M_lock._Flush_current_owner();
//
// Go through and make sure any event blocks are satisfied. One would expect items only on the reset list,
// but we'll handle both cases -- the runtime should not be leaking regardless.
//
EventWaitNode *pRoot = NULL;
EventWaitNode *pNext;
EventWaitNode *pNode = reinterpret_cast <EventWaitNode *> (_M_pWaitChain);
if (pNode > EVENT_SIGNALED)
{
for(; pNode != NULL; pNode = pNext)
{
pNext = pNode->m_pNext;
if (pNode->Satisfy(NULL))
{
pNode->m_pNext = pRoot;
pRoot = pNode;
}
}
}
if (pRoot)
{
pRoot->m_pNext = reinterpret_cast <EventWaitNode *> (_M_pResetChain);
_M_pResetChain = pRoot;
}
for (pNode = reinterpret_cast <EventWaitNode *> (_M_pResetChain); pNode != NULL; pNode = pNext)
{
pNext = pNode->m_pNext;
pNode->Destroy();
}
}
/// <summary>
/// Waits on the specified event.
/// </summary>
size_t event::wait(unsigned int timeout)
{
const EventWaitNode *pOldChain;
//
// Waits with timeout fall back on the heavy weight "wait for multiple" mechanism. The only place
// we use a light-weight spin/stack semantic is with a single *WAIT*.
//
// We can specially handle a 0 timeout "check" here though.
//
if (timeout != COOPERATIVE_TIMEOUT_INFINITE)
{
if (timeout == 0)
{
if (reinterpret_cast <const EventWaitNode *> (_M_pWaitChain) == EVENT_SIGNALED)
return 0;
else
return COOPERATIVE_WAIT_TIMEOUT;
}
event *pThis = this;
return event::wait_for_multiple(&pThis, 1, true, timeout);
}
// Spin wait (no yielding) for the event to be set.
_SpinWaitNoYield spinWait;
do
{
pOldChain = reinterpret_cast <const EventWaitNode *> (_M_pWaitChain);
if (pOldChain == EVENT_SIGNALED)
{
return 0;
}