// taskcollection.cpp
//
if (pCurrentContext->HasAnyCancellation())
_Interrupt(false);
return _Completed;
}
/// <summary>
/// Aborts chores related to the task collection and waits for those which cannot be forcibly aborted.
/// </summary>
void _StructuredTaskCollection::_Abort()
{
    //
    // _Abort cannot be called unless Schedule was called which guaranteed _M_pOwningContext != NULL
    //
    ASSERT(_M_pOwningContext != NULL);
    ContextBase *pCurrentContext = reinterpret_cast<ContextBase *> (_M_pOwningContext);

    //
    // Phase 1: forcibly abort every chore still sitting (unstolen) on the owning
    // context's structured work-stealing queue. Each popped chore is detached from
    // this collection so it can no longer reference it.
    //
    while (_M_unpoppedChores > 0)
    {
        _UnrealizedChore *pChore = pCurrentContext->PopStructured();
        if (pChore == NULL)
            break; // local queue drained -- any remaining chores were stolen
        pChore->_M_pTaskCollection = NULL;
        //
        // Update the statistical information with the fact that a task has been dequeued
        //
        if (pCurrentContext->IsExternal())
            static_cast<ExternalContextBase *>(pCurrentContext)->IncrementDequeuedTaskCounter();
        else
            static_cast<InternalContextBase *>(pCurrentContext)->IncrementDequeuedTaskCounter();
        --_M_unpoppedChores;
    }

    //
    // Phase 2: anything still outstanding was stolen by another context and cannot
    // be forcibly aborted -- cancel again and block until the thieves complete.
    //
    if (_M_unpoppedChores > 0)
    {
        //
        // If there are stolen chores outstanding, redo the cancellation to trigger marking of them in special circumstances. It's entirely possible
        // that the cancellation only happened as a result of chaining and all our chores were stolen at the time.
        //
        _Cancel();
        _WaitOnStolenChores(_M_unpoppedChores);
        _M_unpoppedChores = 0;
    }

    //
    // Any caught exception on the collection should be rethrown on this thread. The exception might be one of several things:
    //
    // task_cancelled (or another internal runtime exception):
    //
    //     - We want to let this exception continue propagating unless there's a *more important* one (like an arbitrary exception) that occurred
    //       elsewhere.
    //
    // an arbitrary exception:
    //
    //     - We are allowed to choose an arbitrary exception to flow back.
    //
    // Spin until any in-flight exception capture has finished publishing the
    // exception pointer (it transiently holds the _S_nonNull sentinel).
    //
    _SpinWaitBackoffNone spinWait;
    while ((size_t) _Exception() == _S_nonNull) // make sure the exception is ready
        spinWait._SpinOnce();

    // Acknowledge completion of any cancellation state this collection participated
    // in before rethrowing, so the context's cancellation bookkeeping is balanced.
    if (_IsMarkedForCancellation())
    {
        pCurrentContext->PendingCancelComplete();
    }
    if (_PerformedInlineCancel())
    {
        pCurrentContext->CollectionCancelComplete(_M_inliningDepth);
    }

    _RethrowException();
}
/// <summary>
/// Cancels work on the task collection.
/// </summary>
void _StructuredTaskCollection::_Cancel()
{
    // Lazily bind the owning context: _Cancel may be reached before any chore
    // was scheduled (which is what normally sets _M_pOwningContext).
    if (_M_pOwningContext == NULL)
        _M_pOwningContext = SchedulerBase::CurrentContext();

    //
    // Multiple stolen chores might cancel at the same time. We can only allow one person into the path
    // which fires down threads so the counters get set correctly.
    //
    if (_MarkCancellation())
    {
        //
        // Determine which inline context needs to be aborted (we could be canceling from
        // a stolen chore which is perfectly legal under the structured semantic).
        //
        // Note that the original context may not have inlined yet. If we arbitrarily cancel the owning context,
        // we place a heavy restriction on structured task collection that it cannot have an interruption point between its
        // declaration and its Wait. At the moment, this is deemed to be too heavy
        // a restriction. As such, we only cancel if it is inlining. There is a subtle implication to this too. Because a
        // full fence is too expensive on the inlining side, the setting of inline can be reordered with respect to the read of
        // the cancellation bit. If that reordering is perceived by a canceling thread, chores may execute despite cancellation
        // on the inline context. This would be unfortunate, but perfectly legal according to the cancellation semantic.
        //
        // In order to avoid this type of race for the inline chore of a _RunAndWait, we are going to exploit special properties
        // of a structured task collection: since we have a guarantee that this collection will be inlined on this thread. We
        // are going to mark the thread as pending cancellation. This will allow us to elide a fence during a local
        // chore in _RunAndWait.
        //
        // It is *IMPERATIVE* that PendingCancel happen **BEFORE** the read of _M_inliningDepth.
        //
        ContextBase *pContext = reinterpret_cast <ContextBase *> (_M_pOwningContext);
        pContext->PendingCancel();
        if (_M_inliningDepth >= 0)
        {
            //
            // _M_inliningDepth is guaranteed to be stable if we perceive this. Only the inline context or a stolen chore can
            // cancel a structured collection. If the collection is currently inlined, we're in a wait which won't be satisfied until
            // this thread completes.
            //
            pContext->CancelCollection(_M_inliningDepth);
            _FinishCancelState(_S_cancelShotdownOwner);
        }
        else
        {
            // Not yet inlined: record that the owner shootdown is deferred until
            // the collection actually inlines.
            _FinishCancelState(_S_cancelDeferredShootdownOwner);
        }

        // Finally, flow the cancellation to every context that stole chores from us.
        _CancelStolenContexts();
    }
}
/// <summary>
/// Called to cancel any contexts which stole chores from the given collection.
/// </summary>
void _StructuredTaskCollection::_CancelStolenContexts()
{
    //
    // Delegate to the owning context, which tracks the set of contexts that
    // stole work from this collection.
    //
    reinterpret_cast<ContextBase *>(_M_pOwningContext)->CancelStealers(this);
}
/// <summary>
/// Informs the caller whether or not the task collection is currently in the midst of cancellation. Note that this
/// does not necessarily indicate that Cancel was called on the collection (although such certainly qualifies this function
/// to return true). It may be the case that the task collection is executing inline and a task collection further up in the work
/// tree was canceled. In cases such as these where we can determine ahead of time that cancellation will flow through
/// this collection, true will be returned as well.
/// </summary>
/// <returns>
/// An indication of whether the task collection is in the midst of a cancellation (or is guaranteed to be shortly).
/// </returns>
bool _StructuredTaskCollection::_IsCanceling()
{
if (_M_pOwningContext == NULL)
_M_pOwningContext = SchedulerBase::CurrentContext();
ContextBase *pContext = reinterpret_cast <ContextBase *> (_M_pOwningContext);
long cancellationDepth = pContext->MinimumCancellationDepth();
//
// Either we were canceled or someone higher than us on our context was canceled. This is all safe without lock because of the rules for using
// a structured task collection. NOTHING changes those rules. You may only call this from the owning context or a thread within the work tree. This has
// the same "special" properties as ::Cancel in that regard.
//
return (_M_pException != NULL || (cancellationDepth != -1 && cancellationDepth <= _M_inliningDepth) ||
(pContext->IsPendingCancellation() && _WillInterruptForPendingCancel()));
}
/// <summary>
/// Waits on a specified number of stolen chores.
/// </summary>
/// <param name="stolenChoreCount">
/// The number of stolen chores to wait upon
/// </param>
void _StructuredTaskCollection::_WaitOnStolenChores(long stolenChoreCount)
{
    // Make sure the counter/event pair has been constructed before touching it.
    if (_M_completedStolenChores <= _CollectionInitializationInProgress)
        _Initialize();

    // Debit the completion counter by the number of chores we must wait on.
    // A negative balance means some thieves have not yet signalled completion,
    // so block on the event until the last _CountUp brings us back to zero.
    long balance = InterlockedExchangeAdd(&_M_completedStolenChores, -stolenChoreCount) - stolenChoreCount;
    if (balance < 0)
        reinterpret_cast<::Concurrency::details::StructuredEvent *>(_M_event)->Wait();
}
/// <summary>
/// Indicates that a stolen chore has completed.
/// </summary>
void _StructuredTaskCollection::_CountUp()
{
if (_M_completedStolenChores <= _CollectionInitializationInProgress)
_Initialize();
LONG count = InterlockedIncrement(&_M_completedStolenChores);
if (count == 0)
reinterpret_cast <::Concurrency::details::StructuredEvent*> (_M_event)->Set();
}
/// <summary>
/// Initializes the structured task collection to count stolen chores.
/// </summary>
void _StructuredTaskCollection::_Initialize()
{
    //
    // Race to claim initialization: exactly one caller moves the counter from
    // _CollectionNotInitialized to _CollectionInitializationInProgress.
    //
    if (InterlockedCompareExchange(&_M_completedStolenChores,
                                   _CollectionInitializationInProgress,
                                   _CollectionNotInitialized) != _CollectionNotInitialized)
    {
        // Another thread is (or already has) initialized -- spin until the
        // counter is published past the in-progress sentinel.
        _SpinWaitBackoffNone spinWait;
        while (_M_completedStolenChores <= _CollectionInitializationInProgress)
            spinWait._SpinOnce();
        return;
    }

    // We won the race: construct the event in the preallocated _M_event storage.
    new (reinterpret_cast<void *>(_M_event)) ::Concurrency::details::StructuredEvent();

    // Publish the initialized state with an interlocked store so the event
    // construction is visible before any spinner proceeds.
#if _DEBUG
    long previousCompleted = InterlockedExchange(&_M_completedStolenChores, _CollectionInitialized);
    ASSERT(previousCompleted == _CollectionInitializationInProgress);
#else
    InterlockedExchange(&_M_completedStolenChores, _CollectionInitialized);
#endif
}
// **********************************************************************
// Unstructured Task Collections:
// **********************************************************************
/// <summary>
/// Constructs a new unstructured task collection
/// </summary>
_TaskCollection::_TaskCollection() :
    _M_pTaskExtension(NULL),
    _M_pNextAlias(NULL),
    _M_executionStatus(TASKCOLLECTION_EXECUTION_STATUS_CLEAR),
    _M_flags(0),
    _M_stackPos(0)
{
    //
    // CurrentContext may create a context
    //
    _M_pOwningContext = SchedulerBase::CurrentContext();
    ContextBase *pCurrentContext = reinterpret_cast<ContextBase*> (_M_pOwningContext);

    // Parent to whatever collection is currently executing on this context
    // (presumably for cancellation/work-tree chaining -- see _Cancel usage).
    _M_pParent = pCurrentContext->GetExecutingCollection();

    _Initialize();

    // The event starts in the set state (no outstanding work at construction).
    _M_event.set();

    // A fresh collection is its own "original"; aliases link through _M_pNextAlias.
    _M_pOriginalCollection = this;

    // Bind to this context's work queue identity. NOTE(review): FastCurrentContext()
    // is assumed non-NULL here because CurrentContext() above ensured a context
    // exists -- confirm against SchedulerBase.
    _M_boundQueueId = SchedulerBase::FastCurrentContext()->GetWorkQueueIdentity();
    _M_inlineFlags = 0;
}
/// <summary>
/// Performs task cleanup normally done at destruction time.
/// </summary>
/// <param name="fExceptional">
/// An indication if the cleanup is exceptional and the collection should be left in a canceled state.
/// </param>
bool _TaskCollection::_TaskCleanup(bool fExceptional)
{
bool fThrow = false;
//
// Direct alias destruction should not attempt to go through any wait/abort cycle. It's simply the deletion/abandonment
// of the alias. The original collection might not even be around to touch.
//
if (!_IsDirectAlias())
{
if (!__uncaught_exception())
{
//
// Users are required to call Wait() before letting the destructor run. Otherwise, throw. Note that before throwing,
// we must actually wait on the tasks since they contain pointers into stack frames and unwinding without the wait is
// instant stack corruption.
//
fThrow = (_M_unpoppedChores > 0);
//
// We must check all direct aliases as well.
//
if (_M_pOriginalCollection == this && _M_pNextAlias != NULL)
{
_TaskCollection *pAlias = _M_pNextAlias;
while (pAlias != NULL)
{
if (pAlias->_M_unpoppedChores > 0)
fThrow = true;
pAlias = pAlias->_M_pNextAlias;
}
}
if (fThrow)
_Abort(fExceptional);
}
else
_Abort(fExceptional);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -