// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// ContextBase.cpp
//
// Source file containing the implementation for an execution ContextBase/stack/thread.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#include "concrtinternal.h"
namespace Concurrency
{
namespace details
{
/// <summary>
/// Constructor
/// </summary>
ContextBase::ContextBase(SchedulerBase *pScheduler, bool fIsExternal) :
m_pScheduler(pScheduler),
m_blockedState(CONTEXT_BLOCKED),
m_contextSwitchingFence(0),
m_pWorkQueue(NULL),
m_pParentContext(NULL),
m_criticalRegionCount(0),
m_hyperCriticalRegionCount(0),
m_oversubscribeCount(0),
//
// The alias table must be sufficiently small that clearing it at the end of a stolen chore isn't a huge penalty, yet
// large enough to splay a few task collections. Hopefully, the number of pools being utilized in stolen chores isn't very
// large (1 or 2), so this size should be sufficient.
//
m_aliasTable(7),
m_pExecutingCollection(NULL),
m_pRootCollection(NULL),
m_cancellationRefCount(0),
m_minCancellationDepth(-1),
m_canceledCount(0),
m_canceledContext(0),
m_pendingCancellations(0),
m_pIndirectAlias(NULL),
m_fIsExternal(fIsExternal),
m_threadId(0)
#if defined(_DEBUG)
,m_fShutdownValidations(false)
#endif // _DEBUG
{
m_id = m_pScheduler->GetNewContextId();
TraceContextEvent(CONCRT_EVENT_START, TRACE_LEVEL_INFORMATION, m_pScheduler->Id(), m_id);
}
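/// <summary>
/// Returns the reference count of the schedule group the context is currently working on, or UINT_MAX
/// if the context has no schedule group.
/// </summary>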
unsigned int ContextBase::ScheduleGroupRefCount() const
{
return m_pGroup != NULL ? (unsigned int)m_pGroup->m_refCount : UINT_MAX;
}
/// <summary>
/// Returns a unique identifier to the context
/// </summary>
unsigned int ContextBase::GetId() const
{
return m_id;
}
/// <summary>
/// Returns an identifier to the schedule group the context is currently working on, if any.
/// </summary>
unsigned int ContextBase::GetScheduleGroupId() const
{
return (m_pGroup != NULL) ? m_pGroup->Id() : UINT_MAX;
}
/// <summary>
/// Places a reference on the context preventing it from being destroyed until such time as the stealer is added to the chain
/// via AddStealer. Note that the operation of AddStealer should happen rapidly as it will *BLOCK* cleanup of the context.
/// </summary>
void ContextBase::ReferenceForCancellation()
{
InterlockedIncrement(&m_cancellationRefCount);
}
/// <summary>
/// Removes a reference on the context which was preventing it from being destroyed.
/// </summary>
void ContextBase::DereferenceForCancellation()
{
InterlockedDecrement(&m_cancellationRefCount);
}
/// <summary>
/// Adds a stealing context.
/// </summary>
void ContextBase::AddStealer(ContextBase *pStealer, bool fDereferenceForCancellation)
{
m_stealers.AddTail(&(pStealer->m_stealChain));
pStealer->m_fContextChainedStealer = true;
if (fDereferenceForCancellation)
DereferenceForCancellation();
}
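//
// A sketch of how ReferenceForCancellation and AddStealer above are intended to pair (the real call sites
// live elsewhere in the scheduler; pVictim is an illustrative name):
//
//     pVictim->ReferenceForCancellation();    // pin the victim context so it cannot be cleaned up
//     ...                                     // steal a chore from the victim
//     pVictim->AddStealer(pStealer, true);    // chain the stealer; 'true' releases the pin
//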
/// <summary>
/// Removes a stealing context.
/// </summary>
void ContextBase::RemoveStealer(ContextBase *pStealer)
{
m_stealers.Remove(&(pStealer->m_stealChain));
}
/// <summary>
/// Cancel everything stolen from pCollection outward from this context.
/// </summary>
void ContextBase::CancelStealers(_TaskCollectionBase *pCollection)
{
SafeRWList<ListEntry>::_Scoped_lock_read readLock(m_stealers);
ListEntry *pLE = m_stealers.First();
while (pLE != NULL)
{
ContextBase *pContext = CONTAINING_RECORD(pLE, ContextBase, m_stealChain);
//
// We don't want to be recursively traversing the tree needlessly every time the exception propagates back
// up a given context. If a context is already canceled, nothing can steal from it and we don't need to traverse
// there.
//
if (!pContext->IsCanceledContext())
{
_TaskCollectionBase *pRootCollection = pContext->GetRootCollection();
_ASSERTE(pRootCollection != NULL);
//
// If pCollection != NULL, it is an indication that we're at the first level. We can only cancel work stolen
// from a greater inlining depth, or from an equal depth when the root collection is pCollection itself. Further,
// we cannot cancel work which is not inlined. For example:
//
// _TaskCollection p1;
// p1.Schedule( [] {
// _TaskCollection *p2 = new _TaskCollection;
// p2->Schedule(alpha);
// _TaskCollection p3;
// p3.Schedule( [] {
// Blah;
// });
// });
//
// A cancel of p1 while p1->p3 is running inline cannot cancel p2. The exception that backflows might indeed cancel p2 if it was stack
// based, but remember we can have task collection pointers which are passed amongst threads and detached.
//
// Keep in mind that it's entirely possible to have a situation similar to above during the recursion where one of the stolen chores declared
// a task collection and pushed chores that will not be waited upon but instead will be passed out to another thread. We cannot tear down contexts
// that stole in this manner either.
//
//
// Any context which is executing a chore from the task collection being canceled is fair game.
//
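//
// To summarize the predicate below (pRoot denotes the stealing context's root collection):
//
//     pCollection    pRoot                                cancel the stealer?
//     -----------    ---------------------------------    -------------------
//     == pRoot       (any)                                yes -- it executes a chore of the canceled collection
//     NULL           inlined (depth != -1)                yes -- inlined on an already-canceled context
//     != pRoot       both inlined, pRoot strictly         yes -- correct parentage
//                    deeper than pCollection
//     otherwise                                           no  -- detached / non-inlined work survives
//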
if (pRootCollection == pCollection ||
//
// On recursion, as long as the root collection is inlined (no matter the depth), we are safe to cancel as it was inlined on a canceled
// context and that by definition gives it the correct parentage to be shot down.
//
(pCollection == NULL && pRootCollection->_InliningDepth() != -1) ||
//
// If both collections aren't inlined, the only cases where cancellation applies are the ones above. Otherwise,
// the collection that stole must have strictly greater inlining depth than the one we're canceling.
//
(pCollection != NULL && pRootCollection->_InliningDepth() > pCollection->_InliningDepth() &&
pRootCollection->_InliningDepth() != -1 &&
pCollection->_InliningDepth() != -1))
{
pContext->CancelEntireContext();
pContext->CancelStealers(NULL);
}
}
pLE = m_stealers.Next(pLE);
}
}
/// <summary>
/// Cleans up the context.
/// </summary>
void ContextBase::Cleanup()
{
ReleaseWorkQueue();
TraceContextEvent(CONCRT_EVENT_END, TRACE_LEVEL_INFORMATION, m_pScheduler->Id(), m_id);
}
/// <summary>
/// Called on both internal and external contexts, either when they are put into an idle pool to
/// be recycled, or when they are ready to be deleted. The API moves the contexts that are in
/// the list of 'stealers' (used for cancellation) to lists in the task collections from which
/// those contexts have stolen chores.
/// </summary>
void ContextBase::DetachStealers()
{
//
// Make sure no one has a ref on us to add to the stealers list. We need to wait on that before running down the cancellation list.
// Note that waiting here should be *EXTREMELY RARE*. The only time we'd ever see it would be if a task collection was used between threads
// and, between the time of the steal and the time the wrapper executed, the original thread went away.
//
if (m_cancellationRefCount != 0)
{
// Spin wait (no yielding)
_SpinWaitNoYield spinWait;
do
{
spinWait._SpinOnce();
} while (m_cancellationRefCount != 0);
}
if (m_aliasTable.Count() > 0)
ClearAliasTable();
if (m_stealers.Empty())
{
//
// After a DetachStealers, it is entirely possible that the context (the *this*) pointer goes away. Normally, the lock on the stealers
// list is what guards against manipulation by stolen chores; however -- the early exit above presents an interesting risk. It is now entirely
// possible that the last stolen chore is removing its context from the stealers list under the governance of the write lock and makes the
// list empty. The detachment wants to bail due to the above check (there's nothing there) and the context pointer is freed before the stealing
// thread releases the write lock.
//
// We do want the early bail to avoid taking and releasing a reader/writer frequently in this case for scenarios like parallel for. In order to
// prevent touching freed memory, we need to flush out any write owner (take and release the lock if someone holds a write).
//
m_stealers.FlushWriteOwners();
return;
}
//
// If there is anything left on the stealers list, it means that a context is dying while a task collection bound to that context lives
// on and still has stolen chores. In order to continue to facilitate cancellation of those task collections, any stealers in the list have
// to be moved to the individual task collection lists.
//
bool isDone = false;
while (!isDone)
{
bool fContinue = true;
m_stealers.AcquireWrite();
__try
{
ListEntry *pEntry = m_stealers.First();
while (pEntry != NULL && fContinue)
{
ListEntry *pNext = m_stealers.Next(pEntry);
ContextBase *pContext = CONTAINING_RECORD(pEntry, ContextBase, m_stealChain);
_TaskCollectionBase *pCollectionBase = pContext->GetRootCollection();
_ASSERTE(pCollectionBase != NULL && !pCollectionBase->_IsStructured());
_TaskCollection *pCollection = static_cast<_TaskCollection *>(pCollectionBase);
//
// In all likelihood, we rarely get here; however -- there is an issue: the established lock ordering is bottom up
// (task collection, then context) in order to preserve patterns in stealing and cancellation.
//
// The move here must acquire in the backwards order (context, then task collection). The only time we should see
// contention on these locks is during minimal periods where we are cancelling, or for tiny time frames during a steal,
// so we play a pseudo-atomic lock acquire game: if we cannot get both, we back off and let the other thread through.
//
SafeRWList<ListEntry> *pCollectionList = reinterpret_cast<SafeRWList<ListEntry> *> (pCollection->_GetStealTrackingList());
if (!pCollectionList->TryAcquireWrite())
{
//
// Yield in an attempt to force the other thread through.
//
m_stealers.ReleaseWrite();
fContinue = false;
Sleep(1);
break;
}
__try
{
m_stealers.UnlockedRemove(&(pContext->m_stealChain));
pContext->m_fContextChainedStealer = false;
pCollectionList->UnlockedAddTail(&(pContext->m_stealChain));
}
__finally
{
pCollectionList->ReleaseWrite();
}
pEntry = pNext;
}
}
__finally
{
//
// If the entire list was drained without contention, we still hold the write lock here; release it and
// terminate the outer loop. On the contention path above, the lock was already released before the break.
//
if (fContinue)
{
isDone = true;
m_stealers.ReleaseWrite();
}
}
}
}
} // namespace details
} // namespace Concurrency