umsschedulingcontext.cpp
blockingType = UMSThreadInternalContext::BlockingCritical;
}
bool fCritical = (m_pBoundVProc->m_pCriticalContext != NULL);
//
// Any context which made a virtual processor available darn well better be in a critical region until it claims it again.
//
UMSThreadInternalContext *pCriticalContext = m_pBoundVProc->m_pCriticalContext;
CORE_ASSERT(!fOriginallyAvailable || pCriticalContext != NULL);
if (pCriticalContext != NULL && pCriticalContext->m_fIsVisibleVirtualProcessor)
fOriginallyAvailable = true;
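//
// Illustration (not part of this file): the invariant checked above corresponds, in
// simplified terms, to the following usage pattern on the context side. The names are
// hypothetical stand-ins, not ConcRT's actual API -- the point is only that the critical
// region must bracket the entire "advertise, then re-claim" window:
//
//     pContext->EnterCriticalRegion();      // hypothetical guard
//     pVProc->MakeAvailable();              // advertise the vproc to idle searchers
//     /* ... final search-for-work pass ... */
//     pVProc->Claim();                      // take the vproc back (or get picked up and rescheduled)
//     pContext->ExitCriticalRegion();       // only now is it safe to leave the region
//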
//
// pGroup might be NULL because we've looped around, or because someone blocked during context recycling
// after we had already NULL'd the group out. In either case, we start the search from the anonymous schedule group.
//
if (pGroup == NULL)
{
SchedulingRing *pOwningRing = m_pBoundVProc->m_pOwningNode->GetSchedulingRing();
pGroup = pOwningRing->GetAnonymousScheduleGroup();
}
if (pPreviousContext != NULL)
{
pPreviousContext->SetBlockingType(blockingType);
}
InternalContextBase *pContext = NULL;
while (pContext == NULL)
{
if (m_pBoundVProc->m_pCriticalContext != NULL)
{
//
// Sweep the completion list if we are waiting for a critical context.
// Otherwise, the search for runnables would do the sweep.
//
m_pScheduler->MoveCompletionListToRunnables();
//
// The critical context is **ALWAYS** first priority -- no matter what! Since we are the only thread that picks up critical contexts
// (due to SFW happening in a critical region), no CAS is required. We can simply clear the flag when appropriate.
//
if (m_pBoundVProc->m_fCriticalIsReady)
{
pContext = m_pBoundVProc->m_pCriticalContext;
m_pBoundVProc->m_fCriticalIsReady = FALSE;
m_pBoundVProc->m_pCriticalContext = NULL;
#if defined(_DEBUG)
fromBits = CTX_DEBUGBIT_PRIMARYAFFINITIZEFROMCRITICAL;
#endif // _DEBUG
CORE_ASSERT(pContext != NULL);
}
}
else
{
CORE_ASSERT(!m_pBoundVProc->m_fCriticalIsReady);
}
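//
// Illustration (not part of this file): because the primary is the only consumer of the
// critical context, the flag handling above is a single-producer/single-consumer handoff
// and needs no compare-and-swap. A simplified sketch of the same idea, with hypothetical
// names (Context, Run) standing in for the real types:
//
//     #include <atomic>
//
//     Context           *g_criticalContext = nullptr;      // written before the flag is published
//     std::atomic<bool>  g_criticalIsReady{false};
//
//     // producer side: publish the context, then set the flag with release semantics
//     g_criticalContext = pBlocked;
//     g_criticalIsReady.store(true, std::memory_order_release);
//
//     // consumer side (exactly one consumer): a plain load and clear suffice -- no CAS
//     if (g_criticalIsReady.load(std::memory_order_acquire))
//     {
//         Context *pRun = g_criticalContext;
//         g_criticalIsReady.store(false, std::memory_order_relaxed);
//         g_criticalContext = nullptr;
//         Run(pRun);
//     }
//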
//
// Next priority is searching for contexts to run.
//
if (pContext == NULL)
{
//
// We need to do a full search for runnables. This means all scheduling rings, nodes, LRCs, etc... The reason for this is subtle. Normally,
// if we can't quickly find something to run, we switch to the reserved context, which is a real search context, and everyone is happy (we keep the virtual
// processor active). The only time we'll put the virtual processor to sleep HERE is when there's a critical context blocked or there are no reserved
// contexts.
// You might think we're okay to do that because the wake-ups in those cases explicitly notify us. Unfortunately, those special contexts might be blocked
// on a lock held by an ARBITRARY context, and that ARBITRARY context might have been moved to a runnables list in a different scheduling ring/node by
// the MoveCompletionListToRunnables above. Therefore, we must do a FULL search for runnables here, across all rings.
//
WorkItem work;
if (m_pBoundVProc->SearchForWork(&work, pGroup, WorkItem::WorkItemTypeContext))
{
pContext = work.GetContext();
#if defined(_DEBUG)
CMTRACE(MTRACE_EVT_SFW_FOUNDBY, pContext, m_pBoundVProc, NULL);
fromBits = CTX_DEBUGBIT_PRIMARYAFFINITIZEFROMSEARCH;
#endif // _DEBUG
}
}
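//
// Illustration (not part of this file): the "FULL search" argued for above just means
// walking every scheduling ring/node rather than stopping at the local one, since the
// completion-list sweep may have deposited the unblocking context anywhere. A simplified
// sketch with hypothetical container types:
//
//     InternalContextBase *FullSearchForRunnable(std::vector<RunnableQueue *> const &allRings)
//     {
//         for (RunnableQueue *pRing : allRings)        // every ring, not just the local one
//         {
//             if (InternalContextBase *p = pRing->TryPopRunnable())
//                 return p;                            // finds work even if the sweep moved it
//         }                                            // to a remote ring/node
//         return nullptr;
//     }
//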
//
// If we could not find anyone to run by this point, we're stuck falling back to the reserved SFW context. This should only happen
// if we're **NOT** critically blocked.
//
if (!fCritical && pContext == NULL)
{
pContext = m_pScheduler->GetReservedContext();
#if defined(_DEBUG)
fromBits = CTX_DEBUGBIT_PRIMARYRESERVEDCONTEXT;
#endif // _DEBUG
}
if (pPreviousContext != NULL)
{
//
// After one pass through the search loop from the source group, let go of the previous context. This means we can no longer originate
// a search from the source group. We cannot hold a reference here because removing it might entail a deletion from the ListArray,
// which cannot happen on the primary. If we could not find anything the first time through, just search outward from the anonymous
// schedule group.
//
if (pContext == NULL)
{
SchedulingRing *pOwningRing = m_pBoundVProc->m_pOwningNode->GetSchedulingRing();
pGroup = pOwningRing->GetAnonymousScheduleGroup();
}
SetUMSBlocked(pPreviousContext, pDispatchState->m_fIsPreviousContextAsynchronouslyBlocked);
pPreviousContext = NULL;
}
if (pContext == NULL)
{
//
// Make a series of passes through the "special SFW" above and then put the virtual processor to sleep.
//
pass++;
if (pass == passes)
{
//
// Make the virtual processor available and perform a flush. We need to make one more loop to "search for work",
// as it's entirely possible we raced with a wake notification on the critical context or the reserved context list event.
//
// It's also entirely possible that a context which had already made the virtual processor available was UMS-triggered during its
// last SFW loop and got us back here. In that case, we need to remember it, because special handling is required. Instead of building
// a horribly complex state machine to manage this particular race, we simply don't Deactivate here; instead, we poll. Much safer.
//
if (!fOriginallyAvailable)
{
fMadeAvailable = true;
m_pBoundVProc->MakeAvailableFromSchedulingContext();
}
//
// Currently safe because this is simply a flush that doesn't restore any state or wait on any events.
//
m_pBoundVProc->EnsureAllTasksVisible(this);
}
else if (pass > passes)
{
//
// Because we're not running on a context, we cannot participate in finalization, and yet we are putting this virtual processor
// to sleep. In order to do that safely, we must have a guarantee that something will wake *US* up. That basically means that
// we have a special context blocked -- either a critically blocked context or one waiting on the reserved context event.
//
// Put the virtual processor to sleep for real. If we wake up for *ANY* reason (doesn't matter if it's the completion notification
// or not), loop back up and perform another SFW.
//
if (!fOriginallyAvailable)
{
if (!m_pBoundVProc->Deactivate(this))
{
//
// This indicates that something came back on the completion list. We really do want to do a FULL SFW here. We need to claim
// ownership of the VProc.
//
ClaimBoundProcessorAndSwallowActivation();
}
fMadeAvailable = false;
}
else
{
//
// In order to avoid horrible race conditions with the context which made this virtual processor available, we simply sleep, loop back
// up and check again.
//
// MINIMIZE blocking between MakeAvailable and Deactivate within the dispatch loop -- this path carries a big performance penalty.
// Also -- NEVER release the critical region between those points (see above).
//
Sleep(100);
}
pass = 0;
}
}
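//
// Illustration (not part of this file): the pass counting above amounts to a
// "search, advertise, re-check, then sleep" loop that closes the race between going
// idle and a wake notification. A simplified, hypothetical sketch of that shape:
//
//     for (int pass = 0; pContext == nullptr; ++pass)
//     {
//         pContext = SearchForWork();                   // every pass starts with a search
//         if (pContext != nullptr || pass < N)
//             continue;
//
//         if (pass == N)
//         {
//             MakeAvailable();                          // advertise the idle vproc...
//             Flush();                                  // ...publish pending completions...
//             continue;                                 // ...and search once more before sleeping
//         }
//
//         if (!Deactivate())                            // woken early: work appeared on the
//             Claim();                                  // completion list, so re-claim the vproc
//         pass = -1;                                    // and start the pass sequence over
//     }
//
// (When another context -- not this loop -- made the vproc available, the code above
// polls with Sleep instead of calling Deactivate, exactly as the comments describe.)
//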
}
//
// If we made the virtual processor available, we need to reclaim it right now -- we're about to execute a context.
//
if (fMadeAvailable)
{
ClaimBoundProcessorAndSwallowActivation();
}
m_pBoundVProc->Affinitize(pContext);
#if defined(_DEBUG)
pContext->SetDebugBits(fromBits);
#endif // _DEBUG
m_pThreadProxy->SwitchTo(pContext, Blocking);
//
// If we get here, it indicates that the SwitchTo failed as a result of the underlying thread blocking asynchronously (e.g.: it was suspended or
// had a kernel APC running atop it when we tried to SwitchTo it). In this case, just go back up and pick another runnable. There's one absolutely
// critical thing here: we affinitized the vproc to pContext, but it isn't executing pContext and never was. The switch failed because of a thread
// suspension, kernel APC, etc... After looping back, we *CANNOT* rely on vproc-relative fields; we simply pick another context on the basis of
// information we already know and switch.
//
// On success, SwitchTo will snap out our stack (such is the way of the world on the UMS primary).
//
#if defined(_DEBUG)
pContext->SetDebugBits(CTX_DEBUGBIT_PRIMARYSWITCHTOFAILED);
#endif // _DEBUG
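//
// Illustration (not part of this file): the failure path described above makes the outer
// loop behave as a retry loop -- keep picking runnables until one can actually be switched
// to. A simplified, hypothetical sketch:
//
//     for (;;)
//     {
//         InternalContextBase *p = PickRunnable();   // critical context, searched work, or reserved
//         AffinitizeVProc(p);
//         SwitchToContext(p);                        // on success this never returns on this stack
//         // still here: the switch failed asynchronously (suspension, kernel APC, ...);
//         // p never ran, so simply loop and pick another runnable
//     }
//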
}
return;
}
/// <summary>
/// Returns whether we are on a primary thread.
/// </summary>
bool UMSSchedulingContext::OnPrimary()
{
return (UMSThreadScheduler::FastCurrentSchedulingContext() != NULL);
}
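//
// Illustration (not part of this file): callers can use OnPrimary() to guard work that must
// not run on the UMS primary (for example, anything that might block, or the ListArray
// deletion mentioned in the dispatch loop above). A hypothetical usage sketch:
//
//     if (UMSSchedulingContext::OnPrimary())
//         DeferToContext(work);      // hypothetical helper: hand the work to an ordinary context
//     else
//         work();                    // off the primary, it is safe to do the work inline
//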
} // namespace details
} // namespace Concurrency