// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// UMSThreadScheduler.cpp
//
// Source file containing the implementation for a UMS-thread-based ConcRT scheduler.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#pragma once
#include "concrtinternal.h"
namespace Concurrency
{
namespace details
{
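// Definition of the static per-thread (TLS) index declared on UMSThreadScheduler;
// presumably used to locate the scheduling context for the current thread (the
// declaration and usage live in the class definition).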
DWORD UMSThreadScheduler::t_dwSchedulingContextIndex;
/// <summary>
/// Creates a UMS thread based scheduler
/// </summary>
UMSThreadScheduler::UMSThreadScheduler(__in const ::Concurrency::SchedulerPolicy& policy) :
SchedulerBase(policy),
m_pCompletionList(NULL),
m_pendingRequests(0)
{
}
/// <summary>
/// Creates a UMS thread based scheduler
/// </summary>
UMSThreadScheduler* UMSThreadScheduler::Create(__in const ::Concurrency::SchedulerPolicy& policy)
{
return new UMSThreadScheduler(policy);
}
/// <summary>
/// Creates a UMS thread based virtual processor.
/// </summary>
VirtualProcessor* UMSThreadScheduler::CreateVirtualProcessor(SchedulingNode *pOwningNode, IVirtualProcessorRoot* pOwningRoot)
{
return new UMSThreadVirtualProcessor(pOwningNode, pOwningRoot);
}
///<summary>
/// Creates a new thread internal context and returns it to the base scheduler.
///</summary>
InternalContextBase *UMSThreadScheduler::CreateInternalContext()
{
return new UMSThreadInternalContext(this);
}
/// <summary>
/// Destroys a UMS thread based scheduler
/// </summary>
UMSThreadScheduler::~UMSThreadScheduler()
{
}
/// <summary>
/// Starts up new virtual processor if one is found. The virtual processor is assigned a context
/// that starts its search for work in the schedule group specified.
/// In the UMS scheduler, a vproc could be deactivated while waiting for resources such as
/// reserved contexts. Such vprocs need to be excluded when activating a new virtual processor:
/// new work must activate a vproc that is not waiting for resources. This is required to honour
/// the user-specified minimum concurrency level.
/// </summary>
void UMSThreadScheduler::StartupNewVirtualProcessor(ScheduleGroupBase *pGroup)
{
//
// We **MUST** be in a hyper-critical region during this period. There is an interesting scenario on UMS that makes this so:
//
// - [VP A] can't find work and is in its search for work loop
// - [VP A] makes itself available
// - [VP B] running context alpha adds a new work item and does a StartupIdleVirtualProcessor
// - [VP B] does a FindAvailableVirtualProcessor and claims VP A
// - [VP B] page faults / blocks
// - [VP A] finds context alpha in its final SFW pass
// - [VP A] tries to claim ownership of its virtual processor
// - [VP A] can't claim exclusive ownership because context alpha already did
// - [VP A] calls deactivate to wait for the corresponding activation.
// - [VP A] deadlocks with context alpha. Since it is about to execute alpha, no one else can grab it. Similarly,
// it's waiting on an activate which will only come from context alpha.
//
// Since this code runs on the primary anyway during completion list moves, hyper-crit should be safe. This does mean that
// this code must be extraordinarily careful about what it calls / does. There can be NO MEMORY ALLOCATION or other arbitrary
// Win32 calls in the UMS variant of this path.
//
ContextBase::StaticEnterHyperCriticalRegion();
// The callers of this API MUST check that the available virtual processor count in the scheduler
// is non-zero before calling the API. We avoid putting that check here since it would evaluate to
// false most of the time, and this saves the function call overhead on fast paths (chore push).
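//
// A hypothetical sketch of that caller-side contract (the accessor name below is
// illustrative, not the actual field):
//
//     if (GetAvailableVirtualProcessorCount() > 0)   // hypothetical accessor
//         StartupNewVirtualProcessor(pGroup);
//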
UMSThreadVirtualProcessor *pVirtualProcessor = FindNewVirtualProcessor();
if (pVirtualProcessor != NULL)
{
// Found a vproc to activate
ActivateVirtualProcessor(pVirtualProcessor, pGroup);
}
ContextBase::StaticExitHyperCriticalRegion();
}
/// <summary>
/// Finds an idle virtual processor that is not waiting for resources (such as reserved contexts),
/// claims the vproc, and returns it.
/// </summary>
UMSThreadVirtualProcessor * UMSThreadScheduler::FindNewVirtualProcessor()
{
for (int node = 0; node < m_nodeCount; ++node)
{
SchedulingNode * pNode = m_nodes[node];
int idx;
if (pNode != NULL)
{
UMSThreadVirtualProcessor *pVirtualProcessor =
static_cast<UMSThreadVirtualProcessor *>(pNode->GetFirstVirtualProcessor(&idx));
while (pVirtualProcessor != NULL)
{
// Skip vprocs that are waiting for resources.
if (!pVirtualProcessor->IsWaitingForReservedContext() && pVirtualProcessor->ClaimExclusiveOwnership())
{
// Found a vproc
return pVirtualProcessor;
}
pVirtualProcessor = static_cast<UMSThreadVirtualProcessor *>(pNode->GetNextVirtualProcessor(&idx));
}
}
}
return NULL;
}
///<summary>
/// Notification after a virtual processor goes from INACTIVE to ACTIVE or from ACTIVE to INACTIVE.
/// For UMS we need to ensure that there is at least one active vproc.
///</summary>
/// <param name="fActive">
/// True if a virtual processor is going from INACTIVE to ACTIVE, and false if it is going from ACTIVE to INACTIVE.
/// </param>
/// <param name="activeCount">
/// Active virtual processor count after the transition.
/// </param>
void UMSThreadScheduler::VirtualProcessorActiveNotification(bool fActive, LONG activeCount)
{
// We need to maintain at least 1 active virtual processor as long
// as there is work. Since we cannot easily determine if there are blocked UMS contexts,
// do not allow the active vproc count to go from 1 to 0.
if (activeCount == 0)
{
// If we are the last active virtual processor, we should be in a hyper-critical region
CORE_ASSERT(!fActive);
StartupIdleVirtualProcessor(GetNextSchedulingRing()->GetAnonymousScheduleGroup());
}
}
///<summary>
/// Determines if there is pending work, such as blocked contexts or unstarted chores, in the
/// scheduler. If there is no pending work, the scheduler will attempt to shut down.
/// For UMS, also look for pending requests from vprocs for reserved contexts.
///</summary>
bool UMSThreadScheduler::HasWorkPending()
{
return (m_pendingRequests > 0) || SchedulerBase::HasWorkPending();
}
/// <summary>
/// Replenishes the list of reserved contexts. Reserved contexts are just
/// a cache of internal contexts that are associated with a thread proxy.
/// </summary>
void UMSThreadScheduler::ReplenishReservedContexts()
{
ASSERT(m_pendingRequests > 0);
// The number of pending requests could be much bigger than what is
// really required. This is because a vproc could have a search loop
// that attempts to get an SFW (search-for-work) context a few times
// before waiting on the list event. So we will create a fixed number
// of contexts and set the event. The number is bounded by the number
// of active vprocs in the scheduler.
long numContextToCreate = GetNumberOfActiveVirtualProcessors() + 1;
// Limit the number of contexts created at a time. Allow a few of
// the vprocs to do useful work while we create more contexts.
if (numContextToCreate > 4)
{
numContextToCreate = 4;
}
// m_numReservedContexts doesn't need to be very accurate. In the worst
// case the background thread would need to run an extra iteration.
while (m_numReservedContexts < numContextToCreate)
{
InternalContextBase * pContext = GetInternalContext();
// Add to the reserved context list
_InterlockedIncrement(&m_numReservedContexts);
m_reservedContexts.Push(pContext);
}
// Allow the background thread to be signalled again
_InterlockedExchange(&m_pendingRequests, 0);
// Indicate that the reserved list has been replenished. Note that all
// the contexts could have been consumed before we set this event. Anyone
// waiting on the event would do a GetReservedContext, which would return NULL
// and in turn wake up the background thread to replenish again...
SignalReservedContextsAvailable();
}
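// A minimal sketch of the background thread loop assumed to drive the replenishment
// above (the real loop lives elsewhere in the scheduler; fShutdown is illustrative):
//
//     for (;;)
//     {
//         WaitForSingleObject(m_hCreateContext, INFINITE); // set by GetReservedContext
//         if (fShutdown)                                   // hypothetical exit condition
//             break;
//         ReplenishReservedContexts();
//     }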
/// <summary>
/// Releases the list of reserved contexts to the idle pool. The thread proxy
/// is released before returning the contexts to the idle pool
/// </summary>
void UMSThreadScheduler::ReleaseReservedContexts()
{
InternalContextBase *pContext = m_reservedContexts.Pop();
while (pContext != NULL)
{
GetSchedulerProxy()->UnbindContext(pContext);
ReleaseInternalContext(pContext);
pContext = m_reservedContexts.Pop();
}
m_numReservedContexts = 0;
ASSERT(m_pendingRequests == 0);
}
/// <summary>
/// Attempts to get an internal context for execution. If an internal
/// context is obtained the routine prepares it for execution by
/// associating a schedule group with it. Note that this routine can
/// return NULL.
/// </summary>
InternalContextBase * UMSThreadScheduler::GetReservedContext()
{
InternalContextBase *pContext = m_reservedContexts.Pop();
if (pContext != NULL)
{
_InterlockedDecrement(&m_numReservedContexts);
pContext->PrepareForUse(GetAnonymousScheduleGroup(), NULL, false);
}
else
{
if (_InterlockedIncrement(&m_pendingRequests) == 1)
{
// First pending request since the last replenish: wake up the background thread
SetEvent(m_hCreateContext);
}
}
return pContext;
}
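// A hypothetical sketch of the consumer-side pattern described in
// ReplenishReservedContexts: a vproc's search loop retries a few times and then
// waits on the list event before asking again (the retry bound and wait primitive
// below are illustrative):
//
//     InternalContextBase *pContext = NULL;
//     for (int i = 0; i < RETRY_LIMIT && pContext == NULL; ++i)
//         pContext = GetReservedContext();
//     if (pContext == NULL)
//         WaitForReservedContextsAvailable(); // hypothetical wait on the signalled event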
/// <summary>
/// Release reserved contexts to idle pool and cancel all internal contexts
/// </summary>
void UMSThreadScheduler::CancelAllContexts()
{
// We need to be in a hyper-critical region (this code path must not
// rely on another UT being scheduled).
ContextBase *pContext = FastCurrentContext();
bool fExitHyperCriticalRegion = false;
if ((pContext != NULL) && (!pContext->IsExternal()))
{
fExitHyperCriticalRegion = true;
pContext->EnterHyperCriticalRegion();
}
// Sweep ensures that there are no pending requests
CORE_ASSERT(m_pendingRequests == 0);
ReleaseReservedContexts();
if (fExitHyperCriticalRegion)
pContext->ExitHyperCriticalRegion();
// Cancel the remaining internal contexts through the base scheduler.
SchedulerBase::CancelAllContexts();
}
} // namespace details
} // namespace Concurrency