📄 schedulerbase.cpp
/// <summary>
/// Performs one shot static destruction (at unload/process exit).
/// </summary>
void SchedulerBase::OneShotStaticDestruction()
{
UMSThreadScheduler::OneShotStaticDestruction();
TlsFree(t_dwContextIndex);
t_dwContextIndex = 0;
}
/// <summary>
/// Called at unload/process exit to perform cleanup of one-shot initialization items.
/// </summary>
void SchedulerBase::CheckOneShotStaticDestruction()
{
//
// This might happen at unload time and does not need to be governed by a lock. Further, by the time this is called in that
// circumstance, all statics and globals should already have been destructed -- it would be bad form to touch s_schedulerLock even if
// it is presently a wrapper around a POD type. Note that a background thread might come through here but would never get past the
// InterlockedDecrement unless we were at unload time. (A condensed sketch of the packed flag/count scheme follows this function.)
//
LONG val = InterlockedDecrement(&s_oneShotInitializationState);
if (val == ONESHOT_INITIALIZED_FLAG) // ref==0
{
//
// Here, we are at unload time.
//
OneShotStaticDestruction();
val = InterlockedAnd(&s_oneShotInitializationState, ~ONESHOT_INITIALIZED_FLAG);
ASSERT(val == ONESHOT_INITIALIZED_FLAG);
}
}
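// --- Illustrative sketch (not part of the original source) ---------------------------------
// The one-shot state used above packs a reference count and the ONESHOT_INITIALIZED_FLAG bit
// into a single LONG, so one InterlockedDecrement can detect "last reference released while
// still initialized". The exact flag value and the increment path are not shown in this
// listing; the sketch below assumes a high-bit flag purely to demonstrate the packing idea.
namespace OneShotStateSketch
{
    const LONG c_initializedFlag = 0x40000000;   // assumed flag bit
    volatile LONG s_state = 0;                   // flag | reference count

    void AddRef()
    {
        InterlockedIncrement(&s_state);
    }

    bool ReleaseAndCheckLast()
    {
        // True exactly when the count drops to zero on an initialized state: the remaining
        // value is the flag bit alone, mirroring the (val == ONESHOT_INITIALIZED_FLAG) test above.
        return InterlockedDecrement(&s_state) == c_initializedFlag;
    }
}
// -------------------------------------------------------------------------------------------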
void SchedulerBase::StaticDestruction()
{
_StaticLock::_Scoped_lock lockHolder(s_schedulerLock);
if (InterlockedDecrement(&s_initializedCount) == 0)
{
//
// all static destruction here
//
// We have exclusive access to the free pool, and therefore can use unsafe APIs.
SubAllocator* pAllocator = s_subAllocatorFreePool.Pop();
while (pAllocator != NULL)
{
delete pAllocator;
pAllocator = s_subAllocatorFreePool.Pop();
}
}
}
/// <summary>
/// Initialize variables and request execution resources from the Resource Manager.
/// </summary>
void SchedulerBase::Initialize()
{
m_virtualProcessorAvailableCount = 0;
m_virtualProcessorCount = 0;
m_nodeCount = 0;
// A SchedulerResourceManagement instance implements the interfaces required for communication with
// the Resource Manager.
m_pSchedulerResourceManagement = new SchedulerResourceManagement(this);
m_pResourceManager = Concurrency::CreateResourceManager();
m_id = Concurrency::GetSchedulerId();
// Get the number of nodes on the machine so we can create a fixed array for scheduling nodes and
// scheduling rings - obviating the need for locking these collections when we traverse them.
m_maxNodes = GetProcessorNodeCount();
m_rings = new SchedulingRing*[m_maxNodes];
m_nodes = new SchedulingNode*[m_maxNodes];
memset(m_rings, 0, sizeof(SchedulingRing*) * m_maxNodes);
memset(m_nodes, 0, sizeof(SchedulingNode*) * m_maxNodes);
// The RequestInitialVirtualProcessors API will invoke a scheduler callback to add new virtual processors to
// the scheduler during the course of the API call. If this API succeeds, we can assume that scheduling
// nodes have been populated with virtual processors representing resources allocated by the RM based on
// values specified in the scheduler's policy.
m_pSchedulerProxy = m_pResourceManager->RegisterScheduler(m_pSchedulerResourceManagement, CONCRT_RM_VERSION_1);
m_pSchedulerProxy->RequestInitialVirtualProcessors(false);
m_nextSchedulingRingIndex = GetValidSchedulingRingIndex(0);
m_hSchedulerShutdownSync = CreateSemaphoreW(NULL, 0, 0x7FFFFFFF, NULL);
if (m_hSchedulerShutdownSync == NULL)
throw scheduler_resource_allocation_error(HRESULT_FROM_WIN32(GetLastError())); // the RM process should probably die here
m_pExternalContextTable = new Hash<HANDLE, ExternalContextBase*>();
InitializeSchedulerEventHandlers();
TraceSchedulerEvent(CONCRT_EVENT_START, TRACE_LEVEL_INFORMATION, m_id);
}
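// --- Illustrative usage sketch (not part of the original source) ---------------------------
// Initialize() runs when a scheduler instance is brought up; from user code this path is
// normally reached through the public API declared in <concrt.h> (assumed to be visible
// through this file's existing includes). A minimal sketch of that public-side flow:
static void ExplicitSchedulerUsageSketch()
{
    // Ask for between 1 and 4 virtual processors; the Resource Manager allocation requested
    // in Initialize() is driven by policy values such as these.
    ::Concurrency::SchedulerPolicy policy(2,
        ::Concurrency::MinConcurrency, 1,
        ::Concurrency::MaxConcurrency, 4);

    ::Concurrency::Scheduler *pScheduler = ::Concurrency::Scheduler::Create(policy);
    pScheduler->Attach();               // attach the calling thread to the new scheduler

    // ... queue work here ...

    ::Concurrency::CurrentScheduler::Detach();
    pScheduler->Release();              // drop the reference returned by Create
}
// -------------------------------------------------------------------------------------------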
/// <summary>
/// Create a context from the default scheduler (possibly create the default too).
/// </summary>
ContextBase* SchedulerBase::CreateContextFromDefaultScheduler()
{
// If the context TLS value is NULL, the current thread is not attached to a scheduler. Find the
// default scheduler and attach to it.
SchedulerBase* pDefaultScheduler = GetDefaultScheduler();
// Creating an external context on the current thread attaches the scheduler.
ContextBase *pContext = pDefaultScheduler->AttachExternalContext(false);
ASSERT((ContextBase*) TlsGetValue(t_dwContextIndex) == pContext);
// GetDefaultScheduler takes a reference which is safe to release after the attach.
pDefaultScheduler->Release();
return pContext;
}
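// --- Illustrative usage sketch (not part of the original source) ---------------------------
// From the public side, the default scheduler is created implicitly: the first runtime call
// that needs a context on an unattached thread ends up in CreateContextFromDefaultScheduler.
// A minimal sketch using only the public surface assumed visible via <concrt.h>:
static void ImplicitDefaultSchedulerSketch()
{
    // On a thread with no TLS context, asking for the current scheduler's id forces the
    // default scheduler to be created (honoring any policy set via SetDefaultSchedulerPolicy)
    // and attaches this thread to it.
    unsigned int schedulerId = ::Concurrency::CurrentScheduler::Id();
    (void) schedulerId;
}
// -------------------------------------------------------------------------------------------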
/// <summary>
/// Returns the ConcRT context attached to the current OS execution context. If one does not exist, NULL is returned.
/// </summary>
ContextBase *SchedulerBase::SafeFastCurrentContext()
{
return IsOneShotInitialized() ? (ContextBase*) TlsGetValue(t_dwContextIndex) : NULL;
}
/// <summary>
/// Returns the ConcRT context attached to the current OS execution context. If one does not exist, NULL is returned.
/// This is only callable if you know a priori that all statics have been initialized.
/// </summary>
ContextBase *SchedulerBase::FastCurrentContext()
{
CORE_ASSERT(IsOneShotInitialized());
return (ContextBase*) TlsGetValue(t_dwContextIndex);
}
/// <summary>
/// Returns a pointer to the ConcRT scheduler attached to the current thread. If one does not exist, it creates
/// a context and attaches it to the default scheduler.
/// </summary>
SchedulerBase* SchedulerBase::CurrentScheduler()
{
return CurrentContext()->GetScheduler();
}
/// <summary>
/// Returns a pointer to the current scheduler, if the current thread is attached to a ConcRT scheduler, null otherwise.
/// This is only callable if you know a priori that all statics have been initialized.
/// </summary>
SchedulerBase *SchedulerBase::FastCurrentScheduler()
{
ContextBase * pContext = FastCurrentContext();
return (pContext != NULL) ? pContext->GetScheduler() : NULL;
}
/// <summary>
/// Returns a pointer to the current scheduler, if the current thread is attached to a ConcRT scheduler, null otherwise.
/// </summary>
SchedulerBase *SchedulerBase::SafeFastCurrentScheduler()
{
ContextBase * pContext = SafeFastCurrentContext();
return (pContext != NULL) ? pContext->GetScheduler() : NULL;
}
/// <summary>
/// Returns a pointer to the default scheduler. Creates one if it doesn't exist and tries to make it the default.
/// NOTE: The API takes a reference on the scheduler, which the caller must release appropriately.
/// </summary>
SchedulerBase *SchedulerBase::GetDefaultScheduler()
{
// Acquire the lock in order to take a safe reference on the default scheduler.
_StaticLock::_Scoped_lock _lock(s_defaultSchedulerLock);
// If the default scheduler is non-null, try to reference it safely. If the reference fails,
// we've encountered a scheduler that is in the middle of finalization => the thread finalizing
// the scheduler will attempt to clear the value under write mode.
if ((s_pDefaultScheduler == NULL) || !s_pDefaultScheduler->SafeReference())
{
SchedulerPolicy policy(0);
// Note that the default scheduler policy is protected by the default scheduler lock.
SchedulerPolicy * pDefaultPolicy = s_pDefaultSchedulerPolicy;
if (pDefaultPolicy != NULL)
{
policy = *pDefaultPolicy;
}
// Either the default scheduler was null, or we found a scheduler that was in the middle of being finalized.
// Create a scheduler and set it as the default.
s_pDefaultScheduler = SchedulerBase::CreateWithoutInitializing(policy);
// Obtain hardware threads, initialize virtual processors, etc.
s_pDefaultScheduler->Initialize();
// Create returns a scheduler with a reference count of 0. We need to reference the scheduler before releasing the lock
// to prevent a different thread from assuming this scheduler is shutting down because the ref count is 0.
// The caller is responsible for decrementing it after attaching to the scheduler.
s_pDefaultScheduler->Reference();
}
// We're holding on to a reference, so it is safe to return this scheduler.
ASSERT(s_pDefaultScheduler != NULL);
return s_pDefaultScheduler;
}
/// <summary>
/// Allows a user-defined policy to be used to create the default scheduler. It is only valid to call this API when no default
/// scheduler exists in the process. Once a default policy is set, it remains in effect until the next valid call to this API or
/// to ResetDefaultSchedulerPolicy.
/// </summary>
/// <param name="_Policy">
/// [in] The policy to be set as the default. The runtime makes a copy of the policy for its use; the caller remains responsible
/// for the lifetime of the policy that is passed in. To make the next default scheduler use the runtime's default policy settings
/// again, call ResetDefaultSchedulerPolicy.
/// </param>
void SchedulerBase::SetDefaultSchedulerPolicy(__in const SchedulerPolicy & _Policy)
{
_Policy._ValidateConcRTPolicy();
bool fSetDefault = false;
if (s_pDefaultScheduler == NULL)
{
// We can only set a non-null default policy if the default scheduler does not exist.
_StaticLock::_Scoped_lock _lock(s_defaultSchedulerLock);
// It's possible the default scheduler exists but is on its way out, i.e., its ref count is 0, and a different thread is about to
// acquire the write lock and set the value to null. We ignore this case and allow the API to fail.
if (s_pDefaultScheduler == NULL)
{
delete s_pDefaultSchedulerPolicy;
s_pDefaultSchedulerPolicy = new SchedulerPolicy(_Policy);
fSetDefault = true;
}
}
if (!fSetDefault)
{
throw default_scheduler_exists();
}
}
/// <summary>
/// Resets the default scheduler policy, and the next time a default scheduler is created, it will use the runtime's default policy settings.
/// </summary>
void SchedulerBase::ResetDefaultSchedulerPolicy()
{
_StaticLock::_Scoped_lock _lock(s_defaultSchedulerLock);
if (s_pDefaultSchedulerPolicy != NULL)
{
delete s_pDefaultSchedulerPolicy;
s_pDefaultSchedulerPolicy = NULL;
}
}
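// --- Illustrative usage sketch (not part of the original source) ---------------------------
// The two routines above correspond to the public Scheduler::SetDefaultSchedulerPolicy and
// Scheduler::ResetDefaultSchedulerPolicy entry points in <concrt.h>. A sketch of the intended
// public-side call pattern: set the policy before anything creates the default scheduler.
static void DefaultPolicyUsageSketch()
{
    ::Concurrency::SchedulerPolicy policy(2,
        ::Concurrency::MinConcurrency, 1,
        ::Concurrency::MaxConcurrency, 2);

    // Throws default_scheduler_exists if the default scheduler has already been created.
    ::Concurrency::Scheduler::SetDefaultSchedulerPolicy(policy);

    // ... a default scheduler created from here on uses 'policy' ...

    // Revert to the runtime's built-in defaults for the next default scheduler.
    ::Concurrency::Scheduler::ResetDefaultSchedulerPolicy();
}
// -------------------------------------------------------------------------------------------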
/// <summary>
/// Increments the reference count to the scheduler but does not allow a 0 to 1 transition. This API should
/// be used to safely access a scheduler when the scheduler is not 'owned' by the caller.
/// </summary>
/// <returns>
/// True if the scheduler was referenced; false if the reference count was 0.
/// </returns>
bool SchedulerBase::SafeReference()
{
return SafeInterlockedIncrement(&m_refCount);
}
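// --- Illustrative sketch (not part of the original source) ---------------------------------
// SafeInterlockedIncrement is not defined in this listing. The semantics described above
// (increment unless the current value is zero) are typically implemented with a compare-and-
// swap loop; a sketch under that assumption:
static bool SafeInterlockedIncrementSketch(volatile LONG *pCount)
{
    LONG observed = *pCount;
    while (observed != 0)
    {
        // Attempt observed -> observed + 1; retry if another thread changed the value.
        LONG previous = InterlockedCompareExchange(pCount, observed + 1, observed);
        if (previous == observed)
            return true;        // incremented without ever making a 0 -> 1 transition
        observed = previous;
    }
    return false;               // count was observed at 0; the object is being torn down
}
// -------------------------------------------------------------------------------------------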
/// <summary>
/// Starts up an available virtual processor if one is found. The virtual processor is assigned a context
/// that starts its search for work in the schedule group specified.
/// </summary>
void SchedulerBase::StartupIdleVirtualProcessor(ScheduleGroupBase *pGroup, VirtualProcessor *pBias)
{
//
// We **MUST** be in a hyper-critical region during this period. There is an interesting scenario on UMS that makes this so:
//
// - [VP A] can't find work and is in its search for work loop
// - [VP A] makes itself available
// - [VP B] running context alpha adds a new work item and does a StartupIdleVirtualProcessor
// - [VP B] does a FindAvailableVirtualProcessor and claims VP A
// - [VP B] page faults / blocks
// - [VP A] finds context alpha in its final SFW pass
// - [VP A] tries to claim ownership of its virtual processor
// - [VP A] can't claim exclusive ownership because context alpha already did
// - [VP A] calls deactivate to wait for the corresponding activation.
// - [VP A] deadlocks with context alpha. Since it is about to execute alpha, no one else can grab it. Similarly,
// it's waiting on an activate which will only come from context alpha.
//
// Since this code runs on the primary anyway during completion list moves, hyper-crit should be safe. This does mean that
// this code must be extraordinarily careful about what it calls / does. There can be NO MEMORY ALLOCATION or other arbitrary
// Win32 calls in the UMS variant of this path.
//
ContextBase::StaticEnterHyperCriticalRegion();
// The callers of this API MUST check that the available virtual processor count in the scheduler
// is non-zero before calling the API. We avoid putting that check here since it would evaluate to false
// most of the time, and omitting it saves the function call overhead on fast paths (chore push); see the
// caller-side sketch after this function.
VirtualProcessor *pVirtualProcessor = FindAvailableVirtualProcessor(pBias);
if (pVirtualProcessor != NULL)
{
ActivateVirtualProcessor(pVirtualProcessor, pGroup);
}
ContextBase::StaticExitHyperCriticalRegion();
}
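// --- Illustrative caller-side sketch (not part of the original source) ---------------------
// Per the contract above, callers test the available virtual processor count before paying for
// this call. The member and argument names below are taken from this listing, but the actual
// call sites are not shown here, so treat this as an assumed shape rather than real caller code:
//
//     if (m_virtualProcessorAvailableCount > 0)
//     {
//         StartupIdleVirtualProcessor(pGroup, NULL);
//     }
// -------------------------------------------------------------------------------------------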
/// <summary>
/// Activate the given virtual processor
/// </summary>
void SchedulerBase::ActivateVirtualProcessor(VirtualProcessor *pVirtualProcessor, ScheduleGroupBase *pGroup)
{
// Initialize to a value of true. If this is a virtual processor that already has a context attached,
// it has already been 'activated' previously.
bool activated = true;
//
// Notify the scheduler that we're about to activate a new virtual processor. Do it only if this is
// truly a new virtual processor and not the one that is sitting in the Dispatch loop waiting for
// work to come in.
//
if (pVirtualProcessor->GetExecutingContext() == NULL)
{
activated = VirtualProcessorActive(true);
}
// If this is not a brand new virtual processor (i.e. an internal context is attached), we do nothing special to
// synchronize with scheduler shutdown here. The shutdown code that cancels contexts will synchronize with us,
// making sure a virtual processor is not activated twice.
if (activated)
{
TRACE(TRACE_SCHEDULER, L"SchedulerBase::FindAvailableVirtualProcessor(vpid=%d,available=%d)",
pVirtualProcessor->GetId(), m_virtualProcessorAvailableCount);
// Assumed completion (the original listing is truncated here): hand the claimed virtual processor
// a worker context that begins its search for work in the given schedule group.
pVirtualProcessor->StartupWorkerContext(pGroup);
}
}