// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// VirtualProcessor.cpp
//
// Source file containing the VirtualProcessor implementation.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#include "concrtinternal.h"
namespace Concurrency
{
namespace details
{
/// <summary>
/// Constructs a virtual processor.
/// </summary>
VirtualProcessor::VirtualProcessor()
: m_localRunnableContexts(&m_lock),
m_fThrottled(false)
{
// Derived classes should use Initialize(...) to init the virtual processor
}
/// <summary>
/// Initializes the virtual processor. This API is called by the constructor, and again when a virtual processor
/// is re-initialized after being pulled off the free pool in the list array.
/// </summary>
/// <param name="pOwningNode">
/// The owning schedule node for this virtual processor
/// </param>
/// <param name="pOwningRoot">
/// The owning IVirtualProcessorRoot
/// </param>
void VirtualProcessor::Initialize(SchedulingNode *pOwningNode, IVirtualProcessorRoot *pOwningRoot)
{
//
// A recycled virtual processor should be removed from throttling before being reused.
//
ASSERT(!IsThrottled());
m_pOwningNode = pOwningNode;
m_pOwningRing = pOwningNode->GetSchedulingRing();
m_pCurrentRing = pOwningNode->GetSchedulingRing();
m_pOwningRoot = pOwningRoot;
m_fMarkedForRetirement = false;
m_fRambling = false;
m_fOversubscribed = false;
m_fAvailable = TRUE;
m_fHidden = false;
m_enqueuedTaskCounter = 0;
m_dequeuedTaskCounter = 0;
m_enqueuedTaskCheckpoint = 0;
m_dequeuedTaskCheckpoint = 0;
m_pExecutingContext = NULL;
m_pOversubscribingContext = NULL;
m_safePointMarker.Reset();
m_pSubAllocator = NULL;
if (m_pOwningNode->GetScheduler()->GetSchedulingProtocol() == ::Concurrency::EnhanceScheduleGroupLocality)
m_searchCtx.Reset(this, WorkSearchContext::AlgorithmCacheLocal);
else
m_searchCtx.Reset(this, WorkSearchContext::AlgorithmFair);
// A virtual processor has the same id as its associated virtual processor root. The roots have process-unique ids.
m_id = pOwningRoot->GetId();
TraceVirtualProcessorEvent(CONCRT_EVENT_START, TRACE_LEVEL_INFORMATION, m_pOwningNode->m_pScheduler->Id(), m_id);
}
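//
// Note: the search algorithm selected above follows the scheduler's scheduling protocol. As an
// illustrative sketch (public ConcRT API, not code from this file), a caller would typically pick
// that protocol through SchedulerPolicy when creating the scheduler:
//
//     SchedulerPolicy policy(1, SchedulingProtocol, ::Concurrency::EnhanceScheduleGroupLocality);
//     Scheduler *pScheduler = Scheduler::Create(policy); // vprocs will use the cache-local search
//
// With EnhanceForwardProgress instead, the virtual processor falls back to the fair search algorithm.
//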
/// <summary>
/// Destroys a virtual processor
/// </summary>
VirtualProcessor::~VirtualProcessor()
{
ASSERT(m_localRunnableContexts.Count() == 0);
if (m_pSubAllocator != NULL)
{
SchedulerBase::ReturnSubAllocator(m_pSubAllocator);
m_pSubAllocator = NULL;
}
}
/// <summary>
/// Activates a virtual processor with the context provided.
/// </summary>
void VirtualProcessor::Activate(IExecutionContext * pContext)
{
VMTRACE(MTRACE_EVT_ACTIVATE, ToInternalContext(pContext), this, SchedulerBase::FastCurrentContext());
#if _UMSTRACE
ContextBase *pCurrentContext = SchedulerBase::FastCurrentContext();
CMTRACE(MTRACE_EVT_ACTIVATE, (pCurrentContext && !pCurrentContext->IsExternal()) ? static_cast<InternalContextBase *>(pCurrentContext) : NULL, this, pContext);
CMTRACE(MTRACE_EVT_ACTIVATED, ToInternalContext(pContext), this, pCurrentContext);
#endif // _UMSTRACE
m_pOwningRoot->Activate(pContext);
}
/// <summary>
/// Temporarily deactivates a virtual processor.
/// </summary>
/// <returns>
/// An indication of which side the awakening occurred from (true -- we activated it, false -- the RM awoke it).
/// </returns>
bool VirtualProcessor::Deactivate(IExecutionContext * pContext)
{
VMTRACE(MTRACE_EVT_DEACTIVATE, ToInternalContext(pContext), this, false);
#if _UMSTRACE
ContextBase *pCurrentContext = SchedulerBase::FastCurrentContext();
CMTRACE(MTRACE_EVT_DEACTIVATE, (pCurrentContext && !pCurrentContext->IsExternal()) ? static_cast<InternalContextBase *>(pCurrentContext) : NULL, this, pContext);
#endif // _UMSTRACE
return m_pOwningRoot->Deactivate(pContext);
}
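//
// A minimal sketch of how the Activate/Deactivate pairing is typically consumed (assumed names, not
// the actual dispatch loop): an idle context deactivates its virtual processor and uses the return
// value to tell a scheduler wake-up apart from a Resource Manager wake-up.
//
//     if (pVirtualProcessor->Deactivate(pContext))
//     {
//         // true: another context called Activate because new work arrived -- go search for it.
//     }
//     else
//     {
//         // false: the RM woke the virtual processor (e.g. it is being reclaimed) -- re-examine
//         // the virtual processor state before searching for work.
//     }
//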
/// <summary>
/// Invokes the underlying virtual processor root to ensure all tasks are visible
/// </summary>
void VirtualProcessor::EnsureAllTasksVisible(IExecutionContext * pContext)
{
VMTRACE(MTRACE_EVT_DEACTIVATE, ToInternalContext(pContext), this, true);
#if _UMSTRACE
ContextBase *pCurrentContext = SchedulerBase::FastCurrentContext();
CMTRACE(MTRACE_EVT_DEACTIVATE, (pCurrentContext && !pCurrentContext->IsExternal()) ? static_cast<InternalContextBase *>(pCurrentContext) : NULL, this, pContext);
#endif // _UMSTRACE
m_pOwningRoot->EnsureAllTasksVisible(pContext);
}
/// <summary>
/// Starts a worker context executing on this virtual processor.
/// </summary>
void VirtualProcessor::StartupWorkerContext(ScheduleGroupBase* pGroup)
{
TRACE(TRACE_SCHEDULER, L"VirtualProcessor::StartupWorkerContext");
// This virtual processor might already have a context attached to it from the Dispatch loop, where it is waiting for work
// to come in. If this is the case, there is no need to spin off another internal context to do the work.
if (m_pExecutingContext == NULL)
{
InternalContextBase * pContext = pGroup->GetInternalContext();
ASSERT(pContext != NULL);
Affinitize(pContext);
ASSERT(m_pExecutingContext == pContext);
}
else
{
ASSERT(!ToInternalContext(m_pExecutingContext) || ToInternalContext(m_pExecutingContext)->m_pVirtualProcessor == this);
}
m_pOwningRoot->Activate(m_pExecutingContext);
}
/// <summary>
/// Affinitizes an internal context to the virtual processor.
/// </summary>
/// <param name="pContext">
/// The internal context to affinitize.
/// </param>
void VirtualProcessor::Affinitize(InternalContextBase *pContext)
{
//
// Wait until the context is firmly blocked, if it has started. This is essential to prevent the vproc from being
// orphaned if the context we're switching to is IN THE PROCESS of switching out to a different one. An example of
// how this could happen:
//
// 1] ctxA is running on vp1. It is in the process of blocking, and wants to switch to ctxB. This means ctxA needs to
// affinitize ctxB to its own vproc, vp1.
//
// 2] At the exact same time, ctxA is unblocked by ctxY and put onto a runnables collection in its scheduler. Meanwhile, ctxZ
// executing on vp2, has also decided to block. It picks ctxA off the runnables collection, and proceeds to switch to it.
// This means that ctxZ needs to affinitize ctxA to ITS vproc vp2.
//
// 3] Now, if ctxZ affinitizes ctxA to vp2 BEFORE ctxA has had a chance to affinitize ctxB to vp1, ctxB gets mistakenly
// affinitized to vp2, and vp1 is orphaned.
//
// In order to prevent this, ctxZ MUST wait until AFTER ctxA has finished its affinitization. This is indicated via the
// blocked flag. ctxA will set its blocked flag to 1 after it has finished affinitizing ctxB to vp1, at which point it is
// safe for ctxZ to modify ctxA's vproc and change it from vp1 to vp2.
//
pContext->SpinUntilBlocked();
pContext->PrepareToRun(this);
VCMTRACE(MTRACE_EVT_AFFINITIZED, pContext, this, NULL);
#if defined(_DEBUG)
pContext->ClearDebugBits();
pContext->SetDebugBits(CTX_DEBUGBIT_AFFINITIZED);
#endif // _DEBUG
// Make sure there is a two-way mapping between a virtual processor and the affinitized context attached to it.
// The pContext-> side of this mapping was established in PrepareToRun.
m_pExecutingContext = pContext;
//
// If we were unable to update the statistical information because internal context was not
// affinitized to a virtual processor, then do it now when the affinitization is done.
//
if (pContext->m_fHasDequeuedTask)
{
m_dequeuedTaskCounter++;
pContext->m_fHasDequeuedTask = false;
}
}
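//
// A simplified sketch of the handshake described above (hypothetical names, std::atomic standing in
// for the runtime's internal blocked flag):
//
//     std::atomic<long> ctxABlocked{0};
//
//     // ctxA, blocking on vp1:
//     vp1->Affinitize(ctxB);                    // hand vp1 to the successor context first...
//     ctxABlocked.store(1);                     // ...then publish "ctxA is fully blocked"
//
//     // ctxZ, switching to ctxA on vp2:
//     while (ctxABlocked.load() == 0) { }       // SpinUntilBlocked: wait for ctxA's handoff to finish
//     vp2->Affinitize(ctxA);                    // only now is it safe to move ctxA over to vp2
//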
/// <summary>
/// Marks the virtual processor such that it removes itself from the scheduler once the context it is executing
/// reaches a safe yield point. Alternatively, if the context has not started executing yet, it can be retired immediately.
/// </summary>
void VirtualProcessor::MarkForRetirement()
{
if (ClaimExclusiveOwnership())
{
// If there is a context attached to this virtual processor and we were able to claim it for
// retirement, we have to unblock that context and send it for retirement. Otherwise, if
// there was no context attached, we can simply retire the virtual processor.
if (m_pExecutingContext != NULL)
{
m_fMarkedForRetirement = true;
m_pOwningRoot->Activate(m_pExecutingContext);
}
else
{
Retire();
}
}
else
{
// Instruct the virtual processor to exit at a yield point - when the context it is executing enters the scheduler
// from user code.
m_fMarkedForRetirement = true;
}
}
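//
// MarkForRetirement relies on ClaimExclusiveOwnership (below), which claims the virtual processor by
// atomically flipping the available flag. The same idiom sketched standalone, with std::atomic in
// place of the interlocked intrinsic used in this file:
//
//     std::atomic<long> fAvailable{TRUE};
//
//     bool Claim()
//     {
//         // Cheap read first to avoid needlessly contending on the cache line, then one atomic
//         // exchange; exactly one caller observes the TRUE -> FALSE transition and wins ownership.
//         return (fAvailable.load() == TRUE) && (fAvailable.exchange(FALSE) == TRUE);
//     }
//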
/// <summary>
/// Attempts to claim exclusive ownership of the virtual processor by resetting the available flag.
/// </summary>
/// <returns>
/// True if it was able to claim the virtual processor, false otherwise.
/// </returns>
bool VirtualProcessor::ClaimExclusiveOwnership()
{
if ((m_fAvailable == TRUE) && (InterlockedExchange(&m_fAvailable, FALSE) == TRUE))
{
#if _UMSTRACE