// schedulegroupbase.cpp
            // call.
            //
            pQueue->RetireAtSafePoint(this);
            return true;
        }
        else
        {
            CORE_ASSERT(!m_pScheduler->InFinalizationSweep());

            //
            // The queue is not empty and we need to roll back. Since we never removed the queue from m_workQueues, the work will
            // still be found by the scheduler without undue futzing around sleep states. The queue must, however, be placed
            // back in m_detachedWorkQueues in a detached state.
            //
            // There's an unfortunate reality here too -- the slot used for the queue within the detached queues list might already
            // be gone. Adding back to the detached queues might trigger a heap allocation. Given that this might be in SFW, a heap allocation
            // triggering UMS would be bad. Hence -- if we need to roll back (unlikely), we must do this at a safe point.
            //
            pQueue->RedetachFromScheduleGroupAtSafePoint(this);
        }
    }

    return false;
}
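
//
// [Illustrative sketch -- not part of the original source.] The comment above defers a rollback that
// might allocate to a "safe point". The general shape of that pattern is a lock-free list of
// pre-allocated deferral nodes: restricted code (here, search-for-work on a UMS thread) pushes a node
// without allocating, and the list is drained later from a context where allocation is safe. All names
// below are hypothetical; the snippet is standalone, so its #include belongs at file scope.
//
#include <atomic>

struct DeferredAction
{
    void (*m_pfn)(void *);      // work to run at the safe point
    void *m_pData;              // argument captured when the action was deferred
    DeferredAction *m_pNext;    // intrusive link -- enqueuing requires no allocation
};

class SafePointList
{
public:
    // Callable from restricted contexts: a couple of pointer writes and one CAS, no heap allocation.
    void Defer(DeferredAction *pAction)
    {
        DeferredAction *pHead = m_pHead.load(std::memory_order_relaxed);
        do
        {
            pAction->m_pNext = pHead;
        }
        while (!m_pHead.compare_exchange_weak(pHead, pAction, std::memory_order_release));
    }

    // Called at a safe point, where allocation (and anything else) is permitted again.
    void Drain()
    {
        DeferredAction *pAction = m_pHead.exchange(nullptr, std::memory_order_acquire);
        while (pAction != nullptr)
        {
            DeferredAction *pNext = pAction->m_pNext;
            pAction->m_pfn(pAction->m_pData);
            pAction = pNext;
        }
    }

private:
    std::atomic<DeferredAction *> m_pHead { nullptr };
};
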
/// <summary>
/// Creates a realized (non workstealing) chore in the schedule group. Used to schedule light-weight
/// tasks and agents.
/// </summary>
void ScheduleGroupBase::ScheduleTask(__in TaskProc proc, void* data)
{
    if (proc == NULL)
    {
        throw std::invalid_argument("proc");
    }

    RealizedChore *pChore = m_pScheduler->GetRealizedChore(proc, data);

    TRACE(TRACE_SCHEDULER, L"ScheduleGroupBase::ScheduleTask(sgroup=%d,ring=0x%p,chore=0x%p)\n", Id(), m_pRing, pChore);

    //
    // Every task takes a reference on its schedule group. This is to ensure a schedule group has a ref count > 0 if
    // no contexts are working on it, but queued tasks are present. The reference count is transferred to the context
    // that eventually executes the task.
    //
    InternalReference();

    m_realizedChores.Enqueue(pChore);

    ContextBase *pCurrentContext = SchedulerBase::FastCurrentContext();

    if (pCurrentContext == NULL || pCurrentContext->GetScheduler() != m_pScheduler)
    {
        //
        // This is a thread that is in no way tracked in ConcRT (no context assigned to it) or it is a context foreign to
        // this scheduler, so we cannot have statistics directly associated with its context. Instead, there is an entry in
        // the TLS section PER scheduler that points to the external statistics mapping. From that information, we can know
        // whether we have seen this thread before and whether it was ever scheduling tasks on the current scheduler.
        //
        ExternalStatistics * externalStatistics = (ExternalStatistics *) TlsGetValue(m_pScheduler->m_dwExternalStatisticsIndex);

        if (externalStatistics == NULL)
        {
            //
            // This is the first piece of statistical data for this thread on this scheduler, so
            // create a statistics class, add it to the list array of statistics on this scheduler and
            // save it in the TLS slot reserved for statistics on this scheduler.
            //
            externalStatistics = new ExternalStatistics();
            m_pScheduler->AddExternalStatistics(externalStatistics);
            TlsSetValue(m_pScheduler->m_dwExternalStatisticsIndex, externalStatistics);
        }
        else
        {
            //
            // We already have some statistical data for this thread on this scheduler.
            //
            ASSERT(m_pScheduler->m_externalThreadStatistics.MaxIndex() > 0);
        }

        ASSERT(externalStatistics != NULL);
        externalStatistics->IncrementEnqueuedTaskCounter();
    }
    else if (pCurrentContext->IsExternal())
    {
        static_cast<ExternalContextBase *>(pCurrentContext)->IncrementEnqueuedTaskCounter();
    }
    else
    {
        static_cast<InternalContextBase *>(pCurrentContext)->IncrementEnqueuedTaskCounter();
    }

    //
    // In most cases this if check will fail. To avoid the function call overhead in the common case, we check
    // for virtual processors beforehand.
    //
    if (m_pScheduler->m_virtualProcessorAvailableCount > 0)
    {
        m_pScheduler->StartupNewVirtualProcessor(this);
    }
}

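//
// [Illustrative sketch -- not part of the original source.] The external-statistics path above is a
// create-on-first-use, per-thread record keyed by a TLS slot that each scheduler allocates for itself.
// The standalone snippet below shows just that lookup with hypothetical names; error handling, and the
// per-owner list that lets the records be freed at shutdown (the role AddExternalStatistics plays above),
// are elided. The #include belongs at file scope.
//
#include <windows.h>

struct ThreadRecord
{
    long m_enqueuedTasks = 0;
};

class StatsOwner
{
public:
    StatsOwner() : m_tlsIndex(TlsAlloc()) { }

    ~StatsOwner() { TlsFree(m_tlsIndex); }

    // Returns the calling thread's record for this owner, creating it on first use.
    ThreadRecord *GetThreadRecord()
    {
        ThreadRecord *pRecord = static_cast<ThreadRecord *>(TlsGetValue(m_tlsIndex));
        if (pRecord == nullptr)
        {
            // First time this thread touches this owner: create the record and publish it in the slot.
            pRecord = new ThreadRecord();
            TlsSetValue(m_tlsIndex, pRecord);
        }
        return pRecord;
    }

private:
    DWORD m_tlsIndex;
};

// Usage from any thread, tracked or not:  owner.GetThreadRecord()->m_enqueuedTasks++;
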
/// <summary>
/// Places a work queue in the detached queue. This allows the work queue to remain eligible for stealing
/// even though it is detached from any context. The work queue will be recycled and handed back to a
/// context executing within the schedule group that needs a queue. If the queue is not recycled, it will be
/// abandoned and freed when it becomes empty (a steal on it while in detached mode fails).
/// </summary>
void ScheduleGroupBase::DetachActiveWorkQueue(WorkQueue *pWorkQueue)
{
    InternalReference();

    //
    // Note: there is a distinct lack of relative atomicity between the flag set and the queue add. The worst thing that
    // happens here is that we ask the list array to remove an element at an invalid index. It is prepared to handle
    // that anyway.
    //
    pWorkQueue->SetDetached(true);
    m_detachedWorkQueues.Add(&pWorkQueue->m_detachment);
}

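//
// [Illustrative sketch -- not part of the original source.] The "list array" the comment above leans on
// tolerates removal at a stale index. A minimal fixed-size stand-in is below: Add claims an empty slot
// with a CAS, and Remove succeeds only if the given slot still holds the expected pointer, so a racing
// caller with a stale index or value simply gets 'false' back. Names and the capacity are hypothetical;
// the snippet is standalone, so the #includes belong at file scope.
//
#include <atomic>
#include <cstddef>

template <typename T, size_t N = 64>
class SlotArray
{
public:
    SlotArray()
    {
        for (size_t i = 0; i < N; ++i)
        {
            m_slots[i].store(nullptr, std::memory_order_relaxed);
        }
    }

    // Publishes pItem in the first free slot and returns its index, or -1 if the array is full.
    int Add(T *pItem)
    {
        for (size_t i = 0; i < N; ++i)
        {
            T *pExpected = nullptr;
            if (m_slots[i].compare_exchange_strong(pExpected, pItem, std::memory_order_release))
            {
                return static_cast<int>(i);
            }
        }
        return -1;
    }

    // Succeeds only if slot 'index' still contains pItem; stale indices and values are harmless.
    bool Remove(T *pItem, size_t index)
    {
        if (index >= N)
        {
            return false;
        }
        T *pExpected = pItem;
        return m_slots[index].compare_exchange_strong(pExpected, nullptr, std::memory_order_acquire);
    }

    size_t MaxIndex() const { return N; }

    T *operator[](size_t index) const { return m_slots[index].load(std::memory_order_relaxed); }

private:
    std::atomic<T *> m_slots[N];
};
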
/// <summary>
/// Called by a work queue in order to roll back an attempted kill that could not be committed due to reuse.
/// </summary>
void ScheduleGroupBase::RedetachQueue(WorkQueue *pWorkQueue)
{
    //
    // Roll back by reinserting into m_detachedWorkQueues. We detect the error before setting detached state to false or releasing
    // the reference, so this is the only operation which needs to happen. It just cannot happen during the steal due to the fact that
    // there is a **SLIGHT** chance that the call will perform a heap allocation.
    //
    m_detachedWorkQueues.Add(&pWorkQueue->m_detachment);
}

/// <summary>
/// Attempts to acquire a detached work queue from the schedule group. If such a work queue is found, it
/// is removed from the detached queue and returned. This allows recycling of work queues that are detached
/// yet still have unstructured work.
/// </summary>
WorkQueue *ScheduleGroupBase::GetDetachedWorkQueue()
{
    int maxIdx = m_detachedWorkQueues.MaxIndex();

    for (int i = 0; i < maxIdx; i++)
    {
        ListArrayInlineLink<WorkQueue> *pLink = m_detachedWorkQueues[i];

        //
        // No code below this may dereference pLink unless it is removed from the list array. There is no guarantee
        // of safety as this can be called from an external context or multiple internal contexts.
        //
        if (pLink != NULL && m_detachedWorkQueues.Remove(pLink, i, false))
        {
            WorkQueue *pWorkQueue = pLink->m_pObject;
            pWorkQueue->SetDetached(false);

            //
            // This removed detached work queue incremented the reference count
            // in ScheduleGroupBase::DetachActiveWorkQueue(). Release it now.
            //
            // This is safe because we are inside the schedule group getting a work queue. This means that there is already
            // some context with a reference on the schedule group and it won't disappear out from underneath us by removing
            // the detach reference.
            //
            InternalRelease();

            return pWorkQueue;
        }
    }

    return NULL;
}

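//
// [Illustrative sketch -- not part of the original source.] The scan in GetDetachedWorkQueue() above only
// dereferences an entry after removing it, because a successful removal is what confers ownership. Using
// the hypothetical SlotArray sketched earlier, the same claim-then-use loop looks like this.
//
template <typename T, size_t N>
T *ClaimFirst(SlotArray<T, N>& slots)
{
    for (size_t i = 0; i < slots.MaxIndex(); ++i)
    {
        T *pItem = slots[i];

        //
        // The raw slot read is only a hint -- another thread may claim or recycle the entry at any moment.
        // It is safe to use pItem only once Remove() succeeds and has atomically taken it out of the array.
        //
        if (pItem != nullptr && slots.Remove(pItem, i))
        {
            return pItem;
        }
    }

    return nullptr;
}
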
/// <summary>
/// Called by a work queue in order to retire itself at a safe point.
/// </summary>
void ScheduleGroupBase::RetireDetachedQueue(WorkQueue *pWorkQueue)
{
    VERIFY(m_workQueues.Remove(pWorkQueue));

    //
    // This removed detached work queue incremented the reference count
    // in ScheduleGroupBase::DetachActiveWorkQueue(). Release it now.
    //
    InternalRelease();
}

RealizedChore * ScheduleGroupBase::GetRealizedChore()
{
    if (m_realizedChores.Empty())
        return NULL;

    RealizedChore *pChore = m_realizedChores.Dequeue();

    TRACE(TRACE_SCHEDULER, L"ScheduleGroup::GetRealizedChore(sgroup=%d,ring=0x%p,chore=0x%p)\n", Id(), m_pRing, pChore);

    return pChore;
}

/// <summary>
/// Gets an internal context from either the idle pool or a newly allocated one and prepares it for
/// execution. A NULL return value from the routine is considered fatal (out of memory). This is the
/// API that should be used to obtain an internal context for execution. The context is associated
/// with this schedule group.
/// </summary>
InternalContextBase * ScheduleGroupBase::GetInternalContext(_Chore *pChore, bool choreStolen)
{
    // Get an internal context from the idle pool.
    InternalContextBase* pContext = m_pScheduler->GetInternalContext();

    // Associate it with this schedule group.
    ASSERT(pContext != NULL);
    pContext->PrepareForUse(this, pChore, choreStolen);

    return pContext;
}

/// <summary>
/// Releases an internal context back into the idle pool after execution. If the idle pool
/// is full, the context may be freed instead.
/// </summary>
void ScheduleGroupBase::ReleaseInternalContext(InternalContextBase *pContext)
{
    pContext->RemoveFromUse();
    m_pScheduler->ReleaseInternalContext(pContext);
}
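
//
// [Illustrative sketch -- not part of the original source.] GetInternalContext()/ReleaseInternalContext()
// above sit on top of an idle pool: reuse a parked object when one is available, allocate otherwise, and
// free on release only when the pool is already at capacity. A minimal, mutex-based stand-in for that
// pattern follows; all names are hypothetical and nothing here mirrors the scheduler's actual pool code.
// The snippet is standalone, so the #includes belong at file scope.
//
#include <mutex>
#include <vector>

template <typename T>
class IdlePool
{
public:
    explicit IdlePool(size_t maxIdle) : m_maxIdle(maxIdle) { }

    ~IdlePool()
    {
        for (T *pItem : m_idle)
        {
            delete pItem;
        }
    }

    // Hands back a parked object if one exists, otherwise allocates a fresh one.
    T *Get()
    {
        {
            std::lock_guard<std::mutex> lock(m_lock);
            if (!m_idle.empty())
            {
                T *pItem = m_idle.back();
                m_idle.pop_back();
                return pItem;
            }
        }
        return new T();
    }

    // Parks the object for reuse, or frees it if the pool is already full.
    void Release(T *pItem)
    {
        {
            std::lock_guard<std::mutex> lock(m_lock);
            if (m_idle.size() < m_maxIdle)
            {
                m_idle.push_back(pItem);
                return;
            }
        }
        delete pItem;
    }

private:
    std::mutex m_lock;
    std::vector<T *> m_idle;
    size_t m_maxIdle;
};
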
} // namespace details
} // namespace Concurrency