// contextbase.cpp
}
isDone = (pEntry == NULL);
}
__finally
{
//
// It may have been released due to a back-off.
//
if (fContinue)
{
m_stealers.ReleaseWrite();
}
}
}
}
/// <summary>
/// Pushes an unrealized chore onto the work stealing queue for structured parallelism.
/// </summary>
/// <param name="pChore">
/// The chore to push onto the structured work stealing queue.
/// </param>
/// <summary>
/// Pushes an unrealized chore onto the work stealing queue for structured parallelism.
/// </summary>
/// <param name="pChore">
/// The chore to push onto the structured work stealing queue.
/// </param>
void ContextBase::PushStructured(_UnrealizedChore *pChore)
{
    GetStructuredWorkQueue()->PushStructured(pChore);

    //
    // Bump the enqueued-task statistic. A virtual call here would cost two memory
    // dereferences plus an indirect call on this hot path; instead we pay a single
    // dereference for the type check and one branch, which only wins because the
    // target increment function is inlined at the call site.
    //
    if (!IsExternal())
    {
        static_cast<InternalContextBase *>(this)->IncrementEnqueuedTaskCounter();
    }
    else
    {
        static_cast<ExternalContextBase *>(this)->IncrementEnqueuedTaskCounter();
    }

    //
    // Newly queued work may justify waking an idle virtual processor.
    //
    if (m_pScheduler->m_virtualProcessorAvailableCount > 0)
    {
        m_pScheduler->StartupNewVirtualProcessor(m_pGroup);
    }
}
/// <summary>
/// Pushes an unrealized chore onto the work stealing queue for unstructured parallelism.
/// </summary>
/// <param name="pChore">
/// The chore to push onto the unstructured work stealing queue.
/// </param>
/// <summary>
/// Pushes an unrealized chore onto the work stealing queue for unstructured parallelism.
/// </summary>
/// <param name="pChore">
/// The chore to push onto the unstructured work stealing queue.
/// </param>
int ContextBase::PushUnstructured(_UnrealizedChore *pChore)
{
    int pushCookie = GetWorkQueue()->PushUnstructured(pChore);

    //
    // Bump the enqueued-task statistic. A virtual call here would cost two memory
    // dereferences plus an indirect call on this hot path; instead we pay a single
    // dereference for the type check and one branch, which only wins because the
    // target increment function is inlined at the call site.
    //
    if (!IsExternal())
    {
        static_cast<InternalContextBase *>(this)->IncrementEnqueuedTaskCounter();
    }
    else
    {
        static_cast<ExternalContextBase *>(this)->IncrementEnqueuedTaskCounter();
    }

    //
    // Newly queued work may justify waking an idle virtual processor.
    //
    if (m_pScheduler->m_virtualProcessorAvailableCount > 0)
    {
        m_pScheduler->StartupNewVirtualProcessor(m_pGroup);
    }

    return pushCookie;
}
/// <summary>
/// Pops the topmost chore from the work stealing queue for structured parallelism. Failure
/// to pop typically indicates stealing.
/// </summary>
/// <returns>
/// An unrealized chore from the structured work stealing queue or NULL if none is present.
/// </returns>
/// <summary>
/// Pops the topmost chore from the work stealing queue for structured parallelism. Failure
/// to pop typically indicates stealing.
/// </summary>
/// <returns>
/// An unrealized chore from the structured work stealing queue or NULL if none is present.
/// </returns>
_UnrealizedChore *ContextBase::PopStructured()
{
    WorkQueue *pQueue = m_pWorkQueue;
    ASSERT(pQueue != NULL);
    return pQueue->PopStructured();
}
/// <summary>
/// Attempts to pop the chore specified by a cookie value from the unstructured work stealing queue. Failure
/// to pop typically indicates stealing.
/// </summary>
/// <param name="cookie">
/// A cookie returned from PushUnstructured indicating the chore to attempt to pop from
/// the unstructured work stealing queue.
/// </param>
/// <returns>
/// The specified unrealized chore (as indicated by cookie) or NULL if it could not be popped from
/// the work stealing queue
/// </returns>
/// <summary>
/// Attempts to pop the chore specified by a cookie value from the unstructured work stealing queue. Failure
/// to pop typically indicates stealing.
/// </summary>
/// <param name="cookie">
/// A cookie returned from PushUnstructured indicating the chore to attempt to pop from
/// the unstructured work stealing queue.
/// </param>
/// <returns>
/// The specified unrealized chore (as indicated by cookie) or NULL if it could not be popped from
/// the work stealing queue
/// </returns>
_UnrealizedChore *ContextBase::TryPopUnstructured(int cookie)
{
    WorkQueue *pQueue = m_pWorkQueue;
    ASSERT(pQueue != NULL);
    return pQueue->TryPopUnstructured(cookie);
}
/// <summary>
/// Sweeps the unstructured work stealing queue for items matching a predicate and potentially removes them
/// based on the result of a callback.
/// </summary>
/// <param name="pPredicate">
/// The predicate for things to call pSweepFn on.
/// </param>
/// <param name="pData">
/// The data for the predicate callback
/// </param>
/// <param name="pSweepFn">
/// The sweep function
/// </param>
/// <summary>
/// Sweeps the unstructured work stealing queue for items matching a predicate and potentially removes them
/// based on the result of a callback.
/// </summary>
/// <param name="pPredicate">
/// The predicate deciding which items pSweepFn is invoked upon.
/// </param>
/// <param name="pData">
/// Opaque data passed through to the predicate callback.
/// </param>
/// <param name="pSweepFn">
/// The sweep function invoked for matching items.
/// </param>
void ContextBase::SweepUnstructured(WorkStealingQueue<_UnrealizedChore>::SweepPredicate pPredicate,
                                    void *pData,
                                    WorkStealingQueue<_UnrealizedChore>::SweepFunction pSweepFn
                                    )
{
    ASSERT(m_pWorkQueue != NULL);
    // Delegate the sweep to the underlying work queue (a void call; no result to propagate).
    m_pWorkQueue->SweepUnstructured(pPredicate, pData, pSweepFn);
}
/// <summary>
/// Create a workqueue for use in unstructured task collections.
/// </summary>
/// <summary>
/// Create a workqueue for use in unstructured task collections.
/// </summary>
void ContextBase::CreateWorkQueue()
{
    //
    // Preferentially recycle a detached workqueue. A detached queue is still
    // chained on m_pGroup->m_workQueues, so no Add is required in that case.
    //
    if ((m_pWorkQueue = m_pGroup->GetDetachedWorkQueue()) == NULL)
    {
        //
        // No detached queue available: fall back to the free pool, and only
        // allocate a brand new queue if that also comes up empty. Either way
        // the queue must then be chained onto the schedule group's list.
        //
        if ((m_pWorkQueue = m_pGroup->m_workQueues.PullFromFreePool()) != NULL)
        {
            //
            // A pooled queue carries stale state and must be reinitialized.
            //
            m_pWorkQueue->Reinitialize();
        }
        else
        {
            m_pWorkQueue = new WorkQueue();
        }

        m_pGroup->m_workQueues.Add(m_pWorkQueue);
    }

    ASSERT(m_pWorkQueue != NULL);
    m_pWorkQueue->SetOwningContext(this);
}
/// <summary>
/// Create a workqueue for use in structured task collections.
/// </summary>
/// <summary>
/// Create a workqueue for use in structured task collections.
/// </summary>
void ContextBase::CreateStructuredWorkQueue()
{
    //
    // Check the free pool first. With structured task collections there are
    // typically no prior unstructured collections that neglected to wait, so
    // detached workqueues are rare; the free pool is the likelier hit.
    //
    if ((m_pWorkQueue = m_pGroup->m_workQueues.PullFromFreePool()) != NULL)
    {
        //
        // A pooled queue carries stale state: reinitialize it, then chain it
        // back onto the schedule group's list.
        //
        m_pWorkQueue->Reinitialize();
        m_pGroup->m_workQueues.Add(m_pWorkQueue);
    }
    else
    {
        //
        // Free pool was empty -- see whether a detached workqueue can be
        // reused. A detached queue is still on m_pGroup->m_workQueues, so it
        // needs no Add; only a freshly allocated queue does.
        //
        if ((m_pWorkQueue = m_pGroup->GetDetachedWorkQueue()) == NULL)
        {
            m_pWorkQueue = new WorkQueue();
            m_pGroup->m_workQueues.Add(m_pWorkQueue);
        }
    }

    ASSERT(m_pWorkQueue != NULL);
    m_pWorkQueue->SetOwningContext(this);
}
/// <summary>
/// Cleans up the internal workqueue.
/// </summary>
void ContextBase::ReleaseWorkQueue()
{
    if (m_pWorkQueue != NULL)
    {
        //
        // It's entirely possible that this particular work queue had chores left on the unstructured work queue.
        // Someone could create an unstructured task collection within an LWT, queue chores, and subsequently pass
        // the collection out of the LWT to be waited upon later. In this case, we must leave the work queue around
        // in order for stealing to appropriately happen. This work queue will not be dechained from the schedule
        // group, but will remain until empty. It will go on a lookaside and, while in this state, can be handed
        // to some new context working on an item within the same schedule group.
        //
        // Save off a local copy of the workqueue and work with that. The debugger mines the workqueue information
        // held in this context, and if we remove the work queue while it's still pointed at by this context, the
        // debugger can become confused. NULL-ing m_pWorkQueue BEFORE touching the queue is therefore deliberate
        // and order-sensitive.
        WorkQueue* workQueue = m_pWorkQueue;
        m_pWorkQueue = NULL;

        if ( !workQueue->IsUnstructuredEmpty())
        {
            // Unstructured chores remain: detach the queue so thieves can still drain it.
            // LockedSetOwningContext is used because concurrent stealers may be inspecting the owner.
            workQueue->LockedSetOwningContext(NULL);
            m_pGroup->DetachActiveWorkQueue(workQueue);
        }
        else
        {
            //
            // Unless someone really side-stepped the intent of _StructuredTaskCollection, it's almost certain that
            // workQueue->IsStructuredEmpty() is true or else a missing_wait was already thrown.
            //
            if (workQueue->IsLockHeld())
            {
                // Somebody is stealing, don't want to NULL out owning ctx until they're done.
                workQueue->LockedSetOwningContext(NULL);
            }
            else
            {
                // We know workQueue has no unstructured, since we're on the owning thread.
                // Moreover, structured must be empty at this point, because we cannot ever get here until the wait is satisfied.
                // If the UnlockedSteal is entered, then we'll early exit w/o ever touching the owning ctx of workQueue.
                workQueue->SetOwningContext(NULL);
            }
            // Fully empty queue: dechain it from the schedule group (it may return via the free pool).
            m_pGroup->m_workQueues.Remove(workQueue);
        }
    }

    //
    // Make sure that any detachment triggers the stealers to move into the task pool list. Otherwise, we can wind up with
    // an A<-B<-A stealing pattern:
    //
    //     TC 1 on thread A
    //     Thread B steals from TC 1 (A<-B)
    //     Thread A detaches (no wait on TC1)
    //     Thread A does SFW and steals from TC 2 deeper inline on thread B (B<-A)
    //
    // The overall stealers pattern is A<-B<-A which will wind up with lock traversal in this order. The recursive reacquire of
    // R/W lock (or out of order acquire: A<-B on one thread, B<-A on the other) will result in later deadlock.
    //
    DetachStealers();
}
/// <summary>
/// Sets the 'this' context into the tls slot as the current context. This is used by internal contexts in
/// their dispatch loops.
/// </summary>
void ContextBase::SetAsCurrentTls()
{
    // Publish 'this' as the current context in the scheduler's TLS slot so that
    // subsequent "current context" lookups on this thread resolve to this object.
    // Used by internal contexts in their dispatch loops.
    TlsSetValue(SchedulerBase::t_dwContextIndex, this);
}