searchalgorithms.cpp
    return false;
}
/// <summary>
/// Performs a cache local search for work.
/// </summary>
bool WorkSearchContext::SearchCacheLocal(WorkItem *pWorkItem, ScheduleGroupBase *pOriginGroup, ULONG allowableTypes)
{
    //
    // The cache local search biases work to the current schedule group, current scheduling ring, and current scheduling node before looking elsewhere.
    // If you consider the search space as follows:
    //
    //                  SR    SR    SR    SR
    //
    //    Contexts      |     |     |     |
    //    Realized      |     |     |     |
    //    Unrealized    v     v     v     v
    //
    // cache local will make vertical slices through the search space to find work.
    //
    // Each entry in the above matrix can be viewed as:
    //
    //    SG -> SG -> SG -> SG
    //
    // e.g.: cache local will look for runnables within the schedule groups of a given ring,
    //       THEN realized chores within the schedule groups of a given ring,
    //       THEN unrealized chores within the schedule groups of a given ring,
    //       biasing such searches to the origin group where possible.
    //
    // The cache local algorithm is highly susceptible to livelock in certain scenarios. Because there is a high degree of biasing of work due
    // to local runnable contexts and the tendency to keep work "local", it is entirely possible that a series of yielders spin forever waiting on
    // work outside the bias. This can happen in several ways:
    //
    // - The LRC can become deep enough that a dependency chain as follows is established:
    //       [OLD END] <critical> <yielder> <yielder> <yielder> [NEW END]
    //   A purely cache local search can spin on the yielders since the LRC is LIFO.
    //
    // - A dependency chain can be established between the LRC and the schedule group's list due to LRC overflow or the critical path becoming unblocked
    //   from an external context:
    //       LRC: <yielder> <yielder> <yielder>
    //       SG : <critical>
    //   A purely cache local search can spin on the yielders since the LRC is higher priority than the schedule group's list.
    //
    // - A dependency chain can be established between two schedule groups within the same ring:
    //       SG1: <yielder> <yielder>
    //       SG2: <critical>
    //   A purely cache local search can spin on the yielders since the current schedule group is higher priority than other schedule groups.
    //
    // - A dependency chain can be established between two schedule groups within different rings:
    //       R1 SG1: <yielder> <yielder>
    //       R2 SG2: <critical>
    //   A purely cache local search can spin on the yielders since the current scheduling ring is higher priority than other scheduling rings.
    //
    // In order to alleviate these circumstances and make forward progress, the cache local algorithm is altered to take a series of steps:
    //
    // - If we continue to find "local" work (within the LRC or current schedule group) for N items, we will flip the direction of the LRC
    //   and access our LRC in FIFO order. This is more expensive, but it will eliminate a goodly portion of livelocks.
    //
    // - If we continue to find "local" work for N+K items, we drop the LRC and current group bias and move on to another schedule group. This
    //   resets the bias counters and begins again completely cache local on the new schedule group.
    //
    // - If we have not transitioned rings in J items, we move to another scheduling ring. This resets the bias counters and begins again
    //   completely cache local on the new scheduling ring.
    //
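    //
    // The N, N+K, and J thresholds above are surfaced to this routine through LocalBiasStage() and DropRingBias()
    // below; the Bias() call made when work is found and the Reset*Bias() calls presumably maintain the backing
    // counters.
    //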
    bool fFound = false;
    int count = 0;

    while (!fFound)
    {
        // The loop should not be executed more than 2 times.
        count++;
        CORE_ASSERT(count <= 2);

        //
        // Do any up-front searching required for special circumstances (e.g.: UMS schedulers)
        //
        if (PreSearch(pWorkItem))
            return true;

        SchedulingRing *pCurrentRing = m_pCurrentRing;
        if (pCurrentRing == NULL)
            pCurrentRing = m_pStartingRing = m_pCurrentRing = m_pVirtualProcessor->GetOwningRing();

        SchedulingRing *pRing = pCurrentRing;

        // **************************************************
        // Initial Runnables Bias:
        //
        int bias = LocalBiasStage();
        CORE_ASSERT((count == 1) || (bias == 0));

        //
        // Cache local biases to the LRC, our own schedule group, and adjacent LRCs. For runnables, this is always done at the top
        // of each search loop.
        //
        // Note that each of the Biased* functions will update appropriate biasing counters.
        //
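        //
        // Bias stage 0 is fully cache local; at stage 1 and above, GetLocalRunnable is passed bias > 0, requesting
        // the flipped (FIFO-order) LRC walk described in the header comment; at stage 2 the LRC is skipped but the
        // origin group is still preferred; at stage 3 this local runnables bias is skipped entirely.
        //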
        if ((allowableTypes & WorkItem::WorkItemTypeContext) && (bias < 3) &&
            ((bias < 2 && GetLocalRunnable(pWorkItem, m_pVirtualProcessor, bias > 0)) ||
             (GetRunnableContext(pWorkItem, pOriginGroup)) ||
             (StealLocalRunnable(pWorkItem, m_pVirtualProcessor->GetOwningNode(), m_pVirtualProcessor))))
        {
            fFound = true;
        }
        else
        {
            // **************************************************
            // Scheduling Ring Slicing:
            //
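            //
            // At the deepest bias stage the origin group also loses its preferential treatment, so the slicing below
            // treats it like any other schedule group (see the "N+K items" step in the header comment).
            //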
            ScheduleGroupBase *pBiasedGroup = bias > 2 ? NULL : pOriginGroup;

            //
            // If we have gone so far along biasing to the current scheduling ring, move to the next ring and go back to being entirely cache local
            // on that particular scheduling ring.
            //
            if (DropRingBias())
            {
                //
                // We need to check the LRCs of the ring we're looking at to ensure forward progress (ahead of the normal runnables search). When
                // we inject a ring change, do this once right before the ring change.
                //
                SchedulingNode *pRingNode = pRing->GetOwningNode();
                if (PerformOneTimeLRCScan())
                {
                    fFound = StealLocalRunnable(pWorkItem, pRingNode, m_pVirtualProcessor);
                }

                if (fFound)
                {
                    ResetLocalBias();
                }
                else
                {
                    pRing = m_pScheduler->GetNextSchedulingRing(pCurrentRing, pRing);
                    ResetBias();
                }
            }

            //
            // Slice through every scheduling ring. The origin group will be skipped for runnables since that bias is applied above. The bias for
            // realized and unrealized chores is in the appropriate search routines. This is done to avoid code duplication with yield since the
            // runnables bias is inverted for yielding.
            //
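            //
            // As a final leg within each ring, attempt to steal a runnable from the LRCs of other virtual processors
            // on that ring's owning node.
            //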
            if (!fFound)
            {
                while (pRing != NULL)
                {
                    if (((allowableTypes & WorkItem::WorkItemTypeContext) && SearchCacheLocal_Runnables(pWorkItem, pRing, pBiasedGroup)) ||
                        ((allowableTypes & WorkItem::WorkItemTypeRealizedChore) && SearchCacheLocal_Realized(pWorkItem, pRing, pCurrentRing, pOriginGroup)) ||
                        ((allowableTypes & WorkItem::WorkItemTypeUnrealizedChore) && SearchCacheLocal_Unrealized(pWorkItem, pRing, pCurrentRing, pOriginGroup)) ||
                        ((allowableTypes & WorkItem::WorkItemTypeContext) && StealLocalRunnable(pWorkItem, pRing->GetOwningNode(), m_pVirtualProcessor)))
                    {
                        ResetLocalBias();
                        fFound = true;
                        break;
                    }

                    //
                    // If we ever move to another scheduling ring, we can completely drop any bias and go back to being completely cache local.
                    //
                    pRing = m_pScheduler->GetNextSchedulingRing(pCurrentRing, pRing);
                    ResetBias();
                }
            }
        }
        if (!fFound)
        {
            //
            // If no work was found after a full loop, reset the ring at the top of the next search. Note that if we cannot find work in the scheduler,
            // it is completely safe to reset any bias counts and go back to being completely cache local the next time we find work.
            //
            m_pCurrentRing = NULL;
            ResetBias();

            if (bias == 0)
            {
                // We didn't find any work. There is no need to do another round of search since the bias didn't
                // cause us to skip any queue.
                break;
            }

            // When the bias is non-zero, we potentially skipped checking some of the queues (the LRC, for example). If we didn't find
            // any work, reset all the bias and do one more search loop.
        }
        else
        {
            Bias();
            m_pVirtualProcessor->UpdateRamblingState(m_pStartingRing != pRing, pRing);
            m_pCurrentRing = pRing;
        }
    }

    return fFound;
}
/// <summary>
/// Performs a cache local search for work in the yielding case.
/// </summary>
bool WorkSearchContext::SearchCacheLocalYield(WorkItem *pWorkItem, ScheduleGroupBase *pOriginGroup, ULONG allowableTypes)
{
    //
    // The yielding case slices identically to the regular case except that the search is done in a pseudo-reverse order.
    //
    bool fFound = false;

    //
    // Do any up-front searching required for special circumstances (e.g.: UMS schedulers)
    //
    if (PreSearch(pWorkItem))
        return true;

    SchedulingRing *pCurrentRing = m_pCurrentRing;
    if (m_pCurrentRing == NULL)
        pCurrentRing = m_pStartingRing = m_pCurrentRing = m_pVirtualProcessor->GetOwningRing();

    // **************************************************
    // Scheduling Ring Slicing:
    //
    SchedulingRing *pRing = pCurrentRing;
    ScheduleGroupBase *pBiasedGroup = pOriginGroup;

    //
    // If we have gone so far along biasing to the current scheduling ring, move to the next ring and go back to being entirely cache local
    // on that particular scheduling ring.
    //
    if (DropRingBias())
    {
        pRing = m_pScheduler->GetNextSchedulingRing(pCurrentRing, pRing);
        ResetBias();
    }

    while (pRing != NULL)
    {
        //
        // Recompute local bias on each ring transition
        //
        int bias = LocalBiasStage();
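        //
        // The yield path inverts the slice order: unrealized chores, then realized chores, then runnables, and the
        // runnables slice is only taken before any local bias has accumulated (bias < 1).
        //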
        if (((allowableTypes & WorkItem::WorkItemTypeUnrealizedChore) && SearchCacheLocal_Unrealized(pWorkItem, pRing, pCurrentRing, pBiasedGroup)) ||
            ((allowableTypes & WorkItem::WorkItemTypeRealizedChore) && SearchCacheLocal_Realized(pWorkItem, pRing, pCurrentRing, pBiasedGroup)) ||
            (bias < 1 && (allowableTypes & WorkItem::WorkItemTypeContext) && SearchCacheLocal_Runnables(pWorkItem, pRing, pBiasedGroup)) ||
            ((allowableTypes & WorkItem::WorkItemTypeContext) && StealLocalRunnable(pWorkItem, pRing->GetOwningNode(), m_pVirtualProcessor)))
        {
            fFound = true;
            break;
        }
        else if ((pRing == pCurrentRing) && (allowableTypes & WorkItem::WorkItemTypeContext))
        {
            // **************************************************
            // Local Bias:
            //
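            //
            // Reverse of the non-yield path: the origin group's runnables are preferred over our own LRC, and the
            // LRC walk only flips order at the deepest bias stage (the bias > 2 argument below).
            //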
            if ((bias < 2 && GetRunnableContext(pWorkItem, pOriginGroup)) ||
                (GetLocalRunnable(pWorkItem, m_pVirtualProcessor, bias > 2)))
            {
                fFound = true;
                break;
            }
        }

        //
        // If we ever move to another scheduling ring, we can completely drop any bias and go back to being completely cache local.
        //
        pRing = m_pScheduler->GetNextSchedulingRing(pCurrentRing, pRing);
        ResetBias();
    }

    if (fFound)
    {
        Bias();
        m_pCurrentRing = pRing;
    }
    else
    {
        //
        // If no work was found after a full loop, reset the ring at the top of the next search. Note that if we cannot find work in the scheduler,
        // it is completely safe to reset any bias counts and go back to being completely cache local the next time we find work.
        //
        m_pCurrentRing = NULL;
        ResetBias();
    }

    return fFound;
}
}
}