📄 internalcontextbase.h
字号:
/// Remove a context from execution by dis-associating it from any scheduler group/chore.
/// </summary>
void RemoveFromUse();
protected:
//
// Protected types
//
// The reason an internal context is switching to another context (SwitchTo) or
// switching off its virtual processor (SwitchOut).
enum ReasonForSwitch
{
GoingIdle,  // no runnable work was found -- the context is going idle
Blocking,   // the context is cooperatively blocking
Yielding,   // the context is yielding its virtual processor to other work
Nesting     // the context is nesting/un-nesting a scheduler (see LeaveScheduler/RejoinScheduler)
};
//
// Protected data members
//
// The thread proxy that is executing this context's dispatch loop, if any.
// (The trailing "4/8" presumably notes the field size in bytes on 32-/64-bit builds.)
IThreadProxy * volatile m_pThreadProxy; // 4/8
//
// Protected methods
//
/// <summary>
/// Spins until the 'this' context is in a firmly blocked state
/// </summary>
void SpinUntilBlocked();
/// <summary>
/// Adds the context to a runnables collection, either on the virtual processor, or the schedule group
/// </summary>
/// <param name="pBias">
/// Bias any awakening of virtual processors to the scheduling node that pBias belongs to.
/// </param>
virtual void AddToRunnables(VirtualProcessor *pBias = NULL);
/// <summary>
/// Switches from one internal context to another.
/// </summary>
/// <param name="pContext">
/// The internal context to switch to.
/// </param>
/// <param name="reason">
/// The reason this context is switching away (see ReasonForSwitch).
/// </param>
void SwitchTo(InternalContextBase* pContext, ReasonForSwitch reason);
/// <summary>
/// Switches out the internal context. Useful when the virtual processor is to be retired.
/// Is also used when un-nesting a scheduler and the context is returning to its original scheduler.
/// </summary>
/// <param name="reason">
/// The reason for switching out of this vproc
/// </param>
/// <returns>
/// True if the context has been canceled.
/// </returns>
bool SwitchOut(ReasonForSwitch reason);
/// <summary>
/// Cancels the context, causing it to exit the dispatch loop if it is executing on a virtual processor
/// </summary>
virtual void Cancel();
/// <summary>
/// If internal context does not own this virtual processor then claim it back. This might require
/// waiting until it becomes available.
/// </summary>
void ReclaimVirtualProcessor();
/// <summary>
/// This function is called to execute the associated chore if one is available. The chore can be a stolen unrealized
/// chore or realized chore.
/// </summary>
/// <returns>
/// Returns true if an associated chore was executed, false otherwise.
/// </returns>
bool ExecutedAssociatedChore();
/// <summary>
/// Performs the necessary cleanup for a canceled context in its dispatch routine.
/// </summary>
void CleanupDispatchedContextOnCancel();
/// <summary>
/// Called in the dispatch loop to check if the virtual processor the context is running on is marked for retirement,
/// and retires the virtual processor if it is.
/// </summary>
/// <returns>
/// True if the virtual processor was retired, false otherwise.
/// </returns>
bool IsVirtualProcessorRetired();
/// <summary>
/// Searches for work using the search algorithm specified by the scheduler's policy. Also prepares the context to execute
/// work by reclaiming the virtual processor if necessary.
/// </summary>
/// <param name="pWork">
/// A pointer to a work item which is filled in if work was found.
/// </param>
/// <returns>
/// True if work was found, false otherwise.
/// </returns>
bool WorkWasFound(WorkItem * pWork);
/// <summary>
/// Switches to the runnable context represented by the work item.
/// </summary>
/// <param name="pWork">
/// A pointer to a work item to be executed.
/// </param>
void SwitchToRunnableContext(WorkItem * pWork);
/// <summary>
/// Executes the chore (realized or unrealized) specified by the work item.
/// </summary>
/// <param name="pWork">
/// A pointer to a work item that represents a realized or unrealized chore.
/// </param>
void ExecuteChoreInline(WorkItem * pWork);
/// <summary>
/// This method implements the wait-for-work and cancelation protocol.
/// </summary>
void WaitForWork(void);
/// <summary>
/// Performs cleanup of the internal thread context.
/// </summary>
void Cleanup();
/// <summary>
/// Called before this context executes on a given virtual processor. Records debug
/// bookkeeping (timestamps/TIDs), publishes the virtual processor, and clears the
/// blocked state so the context is observed as not-blocked once it starts running.
/// </summary>
/// <param name="pVProc">
/// The virtual processor this context is about to run on.
/// </param>
virtual void PrepareToRun(VirtualProcessor *pVProc)
{
#if defined(_DEBUG)
m_lastRunPrepareTimeStamp = __rdtsc();
m_prepareCount++;
m_lastAffinitizedTid = GetCurrentThreadId();
#endif // _DEBUG
// Publish the vproc before flipping the blocked state. InterlockedExchange acts
// as a full fence, so a thread that observes CONTEXT_NOT_BLOCKED also observes
// the updated m_pVirtualProcessor. NOTE(review): ordering assumption -- confirm
// against readers of m_blockedState.
m_pVirtualProcessor = pVProc;
InterlockedExchange(&m_blockedState, CONTEXT_NOT_BLOCKED);
}
// Virtual processor the context is executing on.
//
// In debug builds, the underlying field (_m_pVirtualProcessor) is only reachable
// through checked property accessors that assert when it is touched outside a
// critical region (required for safety on a UMS scheduler). In retail builds the
// field is accessed directly. The UNSAFE_* accessors deliberately bypass the check.
#if defined(_DEBUG)
// Checked setter: asserts unless the field is NULL or the caller is inside a critical region.
void _PutVirtualProcessor(VirtualProcessor *pVirtualProcessor)
{
//
// If this assertion fires, someone is changing m_pVirtualProcessor outside a critical region. Doing this violates safety
// on a UMS scheduler. m_pVirtualProcessor is not guaranteed to be stable on a UMS context. All manipulation must happen
// inside a critical region.
//
CORE_ASSERT(_m_pVirtualProcessor == NULL || IsInsideCriticalRegion());
_m_pVirtualProcessor = pVirtualProcessor;
}
// Checked getter: asserts unless the field is NULL or the caller is inside a critical region.
VirtualProcessor *_GetVirtualProcessor() const
{
//
// If this assertion fires, someone is examining m_pVirtualProcessor outside a critical region. Doing this violates safety
// on a UMS scheduler. m_pVirtualProcessor is not guaranteed to be stable on a UMS context. All manipulation must happen
// inside a critical region.
//
CORE_ASSERT(_m_pVirtualProcessor == NULL || IsInsideCriticalRegion());
return _m_pVirtualProcessor;
}
// MSVC property: makes "m_pVirtualProcessor" route through the checked accessors above.
__declspec(property(get=_GetVirtualProcessor, put=_PutVirtualProcessor)) VirtualProcessor *m_pVirtualProcessor;
VirtualProcessor * volatile _m_pVirtualProcessor;
// Unchecked read: bypasses the critical-region assertion. Use only where the caller
// can tolerate (or otherwise guarantees) a stale/unstable value.
VirtualProcessor *UNSAFE_CurrentVirtualProcessor() const
{
return _m_pVirtualProcessor;
}
// Unchecked write: bypasses the critical-region assertion.
void UNSAFE_SetVirtualProcessor(VirtualProcessor *pVirtualProcessor)
{
_m_pVirtualProcessor = pVirtualProcessor;
}
#else
// Retail builds: plain field; the UNSAFE_* accessors are equivalent to direct access.
VirtualProcessor * volatile m_pVirtualProcessor;
VirtualProcessor *UNSAFE_CurrentVirtualProcessor() const
{
return m_pVirtualProcessor;
}
void UNSAFE_SetVirtualProcessor(VirtualProcessor *pVirtualProcessor)
{
m_pVirtualProcessor = pVirtualProcessor;
}
#endif // defined(_DEBUG)
private:
friend class ExternalContextBase;
friend class SchedulerBase;
friend class ThreadScheduler;
friend class UMSThreadScheduler;
friend class VirtualProcessor;
friend class UMSThreadVirtualProcessor;
friend class SchedulingRing;
friend class UMSSchedulingContext;
template <class T, class Counter> friend class Stack;
template <typename T> friend class SQueue;
//
// Private data
//
// Pointer to an oversubscribed virtual processor if one is present.
VirtualProcessor * volatile m_pOversubscribedVProc;
// Chore associated with the context - this could be a realized chore or a stolen chore. The chore is associated with the context
// either when the internal context first starts up, or it is picked out of the idle pool by the scheduler. The context must execute this chore
// before it starts looking for other work. This is used for indirect aliasing of unstructured task pools.
_Chore *m_pAssociatedChore;
// Counter that indicates how many times the internal context has spun waiting for work.
unsigned int m_searchCount;
// Flag that indicates whether the internal context is canceled.
volatile bool m_fCanceled;
// Flag that indicates whether the associated chore is a stolen unrealized chore or a realized chore.
bool m_fAssociatedChoreStolen;
// Flag that indicates whether internal context is in the final search for work state.
// NOTE(review): the name suggests this tracks whether this context's virtual processor is
// the "visible" one during the final search-for-work -- confirm against the implementation.
bool m_fIsVisibleVirtualProcessor;
// Flag that indicates whether internal context has dequeued a piece of work without being able
// to immediately update the statistics numbers on a virtual processor (it was not affinitized).
bool m_fHasDequeuedTask : 1;
// Indicates that some work was skipped in the dispatch loop. Currently, this is set if we failed to check some of the work stealing
// queues due to in-progress task collection cancellation.
bool m_fWorkSkipped : 1;
// Debugging purposes: this informs whether the context was *EVER* put on a free list or whether it is a fresh context.
bool m_fEverRecycled : 1;
// Debug information (particularly useful for UMS)
//
// Time logging for forward progress determinations.
//
__int64 m_workStartTimeStamp;
__int64 m_lastRunPrepareTimeStamp;
DWORD m_prepareCount;
// Debug bit flags for this context (specific bit meanings not evident from this header).
DWORD m_ctxDebugBits;
// The last TID this context was dispatched on. You can normally get this from m_pThreadProxy.
DWORD m_lastDispatchedTid;
// The last TID this context was acquired/created on.
DWORD m_lastAcquiredTid;
// The last TID this context was affinitized on.
DWORD m_lastAffinitizedTid;
//
// Tracks the last assigned thread proxy (normally the same as m_pThreadProxy) -- but may not be for recycled contexts.
//
IThreadProxy *m_pAssignedThreadProxy;
IThreadProxy *m_pLastAssignedThreadProxy;
#if _UMSTRACE
_TraceBuffer m_traceBuffer;
#endif // _UMSTRACE
// A flag that is used by contexts adding runnables to a scheduler. When those contexts (the ones performing the add)
// do not implicitly have a reference to the schedule group the runnable belongs to, setting this flag on the runnable
// context they are adding to the scheduler's queues, ensures that the group does not get destroyed and the scheduler
// does not get finalized while they are touching scheduler/schedule group data.
volatile LONG m_fCrossGroupRunnable;
// Intrusive next pointer for SafeSQueue.
InternalContextBase *m_pNext;
// Flag that indicates whether the internal context is in the idle pool or not
volatile bool m_fIdle;
//
// Private methods
//
/// <summary>
/// Called when a context is nesting a scheduler. If nesting takes place on what is an internal context in
/// the 'parent' scheduler, the context must return the virtual processor to the parent scheduler.
/// </summary>
void LeaveScheduler();
/// <summary>
/// Called when a context is un-nesting a scheduler. If the parent context is an internal context, it needs
/// to rejoin the parent scheduler by looking for a virtual processor it can execute on.
/// </summary>
void RejoinScheduler();
/// <summary>
/// Called when the RM wakes up the thread for some reason. The base implementation is a
/// no-op; this is a virtual hook for derived context types to react to an RM awakening.
/// </summary>
virtual void RMAwaken()
{
}
};
} // namespace details
} // namespace Concurrency
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -