// ==++==
//
// Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// UMSSchedulingContext.cpp
//
// Implementation of the UMS scheduling context.  This is a special context whose sole purpose is to determine
// which context to run next at initial startup of a virtual processor and whenever a UMS thread running on the virtual
// processor blocks.  The RM will invoke this scheduling context whenever a return to primary happens.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

#include "concrtinternal.h"

namespace Concurrency
{
namespace details
{

    /// <summary>
    ///     Creates a new UMS scheduling context that is bound to a particular virtual processor.  Once the context
    ///     is created, it is always bound to the virtual processor used at creation time.
    /// </summary>
    /// <param name="pScheduler">
    ///     The scheduler to which this virtual processor belongs.
    /// </param>
    /// <param name="pBoundVProc">
    ///     The virtual processor to which this context is bound.
    /// </param>
    UMSSchedulingContext::UMSSchedulingContext(UMSThreadScheduler *pScheduler, UMSThreadVirtualProcessor *pBoundVProc) :
        m_pScheduler(pScheduler),
        m_pBoundVProc(pBoundVProc),
        m_pThreadProxy(NULL)
    {
        m_id = m_pScheduler->GetNewContextId();
    }
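
    //
    // Illustrative sketch (not part of the original source): a scheduler would
    // typically create one scheduling context per virtual processor when the
    // vproc itself is created.  The names pScheduler and pNewVProc below are
    // assumed for the example.
    //
    //     UMSSchedulingContext *pSchedulingContext =
    //         new UMSSchedulingContext(pScheduler, pNewVProc);
    //
    // Because the binding is fixed at construction, the context never migrates
    // to (or searches for) a different virtual processor.
    //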

    /// <summary>
    ///     Returns a scheduler unique identifier for the context.
    /// </summary>
    /// <returns>
    ///     The context Id.
    /// </returns>
    unsigned int UMSSchedulingContext::GetId() const
    {
        return m_id;
    }

    /// <summary>
    ///     Returns the scheduler to which this context belongs.
    /// </summary>
    /// <returns>
    ///     The owning scheduler.
    /// </returns>
    IScheduler * UMSSchedulingContext::GetScheduler()
    {
        return m_pScheduler->GetIScheduler();
    }

    /// <summary>
    ///     Returns the thread proxy which is executing this context.  Until the SetProxy method has been called on the given
    ///     context, this will return NULL.  Once the SetProxy method has been called, this returns the IThreadProxy which
    ///     was passed into the SetProxy method.
    /// </summary>
    /// <returns>
    ///     The thread proxy which dispatched this particular context.
    /// </returns>
    IThreadProxy * UMSSchedulingContext::GetProxy() 
    {
        return m_pThreadProxy;
    }

    /// <summary>
    ///     Sets the thread proxy which is executing this context.  The context must save this and return it upon a call to the GetProxy method.
    ///     Note that the resource manager guarantees stability of the thread proxy while inside the Dispatch method.
    /// </summary>
    /// <param name="pThreadProxy">
    ///     The thread proxy which dispatched this particular context.
    /// </param>
    void UMSSchedulingContext::SetProxy(IThreadProxy *pThreadProxy)
    {
        if (pThreadProxy == NULL)
            throw std::invalid_argument("pThreadProxy");

        m_pThreadProxy = pThreadProxy;
    }
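
    //
    // Illustrative sketch (assumed resource-manager-side shape, not from the
    // original source) of the contract documented above: SetProxy is called
    // before the context is dispatched, and GetProxy must return the saved
    // proxy for as long as Dispatch runs.
    //
    //     pContext->SetProxy(pThreadProxy);       // must precede Dispatch
    //     pContext->Dispatch(&dispatchState);     // proxy is stable in here
    //     CORE_ASSERT(pContext->GetProxy() == pThreadProxy);
    //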

    /// <summary>
    ///     Sets the UMS-blocked status on the previously running context.
    /// </summary>
    /// <param name="pPreviousContext">
    ///     The previously running context.
    /// </param>
    /// <param name="fAsynchronous">
    ///     Whether the previously running context was asynchronously blocked (e.g., it blocked in the kernel) rather than blocking cooperatively.
    /// </param>
    void UMSSchedulingContext::SetUMSBlocked(UMSThreadInternalContext *pPreviousContext, bool fAsynchronous)
    {
#if defined(_DEBUG)
        //
        // If this assertion fires, someone has called a blocking API between a ReleaseInternalContext and the time we switch off it.  Doing this
        // will corrupt state within the scheduler.
        //
        CORE_ASSERT((pPreviousContext->GetDebugBits() & CTX_DEBUGBIT_RELEASED) == 0);
        pPreviousContext->ClearDebugBits(CTX_DEBUGBIT_AFFINITIZED);
        pPreviousContext->SetDebugBits(CTX_DEBUGBIT_UMSBLOCKED);
#endif // _DEBUG

        CORE_ASSERT(pPreviousContext->m_pThreadProxy != NULL);

        pPreviousContext->NotifyBlocked(fAsynchronous);

        //
        // After this point, it might be running atop another vproc.  Remember that it may have come back on the completion list and been affinitized
        // prior to even getting into this code!
        //
    }

    /// <summary>
    ///     Claims the bound virtual processor and swallows an activation.
    /// </summary>
    void UMSSchedulingContext::ClaimBoundProcessorAndSwallowActivation()
    {
        if (!m_pBoundVProc->ClaimExclusiveOwnership())
        {
            //
            // Someone else activated us.  Swallow the event set.  The event isn't set until true is returned -- hence the loop.
            //
            while (!m_pBoundVProc->Deactivate(this));
        }
    }
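
    //
    // Sketch of the two outcomes of the claim above (descriptive only, not
    // original commentary):
    //
    //     claim succeeded -> no one activated us, so no activation event is
    //                        pending and there is nothing to swallow.
    //     claim failed    -> an activator already owns the vproc on our behalf
    //                        and will set the wake event; Deactivate is looped
    //                        until it returns true, which consumes the event so
    //                        it cannot spuriously satisfy a later deactivation.
    //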

    /// <summary>
    ///     The method that is called when a thread proxy starts executing a particular context.  The thread proxy which executes
    ///     the context is set in SetProxy before entering the dispatch loop and must be saved and returned on a call to the GetProxy method.
    /// </summary>
    /// <param name="pDispatchState">
    ///     The state under which this IExecutionContext is being dispatched.
    /// </param>
    void UMSSchedulingContext::Dispatch(DispatchState * pDispatchState)
    {
        const int PASS_COUNT_BEFORE_SLEEP_NORMAL = 1;
        const int PASS_COUNT_BEFORE_SLEEP_ORIGINALLY_ACTIVATED = 5;

        CORE_ASSERT(m_pThreadProxy != NULL); 
        SetAsCurrentTls();

#if defined(_DEBUG)
        DWORD fromBits = 0;
#endif // _DEBUG

        for(;;)
        {
            int pass = 0;
            UMSThreadInternalContext *pPreviousContext = static_cast<UMSThreadInternalContext *> (m_pBoundVProc->GetExecutingContext());
            ScheduleGroupBase *pGroup = (pPreviousContext == NULL ? m_pBoundVProc->m_pStartingGroup : pPreviousContext->m_pGroup);

            // **************************************************
            // READ THIS:
            //
            // Yet another incredibly subtle point about where we get suspended.  There are times in the scheduling context's
            // dispatch loop where we can't find work (the critical context is blocked, etc...) and we want to run through a
            // Deactivate pass in order to put the vproc to sleep, much as we do with an ordinary search for work in the dispatch loop.  The unfortunate thing
            // is that there's another context which thinks this is its exclusive purview.  We aren't going to try to maintain a complex state machine to
            // be able to restore its expected state, so we spin if that's the case.
            //
            // Ordinarily, you might think that we can simply check m_pBoundVProc->IsAvailable, however, there might be a race on that such as what follows:
            //
            // - Context 1 on vproc A makes the vproc available and then blocks
            // - Context 2 on vproc B claims exclusive ownership of the virtual processor (it suspends, takes a while, take your pick)
            // - We get in here and see the virtual processor as not available so we think we're safe to make it available
            // - We make the virtual processor available
            // - Context 3 on vproc C claims exclusive ownership of the virtual processor (now 2 contexts think they have exclusive ownership)
            //
            // There are other potential races as well.  What we really need to know is if there IS a context in the dispatch loop that has made the virtual
            // processor available.  It doesn't necessarily need to be pPreviousContext because the original context might have critically blocked in that region
            // and we might be running someone else.  Hence the rule -- you **MUST** stay in a critical region between the call to MakeAvailable and the call to Deactivate
            // without exception.  No other MakeAvailable is permitted.  Once we know what the critical context is, we can check it to see if IT thinks IT has flagged
            // the virtual processor.  That check must come **BEFORE** the call to MakeAvailable and must be fenced by the time m_fAvailable is set to true.
            // **************************************************
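
            //
            // A minimal sketch of the bracket that rule implies (assumed names,
            // not the original code):
            //
            //     pContext->EnterCriticalRegion();    // no other MakeAvailable may
            //                                         // happen inside this window
            //     pVProc->MakeAvailable();            // flag the vproc claimable
            //     pVProc->Deactivate(pContext);       // sleep until activated
            //     pContext->ExitCriticalRegion();
            //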

            bool fOriginallyAvailable = false;
            bool fMadeAvailable = false;

            int passes = fOriginallyAvailable ? PASS_COUNT_BEFORE_SLEEP_ORIGINALLY_ACTIVATED : PASS_COUNT_BEFORE_SLEEP_NORMAL;

            UMSThreadInternalContext::BlockingType blockingType = UMSThreadInternalContext::BlockingNormal;
            CriticalRegionType type = OutsideCriticalRegion;

            //
            // If someone explicitly switched back to the primary, don't do the UMS blocked bit.  Instead, just conduct the search from
            // the primary for runnables or invoke the reserved context as appropriate. This is accomplished by the fact that affinitize would clear
            // the executing proxy.
            //
            if (pPreviousContext != NULL)
            {
                VCMTRACE(MTRACE_EVT_UMSBLOCKED, pPreviousContext, m_pBoundVProc, NULL);

                CORE_ASSERT(pPreviousContext->UNSAFE_CurrentVirtualProcessor() == m_pBoundVProc);
                CORE_ASSERT(!pPreviousContext->IsBlocked());
                CORE_ASSERT(pPreviousContext->m_pThreadProxy != NULL);
#if defined(_DEBUG)
                //
                // If the context UMS blocks while it's holding a UMS blocked context prior to the switch, we can deadlock in a variety of ways.
                // Assert this instead of relying on stress to ferret this out.
                //
                CORE_ASSERT((pPreviousContext->GetDebugBits() & CTX_DEBUGBIT_HOLDINGUMSBLOCKEDCONTEXT) == 0);
#endif // _DEBUG
                type = pPreviousContext->GetCriticalRegionType();
            }

            CORE_ASSERT(type != InsideHyperCriticalRegion);

            if (m_pBoundVProc->m_pCriticalContext != NULL)
            {
                //
                // Only 1 context can be inside the critical region at a time
                //
                CORE_ASSERT(pPreviousContext->GetCriticalRegionType() == OutsideCriticalRegion);
            }
            else if (type != OutsideCriticalRegion)
            {
                //
                // A thread/context inside a critical region blocked
                //
                CORE_ASSERT(m_pBoundVProc->m_pCriticalContext == NULL);
                VCMTRACE(MTRACE_EVT_CRITICALBLOCK, pPreviousContext, m_pBoundVProc, NULL);
                m_pBoundVProc->m_pCriticalContext = pPreviousContext;
