// internalcontextbase.cpp
        ASSERT(SchedulerBase::FastCurrentContext() == this);
        ASSERT(m_pVirtualProcessor != NULL);

        TraceContextEvent(CONCRT_EVENT_YIELD, TRACE_LEVEL_INFORMATION, m_pScheduler->Id(), m_id);

        if (m_pVirtualProcessor->IsMarkedForRetirement())
        {
            // The virtual processor has been marked for retirement. The context needs to switch out rather 
            // than switching to a different context or continuing to run.
            SwitchOut(Yielding);
        }
        else
        {
            WorkItem work;
            if (m_pVirtualProcessor->SearchForWorkInYield(&work, m_pGroup))
            {
                if (!work.IsContext())
                {
                    //
                    // Bind the work item to a context outside of a critical region -- this prevents the huge cost of allocation
                    // (or worse -- thread creation) within a critical region.
                    //
                    ExitCriticalRegion();
                    CORE_ASSERT(GetCriticalRegionType() == OutsideCriticalRegion);
                    work.Bind();
                    EnterCriticalRegion();
                }

                CMTRACE(MTRACE_EVT_SFW_FOUND, this, m_pVirtualProcessor, work.GetContext());
                CMTRACE(MTRACE_EVT_SFW_FOUNDBY, work.GetContext(), m_pVirtualProcessor, this);

                ASSERT(work.GetContext() != NULL && work.GetContext() != this);

                SwitchTo(work.GetContext(), Yielding);
            }
            else
            {
                //
                // No need to cooperatively yield - there's no other runnable context to execute.
                // However, it is wise to check if the OS has any other threads available to run on the hardware thread.
                // On UMS, SwitchToThread will cause a transition to primary. We want to minimize such context
                // switches within a critical region. Exit the critical region and then SwitchToThread.
                //
                bSwitchToThread = true;
            }
        }
        ExitCriticalRegion();

        if (bSwitchToThread)
        {
            m_pThreadProxy->YieldToSystem();
        }
    }

    /// <summary>
    ///     Yields the virtual processor to a different runnable internal context if one is found.
    ///     
    ///     This is intended for spin loops.
    /// </summary>
    void InternalContextBase::SpinYield()
    {
        bool bSwitchToThread = false;

        EnterCriticalRegion();
        ASSERT(SchedulerBase::FastCurrentContext() == this);
        ASSERT(m_pVirtualProcessor != NULL);

        TraceContextEvent(CONCRT_EVENT_YIELD, TRACE_LEVEL_INFORMATION, m_pScheduler->Id(), m_id);

        if (m_pVirtualProcessor->IsMarkedForRetirement())
        {
            // The virtual processor has been marked for retirement. The context needs to switch out rather 
            // than switching to a different context or continuing to run.
            SwitchOut(Yielding);
        }
        else
        {
            WorkItem work;
            if (m_pVirtualProcessor->SearchForWork(&work, m_pGroup, WorkItem::WorkItemTypeContext))
            {
                CMTRACE(MTRACE_EVT_SFW_FOUND, this, m_pVirtualProcessor, work.GetContext());
                CMTRACE(MTRACE_EVT_SFW_FOUNDBY, work.GetContext(), m_pVirtualProcessor, this);

                ASSERT(work.GetContext() != NULL && work.GetContext() != this);

                SwitchTo(work.GetContext(), Yielding);
            }
            else
            {
                //
                // No need to cooperatively yield - there's no other runnable context to execute.
                // However, it is wise to check if the OS has any other threads available to run on the hardware thread.
                // On UMS, SwitchToThread will cause a transition to primary. We want to minimize such context
                // switches within a critical region. Exit the critical region and then SwitchToThread.
                //
                bSwitchToThread = true;
            }
        }
        ExitCriticalRegion();

        if (bSwitchToThread)
        {
            m_pThreadProxy->YieldToSystem();
        }
    }
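
    //
    // Usage sketch (illustrative, not part of the original file): the kind of spin loop SpinYield
    // is built for, written against the public Concurrency::Context API. SpinUntilSet and pFlag
    // are hypothetical names for the condition a spinner waits on. When no other runnable context
    // exists, the yield falls through to the OS, exactly as in the YieldToSystem path above.
    //
    //     static void SpinUntilSet(volatile LONG *pFlag)
    //     {
    //         while (InterlockedCompareExchange(pFlag, 0, 0) == 0)
    //         {
    //             // Cooperatively hand the virtual processor to another runnable context,
    //             // if one is found, rather than burning the hardware thread.
    //             Concurrency::Context::Yield();
    //         }
    //     }
    //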

    /// <summary>
    ///     See comments for Concurrency::Context::Oversubscribe.
    /// </summary>
    void InternalContextBase::Oversubscribe(bool beginOversubscription)
    {
        ASSERT(SchedulerBase::FastCurrentContext() == this);
        if (beginOversubscription)
        {
            // Increment the context over-subscription counter and only create an additional virtual processor
            // if the count goes from 0 to 1.
            ASSERT(m_oversubscribeCount >= 0);

            if (++m_oversubscribeCount == 1)
            {
                ASSERT(m_pOversubscribedVProc == NULL);

                // Oversubscribe the hardware thread virtual processor by injecting a virtual processor into the current virtual processors
                // group in the scheduling node.
                EnterCriticalRegion();
                // Oversubscribe invokes a callback to stamp the value of the oversubscribed virtual processor onto the context. The reason
                // for this is that we have to ensure that the vproc <-> context mapping is in place before the virtual processor is added
                // to the collection of vprocs in the scheduler. This is in order to synchronize with RemoveVirtualProcessor, which assumes
                // the virtual processor is fully initialized if it can find it in the collection.
                m_pVirtualProcessor->Oversubscribe();
                ExitCriticalRegion();
            }
        }
        else
        {
            // Decrement the context over-subscription counter and retire the oversubscribed virtual processor
            // if the count goes from 1 to 0.
            if (m_oversubscribeCount == 0)
            {
                throw invalid_oversubscribe_operation();
            }

            if (--m_oversubscribeCount == 0)
            {
                VirtualProcessor * pExpectedVProc = m_pOversubscribedVProc;

                // Note that pExpectedVProc could be null if the RM has already snapped this vproc for removal.
                VirtualProcessor * pVProc = GetAndResetOversubscribedVProc(pExpectedVProc);
                ASSERT(pVProc == NULL || pVProc == pExpectedVProc);

                // We must synchronize with a potential RemoveVirtualProcessor for this virtual processor due to the RM taking the underlying
                // core away. The winner of the interlocked exchange gets to retire the virtual processor.
                if (pVProc != NULL)
                {
                    pVProc->MarkForRetirement();
                }
            }
        }
    }
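
    //
    // Usage sketch (illustrative, not part of the original file): how client code drives the path
    // above through the public Concurrency::Context::Oversubscribe API, bracketing a call that
    // blocks the underlying thread. BlockingWork is a hypothetical name; Sleep stands in for any
    // long blocking operation.
    //
    //     void BlockingWork()
    //     {
    //         // Inject an extra virtual processor so other work can run while this context blocks.
    //         Concurrency::Context::Oversubscribe(true);
    //
    //         Sleep(1000); // stand-in for blocking I/O
    //
    //         // Retire the extra virtual processor. An unbalanced call while the count is zero
    //         // throws invalid_oversubscribe_operation, as in the code above.
    //         Concurrency::Context::Oversubscribe(false);
    //     }
    //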

    /// <summary>
    ///     Called to retrieve the oversubscribed vproc and reset it to NULL.
    /// </summary>
    VirtualProcessor * InternalContextBase::GetAndResetOversubscribedVProc(VirtualProcessor * pExpectedVirtualProcessor)
    {
        // Can be called concurrently by oversubscribing context and the RM. When called by the RM, the argument is 
        // non-NULL and represents what the RM thinks this context has as its oversubscribed vproc. The RM could 
        // have stale information and so if the virtual processor argument doesn't match what is on the context,
        // we return NULL, informing the RM that the virtual processor it was looking for was already marked for
        // retirement by this context previously.
        VirtualProcessor * pVirtualProcessor = NULL;

        if ((pExpectedVirtualProcessor != NULL) && (pExpectedVirtualProcessor == m_pOversubscribedVProc) &&
            (InterlockedCompareExchangePointer((volatile PVOID *)(&m_pOversubscribedVProc), (void*) 0, pExpectedVirtualProcessor) == pExpectedVirtualProcessor))
        {
            pVirtualProcessor = pExpectedVirtualProcessor;
        }

        return pVirtualProcessor;
    }
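
    //
    // Sketch (illustrative, assumed): the claim-once pattern the interlocked exchange above
    // implements, in isolation. Two racing callers (the oversubscribing context and the RM) each
    // attempt the exchange; exactly one observes the expected pointer and wins the right to retire
    // the virtual processor, while the other reads back NULL and backs off. TryClaim is a
    // hypothetical helper name.
    //
    //     void * TryClaim(void * volatile * ppSlot, void * pExpected)
    //     {
    //         if (InterlockedCompareExchangePointer((volatile PVOID *) ppSlot, NULL, pExpected) == pExpected)
    //             return pExpected;   // winner: this caller now owns retirement
    //
    //         return NULL;            // loser: the other party already claimed it
    //     }
    //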

    /// <summary>
    ///     Returns an identifier to the virtual processor the context is currently executing on, if any.
    /// </summary>
    unsigned int InternalContextBase::GetVirtualProcessorId() const
    {
        //
        // We really aren't changing anything, so cast away constness to enter the critical region.  The critical region is necessary
        // to guard against volatility from UMS reentrancy (due to a page fault) when accessing m_pVirtualProcessor.
        //
        (const_cast<InternalContextBase *>(this))->EnterCriticalRegion();
        unsigned int id = (m_pVirtualProcessor != NULL) ? m_pVirtualProcessor->GetId() : UINT_MAX;
        (const_cast<InternalContextBase *>(this))->ExitCriticalRegion();

        return id;
    }
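
    //
    // Usage sketch (illustrative): the identifier surfaces through the public static API. A return
    // value of UINT_MAX means the current context is not running on a virtual processor.
    //
    //     unsigned int vprocId = Concurrency::Context::VirtualProcessorId();
    //     if (vprocId == UINT_MAX)
    //     {
    //         // Not executing on a scheduler virtual processor.
    //     }
    //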

    /// <summary>
    ///     Adds the context to a runnables collection, either on the virtual processor or on the schedule group.
    /// </summary>
    /// <param name="pBias">
    ///     Bias any awakening of virtual processors to the scheduling node that pBias belongs to.
    /// </param>
    void InternalContextBase::AddToRunnables(VirtualProcessor *pBias)
    {
        ASSERT(m_pGroup != NULL);
        ASSERT(m_pThreadProxy != NULL);

        TRACE(TRACE_SCHEDULER, L"InternalContextBase::AddRunnable(ctx=%d,grp=%d,grpRef=%d)", GetId(), GetScheduleGroupId(), ScheduleGroupRefCount());

        ContextBase* pCurrentContext = SchedulerBase::FastCurrentContext();

        CMTRACE(MTRACE_EVT_ADDEDTORUNNABLES, this, NULL, pCurrentContext);
        CMTRACE(MTRACE_EVT_INVERTED_ADDEDTORUNNABLES, (pCurrentContext && !pCurrentContext->IsExternal()) ? static_cast<InternalContextBase *>(pCurrentContext) : NULL, NULL, this);

        //
        // First see if there is room to place 'this' on the cache of local realized chores
        // for the ambient context.  This attempts to maintain cache locality when Block/Unblock
        // is called in quick succession and the unblocking current context subsequently blocks.
        //
        if (pCurrentContext != NULL && !pCurrentContext->IsExternal() && (m_pScheduler == pCurrentContext->GetScheduler()))
        {
            InternalContextBase* pContext = static_cast<InternalContextBase*>(pCurrentContext);
            int count;
            //
            // The current virtual processor is only safely accessed within a critical region
            //
            pContext->EnterCriticalRegion();

            if (!m_pGroup->IsFairScheduleGroup()
                        &&
                ((count = pContext->m_pVirtualProcessor->m_localRunnableContexts.Count()) < m_pScheduler->m_localContextCacheSize))
            {
                //
                // If the current context does not belong to the same group, the caller is not guaranteed to have a reference to the
                // schedule group. We call CrossGroupRunnable() to make sure that scheduler and schedule group are kept around long
                // enough, that we can attempt to startup the virtual processor without fear of the scheduler being finalized, or the
                // schedule group being destroyed.
                // If the current context DOES belong to the same group as 'this', it is possible for it to be recycled to the idle pool
                // once we add it to the runnables collection. Since the m_pGroup field is reset to NULL when the context is recycled,
                // we cache it up front.
                //
                ScheduleGroupBase * pGroup = m_pGroup;
                if (pContext->GetScheduleGroup() != pGroup)
                {
                    // Set this flag to allow the calling thread to use m_pGroup safely once the context is pushed onto runnables.
                    // Note that this call does not need a fence. The addition of the context to the vproc LRC queue, which is a work-stealing
                    // queue, is unfenced, but since both setting the flag and adding to the queue result in volatile writes, other processors
                    // will see the stores in the same order. That means that when this context is visible to a stealer, the stealer will also
                    // see the cross group runnable bit set.
                    CrossGroupRunnable(TRUE);
                }

#if defined(_DEBUG)
                SetDebugBits(CTX_DEBUGBIT_ADDEDTOLOCALRUNNABLECONTEXTS);
                if (m_pScheduler->m_virtualProcessorAvailableCount > 0)
                    SetDebugBits(CTX_DEBUGBIT_LIKELYTOSTARTUPIDLEVPROCONOTHERCONTEXT);
