virtualprocessor.cpp
            ContextBase *pCurrentContext = SchedulerBase::FastCurrentContext();
            VCMTRACE(MTRACE_EVT_CLAIMEDOWNERSHIP, (pCurrentContext && !pCurrentContext->IsExternal()) ? static_cast<InternalContextBase *>(pCurrentContext) : NULL, this, SchedulerBase::FastCurrentContext());
#endif // _UMSTRACE

            if (!IsHidden())
            {
                InterlockedDecrement(&m_pOwningNode->m_pScheduler->m_virtualProcessorAvailableCount);
                InterlockedDecrement(&m_pOwningNode->m_virtualProcessorAvailableCount);
            }

            return true;
        }
        return false;
    }

    /// <summary>
    ///     Makes a virtual processor available for scheduling work.
    /// </summary>
    void VirtualProcessor::MakeAvailable()
    {
        ASSERT(m_fAvailable == FALSE);

#if _UMSTRACE
        ContextBase *pCurrentContext = SchedulerBase::FastCurrentContext();
        VCMTRACE(MTRACE_EVT_MADEAVAILABLE, (pCurrentContext && !pCurrentContext->IsExternal()) ? static_cast<InternalContextBase *>(pCurrentContext) : NULL, this, NULL);
#endif // _UMSTRACE

        InterlockedIncrement(&m_pOwningNode->m_pScheduler->m_virtualProcessorAvailableCount);
        InterlockedIncrement(&m_pOwningNode->m_virtualProcessorAvailableCount);
        InterlockedExchange(&m_fAvailable, TRUE);
    }
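
    //
    // MakeAvailable publishes this virtual processor by bumping the available counts and then setting
    // m_fAvailable; the matching claim path decrements the counts again once it wins ownership, so
    // exactly one claimant succeeds. A minimal, self-contained sketch of that interlocked
    // claim/release idiom follows; the flag, counter, and function names are hypothetical stand-ins,
    // not members of VirtualProcessor.
    //

    static volatile LONG s_fSketchAvailable = TRUE;
    static volatile LONG s_sketchAvailableCount = 1;

    static bool SketchTryClaim()
    {
        // Atomically flip the flag; only the caller that observes the TRUE -> FALSE transition wins.
        if (InterlockedExchange(&s_fSketchAvailable, FALSE) == TRUE)
        {
            InterlockedDecrement(&s_sketchAvailableCount);
            return true;
        }
        return false;
    }

    static void SketchRelease()
    {
        // Publish the count first, then the flag, mirroring the order used by MakeAvailable.
        InterlockedIncrement(&s_sketchAvailableCount);
        InterlockedExchange(&s_fSketchAvailable, TRUE);
    }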

    /// <summary>
    ///     Oversubscribes the virtual processor by creating a new virtual processor root affinitized to the same
    ///     execution resource as that of the current root.
    /// </summary>
    /// <returns>
    ///     A virtual processor that oversubscribes this one.
    /// </returns>
    VirtualProcessor * VirtualProcessor::Oversubscribe()
    {
        IVirtualProcessorRoot *pOversubscriberRoot = GetOwningNode()->GetScheduler()->GetSchedulerProxy()->CreateOversubscriber(m_pOwningRoot);
        ASSERT(pOversubscriberRoot != NULL);

        return m_pOwningNode->AddVirtualProcessor(pOversubscriberRoot, true);
    }
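
    //
    // Oversubscription is normally reached through the public API rather than by calling this method
    // directly: work running on a ConcRT scheduler brackets a blocking call with
    // Concurrency::Context::Oversubscribe(true/false), and the scheduler stands up a temporary extra
    // virtual processor on the same execution resource for the duration. A hedged usage sketch follows;
    // the function below and the Sleep stand-in for a blocking operation are illustrative only.
    //

    static void SketchOversubscribedBlockingCall()
    {
        // Ask the scheduler for an extra vproc on this core while the underlying thread blocks.
        Concurrency::Context::Oversubscribe(true);

        ::Sleep(100);   // stand-in for an operation that blocks the underlying thread

        // End oversubscription; the extra vproc is retired by the scheduler.
        Concurrency::Context::Oversubscribe(false);
    }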

    /// <summary>
    ///     Causes the virtual processor to remove itself from the scheduler. This is used either when oversubscription
    ///     ends or when the resource manager asks the vproc to retire.
    /// </summary>
    void VirtualProcessor::Retire()
    {
        //
        // If this virtual processor is throttled, it's on a list in the background thread.  Remove it from that list so we do not have the background thread
        // attempting to wake a reused virtual processor after this goes on the free list!
        //
        if (IsThrottled())
            GetOwningNode()->GetScheduler()->RemoveThrottleOversubscriber(this);

        if (m_fRambling)
            UpdateRamblingState(false, NULL);

        // Virtual processor available counts are already decremented by this point. We need to decrement the total counts
        // on both the node and the scheduler. Oversubscribed vprocs do not contribute to the total vproc count on the scheduler.
        InterlockedDecrement(&m_pOwningNode->m_virtualProcessorCount);
        if (!m_fOversubscribed)
        {
            InterlockedDecrement(&m_pOwningNode->m_pScheduler->m_virtualProcessorCount);
        }

        // Since the virtual processor is going away, we'd like to preserve its statistical counts.
        m_pOwningNode->GetScheduler()->SaveRetiredVirtualProcessorStatistics(this);

        // If this is a virtual processor currently associated with an executing context, it's important to assert there that
        // the scheduler is not shutting down. We want to make sure that all virtual processor root removals (for executing virtual
        // processors) occur before the scheduler shuts down. This will ensure that all IVirtualProcessorRoot::Remove calls
        // that can originate from a scheduler's internal contexts are received by the RM before the ISchedulerProxy::Shutdown call,
        // which asks the RM to release all resources and destroy the remaining virtual processor roots allocated to the scheduler.
        // RM should not receive Remove calls for roots that are already destroyed.
        ASSERT(m_pExecutingContext == NULL || ToInternalContext(m_pExecutingContext) == SchedulerBase::FastCurrentContext());
        ASSERT(m_pExecutingContext == NULL || (!m_pOwningNode->GetScheduler()->InFinalizationSweep() && !m_pOwningNode->GetScheduler()->HasCompletedShutdown()));

        m_pExecutingContext = NULL;

        // Check if there are contexts in the Local Runnables Collection and put them into the collection of runnables in their
        // respective schedule groups.
        InternalContextBase *pContext = GetLocalRunnableContext();
        while (pContext != NULL)
        {
            pContext->GetScheduleGroup()->AddRunnableContext(pContext);
            pContext = GetLocalRunnableContext();
        }

        // Remove takes an IScheduler pointer rather than the scheduler object itself, so
        // GetIScheduler() provides the extra level of indirection.
        m_pOwningRoot->Remove(m_pOwningNode->GetScheduler()->GetIScheduler());
        m_pOwningRoot = NULL;

        TraceVirtualProcessorEvent(CONCRT_EVENT_END, TRACE_LEVEL_INFORMATION, m_pOwningNode->m_pScheduler->Id(), m_id);

        if (m_pSubAllocator != NULL)
        {
            SchedulerBase::ReturnSubAllocator(m_pSubAllocator);
            m_pSubAllocator = NULL;
        }

        // Removing this VirtualProcessor from the ListArray will move it to a pool for reuse.
        // This must be done at the end of this function; otherwise, this virtual processor itself could be
        // pulled out of the list array for reuse and stomped over before retirement is complete.
        m_pOwningNode->m_virtualProcessors.Remove(this);
        // *DO NOT* touch 'this' after removing it from the list array.
    }

    /// <summary>
    ///     Returns a pointer to the suballocator for the virtual processor.
    /// </summary>
    SubAllocator * VirtualProcessor::GetCurrentSubAllocator()
    {
        if (m_pSubAllocator == NULL)
        {
            m_pSubAllocator = SchedulerBase::GetSubAllocator(false);
        }
        return m_pSubAllocator;
    }

    /// <summary>
    ///     Updates the virtual processor's rambling state. Rambling means searching foreign scheduling rings
    ///     for work; when work is found, the owning and current ring state is updated accordingly.
    /// </summary>
    void VirtualProcessor::UpdateRamblingState(bool rambling, SchedulingRing *pCurrentRing)
    {
        //
        // One vproc triggers the owning ring change for the whole node;
        // the other vprocs have m_pCurrentRing changed out from under them.
        //
        if (m_pCurrentRing != pCurrentRing)
        {
            if (rambling)
            {
                //
                // searching foreign rings
                //
                if ( !m_fRambling)
                {
#if _UMSTRACE
                    InternalContextBase *pCurrentContext = static_cast<InternalContextBase *>(SchedulerBase::FastCurrentContext());
                    CMTRACE(MTRACE_EVT_UPDATERAMBLING_RING, pCurrentContext, this, pCurrentRing);
#endif // _UMSTRACE
	
                    //
                    // Searching a foreign ring for the first time
                    //

                    m_pCurrentRing = pCurrentRing;

                    m_fRambling = TRUE;
                    
                    //
                    // Check to see if owning ring needs to be changed.
                    // When all vprocs in owning node are rambling, then we change the owning ring
                    // to be the one on which the maximal number of the owning node's vprocs
                    // are working.
                    //
                    if (InterlockedIncrement(&m_pOwningNode->m_ramblingCount) == m_pOwningNode->m_virtualProcessorCount)
                        m_pOwningNode->CheckForNewOwningRing();
                }
            }
            else if (m_fRambling)
            {
                // There is a by-design race here.
                //
                // The idea is to have 2 mechanisms to change owning ring.  
                // 1) SchedulingNode::CheckForNewOwningRing sees if all vprocs in the current node
                // are executing on some ring other than the node's associated ring (viz. they
                // are all rambling) and, if so, changes the owning ring of all vprocs on the node
                // to the ring that has the most vprocs (on the node) executing on it.
                // 2) When a vproc again finds work on its node's associated ring, provide stickiness 
                // by resetting the owning and current ring to the node's associated ring.
                //
                // However, another path (CheckForNewOwningRing) could concurrently change the
                // owning and current ring.  This race is acceptable, because this vproc has found work
                // on its node's associated ring.
                //

                //
                // Reset back to node's associated ring (m_pOwningNode->GetSchedulingRing()).
                //
                m_pOwningRing = m_pCurrentRing = m_pOwningNode->GetSchedulingRing();

                // m_fRambling is set to false on multiple paths. 
                // Decrement the count only if this path successfully sets it to false.
                if (InterlockedExchange(&m_fRambling, FALSE) == TRUE)
                    InterlockedDecrement(&m_pOwningNode->m_ramblingCount);
            }
        }
    }
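
    //
    // The rambling bookkeeping above uses a common interlocked idiom: every participant increments a
    // shared counter, and only the participant whose increment makes the count reach the group size
    // performs the group-wide action (here, re-evaluating the owning ring). A minimal sketch of the
    // idiom follows; the counter, constant, and function names are hypothetical stand-ins rather than
    // members of this class.
    //

    static volatile LONG s_sketchRamblingCount = 0;
    static const LONG s_sketchVprocCount = 4;

    static void SketchOnStartedRambling()
    {
        // The vproc whose increment completes the set is the one that triggers the group-wide change.
        if (InterlockedIncrement(&s_sketchRamblingCount) == s_sketchVprocCount)
        {
            // Group-wide action would go here (e.g., pick the ring with the most vprocs on it).
        }
    }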

    /// <summary>
    ///     Send a virtual processor ETW event.
    /// </summary>
    void VirtualProcessor::ThrowVirtualProcessorEvent(ConcRT_EventType eventType, UCHAR level, DWORD schedulerId, DWORD vprocId)
    {
        if (g_pEtw != NULL)
        {
            CONCRT_TRACE_EVENT_HEADER_COMMON concrtHeader = {0};

            concrtHeader.header.Size = sizeof concrtHeader;
            concrtHeader.header.Flags = WNODE_FLAG_TRACED_GUID;
            concrtHeader.header.Class.Type = (UCHAR)eventType;
            concrtHeader.header.Class.Level = level;
            concrtHeader.header.Guid = VirtualProcessorEventGuid;

            concrtHeader.SchedulerID = schedulerId;
            concrtHeader.VirtualProcessorID = vprocId;

            g_pEtw->Trace(g_ConcRTSessionHandle, &concrtHeader.header);
        }
    }
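
    //
    // ThrowVirtualProcessorEvent emits a "classic" ETW event: an EVENT_TRACE_HEADER followed by a flat
    // payload, written with TraceEvent against an open session handle. A self-contained sketch of the
    // same pattern follows; the struct, GUID parameter, and session handle are illustrative stand-ins,
    // not the runtime's own definitions.
    //

    struct SKETCH_TRACE_EVENT
    {
        EVENT_TRACE_HEADER header;
        DWORD payloadValue;
    };

    static void SketchEmitEvent(TRACEHANDLE sessionHandle, const GUID& eventGuid, DWORD value)
    {
        SKETCH_TRACE_EVENT evt = {0};

        evt.header.Size = sizeof evt;                       // header + payload, in bytes
        evt.header.Flags = WNODE_FLAG_TRACED_GUID;          // the Guid field identifies the event
        evt.header.Class.Type = 1;                          // event type within that GUID
        evt.header.Class.Level = TRACE_LEVEL_INFORMATION;   // verbosity/severity
        evt.header.Guid = eventGuid;
        evt.payloadValue = value;

        ::TraceEvent(sessionHandle, &evt.header);           // return value ignored in this sketch
    }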

#if _UMSTRACE
    void VirtualProcessor::TraceSearchedLocalRunnables()
    {
        ContextBase *pCurrentContext = SchedulerBase::FastCurrentContext();
        CMTRACE(MTRACE_EVT_SEARCHEDLOCALRUNNABLES, (pCurrentContext && !pCurrentContext->IsExternal()) ? static_cast<InternalContextBase *>(pCurrentContext) : NULL, this, NULL);
    }
#endif // _UMSTRACE

    /// <summary>
    ///     Returns pContext type-cast to an InternalContextBase (NULL if pContext is NULL).
    /// </summary>
    InternalContextBase *VirtualProcessor::ToInternalContext(IExecutionContext *pContext)
    {
        return static_cast<InternalContextBase *>(pContext);
    }

    /// <summary>
    ///     Called when the context running atop this virtual processor has reached a safe point.
    /// </summary>
    /// <returns>
    ///     An indication of whether the caller should perform a commit.
    /// </returns>
    bool VirtualProcessor::SafePoint()
    {
        return GetOwningNode()->GetScheduler()->MarkSafePoint(&m_safePointMarker);
    }

} // namespace details
} // namespace Concurrency
