// ==++==
//
// Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// ScheduleGroupBase.cpp
//
// Implementation file for ScheduleGroupBase.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

#include "concrtinternal.h"

namespace Concurrency
{
namespace details
{

    /// <summary>
    ///     Constructs a schedule group with an initial reference count of 1.
    /// </summary>
    ScheduleGroupBase::ScheduleGroupBase(SchedulingRing *pRing) 
        : m_pRing(pRing),
          m_refCount(1),
          m_workQueues(pRing->m_pScheduler, 256, 64),
          m_detachedWorkQueues(pRing->m_pScheduler, 256, ListArray< ListArrayInlineLink<WorkQueue> >::DeletionThresholdInfinite) // No deletion
    {
        m_pScheduler = m_pRing->m_pScheduler;
        m_id = m_pScheduler->GetNewScheduleGroupId();
    }
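
    //
    // Illustrative sketch, not part of the original file: the group is created with m_refCount == 1, i.e. the
    // creator owns the initial reference and the group is torn down when the last reference is released. The
    // fragment below models that lifetime rule with std::atomic; the names (RefCounted, Reference, Release) are
    // assumptions for illustration only and are not the real ConcRT APIs.
    //
#if 0
    #include <atomic>

    class RefCounted
    {
    public:
        RefCounted() : m_refCount(1) { }                // the creator holds the first reference

        void Reference()
        {
            m_refCount.fetch_add(1, std::memory_order_relaxed);
        }

        void Release()
        {
            // The release/acquire pairing makes every write performed while the object was alive visible to the
            // thread that ends up deleting it.
            if (m_refCount.fetch_sub(1, std::memory_order_release) == 1)
            {
                std::atomic_thread_fence(std::memory_order_acquire);
                delete this;
            }
        }

    protected:
        virtual ~RefCounted() { }                       // protected: destruction only happens via Release()

    private:
        std::atomic<long> m_refCount;
    };
#endif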

    /// <summary>
    ///     Adds a runnable context to the schedule group. This is usually a previously blocked context that
    ///     was subsequently unblocked, but it could also be an internal context executing chores on behalf
    ///     of an external context.
    /// </summary>
    void ScheduleGroupBase::AddRunnableContext(InternalContextBase* pContext, VirtualProcessor *pBias)
    {
        ASSERT(pContext->GetScheduleGroup() == this);
        //
        // If the current context does not belong to this group, the caller is not guaranteed to have a reference to the
        // schedule group. We call CrossGroupRunnable() to make sure that the scheduler and schedule group are kept around
        // long enough that we can attempt to start up the virtual processor without fear of the scheduler being finalized
        // or the schedule group being destroyed.
        //
        ContextBase* pCurrentContext = SchedulerBase::FastCurrentContext();

        if ((pCurrentContext == NULL) || (pCurrentContext->GetScheduleGroup() != this))
        {
            // Set this flag to allow the calling thread to use 'this' safely once the context is pushed onto runnables.
            // Note that this call does not need a fence because it is fenced by the push to the runnable contexts collection.
            pContext->CrossGroupRunnable(TRUE);
        }

        // Add it to the actual collection.
        AddToRunnablesCollection(pContext);

        if (m_pScheduler->m_virtualProcessorAvailableCount > 0)
        {
            m_pScheduler->StartupIdleVirtualProcessor(this, pBias);
        }

        // Reset the flag, if it was set, since we are done touching scheduler/context data.
        // This flag is not fenced. This means the reader could end up spinning a little longer until the data is
        // propagated by the cache coherency mechanism.
        pContext->CrossGroupRunnable(FALSE);
        // NOTE: It is not safe to touch 'this' after this point, if this was a cross group runnable.
    }
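
    //
    // Illustrative sketch, not part of the original file: AddRunnableContext() above relies on a small handshake.
    // A publisher that may not hold a reference to this group raises a "cross-group" flag before pushing the
    // context (the push itself is the fence) and lowers it once it has finished touching scheduler/group state;
    // anyone who wants to retire the group must wait for the flag to drop. The fragment below models that shape
    // with standard atomics; CrossGroupGuard and its members are assumed names, not real ConcRT types.
    //
#if 0
    #include <atomic>
    #include <thread>

    struct CrossGroupGuard
    {
        std::atomic<bool> m_crossGroupBusy{false};

        // Publisher side: raise the flag, publish the item, then lower the flag. Here the publish() callback
        // stands in for the fenced push to the runnables collection.
        template <typename Publish>
        void PublishCrossGroup(Publish publish)
        {
            m_crossGroupBusy.store(true, std::memory_order_relaxed);
            publish();                                                  // fenced publication of the runnable context
            m_crossGroupBusy.store(false, std::memory_order_release);
        }

        // Teardown side: spin until any in-flight cross-group publisher is done touching us.
        void WaitForCrossGroupPublishers() const
        {
            while (m_crossGroupBusy.load(std::memory_order_acquire))
                std::this_thread::yield();
        }
    };
#endif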

    /// <summary>
    ///     Steals an unrealized chore from a workqueue in the schedule group.
    /// </summary>
    _UnrealizedChore* ScheduleGroupBase::StealUnrealizedChore() 
    {
        //
        // When we fail to steal from a work queue that's detached, it's an indication that the work queue
        // is finally empty and can be retired.
        //

        _UnrealizedChore *pChore;

        bool killEmptyQueues = false;
        int maxIndex = m_workQueues.MaxIndex();
        if (maxIndex > 0)
        {
            int skippedCount = 0;
            const int maxSkippedCount = 16;
            int skippedState[maxSkippedCount];
            bool fEntered = false;

            for (int j = 0; j < maxIndex; j++)
            {
                WorkQueue *pQueue = m_workQueues[j];
                if (pQueue != NULL)
                {
                    if ( !pQueue->IsEmpty())
                    {
                        if ((pChore = pQueue->TryToSteal(fEntered)) != NULL)
                            return pChore;
                        else if ( !fEntered)
                        {
                            if (skippedCount < maxSkippedCount-1)
                            {
                                skippedState[skippedCount++] = j;
                                continue;
                            }
                            else if ((pChore = pQueue->Steal()) != NULL)
                                return pChore;
                        }

                        killEmptyQueues |= (pQueue->IsDetached() && pQueue->IsEmpty());
                    }
                    else
                        killEmptyQueues |= pQueue->IsDetached();
                }
            }

            if (skippedCount > 0)
            {
                for (int j = 0; j < skippedCount; j++)
                {
                    WorkQueue *pQueue = m_workQueues[skippedState[j]];
                    if (pQueue != NULL)
                    {
                        if ( !pQueue->IsEmpty() && (pChore = pQueue->Steal()) != NULL)
                            return pChore;
                        else
                            killEmptyQueues |= (pQueue->IsDetached() && pQueue->IsEmpty());
                    }
                }
            }
        }

        int numDetachedArrays = m_detachedWorkQueues.MaxIndex();
        if (numDetachedArrays > 0 && killEmptyQueues)
        {
            for (int i = 0; i < m_workQueues.MaxIndex(); i++)
            {
                WorkQueue *pQueue = m_workQueues[i];
                if (pQueue != NULL)
                {
                    if (pQueue->IsDetached() && pQueue->IsUnstructuredEmpty()) 
                    {
                        SafelyDeleteDetachedWorkQueue(pQueue);
                    }
                }
            }
        }

        return NULL;
    }
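
    //
    // Illustrative sketch, not part of the original file: StealUnrealizedChore() above is a two-pass scan. Pass 1
    // makes a cheap, non-blocking attempt on every queue and remembers the queues whose lock was contended; pass 2
    // revisits only those queues and forces a blocking steal. The fragment below models that shape with
    // std::mutex::try_lock (and omits the skip-count cap and the detached-queue cleanup); SimpleQueue, TryToSteal,
    // Steal and TwoPassSteal are assumed names that approximate, rather than reproduce, the real WorkQueue interface.
    //
#if 0
    #include <deque>
    #include <mutex>
    #include <vector>

    struct SimpleQueue
    {
        std::mutex m_lock;
        std::deque<int> m_items;

        // Non-blocking attempt: 'entered' reports whether we even acquired the lock.
        bool TryToSteal(int& item, bool& entered)
        {
            entered = m_lock.try_lock();
            if (!entered)
                return false;
            std::lock_guard<std::mutex> adopt(m_lock, std::adopt_lock);
            if (m_items.empty())
                return false;
            item = m_items.front();
            m_items.pop_front();
            return true;
        }

        // Blocking steal used on the second pass.
        bool Steal(int& item)
        {
            std::lock_guard<std::mutex> lock(m_lock);
            if (m_items.empty())
                return false;
            item = m_items.front();
            m_items.pop_front();
            return true;
        }
    };

    bool TwoPassSteal(std::vector<SimpleQueue*>& queues, int& item)
    {
        std::vector<size_t> skipped;

        for (size_t i = 0; i < queues.size(); ++i)      // pass 1: cheap, non-blocking attempts
        {
            bool entered = false;
            if (queues[i]->TryToSteal(item, entered))
                return true;
            if (!entered)
                skipped.push_back(i);                   // lock was contended; revisit later
        }

        for (size_t i : skipped)                        // pass 2: force the steal on contended queues
        {
            if (queues[i]->Steal(item))
                return true;
        }

        return false;
    }
#endif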

    /// <summary>
    ///     Returns true if the group has any realized chores.
    ///     This is used during scheduler finalization when only one thread is active in the scheduler.
    ///     At any other time, this information is stale since new work could get added to the scheduler.
    /// </summary>
    bool ScheduleGroupBase::HasRealizedChores() const
    {
        return !m_realizedChores.Empty();
    }

    /// <summary>
    ///     Returns true if any of the workqueues in the schedule group has unrealized chores.
    ///     This is used during scheduler finalization when only one thread is active in the scheduler.
    ///     At any other time, this information is stale since new work could get added to the scheduler.
    /// </summary>
    bool ScheduleGroupBase::HasUnrealizedChores()
    {
        for (int i = 0; i < m_workQueues.MaxIndex(); i++)
        {
            WorkQueue *pQueue = m_workQueues[i];
            if (pQueue != NULL)
            {
                if (!pQueue->IsStructuredEmpty() || !pQueue->IsUnstructuredEmpty())
                {
                    return true;
                }
                else if (pQueue->IsDetached())
                {
                    SafelyDeleteDetachedWorkQueue(pQueue);
                }
            }
        }

        return false;
    }
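
    //
    // Illustrative sketch, not part of the original file: HasUnrealizedChores() above, and the comments in
    // SafelyDeleteDetachedWorkQueue() below, describe a single ownership rule for retiring detached queues --
    // several threads may notice that a detached queue looks empty, but only the thread that actually removes it
    // from the shared detached list wins the right to free it, and even then only if the queue is still empty
    // after the removal. The fragment below models that rule; DetachedRegistry, DetachedQueue and TryRetire are
    // assumed names, and the mutex-protected std::set stands in for the lock-free ListArray.
    //
#if 0
    #include <deque>
    #include <mutex>
    #include <set>

    struct DetachedQueue;

    struct DetachedRegistry
    {
        std::mutex m_lock;
        std::set<DetachedQueue*> m_detached;

        void Add(DetachedQueue* pQueue)
        {
            std::lock_guard<std::mutex> lock(m_lock);
            m_detached.insert(pQueue);
        }

        // Returns true only for the single caller that performed the removal.
        bool Remove(DetachedQueue* pQueue)
        {
            std::lock_guard<std::mutex> lock(m_lock);
            return m_detached.erase(pQueue) != 0;
        }
    };

    struct DetachedQueue
    {
        std::mutex m_lock;
        std::deque<int> m_items;

        bool IsEmpty()
        {
            std::lock_guard<std::mutex> lock(m_lock);
            return m_items.empty();
        }
    };

    // Racing cleaners funnel through Remove(): the loser walks away, and the winner re-checks emptiness (the
    // queue may have been reattached and refilled in the meantime) before freeing it.
    bool TryRetire(DetachedRegistry& registry, DetachedQueue* pQueue)
    {
        if (!registry.Remove(pQueue))
            return false;                   // another thread owns the removal; nothing to do here
        if (!pQueue->IsEmpty())
        {
            registry.Add(pQueue);           // refilled after we looked; hand it back (a sketch-only simplification)
            return false;
        }
        delete pQueue;
        return true;
    }
#endif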

    /// <summary>
    ///     Called to safely delete a detached work queue -- this is lock free and utilizes safe points to perform
    ///     the deletion and dereference.  It can be called safely during the normal search for work (SFW) or during
    ///     the finalization sweep.
    /// </summary>
    bool ScheduleGroupBase::SafelyDeleteDetachedWorkQueue(WorkQueue *pQueue)
    {
        //
        // The way in which we resolve race conditions between this and queue reattachment is by who is able to remove the
        // element from the detached list array.  We cannot kill the work queue until it's gone out of that list array.
        //
        if (m_detachedWorkQueues.Remove(&pQueue->m_detachment, false))
        {
            //
            // There's always the possibility of a very subtle race where we check IsDetached and IsUnstructuredEmpty and then
            // are preempted, the queue is reattached, work is added, and it's detached again in the same spot with work.  We
            // cannot free the queue in such a circumstance.  Only if it is empty AFTER removal from m_detachedWorkQueues are
            // we safe.
            //
            if (pQueue->IsUnstructuredEmpty())
            {
                //
                // Each detached work queue holds a reference on the group.  It is referenced
                // in ScheduleGroupBase::DetachActiveWorkQueue().  Since we are removing this
                // empty work queue, we need to release the reference.
                //
                // There's an unfortunate reality here -- this work queue might be the LAST thing holding a reference to
                // the schedule group.  It's entirely possible that someone just stole and hasn't yet gotten to the point
                // where a reference is added to the schedule group.  If we arbitrarily release this reference, we might delete
                // (or reuse) an active schedule group.  This could cause all sorts of problems.
                //
                // Instead of trying to release that reference here, we will wait until the next safe point to do so.  We 
                // are guaranteed no one is in the middle of stealing from this schedule group at that time.
                //
                // Note that this means that the stealer **MUST** stay within a critical region until after the WorkItem::TransferReferences
