taskcollection.cpp (C++ source listing, page 1 of 5)
// ==++==
//
// Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// TaskCollection.cpp
//
// Internal implementation of task collections and related data structures
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

#include "concrtinternal.h"
#include <exception>

namespace Concurrency
{
namespace details
{
    /// <summary>
    ///     Destroys a task stack.
    /// </summary>
    TaskStack::~TaskStack()
    {
        if (m_pStack)
            delete [] m_pStack;
    }

    /// <summary>
    ///     Pushes an element onto the task stack.  Returns a bool indicating whether the push was recorded; once the stack
    ///     reaches its size cap, further pushes are ignored.
    /// </summary>
    /// <param name="taskCookie">
    ///     The task cookie to push onto the stack
    /// </param>
    /// <returns>
    ///     True if the element was pushed; false if the stack cap was reached and the push was ignored.
    /// </returns>
    bool TaskStack::Push(int taskCookie)
    {
        if (m_stackPtr >= m_stackSize)
        {
            //
            // Prevent the task stack from growing beyond a predetermined size cap.  If we exceed this cap, we will ignore the push.
            // Note that the CHORE itself is still pushed to the work stealing queue and can still be stolen.  It just won't be on the inlining
            // list within the task collection.  What this means is that a call to Wait will *NOT* be able to inline the chore.  It also means that 
            // any call to Wait after this return will suffer a *HUGE* penalty as every pop will be out-of-order and incur additional fencing
            // in the work stealing queue.
            //
            // The reason we cap this is specifically because we allow passing task collections between threads.  It's entirely possible to have a pattern where
            // one thread (thread A) continues to add items to a task collection while another thread (thread B) waits on it.  They never reverse roles.  In this case,
            // the direct alias for thread A will continue to pile up items on this stack (the inlining list).  Since Wait is never called from that thread, the
            // stack is never popped.  Without a cap, this list would grow without bound.  Note that in this scenario, there is no penalty in continuing to add
            // chores.  The only time a penalty is incurred is if Wait is called (and once the collection resets, the penalty goes away until the cap is reached
            // again).
            //
            if (m_stackPtr >= TASK_STACK_SIZE_CAP)
            {
                m_fOverflow = true;
                return false;
            }

            int size = m_stackSize + TASK_STACK_GROWTH_SIZE;
            int *pNewStack = new int[size];

            memcpy(pNewStack, m_pStack, sizeof(int) * m_stackSize);
            m_stackSize = size;

            delete[] m_pStack;
            m_pStack = pNewStack;
        }

        ASSERT(m_stackPtr < m_stackSize);
        m_pStack[m_stackPtr++] = taskCookie;

        return true;
    }

    /// <summary>
    ///     Pops an element from the task stack.
    /// </summary>
    /// <returns>
    ///     The element
    /// </returns>
    int TaskStack::Pop()
    {
        ASSERT(m_stackPtr > 0);
        return m_pStack[--m_stackPtr];
    }

    /// <summary>
    ///     Returns an indication of whether or not the stack is empty.
    /// </summary>
    bool TaskStack::IsEmpty() const
    {
        return m_stackPtr == 0;
    }

    /// <summary>
    ///     Clears out everything on the stack.
    /// </summary>
    void TaskStack::Clear()
    {
        m_stackPtr = 0;
    }
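
    //
    // Illustrative sketch (not part of the original source): how the TaskStack contract above behaves.  Push records task cookies,
    // growing the backing array by TASK_STACK_GROWTH_SIZE until TASK_STACK_SIZE_CAP is reached, after which pushes are ignored and
    // Push returns false.  The constructor and the cap/growth constants are assumed to come from the internal headers.  Guarded with
    // #if 0 so it does not participate in the build.
    //
#if 0
    static void TaskStackUsageSketch()
    {
        TaskStack stack;

        //
        // Push cookies until the size cap is hit; Push returns false (and latches the overflow flag) once the cap is reached,
        // and the stack stops recording cookies from that point on.
        //
        int pushed = 0;
        while (stack.Push(pushed))
            ++pushed;

        //
        // Cookies come back in LIFO order; Clear simply resets the stack pointer without releasing the backing storage.
        //
        while (!stack.IsEmpty())
        {
            int cookie = stack.Pop();
            (void) cookie;
        }

        stack.Clear();
    }
#endif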

    // **********************************************************************
    // Structured Task Collection:
    // **********************************************************************

    /// <summary>
    ///     Schedules a new unrealized chore on the task collection.
    /// </summary>
    /// <param name="_PChore">
    ///     The new unrealized chore to schedule
    /// </param>
    void _StructuredTaskCollection::_Schedule(_UnrealizedChore * _PChore)
    {
        if (_PChore->_M_pTaskCollection != NULL)
            throw invalid_multiple_scheduling();

        _PChore->_M_pTaskCollection = this;
        _PChore->_M_pChoreFunction = &_UnrealizedChore::_StructuredChoreWrapper;
        ++_M_unpoppedChores;
        if (_M_pOwningContext == NULL) 
            _M_pOwningContext = SchedulerBase::CurrentContext();
        reinterpret_cast <ContextBase *> (_M_pOwningContext)->PushStructured(_PChore);
    }
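
    //
    // Illustrative sketch (not part of the original source): the public structured_task_group / task_handle types in <ppl.h> are,
    // to the best of our knowledge, backed by _StructuredTaskCollection, so the client-side pattern below is what ultimately drives
    // _Schedule for each queued chore and _RunAndWait on the waiting context.  Guarded with #if 0; it is written as it would appear
    // in a client translation unit.
    //
#if 0
    void StructuredTaskGroupSketch()
    {
        using namespace Concurrency;

        int left = 0;
        int right = 0;

        structured_task_group tg;

        //
        // Each run() call schedules an unrealized chore on the group's underlying task collection; wait() pops and inlines whatever
        // was not stolen and then waits for the stolen remainder, mirroring _Schedule and _RunAndWait above.
        //
        auto leftTask = make_task([&left] { left = 1; });
        auto rightTask = make_task([&right] { right = 2; });

        tg.run(leftTask);
        tg.run(rightTask);

        task_group_status status = tg.wait();
        (void) status; // completed, unless something canceled the group.
    }
#endif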

    /// <summary>
    ///     Runs a specified chore (pChore) and subsequently waits on all chores associated with the task collection
    ///     to execute.
    /// </summary>
    /// <param name="pChore">
    ///     The chore to run locally.
    /// </param>
    /// <returns>
    ///     An indication of the status of the wait.
    /// </returns>
    __declspec(noinline)
    _TaskCollectionStatus __stdcall _StructuredTaskCollection::_RunAndWait(_UnrealizedChore *pChore)
    {
        ASSERT(_M_pOwningContext != NULL || _M_unpoppedChores == 0);
        if (_M_pOwningContext == NULL)
            _M_pOwningContext = SchedulerBase::CurrentContext();
        ContextBase *pCurrentContext = reinterpret_cast <ContextBase *> (_M_pOwningContext);

        _M_pParent = pCurrentContext->GetExecutingCollection();
        pCurrentContext->SetExecutingCollection(this);
        _M_inliningDepth = _M_pParent != NULL ? _M_pParent->_InliningDepth() + 1 : 0;

        try
        {
            if (pChore != NULL)
            {
                //
                // Ordinarily, we need a full fence here to ensure that the write of _M_inliningDepth and the read of the context cancellation
                // flag are not reordered with respect to each other as perceived by a cancellation thread.  If they are, the cancellation thread
                // can miss flagging an entire branch of the work tree rooted at pChore.
                //
                // The scenario is as follows:
                //
                //  - 
                // |A|
                //  -
                //  | \
                //  |  (ch x -- already stolen) [](){A.cancel();}
                //  |
                //  | 
                //  (ch y -- local chore -- pChore)
                //
                // - ch y checks whether it is locally marked for cancellation
                // - ch x cancels.  It doesn't observe _M_inliningDepth yet because there is no barrier on this thread here
                //   therefore, it does not cancel the context
                // - We execute pChore.  pChore's descendants do not see the cancellation because the context flag was not set
                //
                // While a full fence here addresses this issue, it is a cost we do not want to bear during the fast inlining path.  Because of
                // the special properties of structured task collections, we are going to exploit this nature to elide the fence.  Every time a
                // structured chore cancels, the owning context is going to be marked as "pending cancellation" without restriction.  We check
                // the pending flag and then handle things specially for this interruption point.  The below interruption points do not have to
                // perform this special semantic because they all have full barriers before they check flags.
                //
                if (_IsMarkedForCancellation() || pCurrentContext->HasAnyCancellation())
                {
                    _Interrupt(_IsMarkedForCancellation());
                }

                pChore->m_pFunction(pChore);
                pChore->_M_pTaskCollection = NULL;
            }

            while (_M_unpoppedChores > 0)
            {
                pChore = pCurrentContext->PopStructured();

                //
                // **READ THIS** (This is rather subtle):
                //
                // In order to avoid a restriction on structured task collections that there cannot be an interruption point between the declaration
                // of the collection and its corresponding wait, we must guarantee that we only flag the owning context as canceled if the collection
                // is inlined (as evidenced by _M_inliningDepth above).  The problem is that there is **NO FENCE** on this set.  That means that if the
                // cancellation thread perceives the write of _M_inliningDepth out of order with respect to OUR read of the cancellation flags below,
                // this branch can fail to cancel for a single chore (and its nested subtrees).
                //
                // In order to avoid this (in at least the vast majority of cases), the interruption point is being strategically placed between the
                // PopStructured call above and the execution of the chore because Pop is -- the vast majority of the time -- a full barrier.  We are,
                // in essence, borrowing the full fence in pop in order to eliminate this race.
                //
                // Note -- one of the optimizations of the WSQ (privatization) which may occur in the future can elide the fence on pop some of the time.
                // If this happens, it is entirely possible that in rare circumstances, we will STILL miss and the write/read will be perceived in the opposite
                // order by the canceling processor.  In that case, the worst thing that happens is that we execute a single chore and its subtrees without
                // getting the cancel there.  Given that an additional barrier specific to cancellation would result in ~25% performance hit on key benchmarks,
                // this is something we're living with.
                //
                // Note also that there must be a fence on _M_inliningDepth and a subsequent interruption point between the write of _M_inliningDepth and the
                // call to _WaitOnStolenChores if everything was stolen prior to getting into this function.  Otherwise, we can fail to cancel entire branches if the
                // Wait() happens **AFTER** all branches are stolen.  Between the PopStructured (acting as fence) and the break below is the only place to
                // strategically do this without introducing extra overhead.  This means that there will be code replication in the catch blocks below.
                //
                if (_IsMarkedForCancellation() || pCurrentContext->HasAnyCancellation())
                {
                    //
                    // We need to know whether the local chore has performed accounting or not.  Flag this within the collection to avoid additional space
                    // on the local stack (which affects benchmark performance).  This pushes **ALL** of the overhead into the cancellation path.
                    //
                    _Interrupt(_IsMarkedForCancellation(), _S_localCancel);
                }

                if (pChore == NULL)
                    break;

                --_M_unpoppedChores;

                if (pCurrentContext->IsExternal())
                    static_cast<ExternalContextBase *>(pCurrentContext)->IncrementDequeuedTaskCounter();
                else
                    static_cast<InternalContextBase *>(pCurrentContext)->IncrementDequeuedTaskCounter();

                pChore->m_pFunction(pChore);
                pChore->_M_pTaskCollection = NULL;
            } 

            if (_M_unpoppedChores > 0)
            {
                _WaitOnStolenChores(_M_unpoppedChores);
                _M_unpoppedChores = 0;
            }
        }
        catch(const task_canceled &)
        {
            if (pChore != NULL)
            {
                if (_M_inlineFlags & _S_localCancel)
                {
                    //
                    // This did not happen above because the interruption point prevented it.  The interruption point is located where it is for strategic fence
                    // reduction.  Hence, this code should match **EXACTLY** what is done above between the break and the execution of m_pFunction.
                    //
                    --_M_unpoppedChores;

                    if (pCurrentContext->IsExternal())
                        static_cast<ExternalContextBase *>(pCurrentContext)->IncrementDequeuedTaskCounter();
                    else
                        static_cast<InternalContextBase *>(pCurrentContext)->IncrementDequeuedTaskCounter();
                }
                pChore->_M_pTaskCollection = NULL;
            }
            _RaisedCancel();
        }
        catch(...)
        {
            //
            // Track the exception that was thrown here and rethrow it outside the catch handler.
            //
            if (pChore != NULL)
                pChore->_M_pTaskCollection = NULL;
            _RaisedException();
        }

        pCurrentContext->SetExecutingCollection(_M_pParent);

        if (_M_pException != NULL)
        {
            //
            // This will rethrow if an exception was caught (both in the catch block above and in _UnrealizedChore::_StructuredChoreWrapper)
            //
            _Abort(); 

            if (pCurrentContext->HasAnyCancellation())
                _Interrupt(false);

            return _Canceled;
        }

        //
        // It's possible that our last chore caused a cancellation higher up in the tree and we should interrupt for that case.
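
    //
    // The listing is truncated here (page 1 of 5).  Illustrative sketch (not part of the original source): the cancellation scenario
    // that the comments in _RunAndWait describe, expressed with the public <ppl.h> API.  One chore cancels the group while its sibling
    // runs; the waiter observes the cancellation and wait() reports canceled, which corresponds to _RunAndWait returning _Canceled.
    // Guarded with #if 0.
    //
#if 0
    void StructuredCancellationSketch()
    {
        using namespace Concurrency;

        structured_task_group tg;

        auto canceler = make_task([&tg]
        {
            tg.cancel();           // Marks the collection for cancellation; siblings are interrupted at their next interruption point.
        });

        auto worker = make_task([]
        {
            // Work that may be abandoned once the cancellation is observed.
        });

        tg.run(canceler);
        tg.run(worker);

        if (tg.wait() == canceled) // wait() reports that the tree of work rooted at this group was canceled.
        {
            // Clean-up for the canceled case.
        }
    }
#endif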
