ppl.h
    /// <seealso cref="task_group::run_and_wait Method"/>
    /// <seealso cref="Task Parallelism"/>
    /**/
    template<typename _Function>
    void run(task_handle<_Function>& _Task_handle)
    {
        _Task_handle._SetRuntimeOwnsLifetime(false);
        _M_task_collection._Schedule(&_Task_handle);
    }
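    // Illustrative usage sketch (not part of the original header), assuming the documented
    // lifetime rule: the caller-owned task_handle must outlive the wait on the group.
    // Concurrency::make_task is used here to build the handle from a lambda.
    //
    //     Concurrency::task_group tg;
    //     auto work = Concurrency::make_task([] { /* body of the task */ });
    //     tg.run(work);       // 'work' must stay alive until tg.wait() returns
    //     tg.wait();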

    /// <summary>
    ///     Waits until all work on the <c>task_group</c> object has either completed or been canceled.
    /// </summary>
    /// <returns>
    ///     An indication of whether the wait was satisfied or the task group was canceled, due to either an explicit cancel operation or an exception
    ///     being thrown from one of its tasks. For more information, see <see cref="task_group_status Enumeration">task_group_status</see>.
    /// </returns>
    /// <remarks>
    ///     Note that one or more of the tasks scheduled to this <c>task_group</c> object may execute inline on the calling context.
    ///     <para>If one or more of the tasks scheduled to this <c>task_group</c> object throws an exception, the
    ///     runtime will select one such exception of its choosing and propagate it out of the call to the <c>wait</c> method.</para>
    ///     <para>Calling <c>wait</c> on a <c>task_group</c> object resets it to a clean state where it can be reused. This includes the case
    ///     where the <c>task_group</c> object was canceled.</para>
    ///     <para>In the non-exceptional path of execution, you have a mandate to call either this method or the <c>run_and_wait</c> method before
    ///     the destructor of the <c>task_group</c> executes.</para>
    /// </remarks>
    /**/
    task_group_status wait()
    {
        //
        // The underlying scheduler's definitions map exactly to the PPL's.  No translation beyond the cast is necessary.
        //
        return static_cast<task_group_status>(_M_task_collection._Wait());
    }
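    // Illustrative usage sketch (not part of the original header); the task bodies are
    // placeholders. It shows scheduling work and then inspecting the status returned by wait.
    //
    //     Concurrency::task_group tg;
    //     tg.run([] { /* first task */ });
    //     tg.run([] { /* second task */ });
    //     Concurrency::task_group_status status = tg.wait();   // blocks; may run tasks inline
    //     if (status == Concurrency::canceled)
    //     {
    //         // The group was canceled, either explicitly or because a task threw.
    //     }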

    /// <summary>
    ///     Schedules a task to be run inline on the calling context with the assistance of the <c>task_group</c> object for full cancellation support.
    ///     The function then waits until all work on the <c>task_group</c> object has either completed or been canceled.  If a <c>task_handle</c> object
    ///     is passed as a parameter to <c>run_and_wait</c>, the caller is responsible for managing the lifetime of the <c>task_handle</c> object.
    /// </summary>
    /// <typeparam name="_Function">
    ///     The type of the function object that will be invoked to execute the body of the task handle.
    /// </typeparam>
    /// <param name="_Task_handle">
    ///     A handle to the task which will be run inline on the calling context.  Note that the caller has responsibility for the lifetime of this object.
    ///     The runtime will continue to expect it to live until the <c>run_and_wait</c> method finishes execution.
    /// </param>
    /// <returns>
    ///     An indication of whether the wait was satisfied or the task group was canceled, due to either an explicit cancel operation or an exception
    ///     being thrown from one of its tasks. For more information, see <see cref="task_group_status Enumeration">task_group_status</see>.
    /// </returns>
    /// <remarks>
    ///     Note that one or more of the tasks scheduled to this <c>task_group</c> object may execute inline on the calling context.
    ///     <para>If one or more of the tasks scheduled to this <c>task_group</c> object throws an exception, the
    ///     runtime will select one such exception of its choosing and propagate it out of the call to the <c>run_and_wait</c> method.</para>
    ///     <para>Upon return from the <c>run_and_wait</c> method on a <c>task_group</c> object, the runtime resets the object to a clean state where it can be
    ///     reused. This includes the case where the <c>task_group</c> object was canceled.</para>
    ///     <para>In the non-exceptional path of execution, you have a mandate to call either this method or the <c>wait</c> method before
    ///     the destructor of the <c>task_group</c> executes.</para>
    /// </remarks>
    /// <seealso cref="task_group::run Method"/>
    /// <seealso cref="task_group::wait Method"/>
    /// <seealso cref="Task Parallelism"/>
    /**/
    template<class _Function>
    task_group_status run_and_wait(task_handle<_Function>& _Task_handle)
    {
        //
        // The underlying scheduler's definitions map exactly to the PPL's.  No translation beyond the cast is necessary.
        //
        _Task_handle._SetRuntimeOwnsLifetime(false);
        return (task_group_status)_M_task_collection._RunAndWait(&_Task_handle);
    }
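    // Illustrative usage sketch (not part of the original header): with this overload the
    // task_handle is a local whose lifetime the caller manages until run_and_wait returns.
    //
    //     Concurrency::task_group tg;
    //     tg.run([] { /* background task */ });
    //     auto last = Concurrency::make_task([] { /* final task, run inline */ });
    //     Concurrency::task_group_status status = tg.run_and_wait(last);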

    /// <summary>
    ///     Schedules a task to be run inline on the calling context with the assistance of the <c>task_group</c> object for full cancellation support.
    ///     The function then waits until all work on the <c>task_group</c> object has either completed or been canceled.  If a <c>task_handle</c> object
    ///     is passed as a parameter to <c>run_and_wait</c>, the caller is responsible for managing the lifetime of the <c>task_handle</c> object.
    /// </summary>
    /// <typeparam name="_Function">
    ///     The type of the function object that will be invoked to execute the body of the task.
    /// </typeparam>
    /// <param name="_Func">
    ///     A function which will be called to invoke the body of the work.  This may be a lambda expression or other object which supports
    ///     a version of the function call operator with the signature <c>void operator()()</c>.
    /// </param>
    /// <returns>
    ///     An indication of whether the wait was satisfied or the task group was canceled, due to either an explicit cancel operation or an exception
    ///     being thrown from one of its tasks. For more information, see <see cref="task_group_status Enumeration">task_group_status</see>.
    /// </returns>
    /// <remarks>
    ///     Note that one or more of the tasks scheduled to this <c>task_group</c> object may execute inline on the calling context.
    ///     <para>If one or more of the tasks scheduled to this <c>task_group</c> object throws an exception, the
    ///     runtime will select one such exception of its choosing and propagate it out of the call to the <c>run_and_wait</c> method.</para>
    ///     <para>Upon return from the <c>run_and_wait</c> method on a <c>task_group</c> object, the runtime resets the object to a clean state where it can be
    ///     reused. This includes the case where the <c>task_group</c> object was canceled.</para>
    ///     <para>In the non-exceptional path of execution, you have a mandate to call either this method or the <c>wait</c> method before
    ///     the destructor of the <c>task_group</c> executes.</para>
    /// </remarks>
    /// <seealso cref="task_group::run Method"/>
    /// <seealso cref="task_group::wait Method"/>
    /// <seealso cref="Task Parallelism"/>
    /**/
    template<class _Function>
    task_group_status run_and_wait(const _Function& _Func)
    {
        //
        // The underlying scheduler's definitions map exactly to the PPL's.  No translation beyond the cast is necessary.
        //
        return (task_group_status)_M_task_collection._RunAndWait(::Concurrency::details::_UnrealizedChore::_InternalAlloc<task_handle<_Function>, _Function>(_Func));
    }
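    // Illustrative usage sketch (not part of the original header): with this overload the
    // runtime allocates and owns the task_handle internally, so a lambda can be passed directly.
    //
    //     Concurrency::task_group tg;
    //     tg.run([] { /* background task */ });
    //     Concurrency::task_group_status status =
    //         tg.run_and_wait([] { /* final task, run inline */ });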

    /// <summary>
    ///     Makes a best effort attempt to cancel the sub-tree of work rooted at this task group.  Every task scheduled on the task group
    ///     will get canceled transitively if possible.
    /// </summary>
    /// <remarks>
    ///     For more information, see <see cref="Cancellation in the PPL"/>.
    /// </remarks>
    /**/
    void cancel()
    {
        _M_task_collection._Cancel();
    }
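    // Illustrative usage sketch (not part of the original header): cancel() is typically
    // called from another context, or from inside one of the group's own tasks; a later
    // wait() then typically reports Concurrency::canceled.
    //
    //     Concurrency::task_group tg;
    //     tg.run([] { /* long-running task */ });
    //     tg.cancel();                 // best-effort, transitive cancellation
    //     auto status = tg.wait();     // typically Concurrency::canceled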

    /// <summary>
    ///     Informs the caller whether or not the task group is currently in the midst of a cancellation.  This
    ///     does not necessarily indicate that the <c>cancel</c> method was called on the <c>task_group</c> object
    ///     (although such certainly qualifies this method to return <c>true</c>).  It may be the case that the <c>task_group</c> object
    ///     is executing inline and a task group further up in the work tree was canceled.  In cases such as these where the runtime can determine ahead
    ///     of time that cancellation will flow through this <c>task_group</c> object, <c>true</c> will be returned as well.
    /// </summary>
    /// <returns>
    ///     An indication of whether the <c>task_group</c> object is in the midst of a cancellation (or is guaranteed to be shortly).
    /// </returns>
    /// <remarks>
    ///     For more information, see <see cref="Cancellation in the PPL"/>.
    /// </remarks>
    /**/
    bool is_canceling()
    {
        return _M_task_collection._IsCanceling();
    }
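    // Illustrative usage sketch (not part of the original header): a long-running task body
    // can poll is_canceling() to bail out cooperatively once cancellation begins to flow
    // through the group.
    //
    //     Concurrency::task_group tg;
    //     tg.run([&tg]
    //     {
    //         while (!tg.is_canceling())
    //         {
    //             // Do one unit of work, then check again.
    //         }
    //     });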

private:

    // Disallow passing in an r-value for a task handle argument
    template<class _Function> void run(task_handle<_Function>&& _Task_handle);

    // The underlying group of tasks as known to the runtime.
    ::Concurrency::details::_TaskCollection _M_task_collection;
};


/// <summary>
///     Returns an indication of whether the task group which is currently executing inline on the current context
///     is in the midst of an active cancellation (or will be shortly).  Note that if there is no task group currently
///     executing inline on the current context, <c>false</c> will be returned.
/// </summary>
/// <returns>
///     <c>true</c> if the task group which is currently executing is canceling, <c>false</c> otherwise.
/// </returns>
/// <remarks>
///     For more information, see <see cref="Cancellation in the PPL"/>.
/// </remarks>
/// <seealso cref="task_group Class"/>
/// <seealso cref="structured_task_group Class"/>
/**/
_CRTIMP2 bool __cdecl is_current_task_group_canceling();
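// Illustrative usage sketch (not part of the original header): unlike task_group::is_canceling,
// this free function needs no reference to the group, so library code called from inside a task
// can still detect that the enclosing group is canceling. The function name below is hypothetical.
//
//     void do_chunk_of_work()
//     {
//         if (Concurrency::is_current_task_group_canceling())
//             return;   // abandon the remaining work; the enclosing group is canceling
//         // ... otherwise continue working ...
//     }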

// Parallel Algorithms and Patterns

// Helper function that implements parallel_invoke with two functions
// Used by the parallel_for and parallel_for_each implementations

template <typename _Function1, typename _Function2>
void _Parallel_invoke_impl(const _Function1& _Func1, const _Function2& _Func2)
{
    structured_task_group _Task_group;

    task_handle<_Function1> _Task_handle1(_Func1);
    _Task_group.run(_Task_handle1);

    // We inline the last item to prevent the unnecessary push/pop on the work queue.
    task_handle<_Function2> _Task_handle2(_Func2);
    _Task_group.run_and_wait(_Task_handle2);
}

/// <summary>
///     Executes the function objects supplied as parameters in parallel, and blocks until they have finished executing.  Each function object
///     could be a lambda expression, a pointer to function, or any object that supports the function call operator with the signature
///     <c>void operator()()</c>.
/// </summary>
/// <typeparam name="_Function1">
///     The type of the first function object to be executed in parallel.
/// </typeparam>
/// <typeparam name="_Function2">
///     The type of the second function object to be executed in parallel.
/// </typeparam>
/// <param name="_Func1">
///     The first function object to be executed in parallel.
/// </param>
/// <param name="_Func2">
///     The second function object to be executed in parallel.
/// </param>
/// <remarks>
///     Note that one or more of the function objects supplied as parameters may execute inline on the calling context.
///     <para>If one or more of the function objects passed as parameters to this function throws an exception, the
///     runtime will select one such exception of its choosing and propagate it out of the call to <c>parallel_invoke</c>.</para>
///     <para>For more information, see <see cref="Parallel Algorithms"/>.</para>
/// </remarks>
/**/
template <typename _Function1, typename _Function2>
void parallel_invoke(const _Function1& _Func1, const _Function2& _Func2)
{
    _Trace_ppl_function(PPLParallelInvokeEventGuid, _TRACE_LEVEL_INFORMATION, CONCRT_EVENT_START);

    _Parallel_invoke_impl(_Func1, _Func2);

    _Trace_ppl_function(PPLParallelInvokeEventGuid, _TRACE_LEVEL_INFORMATION, CONCRT_EVENT_END);
}
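// Illustrative usage sketch (not part of the original header); compute_left and compute_right
// are hypothetical independent pieces of work whose results are written through captured references.
//
//     int left = 0, right = 0;
//     Concurrency::parallel_invoke(
//         [&left]  { left  = compute_left();  },
//         [&right] { right = compute_right(); });
//     // Both lambdas have finished (or the call threw) by this point.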

/// <summary>
///     Executes the function objects supplied as parameters in parallel, and blocks until they have finished executing.  Each function object
///     could be a lambda expression, a pointer to function, or any object that supports the function call operator with the signature
///     <c>void operator()()</c>.
/// </summary>
/// <typeparam name="_Function1">
///     The type of the first function object to be executed in parallel.
/// </typeparam>
/// <typeparam name="_Function2">
///     The type of the second function object to be executed in parallel.
/// </typeparam>
/// <typeparam name="_Function3">
///     The type of the third function object to be executed in parallel.
/// </typeparam>
/// <param name="_Func1">
///     The first function object to be executed in parallel.
/// </param>
/// <param name="_Func2">
///     The second function object to be executed in parallel.
/// </param>
/// <param name="_Func3">
///     The third function object to be executed in parallel.
/// </param>
/// <remarks>
///     Note that one or more of the function objects supplied as parameters may execute inline on the calling context.
///     <para>If one or more of the function objects passed as parameters to this function throws an exception, the
///     runtime will select one such exception of its choosing and propagate it out of the call to <c>parallel_invoke</c>.</para>
///     <para>For more information, see <see cref="Parallel Algorithms"/>.</para>
/// </remarks>
/**/
template <typename _Function1, typename _Function2, typename _Function3>
void parallel_invoke(const _Function1& _Func1, const _Function2& _Func2, const _Function3& _Func3)
{
    _Trace_ppl_function(PPLParallelInvokeEventGuid, _TRACE_LEVEL_INFORMATION, CONCRT_EVENT_START);

    structured_task_group _Task_group;

    task_handle<_Function1> _Task_handle1(_Func1);
    _Task_group.run(_Task_handle1);

    task_handle<_Function2> _Task_handle2(_Func2);
    _Task_group.run(_Task_handle2);

    // We inline the last item to prevent the unnecessary push/pop on the work queue.
    task_handle<_Function3> _Task_handle3(_Func3);
    _Task_group.run_and_wait(_Task_handle3);

    _Trace_ppl_function(PPLParallelInvokeEventGuid, _TRACE_LEVEL_INFORMATION, CONCRT_EVENT_END);
}