// utils.h (excerpt; the listing begins mid-file)
    }

    /// <summary>
    ///     Spin UNTIL the value of the variable is equal to a given value.
    ///     _Ty and _U should be comparable types
    /// </summary>
    template<typename _Ty, typename _U>
    static inline void SpinwaitUntilEq(volatile _Ty& location, const _U value)
    {
        _SpinWaitBackoffNone spinWait;
        while (location != value)
        {
            spinWait._SpinOnce();
        }
    }

    /// <summary>
    ///     Spin UNTIL the value of the variable is equal to a given value.
    ///     Uses Sleep(0) to yield
    /// </summary>
    void
    inline
    SpinUntilValueEquals(
         __in LONG volatile * Address,
         __in LONG Value
         )
    {
        if (*Address != Value)
        {
            _SpinWaitBackoffNone spinWait(_Sleep0);

            do
            {
                spinWait._SpinOnce();
            } while (*Address != Value);
        }
    }

    /// <summary>
    ///     Spin UNTIL the specified bits are set
    ///     Uses Sleep(0) to yield
    /// </summary>
    LONG
    inline
    SpinUntilBitsSet(
         __in LONG volatile * Address,
         __in LONG Bits
         )
    {
        LONG val = *Address;
        if ((val & Bits) != Bits)
        {
            _SpinWaitBackoffNone spinWait(_Sleep0);

            do
            {
                spinWait._SpinOnce();
                val = *Address;
            } while ((val & Bits) != Bits);
        }
        return val;
    }

    /// <summary>
    ///     Spin UNTIL the specified bits are reset.
    ///     Uses Sleep(0) to yield
    /// </summary>
    LONG
    inline
    SpinUntilBitsReset(
         __in LONG volatile * Address,
         __in LONG Bits
         )
    {
        LONG val = *Address;
        if ((val & Bits) != 0)
        {
            _SpinWaitBackoffNone spinWait(_Sleep0);

            do
            {
                spinWait._SpinOnce();
                val = *Address;
            } while ((val & Bits) != 0);
        }
        return val;
    }
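
    // ------------------------------------------------------------------
    // Example (illustrative only, not part of the original interface): a
    // simple startup handshake built on the spin helpers above.  The names
    // g_initFlags, InitConfigured, and InitStarted are hypothetical.
    // ------------------------------------------------------------------
    static const LONG InitConfigured = 0x1;
    static const LONG InitStarted    = 0x2;
    static LONG volatile g_initFlags = 0;

    inline void SignalStarted()
    {
        // Publish both bits atomically; readers spinning in SpinUntilBitsSet
        // will observe them and stop spinning.
        InterlockedOr(&g_initFlags, InitConfigured | InitStarted);
    }

    inline void WaitUntilStarted()
    {
        // Spins, yielding the timeslice with Sleep(0) via the backoff object,
        // until another thread has set both bits.
        SpinUntilBitsSet(&g_initFlags, InitConfigured | InitStarted);
    }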

    /// <summary>
    ///     This non-reentrant lock is a pure spin lock and is intended for use in situations
    ///     where it is known that the lock will not be taken recursively, and can thus be more 
    ///     efficiently implemented.
    /// </summary>
    class _NonReentrantLock
    {
    public:
        /// <summary>
        ///     Constructor for _NonReentrantLock
        /// </summary>
        _NonReentrantLock()
            : _M_Lock(0)
        {
        }

        /// <summary>
        ///     Acquire the lock, spin if necessary
        /// </summary>
        void _Acquire()
        {
#if defined(_DEBUG)
            _DebugAcquire();
#else // !_DEBUG

            if (InterlockedExchange(&_M_Lock, 1) != 0)
            {
                _SpinWaitBackoffNone spinWait(_Sleep0);

                do
                {
                    spinWait._SpinOnce();
                }
                while (InterlockedExchange(&_M_Lock, 1) != 0);
            }

#endif // !_DEBUG
        }

        /// <summary>
        ///     Tries to acquire the lock, does not spin.
        ///     Returns true if the lock is taken, false otherwise
        /// </summary>
        bool _TryAcquire()
        {
#if defined(_DEBUG)
            return _DebugTryAcquire();
#else // !_DEBUG
            return (_M_Lock == 0 && InterlockedExchange(&_M_Lock, 1) == 0);
#endif // _DEBUG
        }

        /// <summary>
        ///     Releases the lock
        /// </summary>
        void _Release()
        {
#if defined(_DEBUG)
            _M_Lock &= ~1;
#else // !_DEBUG
            _M_Lock = 0;
#endif // _DEBUG
        }

        bool _IsLockHeld() const 
        {
            return (_M_Lock != 0);
        }

        /// <summary>
        ///     An exception safe RAII wrapper.
        /// </summary>
        class _Scoped_lock
        {
        public:
            explicit _Scoped_lock(_NonReentrantLock& _Lock) : _M_lock(_Lock)
            {
                _M_lock._Acquire();
            }

            ~_Scoped_lock()
            {
                _M_lock._Release();
            }
        private:
            _NonReentrantLock& _M_lock;

            _Scoped_lock(const _Scoped_lock&);                    // no copy constructor
            _Scoped_lock const & operator=(const _Scoped_lock&);  // no assignment operator
        };

    private:
        // The lock being held
        volatile long _M_Lock;

        bool _DebugTryAcquire();
        void _DebugAcquire();
    };
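
    // ------------------------------------------------------------------
    // Example (illustrative only): guarding a shared counter with the RAII
    // wrapper.  g_countLock, g_count, and IncrementCount are hypothetical
    // names.  Because the lock is non-reentrant, IncrementCount must never
    // run while the calling thread already holds g_countLock.
    // ------------------------------------------------------------------
    static _NonReentrantLock g_countLock;
    static long g_count = 0;

    inline void IncrementCount()
    {
        // Acquired in the constructor, released in the destructor, so the
        // lock is dropped even if the guarded code throws.
        _NonReentrantLock::_Scoped_lock lockHolder(g_countLock);
        ++g_count;
    }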

    /// <summary>
    ///     A variant of _NonReentrantLock which ensures that the lock is taken in a hyper critical region.
    /// </summary>
    class _HyperNonReentrantLock
    {
    public:
        void _Acquire();
        bool _TryAcquire();
        void _Release();

        bool _IsLockHeld() const
        {
            return m_lock._IsLockHeld();
        }

        /// <summary>
        ///     An exception safe RAII wrapper.
        /// </summary>
        class _Scoped_lock
        {
        public:
            explicit _Scoped_lock(_HyperNonReentrantLock& _Lock) : _M_lock(_Lock)
            {
                _M_lock._Acquire();
            }

            ~_Scoped_lock()
            {
                _M_lock._Release();
            }
        private:
            _HyperNonReentrantLock& _M_lock;

            _Scoped_lock(const _Scoped_lock&);                    // no copy constructor
            _Scoped_lock const & operator=(const _Scoped_lock&);  // no assignment operator
        };

    private:
        _NonReentrantLock m_lock;
    };

    /// <summary>
    ///     A variant of _NonReentrantLock which ensures that the lock is taken in a critical region.
    /// </summary>
    class _CriticalNonReentrantLock
    {
    public:
        void _Acquire();
        bool _TryAcquire();
        void _Release();

        bool _IsLockHeld() const
        {
            return m_lock._IsLockHeld();
        }

        /// <summary>
        ///     An exception safe RAII wrapper.
        /// </summary>
        class _Scoped_lock
        {
        public:
            explicit _Scoped_lock(_CriticalNonReentrantLock& _Lock) : _M_lock(_Lock)
            {
                _M_lock._Acquire();
            }

            ~_Scoped_lock()
            {
                _M_lock._Release();
            }
        private:
            _CriticalNonReentrantLock& _M_lock;

            _Scoped_lock(const _Scoped_lock&);                    // no copy constructor
            _Scoped_lock const & operator=(const _Scoped_lock&);  // no assignment operator
        };

    private:
        _NonReentrantLock m_lock;
    };


    typedef _NonReentrantLock _StaticLock;


    // Wrapper around _BitScanReverse (for concurrent_vector)
    _CRTIMP unsigned long Log2(size_t);
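
    // A plausible shape for the Log2 wrapper above (its definition is not in
    // this header): the index of the highest set bit, i.e. floor(log2(n)) for
    // n > 0.  ExampleLog2 is a hypothetical name, not the runtime's code.
    inline unsigned long ExampleLog2(size_t n)
    {
        unsigned long index = 0;
#if defined(_M_X64) || defined(_M_ARM64)
        _BitScanReverse64(&index, n);
#else
        _BitScanReverse(&index, static_cast<unsigned long>(n));
#endif
        return index;
    }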

    // **************************************************
    // Safe Points:
    //
    // Pre-declare structures used for safe points.  These must be defined early due to usage within collections and other utilities.
    //
    // A safe point is defined as a region past which every virtual processor is guaranteed to have made a particular observation.  Operations
    // subject to safe points are usually defined as two phase operations where phase 1 of an operation is performed, and phase 2 is registered
    // to occur at the next safe point.  The actual safe point may occur an **ARBITRARY** amount of time later.  If a given virtual processor
    // is sleeping, it may not make the observation until it awakens, is retired, or the scheduler finalizes.  Likewise, if a given virtual processor
    // is performing work which is not cooperatively blocking, it may not make the observation until the next cooperative event.  Thus, the operation 
    // performed at a safe point must not be performance critical relative to when it was scheduled.
    //
    // The typical uses of safe points are things like deferred deletions from lock free lists.  For example, an element may be removed from 
    // a ListArray as phase 1 and deleted on reaching the next safe point as phase 2.  This guarantees that every virtual processor has observed 
    // the removal and isn't touching the element.
    //
    // Each virtual processor contains a SafePointMarker which performs necessary data versioning for this mechanism to work.
    //
    // ******************** READ THIS NOW ********************
    //
    // Safe points are observations by virtual processors, not every context running on the scheduler.  While this distinction does not matter so much
    // on the thread scheduler, it is **EXTREMELY IMPORTANT** on the UMS scheduler.  As such, the usage of safe points to guard an operation X must follow
    // a set of rules:
    //
    //     - The operation X must be inclusively bounded by a critical region on the UMS scheduler.
    //
    //       As an example, consider lock-free traversal of a ListArray.  Internal contexts traverse list array objects without regard to the state of
    //       the objects.  The ListArray code frees objects at safe points.  The safe point is guarding the reference of ListArray objects.  Therefore,
    //       the entire region inclusively between the assignment p = ListArray[x] and the last dereference of p must be bounded by a critical region.
    //
    //       As a second example, detached work stealing queues release their reference on their schedule group at retirement at a safe point.  Code which
    //       steals from a detached work stealing queue does not put a new reference on the schedule group until calling WorkItem::TransferReferences.  The
    //       safe point is guarding the region between the steal and the placement of the reference.  Therefore, the entire region between
    //       WorkQueue::Steal... and WorkItem::TransferReferences must be inclusively bounded by a critical region.
    //

    template<class T> class SQueue;

    /// <summary>
    ///     An intrusive object which is inserted into the list of work a scheduler must invoke on the next safe point.
    /// </summary>
    class SafePointInvocation
    {
    public:

        typedef void (*InvocationFunction)(void *);

        /// <summary>
        ///     Registers a particular function to be called with particular data when a given scheduler reaches the next safe point
        ///     after the call is made.  This is an intrusive invocation, with the SafePointInvocation object itself incurring no heap
        ///     allocations.
        /// </summary>
        /// <param name="pInvocationFunction">
        ///     The function which will be invoked at the next safe point
        /// </param>
        /// <param name="pData">
        ///     User specified data.
        /// </param>
        /// <param name="pScheduler">
        ///     The scheduler on which to wait for a safe point to invoke pInvocationFunction.
        /// </param>
        void InvokeAtNextSafePoint(InvocationFunction pInvocationFunction, void *pData, SchedulerBase *pScheduler);

    private:

        friend class SchedulerBase;
        template<class T> friend class SQueue;

        /// <summary>
        ///     The invocation of the callback for this particular registration.
        /// </summary>
        void Invoke()
        {
            m_pInvocation(m_pData);
        }

        // The client invocation function
        InvocationFunction m_pInvocation;

        // The client data
        void *m_pData;

        // The data version for this safe point.
        ULONG m_safePointVersion;

        // The queue linkage (spin-lock guarded)
        SafePointInvocation *m_pNext;

    };
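
    // ------------------------------------------------------------------
    // Example (illustrative only): the two-phase deferred deletion pattern
    // described in the comment block above.  ExampleNode, ExampleUnlink,
    // ExampleDeleteCallback, and ExampleRemove are hypothetical names; only
    // SafePointInvocation::InvokeAtNextSafePoint comes from this header.
    // ------------------------------------------------------------------
    struct ExampleNode
    {
        int m_value;
        SafePointInvocation m_safePoint;   // intrusive: no heap allocation needed
    };

    void ExampleUnlink(ExampleNode *pNode); // phase 1: make the node unreachable

    inline void ExampleDeleteCallback(void *pData)
    {
        // Phase 2: runs at the next safe point, once every virtual processor
        // is guaranteed to have observed the unlink and can no longer be
        // touching the node.
        delete static_cast<ExampleNode *>(pData);
    }

    inline void ExampleRemove(ExampleNode *pNode, SchedulerBase *pScheduler)
    {
        ExampleUnlink(pNode);

        // Register phase 2.  The node must stay allocated until the callback
        // fires, which may be an arbitrary amount of time later.
        pNode->m_safePoint.InvokeAtNextSafePoint(ExampleDeleteCallback, pNode, pScheduler);
    }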

    /// <summary>
    ///     This performs all version tracking for a particular virtual processor.  Only the scheduler touches this data structure.
    /// </summary>
    class SafePointMarker
    {
    public:

        /// <summary>
        ///     Construct a new safe point marker.
        /// </summary>
        SafePointMarker()
        {
            Reset();
        }

        /// <summary>
        ///     Reset a safe point marker.
        /// </summary>
        void Reset()
        {
            //
            // Zero is a special value indicating that the marker has made no data observations.
            //
            m_lastObservedVersion = 0;
        }

    private:

        friend class SchedulerBase;

        // The last observed version of data.
        ULONG m_lastObservedVersion;
    };
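
    // Illustrative only -- the real bookkeeping lives in SchedulerBase, whose
    // definition is not in this header.  One plausible shape for the check:
    // a version is published when an invocation is registered, and a virtual
    // processor has "reached" that safe point once its marker observes a
    // version at least that large.  Zero still means "no observation yet".
    inline bool ExampleHasReachedSafePoint(ULONG lastObservedVersion, ULONG registeredVersion)
    {
        return (lastObservedVersion != 0 && lastObservedVersion >= registeredVersion);
    }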

    /// <summary>
    ///     Adds a reference to the host module and then creates the thread.  The first reference is taken via LoadLibrary;
    ///     all subsequent ones are reference counted internally to avoid the overhead of repeated LoadLibrary calls.
    /// </summary>
    HANDLE LoadLibraryAndCreateThread
    (
        LPSECURITY_ATTRIBUTES lpThreadAttributes,
        SIZE_T dwStackSize,
        LPTHREAD_START_ROUTINE lpStartAddress,
        LPVOID lpParameter,
        DWORD dwCreationFlags,
        LPDWORD lpThreadId
    );

    /// <summary>
    ///     Releases a reference on the host module and, when the last reference is dropped, frees the library.
    /// </summary>
    void FreeLibraryAndDestroyThread(DWORD exitCode);
} // namespace details
} // namespace Concurrency
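
// ----------------------------------------------------------------------
// Example (illustrative only): the underlying Win32 pin/unpin pattern that
// LoadLibraryAndCreateThread / FreeLibraryAndDestroyThread optimize with an
// internal reference count.  ExampleCreatePinnedThread is a hypothetical
// name, not the runtime's implementation.
// ----------------------------------------------------------------------
inline HANDLE ExampleCreatePinnedThread(LPTHREAD_START_ROUTINE pfnCode, LPVOID pParam)
{
    HMODULE hModule = NULL;

    // Take a reference on the module containing the thread routine so its
    // code cannot be unloaded while the thread is still running.
    if (!GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
                            reinterpret_cast<LPCWSTR>(pfnCode),
                            &hModule))
    {
        return NULL;
    }

    HANDLE hThread = CreateThread(NULL, 0, pfnCode, pParam, 0, NULL);
    if (hThread == NULL)
    {
        FreeLibrary(hModule);   // undo the pin if thread creation failed
    }
    return hThread;
}

// The thread routine itself must finish with
// FreeLibraryAndExitThread(hModule, exitCode) rather than returning, so the
// module reference is released only after execution has left the module.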
