rtlocks.cpp

        {
            return (m_ticketState & StateIsBlocked) != 0;
        }

        bool IsPreviousBlocked()
        {
            return (m_ticketState & StateIsPreviousBlocked) != 0;
        }

        bool IsTicketValid()
        {
            return (m_ticketState & StateIsTicketValid) != 0;
        }

        // Const statics needed for blocking heuristics
        static const unsigned int TicketThreshold        = 2;
        static const unsigned int StateIsBlocked         = 0x00000001;
        static const unsigned int StateIsTicketValid     = 0x00000002;
        static const unsigned int StateIsPreviousBlocked = 0x00000004;
        static const unsigned int MaskBlockedStates      = ~(StateIsBlocked | StateIsPreviousBlocked);
        static const unsigned int NumberOfBooleanStates  = 0x00000003;
        static const unsigned int TicketIncrement        = 1 << NumberOfBooleanStates;

        Context *                 m_pContext;
        LockQueueNode *           m_pNextNode;
        volatile unsigned int     m_ticketState;
    };

    //
    // A C++ holder for a Non-reentrant PPL lock.
    //
    _CRTIMP _NonReentrantPPLLock::_Scoped_lock::_Scoped_lock(_NonReentrantPPLLock & _Lock) : _M_lock(_Lock)
    {
        new(reinterpret_cast <void *> (_M_lockNode)) LockQueueNode;
        _M_lock._Acquire(reinterpret_cast <void *> (_M_lockNode));
    }

    _CRTIMP _NonReentrantPPLLock::_Scoped_lock::~_Scoped_lock()
    {
        _M_lock._Release();
    }

    //
    // A C++ holder for a Reentrant PPL lock.
    //
    _CRTIMP _ReentrantPPLLock::_Scoped_lock::_Scoped_lock(_ReentrantPPLLock & _Lock) : _M_lock(_Lock)
    {
        new(reinterpret_cast <void *> (_M_lockNode)) LockQueueNode;
        _M_lock._Acquire(reinterpret_cast <void *> (_M_lockNode));
    }

    _CRTIMP _ReentrantPPLLock::_Scoped_lock::~_Scoped_lock()
    {
        _M_lock._Release();
    }

} // namespace details

/// <summary>
///     Constructs a critical section.
/// </summary>
_CRTIMP critical_section::critical_section() : _M_pHead(NULL), _M_pTail(NULL)
{
    _ASSERTE(sizeof(_M_activeNode) >= sizeof(LockQueueNode));

    // Hide the internals of LockQueueNode behind a char array large enough to hold 3 pointers.
    // This is why LockQueueNode is constructed in place instead of through a more traditional allocation.
    new(reinterpret_cast<void *>(_M_activeNode)) LockQueueNode(NULL, LockQueueNode::StateIsTicketValid);
}
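
// Illustrative sketch of the placement-new-into-a-buffer technique used above, with a
// hypothetical Node/Holder pair standing in for LockQueueNode and critical_section; it shows
// only the pattern, not the actual layout used by this file:
//
//     #include <new>
//
//     struct Node { void * a; void * b; void * c; };            // three pointers, like LockQueueNode
//
//     class Holder
//     {
//         alignas(Node) char m_storage[sizeof(Node)];           // opaque buffer declared in the header
//     public:
//         Holder() { new (static_cast<void *>(m_storage)) Node(); }
//         Node * Get() { return reinterpret_cast<Node *>(m_storage); }
//     };
//
// The buffer keeps the node's definition out of the public header and avoids a heap allocation;
// the node is constructed in place, just as _M_activeNode is above.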

/// <summary>
///     Destroys a critical section.  It is expected that the lock is no longer held.
/// </summary>
_CRTIMP critical_section::~critical_section()
{
    _ASSERT_EXPR(_M_pHead == NULL, L"Lock was destructed while held");
}

/// <summary>
///     Gets a critical section handle.
/// </summary>
/// <returns>
///     A reference to this critical section.
/// </returns>
_CRTIMP critical_section::native_handle_type critical_section::native_handle()
{
    return *this;
}

/// <summary>
///     Acquires this critical section.
/// </summary>
/// <remarks>
///     Throws an improper_lock exception if the lock is acquired recursively
/// </remarks>
_CRTIMP void critical_section::lock()
{
    LockQueueNode   newNode;                   // Allocated on the stack and goes out of scope before unlock()
    LockQueueNode * pNewNode = &newNode;

    //
    // Acquire the lock node that was just created on the stack
    //
    _Acquire_lock(pNewNode, false);

    //
    // At this point the context has exclusive ownership of the lock
    //

    _Switch_to_active(pNewNode);
}
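
// Illustrative usage sketch, assuming the public Concurrency::critical_section interface
// declared in concrt.h; lock() blocks until this context owns the lock and must be paired
// with unlock() on the same context:
//
//     Concurrency::critical_section cs;
//
//     void UpdateSharedState()
//     {
//         cs.lock();           // blocks until ownership is acquired
//         // ... touch shared state ...
//         cs.unlock();         // hands the lock to the next queued context, if any
//     }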

/// <summary>
///     Tries to acquire the lock without blocking.
/// </summary>
/// <returns>
///     true if the lock is acquired, false otherwise
/// </returns>
_CRTIMP bool critical_section::try_lock()
{
    LockQueueNode   newNode;                   // Allocated on the stack and goes out of scope before unlock()
    LockQueueNode * pNewNode = &newNode;
    LockQueueNode * pPreviousNode = reinterpret_cast<LockQueueNode *>(InterlockedCompareExchangePointer(&_M_pTail, pNewNode, NULL));

    // Try to acquire this lock.  If this CAS succeeds, the lock has been acquired.
    if (pPreviousNode == NULL)
    {
        _M_pHead = pNewNode;
        pNewNode->UpdateQueuePosition(reinterpret_cast<LockQueueNode *>(_M_activeNode));
        pNewNode->UnblockWithoutContext();
        _Switch_to_active(pNewNode);
        return true;
    }

    return false;
}
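
// Illustrative usage sketch, assuming a Concurrency::critical_section cs as in the sketch
// above; try_lock() never blocks and only succeeds when the lock is completely uncontended:
//
//     if (cs.try_lock())
//     {
//         // ... brief critical work ...
//         cs.unlock();
//     }
//     else
//     {
//         // do other useful work instead of waiting
//     }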

/// <summary>
///     Unlocks an acquired lock.
/// </summary>
_CRTIMP void critical_section::unlock()
{
    LockQueueNode * pCurrentNode = reinterpret_cast<LockQueueNode *>(_M_pHead);

    _ASSERT_EXPR(pCurrentNode != NULL, L"Lock not being held");
    _ASSERT_EXPR(pCurrentNode->m_pContext == SchedulerBase::SafeFastCurrentContext(), L"Lock being held by different context");

    LockQueueNode * pNextNode = pCurrentNode->m_pNextNode;
    _M_pHead = pNextNode;

    // Reset the context on the active node to ensure that it is possible to detect the error case
    // where the same context tries to enter the lock twice.
    reinterpret_cast<LockQueueNode *>(&_M_activeNode)->m_pContext = NULL;

    if (pNextNode != NULL)
    {
        pNextNode->Unblock();
    }
    else
    {
        // If someone is adding a context, wait until the next node pointer is populated.
        if (reinterpret_cast<LockQueueNode *>(InterlockedCompareExchangePointer(&_M_pTail, NULL, pCurrentNode)) != pCurrentNode)
        {
            LockQueueNode * pWaitedNode = pCurrentNode->WaitForNextNode();
            pWaitedNode->Unblock();
        }
    }
}

/// <summary>
///     If no one owns the lock at the instant this API is called, it returns immediately. If there is an owner,
///     it performs a lock followed by an unlock.
/// </summary>
void critical_section::_Flush_current_owner()
{
    if (_M_pTail != NULL)
    {
        lock();
        unlock();
    }
}

/// <summary>
///     Acquires this critical section given a specific node to lock.
/// </summary>
/// <param name="_PLockingNode">
///     The node that needs to own the lock.
/// </param>
/// <remarks>
///     Throws an improper_lock exception if the lock is acquired recursively
/// </remarks>
void critical_section::_Acquire_lock(void * _PLockingNode, bool _FHasExternalNode)
{
    LockQueueNode * pNewNode = reinterpret_cast<LockQueueNode *>(_PLockingNode);
    LockQueueNode * pActiveNode = reinterpret_cast<LockQueueNode *>(&_M_activeNode);

    // Locks are non-reentrant, so throw if recursive acquisition is detected.
    if (pNewNode->m_pContext == pActiveNode->m_pContext)
    {
        throw improper_lock("Lock already taken");
    }

    LockQueueNode * pPrevious = reinterpret_cast<LockQueueNode *>(InterlockedExchangePointer(&_M_pTail, pNewNode));

    // No one held this critical section, so this context has now acquired the lock
    if (pPrevious == NULL)
    {
        _M_pHead = pNewNode;

        pNewNode->UpdateQueuePosition(pActiveNode);
        pNewNode->UnblockWithoutContext();
    }
    else
    {
        pNewNode->UpdateQueuePosition(pPrevious);
        pPrevious->m_pNextNode = pNewNode;

        // NOT SAFE TO TOUCH pPrevious AFTER THE ASSIGNMENT ABOVE!

        pNewNode->Block(pActiveNode->m_ticketState);

        // Do another position estimation in case we missed the previous ticket number due to a race
        pNewNode->UpdateQueuePosition(pActiveNode);
    }

    // Since calls with external nodes will not call _Switch_to_active, make
    // sure that we are setting the head and the active node properly.
    if (_FHasExternalNode)
    {
        pActiveNode->Copy(pNewNode);
        _M_pHead = pNewNode;
    }
}
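
// Minimal sketch of the queue-based (MCS-style) hand-off that _Acquire_lock and unlock()
// implement, written with std::atomic and spinning purely for illustration; the real code
// uses the Interlocked* intrinsics and blocks the ConcRT context instead of spinning:
//
//     #include <atomic>
//
//     struct QNode
//     {
//         std::atomic<QNode *> next{nullptr};
//         std::atomic<bool>    blocked{true};
//     };
//
//     std::atomic<QNode *> tail{nullptr};
//
//     void acquire(QNode * pMine)
//     {
//         pMine->next.store(nullptr);                      // reset the node before queueing it
//         pMine->blocked.store(true);
//         QNode * pPrev = tail.exchange(pMine);            // atomically append to the queue
//         if (pPrev == nullptr)
//             return;                                      // queue was empty: lock acquired
//         pPrev->next.store(pMine);                        // publish ourselves to the previous owner
//         while (pMine->blocked.load())                    // wait to be handed the lock
//             ;
//     }
//
//     void release(QNode * pMine)
//     {
//         QNode * pNext = pMine->next.load();
//         if (pNext == nullptr)
//         {
//             QNode * expected = pMine;                    // no visible successor: try to empty the queue
//             if (tail.compare_exchange_strong(expected, nullptr))
//                 return;
//             while ((pNext = pMine->next.load()) == nullptr)
//                 ;                                        // a successor is mid-enqueue; wait for it
//         }
//         pNext->blocked.store(false);                     // unblock the next node in line
//     }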

/// <summary>
///     The acquiring node allocated on the stack never really owns the lock. The reason is that
///     it would go out of scope and its contents would not be visible in unlock(), where it might be
///     needed to unblock the next node in the queue. Instead, its state is transferred to the internal
///     node, which is used as a scratch node.
/// </summary>
/// <param name="_PLockingNode">
///     The node that needs to own the lock.
/// </param>
void critical_section::_Switch_to_active(void * _PLockingNode)
{
    LockQueueNode * pLockingNode = reinterpret_cast<LockQueueNode *>(_PLockingNode);
    LockQueueNode * pActiveNode = reinterpret_cast<LockQueueNode *>(&_M_activeNode);

    //
    // Copy the contents of the node allocated on the stack, which now owns the lock, so that its
    // information is available during unlock.
    //
    pActiveNode->Copy(pLockingNode);

    //
    // If someone is acquiring the critical_section, wait until the next node pointer is populated. Otherwise, there will be no way
    // to unblock that acquiring context after pLockingNode goes out of scope.
    //
    if (pActiveNode->m_pNextNode == NULL)
    {
        //
        // If the compare-and-swap to the active node succeeds, a new acquirer coming in will
        // properly set _M_pHead. Otherwise, it has to be set manually once the next node is populated.
        //
        if (reinterpret_cast<LockQueueNode *>(InterlockedCompareExchangePointer(&_M_pTail, pActiveNode, pLockingNode)) != pLockingNode)
        {
            pLockingNode->WaitForNextNode();

            //
            // During the initial copy the next pointer had not yet been populated; it has been set in the meantime.
            // This copy can now be safely performed because the tail has moved, so next will point to the second element.
            //
            pActiveNode->Copy(pLockingNode);
        }
    }

    _ASSERTE(_PLockingNode != _M_pTail);

    _M_pHead = pActiveNode;
}

/// <summary>
///     Constructs a holder object and acquires the critical_section passed to it.
///     If the critical_section is held by another thread, this call will block.
/// </summary>
/// <param name="_Critical_section">
///     Critical section to lock.
/// </param>
critical_section::scoped_lock::scoped_lock(critical_section& _Critical_section) : _M_critical_section(_Critical_section)
{
    static_assert(sizeof(LockQueueNode) <= sizeof(_M_node), "_M_node buffer too small");
    LockQueueNode * pNewNode = reinterpret_cast<LockQueueNode *>(_M_node);
    new(pNewNode) LockQueueNode;
    _M_critical_section._Acquire_lock(pNewNode, true);
}

/// <summary>
///     Destroys a holder object and releases the critical_section.
/// </summary>
critical_section::scoped_lock::~scoped_lock()
{
    _M_critical_section.unlock();
}
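
// Illustrative usage sketch, assuming the public Concurrency::critical_section::scoped_lock
// interface from concrt.h; the destructor releases the lock even if the guarded code throws:
//
//     void UpdateSharedState(Concurrency::critical_section & cs)
//     {
//         Concurrency::critical_section::scoped_lock guard(cs);
//         // ... critical work; unlock() runs in ~scoped_lock ...
//     }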

/// <summary>
///     Constructs a new reader_writer_lock object.
/// </summary>
_CRTIMP reader_writer_lock::reader_writer_lock() : _M_pReaderHead(NULL), _M_pWriterHead(NULL), _M_pWriterTail(NULL), _M_lockState(0)
{
    _ASSERTE(sizeof(_M_activeWriter) >= sizeof(LockQueueNode));

    // Hide the internals of LockQueueNode behind a char array large enough to hold 3 pointers.
    // This is why LockQueueNode is constructed in place instead of through a more traditional allocation.
    new(reinterpret_cast <void *>(_M_activeWriter)) LockQueueNode(NULL, LockQueueNode::StateIsTicketValid);
}

/// <summary>
///     Destroys a reader_writer_lock object. It is expected that the lock is no longer held.
/// </summary>
_CRTIMP reader_writer_lock::~reader_writer_lock()
{
    _ASSERT_EXPR(_M_lockState == 0, L"Lock was destructed while held");

    // Since LockQueueNode has a trivial destructor, there is no need to call it here. If it ever becomes
    // non-trivial, it would be called here instead of calling delete (since the memory is allocated
    // in the char array and will be reclaimed anyway when the reader_writer_lock is destroyed).
}

/// <summary>
///     A writer enters the lock. If there are active readers, they are immediately notified to finish
///     and relinquish the lock.
/// </summary>
/// <remarks>
///     A writer blocks by spinning on a local variable. Writers are chained so that a writer
///     exiting the lock releases the next writer in line.
/// </remarks> 
_CRTIMP void reader_writer_lock::lock()
{
    LockQueueNode   newWriterNode;                   // Allocated on the stack and goes out of scope before unlock()
    LockQueueNode * pNewWriter = &newWriterNode;

    //
    // Acquire the lock node that was just created on the stack
    //
    _Acquire_lock(pNewWriter, false);

    //
    // At this point the writer has exclusive ownership of the lock
    //

    _Switch_to_active(pNewWriter);
}
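
// Illustrative usage sketch, assuming the public Concurrency::reader_writer_lock interface
// declared in concrt.h (writer side shown; readers would use lock_read()/unlock()):
//
//     Concurrency::reader_writer_lock rwlock;
//     int sharedValue = 0;
//
//     void Publish(int value)
//     {
//         rwlock.lock();        // exclusive: waits for active readers and queued writers
//         sharedValue = value;
//         rwlock.unlock();
//     }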

/// <summary>
///     Try to take a writer lock.
/// </summary>
/// <returns>
///     true if the lock is immediately available and lock succeeded; false otherwise.
/// </returns>
_CRTIMP bool reader_writer_lock::try_lock()
{
    LockQueueNode   newWriterNode;                   // Allocated on the stack and goes out of scope before unlock()
    LockQueueNode * pNewWriter = &newWriterNode;
    LockQueueNode * pPreviousWriter = reinterpret_cast<LockQueueNode *>(InterlockedCompareExchangePointer(&_M_pWriterTail, pNewWriter, NULL));

    // Is this the only writer present? If yes, it will win over any new writer coming in.
    if (pPreviousWriter == NULL)
    {
        _M_pWriterHead = pNewWriter;

        // Are there any active readers? If not, the lock has been acquired.
        if (InterlockedCompareExchange(&_M_lockState, (RWLockWriterInterested | RWLockWriterExclusive), 0) == 0)
        {
            pNewWriter->UpdateQueuePosition(reinterpret_cast<LockQueueNode *>(_M_activeWriter));
            pNewWriter->UnblockWithoutContext();
            _Switch_to_active(pNewWriter);
            return true;
        }
        else
        {
            // Lock failed, but other writers may now be linked to this failed write attempt.
            // Thus, unwind all the actions and leave the lock in a consistent state.
            _Remove_last_writer(pNewWriter);
        }
    }

    return false;
}
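
// Illustrative usage sketch, assuming a Concurrency::reader_writer_lock rwlock as in the sketch
// above; try_lock() fails rather than waiting when readers or another writer hold the lock:
//
//     if (rwlock.try_lock())
//     {
//         // ... exclusive update ...
//         rwlock.unlock();
//     }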

/// <summary>
///     A reader enters the lock. If there are active writers, readers have to wait until they are done.
///     A reader simply registers an interest in the lock and waits for writers to release it.
/// </summary>
/// <remarks> 
