// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// rtlocks.cpp
//
// Implementation file for locks used only within the runtime implementation. The locks
// themselves are expected to be dependent on the underlying platform definition.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#include "concrtinternal.h"
namespace Concurrency
{
namespace details
{
const unsigned int SPIN_COUNT = 4000;
unsigned int _SpinCount::_S_spinCount = SPIN_COUNT;
#if defined(_DEBUG)
#define DebugBitsNone 0
#define DebugBitsLockAcquiredOnPrimary 0x80000000
#define DebugBitsLockAcquiredInHyperCritical 0x40000000
#define DebugBitsLockAcquiredInCritical 0x20000000
#define DebugBitsLockAcquiredOutsideCritical 0x10000000
#define DebugBitsMask 0xF0000000
/// <summary>
/// Returns a set of debug bits indicating where the lock was acquired.
/// </summary>
LONG GetDebugBits()
{
if (!SchedulerBase::IsOneShotInitialized())
return DebugBitsNone;
LONG dbgBits = DebugBitsNone;
if (UMSSchedulingContext::OnPrimary() && UMS::GetCurrentUmsThread() != NULL)
dbgBits |= DebugBitsLockAcquiredOnPrimary;
//
// UMS might not be initialized by the time we utilize a lock (think a global one). Don't play games with the RM part
// if it's not initialized.
//
UMSThreadProxy *pProxy = UMS::Initialized() ? UMSThreadProxy::GetCurrent() : NULL;
ContextBase *pContext = SchedulerBase::FastCurrentContext();
//
// Once a proxy is transmogrified, it behaves as if it were an external thread with respect to all validations.
//
if (pProxy != NULL && !pProxy->IsShutdownValidations() && !pProxy->IsTransmogrified() && (pContext == NULL || !pContext->IsShutdownValidations()))
{
switch(pProxy->GetCriticalRegionType())
{
case OutsideCriticalRegion:
dbgBits |= DebugBitsLockAcquiredOutsideCritical;
break;
case InsideCriticalRegion:
dbgBits |= DebugBitsLockAcquiredInCritical;
break;
case InsideHyperCriticalRegion:
dbgBits |= DebugBitsLockAcquiredInHyperCritical;
break;
}
}
return dbgBits;
}
/// <summary>
/// Validates the lock conditions.
/// </summary>
void ValidateDebugBits(LONG dbgBits)
{
//
// If a lock is taken on the primary, it may only be taken inside a hyper critical region. It cannot be taken inside an ordinary critical
// region or outside a critical region.
//
if ((dbgBits & (DebugBitsLockAcquiredOnPrimary | DebugBitsLockAcquiredInHyperCritical)) != 0)
{
//
// **** READ THIS ****
//
// If this assert fires, you have placed a lock on a data structure and have not protected access to that lock appropriately on the UMS scheduler.
// It's quite likely that you will randomly deadlock in stress. Locks taken in the UMS primary can only be taken on other threads if they are taken
// in a hyper critical region.
//
CORE_ASSERT((dbgBits & (DebugBitsLockAcquiredInCritical | DebugBitsLockAcquiredOutsideCritical)) == 0);
}
}
#endif // _DEBUG
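/// <summary>
/// Chooses the default spin count. Spinning only pays off when another processor
/// can release the lock concurrently, so uniprocessor machines use a count of zero.
/// </summary>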
void _SpinCount::_Initialize()
{
_S_spinCount = (::Concurrency::GetProcessorCount() > 1) ? SPIN_COUNT : 0;
}
unsigned int _SpinCount::_Value()
{
return _S_spinCount;
}
//
// The non-reentrant blocking lock is a thin wrapper over a Win32 CRITICAL_SECTION,
// placement-constructed in the _M_criticalSection buffer and initialized with the
// runtime's spin count. The spin-based _NonReentrantLock, in contrast, is a 32-bit
// integer (_M_Lock) whose low bit is set to '1' while the lock is held, manipulated
// with interlocked APIs.
//
_NonReentrantBlockingLock::_NonReentrantBlockingLock()
{
static_assert(sizeof(CRITICAL_SECTION) <= sizeof(_M_criticalSection), "_M_criticalSection buffer too small");
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
new(pCriticalSection) CRITICAL_SECTION;
InitializeCriticalSectionAndSpinCount(pCriticalSection, _SpinCount::_S_spinCount);
}
_NonReentrantBlockingLock::~_NonReentrantBlockingLock()
{
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
DeleteCriticalSection(pCriticalSection);
}
//
// Acquire the lock by entering the underlying critical section. The spin count
// supplied at initialization lets the critical section spin briefly under
// contention before blocking.
//
void _NonReentrantBlockingLock::_Acquire()
{
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
EnterCriticalSection(pCriticalSection);
}
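/// <summary>
/// Debug-build acquire for _NonReentrantLock. Records where the lock was taken in
/// the high bits of _M_Lock and validates the UMS lock-sharing rules; compiles to
/// a no-op in retail builds.
/// </summary>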
void _NonReentrantLock::_DebugAcquire()
{
#if defined(_DEBUG)
LONG old;
LONG dbgBits = GetDebugBits();
_SpinWaitBackoffNone spinWait(_Sleep0);
for (;;)
{
//
// Under the debug build, verify the runtime's lock-sharing rules by stealing the high bits of the _M_Lock field.
// This exists purely for UMS, so that changes to lock structures do not inadvertently cause HARD TO FIND
// random deadlocks in UMS.
//
old = _M_Lock;
if ((old & 1) == 0)
{
LONG destVal = old | 1 | dbgBits;
LONG xchg = InterlockedCompareExchange(&_M_Lock, destVal, old);
if (xchg == old)
{
ValidateDebugBits(destVal);
break;
}
}
spinWait._SpinOnce();
}
#endif // _DEBUG
}
//
// Try to acquire the lock; returns immediately without spinning or blocking if
// the lock is unavailable.
//
bool _NonReentrantBlockingLock::_TryAcquire()
{
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
return TryEnterCriticalSection(pCriticalSection) != 0;
}
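/// <summary>
/// Debug-build try-acquire for _NonReentrantLock. On success, stamps the debug bits
/// into _M_Lock and validates them; in retail builds it is a no-op that returns false.
/// </summary>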
bool _NonReentrantLock::_DebugTryAcquire()
{
#if defined(_DEBUG)
LONG dbgBits = GetDebugBits();
LONG old = _M_Lock;
while ((old & 1) == 0)
{
LONG destVal = old | 1 | dbgBits;
LONG xchg = InterlockedCompareExchange(&_M_Lock, destVal, old);
if (xchg == old)
{
ValidateDebugBits(destVal);
return true;
}
old = xchg;
}
#endif // _DEBUG
return false;
}
//
// Release the lock by leaving the underlying critical section.
//
void _NonReentrantBlockingLock::_Release()
{
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
LeaveCriticalSection(pCriticalSection);
}
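//
// Sentinel owner id indicating that a reentrant lock is not currently held.
//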
#define NULL_THREAD_ID -1L
_ReentrantBlockingLock::_ReentrantBlockingLock()
{
static_assert(sizeof(CRITICAL_SECTION) <= sizeof(_M_criticalSection), "_M_criticalSection buffer too small");
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
new(pCriticalSection) CRITICAL_SECTION;
InitializeCriticalSectionAndSpinCount(pCriticalSection, _SpinCount::_S_spinCount);
}
_ReentrantBlockingLock::~_ReentrantBlockingLock()
{
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
DeleteCriticalSection(pCriticalSection);
}
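/// <summary>
/// Constructs a reentrant spin lock in the unowned state.
/// </summary>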
_ReentrantLock::_ReentrantLock()
{
_M_owner = NULL_THREAD_ID;
_M_recursionCount = 0;
}
void _ReentrantBlockingLock::_Acquire()
{
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
EnterCriticalSection(pCriticalSection);
}
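/// <summary>
/// Acquires the reentrant lock, spinning with backoff until this thread either takes
/// ownership or is recognized as the existing owner, in which case the recursion
/// count is incremented.
/// </summary>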
void _ReentrantLock::_Acquire()
{
LONG id = (LONG) GetCurrentThreadId();
LONG old;
_SpinWaitBackoffNone spinWait(_Sleep0);
#if defined(_DEBUG)
LONG dbgBits = GetDebugBits();
#endif // _DEBUG
for (;;)
{
old = InterlockedCompareExchange(&_M_owner, id, NULL_THREAD_ID);
if ( old == NULL_THREAD_ID )
{
#if defined(_DEBUG)
//
// Under the debug build, verify the runtime's lock-sharing rules by stealing the high bits of the _M_recursionCount field.
// This exists purely for UMS, so that changes to lock structures do not inadvertently cause HARD TO FIND
// random deadlocks in UMS.
//
// This does mean you better not recursively acquire the lock more than a billion times ;)
//
_M_recursionCount = (_M_recursionCount & DebugBitsMask) | 1;
#else // !_DEBUG
_M_recursionCount = 1;
#endif // _DEBUG
break;
}
else if ( old == id )
{
#if defined(_DEBUG)
CORE_ASSERT((_M_recursionCount & ~DebugBitsMask) < (DebugBitsMask - 2));
_M_recursionCount = ((_M_recursionCount & ~DebugBitsMask) + 1) | (_M_recursionCount & DebugBitsMask) | dbgBits;
#else
_M_recursionCount++;
#endif // _DEBUG
break;
}
spinWait._SpinOnce();
}
#if defined(_DEBUG)
ValidateDebugBits(_M_recursionCount);
#endif // _DEBUG
}
bool _ReentrantBlockingLock::_TryAcquire()
{
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
return TryEnterCriticalSection(pCriticalSection) != 0;
}
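/// <summary>
/// Attempts to acquire the reentrant lock without spinning. Succeeds if the lock is
/// unowned or already owned by the calling thread.
/// </summary>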
bool _ReentrantLock::_TryAcquire()
{
#if defined(_DEBUG)
LONG dbgBits = GetDebugBits();
#endif // _DEBUG
LONG id = (LONG) GetCurrentThreadId();
LONG old = InterlockedCompareExchange(&_M_owner, id, NULL_THREAD_ID);
if ( old == NULL_THREAD_ID || old == id )
{
#if defined(_DEBUG)
CORE_ASSERT((_M_recursionCount & ~DebugBitsMask) < (DebugBitsMask - 2));
_M_recursionCount = ((_M_recursionCount & ~DebugBitsMask) + 1) | (_M_recursionCount & DebugBitsMask) | dbgBits;
#else // !_DEBUG
_M_recursionCount++;
#endif
}
else
{
return false;
}
#if defined(_DEBUG)
ValidateDebugBits(_M_recursionCount);
#endif // _DEBUG
return true;
}
void _ReentrantBlockingLock::_Release()
{
CRITICAL_SECTION * pCriticalSection = reinterpret_cast<CRITICAL_SECTION *>(_M_criticalSection);
LeaveCriticalSection(pCriticalSection);
}
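/// <summary>
/// Releases one level of recursion on the reentrant lock; ownership is cleared when
/// the recursion count reaches zero. Only the owning thread may release.
/// </summary>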
void _ReentrantLock::_Release()
{
if ( _M_owner != (LONG) GetCurrentThreadId() || _M_recursionCount < 1)
return;
#if defined(_DEBUG)
if ( (_M_recursionCount & ~DebugBitsMask) < 1 )
#else // !_DEBUG
if ( _M_recursionCount < 1 )
#endif // _DEBUG
return;
_M_recursionCount--;
#if defined(_DEBUG)
//
// Test only the count portion; the high bits hold the debug bits.
//
if ( (_M_recursionCount & ~DebugBitsMask) == 0 )
#else // !_DEBUG
if ( _M_recursionCount == 0 )
#endif // _DEBUG
{
_M_owner = NULL_THREAD_ID;
}
}
//
// NonReentrant PPL Critical Section Wrapper
//
_NonReentrantPPLLock::_NonReentrantPPLLock()
{
}
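//
// Acquire the underlying cooperative critical section. _Lock_node supplies the
// storage the lock uses to queue this thread while it waits (an assumption based
// on the _Acquire_lock(node, true) call; the node type is defined elsewhere).
//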
void _NonReentrantPPLLock::_Acquire(void* _Lock_node)
{
_M_criticalSection._Acquire_lock(_Lock_node, true);
}
void _NonReentrantPPLLock::_Release()
{
_M_criticalSection.unlock();
}
//
// Reentrant PPL Critical Section Wrapper
//
_ReentrantPPLLock::_ReentrantPPLLock()
{
_M_owner = NULL_THREAD_ID;
_M_recursionCount = 0;
}
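//
// Acquire the lock on behalf of the calling thread, counting recursive acquires so
// that the owner can re-enter without blocking on the critical section.
//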
void _ReentrantPPLLock::_Acquire(void* _Lock_node)
{
LONG id = (LONG) GetCurrentThreadId();
if ( _M_owner == id )
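{
//
// The calling thread already owns the lock; just bump the recursion count.
//
_M_recursionCount++;
}
else
{
//
// Block on the underlying critical section, then record ownership. (Assumed
// completion: the source text ends mid-function, and this tail follows the
// pattern of _NonReentrantPPLLock::_Acquire and _ReentrantLock::_Acquire.)
//
_M_criticalSection._Acquire_lock(_Lock_node, true);
_M_owner = id;
_M_recursionCount = 1;
}
}
void _ReentrantPPLLock::_Release()
{
//
// Assumed counterpart to _Acquire, mirroring _NonReentrantPPLLock::_Release and
// _ReentrantLock::_Release: drop one level of recursion and, when the count hits
// zero, clear the owner and unlock the underlying critical section.
//
_M_recursionCount--;
if ( _M_recursionCount == 0 )
{
_M_owner = NULL_THREAD_ID;
_M_criticalSection.unlock();
}
}
} // namespace details
} // namespace Concurrency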