// utils.h
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// utils.h
//
// Header file containing the utility routine declarations.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#pragma once
//**************************************************************************
// Previously Public Macro Definitions:
//**************************************************************************
// Enable tracing mechanisms
//
// CONCRT_TRACE compiles to a real trace call only in debug builds that also
// define CONCRT_TRACING; otherwise it is a no-op expression.
#if defined(_DEBUG) && defined(CONCRT_TRACING)
# define CONCRT_TRACE(...) ::Concurrency::details::_ConcRT_Trace(__VA_ARGS__)
#else
# define CONCRT_TRACE(...) ((void)0)
#endif
// Weaker than assert/verify, yet informational
#define CONCRT_MESSAGE(...) ::Concurrency::details::_ConcRT_DumpMessage(__VA_ARGS__)
// CONCRT_DEBUGMESSAGE: same as CONCRT_MESSAGE in debug builds, no-op in retail.
#if defined(_DEBUG)
# define CONCRT_DEBUGMESSAGE(...) ::Concurrency::details::_ConcRT_DumpMessage(__VA_ARGS__)
#else
# define CONCRT_DEBUGMESSAGE(...) ((void)0)
#endif
// CONCRT_ASSERT: debug builds evaluate the condition and report failures via
// _ConcRT_Assert; both flavors also hand the expression to __assume so the
// optimizer may rely on it (__assume does not evaluate its argument).
#if defined(_DEBUG)
# define CONCRT_ASSERT(x) (((x) ? ((void)0) : ::Concurrency::details::_ConcRT_Assert(#x, __FILE__, __LINE__)), __assume(x))
#else
# define CONCRT_ASSERT(x) (__assume(x))
#endif
// CONCRT_COREASSERT: like CONCRT_ASSERT but reported through _ConcRT_CoreAssert.
#if defined(_DEBUG)
# define CONCRT_COREASSERT(x) (((x) ? ((void)0) : ::Concurrency::details::_ConcRT_CoreAssert(#x, __FILE__, __LINE__)), __assume(x))
#else
# define CONCRT_COREASSERT(x) CONCRT_ASSERT(x)
#endif
// will not evaluate 'x' multiple times
// (the debug expansion captures the result in a local bool first; note it is a
// braced statement, unlike the expression forms above)
#if defined(_DEBUG)
# define CONCRT_FASSERT(x) {bool b = !!(x); (__assume(b), (b) ? ((void)0) : ::Concurrency::details::_ConcRT_Assert(#x, __FILE__, __LINE__));}
#else
# define CONCRT_FASSERT(x) (__assume(x))
#endif
// CONCRT_VERIFY: the argument is evaluated in retail builds too (the
// ((void)x) term), only the failure check is debug-only.
#if defined(_DEBUG)
# define CONCRT_VERIFY(x) CONCRT_FASSERT(x)
#else
# define CONCRT_VERIFY(x) (__assume(x), ((void)x))
#endif
//
// MTRACE:
//
// Memory tracing for UMS debugging (since it's nigh impossible elsewhere). This should be improved once the scheduler is "stable". Right now,
// buffers are interlocked incremented.
//
// Event identifiers recorded into the in-memory trace ring buffers (see
// _TraceBuffer below) via the *MTRACE macros.
#define MTRACE_EVT_AFFINITIZED 1
#define MTRACE_EVT_ADDEDTORUNNABLES 2
#define MTRACE_EVT_UMSBLOCKED 3
#define MTRACE_EVT_CRITICALBLOCK 4
#define MTRACE_EVT_PULLEDFROMCOMPLETION 5
#define MTRACE_EVT_SWITCHTO_BLOCKING 6
#define MTRACE_EVT_SWITCHTO_IDLE 7
#define MTRACE_EVT_SWITCHTO_YIELDING 8
#define MTRACE_EVT_SWITCHTO_NESTING 9
#define MTRACE_EVT_CONTEXT_RELEASED 10
#define MTRACE_EVT_CONTEXT_ACQUIRED 11
#define MTRACE_EVT_SFW_FOUND 12
#define MTRACE_EVT_SFW_FOUNDBY 13
#define MTRACE_EVT_CRITICALNOTIFY 14
#define MTRACE_EVT_SUTNOTIFY 15
#define MTRACE_EVT_BLOCKUNBLOCKRACE 16
#define MTRACE_EVT_DEACTIVATE 17
#define MTRACE_EVT_ACTIVATE 18
#define MTRACE_EVT_INVERTED_ADDEDTORUNNABLES 19
#define MTRACE_EVT_CLAIMEDOWNERSHIP 20
#define MTRACE_EVT_MADEAVAILABLE 21
#define MTRACE_EVT_AVAILABLEVPROCS 22
#define MTRACE_EVT_SWITCHTO 23
#define MTRACE_EVT_WOKEAFTERDEACTIVATE 24
#define MTRACE_EVT_RMAWAKEN 25
#define MTRACE_EVT_ACTIVATED 26
#define MTRACE_EVT_SEARCHEDLOCALRUNNABLES 27
#define MTRACE_EVT_RESTARTRAMBLING 28
#define MTRACE_EVT_STARTRAMBLING 29
#define MTRACE_EVT_STOPRAMBLING 30
#define MTRACE_EVT_SFW_NEXTLOOP 31
#define MTRACE_EVT_UPDATERAMBLING_RING 32
#define MTRACE_EVT_UPDATERAMBLING_RESETRING 33
#define MTRACE_EVT_UPDATERAMBLING_ALLVPROCS 34
#define MTRACE_EVT_RETURNTOPRIMARY_BLOCKED 35
#define MTRACE_EVT_RETURNTOPRIMARY_YIELD 36
#define MTRACE_EVT_EXECUTE 37
#define MTRACE_EVT_EXECUTEFAIL 38
#define MTRACE_EVT_RETIRE 39
#define MTRACE_EVT_ORIGINALCOMPLETION 40
#define MTRACE_EVT_CONTEXTPOOLED 41
#define MTRACE_EVT_CONTEXTUNPOOLED 42
#define MTRACE_EVT_CONTEXTUNBOUND 43
#if _UMSTRACE
//
// Scheduler Level:
//
// Each expansion below is a complete statement (note the trailing ';' baked
// into the macro text), routed to the corresponding _ConcRT_*Trace sink.
#define VMTRACE(traceevt, ctx, vp, data) ::Concurrency::details::_ConcRT_VMTrace((int)traceevt, (void *)ctx, (void *)vp, (ULONG_PTR)data);
#define CMTRACE(traceevt, ctx, vp, data) ::Concurrency::details::_ConcRT_CMTrace((int)traceevt, (void *)ctx, (void *)vp, (ULONG_PTR)data);
// VCMTRACE logs the same event to both the vproc and context buffers.
#define VCMTRACE(traceevt, ctx, vp, data) { VMTRACE(traceevt, ctx, vp, data); CMTRACE(traceevt, ctx, vp, data); }
//
// RM Level:
//
#define RVMTRACE(traceevt, ctx, vp, data) ::Concurrency::details::_ConcRT_RVMTrace((int)traceevt, (void *)ctx, (void *)vp, (ULONG_PTR)data);
#define RPMTRACE(traceevt, ctx, vp, data) ::Concurrency::details::_ConcRT_RPMTrace((int)traceevt, (void *)ctx, (void *)vp, (ULONG_PTR)data);
#define RVPMTRACE(traceevt, ctx, vp, data) { RVMTRACE(traceevt, ctx, vp, data); RPMTRACE(traceevt, ctx, vp, data); }
#else
// Tracing disabled: the macros expand to nothing.
#define VMTRACE(traceevt, ctx, vp, data)
#define CMTRACE(traceevt, ctx, vp, data)
#define VCMTRACE(traceevt, ctx, vp, data)
#define RVMTRACE(traceevt, ctx, vp, data)
#define RPMTRACE(traceevt, ctx, vp, data)
#define RVPMTRACE(traceevt, ctx, vp, data)
#endif // _UMSTRACE
// Per-component bit masks (presumably combined with | and passed as the
// trace_level argument of _ConcRT_Trace -- confirm against the .cpp).
#define CONCRT_TRACE_ALL 0xFFFF
#define CONCRT_TRACE_SCHEDULER 0x0001
#define CONCRT_TRACE_MSG 0x0002
#define CONCRT_TRACE_SGROUP 0x0004
#define CONCRT_TRACE_SCHEDULER_INSTANCE 0x0008
#define CONCRT_TRACE_COLLECTIONS 0x0010
#define CONCRT_TRACE_EVENT 0x0020
#define CONCRT_TRACE_CHORES 0x0040
#define CONCRT_TRACE_WORKQUEUE 0x0080
#define CONCRT_TRACE_UNIT 0x0100
#define CONCRT_TRACE_HILLCLIMBING 0x0200
#define CONCRT_TRACE_DYNAMIC_RM 0x0400
// Various macros are defined in public headers as CONCRT_whatever. Define
// them here without the CONCRT_ prefix for use in the internal implementation.
#define TRACE CONCRT_TRACE
#define ASSERT CONCRT_ASSERT
#define CORE_ASSERT CONCRT_COREASSERT
#define FASSERT CONCRT_FASSERT // evaluates argument just once, but __assume is not as effective in influencing oacr
#define VERIFY CONCRT_VERIFY
#define MESSAGE CONCRT_MESSAGE
#define DEBUGMESSAGE CONCRT_DEBUGMESSAGE
#define TRACE_ALL CONCRT_TRACE_ALL
#define TRACE_SCHEDULER CONCRT_TRACE_SCHEDULER
#define TRACE_MSG CONCRT_TRACE_MSG
#define TRACE_SGROUP CONCRT_TRACE_SGROUP
#define TRACE_SCHEDULER_INSTANCE CONCRT_TRACE_SCHEDULER_INSTANCE
#define TRACE_COLLECTIONS CONCRT_TRACE_COLLECTIONS
#define TRACE_EVENT CONCRT_TRACE_EVENT
#define TRACE_CHORES CONCRT_TRACE_CHORES
#define TRACE_WORKQUEUE CONCRT_TRACE_WORKQUEUE
#define TRACE_UNIT CONCRT_TRACE_UNIT
// Previously missing aliases: give the last two CONCRT_TRACE_* component
// flags unprefixed counterparts as well, for consistency with the rest.
#define TRACE_HILLCLIMBING CONCRT_TRACE_HILLCLIMBING
#define TRACE_DYNAMIC_RM CONCRT_TRACE_DYNAMIC_RM
// Useful Macros
// UNREACHED: used as an always-false assertion condition (e.g. ASSERT(UNREACHED)).
#define UNREACHED 0
#define KB 1024
#define DEFAULTCONTEXTSTACKSIZE (64 * KB)
// WIDEN turns a narrow string literal into a wide one; the two-level expansion
// lets the argument itself be a macro such as __FILE__.
#define WIDEN2(str) L ## str
#define WIDEN(str) WIDEN2(str)
#define __WFILE__ WIDEN(__FILE__)
// Element count of a C array (do not use on pointers).
#define DIM(array) (sizeof(array) / sizeof(array[0]))
// Ensure we use the intrinsic forms of Interlocked* APIs
// (undefine any Win32 macro versions first, then map each name onto the
// corresponding compiler intrinsic).
#undef InterlockedAnd
#undef InterlockedCompareExchange
#undef InterlockedDecrement
#undef InterlockedExchange
#undef InterlockedExchangeAdd
#undef InterlockedIncrement
#undef InterlockedOr
#undef InterlockedXor
#define InterlockedAnd _InterlockedAnd
#define InterlockedCompareExchange _InterlockedCompareExchange
#define InterlockedDecrement _InterlockedDecrement
#define InterlockedExchange _InterlockedExchange
#define InterlockedExchangeAdd _InterlockedExchangeAdd
#define InterlockedIncrement _InterlockedIncrement
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
// 64-bit variants are only intrinsic on x64.
#ifdef _M_X64
# undef InterlockedAnd64
# undef InterlockedOr64
# undef InterlockedXor64
# undef InterlockedIncrement64
# define InterlockedAnd64 _InterlockedAnd64
# define InterlockedOr64 _InterlockedOr64
# define InterlockedXor64 _InterlockedXor64
# define InterlockedIncrement64 _InterlockedIncrement64
#endif
// USE_ICX64 marks targets where the 64-bit compare-exchange intrinsic exists:
// IA64, AMD64, or x86 with a compiler new enough to provide it.
#if defined(_M_IA64) || defined(_M_AMD64) || (defined(_M_IX86) && _MSC_FULL_VER >= 140030626)
#define USE_ICX64 1
# undef InterlockedCompareExchange64
# define InterlockedCompareExchange64 _InterlockedCompareExchange64
#else
#undef USE_ICX64
#endif
#undef InterlockedCompareExchangePointer
#undef InterlockedExchangePointer
#define InterlockedCompareExchangePointer _InterlockedCompareExchangePointer
#define InterlockedExchangePointer _InterlockedExchangePointer
LONGLONG
FORCEINLINE
InterlockedInc64 (
    __inout LONGLONG volatile *Addend
    )
{
    // Atomically increments *Addend and returns the incremented value.
#ifdef _M_X64
    // x64 has a native 64-bit interlocked increment.
    return _InterlockedIncrement64(Addend);
#else
    // 32-bit targets: emulate the increment with a 64-bit CAS loop; retry
    // until no other thread has changed the value between read and exchange.
    for (;;)
    {
        LONGLONG observed = *Addend;
        if (_InterlockedCompareExchange64(Addend, observed + 1, observed) == observed)
        {
            return observed + 1;
        }
    }
#endif
}
#undef InterlockedIncrementSizeT
#undef InterlockedDecrementSizeT
#undef InterlockedCompareExchangeSizeT
// size_t-width interlocked helpers: forward to the 64-bit operations on Win64
// and the 32-bit ones otherwise, casting through the matching LONG type.
#ifdef _WIN64
#define InterlockedIncrementSizeT(x) (size_t)(InterlockedIncrement64(reinterpret_cast<volatile LONGLONG *>((x))))
#define InterlockedDecrementSizeT(x) (size_t)(InterlockedDecrement64(reinterpret_cast<volatile LONGLONG *>((x))))
#define InterlockedCompareExchangeSizeT(x,y,z) (size_t)(InterlockedCompareExchange64(reinterpret_cast<volatile LONGLONG *>((x)), (LONGLONG)((y)), (LONGLONG)((z))))
#else
#define InterlockedIncrementSizeT(x) (size_t)(InterlockedIncrement(reinterpret_cast<volatile LONG *>((x))))
#define InterlockedDecrementSizeT(x) (size_t)(InterlockedDecrement(reinterpret_cast<volatile LONG *>((x))))
#define InterlockedCompareExchangeSizeT(x,y,z) (size_t)(InterlockedCompareExchange(reinterpret_cast<volatile LONG *>((x)), (LONG)((y)), (LONG)((z))))
#endif
bool
FORCEINLINE
SafeInterlockedIncrement (
    __inout LONG volatile *Addend
    )
{
    // Atomically increments *Addend, but only while the value is non-zero.
    // Returns false (leaving the value untouched) once zero is observed,
    // true when the increment succeeded.
    for (;;)
    {
        LONG observed = *Addend;
        if (observed == 0)
        {
            return false;
        }
        // Publish observed+1 only if nobody raced us; otherwise re-read.
        if (_InterlockedCompareExchange(Addend, observed + 1, observed) == observed)
        {
            return true;
        }
    }
}
// For HillClimbing
inline double GetCurrentHiRezTime()
{
static LARGE_INTEGER qpcFreq;
if (0 == qpcFreq.QuadPart)
{
QueryPerformanceFrequency(&qpcFreq);
}
LARGE_INTEGER time;
QueryPerformanceCounter(&time);
return (double)time.QuadPart / (double)qpcFreq.QuadPart;
}
// Returns the sign of 'val': 1 for positive, -1 for negative, 0 for zero.
// (For unsigned T the -1 branch is unreachable and the result is 0 or 1.)
template<typename T>
T sign(T val)
{
    if (val > 0)
    {
        return 1;
    }
    return (val == 0) ? 0 : -1;
}
// Population count: number of bits set in 'mask'.
USHORT
inline
NumberOfBitsSet(
    __in ULONG_PTR mask
    )
{
    // Kernighan's method: m &= (m - 1) clears the lowest set bit, so the
    // loop runs once per set bit rather than once per bit position.
    USHORT bits = 0;
    for (ULONG_PTR m = mask; m != 0; m &= (m - 1))
    {
        ++bits;
    }
    return bits;
}
namespace Concurrency
{
namespace details
{
// Exported debug/trace helpers backing the CONCRT_* macros above.
_CRTIMP void _ConcRT_Trace(int trace_level, const wchar_t * format, ...);
_CRTIMP void _ConcRT_DumpMessage(const wchar_t * format, ...);
// Assertion reporters used by CONCRT_ASSERT / CONCRT_COREASSERT.
_CRTIMP void _ConcRT_Assert(const char* value, const char* filename, int lineno);
_CRTIMP void _ConcRT_CoreAssert(const char* value, const char* filename, int lineno);
// UMS trace sinks targeted by the *MTRACE macros when _UMSTRACE is enabled.
_CRTIMP void _ConcRT_VMTrace(int traceevt, void *pCtx, void *pVp, ULONG_PTR data);
_CRTIMP void _ConcRT_CMTrace(int traceevt, void *pCtx, void *pVp, ULONG_PTR data);
_CRTIMP void _ConcRT_RVMTrace(int traceevt, void *pCtx, void *pVp, ULONG_PTR data);
_CRTIMP void _ConcRT_RPMTrace(int traceevt, void *pCtx, void *pVp, ULONG_PTR data);
// One-time setup for the utility routines (implementation elsewhere).
void InitializeUtilityRoutines();
/// <summary>
/// Static methods related to security such as encode/decode pointer
/// </summary>
class Security
{
public:
// Process-wide cookie mixed into pointers by EncodePointer/DecodePointer.
static ULONG_PTR s_cookie;
// Presumably a once-flag guarding cookie initialization -- confirm in the .cpp.
static volatile long s_initialized;
static ULONG_PTR InitializeCookie();
static PVOID EncodePointer(PVOID ptr);
static PVOID DecodePointer(PVOID ptr);
};
//
// Logging utilities specific to the UMS scheduler
//
#if _UMSTRACE
// Capacity (in entries) of each trace ring buffer.
#define _UMSTRACE_BUFFER_SIZE 16384
// One record in the trace ring buffer (see _TraceBuffer below).
struct _TraceEntry
{
// MTRACE_EVT_* code identifying the event.
int m_traceEvt;
// Context and virtual-processor pointers associated with the event.
void *m_pCtx;
void *m_pVproc;
// Event-specific payload.
ULONG_PTR m_data;
// Id of the thread that logged the entry.
DWORD m_tid;
};
// Fixed-size trace ring buffer. Writers claim slots by interlocked-
// incrementing m_tracePtr; once full, the oldest entries are overwritten.
class _TraceBuffer
{
public:
// Start at 0xFFFFFFFF so the first interlocked increment wraps to 0,
// making slot 0 the first one written.
_TraceBuffer() : m_tracePtr(0xFFFFFFFF)
{
}
~_TraceBuffer()
{
}
// Records one event. Concurrent callers each claim a distinct slot, but a
// slot's fields are not written atomically, so a reader racing a wrap can
// observe a torn entry (acceptable for this debug-only aid).
void Trace(int traceEvt, void *pCtx, void *pVproc, ULONG_PTR data)
{
ULONG ptr = (ULONG)InterlockedIncrement((volatile LONG *)&m_tracePtr);
_TraceEntry *pTrace = m_trace + (ptr % _UMSTRACE_BUFFER_SIZE);
pTrace->m_traceEvt = traceEvt;
pTrace->m_pCtx = pCtx;
pTrace->m_pVproc = pVproc;
pTrace->m_data = data;
pTrace->m_tid = GetCurrentThreadId();
}
private:
// Next slot to claim (monotonically increasing; taken modulo buffer size).
volatile ULONG m_tracePtr;
_TraceEntry m_trace[_UMSTRACE_BUFFER_SIZE];
};
#endif // _UMSTRACE
/// <summary>
/// Use Sleep(0) to do the yield.
/// </summary>
void __cdecl _Sleep0();
/// <summary>
/// Spin WHILE the value of the variable is equal to a given value.
/// _Ty and _U should be comparable types
/// </summary>
template<typename _Ty, typename _U>
static inline void SpinwaitWhileEq( volatile _Ty& location, _U value )
{
// _SpinWaitBackoffNone is a project spin-wait helper declared elsewhere.
_SpinWaitBackoffNone spinWait;
// 'location' is volatile, so it is re-read on every iteration.
while( location==value )
{
spinWait._SpinOnce();
}
// NOTE(review): non-source artifacts from a web code viewer were removed here.
// The original header continues beyond this excerpt (SpinwaitWhileEq and the
// Concurrency::details namespaces are closed later in the file).