atomic_functions.h — x86 inline-assembly implementations of WinSTL atomic
operations, in uniprocessor (_up) and symmetric-multiprocessor (_smp) variants.
*/
/* [Uniprocessor] Atomically decrements *pl. No LOCK prefix is used: on a
 * single-CPU host a single read-modify-write instruction cannot be
 * interleaved by another processor. The explicit `ret` suggests the
 * function is declared naked by WINSTL_ATOMIC_FNS_IMPL_ — the macro is
 * not visible here; TODO confirm.
 */
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_decrement_up(ws_sint32_t volatile * /* pl */)
{
_asm
{
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack
mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */
// Decrement *pl in place (no lock prefix; uniprocessor build)
sub dword ptr [ecx], 1
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
ret 4 // callee pops the 4-byte pl argument under __stdcall
#endif /* call-conv */
}
}
/** \brief [Uniprocessor] Atomically reads and returns the value of *pl.
 *
 * Implemented as an (unlocked) XADD of zero: the addition leaves *pl
 * unchanged, and XADD's exchange side effect delivers the previous value
 * into eax, the return register.
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_read_up(ws_sint32_t volatile const * /* pl */)
{
_asm
{
mov eax, 0
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack
mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */
// eax holds 0, which is added into *pl (held in ecx), leaving the
// value unchanged.
xadd dword ptr [ecx], eax
// Since it's an xadd it exchanges the previous value into eax, which
// is exactly what's required as the return value
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
ret 4 // callee pops the 4-byte pl argument under __stdcall
#endif /* call-conv */
}
}
/** \brief [Uniprocessor] Atomically writes n into *pl, returning the
 * previous value of *pl.
 *
 * XCHG with a memory operand asserts the processor's lock signal
 * implicitly, so no explicit LOCK prefix is required.
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_write_up(ws_sint32_t volatile * /* pl */, ws_sint32_t /* n */)
{
_asm
{
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl, edx is n
// Just exchange *pl and n
xchg dword ptr [ecx], edx
// The previous value goes into edx, so we move it into eax for return
mov eax, edx
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack: pl at esp+4, n at esp+8
mov ecx, dword ptr [esp + 4] // Load the address of pl into ecx
mov eax, dword ptr [esp + 8] // Load the value into eax, so the return value will be there waiting
xchg dword ptr [ecx], eax
ret 8 // callee pops both 4-byte arguments under __stdcall
#else
# error Need to define calling convention
#endif /* call-conv */
}
}
/** \brief [Uniprocessor] Atomically adds n to *pl, returning the value
 * *pl held immediately before the addition (post-add semantics).
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postadd_up(ws_sint32_t volatile * /* pl */, ws_sint32_t /* n */)
{
// Thanks to Eugene Gershnik for the fast-call implementation
__asm
{
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl, edx is n
// Simply atomically add them, which will leave the previous value
// in edx
xadd dword ptr [ecx], edx
// Just need to move edx into eax to return it
mov eax, edx
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack: pl at esp+4, n at esp+8
// Simply atomically add them, which will leave the previous value
// in eax
mov ecx, dword ptr [esp + 4] // Load the address of pl into ecx
mov eax, dword ptr [esp + 8] // Load the value into eax, so the return value will be there waiting
xadd dword ptr [ecx], eax
// The previous value is already in eax, so nothing further is needed
ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
}
}
// Symmetric multi-processor
/** \brief [SMP] Atomically pre-increments *pl, returning the new
 * (incremented) value. Uses a LOCK-prefixed XADD so the
 * read-modify-write is atomic across processors.
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_preincrement_smp(ws_sint32_t volatile * /* pl */)
{
_asm
{
// Put 1 into eax, which can then be atomically added into *pl (held
// in ecx). Since it's an xadd it exchanges the previous value into eax
mov eax, 1
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack
mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */
lock xadd dword ptr [ecx], eax
// Since this is pre-increment, we need to inc eax to catch up with the
// real value
inc eax
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
ret 4 // callee pops the 4-byte pl argument under __stdcall
#endif /* call-conv */
}
}
/** \brief [SMP] Atomically pre-decrements *pl, returning the new
 * (decremented) value. Uses a LOCK-prefixed XADD of -1.
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_predecrement_smp(ws_sint32_t volatile * /* pl */)
{
_asm
{
// Put -1 into eax, which can then be atomically added into *pl (held
// in ecx). Since it's an xadd it exchanges the previous value into eax
mov eax, -1
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack
mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */
lock xadd dword ptr [ecx], eax
// Since this is pre-decrement, we need to dec eax to catch up with the
// real value
dec eax
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
ret 4 // callee pops the 4-byte pl argument under __stdcall
#endif /* call-conv */
}
}
/** \brief [SMP] Atomically post-increments *pl, returning the value
 * *pl held immediately before the increment.
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postincrement_smp(ws_sint32_t volatile * /* pl */)
{
_asm
{
// Put 1 into eax, which can then be atomically added into *pl (held
// in ecx). Since it's an xadd it exchanges the previous value into eax
mov eax, 1
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack
mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */
lock xadd dword ptr [ecx], eax
// Since this is post-increment, we need do nothing, since the previous
// value is in eax
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
ret 4 // callee pops the 4-byte pl argument under __stdcall
#endif /* call-conv */
}
}
/** \brief [SMP] Atomically post-decrements *pl, returning the value
 * *pl held immediately before the decrement.
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postdecrement_smp(ws_sint32_t volatile * /* pl */)
{
_asm
{
// Put -1 into eax, which can then be atomically added into *pl (held
// in ecx). Since it's an xadd it exchanges the previous value into eax
mov eax, -1
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack
mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */
lock xadd dword ptr [ecx], eax
// Since this is post-decrement, we need do nothing, since the previous
// value is in eax
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
ret 4 // callee pops the 4-byte pl argument under __stdcall
#endif /* call-conv */
}
}
/** \brief [SMP] Atomically reads and returns the value of *pl.
 *
 * Implemented as a LOCK-prefixed XADD of zero: the addition leaves *pl
 * unchanged, and XADD's exchange side effect delivers the previous value
 * into eax, the return register.
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_read_smp(ws_sint32_t volatile const * /* pl */)
{
_asm
{
mov eax, 0
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack
mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */
// eax holds 0, which is atomically added into *pl (held in ecx),
// leaving the value unchanged.
lock xadd dword ptr [ecx], eax
// Since it's an xadd it exchanges the previous value into eax, which
// is exactly what's required as the return value
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
ret 4 // callee pops the 4-byte pl argument under __stdcall
#endif /* call-conv */
}
}
/** \brief [SMP] Atomically writes n into *pl, returning the previous
 * value of *pl.
 *
 * The LOCK prefix is deliberately commented out: XCHG with a memory
 * operand asserts the processor's lock signal implicitly, so the
 * exchange is already atomic across processors.
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_write_smp(ws_sint32_t volatile * /* pl */, ws_sint32_t /* n */)
{
_asm
{
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl, edx is n
// Just exchange *pl and n
/* lock */ xchg dword ptr [ecx], edx
// The previous value goes into edx, so we move it into eax for return
mov eax, edx
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack: pl at esp+4, n at esp+8
mov ecx, dword ptr [esp + 4] // Load the address of pl into ecx
mov eax, dword ptr [esp + 8] // Load the value into eax, so the return value will be there waiting
/* lock */ xchg dword ptr [ecx], eax
ret 8 // callee pops both 4-byte arguments under __stdcall
#else
# error Need to define calling convention
#endif /* call-conv */
}
}
/** \brief [SMP] Atomically adds n to *pl, returning the value *pl held
 * immediately before the addition (post-add semantics). Uses a
 * LOCK-prefixed XADD so the read-modify-write is atomic across
 * processors.
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postadd_smp(ws_sint32_t volatile * /* pl */, ws_sint32_t /* n */)
{
// Thanks to Eugene Gershnik for the fast-call implementation
__asm
{
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl, edx is n
// Simply atomically add them, which will leave the previous value
// in edx
lock xadd dword ptr [ecx], edx
// Just need to move edx into eax to return it
mov eax, edx
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// __stdcall: arguments are on the stack: pl at esp+4, n at esp+8
// Simply atomically add them, which will leave the previous value
// in eax
mov ecx, dword ptr [esp + 4] // Load the address of pl into ecx
mov eax, dword ptr [esp + 8] // Load the value into eax, so the return value will be there waiting
lock xadd dword ptr [ecx], eax
// The previous value is already in eax, so nothing further is needed
ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
}
}
// Processor detection
namespace
{
// Determines, once per process, whether the host is a uniprocessor
// machine. Returns true when GetSystemInfo reports exactly one
// processor. Uses double-checked initialisation guarded by a spin lock
// (spin_mutex is a project type; presumably a simple busy-wait lock on
// the ws_sint32_t it is given — confirm against its definition).
inline ws_bool_t is_host_up()
{
// All these statics are guaranteed to be zero as a result of the module/process loading
static ws_sint32_t s_spin; // The spin variable
static ws_bool_t s_init; // Initialisation-complete flag; guaranteed to be zero
static ws_bool_t s_up; // Result flag: true <=> single-processor host; also guaranteed to be zero
// Simple spin lock
if(!s_init) // Low cost pre-test. In the unlikely event that another thread does come in and
{ // also see this as false, the dual initialisation of all three statics is benign
spin_mutex smx(&s_spin);
smx.lock();
if(!s_init) // Double-checked: re-test now that the lock is held
{
SYSTEM_INFO sys_info;
::GetSystemInfo(&sys_info);
s_init = true;
s_up = 1 == sys_info.dwNumberOfProcessors; // true on a uniprocessor host
}
smx.unlock();
}
return s_up;
}
// s_up is guaranteed to be zero at load time.
//
// There is a race condition with all static variables, since multiple threads
// can come in and one can have set the hidden flag variable without prior to
// setting the static variable itself, just at the time that an arbitrary number
// of other threads pick up the pre-initialised value.
//
// However, because the test here is whether to skip the lock, the pathological
// case is benign. The only cost in the very rare case where it happens is that
// the thread(s) will use bus locking until such time as the static is fully
// initialised.
static ws_bool_t s_up = is_host_up();
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_preincrement(ws_sint32_t volatile * /* pl */)
{
if(s_up)
{
_asm
{
// pop 1 into eax, which can then be atomically added into *pl (held
// in ecx). Since it's an xadd it exchanges the previous value into eax
mov eax, 1
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
// __fastcall: ecx is pl
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
// NOTE(review): the source is truncated at this point by the extraction —
// the remainder of atomic_preincrement() and the rest of the file are
// missing. The lines previously here were webpage UI artifacts (keyboard
// shortcut help text), not source code, and have been removed.