/* atomic_functions.h */
/* Since it's an xadd, the previous value is left in eax, which
 * is exactly what's required
 */
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
ret 4
#endif /* call-conv */
}
}
}
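/* Illustrative note (added commentary, not part of the original source):
 * in C terms, the xadd used above performs the following as a single
 * step, leaving the previous value in the source register for the return:
 *
 *   old   = *dest;
 *   *dest = old + src;
 *   src   = old;
 */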
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_write(ws_sint32_t volatile* /* pl */, ws_sint32_t /* n */)
{
_asm
{
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
/* __fastcall: ecx is pl, edx is n */
/* Just exchange *pl and n */
lock xchg dword ptr [ecx], edx
/* The previous value goes into edx, so we move it into eax for the return */
mov eax, edx
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
/* __stdcall: arguments are on the stack: pl in esp+4, n in esp+8 */
mov ecx, dword ptr [esp + 4] /* Load the address of pl into ecx */
mov eax, dword ptr [esp + 8] /* Load the value into eax, so the return value will be there waiting */
xchg dword ptr [ecx], eax
ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
}
}
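/* Usage sketch (illustrative only): atomic_write() stores n into *pl and
 * returns the value *pl held beforehand, e.g.
 *
 *   ws_sint32_t volatile flag = 0;
 *   ws_sint32_t          prev = atomic_write(&flag, 1);
 *   // prev == 0 here; concurrent readers observe either 0 or 1, never a
 *   // torn value, since the exchange is a single atomic instruction
 */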
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postadd(ws_sint32_t volatile* /* pl */, ws_sint32_t /* n */)
{
/* Thanks to Eugene Gershnik for the fast-call implementation */
if(s_up)
{
__asm
{
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
/* __fastcall: ecx is pl, edx is n */
/* Simply atomically add them, which will leave the previous value
* in edx
*/
xadd dword ptr [ecx], edx
/* Just need to move edx into eax to return it */
mov eax, edx
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
/* __stdcall: arguments are on the stack: pl in esp+4, n in esp+8 */
/* Simply atomically add them, which will leave the previous value
* in edx
*/
mov ecx, dword ptr [esp + 4] /* Load the address of pl into ecx */
mov eax, dword ptr [esp + 8] /* Load the value into eax, so the return value will be there waiting */
xadd dword ptr [ecx], eax
/* The previous value is already in eax, ready to be returned */
ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
}
}
else
{
__asm
{
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
/* __fastcall: ecx is pl, edx is n */
/* Simply atomically add them, which will leave the previous value
* in edx
*/
lock xadd dword ptr [ecx], edx
/* Just need to move edx into eax to return it */
mov eax, edx
ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
/* __stdcall: arguments are on the stack: pl in esp+4, n in esp+8 */
/* Simply atomically add them, which will leave the previous value
* in edx
*/
mov ecx, dword ptr [esp + 4] /* Load the address of pl into ecx */
mov eax, dword ptr [esp + 8] /* Load the value into eax, so the return value will be there waiting */
lock xadd dword ptr [ecx], eax
/* The previous value is already in eax, ready to be returned */
ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
}
}
}
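/* Note (added commentary): the s_up test above selects between the two
 * bodies: the uni-processor path omits the lock prefix, since a single
 * processor cannot interleave another access within its own instruction,
 * while the SMP path retains lock to make the xadd atomic across
 * processors. Usage sketch (illustrative only):
 *
 *   ws_sint32_t volatile count = 5;
 *   ws_sint32_t          prev  = atomic_postadd(&count, 3);
 *   // prev == 5, count == 8
 */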
#ifdef STLSOFT_COMPILER_IS_BORLAND
# pragma warn .8070 /* Restores "Function should return a value" to its default state */
# pragma warn .8002 /* Restores "Restarting compile using assembly" to its default state */
#endif /* compiler */
# else /* STSLSOFT_INLINE_ASM_SUPPORTED */
/* Non-assembler versions
 *
 * These use the Win32 Interlocked functions. They are not guaranteed to
 * give precise answers on Windows 95, where InterlockedIncrement() and
 * InterlockedDecrement() return only the sign of the result rather than
 * its exact value.
 */
/* Multi-processor detection variants */
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_preincrement(ws_sint32_t volatile* pl)
{
return STLSOFT_NS_GLOBAL(InterlockedIncrement)((LPLONG)pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_predecrement(ws_sint32_t volatile* pl)
{
return STLSOFT_NS_GLOBAL(InterlockedDecrement)((LPLONG)pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postincrement(ws_sint32_t volatile* pl)
{
ws_sint32_t pre = *pl;
STLSOFT_NS_GLOBAL(InterlockedIncrement)((LPLONG)pl);
return pre;
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postdecrement(ws_sint32_t volatile* pl)
{
ws_sint32_t pre = *pl;
STLSOFT_NS_GLOBAL(InterlockedDecrement)((LPLONG)pl);
return pre;
}
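/* Note (added commentary): the two post- variants above read *pl before
 * invoking the interlocked operation because the Windows 95-era
 * InterlockedIncrement()/InterlockedDecrement() return only the sign of
 * the result, not its value. The separate read is not atomic with the
 * increment, so the returned "previous" value can be stale under
 * contention; this is the imprecision noted in the comment introducing
 * these versions.
 */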
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_increment(ws_sint32_t volatile* pl)
{
STLSOFT_NS_GLOBAL(InterlockedIncrement)((LPLONG)pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_decrement(ws_sint32_t volatile* pl)
{
STLSOFT_NS_GLOBAL(InterlockedDecrement)((LPLONG)pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_write(ws_sint32_t volatile* pl, ws_sint32_t n)
{
return STLSOFT_NS_GLOBAL(InterlockedExchange)((LPLONG)pl, n);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_read(ws_sint32_t volatile const* pl)
{
return *pl;
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postadd(ws_sint32_t volatile* pl, ws_sint32_t n)
{
return (ws_sint32_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd)((LPLONG)pl, n);
}
/* Uni-processor variants */
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_preincrement_up(ws_sint32_t volatile* pl)
{
return atomic_preincrement(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_predecrement_up(ws_sint32_t volatile* pl)
{
return atomic_predecrement(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postincrement_up(ws_sint32_t volatile* pl)
{
return atomic_postincrement(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postdecrement_up(ws_sint32_t volatile* pl)
{
return atomic_postdecrement(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_increment_up(ws_sint32_t volatile* pl)
{
atomic_increment(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_decrement_up(ws_sint32_t volatile* pl)
{
atomic_decrement(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_write_up(ws_sint32_t volatile* pl, ws_sint32_t n)
{
return atomic_write(pl, n);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_read_up(ws_sint32_t volatile const* pl)
{
return *pl;
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postadd_up(ws_sint32_t volatile* pl, ws_sint32_t n)
{
return (ws_sint32_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd)((LPLONG)pl, n);
}
/* SMP variants */
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_preincrement_smp(ws_sint32_t volatile* pl)
{
return atomic_preincrement(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_predecrement_smp(ws_sint32_t volatile* pl)
{
return atomic_predecrement(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postincrement_smp(ws_sint32_t volatile* pl)
{
return atomic_postincrement(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postdecrement_smp(ws_sint32_t volatile* pl)
{
return atomic_postdecrement(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_write_smp(ws_sint32_t volatile* pl, ws_sint32_t n)
{
return atomic_write(pl, n);
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_read_smp(ws_sint32_t volatile const* pl)
{
return *pl;
}
/** \brief
*
* \ingroup group__library__synch
*/
WINSTL_ATOMIC_FNS_IMPL_(ws_sint32_t) atomic_postadd_smp(ws_sint32_t volatile* pl, ws_sint32_t n)
{
return (ws_sint32_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd)((LPLONG)pl, n);
}
# endif /* STSLSOFT_INLINE_ASM_SUPPORTED */
# endif /* !WINSTL_ATOMIC_FNS_DECLARATION_ONLY */
/* /////////////////////////////////////////////////////////////////////////
 * Other inline atomic functions
*/
/** \brief
*
* \ingroup group__library__synch
*/
STLSOFT_INLINE ws_sint32_t atomic_preadd_up(ws_sint32_t volatile* pl, ws_sint32_t n)
{
return n + atomic_postadd_up(pl, n);
}
/** \brief
*
* \ingroup group__library__synch
*/
STLSOFT_INLINE void atomic_increment_smp(ws_sint32_t volatile* pl)
{
atomic_postincrement_smp(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
STLSOFT_INLINE void atomic_decrement_smp(ws_sint32_t volatile* pl)
{
atomic_postdecrement_smp(pl);
}
/** \brief
*
* \ingroup group__library__synch
*/
STLSOFT_INLINE ws_sint32_t atomic_preadd_smp(ws_sint32_t volatile* pl, ws_sint32_t n)
{
return n + atomic_postadd_smp(pl, n);
}
/** \brief
*
* \ingroup group__library__synch
*/
STLSOFT_INLINE ws_sint32_t atomic_preadd(ws_sint32_t volatile* pl, ws_sint32_t n)
{
return n + atomic_postadd(pl, n);
}
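/* Usage sketch (illustrative only): atomic_preadd() is built on the
 * post-add primitive, adding n to its result to yield the value after
 * the addition:
 *
 *   ws_sint32_t volatile total = 10;
 *   ws_sint32_t          now   = atomic_preadd(&total, 4);
 *   // atomic_postadd() returned 10 (the prior value), so now == 14
 *   // and total == 14
 */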
#endif /* !STLSOFT_DOCUMENTATION_SKIP_SECTION */
/* /////////////////////////////////////////////////////////////////////////
* Unit-testing
*/
#ifdef STLSOFT_UNITTEST
# include "./unittest/atomic_functions_unittest_.h"
#endif /* STLSOFT_UNITTEST */
/* ////////////////////////////////////////////////////////////////////// */
#ifndef _WINSTL_NO_NAMESPACE
# if defined(_STLSOFT_NO_NAMESPACE) || \
defined(STLSOFT_DOCUMENTATION_SKIP_SECTION)
} /* namespace winstl */
# else
} /* namespace winstl_project */
} /* namespace stlsoft */
# endif /* _STLSOFT_NO_NAMESPACE */
#endif /* !_WINSTL_NO_NAMESPACE */
/* ////////////////////////////////////////////////////////////////////// */
#endif /* !WINSTL_INCL_WINSTL_SYNCH_H_ATOMIC_FUNCTIONS */
/* ////////////////////////////////////////////////////////////////////// */