atomicbase.h
/***********************************************************************
* Windows / x86 (Visual C/C++)
*
* Implementation Notes:
* 'xadd' is only available in the 486 series and later, not the 386.
* There is no 'xsub' counterpart, you have to negate the operand
* and use 'xadd'. Note the use of the 'lock' prefix to ensure
* certain operations occur atomically.
*/
#elif defined (_M_IX86) /* && _M_IX86 > 300 XXX wschildbach: disabled until the build system delivers the correct value */
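/*
 * For reference before the sequences below: a plain-C sketch of what
 * 'lock xadd mem, reg' does in a single atomic step (this sketch itself
 * is NOT atomic; it is an illustration added here, not part of the
 * original sequences). The register operand receives the old value
 * while memory receives the sum, which is why subtraction is done by
 * adding a negated operand.
 */
static __inline UINT32
xadd_semantics(UINT32* pMem, UINT32 ulReg)   /* hypothetical helper */
{
    UINT32 ulOld = *pMem;    /* the register gets the previous contents */
    *pMem = ulOld + ulReg;   /* memory gets old value plus addend */
    return ulOld;            /* callers re-apply the addend to get "new" */
}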
/* Increment by 1 */
static __inline void
HXAtomicIncUINT32(UINT32* pNum)
{
// register usage summary:
// eax - pointer to the value we're modifying
_asm
{
mov eax, pNum ; Load the pointer into a register
lock inc dword ptr [eax] ; Atomically increment *pNum
}
}
/* Decrement by 1 */
static __inline void
HXAtomicDecUINT32(UINT32* pNum)
{
// register usage summary:
// eax - pointer to the value we're modifying
_asm
{
mov eax, pNum ; Load the pointer into a register
lock dec dword ptr [eax] ; Atomically decrement *pNum
}
}
/* Increment by 1 and return new value */
static __inline UINT32
HXAtomicIncRetUINT32(UINT32* pNum)
{
volatile UINT32 ulRet;
// register usage summary:
// eax - pointer to the value we're modifying
// ebx - work register
_asm
{
mov eax, pNum ; Load the pointer into a register
mov ebx, 0x1 ; Load increment amount into a register
lock xadd dword ptr [eax], ebx ; Increment *pNum; ebx gets old value
inc ebx ; Increment old value
mov ulRet, ebx ; Set the return value
}
return ulRet;
}
/* Decrement by 1 and return new value */
static __inline UINT32
HXAtomicDecRetUINT32(UINT32* pNum)
{
volatile UINT32 ulRet;
// register usage summary:
// eax - pointer to the value we're modifying
// ebx - work register
// note: we increment by 0xffffffff to decrement by 1
_asm
{
mov eax, pNum ; Load the pointer into a register
mov ebx, 0xffffffff ; Load decrement amount into a register
lock xadd dword ptr [eax], ebx ; Decrement *pNum; ebx gets old value
dec ebx ; decrement old value
mov ulRet, ebx ; Set the return value
}
return ulRet;
}
/* Add n */
static __inline void
HXAtomicAddUINT32(UINT32* pNum, UINT32 ulNum)
{
// register usage summary:
// eax - pointer to the value we're modifying
// ebx - work register
_asm
{
mov eax, pNum ; Load the pointer into a register
mov ebx, ulNum ; Load increment amount into a register
lock add dword ptr [eax], ebx ; Increment *pNum by ulNum
}
}
/* Subtract n */
static __inline void
HXAtomicSubUINT32(UINT32* pNum, UINT32 ulNum)
{
// register usage summary:
// eax - pointer to the value we're modifying
// ebx - work register
_asm
{
mov eax, pNum ; Load the pointer into a register
mov ebx, ulNum ; Load amount to subtract into a register
lock sub dword ptr [eax], ebx ; Atomically subtract ulNum from *pNum
}
}
/* Add n and return new value */
static __inline UINT32
HXAtomicAddRetUINT32(UINT32* pNum, UINT32 ulNum)
{
volatile UINT32 ulRet;
// register usage summary:
// eax - pointer to the value we're modifying
// ebx - work register
// ecx - work register #2
_asm
{
mov eax, pNum ; Load the pointer into a register
mov ebx, ulNum ; Load increment amount into a register
mov ecx, ebx ; copy ebx into ecx
lock xadd dword ptr [eax], ecx ; Increment *pNum; ecx gets old value
add ecx, ebx ; Add ulNum to it
mov ulRet, ecx ; save result in ulRet
}
return ulRet;
}
/* Subtract n and return new value */
static __inline UINT32
HXAtomicSubRetUINT32(UINT32* pNum, UINT32 ulNum)
{
volatile UINT32 ulRet;
// register usage summary:
// eax - pointer to the value we're modifying
// ebx - work register
// ecx - work register #2
_asm
{
mov eax, pNum ; Load the pointer into a register
mov ebx, ulNum ; Load amount to subtract into a register
mov ecx, 0x0 ; zero out ecx
sub ecx, ebx ; compute -(ulNum), saving in ecx
lock xadd dword ptr [eax], ecx ; Decrement *pNum; ecx gets old value
sub ecx, ebx ; subtract ulNum from it
mov ulRet, ecx ; save result in ulRet
}
return ulRet;
}
static __inline void HXAtomicIncINT32(INT32* p) { HXAtomicIncUINT32((UINT32*)p); }
static __inline void HXAtomicDecINT32(INT32* p) { HXAtomicDecUINT32((UINT32*)p); }
static __inline void HXAtomicAddINT32(INT32* p, INT32 n) { HXAtomicAddUINT32((UINT32*)p, (UINT32)n); }
static __inline void HXAtomicSubINT32(INT32* p, INT32 n) { HXAtomicSubUINT32((UINT32*)p, (UINT32)n); }
static __inline INT32 HXAtomicIncRetINT32(INT32* p) { return HXAtomicIncRetUINT32((UINT32*)p); }
static __inline INT32 HXAtomicDecRetINT32(INT32* p) { return HXAtomicDecRetUINT32((UINT32*)p); }
static __inline INT32 HXAtomicAddRetINT32(INT32* p, INT32 n) { return HXAtomicAddRetUINT32((UINT32*)p, (UINT32)n); }
static __inline INT32 HXAtomicSubRetINT32(INT32* p, INT32 n) { return HXAtomicSubRetUINT32((UINT32*)p, (UINT32)n); }
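/*
 * Usage sketch (hypothetical caller, not part of this header): the *Ret
 * variants return the post-operation value, which makes them suitable
 * for lock-free reference counting -- exactly one thread observes zero.
 */
static __inline void
example_release(INT32* pRefCount, void (*pfnDestroy)(void))
{
    if (HXAtomicDecRetINT32(pRefCount) == 0)  /* last reference gone? */
    {
        pfnDestroy();   /* safe: no other thread saw the zero */
    }
}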
/***********************************************************************
* Intel x86 (gcc) / Unix -- i486 and higher
*
* Implementation Notes:
* 'xadd' is only available in the 486 series and later, not the 386.
* There is no 'xsub' counterpart, you have to negate the operand
* and use 'xadd'. Note the use of the 'lock' prefix to ensure
* certain operations occur atomically.
*
* OpenBSD is excluded since the standard assembler on x86 systems
* can't handle the xadd instruction.
*
*/
#elif defined(__GNUC__) && !defined(_OPENBSD) && \
(__GNUC__>2 || (__GNUC__==2 && __GNUC_MINOR__>=95)) && \
( defined (__i486__) || defined (__i586__) || defined (__i686__) || \
defined (__pentium__) || defined (__pentiumpro__))
/* Increment by 1 */
static __inline__ void
HXAtomicIncUINT32(UINT32* pNum)
{
__asm__ __volatile__(
"lock incl (%0);" // atomically add 1 to *pNum
: /* no output */
: "r" (pNum)
: "cc", "memory"
);
}
/* Decrement by 1 */
static __inline__ void
HXAtomicDecUINT32(UINT32* pNum)
{
__asm__ __volatile__(
"lock decl (%0);" // atomically add -1 to *pNum
: /* no output */
: "r" (pNum)
: "cc", "memory"
);
}
/* Increment by 1 and return new value */
static __inline__ UINT32
HXAtomicIncRetUINT32(UINT32* pNum)
{
volatile UINT32 ulRet;
__asm__ __volatile__(
"lock xaddl %0, (%1);" // atomically add 1 to *pNum
" inc %0;" // old value in %0, increment it
: "=r" (ulRet)
: "r" (pNum), "0" (0x1)
: "cc", "memory"
);
return ulRet;
}
/* Decrement by 1 and return new value */
static __inline__ UINT32
HXAtomicDecRetUINT32(UINT32* pNum)
{
volatile UINT32 ulRet;
__asm__ __volatile__(
"lock xaddl %0, (%1);" // atomically add -1 to *pNum
" dec %0;" // old value in %0, decrement it
: "=r" (ulRet)
: "r" (pNum), "0" (-1)
: "cc", "memory"
);
return ulRet;
}
/* Add n */
static __inline__ void
HXAtomicAddUINT32(UINT32* pNum, UINT32 ulNum)
{
__asm__ __volatile__(
"lock addl %1, (%0);" // atomically add ulNum to *pNum
: /* no output */
: "r" (pNum), "r" (ulNum)
: "cc", "memory"
);
}
/* Subtract n */
static __inline__ void
HXAtomicSubUINT32(UINT32* pNum, UINT32 ulNum)
{
__asm__ __volatile__(
"lock subl %1, (%0);" // atomically add ulNum to *pNum
: /* no output */
: "r" (pNum), "r" (ulNum)
: "cc", "memory"
);
}
/* Add n and return new value */
static __inline__ UINT32
HXAtomicAddRetUINT32(UINT32* pNum, UINT32 ulNum)
{
volatile UINT32 ulRet;
__asm__ __volatile__(
" mov %2, %0;" // copy ulNum into %0
"lock xaddl %0, (%1);" // atomically add ulNum to *pNum
" add %2, %0;" // old value in %0, add ulNum
: "=r" (ulRet)
: "r" (pNum), "r" (ulNum), "0" (0)
: "cc", "memory"
);
return ulRet;
}
/* Subtract n and return new value */
static __inline__ UINT32
HXAtomicSubRetUINT32(UINT32* pNum, UINT32 ulNum)
{
volatile UINT32 ulRet;
__asm__ __volatile__(
" sub %2, %0;" // negate ulNum, saving in %0
"lock xaddl %0, (%1);" // atomically add -(ulNum) to *pNum
" sub %2, %0;" // old value in %0, subtract ulNum
: "=r" (ulRet)
: "r" (pNum), "r" (ulNum), "0" (0)
: "cc", "memory"
);
return ulRet;
}
static __inline__ void HXAtomicIncINT32(INT32* p) { HXAtomicIncUINT32((UINT32*)p); }
static __inline__ void HXAtomicDecINT32(INT32* p) { HXAtomicDecUINT32((UINT32*)p); }
static __inline__ void HXAtomicAddINT32(INT32* p, INT32 n) { HXAtomicAddUINT32((UINT32*)p, (UINT32)n); }
static __inline__ void HXAtomicSubINT32(INT32* p, INT32 n) { HXAtomicSubUINT32((UINT32*)p, (UINT32)n); }
static __inline__ INT32 HXAtomicIncRetINT32(INT32* p) { return HXAtomicIncRetUINT32((UINT32*)p); }
static __inline__ INT32 HXAtomicDecRetINT32(INT32* p) { return HXAtomicDecRetUINT32((UINT32*)p); }
static __inline__ INT32 HXAtomicAddRetINT32(INT32* p, INT32 n) { return HXAtomicAddRetUINT32((UINT32*)p, (UINT32)n); }
static __inline__ INT32 HXAtomicSubRetINT32(INT32* p, INT32 n) { return HXAtomicSubRetUINT32((UINT32*)p, (UINT32)n); }
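/*
 * For comparison only (an aside, not part of the original header): on
 * GCC 4.1 and later the same return-the-new-value semantics are
 * available through the __sync builtins, with no hand-written assembly.
 */
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
static __inline__ UINT32
example_IncRetUINT32_sync(UINT32* pNum)       /* hypothetical name */
{
    return __sync_add_and_fetch(pNum, 1);     /* atomic; returns new value */
}
static __inline__ UINT32
example_SubRetUINT32_sync(UINT32* pNum, UINT32 ulNum)
{
    return __sync_sub_and_fetch(pNum, ulNum); /* atomic; returns new value */
}
#endif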
/***********************************************************************
* HP-UX / IA64 (Native compiler)
*
* Implementation Notes:
* A work-in-progress...
*/
#elif defined(_HPUX) && defined(_IA64)
#if defined(__cplusplus)
extern "C" {
#endif
UINT32 _HXAtomicIncRetUINT32 (UINT32* pNum);
UINT32 _HXAtomicDecRetUINT32 (UINT32* pNum);
UINT32 _HXAtomicAddRetUINT32 (UINT32* pNum, UINT32 ulNum);
UINT32 _HXAtomicSubRetUINT32 (UINT32* pNum, UINT32 ulNum);
#if defined(__cplusplus)
}
#endif
#define HXAtomicIncINT32(p) _HXAtomicIncRetUINT32((UINT32*)(p))
#define HXAtomicDecINT32(p) _HXAtomicDecRetUINT32((UINT32*)(p))
#define HXAtomicIncRetINT32(p) _HXAtomicIncRetUINT32((UINT32*)(p))
#define HXAtomicDecRetINT32(p) _HXAtomicDecRetUINT32((UINT32*)(p))
#define HXAtomicAddINT32(p,n) _HXAtomicAddRetUINT32((UINT32*)(p),(INT32)(n))
#define HXAtomicSubINT32(p,n) _HXAtomicSubRetUINT32((UINT32*)(p),(INT32)(n))
#define HXAtomicAddRetINT32(p,n) _HXAtomicAddRetUINT32((UINT32*)(p),(INT32)(n))
#define HXAtomicSubRetINT32(p,n) _HXAtomicSubRetUINT32((UINT32*)(p),(INT32)(n))
#define HXAtomicIncUINT32(p) _HXAtomicIncRetUINT32((p))
#define HXAtomicDecUINT32(p) _HXAtomicDecRetUINT32((p))
#define HXAtomicIncRetUINT32(p) _HXAtomicIncRetUINT32((p))
#define HXAtomicDecRetUINT32(p) _HXAtomicDecRetUINT32((p))
#define HXAtomicAddUINT32(p,n) _HXAtomicAddRetUINT32((p),(n))
#define HXAtomicSubUINT32(p,n) _HXAtomicSubRetUINT32((p),(n))
#define HXAtomicAddRetUINT32(p,n) _HXAtomicAddRetUINT32((p),(n))
#define HXAtomicSubRetUINT32(p,n) _HXAtomicSubRetUINT32((p),(n))
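/*
 * Reference model for the externs above (illustration only; NOT atomic.
 * The real HP-UX/IA64 routines must make this update atomic). Note that
 * every macro in this section, including the void-style Inc/Dec/Add/Sub
 * ones, expands to a value-returning call whose result may be ignored.
 */
static UINT32
example_AddRetModel(UINT32* pNum, UINT32 ulNum)   /* hypothetical name */
{
    *pNum += ulNum;   /* performed atomically by the real routine */
    return *pNum;     /* the NEW value is returned */
}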
/***********************************************************************
* Tru64 (OSF1) / Alpha (Native compiler)
*
* Implementation Notes:
*
* The Alpha CPU provides instructions to load-lock a value,
* modify it, and attempt to write it back. If the value has
* been modified by someone else since the load-lock occurred,
* the write will fail and you can check the status code to
* know whether you need to retry or not.
*
*/
#elif defined (__alpha)
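/*
 * Sketch of the load-lock / store-conditional retry pattern described
 * above, expressed with C11 atomics purely as an illustration (the
 * Tru64 implementation itself would use ldl_l/stl_c sequences, which
 * are not reproduced here). Guarded out so this branch still compiles.
 */
#if 0 /* illustration only */
#include <stdatomic.h>
static unsigned int
example_IncRet_llsc_style(_Atomic unsigned int* pNum)
{
    unsigned int ulOld, ulNew;
    do
    {
        ulOld = atomic_load(pNum);   /* "load-lock": read current value */
        ulNew = ulOld + 1;           /* modify the private copy */
    }   /* "store-conditional": fails and loops if *pNum changed meanwhile */
    while (!atomic_compare_exchange_weak(pNum, &ulOld, ulNew));
    return ulNew;
}
#endif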