/* atomicbase.h — platform-specific atomic increment/decrement/add/subtract primitives */
/* Function-table dispatch: every operation routes through the global
 * g_AtomicOps operator table (populated elsewhere for this platform). */
#define HXAtomicAddRetINT32(p,n) g_AtomicOps._AddRetINT32((p),(n))
#define HXAtomicSubRetINT32(p,n) g_AtomicOps._SubRetINT32((p),(n))
/* Non-returning Add/Sub reuse the *Ret* entry points; the result is ignored. */
#define HXAtomicAddINT32(p,n) g_AtomicOps._AddRetINT32((p),(n))
#define HXAtomicSubINT32(p,n) g_AtomicOps._SubRetINT32((p),(n))
/* Inc/Dec are expressed as Add/Sub of the constant 1. */
#define HXAtomicIncUINT32(p) g_AtomicOps._AddRetUINT32((p),(UINT32)1)
#define HXAtomicDecUINT32(p) g_AtomicOps._SubRetUINT32((p),(UINT32)1)
#define HXAtomicIncRetUINT32(p) g_AtomicOps._AddRetUINT32((p),(UINT32)1)
#define HXAtomicDecRetUINT32(p) g_AtomicOps._SubRetUINT32((p),(UINT32)1)
#define HXAtomicAddRetUINT32(p,n) g_AtomicOps._AddRetUINT32((p),(n))
#define HXAtomicSubRetUINT32(p,n) g_AtomicOps._SubRetUINT32((p),(n))
#define HXAtomicAddUINT32(p,n) g_AtomicOps._AddRetUINT32((p),(n))
#define HXAtomicSubUINT32(p,n) g_AtomicOps._SubRetUINT32((p),(n))
/***********************************************************************
* SYMBIAN
*
* Implementation Notes:
*
* Note: This is an imcompletely-defined platform, be aware that
* not all standard HXAtomic operators are defined!
*
*/
#elif defined(_SYMBIAN)
/* Increment by 1 and return new value */
/* Atomically increment *pNum by 1 and return the new (post-increment) value.
 * User::LockedInc() yields the value held *before* the increment, so one is
 * added to produce the updated value. */
inline INT32
HXAtomicIncRetINT32(INT32* pNum)
{
    TInt nPrevious = User::LockedInc(*((TInt*)pNum));
    return nPrevious + 1;
}
/* Decrement by 1 and return new value */
/* Atomically decrement *pNum by 1 and return the new (post-decrement) value.
 * User::LockedDec() yields the value held *before* the decrement, so one is
 * subtracted to produce the updated value. */
inline INT32
HXAtomicDecRetINT32(INT32* pNum)
{
    TInt nPrevious = User::LockedDec(*((TInt*)pNum));
    return nPrevious - 1;
}
/* Increment by 1 and return new value */
/* Unsigned flavor: atomically increment *pNum by 1 and return the new value.
 * The counter is handed to the signed Symbian primitive via a TInt cast;
 * the pre-increment result is converted back and bumped by one. */
inline UINT32
HXAtomicIncRetUINT32(UINT32* pNum)
{
    TInt nPrevious = User::LockedInc(*((TInt*)pNum));
    return ((UINT32)nPrevious) + 1;
}
/* Decrement by 1 and return new value */
/* Unsigned flavor: atomically decrement *pNum by 1 and return the new value.
 * The counter is handed to the signed Symbian primitive via a TInt cast;
 * the pre-decrement result is converted back and reduced by one. */
inline UINT32
HXAtomicDecRetUINT32(UINT32* pNum)
{
    TInt nPrevious = User::LockedDec(*((TInt*)pNum));
    return ((UINT32)nPrevious) - 1;
}
/* Plain inc/dec (callers that ignore the result) reuse the *Ret* forms. */
#define HXAtomicIncINT32(p) HXAtomicIncRetINT32((p))
#define HXAtomicDecINT32(p) HXAtomicDecRetINT32((p))
#define HXAtomicIncUINT32(p) HXAtomicIncRetUINT32((p))
#define HXAtomicDecUINT32(p) HXAtomicDecRetUINT32((p))
#if 0
/*
 * Add and subtract operations are not implemented
 * at this time because there isn't an easy way to
 * do it using the facilities provided by Symbian.
 * Assembly will likely be needed.
 */
/* Add n and return new value */
inline INT32
HXAtomicAddRetINT32(INT32* pNum, INT32 n)
{
/* NOTE: intentionally empty stub — dead code under #if 0, never compiled */
}
/* Subtract n and return new value */
inline INT32
HXAtomicSubRetINT32(INT32* pNum, INT32 n)
{
/* NOTE: intentionally empty stub — dead code under #if 0, never compiled */
}
/* Add n and return new value */
inline UINT32
HXAtomicAddRetUINT32(UINT32* pNum, UINT32 n)
{
/* NOTE: intentionally empty stub — dead code under #if 0, never compiled */
}
/* Subtract n and return new value */
inline UINT32
HXAtomicSubRetUINT32(UINT32* pNum, UINT32 n)
{
/* NOTE: intentionally empty stub — dead code under #if 0, never compiled */
}
#define HXAtomicAddINT32(p,n) HXAtomicAddRetINT32((p),(n))
#define HXAtomicSubINT32(p,n) HXAtomicSubRetINT32((p),(n))
#define HXAtomicAddUINT32(p,n) HXAtomicAddRetUINT32((p),(n))
#define HXAtomicSubUINT32(p,n) HXAtomicSubRetUINT32((p),(n))
#endif
/***********************************************************************
* Linux / ARM (gcc)
*
* Implementation Notes:
*
* This implementation sacrifices being able to store the value
* 0x800000000 in the INT32 value, which is a special "busy" marker value.
* Since these are intended for use primarily with AddRef/Release and
* resource usage counters, this should be acceptable for now. If a counter
* is incremented to the point it would conflict with the flag, it is
* incremented one more to hop over it. The same in reverse for decrement.
*
* Basic design of the flag-based implementation:
* 1. Load a register with 0x80000000
* 2. _atomically_ swap it with the INT32 (critical!)
* 3. Compare what we got with 0x80000000
* 4. Branch if equal to #2
* 5. Increment (or decrement) the result
* 6. Compare to 0x80000000
* 7. Increment (or decrement) again if equal
* 8. Save the new value to the INT32's location in memory
* 9. Return new INT32 result if required
*
*/
#elif defined (_ARM) && defined (__GNUC__)
/* Increment by 1 */
/* Atomically increment *pNum by 1 (no return value).
 *
 * Busy-marker protocol: 0x80000000 is atomically swapped into *pNum (swp),
 * the old value is incremented and stored back; a competitor that swaps in
 * the marker spins until the real value reappears.  The counter is nudged
 * past 0x80000000 so it can never equal the marker itself.
 *
 * Fixes vs. original:
 *  - "beq 1" branched to absolute address 1; a GNU-as backward reference to
 *    the numeric label must be written "1b".
 *  - ulTmp was passed as an *input* operand yet written by the asm (and read
 *    uninitialized) — undefined behavior; it is now an early-clobber output.
 */
inline void
HXAtomicIncUINT32(UINT32* pNum)
{
    UINT32 ulTmp;
    __asm__ __volatile__(
        "        mov   %0, #0x80000000;\n"  /* Load the busy marker            */
        "1:      swp   %0, %0, [%1];\n"     /* Atomically swap *pNum and ulTmp */
        "        cmp   %0, #0x80000000;\n"  /* Is someone else using pNum?     */
        "        beq   1b;\n"               /* If so, spin back to label 1     */
        "        add   %0, %0, #1;\n"       /* Increment the value             */
        "        cmp   %0, #0x80000000;\n"  /* Landed on the marker value?     */
        "        addeq %0, %0, #1;\n"       /* If so, hop over it              */
        "        str   %0, [%1];\n"         /* Publish the new value to *pNum  */
        : "=&r" (ulTmp)   /* %0: scratch, written before inputs are dead */
        : "r" (pNum)      /* %1: address of the counter                  */
        : "cc", "memory"
    );
}
/* Decrement by 1 */
/* Atomically decrement *pNum by 1 (no return value).
 * Same busy-marker protocol as HXAtomicIncUINT32; the counter is nudged
 * past 0x80000000 going downward as well.
 *
 * Fixes vs. original: "beq 1" -> "beq 1b" (backward local-label reference),
 * and ulTmp moved from the input list to an early-clobber output (it was
 * written by the asm and read uninitialized — undefined behavior).
 */
inline void
HXAtomicDecUINT32(UINT32* pNum)
{
    UINT32 ulTmp;
    __asm__ __volatile__(
        "        mov   %0, #0x80000000;\n"  /* Load the busy marker            */
        "1:      swp   %0, %0, [%1];\n"     /* Atomically swap *pNum and ulTmp */
        "        cmp   %0, #0x80000000;\n"  /* Is someone else using pNum?     */
        "        beq   1b;\n"               /* If so, spin back to label 1     */
        "        sub   %0, %0, #1;\n"       /* Decrement the value             */
        "        cmp   %0, #0x80000000;\n"  /* Landed on the marker value?     */
        "        subeq %0, %0, #1;\n"       /* If so, hop over it              */
        "        str   %0, [%1];\n"         /* Publish the new value to *pNum  */
        : "=&r" (ulTmp)   /* %0: scratch, written before inputs are dead */
        : "r" (pNum)      /* %1: address of the counter                  */
        : "cc", "memory"
    );
}
/* Increment by 1 and return new value */
/* Atomically increment *pNum by 1 and return the new value.
 * Same busy-marker protocol as HXAtomicIncUINT32 above.
 *
 * Fix vs. original: "beq 1" branched to absolute address 1; the backward
 * reference to the numeric label must be written "1b" in GNU as.
 */
inline UINT32
HXAtomicIncRetUINT32(UINT32* pNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "        mov   %0, #0x80000000;\n"  /* Load the busy marker            */
        "1:      swp   %0, %0, [%1];\n"     /* Atomically swap *pNum and ulRet */
        "        cmp   %0, #0x80000000;\n"  /* Is someone else using pNum?     */
        "        beq   1b;\n"               /* If so, spin back to label 1     */
        "        add   %0, %0, #1;\n"       /* Increment the value             */
        "        cmp   %0, #0x80000000;\n"  /* Landed on the marker value?     */
        "        addeq %0, %0, #1;\n"       /* If so, hop over it              */
        "        str   %0, [%1];\n"         /* Publish the new value to *pNum  */
        : "=&r" (ulRet)
        : "r" (pNum)
        : "cc", "memory"
    );
    return ulRet;
}
/* Decrement by 1 and return new value */
/* Atomically decrement *pNum by 1 and return the new value.
 * Same busy-marker protocol as HXAtomicDecUINT32 above.
 *
 * Fix vs. original: "beq 1" -> "beq 1b" (GNU-as backward local-label
 * reference; the bare "1" branched to absolute address 1).
 */
inline UINT32
HXAtomicDecRetUINT32(UINT32* pNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "        mov   %0, #0x80000000;\n"  /* Load the busy marker            */
        "1:      swp   %0, %0, [%1];\n"     /* Atomically swap *pNum and ulRet */
        "        cmp   %0, #0x80000000;\n"  /* Is someone else using pNum?     */
        "        beq   1b;\n"               /* If so, spin back to label 1     */
        "        sub   %0, %0, #1;\n"       /* Decrement the value             */
        "        cmp   %0, #0x80000000;\n"  /* Landed on the marker value?     */
        "        subeq %0, %0, #1;\n"       /* If so, hop over it              */
        "        str   %0, [%1];\n"         /* Publish the new value to *pNum  */
        : "=&r" (ulRet)
        : "r" (pNum)
        : "cc", "memory"
    );
    return ulRet;
}
/* Add n */
/* Atomically add ulNum to *pNum (no return value).
 * Same busy-marker protocol; if the sum lands exactly on 0x80000000 it is
 * bumped by one more so the stored value never equals the marker.
 *
 * Fixes vs. original: "beq 1" -> "beq 1b" (backward local-label reference),
 * and ulTmp moved from the input list to an early-clobber output (it was
 * written by the asm and read uninitialized — undefined behavior).
 */
inline void
HXAtomicAddUINT32(UINT32* pNum, UINT32 ulNum)
{
    UINT32 ulTmp;
    __asm__ __volatile__(
        "        mov   %0, #0x80000000;\n"  /* Load the busy marker            */
        "1:      swp   %0, %0, [%1];\n"     /* Atomically swap *pNum and ulTmp */
        "        cmp   %0, #0x80000000;\n"  /* Is someone else using pNum?     */
        "        beq   1b;\n"               /* If so, spin back to label 1     */
        "        add   %0, %0, %2;\n"       /* Add ulNum to the value          */
        "        cmp   %0, #0x80000000;\n"  /* Landed on the marker value?     */
        "        addeq %0, %0, #1;\n"       /* If so, hop over it              */
        "        str   %0, [%1];\n"         /* Publish the new value to *pNum  */
        : "=&r" (ulTmp)            /* %0: scratch, written by the asm   */
        : "r" (pNum), "r" (ulNum)  /* %1: counter address, %2: addend   */
        : "cc", "memory"
    );
}
/* Subtract n */
/* Atomically subtract ulNum from *pNum (no return value).
 * Same busy-marker protocol; if the difference lands exactly on 0x80000000
 * it is reduced by one more so the stored value never equals the marker.
 *
 * Fixes vs. original: "beq 1" -> "beq 1b" (backward local-label reference),
 * and ulTmp moved from the input list to an early-clobber output (it was
 * written by the asm and read uninitialized — undefined behavior).
 */
inline void
HXAtomicSubUINT32(UINT32* pNum, UINT32 ulNum)
{
    UINT32 ulTmp;
    __asm__ __volatile__(
        "        mov   %0, #0x80000000;\n"  /* Load the busy marker             */
        "1:      swp   %0, %0, [%1];\n"     /* Atomically swap *pNum and ulTmp  */
        "        cmp   %0, #0x80000000;\n"  /* Is someone else using pNum?      */
        "        beq   1b;\n"               /* If so, spin back to label 1      */
        "        sub   %0, %0, %2;\n"       /* Subtract ulNum from the value    */
        "        cmp   %0, #0x80000000;\n"  /* Landed on the marker value?      */
        "        subeq %0, %0, #1;\n"       /* If so, hop over it               */
        "        str   %0, [%1];\n"         /* Publish the new value to *pNum   */
        : "=&r" (ulTmp)            /* %0: scratch, written by the asm      */
        : "r" (pNum), "r" (ulNum)  /* %1: counter address, %2: subtrahend  */
        : "cc", "memory"
    );
}
/* Add n and return new value */
/* Atomically add ulNum to *pNum and return the new value.
 * Same busy-marker protocol as HXAtomicAddUINT32 above.
 *
 * Fix vs. original: "beq 1" -> "beq 1b" (GNU-as backward local-label
 * reference; the bare "1" branched to absolute address 1).
 */
inline UINT32
HXAtomicAddRetUINT32(UINT32* pNum, UINT32 ulNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "        mov   %0, #0x80000000;\n"  /* Load the busy marker            */
        "1:      swp   %0, %0, [%1];\n"     /* Atomically swap *pNum and ulRet */
        "        cmp   %0, #0x80000000;\n"  /* Is someone else using pNum?     */
        "        beq   1b;\n"               /* If so, spin back to label 1     */
        "        add   %0, %0, %2;\n"       /* Add ulNum to the value          */
        "        cmp   %0, #0x80000000;\n"  /* Landed on the marker value?     */
        "        addeq %0, %0, #1;\n"       /* If so, hop over it              */
        "        str   %0, [%1];\n"         /* Publish the new value to *pNum  */
        : "=&r" (ulRet)
        : "r" (pNum), "r" (ulNum)
        : "cc", "memory"
    );
    return ulRet;
}
/* Subtract n and return new value */
/* Atomically subtract ulNum from *pNum and return the new value.
 * Same busy-marker protocol as HXAtomicSubUINT32 above.
 *
 * Fix vs. original: "beq 1" -> "beq 1b" (GNU-as backward local-label
 * reference; the bare "1" branched to absolute address 1).
 */
inline UINT32
HXAtomicSubRetUINT32(UINT32* pNum, UINT32 ulNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "        mov   %0, #0x80000000;\n"  /* Load the busy marker             */
        "1:      swp   %0, %0, [%1];\n"     /* Atomically swap *pNum and ulRet  */
        "        cmp   %0, #0x80000000;\n"  /* Is someone else using pNum?      */
        "        beq   1b;\n"               /* If so, spin back to label 1      */
        "        sub   %0, %0, %2;\n"       /* Subtract ulNum from the value    */
        "        cmp   %0, #0x80000000;\n"  /* Landed on the marker value?      */
        "        subeq %0, %0, #1;\n"       /* If so, hop over it               */
        "        str   %0, [%1];\n"         /* Publish the new value to *pNum   */
        : "=&r" (ulRet)
        : "r" (pNum), "r" (ulNum)
        : "cc", "memory"
    );
    return ulRet;
}
/* Signed wrappers: forward to the unsigned implementations above.
 * INT32* and UINT32* may alias (signed/unsigned variants of one type),
 * so the casts are well-defined; two's-complement wrap matches the
 * unsigned arithmetic bit-for-bit. */
inline void
HXAtomicIncINT32(INT32* p)
{
    HXAtomicIncUINT32((UINT32*)p);
}

inline void
HXAtomicDecINT32(INT32* p)
{
    HXAtomicDecUINT32((UINT32*)p);
}

inline void
HXAtomicAddINT32(INT32* p, INT32 n)
{
    HXAtomicAddUINT32((UINT32*)p, (UINT32)n);
}

inline void
HXAtomicSubINT32(INT32* p, INT32 n)
{
    HXAtomicSubUINT32((UINT32*)p, (UINT32)n);
}

inline INT32
HXAtomicIncRetINT32(INT32* p)
{
    return HXAtomicIncRetUINT32((UINT32*)p);
}

inline INT32
HXAtomicDecRetINT32(INT32* p)
{
    return HXAtomicDecRetUINT32((UINT32*)p);
}

inline INT32
HXAtomicAddRetINT32(INT32* p, INT32 n)
{
    return HXAtomicAddRetUINT32((UINT32*)p, (UINT32)n);
}

inline INT32
HXAtomicSubRetINT32(INT32* p, INT32 n)
{
    return HXAtomicSubRetUINT32((UINT32*)p, (UINT32)n);
}
/***********************************************************************
* Add new platforms above here
*/
#else
//
// Unsupported platform
//
# ifndef HELIX_CONFIG_DISABLE_ATOMIC_OPERATORS
// Defining HELIX_CONFIG_DISABLE_ATOMIC_OPERATORS will use the ++ and --
// operators in place of atomic operators in some places in the code. These
// operators are not thread-safe, and should only be used in the intermediary
// stages of porting.
# error "You need to create atomic dec/inc opers for your platform or #define HELIX_CONFIG_DISABLE_ATOMIC_OPERATORS"
# endif
#endif
/*************************************************************************/
/*
* Conditional override of InterlockedIncrement/Decrement
*
* Place this in your Umakefil/.pcf file to turn off atomic
* InterlockedIncrement/Decrement on a per-module basis,
* or place it in your umake profile for system-wide scope.
* If this is defined you'll still have access to the underlying
* HXAtomicxxx operators (if they exist for your platform),
* just that the specific InterlockedIncrement/InterlockedDecrement
* macros won't be defined to use them.
*/
#if !defined (HELIX_CONFIG_DISABLE_ATOMIC_OPERATORS)
/* Drop any earlier (e.g. platform SDK) definitions before redefining. */
#undef InterlockedIncrement
#undef InterlockedDecrement
// Since many classes (incorrectly) implement their refcount using LONG32
// rather than the proper ULONG32, we have to use the typecast for things
// to build on many platforms.
#define InterlockedIncrement(p) HXAtomicIncRetUINT32((UINT32*)(p))
#define InterlockedDecrement(p) HXAtomicDecRetUINT32((UINT32*)(p))
#define HAVE_INTERLOCKED_INCREMENT //so hxcom.h doesn't redefine these to ++/--
#endif /* !defined(HELIX_CONFIG_DISABLE_ATOMIC_OPERATORS) */
#endif /* _ATOMICBASE_H_ */