
atomicbase.h

From: a media player for Linux

Page 1 of 4
             mov  eax, pNum              ; Load the pointer into a register
        lock inc  dword ptr [eax]        ; Atomically increment *pNum
    }
}

/* Decrement by 1 */
static __inline void
HXAtomicDecUINT32(UINT32* pNum)
{
        // register usage summary:
        //   eax - pointer to the value we're modifying
    _asm
    {
             mov  eax, pNum              ; Load the pointer into a register
        lock dec  dword ptr [eax]        ; Atomically decrement *pNum
    }
}

/* Increment by 1 and return new value */
static __inline UINT32
HXAtomicIncRetUINT32(UINT32* pNum)
{
    volatile UINT32 ulRet;
        // register usage summary:
        //   eax - pointer to the value we're modifying
        //   ebx - work register
    _asm
    {
             mov  eax, pNum              ; Load the pointer into a register
             mov  ebx, 0x1               ; Load increment amount into a register
        lock xadd dword ptr [eax], ebx   ; Increment *pNum; ebx gets old value
             inc  ebx                    ; Increment old value
             mov  ulRet, ebx             ; Set the return value
    }
    return ulRet;
}

/* Decrement by 1 and return new value */
static __inline UINT32
HXAtomicDecRetUINT32(UINT32* pNum)
{
    volatile UINT32 ulRet;
        // register usage summary:
        //   eax - pointer to the value we're modifying
        //   ebx - work register
        // note: we increment by 0xffffffff to decrement by 1
    _asm
    {
             mov  eax, pNum              ; Load the pointer into a register
             mov  ebx, 0xffffffff        ; Load decrement amount into a register
        lock xadd dword ptr [eax], ebx   ; Decrement *pNum; ebx gets old value
             dec  ebx                    ; Decrement old value
             mov  ulRet, ebx             ; Set the return value
    }
    return ulRet;
}

/* Add n */
static __inline void
HXAtomicAddUINT32(UINT32* pNum, UINT32 ulNum)
{
        // register usage summary:
        //   eax - pointer to the value we're modifying
        //   ebx - work register
    _asm
    {
             mov  eax, pNum              ; Load the pointer into a register
             mov  ebx, ulNum             ; Load increment amount into a register
        lock add  dword ptr [eax], ebx   ; Increment *pNum by ulNum
    }
}

/* Subtract n */
static __inline void
HXAtomicSubUINT32(UINT32* pNum, UINT32 ulNum)
{
        // register usage summary:
        //   eax - pointer to the value we're modifying
        //   ebx - work register
    _asm
    {
             mov  eax, pNum              ; Load the pointer into a register
             mov  ebx, ulNum             ; Load decrement amount into a register
        lock sub  dword ptr [eax], ebx   ; Atomically decrement *pNum by ulNum
    }
}

/* Add n and return new value */
static __inline UINT32
HXAtomicAddRetUINT32(UINT32* pNum, UINT32 ulNum)
{
    volatile UINT32 ulRet;
        // register usage summary:
        //   eax - pointer to the value we're modifying
        //   ebx - work register
        //   ecx - work register #2
    _asm
    {
             mov  eax, pNum              ; Load the pointer into a register
             mov  ebx, ulNum             ; Load increment amount into a register
             mov  ecx, ebx               ; Copy ebx into ecx
        lock xadd dword ptr [eax], ecx   ; Increment *pNum; ecx gets old value
             add  ecx, ebx               ; Add ulNum to it
             mov  ulRet, ecx             ; Save result in ulRet
    }
    return ulRet;
}

/* Subtract n and return new value */
static __inline UINT32
HXAtomicSubRetUINT32(UINT32* pNum, UINT32 ulNum)
{
    volatile UINT32 ulRet;
        // register usage summary:
        //   eax - pointer to the value we're modifying
        //   ebx - work register
        //   ecx - work register #2
    _asm
    {
             mov  eax, pNum              ; Load the pointer into a register
             mov  ebx, ulNum             ; Load decrement amount into a register
             mov  ecx, 0x0               ; Zero out ecx
             sub  ecx, ebx               ; Compute -(ulNum), saving in ecx
        lock xadd dword ptr [eax], ecx   ; Decrement *pNum; ecx gets old value
             sub  ecx, ebx               ; Subtract ulNum from it
             mov  ulRet, ecx             ; Save result in ulRet
    }
    return ulRet;
}

static __inline void HXAtomicIncINT32(INT32* p)              { HXAtomicIncUINT32((UINT32*)p); }
static __inline void HXAtomicDecINT32(INT32* p)              { HXAtomicDecUINT32((UINT32*)p); }
static __inline void HXAtomicAddINT32(INT32* p, INT32 n)     { HXAtomicAddUINT32((UINT32*)p, (UINT32)n); }
static __inline void HXAtomicSubINT32(INT32* p, INT32 n)     { HXAtomicSubUINT32((UINT32*)p, (UINT32)n); }
static __inline INT32 HXAtomicIncRetINT32(INT32* p)          { return HXAtomicIncRetUINT32((UINT32*)p); }
static __inline INT32 HXAtomicDecRetINT32(INT32* p)          { return HXAtomicDecRetUINT32((UINT32*)p); }
static __inline INT32 HXAtomicAddRetINT32(INT32* p, INT32 n) { return HXAtomicAddRetUINT32((UINT32*)p, (UINT32)n); }
static __inline INT32 HXAtomicSubRetINT32(INT32* p, INT32 n) { return HXAtomicSubRetUINT32((UINT32*)p, (UINT32)n); }
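/*
 * Usage sketch (illustrative only, not part of atomicbase.h): the *Ret
 * variants return the value *after* the operation, which is exactly what a
 * reference count needs in order to tell when the last reference is gone.
 * The RefObj type and its functions below are hypothetical names used only
 * for this example.
 *
 *     typedef struct { UINT32 ulRefCount; void* pData; } RefObj;
 *
 *     static void RefObj_AddRef(RefObj* pObj)
 *     {
 *         HXAtomicIncUINT32(&pObj->ulRefCount);             // atomic ++refcount
 *     }
 *
 *     static void RefObj_Release(RefObj* pObj)
 *     {
 *         if (HXAtomicDecRetUINT32(&pObj->ulRefCount) == 0) // atomic --refcount, returns new value
 *         {
 *             free(pObj->pData);                            // last reference dropped
 *             free(pObj);
 *         }
 *     }
 */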
/***********************************************************************
 * Intel x86 (gcc) / Unix  -- i486 and higher - 32-bit
 *
 * Implementation Notes:
 *   'xadd' is only available in the 486 series and later, not the 386.
 *   There is no 'xsub' counterpart; you have to negate the operand
 *   and use 'xadd'.  Note the use of the 'lock' prefix to ensure
 *   certain operations occur atomically.
 *
 *   OpenBSD is excluded since the standard assembler on x86 systems
 *   can't handle the xadd instruction.
 *
 */
#elif defined(__GNUC__) && !defined(_OPENBSD) && \
      (__GNUC__>2 || (__GNUC__==2 && __GNUC_MINOR__>=95)) && \
      ( defined (__i486__) || defined (__i586__) || defined (__i686__) || \
        defined (__pentium__) || defined (__pentiumpro__))

/* Increment by 1 */
static __inline__ void
HXAtomicIncUINT32(UINT32* pNum)
{
    __asm__ __volatile__(
        "lock incl (%0);"                // atomically add 1 to *pNum
        : /* no output */
        : "r" (pNum)
        : "cc", "memory"
        );
}

/* Decrement by 1 */
static __inline__ void
HXAtomicDecUINT32(UINT32* pNum)
{
    __asm__ __volatile__(
        "lock decl (%0);"                // atomically add -1 to *pNum
        : /* no output */
        : "r" (pNum)
        : "cc", "memory"
        );
}

/* Increment by 1 and return new value */
static __inline__ UINT32
HXAtomicIncRetUINT32(UINT32* pNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "lock xaddl %0, (%1);"           // atomically add 1 to *pNum
        "     inc   %0;"                 // old value in %0, increment it
        : "=r" (ulRet)
        : "r" (pNum), "0" (0x1)
        : "cc", "memory"
        );
    return ulRet;
}

/* Decrement by 1 and return new value */
static __inline__ UINT32
HXAtomicDecRetUINT32(UINT32* pNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "lock xaddl %0, (%1);"           // atomically add -1 to *pNum
        "     dec   %0;"                 // old value in %0, decrement it
        : "=r" (ulRet)
        : "r" (pNum), "0" (-1)
        : "cc", "memory"
        );
    return ulRet;
}

/* Add n */
static __inline__ void
HXAtomicAddUINT32(UINT32* pNum, UINT32 ulNum)
{
    __asm__ __volatile__(
        "lock addl %1, (%0);"            // atomically add ulNum to *pNum
        : /* no output */
        : "r" (pNum), "r" (ulNum)
        : "cc", "memory"
        );
}

/* Subtract n */
static __inline__ void
HXAtomicSubUINT32(UINT32* pNum, UINT32 ulNum)
{
    __asm__ __volatile__(
        "lock subl %1, (%0);"            // atomically subtract ulNum from *pNum
        : /* no output */
        : "r" (pNum), "r" (ulNum)
        : "cc", "memory"
        );
}

/* Add n and return new value */
static __inline__ UINT32
HXAtomicAddRetUINT32(UINT32* pNum, UINT32 ulNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "     mov   %2, %0;"             // copy ulNum into %0
        "lock xaddl %0, (%1);"           // atomically add ulNum to *pNum
        "     add   %2, %0;"             // old value in %0, add ulNum
        : "=r" (ulRet)
        : "r" (pNum), "r" (ulNum), "0" (0)
        : "cc", "memory"
        );
    return ulRet;
}

/* Subtract n and return new value */
static __inline__ UINT32
HXAtomicSubRetUINT32(UINT32* pNum, UINT32 ulNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "     sub   %2, %0;"             // negate ulNum, saving in %0
        "lock xaddl %0, (%1);"           // atomically add -(ulNum) to *pNum
        "     sub   %2, %0;"             // old value in %0, subtract ulNum
        : "=r" (ulRet)
        : "r" (pNum), "r" (ulNum), "0" (0)
        : "cc", "memory"
        );
    return ulRet;
}

static __inline__ void HXAtomicIncINT32(INT32* p)              { HXAtomicIncUINT32((UINT32*)p); }
static __inline__ void HXAtomicDecINT32(INT32* p)              { HXAtomicDecUINT32((UINT32*)p); }
static __inline__ void HXAtomicAddINT32(INT32* p, INT32 n)     { HXAtomicAddUINT32((UINT32*)p, (UINT32)n); }
static __inline__ void HXAtomicSubINT32(INT32* p, INT32 n)     { HXAtomicSubUINT32((UINT32*)p, (UINT32)n); }
static __inline__ INT32 HXAtomicIncRetINT32(INT32* p)          { return HXAtomicIncRetUINT32((UINT32*)p); }
static __inline__ INT32 HXAtomicDecRetINT32(INT32* p)          { return HXAtomicDecRetUINT32((UINT32*)p); }
static __inline__ INT32 HXAtomicAddRetINT32(INT32* p, INT32 n) { return HXAtomicAddRetUINT32((UINT32*)p, (UINT32)n); }
static __inline__ INT32 HXAtomicSubRetINT32(INT32* p, INT32 n) { return HXAtomicSubRetUINT32((UINT32*)p, (UINT32)n); }
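/*
 * Alternative sketch (illustrative only, not part of atomicbase.h): on
 * GCC 4.1 and later the same subtract-and-return-new-value behaviour is
 * available through the __sync builtins, with no hand-written inline
 * assembly; when the new value is needed the compiler typically emits the
 * same negate-and-xadd sequence described above, since x86 has no 'xsub'.
 * The _builtin suffix is a hypothetical name used only for this example.
 *
 *     static __inline__ UINT32
 *     HXAtomicSubRetUINT32_builtin(UINT32* pNum, UINT32 ulNum)
 *     {
 *         return __sync_sub_and_fetch(pNum, ulNum);  // *pNum -= ulNum; returns the new value
 *     }
 */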
/***********************************************************************
 * Intel x86/amd64/x86_64 (gcc) / Unix  -- 64-bit
 *
 * Implementation Notes:
 *
 */
#elif defined(__GNUC__) && (defined (__amd64__) || defined (__x86_64__))

/* Increment by 1 */
static __inline__ void
HXAtomicIncUINT32(UINT32* pNum)
{
    __asm__ __volatile__(
        "lock incl (%%rax);"             // atomically add 1 to *pNum
        : /* no output */
        : "a" (pNum)
        : "cc", "memory"
        );
}

/* Decrement by 1 */
static __inline__ void
HXAtomicDecUINT32(UINT32* pNum)
{
    __asm__ __volatile__(
        "lock decl (%%rax);"             // atomically add -1 to *pNum
        : /* no output */
        : "a" (pNum)
        : "cc", "memory"
        );
}

/* Increment by 1 and return new value */
static __inline__ UINT32
HXAtomicIncRetUINT32(UINT32* pNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "lock xaddl %%ebx, (%%rax);"     // atomically add 1 to *pNum
        "     incl  %%ebx;"              // old value in %%ebx, increment it
        : "=b" (ulRet)
        : "a" (pNum), "b" (0x1)
        : "cc", "memory"
        );
    return ulRet;
}

/* Decrement by 1 and return new value */
static __inline__ UINT32
HXAtomicDecRetUINT32(UINT32* pNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "lock xaddl %%ebx, (%%rax);"     // atomically add -1 to *pNum
        "     decl  %%ebx;"              // old value in %%ebx, decrement it
        : "=b" (ulRet)
        : "a" (pNum), "b" (-1)
        : "cc", "memory"
        );
    return ulRet;
}

/* Add n */
static __inline__ void
HXAtomicAddUINT32(UINT32* pNum, UINT32 ulNum)
{
    __asm__ __volatile__(
        "lock addl %%ebx, (%%rax);"      // atomically add ulNum to *pNum
        : /* no output */
        : "a" (pNum), "b" (ulNum)
        : "cc", "memory"
        );
}

/* Subtract n */
static __inline__ void
HXAtomicSubUINT32(UINT32* pNum, UINT32 ulNum)
{
    __asm__ __volatile__(
        "lock subl %%ebx, (%%rax);"      // atomically subtract ulNum from *pNum
        : /* no output */
        : "a" (pNum), "b" (ulNum)
        : "cc", "memory"
        );
}

/* Add n and return new value */
static __inline__ UINT32
HXAtomicAddRetUINT32(UINT32* pNum, UINT32 ulNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "     movl  %%ebx, %%ecx;"       // copy ulNum into ecx
        "lock xaddl %%ecx, (%%rax);"     // atomically add ulNum to *pNum
        "     addl  %%ebx, %%ecx;"       // old value in %%ecx, add ulNum
        : "=c" (ulRet)
        : "a" (pNum), "b" (ulNum), "c" (0)
        : "cc", "memory"
        );
    return ulRet;
}

/* Subtract n and return new value */
static __inline__ UINT32
HXAtomicSubRetUINT32(UINT32* pNum, UINT32 ulNum)
{
    volatile UINT32 ulRet;
    __asm__ __volatile__(
        "     subl  %%ebx, %%ecx;"       // negate ulNum, saving in ecx
        "lock xaddl %%ecx, (%%rax);"     // atomically add -(ulNum) to *pNum
        "     subl  %%ebx, %%ecx;"       // old value in %%ecx, subtract ulNum
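
The listing continues on the next pages. As a minimal usage sketch (not part of atomicbase.h), assuming the complete header defines the HXAtomic* functions for the build target and that UINT32 comes from the project's own type headers: several threads bump a single shared counter through HXAtomicIncUINT32, and the lock-prefixed increment keeps the final total exact. The thread count, iteration count, and use of pthreads are choices made only for this example.

#include <pthread.h>
#include <stdio.h>
#include "atomicbase.h"                      /* the header listed above */

#define NUM_THREADS 4
#define NUM_ITERS   100000

static UINT32 g_ulCounter = 0;               /* shared counter, touched only via HXAtomic* */

static void* worker(void* pArg)
{
    int i;
    (void)pArg;
    for (i = 0; i < NUM_ITERS; i++)
        HXAtomicIncUINT32(&g_ulCounter);     /* lock-prefixed increment: no lost updates */
    return 0;
}

int main(void)
{
    pthread_t threads[NUM_THREADS];
    int i;
    for (i = 0; i < NUM_THREADS; i++)
        pthread_create(&threads[i], 0, worker, 0);
    for (i = 0; i < NUM_THREADS; i++)
        pthread_join(threads[i], 0);

    /* With atomic increments this always prints 400000. */
    printf("counter = %u\n", g_ulCounter);
    return 0;
}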
