atomic_add_return-sles9.patch
Index: linux-2.6.5-7.283/include/asm-i386/atomic.h
===================================================================
--- linux-2.6.5-7.283.orig/include/asm-i386/atomic.h
+++ linux-2.6.5-7.283/include/asm-i386/atomic.h
@@ -2,6 +2,7 @@
 #define __ARCH_I386_ATOMIC__
 
 #include <linux/config.h>
+#include <linux/bitops.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -176,6 +177,47 @@ static __inline__ int atomic_add_negativ
 	return c;
 }
 
+/**
+ * atomic_add_return - add and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+	int __i;
+#ifdef CONFIG_M386
+	unsigned long flags;
+	if(unlikely(boot_cpu_data.x86==3))
+		goto no_xadd;
+#endif
+	/* Modern 486+ processor */
+	__i = i;
+	__asm__ __volatile__(
+		LOCK_PREFIX "xaddl %0, %1"
+		:"+r" (i), "+m" (v->counter)
+		: : "memory");
+	return i + __i;
+
+#ifdef CONFIG_M386
+no_xadd: /* Legacy 386 processor */
+	local_irq_save(flags);
+	__i = atomic_read(v);
+	atomic_set(v, i + __i);
+	local_irq_restore(flags);
+	return i + __i;
+#endif
+}
+
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+	return atomic_add_return(-i,v);
+}
+
+#define atomic_inc_return(v) (atomic_add_return(1,v))
+#define atomic_dec_return(v) (atomic_sub_return(1,v))
+
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
 __asm__ __volatile__(LOCK "andl %0,%1" \
Index: linux-2.6.5-7.283/include/asm-x86_64/atomic.h
===================================================================
--- linux-2.6.5-7.283.orig/include/asm-x86_64/atomic.h
+++ linux-2.6.5-7.283/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
 #define __ARCH_X86_64_ATOMIC__
 
 #include <linux/config.h>
+#include <linux/bitops.h>
 
 /* atomic_t should be 32 bit signed type */
 
@@ -178,6 +179,31 @@ static __inline__ int atomic_add_negativ
 	return c;
 }
 
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+	int __i = i;
+	__asm__ __volatile__(
+		LOCK_PREFIX "xaddl %0, %1"
+		:"+r" (i), "+m" (v->counter)
+		: : "memory");
+	return i + __i;
+}
+
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+	return atomic_add_return(-i,v);
+}
+
+#define atomic_inc_return(v) (atomic_add_return(1,v))
+#define atomic_dec_return(v) (atomic_sub_return(1,v))
+
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
 __asm__ __volatile__(LOCK "andl %0,%1" \
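For reference, here is a minimal userspace sketch of the xadd technique the patch relies on: lock xaddl atomically adds the register operand into the memory operand and hands back the old memory value in the register, so returning "old value + delta" gives the post-add result without a separate, racy re-read. This sketch is illustrative only and not part of the patch: it operates on a plain int rather than the kernel's atomic_t, the name my_add_return is invented, and it assumes GCC inline assembly on an x86 or x86-64 target.

#include <stdio.h>

/* Illustrative userspace analogue of the patched atomic_add_return().
 * my_add_return and the plain int counter are assumptions, not kernel API. */
static inline int my_add_return(int delta, int *counter)
{
	int __d = delta;              /* remember the delta */
	__asm__ __volatile__(
		"lock; xaddl %0, %1"  /* %0 <- old *counter; *counter += delta */
		: "+r" (delta), "+m" (*counter)
		: : "memory");
	return delta + __d;           /* old value + delta = new value */
}

int main(void)
{
	int v = 40;
	printf("%d\n", my_add_return(2, &v)); /* prints the new value, 42 */
	printf("%d\n", v);                    /* counter now holds 42 */
	return 0;
}

The no_xadd path in the i386 hunk exists because the 80386 lacks the xadd instruction (it first appeared on the 486); there the patch falls back to disabling local interrupts around a plain read-modify-write, which suffices because Linux does not support SMP on 386-class processors.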