iatomic.h
#ifndef __ALSA_IATOMIC_H
#define __ALSA_IATOMIC_H

#if defined(__i386__) || defined(__x86_64__)

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

#define ATOMIC_SMP_LOCK "lock ; "

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i) { (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v) ((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i) (((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v. Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(ATOMIC_SMP_LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(ATOMIC_SMP_LOCK "orl %0,%1" \
: : "r" (mask),"m" (*addr) : "memory")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 */
#ifdef __i386__
#define mb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")
#endif

#undef ATOMIC_SMP_LOCK

#define IATOMIC_DEFINED 1

#endif /* __i386__ */
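The x86/x86_64 primitives above are most commonly used for reference counting. The following sketch is not part of the header: the struct and function names are illustrative assumptions, and only atomic_t, atomic_set(), atomic_inc() and atomic_dec_and_test() come from the definitions above.

#include <stdlib.h>
#include "iatomic.h"	/* assumes this header is available locally */

struct shared_buf {			/* hypothetical refcounted object */
	atomic_t refcount;
	char data[256];
};

static struct shared_buf *shared_buf_new(void)
{
	struct shared_buf *b = calloc(1, sizeof(*b));
	if (b)
		atomic_set(&b->refcount, 1);	/* creator holds one reference */
	return b;
}

static void shared_buf_get(struct shared_buf *b)
{
	atomic_inc(&b->refcount);	/* "lock; incl" - safe against racing callers */
}

static void shared_buf_put(struct shared_buf *b)
{
	/* atomic_dec_and_test() returns non-zero only for the caller that
	 * drops the counter to zero, so exactly one caller frees the object. */
	if (atomic_dec_and_test(&b->refcount))
		free(b);
}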
#ifdef __ia64__

/*
 * On IA-64, counter must always be volatile to ensure that the
 * memory accesses are ordered.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i) ((atomic_t) { (i) })

#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

/* stripped version - we need only 4byte version */
#define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \
	__typeof__(ptr) _p_ = (ptr); \
	__typeof__(new) _n_ = (new); \
	unsigned long _o_, _r_; \
	_o_ = (unsigned int) (long) (old); \
	__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
	__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \
			      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
	(__typeof__(old)) _r_; \
})

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	int old, new;
	// CMPXCHG_BUGCHECK_DECL

	do {
		// CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg("acq", v, old, old + i, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	int old, new;
	// CMPXCHG_BUGCHECK_DECL

	do {
		// CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
	return new;
}

#define IA64_FETCHADD(tmp,v,n,sz) \
({ \
	switch (sz) { \
	case 4: \
		__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2" \
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
		break; \
	case 8: \
		__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2" \
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
		break; \
	} \
})

#define ia64_fetch_and_add(i,v) \
({ \
	unsigned long _tmp; \
	volatile __typeof__(*(v)) *_v = (v); \
	switch (i) { \
	case -16: IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break; \
	case  -8: IA64_FETCHADD(_tmp, _v,  -8, sizeof(*(v))); break; \
	case  -4: IA64_FETCHADD(_tmp, _v,  -4, sizeof(*(v))); break; \
	case  -1: IA64_FETCHADD(_tmp, _v,  -1, sizeof(*(v))); break; \
	case   1: IA64_FETCHADD(_tmp, _v,   1, sizeof(*(v))); break; \
	case   4: IA64_FETCHADD(_tmp, _v,   4, sizeof(*(v))); break; \
	case   8: IA64_FETCHADD(_tmp, _v,   8, sizeof(*(v))); break; \
	case  16: IA64_FETCHADD(_tmp, _v,  16, sizeof(*(v))); break; \
	} \
	(__typeof__(*v)) (_tmp + (i)); /* return new value */ \
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return ia64_atomic_add(i, v) < 0;
}

#define atomic_add_return(i,v) \
	((__builtin_constant_p(i) && \
	  (   (i ==  1) || (i ==  4) || (i ==  8) || (i ==  16) \
	   || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
	 ? ia64_fetch_and_add(i, &(v)->counter) \
	 : ia64_atomic_add(i, v))

#define atomic_sub_return(i,v) \
	((__builtin_constant_p(i) && \
	  (   (i ==  1) || (i ==  4) || (i ==  8) || (i ==  16) \
	   || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
	 ? ia64_fetch_and_add(-(i), &(v)->counter) \
	 : ia64_atomic_sub(i, v))

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) != 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

/*
 * Macros to force memory ordering. In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa. This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers. For that, mf.a needs to
 * be used. However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	__asm__ __volatile__ ("mf" ::: "memory")
#define rmb()	mb()
#define wmb()	mb()

#define IATOMIC_DEFINED 1

#endif /* __ia64__ */

#ifdef __alpha__

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )

#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

#define atomic_dec_return(v)		atomic_sub_return(1,(v))
#define atomic_inc_return(v)		atomic_add_return(1,(v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

#define mb() \
__asm__ __volatile__("mb": : :"memory")

#define rmb() \
__asm__ __volatile__("mb": : :"memory")

#define wmb() \
__asm__ __volatile__("wmb": : :"memory")

#define IATOMIC_DEFINED 1

#endif /* __alpha__ */
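The wmb()/rmb() description above maps directly onto the usual publish/consume pattern: write the data, issue wmb(), then set the flag; the reader checks the flag, issues rmb(), then reads the data. The sketch below is not part of the header; the payload/ready variables and function names are illustrative, and only atomic_t, ATOMIC_INIT(), atomic_read(), atomic_set(), wmb() and rmb() come from this file.

#include "iatomic.h"	/* assumes this header is available locally */

static int payload;				/* plain data written by the producer */
static atomic_t ready = ATOMIC_INIT(0);		/* publication flag */

static void producer(int value)
{
	payload = value;	/* 1. write the data */
	wmb();			/* 2. make the data visible before the flag */
	atomic_set(&ready, 1);	/* 3. publish */
}

static int consumer(int *out)
{
	if (!atomic_read(&ready))
		return 0;	/* nothing published yet */
	rmb();			/* order the flag read before the data read */
	*out = payload;
	return 1;
}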
#ifdef __powerpc__

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
extern void atomic_set_mask(unsigned long mask, unsigned long *addr);

#define SMP_ISYNC	"\n\tisync"

static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}
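The listing above breaks off inside the PowerPC block, but atomic_add_return() is already defined for the IA-64, Alpha and PowerPC variants shown, and its return-the-new-value semantics is what makes it useful for ID or sequence allocation. A minimal sketch, not part of the header; alloc_id() and next_id are assumed names:

#include "iatomic.h"	/* assumes this header is available locally */

static atomic_t next_id = ATOMIC_INIT(0);

/* Hand out unique, monotonically increasing IDs to concurrent callers.
 * atomic_add_return() performs the increment and returns the new value
 * in one atomic step, so racing callers never observe the same ID. */
static int alloc_id(void)
{
	return (int) atomic_add_return(1, &next_id);
}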