⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 atomic.h

📁 glibc 库, 不仅可以学习使用库函数,还可以学习函数的具体实现,是提高功力的好资料
💻 H
📖 第 1 页 / 共 2 页
字号:
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (lock "addw %w1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" (value), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (lock "addl %1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" (value), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else								      \
      {									      \
	__typeof (value) __addval = (value);				      \
	__typeof (mem) __memp = (mem);					      \
	__typeof (*mem) __oldval = *__memp;				      \
	__typeof (*mem) __tmpval;					      \
	do								      \
	  __tmpval = __oldval;						      \
	while ((__oldval = pfx##_compare_and_exchange_val_64_acq	      \
		(__memp, __oldval + __addval, __oldval)) == __tmpval);	      \
      }									      \
  } while (0)

/* Atomically add VALUE to *MEM, always using the full lock prefix.  */
#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, __arch, mem, value)

/* Conditional lock prefix for the catomic_* variants: test the
   multiple_threads flag in the TCB at %gs (passed as immediate
   operand %P3) and skip the "lock" byte when the process is still
   single-threaded.  */
#define __arch_add_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

/* Add VALUE to *MEM; lock prefix applied only when multithreaded.  */
#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, __arch_c, mem, value)

/* Atomically add VALUE to *MEM and return nonzero iff the result is
   negative (sign flag from the add).  Only 1-, 2- and 4-byte objects
   are supported; anything else aborts.  */
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "iq" (value), "m" (*mem));			      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" (value), "m" (*mem));			      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" (value), "m" (*mem));			      \
     else								      \
       abort ();							      \
     __result; })

/* Atomically add VALUE to *MEM and return nonzero iff the result is
   zero (zero flag from the add).  Only 1-, 2- and 4-byte objects are
   supported; anything else aborts.  */
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "iq" (value), "m" (*mem));			      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" (value), "m" (*mem));			      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" (value), "m" (*mem));			      \
     else								      \
       abort ();							      \
     __result; })

/* Shared body for atomic_increment/catomic_increment.  LOCK is the
   (possibly conditional) lock-prefix string; PFX selects which
   *_compare_and_exchange_val_64_acq family implements the 8-byte
   CAS-loop fallback.  */
#define __arch_increment_body(lock,  pfx, mem) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (lock "incb %b0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (lock "incw %w0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (lock "incl %0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else								      \
      {									      \
	__typeof (mem) __memp = (mem);					      \
	__typeof (*mem) __oldval = *__memp;				      \
	__typeof (*mem) __tmpval;					      \
	do								      \
	  __tmpval = __oldval;						      \
	while ((__oldval = pfx##_compare_and_exchange_val_64_acq	      \
		(__memp, __oldval + 1, __oldval)) == __tmpval);		      \
      }									      \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem)

/* Conditional lock prefix; multiple_threads is operand %P2 here since
   the increment asm has one input operand fewer than the add asm.  */
#define __arch_increment_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, __arch_c, mem)

/* Atomically increment *MEM and return nonzero iff the new value is
   zero.  Only 1-, 2- and 4-byte objects are supported.  */
#define atomic_increment_and_test(mem) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "incb %0; sete %b1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "incw %0; sete %w1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1"			      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else								      \
       abort ();							      \
     __result; })

/* Shared body for atomic_decrement/catomic_decrement; mirrors
   __arch_increment_body but subtracts one.  */
#define __arch_decrement_body(lock, pfx, mem) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (lock "decb %b0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (lock "decw %w0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (lock "decl %0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else								      \
      {									      \
	__typeof (mem) __memp = (mem);					      \
	__typeof (*mem) __oldval = *__memp;				      \
	__typeof (*mem) __tmpval;					      \
	do								      \
	  __tmpval = __oldval;						      \
	while ((__oldval = pfx##_compare_and_exchange_val_64_acq	      \
		(__memp, __oldval - 1, __oldval)) == __tmpval);		      \
      }									      \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)

/* Conditional lock prefix; multiple_threads is operand %P2 here.  */
#define __arch_decrement_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem)

/* Atomically decrement *MEM and return nonzero iff the new value is
   zero.  Only 1-, 2- and 4-byte objects are supported.  */
#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1"			      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else								      \
       abort ();							      \
     __result; })

/* Atomically OR the mask (1 << BIT) into *MEM.  */
#define atomic_bit_set(mem, bit) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0"			      \
			: "=m" (*mem)					      \
			: "m" (*mem), "iq" (1 << (bit)));		      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0"			      \
			: "=m" (*mem)					      \
			: "m" (*mem), "ir" (1 << (bit)));		      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (LOCK_PREFIX "orl %2, %0"			      \
			: "=m" (*mem)					      \
			: "m" (*mem), "ir" (1 << (bit)));		      \
    else								      \
      abort ();								      \
  } while (0)

/* Atomically set bit number BIT in *MEM and return its previous
   value (the carry flag produced by bts).  */
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0"		      \
			 : "=q" (__result), "=m" (*mem)			      \
			 : "m" (*mem), "ir" (bit));			      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0"		      \
			 : "=q" (__result), "=m" (*mem)			      \
			 : "m" (*mem), "ir" (bit));			      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0"		      \
			 : "=q" (__result), "=m" (*mem)			      \
			 : "m" (*mem), "ir" (bit));			      \
     else								      \
       abort ();							      \
     __result; })

/* Spin-wait hint: "rep; nop" encodes the pause instruction.  */
#define atomic_delay() asm ("rep; nop")

/* Atomically AND MASK into *MEM.  */
#define atomic_and(mem, mask) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (LOCK_PREFIX "andb %b1, %0"			      \
			: "=m" (*mem)					      \
			: "iq" (mask), "m" (*mem));			      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (LOCK_PREFIX "andw %w1, %0"			      \
			: "=m" (*mem)					      \
			: "ir" (mask), "m" (*mem));			      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (LOCK_PREFIX "andl %1, %0"			      \
			: "=m" (*mem)					      \
			: "ir" (mask), "m" (*mem));			      \
    else								      \
      abort ();								      \
  } while (0)

/* Shared body for atomic_or/catomic_or.  */
#define __arch_or_body(lock, mem, mask) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (lock "orb %b1, %0"				      \
			: "=m" (*mem)					      \
			: "iq" (mask), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (lock "orw %w1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" (mask), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (lock "orl %1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" (mask), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else								      \
      abort ();								      \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

/* Conditional lock prefix; multiple_threads is operand %P3 here.  */
#define __arch_or_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -