
📄 mutex.h

📁 This is the resip protocol stack (from abroad).
📖 Page 1 of 3
#define	MUTEX_ALIGN		sizeof(unsigned int)
#endif

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_INIT(x)		0
#define	MUTEX_SET(tsl)		(*(tsl) = 1)
#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#endif
#endif

/*********************************************************************
 * Win32
 *********************************************************************/
#if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC)
#define	MUTEX_FIELDS							\
	LONG tas;							\
	LONG nwaiters;							\
	u_int32_t id;	/* ID used for creating events */		\

#if defined(LOAD_ACTUAL_MUTEX_CODE)
#define	MUTEX_SET(tsl)		(!InterlockedExchange((PLONG)tsl, 1))
#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)

/*
 * From Intel's performance tuning documentation (and see SR #6975):
 * ftp://download.intel.com/design/perftool/cbts/appnotes/sse2/w_spinlock.pdf
 *
 * "For this reason, it is highly recommended that you insert the PAUSE
 * instruction into all spin-wait code immediately. Using the PAUSE
 * instruction does not affect the correctness of programs on existing
 * platforms, and it improves performance on Pentium 4 processor platforms."
 */
#ifdef HAVE_MUTEX_WIN32
#ifndef _WIN64
#define	MUTEX_PAUSE		{__asm{_emit 0xf3}; __asm{_emit 0x90}}
#endif
#endif
#ifdef HAVE_MUTEX_WIN32_GCC
#define	MUTEX_PAUSE		asm volatile ("rep; nop" : : );
#endif
#endif
#endif

/*********************************************************************
 * 68K/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_SET_TEST	1		/* gcc/68K: 0 is clear, 1 is set. */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	int __r;							\
	    asm volatile("tas  %1; \n					\
			  seq  %0"					\
		: "=dm" (__r), "=m" (*__l)				\
		: "1" (*__l)						\
		);							\
	__r & 1;							\
})

#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * ALPHA/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
typedef u_int32_t tsl_t;

#ifndef	MUTEX_ALIGN
#define	MUTEX_ALIGN	4
#endif

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * For gcc/alpha.  Should return 0 if could not acquire the lock, 1 if
 * lock was acquired properly.
 */
static inline int
MUTEX_SET(tsl_t *tsl) {
	register tsl_t *__l = tsl;
	register tsl_t __r;
	asm volatile(
		"1:	ldl_l	%0,%2\n"
		"	blbs	%0,2f\n"
		"	or	$31,1,%0\n"
		"	stl_c	%0,%1\n"
		"	beq	%0,3f\n"
		"	mb\n"
		"	br	3f\n"
		"2:	xor	%0,%0\n"
		"3:"
		: "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
	return __r;
}

/*
 * Unset mutex. Judging by Alpha Architecture Handbook, the mb instruction
 * might be necessary before unlocking
 */
static inline int
MUTEX_UNSET(tsl_t *tsl) {
	asm volatile("	mb\n");
	return *tsl = 0;
}

#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * Tru64/cc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_TRU64_CC_ASSEMBLY
typedef volatile u_int32_t tsl_t;

#ifndef	MUTEX_ALIGN
#define	MUTEX_ALIGN	4
#endif

#ifdef LOAD_ACTUAL_MUTEX_CODE
#include <alpha/builtins.h>
#define	MUTEX_SET(tsl)		(__LOCK_LONG_RETRY((tsl), 1) != 0)
#define	MUTEX_UNSET(tsl)	(__UNLOCK_LONG(tsl))
#define	MUTEX_INIT(tsl)		(MUTEX_UNSET(tsl), 0)
#endif
#endif
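/*
 * [Editor's note, not part of the original header]  The macros and
 * functions above only supply the raw per-platform test-and-set
 * primitive; callers are expected to spin on them, ideally issuing the
 * PAUSE hint discussed in the Win32 section between attempts.  A minimal
 * sketch of such a caller follows, guarded by #if 0 so it cannot affect
 * compilation; the function name spin_acquire and the empty MUTEX_PAUSE
 * fallback are illustrative assumptions, not definitions from this header.
 */
#if 0	/* illustrative sketch only */
#ifndef MUTEX_PAUSE
#define	MUTEX_PAUSE		/* no pause hint available on this platform */
#endif

static inline void
spin_acquire(tsl_t *tsl)
{
	/* Retry the test-and-set until it reports the lock was acquired. */
	while (!MUTEX_SET(tsl))
		MUTEX_PAUSE;
}
#endif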
/*********************************************************************
 * ARM/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_SET_TEST	1		/* gcc/arm: 0 is clear, 1 is set. */
#define	MUTEX_SET(tsl) ({						\
	int __r;							\
	asm volatile(							\
		"swpb	%0, %1, [%2]\n\t"				\
		"eor	%0, %0, #1\n\t"					\
	    : "=&r" (__r)						\
	    : "r" (1), "r" (tsl)					\
	    );								\
	__r & 1;							\
})

#define	MUTEX_UNSET(tsl)	(*(volatile tsl_t *)(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * HPPA/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
typedef u_int32_t tsl_t;

#ifndef	MUTEX_ALIGN
#define	MUTEX_ALIGN	16
#define	HPUX_MUTEX_PAD	 8
#endif

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
 * The 32-bit word used by that instruction must be 16-byte aligned.  We could
 * use the "aligned" attribute in GCC but that doesn't work for stack variables.
 */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	int __r;							\
	asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l));	\
	__r & 1;							\
})

#define	MUTEX_UNSET(tsl)	(*(tsl) = -1)
#define	MUTEX_INIT(tsl)		(MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * IA64/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_SET_TEST	1		/* gcc/ia64: 0 is clear, 1 is set. */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	long __r;							\
	asm volatile("xchg1 %0=%1,%3" : "=r"(__r), "=m"(*__l) : "1"(*__l), "r"(1));\
	__r ^ 1;							\
})

/*
 * Store through a "volatile" pointer so we get a store with "release"
 * semantics.
 */
#define	MUTEX_UNSET(tsl)	(*(volatile unsigned char *)(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif
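/*
 * [Editor's note, not part of the original header]  The HPPA section above
 * requires the "load and clear" word to be 16-byte aligned (MUTEX_ALIGN is
 * 16 and HPUX_MUTEX_PAD reserves extra room), and notes that GCC's
 * "aligned" attribute does not help for stack variables.  A minimal sketch
 * of how a caller could carve such an aligned word out of an over-allocated
 * buffer, guarded by #if 0; the helper name align_tsl_16 is an illustrative
 * assumption.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

static inline tsl_t *
align_tsl_16(void *buf)
{
	/* Round the address up to the next 16-byte boundary. */
	uintptr_t p = (uintptr_t)buf;
	return (tsl_t *)((p + 15) & ~(uintptr_t)15);
}
#endif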
/*********************************************************************
 * PowerPC/gcc assembly.
 *********************************************************************/
#if defined(HAVE_MUTEX_PPC_GCC_ASSEMBLY)
typedef u_int32_t tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * The PowerPC does a sort of pseudo-atomic locking.  You set up a
 * 'reservation' on a chunk of memory containing a mutex by loading the
 * mutex value with LWARX.  If the mutex has an 'unlocked' (arbitrary)
 * value, you then try storing into it with STWCX.  If no other process or
 * thread broke your 'reservation' by modifying the memory containing the
 * mutex, then the STCWX succeeds; otherwise it fails and you try to get
 * a reservation again.
 *
 * While mutexes are explicitly 4 bytes, a 'reservation' applies to an
 * entire cache line, normally 32 bytes, aligned naturally.  If the mutex
 * lives near data that gets changed a lot, there's a chance that you'll
 * see more broken reservations than you might otherwise.  The only
 * situation in which this might be a problem is if one processor is
 * beating on a variable in the same cache block as the mutex while another
 * processor tries to acquire the mutex.  That's bad news regardless
 * because of the way it bashes caches, but if you can't guarantee that a
 * mutex will reside in a relatively quiescent cache line, you might
 * consider padding the mutex to force it to live in a cache line by
 * itself.  No, you aren't guaranteed that cache lines are 32 bytes.  Some
 * embedded processors use 16-byte cache lines, while some 64-bit
 * processors use 128-bit cache lines.  But assuming a 32-byte cache line
 * won't get you into trouble for now.
 *
 * If mutex locking is a bottleneck, then you can speed it up by adding a
 * regular LWZ load before the LWARX load, so that you can test for the
 * common case of a locked mutex without wasting cycles making a reservation.
 *
 * 'set' mutexes have the value 1, like on Intel; the returned value from
 * MUTEX_SET() is 1 if the mutex previously had its low bit clear, 0 otherwise.
 */
#define	MUTEX_SET_TEST	1		/* gcc/ppc: 0 is clear, 1 is set. */
static inline int
MUTEX_SET(int *tsl)  {
         int __r;
         int __tmp = (int)tsl;

    asm volatile (
"0:                             \n\t"
"       lwarx   %0,0,%2         \n\t"
"       cmpwi   %0,0            \n\t"
"       bne-    1f              \n\t"
"       stwcx.  %2,0,%2         \n\t"
"       isync                   \n\t"
"       beq+    2f              \n\t"
"       b       0b              \n\t"
"1:                             \n\t"
"       li      %1, 0           \n\t"
"2:                             \n\t"
         : "=&r" (__r), "=r" (tsl)
         : "r" (__tmp)
         : "cr0", "memory");
         return (int)tsl;
}

static inline int
MUTEX_UNSET(tsl_t *tsl) {
         asm volatile("sync" : : : "memory");
         return *tsl = 0;
}
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * OS/390 C
 *********************************************************************/
#ifdef HAVE_MUTEX_S390_CC_ASSEMBLY
typedef int tsl_t;

#ifndef	MUTEX_ALIGN
#define	MUTEX_ALIGN	sizeof(int)
#endif

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * cs() is declared in <stdlib.h> but is built in to the compiler.
 * Must use LANGLVL(EXTENDED) to get its declaration.
 */
#define	MUTEX_SET(tsl)		(!cs(&zero, (tsl), 1))
#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * S/390 32-bit assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
typedef int tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_SET_TEST	1		/* gcc/S390: 0 is clear, 1 is set. */
static inline int
MUTEX_SET(tsl_t *tsl) {							\
	register tsl_t *__l = (tsl);					\
	int __r;							\
  asm volatile(								\
       "    la    1,%1\n"						\
       "    lhi   0,1\n"						\
       "    l     %0,%1\n"						\
       "0:  cs    %0,0,0(1)\n"						\
       "    jl    0b"							\
       : "=&d" (__r), "+m" (*__l)					\
       : : "0", "1", "cc");						\
	return !__r;							\
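/*
 * [Editor's note, not part of the original header]  The PowerPC comment
 * above suggests testing the mutex with a plain load before attempting
 * the reserved LWARX load, so the common "already locked" case does not
 * waste cycles making a reservation.  A minimal, portable sketch of that
 * test-then-test-and-set idea using the macros above, guarded by #if 0;
 * it assumes the "0 is clear, nonzero is set" convention stated in the
 * sections above, and the function name spin_acquire_ttas is an
 * illustrative assumption.
 */
#if 0	/* illustrative sketch only */
static inline void
spin_acquire_ttas(tsl_t *tsl)
{
	for (;;) {
		/* Cheap read first: spin locally while the lock looks held. */
		while (*(volatile tsl_t *)tsl != 0)
			continue;
		/* The lock looked free; now try the atomic test-and-set. */
		if (MUTEX_SET(tsl))
			return;
	}
}
#endif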
