
mutex.h

typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * For gcc/ia64, 0 is clear, 1 is set.
 */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	long __r;							\
	asm volatile("xchg1 %0=%1,%3" : "=r"(__r), "=m"(*__l) : "1"(*__l), "r"(1));\
	__r ^ 1;							\
})

/*
 * Store through a "volatile" pointer so we get a store with "release"
 * semantics.
 */
#define	MUTEX_UNSET(tsl)	(*(volatile unsigned char *)(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * PowerPC/gcc assembly.
 *********************************************************************/
#if defined(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY) ||			\
    defined(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY)
typedef u_int32_t tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * The PowerPC does a sort of pseudo-atomic locking.  You set up a
 * 'reservation' on a chunk of memory containing a mutex by loading the
 * mutex value with LWARX.  If the mutex has an 'unlocked' (arbitrary)
 * value, you then try storing into it with STWCX.  If no other process or
 * thread broke your 'reservation' by modifying the memory containing the
 * mutex, then the STWCX succeeds; otherwise it fails and you try to get
 * a reservation again.
 *
 * While mutexes are explicitly 4 bytes, a 'reservation' applies to an
 * entire cache line, normally 32 bytes, aligned naturally.  If the mutex
 * lives near data that gets changed a lot, there's a chance that you'll
 * see more broken reservations than you might otherwise.  The only
 * situation in which this might be a problem is if one processor is
 * beating on a variable in the same cache block as the mutex while another
 * processor tries to acquire the mutex.  That's bad news regardless
 * because of the way it bashes caches, but if you can't guarantee that a
 * mutex will reside in a relatively quiescent cache line, you might
 * consider padding the mutex to force it to live in a cache line by
 * itself.  No, you aren't guaranteed that cache lines are 32 bytes.  Some
 * embedded processors use 16-byte cache lines, while some 64-bit
 * processors use 128-byte cache lines.  But assuming a 32-byte cache line
 * won't get you into trouble for now.
 *
 * If mutex locking is a bottleneck, then you can speed it up by adding a
 * regular LWZ load before the LWARX load, so that you can test for the
 * common case of a locked mutex without wasting cycles making a reservation.
 *
 * 'set' mutexes have the value 1, like on Intel; the returned value from
 * MUTEX_SET() is 1 if the mutex previously had its low bit clear, 0 otherwise.
 *
 * Mutexes on Mac OS X work the same way as the standard PowerPC version, but
 * the assembler syntax is subtly different -- the standard PowerPC version
 * assembles but doesn't work correctly.  This version makes (unnecessary?)
 * use of a stupid linker trick: __db_mutex_tas_dummy is never called, but the
 * ___db_mutex_set label is used as a function name.
 */
#ifdef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY
extern int __db_mutex_set __P((volatile tsl_t *));
void
__db_mutex_tas_dummy()
{
	__asm__ __volatile__("		\n\
	.globl	___db_mutex_set		\n\
___db_mutex_set:			\n\
	lwarx	r5,0,r3			\n\
	cmpwi	r5,0			\n\
	bne	fail			\n\
	addi	r5,r5,1			\n\
	stwcx.	r5,0,r3			\n\
	beq	success			\n\
fail:					\n\
	li	r3,0			\n\
	blr				\n\
success:				\n\
	li	r3,1			\n\
	blr");
}
#define	MUTEX_SET(tsl)	__db_mutex_set(tsl)
#endif
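/*
 * Illustrative sketch only, not part of the original source: the "LWZ
 * before LWARX" suggestion above is the classic test-and-test-and-set
 * pattern.  In portable C it amounts to spinning on a plain read, which
 * never takes a reservation, and attempting the expensive atomic only once
 * the mutex looks free.  __db_mutex_set_ttas is a hypothetical name.
 */
#if 0
static int
__db_mutex_set_ttas(volatile tsl_t *tsl)
{
	while (*tsl != 0)		/* Cheap load; takes no reservation. */
		;
	return (MUTEX_SET(tsl));	/* Atomic attempt; may still fail. */
}
#endif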
#ifdef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY
#define	MUTEX_SET(tsl)	({		\
	int __one = 1;			\
	int __r;			\
	tsl_t *__l = (tsl);		\
	asm volatile ("			\
0:					\
	lwarx %0,0,%1;			\
	cmpwi %0,0;			\
	bne 1f;				\
	stwcx. %2,0,%1;			\
	bne- 0b;			\
1:"					\
	: "=&r" (__r)			\
	: "r" (__l), "r" (__one));	\
	!(__r & 1);			\
})
#endif
#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * S/390 32-bit assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
typedef int tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * For gcc/S390, 0 is clear, 1 is set.
 */
static inline int
MUTEX_SET(tsl_t *tsl) {
	register tsl_t *__l = (tsl);
	int __r;
	asm volatile(
	    "    la    1,%1\n"
	    "    lhi   0,1\n"
	    "    l     %0,%1\n"
	    "0:  cs    %0,0,0(1)\n"
	    "    jl    0b"
	    : "=&d" (__r), "+m" (*__l)
	    : : "0", "1", "cc");
	return !__r;
}

#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * SCO/cc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * UnixWare has threads in libthread, but OpenServer doesn't (yet).
 *
 * For cc/x86, 0 is clear, 1 is set.
 */
#if defined(__USLC__)
asm int
_tsl_set(void *tsl)
{
%mem tsl
	movl	tsl, %ecx
	movl	$1, %eax
	lock
	xchgb	(%ecx),%al
	xorl	$1,%eax
}
#endif

#define	MUTEX_SET(tsl)		_tsl_set(tsl)
#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * Sparc/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * The ldstub instruction takes the location specified by its first argument
 * (a register containing a memory address) and loads its contents into its
 * second argument (a register) and atomically sets the contents of the
 * location specified by its first argument to a byte of 1s.  (The value in
 * the second argument is never read, but only overwritten.)
 *
 * The stbar is needed for v8, and is implemented as membar #sync on v9,
 * so is functional there as well.  For v7, stbar may generate an illegal
 * instruction and we have no way to tell what we're running on.  Some
 * operating systems notice and skip this instruction in the fault handler.
 *
 * For gcc/sparc, 0 is clear, 1 is set.
 */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	register tsl_t __r;						\
	__asm__ volatile						\
	    ("ldstub [%1],%0; stbar"					\
	    : "=r"(__r) : "r"(__l));					\
	!__r;								\
})

#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif
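/*
 * Illustrative sketch only, not part of the original source: on compilers
 * that provide the GCC atomic builtins (gcc 4.1 and later), one portable
 * definition subsumes the per-architecture test-and-set blocks above.
 * __sync_lock_test_and_set is an atomic exchange with acquire semantics,
 * and __sync_lock_release is a release-ordered store of 0, which matches
 * the 0-is-clear/1-is-set convention used throughout this file.
 */
#if 0
typedef unsigned char tsl_t;

#define	MUTEX_SET(tsl)		(__sync_lock_test_and_set(tsl, 1) == 0)
#define	MUTEX_UNSET(tsl)	__sync_lock_release(tsl)
#define	MUTEX_INIT(tsl)		(*(tsl) = 0)
#endif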
/*********************************************************************
 * UTS/cc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
typedef int tsl_t;

#define	MUTEX_ALIGN	sizeof(int)
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_INIT(x)	0
#define	MUTEX_SET(x)	(!uts_lock(x, 1))
#define	MUTEX_UNSET(x)	(*(x) = 0)
#endif
#endif

#endif /* __KERNEL__ */

/*********************************************************************
 * x86/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * For gcc/x86, 0 is clear, 1 is set.
 */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	int __r;							\
	asm volatile("movl $1,%%eax; lock; xchgb %1,%%al; xorl $1,%%eax"\
	    : "=&a" (__r), "=m" (*__l)					\
	    : "1" (*__l)						\
	    );								\
	__r & 1;							\
})

#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*
 * Mutex alignment defaults to one byte.
 *
 * !!!
 * Various systems require different alignments for mutexes (the worst we've
 * seen so far is 16 bytes on some HP architectures).  Malloc(3) is assumed
 * to return reasonable alignment; all other mutex users must ensure proper
 * alignment locally.
 */
#ifndef	MUTEX_ALIGN
#define	MUTEX_ALIGN	1
#endif

/*
 * Mutex destruction defaults to a no-op.
 */
#ifdef LOAD_ACTUAL_MUTEX_CODE
#ifndef	MUTEX_DESTROY
#define	MUTEX_DESTROY(x)
#endif
#endif

/*
 * !!!
 * These defines are separated into the u_int8_t flags stored in the
 * mutex below, and the 32-bit flags passed to __db_mutex_setup.
 * But they must co-exist and not overlap.  Flags to __db_mutex_setup are:
 *
 * MUTEX_ALLOC - Use when the mutex to initialize needs to be allocated.
 *    The 'ptr' arg to __db_mutex_setup should be a DB_MUTEX ** whenever
 *    you use this flag.  If this flag is not set, the 'ptr' arg is
 *    a DB_MUTEX *.
 * MUTEX_NO_RECORD - Explicitly do not record the mutex in the region.
 *    Otherwise the mutex will be recorded by default.  If you set
 *    this you need to understand why you don't need it recorded.  The
 *    *only* ones not recorded are those that are part of region structures
 *    that only get destroyed when the regions are destroyed.
 * MUTEX_NO_RLOCK - Explicitly do not lock the given region; otherwise
 *    the region will be locked by default.
 * MUTEX_SELF_BLOCK - Set if this is a self-blocking mutex.
 * MUTEX_THREAD - Set if the mutex is a thread-only mutex.
 */
#define	MUTEX_IGNORE		0x001	/* Ignore, no lock required. */
#define	MUTEX_INITED		0x002	/* Mutex is successfully initialized. */
#define	MUTEX_MPOOL		0x004	/* Allocated from mpool. */
#define	MUTEX_SELF_BLOCK	0x008	/* Must block self. */

/* Flags only, may be larger than 0xff. */
#define	MUTEX_ALLOC		0x00000100 /* Allocate and init a mutex. */
#define	MUTEX_NO_RECORD		0x00000200 /* Do not record lock. */
#define	MUTEX_NO_RLOCK		0x00000400 /* Do not acquire region lock. */
#define	MUTEX_THREAD		0x00000800 /* Thread-only mutex. */
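/*
 * Illustrative sketch only, not part of the original source: per the flag
 * documentation above, calls to __db_mutex_setup take roughly this shape.
 * The surrounding variables (dbenv, infop, mp) and the exact argument list
 * are assumptions for illustration, not the verified prototype.
 */
#if 0
	DB_MUTEX *mp;

	/* MUTEX_ALLOC set: pass a DB_MUTEX ** so the mutex is allocated. */
	ret = __db_mutex_setup(dbenv, infop, &mp, MUTEX_ALLOC | MUTEX_THREAD);

	/* MUTEX_ALLOC clear: 'ptr' is the DB_MUTEX * itself. */
	ret = __db_mutex_setup(dbenv, infop, mp, MUTEX_SELF_BLOCK);
#endif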
/* Mutex. */
struct __mutex_t {
#ifndef __KERNEL__
#ifdef	HAVE_MUTEX_THREADS
#ifdef	MUTEX_FIELDS
	MUTEX_FIELDS
#else
	tsl_t	tas;			/* Test and set. */
#endif
	u_int32_t spins;		/* Spins before block. */
	u_int32_t locked;		/* !0 if locked. */
#else
	u_int32_t off;			/* Byte offset to lock. */
	u_int32_t pid;			/* Lock holder: 0 or process pid. */
#endif
	u_int32_t mutex_set_wait;	/* Granted after wait. */
	u_int32_t mutex_set_nowait;	/* Granted without waiting. */
	u_int32_t mutex_set_spin;	/* Granted without spinning. */
	u_int32_t mutex_set_spins;	/* Total number of spins. */
#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
	roff_t	  reg_off;		/* Shared lock info offset. */
#endif
#else
#ifdef	MUTEX_FIELDS
	MUTEX_FIELDS
	u_int32_t spins;		/* Spins before block. */
	u_int32_t locked;		/* !0 if locked. */
	u_int32_t mutex_set_wait;	/* Granted after wait. */
	u_int32_t mutex_set_nowait;	/* Granted without waiting. */
#endif
#include <asm/semaphore.h>
	struct semaphore tas;		/* Mutex. */
#endif /* __KERNEL__ */
	u_int8_t  flags;		/* MUTEX_XXX */
};

/* Redirect calls to the correct functions. */
#ifdef __KERNEL__
#define	__db_mutex_init_int(a, b, c, d)	__db_tas_mutex_init(a, b, d)
#define	__db_mutex_lock(a, b)		__db_tas_mutex_lock(a, b)
#define	__db_mutex_unlock(a, b)		__db_tas_mutex_unlock(a, b)
#define	__db_mutex_destroy(a)		__db_tas_mutex_destroy(a)
#else
#ifdef HAVE_MUTEX_THREADS
#if defined(HAVE_MUTEX_PTHREADS) ||					\
    defined(HAVE_MUTEX_SOLARIS_LWP) ||					\
    defined(HAVE_MUTEX_UI_THREADS)
#define	__db_mutex_init_int(a, b, c, d)	__db_pthread_mutex_init(a, b, d)
#define	__db_mutex_lock(a, b)		__db_pthread_mutex_lock(a, b)
#define	__db_mutex_unlock(a, b)		__db_pthread_mutex_unlock(a, b)
#define	__db_mutex_destroy(a)		__db_pthread_mutex_destroy(a)
#elif defined(HAVE_MUTEX_WIN32)
#define	__db_mutex_init_int(a, b, c, d)	__db_win32_mutex_init(a, b, d)
#define	__db_mutex_lock(a, b)		__db_win32_mutex_lock(a, b)
#define	__db_mutex_unlock(a, b)		__db_win32_mutex_unlock(a, b)
#define	__db_mutex_destroy(a)		__db_win32_mutex_destroy(a)
#else
#define	__db_mutex_init_int(a, b, c, d)	__db_tas_mutex_init(a, b, d)
#define	__db_mutex_lock(a, b)		__db_tas_mutex_lock(a, b)
#define	__db_mutex_unlock(a, b)		__db_tas_mutex_unlock(a, b)
#define	__db_mutex_destroy(a)		__db_tas_mutex_destroy(a)
#endif
#else
#define	__db_mutex_init_int(a, b, c, d)	__db_fcntl_mutex_init(a, b, c)
#define	__db_mutex_lock(a, b)		__db_fcntl_mutex_lock(a, b)
#define	__db_mutex_unlock(a, b)		__db_fcntl_mutex_unlock(a, b)
#define	__db_mutex_destroy(a)		__db_fcntl_mutex_destroy(a)
#endif
#endif /* __KERNEL__ */

/* Redirect system resource calls to the correct functions. */
#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
#define	__db_maintinit(a, b, c)		__db_shreg_maintinit(a, b, c)
#define	__db_shlocks_clear(a, b, c)	__db_shreg_locks_clear(a, b, c)
#define	__db_shlocks_destroy(a, b)	__db_shreg_locks_destroy(a, b)
#define	__db_mutex_init(a, b, c, d, e, f)	\
    __db_shreg_mutex_init(a, b, c, d, e, f)
#else
#define	__db_maintinit(a, b, c)
#define	__db_shlocks_clear(a, b, c)
#define	__db_shlocks_destroy(a, b)
#define	__db_mutex_init(a, b, c, d, e, f)	__db_mutex_init_int(a, b, c, d)
#endif
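/*
 * Illustrative sketch only, not part of the original source: the PowerPC
 * comment earlier in this file suggests padding a mutex so it owns its
 * cache line.  Under the same 32-byte working assumption, a padded wrapper
 * might look like this; the union and the line-size constant are both
 * hypothetical names, not Berkeley DB definitions.
 */
#if 0
#define	MUTEX_CACHELINE	32			/* Assumed cache-line size. */
union __db_padded_mutex {
	struct __mutex_t mutex;			/* The real mutex. */
	char pad[MUTEX_CACHELINE];		/* Round up to a full line. */
};
#endif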
/*
 * Lock/unlock a mutex.  If the mutex was marked as uninteresting, the thread
 * of control can proceed without it.
 *
 * If the lock is for threads-only, then it was optionally not allocated and
 * file handles aren't necessary, as threaded applications aren't supported by
 * fcntl(2) locking.
 */
#ifdef DIAGNOSTIC
	/*
	 * XXX
	 * We want to switch threads as often as possible.  Yield every time
	 * we get a mutex to ensure contention.
	 */
#define	MUTEX_LOCK(dbenv, mp)						\
	if (!F_ISSET((mp), MUTEX_IGNORE))				\
		DB_ASSERT(__db_mutex_lock(dbenv, mp) == 0);		\
	if (F_ISSET(dbenv, DB_ENV_YIELDCPU))				\
		__os_yield(NULL, 1);
#else
#define	MUTEX_LOCK(dbenv, mp)						\
	if (!F_ISSET((mp), MUTEX_IGNORE))				\
		(void)__db_mutex_lock(dbenv, mp);
#endif
#define	MUTEX_UNLOCK(dbenv, mp)						\
	if (!F_ISSET((mp), MUTEX_IGNORE))				\
		(void)__db_mutex_unlock(dbenv, mp);
#define	MUTEX_THREAD_LOCK(dbenv, mp)					\
	if (mp != NULL)							\
		MUTEX_LOCK(dbenv, mp)
#define	MUTEX_THREAD_UNLOCK(dbenv, mp)					\
	if (mp != NULL)							\
		MUTEX_UNLOCK(dbenv, mp)

/*
 * We use a single file descriptor for fcntl(2) locking, and (generally) the
 * object's offset in a shared region as the byte that we're locking.  So,
 * there's a (remote) possibility that two objects might have the same offsets
 * such that the locks could conflict, resulting in deadlock.  To avoid this
 * possibility, we offset the region offset by a small integer value, using a
 * different offset for each subsystem's locks.  Since all region objects are
 * suitably aligned, the offset guarantees that we don't collide with another
 * region's objects.
 */
#define	DB_FCNTL_OFF_GEN	0		/* Everything else. */
#define	DB_FCNTL_OFF_LOCK	1		/* Lock subsystem offset. */
#define	DB_FCNTL_OFF_MPOOL	2		/* Mpool subsystem offset. */

#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
/*
 * When the underlying mutexes require library (most likely heap) or system
 * resources, we have to clean up when we discard mutexes (for the library
 * resources) and both when discarding mutexes and after application failure
 * (for the mutexes requiring system resources).  This violates the rule that
 * we never look at a shared region after application failure, but we've no
 * other choice.  In those cases, the #define HAVE_MUTEX_SYSTEM_RESOURCES is
 * set.
 *
 * To support mutex release after application failure, allocate thread-handle
 * mutexes in shared memory instead of in the heap.  The number of slots we
 * allocate for this purpose isn't configurable, but this tends to be an issue
 * only on embedded systems where we don't expect large server applications.
 */
#define	DB_MAX_HANDLES	100			/* Mutex slots for handles. */
#endif
#endif /* !_DB_MUTEX_H_ */
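/*
 * Illustrative sketch only, not part of the original source: typical use of
 * the locking macros above brackets a critical section, and the
 * MUTEX_THREAD_* variants tolerate a NULL mutex in single-threaded
 * configurations.  dbenv and mp are assumed to have been set up elsewhere.
 */
#if 0
	MUTEX_LOCK(dbenv, mp);
	/* ... critical section: read or update shared region data ... */
	MUTEX_UNLOCK(dbenv, mp);
#endif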
