
s_lock.h

Source code from PostgreSQL 8.1.4, an open-source database system for Linux
Page 1 of 2
/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *	   Hardware-dependent implementation of spinlocks.
 *
 *	NOTE: none of the macros in this file are intended to be called directly.
 *	Call them through the hardware-independent macros in spin.h.
 *
 *	The following hardware-dependent macros must be provided for each
 *	supported platform:
 *
 *	void S_INIT_LOCK(slock_t *lock)
 *		Initialize a spinlock (to the unlocked state).
 *
 *	void S_LOCK(slock_t *lock)
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
 *
 *	void S_UNLOCK(slock_t *lock)
 *		Unlock a previously acquired lock.
 *
 *	bool S_LOCK_FREE(slock_t *lock)
 *		Tests if the lock is free. Returns TRUE if free, FALSE if locked.
 *		This does *not* change the state of the lock.
 *
 *	void SPIN_DELAY(void)
 *		Delay operation to occur inside spinlock wait loop.
 *
 *	Note to implementors: there are default implementations for all these
 *	macros at the bottom of the file.  Check if your platform can use
 *	these or needs to override them.
 *
 *	Usually, S_LOCK() is implemented in terms of an even lower-level macro
 *	TAS():
 *
 *	int TAS(slock_t *lock)
 *		Atomic test-and-set instruction.  Attempt to acquire the lock,
 *		but do *not* wait.  Returns 0 if successful, nonzero if unable
 *		to acquire the lock.
 *
 *	TAS() is NOT part of the API, and should never be called directly.
 *
 *	CAUTION: on some platforms TAS() may sometimes report failure to acquire
 *	a lock even when the lock is not locked.  For example, on Alpha TAS()
 *	will "fail" if interrupted.  Therefore TAS() should always be invoked
 *	in a retry loop, even if you are certain the lock is free.
 *
 *	ANOTHER CAUTION: be sure that TAS() and S_UNLOCK() represent sequence
 *	points, ie, loads and stores of other values must not be moved across
 *	a lock or unlock.  In most cases it suffices to make the operation be
 *	done through a "volatile" pointer.
 *
 *	On most supported platforms, TAS() uses a tas() function written
 *	in assembly language to execute a hardware atomic-test-and-set
 *	instruction.  Equivalent OS-supplied mutex routines could be used too.
 *
 *	If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
 *	defined), then we fall back on an emulation that uses SysV semaphores
 *	(see spin.c).  This emulation will be MUCH MUCH slower than a proper TAS()
 *	implementation, because of the cost of a kernel call per lock or unlock.
 *	An old report is that Postgres spends around 40% of its time in semop(2)
 *	when using the SysV semaphore code.
 *
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *	  $PostgreSQL: pgsql/src/include/storage/s_lock.h,v 1.142 2005/10/11 20:41:32 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#ifndef S_LOCK_H
#define S_LOCK_H

#include "storage/pg_sema.h"

#ifdef HAVE_SPINLOCKS	/* skip spinlocks if requested */


#if defined(__GNUC__) || defined(__ICC)
/*************************************************************************
 * All the gcc inlines
 * Gcc consistently defines the CPU as __cpu__.
 * Other compilers use __cpu or __cpu__ so we test for both in those cases.
 */

/*----------
 * Standard gcc asm format (assuming "volatile slock_t *lock"):

	__asm__ __volatile__(
		"	instruction	\n"
		"	instruction	\n"
		"	instruction	\n"
:		"=r"(_res), "+m"(*lock)		// return register, in/out lock value
:		"r"(lock)					// lock pointer, in input register
:		"memory", "cc");			// show clobbered registers here

 * The output-operands list (after first colon) should always include
 * "+m"(*lock), whether or not the asm code actually refers to this
 * operand directly.  This ensures that gcc believes the value in the
 * lock variable is used and set by the asm code.  Also, the clobbers
 * list (after third colon) should always include "memory"; this prevents
 * gcc from thinking it can cache the values of shared-memory fields
 * across the asm code.  Add "cc" if your asm code changes the condition
 * code register, and also list any temp registers the code uses.
 *----------
 */


#ifdef __i386__
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	/*
	 * Use a non-locking test before asserting the bus lock.  Note that the
	 * extra test appears to be a small loss on some x86 platforms and a small
	 * win on others; it's by no means clear that we should keep it.
	 */
	__asm__ __volatile__(
		"	cmpb	$0,%1	\n"
		"	jne		1f		\n"
		"	lock			\n"
		"	xchgb	%0,%1	\n"
		"1: \n"
:		"+q"(_res), "+m"(*lock)
:
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * This sequence is equivalent to the PAUSE instruction ("rep" is
	 * ignored by old IA32 processors if the following instruction is
	 * not a string operation); the IA-32 Architecture Software
	 * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
	 * PAUSE in the inner loop of a spin lock is necessary for good
	 * performance:
	 *
	 *	   The PAUSE instruction improves the performance of IA-32
	 *	   processors supporting Hyper-Threading Technology when
	 *	   executing spin-wait loops and other routines where one
	 *	   thread is accessing a shared lock or semaphore in a tight
	 *	   polling loop. When executing a spin-wait loop, the
	 *	   processor can suffer a severe performance penalty when
	 *	   exiting the loop because it detects a possible memory order
	 *	   violation and flushes the core processor's pipeline. The
	 *	   PAUSE instruction provides a hint to the processor that the
	 *	   code sequence is a spin-wait loop. The processor uses this
	 *	   hint to avoid the memory order violation and prevent the
	 *	   pipeline flush. In addition, the PAUSE instruction
	 *	   de-pipelines the spin-wait loop to prevent it from
	 *	   consuming execution resources excessively.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __i386__ */


#ifdef __x86_64__		/* AMD Opteron, Intel EM64T */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	/*
	 * On Opteron, using a non-locking test before the locking instruction
	 * is a huge loss.  On EM64T, it appears to be a wash or small loss,
	 * so we needn't bother to try to distinguish the sub-architectures.
	 */
	__asm__ __volatile__(
		"	lock			\n"
		"	xchgb	%0,%1	\n"
:		"+q"(_res), "+m"(*lock)
:
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * Adding a PAUSE in the spin delay loop is demonstrably a no-op on
	 * Opteron, but it may be of some use on EM64T, so we keep it.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __x86_64__ */


#if defined(__ia64__) || defined(__ia64)	/* Intel Itanium */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

#ifndef __INTEL_COMPILER

static __inline__ int
tas(volatile slock_t *lock)
{
	long int	ret;

	__asm__ __volatile__(
		"	xchg4 	%0=%1,%2	\n"
:		"=r"(ret), "+m"(*lock)
:		"r"(1)
:		"memory");
	return (int) ret;
}

#else /* __INTEL_COMPILER */

static __inline__ int
tas(volatile slock_t *lock)
{
	int		ret;

	ret = _InterlockedExchange(lock,1);	/* this is a xchg asm macro */

	return ret;
}

#endif /* __INTEL_COMPILER */
#endif	 /* __ia64__ || __ia64 */


#if defined(__arm__) || defined(__arm)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	swpb 	%0, %0, [%2]	\n"
:		"+r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __arm__ */


#if defined(__s390__) || defined(__s390x__)
/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)	   tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	int			_res = 0;

	__asm__	__volatile__(
		"	cs 	%0,%3,0(%2)		\n"
:		"+d"(_res), "+m"(*lock)
:		"a"(lock), "d"(1)
:		"memory", "cc");
	return _res;
}

#endif	 /* __s390__ || __s390x__ */


#if defined(__sparc__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	__asm__ __volatile__(
		"	ldstub	[%2], %0	\n"
:		"=r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __sparc__ */


#if defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET

#if defined(__powerpc64__)
typedef unsigned long slock_t;
#else
typedef unsigned int slock_t;
#endif

#define TAS(lock) tas(lock)

/*
 * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
 * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
 */
static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t _t;
	int _res;

	__asm__ __volatile__(
"	lwarx   %0,0,%3		\n"
"	cmpwi   %0,0		\n"
"	bne     1f			\n"
"	addi    %0,%0,1		\n"
"	stwcx.  %0,0,%3		\n"
"	beq     2f         	\n"
"1:	li      %1,1		\n"
"	b		3f			\n"
"2:						\n"
"	isync				\n"
"	li      %1,0		\n"
"3:						\n"
:	"=&r"(_t), "=r"(_res), "+m"(*lock)
:	"r"(lock)
:	"memory", "cc");
	return _res;
}

/* PowerPC S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	sync \n"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* powerpc */


#if (defined(__mc68000__) || defined(__m68k__)) && defined(__linux__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int rv;

	__asm__	__volatile__(
		"	clrl	%0		\n"
		"	tas		%1		\n"
		"	sne		%0		\n"
:		"=d"(rv), "+m"(*lock)
:
:		"memory", "cc");
	return rv;
}

#endif	 /* (__mc68000__ || __m68k__) && __linux__ */


#if defined(__vax__)
/*
 * VAXen -- even multiprocessor ones
 * (thanks to Tom Ivar Helbekkmo)
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int	_res;

	__asm__ __volatile__(
		"	movl 	$1, %0			\n"
		"	bbssi	$0, (%2), 1f	\n"
		"	clrl	%0				\n"
		"1: \n"
:		"=&r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return _res;
}

#endif	 /* __vax__ */


#if defined(__ns32k__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
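The listing breaks off mid-function here, at the end of page 1 of 2.

The header comment is emphatic about two usage rules: TAS() must always be retried in a loop (on some platforms, e.g. Alpha, it can fail spuriously even when the lock is free), and S_LOCK() gives up and aborts after roughly a minute. The following is a minimal sketch of such a retry loop, written against the macros defined above. It is not the actual code from s_lock.c; the function name and the spin cap are invented stand-ins for the real timeout logic.

/*
 * Hypothetical sketch of an S_LOCK()-style retry loop.  Assumes slock_t,
 * TAS(), and SPIN_DELAY() as defined above; the spin cap is a made-up
 * stand-in for the "~1 minute" timeout described in the header comment.
 */
#include <stdlib.h>

static void
acquire_spinlock_sketch(volatile slock_t *lock)
{
	long	spins = 0;

	/* Always loop: TAS() may report failure even when the lock is free. */
	while (TAS(lock))
	{
		SPIN_DELAY();				/* e.g. the PAUSE hint on x86 */
		if (++spins > 1000000000L)	/* invented cap, a "reasonable" time */
			abort();				/* stuck spinlock: give up */
	}
	/* The lock is now held; release it with S_UNLOCK(lock). */
}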
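Since the file insists that none of its macros be called directly, here is a hedged sketch of how a caller would go through the hardware-independent wrappers in storage/spin.h (SpinLockInit, SpinLockAcquire, SpinLockRelease, which map onto S_INIT_LOCK, S_LOCK, and S_UNLOCK). The struct and field names below are invented for illustration.

#include "storage/spin.h"

/* Illustrative shared structure; "mutex" and "count" are made-up names. */
typedef struct SharedCounter
{
	slock_t		mutex;			/* protects count */
	int			count;
} SharedCounter;

static void
counter_init(volatile SharedCounter *sc)
{
	SpinLockInit(&sc->mutex);		/* S_INIT_LOCK(): start unlocked */
	sc->count = 0;
}

static void
counter_increment(volatile SharedCounter *sc)
{
	SpinLockAcquire(&sc->mutex);	/* S_LOCK(): spins, aborts if stuck */
	sc->count++;					/* keep the critical section short */
	SpinLockRelease(&sc->mutex);	/* S_UNLOCK(): includes "sync" on PowerPC */
}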
