📄 s_lock.h
    register int _res;

    __asm__ __volatile__(
        "   sbitb   0, %1   \n"
        "   sfsd    %0      \n"
        : "=r"(_res), "+m"(*lock)
        :
        : "memory");
    return _res;
}

#endif   /* __ns32k__ */


#if defined(__alpha) || defined(__alpha__)
/*
 * Correct multi-processor locking methods are explained in section 5.5.3
 * of the Alpha AXP Architecture Handbook, which at this writing can be
 * found at ftp://ftp.netbsd.org/pub/NetBSD/misc/dec-docs/index.html.
 * For gcc we implement the handbook's code directly with inline assembler.
 */
#define HAS_TEST_AND_SET

typedef unsigned long slock_t;

#define TAS(lock)   tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
    register slock_t _res;

    __asm__ __volatile__(
        "   ldq     $0, %1  \n"
        "   bne     $0, 2f  \n"
        "   ldq_l   %0, %1  \n"
        "   bne     %0, 2f  \n"
        "   mov     1,  $0  \n"
        "   stq_c   $0, %1  \n"
        "   beq     $0, 2f  \n"
        "   mb              \n"
        "   br      3f      \n"
        "2: mov     1, %0   \n"
        "3:                 \n"
        : "=&r"(_res), "+m"(*lock)
        :
        : "memory", "0");
    return (int) _res;
}

#define S_UNLOCK(lock)  \
do \
{\
    __asm__ __volatile__ (" mb \n"); \
    *((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __alpha || __alpha__ */


#if defined(__mips__) && !defined(__sgi)
/* Note: on SGI we use the OS' mutex ABI, see below */
/* Note: R10000 processors require a separate SYNC */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
    register volatile slock_t *_l = lock;
    register int _res;
    register int _tmp;

    __asm__ __volatile__(
        "       .set push           \n"
        "       .set mips2          \n"
        "       .set noreorder      \n"
        "       .set nomacro        \n"
        "       ll      %0, %2      \n"
        "       or      %1, %0, 1   \n"
        "       sc      %1, %2      \n"
        "       xori    %1, 1       \n"
        "       or      %0, %0, %1  \n"
        "       sync                \n"
        "       .set pop              "
        : "=&r" (_res), "=&r" (_tmp), "+R" (*_l)
        :
        : "memory");
    return _res;
}

/* MIPS S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)  \
do \
{ \
    __asm__ __volatile__( \
        "       .set push           \n" \
        "       .set mips2          \n" \
        "       .set noreorder      \n" \
        "       .set nomacro        \n" \
        "       sync                \n" \
        "       .set pop              "); \
    *((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __mips__ && !__sgi */


/* These live in s_lock.c, but only for gcc */
#if defined(__m68k__) && !defined(__linux__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


#endif  /* __GNUC__ */



/***************************************************************************
 * Platforms that use non-gcc inline assembly:
 */

#if !defined(HAS_TEST_AND_SET)  /* We didn't trigger above, let's try here */


#if defined(USE_UNIVEL_CC)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock)   tas(lock)

asm int
tas(volatile slock_t *s_lock)
{
/* UNIVEL wants %mem in column 1, so we don't pg_indent this file */
%mem s_lock
    pushl %ebx
    movl s_lock, %ebx
    movl $255, %eax
    lock
    xchgb %al, (%ebx)
    popl %ebx
}

#endif   /* defined(USE_UNIVEL_CC) */


#if defined(__alpha) || defined(__alpha__)
/*
 * The Tru64 compiler doesn't support gcc-style inline asm, but it does
 * have some builtin functions that accomplish much the same results.
 * For simplicity, slock_t is defined as long (ie, quadword) on Alpha
 * regardless of the compiler in use.  LOCK_LONG and UNLOCK_LONG only
 * operate on an int (ie, longword), but that's OK as long as we define
 * S_INIT_LOCK to zero out the whole quadword.
 */
#define HAS_TEST_AND_SET

typedef unsigned long slock_t;

#include <alpha/builtins.h>
#define S_INIT_LOCK(lock)  (*(lock) = 0)
#define TAS(lock)          (__LOCK_LONG_RETRY((lock), 1) == 0)
#define S_UNLOCK(lock)     __UNLOCK_LONG(lock)

#endif   /* __alpha || __alpha__ */


#if defined(__hppa) || defined(__hppa__)
/*
 * HP's PA-RISC
 *
 * See src/backend/port/hpux/tas.c.template for details about LDCWX.
 * Because LDCWX requires a 16-byte-aligned address, we declare slock_t as a
 * 16-byte struct.  The active word in the struct is whichever has the aligned
 * address; the other three words just sit at -1.
 *
 * When using gcc, we can inline the required assembly code.
 */
#define HAS_TEST_AND_SET

typedef struct
{
    int         sema[4];
} slock_t;

#define TAS_ACTIVE_WORD(lock)   ((volatile int *) (((long) (lock) + 15) & ~15))

#if defined(__GNUC__)

static __inline__ int
tas(volatile slock_t *lock)
{
    volatile int *lockword = TAS_ACTIVE_WORD(lock);
    register int lockval;

    __asm__ __volatile__(
        "   ldcwx   0(0,%2),%0  \n"
        : "=r"(lockval), "+m"(*lockword)
        : "r"(lockword)
        : "memory");
    return (lockval == 0);
}

#endif /* __GNUC__ */

#define S_UNLOCK(lock)  (*TAS_ACTIVE_WORD(lock) = -1)

#define S_INIT_LOCK(lock) \
    do { \
        volatile slock_t *lock_ = (lock); \
        lock_->sema[0] = -1; \
        lock_->sema[1] = -1; \
        lock_->sema[2] = -1; \
        lock_->sema[3] = -1; \
    } while (0)

#define S_LOCK_FREE(lock)   (*TAS_ACTIVE_WORD(lock) != 0)

#endif   /* __hppa || __hppa__ */


#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)

#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#include <ia64/sys/inline.h>
#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)

#endif  /* HPUX on IA64, non gcc */


#if defined(__QNX__) && defined(__WATCOMC__)
/*
 * QNX 4 using WATCOM C
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) wc_tas(lock)
extern slock_t wc_tas(volatile slock_t *lock);
#pragma aux wc_tas =\
        "   mov al,1    " \
        " lock  xchg    al,[esi]" \
        parm [esi]  \
        value [al];

#endif   /* __QNX__ and __WATCOMC__*/


#if defined(__sgi)
/*
 * SGI IRIX 5
 * slock_t is defined as a unsigned long. We use the standard SGI
 * mutex API.
 *
 * The following comment is left for historical reasons, but is probably
 * not a good idea since the mutex ABI is supported.
 *
 * This stuff may be supplemented in the future with Masato Kataoka's MIPS-II
 * assembly from his NECEWS SVR4 port, but we probably ought to retain this
 * for the R3000 chips out there.
 */
#define HAS_TEST_AND_SET

typedef unsigned long slock_t;

#include "mutex.h"
#define TAS(lock)           (test_and_set(lock,1))
#define S_UNLOCK(lock)      (test_then_and(lock,0))
#define S_INIT_LOCK(lock)   (test_then_and(lock,0))
#define S_LOCK_FREE(lock)   (test_then_add(lock,0) == 0)
#endif   /* __sgi */


#if defined(sinix)
/*
 * SINIX / Reliant UNIX
 * slock_t is defined as a struct abilock_t, which has a single unsigned long
 * member. (Basically same as SGI)
 */
#define HAS_TEST_AND_SET

#include "abi_mutex.h"
typedef abilock_t slock_t;

#define TAS(lock)           (!acquire_lock(lock))
#define S_UNLOCK(lock)      release_lock(lock)
#define S_INIT_LOCK(lock)   init_lock(lock)
#define S_LOCK_FREE(lock)   (stat_lock(lock) == UNLOCKED)
#endif   /* sinix */


#if defined(_AIX)
/*
 * AIX (POWER)
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)           _check_lock(lock, 0, 1)
#define S_UNLOCK(lock)      _clear_lock(lock, 0)
#endif   /* _AIX */


#if defined (nextstep)
#define HAS_TEST_AND_SET

typedef struct mutex slock_t;

#define S_LOCK(lock)        mutex_lock(lock)
#define S_UNLOCK(lock)      mutex_unlock(lock)
#define S_INIT_LOCK(lock)   mutex_init(lock)
/* For Mach, we have to delve inside the entrails of `struct mutex'.  Ick! */
#define S_LOCK_FREE(alock)  ((alock)->lock == 0)
#endif   /* nextstep */


/* These are in s_lock.c */


#if defined(sun3)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


#if defined(__sparc__) || defined(__sparc)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


/* out-of-line assembler from src/backend/port/tas/foo.s */

#if defined(__sun) && defined(__i386)
/*
 * Solaris/386 (we only get here for non-gcc case)
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


#endif  /* !defined(HAS_TEST_AND_SET) */


/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform.  To continue the compilation, rerun configure using --disable-spinlocks.  However, performance will be poor.  Please report this to pgsql-bugs@postgresql.org.
#endif


#else   /* !HAVE_SPINLOCKS */


/*
 * Fake spinlock implementation using semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must!  The subroutines appear in spin.c.
 */
typedef PGSemaphoreData slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock);
extern int  tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)   s_lock_free_sema(lock)
#define S_UNLOCK(lock)      s_unlock_sema(lock)
#define S_INIT_LOCK(lock)   s_init_lock_sema(lock)
#define TAS(lock)           tas_sema(lock)


#endif  /* HAVE_SPINLOCKS */


/*
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
    do { \
        if (TAS(lock)) \
            s_lock((lock), __FILE__, __LINE__); \
    } while (0)
#endif   /* S_LOCK */

#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)   (*(lock) == 0)
#endif   /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)      (*((volatile slock_t *) (lock)) = 0)
#endif   /* S_UNLOCK */

#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)   S_UNLOCK(lock)
#endif   /* S_INIT_LOCK */

#if !defined(SPIN_DELAY)
#define SPIN_DELAY()    ((void) 0)
#endif   /* SPIN_DELAY */

#if !defined(TAS)
extern int  tas(volatile slock_t *lock);        /* in port/.../tas.s, or
                                                 * s_lock.c */

#define TAS(lock)       tas(lock)
#endif   /* TAS */


/*
 * Platform-independent out-of-line support routines
 */
extern void s_lock(volatile slock_t *lock, const char *file, int line);

/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY  100

extern void set_spins_per_delay(int shared_spins_per_delay);
extern int  update_spins_per_delay(int shared_spins_per_delay);

#endif   /* S_LOCK_H */
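
For context, the many platform branches above all reduce to one small API: S_INIT_LOCK() puts a lock into the free state, S_LOCK() busy-waits (via TAS() and the out-of-line s_lock() routine) until the lock is acquired, and S_UNLOCK() releases it. The sketch below shows how a caller might protect a shared counter with that API. It is a minimal illustration, not part of s_lock.h: the struct and function names (SharedCounter, counter_init, counter_increment) are hypothetical, and PostgreSQL code normally reaches these macros through the SpinLockInit/SpinLockAcquire/SpinLockRelease wrappers in storage/spin.h rather than using them directly.

/*
 * Minimal usage sketch (assumed example, not part of s_lock.h):
 * protect a shared counter with a spinlock.  Critical sections under
 * S_LOCK must stay very short, since waiters simply spin.
 */
#include "storage/s_lock.h"

typedef struct
{
    slock_t     mutex;      /* protects value */
    int         value;
} SharedCounter;            /* hypothetical struct, for illustration only */

static void
counter_init(SharedCounter *c)
{
    S_INIT_LOCK(&c->mutex); /* lock starts out free */
    c->value = 0;
}

static int
counter_increment(SharedCounter *c)
{
    int         result;

    S_LOCK(&c->mutex);      /* TAS(), then the s_lock() spin loop on contention */
    result = ++c->value;
    S_UNLOCK(&c->mutex);    /* release; a plain store on most platforms */
    return result;
}

On platforms where no HAS_TEST_AND_SET branch fires, the same calls fall through to the semaphore-based fallback declared near the end of the header, which is why both branches expose an identical set of macros.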