
📄 lwlock.c

📁 Source code of PostgreSQL 8.1.4, the open-source database system, for Linux
💻 C
📖 Page 1 of 2
/*-------------------------------------------------------------------------
 *
 * lwlock.c
 *	  Lightweight lock manager
 *
 * Lightweight locks are intended primarily to provide mutual exclusion of
 * access to shared-memory data structures.  Therefore, they offer both
 * exclusive and shared lock modes (to support read/write and read-only
 * access to a shared object).	There are few other frammishes.  User-level
 * locking should be done with the full lock manager --- which depends on
 * an LWLock to protect its shared state.
 *
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.34 2005/10/15 02:49:26 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/slru.h"
#include "storage/lwlock.h"
#include "storage/proc.h"
#include "storage/spin.h"


/* We use the ShmemLock spinlock to protect LWLockAssign */
extern slock_t *ShmemLock;


typedef struct LWLock
{
	slock_t		mutex;			/* Protects LWLock and queue of PGPROCs */
	bool		releaseOK;		/* T if ok to release waiters */
	char		exclusive;		/* # of exclusive holders (0 or 1) */
	int			shared;			/* # of shared holders (0..MaxBackends) */
	PGPROC	   *head;			/* head of list of waiting PGPROCs */
	PGPROC	   *tail;			/* tail of list of waiting PGPROCs */
	/* tail is undefined when head is NULL */
} LWLock;

/*
 * All the LWLock structs are allocated as an array in shared memory.
 * (LWLockIds are indexes into the array.)	We force the array stride to
 * be a power of 2, which saves a few cycles in indexing, but more
 * importantly also ensures that individual LWLocks don't cross cache line
 * boundaries.	This reduces cache contention problems, especially on AMD
 * Opterons.  (Of course, we have to also ensure that the array start
 * address is suitably aligned.)
 *
 * LWLock is between 16 and 32 bytes on all known platforms, so these two
 * cases are sufficient.
 */
#define LWLOCK_PADDED_SIZE	(sizeof(LWLock) <= 16 ? 16 : 32)

typedef union LWLockPadded
{
	LWLock		lock;
	char		pad[LWLOCK_PADDED_SIZE];
} LWLockPadded;

/*
 * This points to the array of LWLocks in shared memory.  Backends inherit
 * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
 * where we have special measures to pass it down).
 */
NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;


/*
 * We use this structure to keep track of locked LWLocks for release
 * during error recovery.  The maximum size could be determined at runtime
 * if necessary, but it seems unlikely that more than a few locks could
 * ever be held simultaneously.
 */
#define MAX_SIMUL_LWLOCKS	100

static int	num_held_lwlocks = 0;
static LWLockId held_lwlocks[MAX_SIMUL_LWLOCKS];


#ifdef LOCK_DEBUG
bool		Trace_lwlocks = false;

inline static void
PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
{
	if (Trace_lwlocks)
		elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
			 where, (int) lockid,
			 (int) lock->exclusive, lock->shared, lock->head,
			 (int) lock->releaseOK);
}

inline static void
LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
{
	if (Trace_lwlocks)
		elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
}
#else							/* not LOCK_DEBUG */
#define PRINT_LWDEBUG(a,b,c)
#define LOG_LWDEBUG(a,b,c)
#endif   /* LOCK_DEBUG */


/*
 * Compute number of LWLocks to allocate.
 */
int
NumLWLocks(void)
{
	int			numLocks;

	/*
	 * Possibly this logic should be spread out among the affected modules,
	 * the same way that shmem space estimation is done.  But for now, there
	 * are few enough users of LWLocks that we can get away with just keeping
	 * the knowledge here.
	 */

	/* Predefined LWLocks */
	numLocks = (int) NumFixedLWLocks;

	/* bufmgr.c needs two for each shared buffer */
	numLocks += 2 * NBuffers;

	/* clog.c needs one per CLOG buffer */
	numLocks += NUM_SLRU_BUFFERS;

	/* subtrans.c needs one per SubTrans buffer */
	numLocks += NUM_SLRU_BUFFERS;

	/*
	 * multixact.c needs one per MultiXact buffer, but there are two SLRU
	 * areas for MultiXact
	 */
	numLocks += 2 * NUM_SLRU_BUFFERS;

	/* Leave a few extra for use by user-defined modules. */
	numLocks += NUM_USER_DEFINED_LWLOCKS;

	return numLocks;
}


/*
 * Compute shmem space needed for LWLocks.
 */
Size
LWLockShmemSize(void)
{
	Size		size;
	int			numLocks = NumLWLocks();

	/* Space for the LWLock array. */
	size = mul_size(numLocks, sizeof(LWLockPadded));

	/* Space for dynamic allocation counter, plus room for alignment. */
	size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);

	return size;
}


/*
 * Allocate shmem space for LWLocks and initialize the locks.
 */
void
CreateLWLocks(void)
{
	int			numLocks = NumLWLocks();
	Size		spaceLocks = LWLockShmemSize();
	LWLockPadded *lock;
	int		   *LWLockCounter;
	char	   *ptr;
	int			id;

	/* Allocate space */
	ptr = (char *) ShmemAlloc(spaceLocks);

	/* Leave room for dynamic allocation counter */
	ptr += 2 * sizeof(int);

	/* Ensure desired alignment of LWLock array */
	ptr += LWLOCK_PADDED_SIZE - ((unsigned long) ptr) % LWLOCK_PADDED_SIZE;

	LWLockArray = (LWLockPadded *) ptr;

	/*
	 * Initialize all LWLocks to "unlocked" state
	 */
	for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
	{
		SpinLockInit(&lock->lock.mutex);
		lock->lock.releaseOK = true;
		lock->lock.exclusive = 0;
		lock->lock.shared = 0;
		lock->lock.head = NULL;
		lock->lock.tail = NULL;
	}

	/*
	 * Initialize the dynamic-allocation counter, which is stored just before
	 * the first LWLock.
	 */
	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
	LWLockCounter[0] = (int) NumFixedLWLocks;
	LWLockCounter[1] = numLocks;
}


/*
 * LWLockAssign - assign a dynamically-allocated LWLock number
 *
 * We interlock this using the same spinlock that is used to protect
 * ShmemAlloc().  Interlocking is not really necessary during postmaster
 * startup, but it is needed if any user-defined code tries to allocate
 * LWLocks after startup.
 */
LWLockId
LWLockAssign(void)
{
	LWLockId	result;

	/* use volatile pointer to prevent code rearrangement */
	volatile int *LWLockCounter;

	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
	SpinLockAcquire(ShmemLock);
	if (LWLockCounter[0] >= LWLockCounter[1])
	{
		SpinLockRelease(ShmemLock);
		elog(ERROR, "no more LWLockIds available");
	}
	result = (LWLockId) (LWLockCounter[0]++);
	SpinLockRelease(ShmemLock);
	return result;
}


/*
 * LWLockAcquire - acquire a lightweight lock in the specified mode
 *
 * If the lock is not available, sleep until it is.
 *
 * Side effect: cancel/die interrupts are held off until lock release.
 */
void
LWLockAcquire(LWLockId lockid, LWLockMode mode)
{
	volatile LWLock *lock = &(LWLockArray[lockid].lock);
	PGPROC	   *proc = MyProc;
	bool		retry = false;
	int			extraWaits = 0;

	PRINT_LWDEBUG("LWLockAcquire", lockid, lock);

	/*
	 * We can't wait if we haven't got a PGPROC.  This should only occur
	 * during bootstrap or shared memory initialization.  Put an Assert here
	 * to catch unsafe coding practices.
	 */
	Assert(!(proc == NULL && IsUnderPostmaster));

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/*
	 * Loop here to try to acquire lock after each time we are signaled by
	 * LWLockRelease.
	 *
	 * NOTE: it might seem better to have LWLockRelease actually grant us the
	 * lock, rather than retrying and possibly having to go back to sleep. But
	 * in practice that is no good because it means a process swap for every
	 * lock acquisition when two or more processes are contending for the same
	 * lock.  Since LWLocks are normally used to protect not-very-long
	 * sections of computation, a process needs to be able to acquire and
	 * release the same lock many times during a single CPU time slice, even
	 * in the presence of contention.  The efficiency of being able to do that
	 * outweighs the inefficiency of sometimes wasting a process dispatch
	 * cycle because the lock is not free when a released waiter finally gets
	 * to run.	See pgsql-hackers archives for 29-Dec-01.
	 */
	for (;;)
	{
		bool		mustwait;

		/* Acquire mutex.  Time spent holding mutex should be short! */
		SpinLockAcquire_NoHoldoff(&lock->mutex);
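The comments above note that NumLWLocks() reserves a few spare locks for user-defined modules and that LWLockAssign() hands them out, with interlocking so this also works after postmaster startup. The following sketch is not part of lwlock.c; it only illustrates that intended usage. It assumes the LW_EXCLUSIVE mode constant and LWLockRelease(), which come from storage/lwlock.h and the second page of this listing, and the names my_module_init, my_module_bump_counter and my_shared_counter are hypothetical.

/*
 * Usage sketch (not part of lwlock.c): a user-defined module claiming one of
 * the spare LWLocks reserved via NUM_USER_DEFINED_LWLOCKS and using it to
 * protect a small shared-memory datum.
 */
#include "postgres.h"

#include "storage/lwlock.h"
#include "storage/shmem.h"

static LWLockId my_lock;			/* assigned once during initialization */
static int	   *my_shared_counter;	/* hypothetical shared-memory datum */

void
my_module_init(void)
{
	/* Claim one of the dynamically allocatable LWLockIds */
	my_lock = LWLockAssign();

	/* Carve out the shared datum this lock will protect */
	my_shared_counter = (int *) ShmemAlloc(sizeof(int));
	*my_shared_counter = 0;
}

void
my_module_bump_counter(void)
{
	/* Exclusive mode, since we modify the shared structure */
	LWLockAcquire(my_lock, LW_EXCLUSIVE);
	(*my_shared_counter)++;
	LWLockRelease(my_lock);		/* also lifts the cancel/die interrupt holdoff
								 * taken by LWLockAcquire */
}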
