lwlock.c

From "PostgreSQL 8.3.4 source code, open-source database" · C code · 695 lines total · page 1/2

C
695
Font size
/*-------------------------------------------------------------------------
 *
 * lwlock.c
 *	  Lightweight lock manager
 *
 * Lightweight locks are intended primarily to provide mutual exclusion of
 * access to shared-memory data structures.  Therefore, they offer both
 * exclusive and shared lock modes (to support read/write and read-only
 * access to a shared object).	There are few other frammishes.  User-level
 * locking should be done with the full lock manager --- which depends on
 * LWLocks to protect its shared state.
 *
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.50 2008/01/01 19:45:52 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/clog.h"
#include "access/multixact.h"
#include "access/subtrans.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/proc.h"
#include "storage/spin.h"


/* We use the ShmemLock spinlock to protect LWLockAssign */
extern slock_t *ShmemLock;

/*
 * Shared-memory state of one lightweight lock.  The spinlock protects all
 * of the remaining fields, including the wait queue of PGPROCs.
 */
typedef struct LWLock
{
	slock_t		mutex;			/* Protects LWLock and queue of PGPROCs */
	bool		releaseOK;		/* T if ok to release waiters */
	char		exclusive;		/* # of exclusive holders (0 or 1) */
	int			shared;			/* # of shared holders (0..MaxBackends) */
	PGPROC	   *head;			/* head of list of waiting PGPROCs */
	PGPROC	   *tail;			/* tail of list of waiting PGPROCs */
	/* tail is undefined when head is NULL */
} LWLock;

/*
 * All the LWLock structs are allocated as an array in shared memory.
 * (LWLockIds are indexes into the array.)	We force the array stride to
 * be a power of 2, which saves a few cycles in indexing, but more
 * importantly also ensures that individual LWLocks don't cross cache line
 * boundaries.	This reduces cache contention problems, especially on AMD
 * Opterons.
 * (Of course, we have to also ensure that the array start
 * address is suitably aligned.)
 *
 * LWLock is between 16 and 32 bytes on all known platforms, so these two
 * cases are sufficient.
 */
#define LWLOCK_PADDED_SIZE	(sizeof(LWLock) <= 16 ? 16 : 32)

/* An LWLock padded out to a power-of-2 stride (16 or 32 bytes). */
typedef union LWLockPadded
{
	LWLock		lock;
	char		pad[LWLOCK_PADDED_SIZE];
} LWLockPadded;

/*
 * This points to the array of LWLocks in shared memory.  Backends inherit
 * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
 * where we have special measures to pass it down).
 */
NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;


/*
 * We use this structure to keep track of locked LWLocks for release
 * during error recovery.  The maximum size could be determined at runtime
 * if necessary, but it seems unlikely that more than a few locks could
 * ever be held simultaneously.
 */
#define MAX_SIMUL_LWLOCKS	100

/* Stack of LWLockIds currently held by this backend, for error cleanup. */
static int	num_held_lwlocks = 0;
static LWLockId held_lwlocks[MAX_SIMUL_LWLOCKS];

/* Extra LWLocks requested by loadable modules via RequestAddinLWLocks(). */
static int	lock_addin_request = 0;
static bool lock_addin_request_allowed = true;

#ifdef LWLOCK_STATS
/* Per-process acquisition/blocking counters, allocated lazily per PID. */
static int	counts_for_pid = 0;
static int *sh_acquire_counts;
static int *ex_acquire_counts;
static int *block_counts;
#endif

#ifdef LOCK_DEBUG
bool		Trace_lwlocks = false;

/* Dump an LWLock's state to the log when lwlock tracing is enabled. */
inline static void
PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
{
	if (Trace_lwlocks)
		elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
			 where, (int) lockid,
			 (int) lock->exclusive, lock->shared, lock->head,
			 (int) lock->releaseOK);
}

/* Log a short tracing message tagged with the caller and lock id. */
inline static void
LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
{
	if (Trace_lwlocks)
		elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
}
#else							/* not LOCK_DEBUG */
#define PRINT_LWDEBUG(a,b,c)
#define LOG_LWDEBUG(a,b,c)
#endif   /* LOCK_DEBUG */

#ifdef LWLOCK_STATS
/* on_shmem_exit callback: print this backend's lock statistics to stderr. */
static void
print_lwlock_stats(int code, Datum arg)
{
	int			i;
	/* The allocation counter is stored just before the lock array. */
	int		   *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
	int			numLocks = LWLockCounter[1];

	/* Grab an LWLock to keep different backends from
	 * mixing reports */
	LWLockAcquire(0, LW_EXCLUSIVE);

	for (i = 0; i < numLocks; i++)
	{
		if (sh_acquire_counts[i] || ex_acquire_counts[i] || block_counts[i])
			fprintf(stderr, "PID %d lwlock %d: shacq %u exacq %u blk %u\n",
					MyProcPid, i, sh_acquire_counts[i], ex_acquire_counts[i],
					block_counts[i]);
	}

	LWLockRelease(0);
}
#endif   /* LWLOCK_STATS */


/*
 * Compute number of LWLocks to allocate.
 */
int
NumLWLocks(void)
{
	int			numLocks;

	/*
	 * Possibly this logic should be spread out among the affected modules,
	 * the same way that shmem space estimation is done.  But for now, there
	 * are few enough users of LWLocks that we can get away with just keeping
	 * the knowledge here.
	 */

	/* Predefined LWLocks */
	numLocks = (int) NumFixedLWLocks;

	/* bufmgr.c needs two for each shared buffer */
	numLocks += 2 * NBuffers;

	/* clog.c needs one per CLOG buffer */
	numLocks += NUM_CLOG_BUFFERS;

	/* subtrans.c needs one per SubTrans buffer */
	numLocks += NUM_SUBTRANS_BUFFERS;

	/* multixact.c needs two SLRU areas */
	numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;

	/*
	 * Add any requested by loadable modules; for backwards-compatibility
	 * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if
	 * there are no explicit requests.  Note this also closes the window for
	 * further requests: lock_addin_request_allowed goes false here.
	 */
	lock_addin_request_allowed = false;
	numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);

	return numLocks;
}


/*
 * RequestAddinLWLocks
 *		Request that extra LWLocks be allocated for use by
 *		a loadable module.
 *
 * This is only useful if called from the _PG_init hook of a library that
 * is loaded into the postmaster via shared_preload_libraries.	Once
 * shared memory has been allocated, calls will be ignored.  (We could
 * raise an error, but it seems better to make it a no-op, so that
 * libraries containing such calls can be reloaded if needed.)
 */
void
RequestAddinLWLocks(int n)
{
	if (IsUnderPostmaster || !lock_addin_request_allowed)
		return;					/* too late */
	lock_addin_request += n;
}


/*
 * Compute shmem space needed for LWLocks.
*/SizeLWLockShmemSize(void){	Size		size;	int			numLocks = NumLWLocks();	/* Space for the LWLock array. */	size = mul_size(numLocks, sizeof(LWLockPadded));	/* Space for dynamic allocation counter, plus room for alignment. */	size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);	return size;}/* * Allocate shmem space for LWLocks and initialize the locks. */voidCreateLWLocks(void){	int			numLocks = NumLWLocks();	Size		spaceLocks = LWLockShmemSize();	LWLockPadded *lock;	int		   *LWLockCounter;	char	   *ptr;	int			id;	/* Allocate space */	ptr = (char *) ShmemAlloc(spaceLocks);	/* Leave room for dynamic allocation counter */	ptr += 2 * sizeof(int);	/* Ensure desired alignment of LWLock array */	ptr += LWLOCK_PADDED_SIZE - ((unsigned long) ptr) % LWLOCK_PADDED_SIZE;	LWLockArray = (LWLockPadded *) ptr;	/*	 * Initialize all LWLocks to "unlocked" state	 */	for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)	{		SpinLockInit(&lock->lock.mutex);		lock->lock.releaseOK = true;		lock->lock.exclusive = 0;		lock->lock.shared = 0;		lock->lock.head = NULL;		lock->lock.tail = NULL;	}	/*	 * Initialize the dynamic-allocation counter, which is stored just before	 * the first LWLock.	 */	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));	LWLockCounter[0] = (int) NumFixedLWLocks;	LWLockCounter[1] = numLocks;}/* * LWLockAssign - assign a dynamically-allocated LWLock number * * We interlock this using the same spinlock that is used to protect * ShmemAlloc().  Interlocking is not really necessary during postmaster * startup, but it is needed if any user-defined code tries to allocate * LWLocks after startup. 
 */
LWLockId
LWLockAssign(void)
{
	LWLockId	result;

	/* use volatile pointer to prevent code rearrangement */
	volatile int *LWLockCounter;

	/* Counter pair (next free id, total count) sits just before the array. */
	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
	SpinLockAcquire(ShmemLock);
	if (LWLockCounter[0] >= LWLockCounter[1])
	{
		/* Must release the spinlock before elog(ERROR) throws. */
		SpinLockRelease(ShmemLock);
		elog(ERROR, "no more LWLockIds available");
	}
	result = (LWLockId) (LWLockCounter[0]++);
	SpinLockRelease(ShmemLock);
	return result;
}


/*
 * LWLockAcquire - acquire a lightweight lock in the specified mode
 *
 * If the lock is not available, sleep until it is.
 *
 * Side effect: cancel/die interrupts are held off until lock release.
 */
void
LWLockAcquire(LWLockId lockid, LWLockMode mode)
{
	volatile LWLock *lock = &(LWLockArray[lockid].lock);
	PGPROC	   *proc = MyProc;
	bool		retry = false;
	int			extraWaits = 0;

	PRINT_LWDEBUG("LWLockAcquire", lockid, lock);

#ifdef LWLOCK_STATS
	/* Set up local count state first time through in a given process */
	if (counts_for_pid != MyProcPid)
	{
		int		   *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
		int			numLocks = LWLockCounter[1];

		sh_acquire_counts = calloc(numLocks, sizeof(int));
		ex_acquire_counts = calloc(numLocks, sizeof(int));
		block_counts = calloc(numLocks, sizeof(int));
		counts_for_pid = MyProcPid;
		on_shmem_exit(print_lwlock_stats, 0);
	}
	/* Count lock acquisition attempts */
	if (mode == LW_EXCLUSIVE)
		ex_acquire_counts[lockid]++;
	else
		sh_acquire_counts[lockid]++;
#endif   /* LWLOCK_STATS */

	/*
	 * We can't wait if we haven't got a PGPROC.  This should only occur
	 * during bootstrap or shared memory initialization.  Put an Assert here
	 * to catch unsafe coding practices.
	 */
	Assert(!(proc == NULL && IsUnderPostmaster));

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?