📄 lock.c
字号:
/*-------------------------------------------------------------------------
 *
 * lock.c
 *	  POSTGRES primary lock mechanism
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.181.2.1 2008/03/04 19:54:13 tgl Exp $
 *
 * NOTES
 *	  A lock table is a shared memory hash table.  When
 *	  a process tries to acquire a lock of a type that conflicts
 *	  with existing locks, it is put to sleep using the routines
 *	  in storage/lmgr/proc.c.
 *
 *	  For the most part, this code should be invoked via lmgr.c
 *	  or another lock-management module, not directly.
 *
 *	Interface:
 *
 *	InitLocks(), GetLocksMethodTable(),
 *	LockAcquire(), LockRelease(), LockReleaseAll(),
 *	LockCheckConflicts(), GrantLock()
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <signal.h>
#include <unistd.h>

#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"


/* This configuration variable is used to set the lock table size */
int			max_locks_per_xact; /* set by guc.c */

/* Upper bound on the number of entries in the shared lock hash tables */
#define NLOCKENTS() \
	mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))


/*
 * Data structures defining the semantics of the standard lock methods.
 *
 * The conflict table defines the semantics of the various lock modes.
 * Entry [i] is the bitmask of lock modes that conflict with mode i;
 * entry 0 is unused (mode numbers start at 1).
 */
static const LOCKMASK LockConflicts[] = {
	0,

	/* AccessShareLock */
	(1 << AccessExclusiveLock),

	/* RowShareLock */
	(1 << ExclusiveLock) | (1 << AccessExclusiveLock),

	/* RowExclusiveLock */
	(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
	(1 << ExclusiveLock) | (1 << AccessExclusiveLock),

	/* ShareUpdateExclusiveLock */
	(1 << ShareUpdateExclusiveLock) |
	(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
	(1 << ExclusiveLock) | (1 << AccessExclusiveLock),

	/* ShareLock */
	(1 << RowExclusiveLock) | (1 << ShareUpdateExclusiveLock) |
	(1 << ShareRowExclusiveLock) |
	(1 << ExclusiveLock) | (1 << AccessExclusiveLock),

	/* ShareRowExclusiveLock */
	(1 << RowExclusiveLock) | (1 << ShareUpdateExclusiveLock) |
	(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
	(1 << ExclusiveLock) | (1 << AccessExclusiveLock),

	/* ExclusiveLock */
	(1 << RowShareLock) |
	(1 << RowExclusiveLock) | (1 << ShareUpdateExclusiveLock) |
	(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
	(1 << ExclusiveLock) | (1 << AccessExclusiveLock),

	/* AccessExclusiveLock */
	(1 << AccessShareLock) | (1 << RowShareLock) |
	(1 << RowExclusiveLock) | (1 << ShareUpdateExclusiveLock) |
	(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
	(1 << ExclusiveLock) | (1 << AccessExclusiveLock)

};

/* Names of lock modes, for debug printouts */
static const char *const lock_mode_names[] =
{
	"INVALID",
	"AccessShareLock",
	"RowShareLock",
	"RowExclusiveLock",
	"ShareUpdateExclusiveLock",
	"ShareLock",
	"ShareRowExclusiveLock",
	"ExclusiveLock",
	"AccessExclusiveLock"
};

#ifndef LOCK_DEBUG
/* stand-in trace flag when lock debugging is compiled out */
static bool Dummy_trace = false;
#endif

static const LockMethodData default_lockmethod = {
	AccessExclusiveLock,		/* highest valid lock mode number */
	true,
	LockConflicts,
	lock_mode_names,
#ifdef LOCK_DEBUG
	&Trace_locks
#else
	&Dummy_trace
#endif
};

static const LockMethodData user_lockmethod = {
	AccessExclusiveLock,		/* highest valid lock mode number */
	false,
	LockConflicts,
	lock_mode_names,
#ifdef LOCK_DEBUG
	&Trace_userlocks
#else
	&Dummy_trace
#endif
};

/*
 * map from lock method id to the lock table data structures
 */
static const LockMethod LockMethods[] = {
	NULL,
	&default_lockmethod,
	&user_lockmethod
};


/* Record that's written to 2PC state file when a lock is persisted */
typedef struct TwoPhaseLockRecord
{
	LOCKTAG		locktag;
	LOCKMODE	lockmode;
} TwoPhaseLockRecord;


/*
 * Pointers to hash tables containing lock state
 *
 * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
 * shared memory; LockMethodLocalHash is local to each backend.
 */
static HTAB *LockMethodLockHash;
static HTAB *LockMethodProcLockHash;
static HTAB *LockMethodLocalHash;

/* private state for GrantAwaitedLock */
static LOCALLOCK *awaitedLock;
static ResourceOwner awaitedOwner;


#ifdef LOCK_DEBUG

/*------
 * The following configuration options are available for lock debugging:
 *
 *	   TRACE_LOCKS -- give a bunch of output what's going on in this file
 *	   TRACE_USERLOCKS -- same but for user locks
 *	   TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
 *	   (use to avoid output on system tables)
 *	   TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
 *	   DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
 *
 * Furthermore, but in storage/lmgr/lwlock.c:
 *	   TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
 *
 * Define LOCK_DEBUG at compile time to get all these enabled.
 * --------
 */

int			Trace_lock_oidmin = FirstNormalObjectId;
bool		Trace_locks = false;
bool		Trace_userlocks = false;
int			Trace_lock_table = 0;
bool		Debug_deadlocks = false;


/* Decide whether tracing is enabled for the lock identified by 'tag' */
inline static bool
LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
{
	return
		(*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
		 ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
		|| (Trace_lock_table &&
			(tag->locktag_field2 == Trace_lock_table));
}

/* Emit a LOG line describing 'lock' and the requested mode 'type' */
inline static void
LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
{
	if (LOCK_DEBUG_ENABLED(&lock->tag))
		elog(LOG,
			 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
			 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
			 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
			 where, lock,
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3, lock->tag.locktag_field4,
			 lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
			 lock->grantMask,
			 lock->requested[1], lock->requested[2], lock->requested[3],
			 lock->requested[4], lock->requested[5], lock->requested[6],
			 lock->requested[7], lock->nRequested,
			 lock->granted[1], lock->granted[2], lock->granted[3],
			 lock->granted[4], lock->granted[5], lock->granted[6],
			 lock->granted[7], lock->nGranted,
			 lock->waitProcs.size,
			 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
}

/* Emit a LOG line describing the per-process lock entry 'proclockP' */
inline static void
PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
{
	if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
		elog(LOG,
			 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
			 where, proclockP, proclockP->tag.myLock,
			 PROCLOCK_LOCKMETHOD(*(proclockP)),
			 proclockP->tag.myProc, (int) proclockP->holdMask);
}
#else							/* not LOCK_DEBUG */

/* no-op stubs when lock debugging is compiled out */
#define LOCK_PRINT(where, lock, type)
#define PROCLOCK_PRINT(where, proclockP)
#endif   /* not LOCK_DEBUG */


/* internal function prototypes */
static uint32 proclock_hash(const void *key, Size keysize);
static void RemoveLocalLock(LOCALLOCK *locallock);
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
static bool UnGrantLock(LOCK *lock, LOCKMODE
lockmode, PROCLOCK *proclock, LockMethod lockMethodTable);static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded);/* * InitLocks -- Initialize the lock manager's data structures. * * This is called from CreateSharedMemoryAndSemaphores(), which see for * more comments. In the normal postmaster case, the shared hash tables * are created here, as well as a locallock hash table that will remain * unused and empty in the postmaster itself. Backends inherit the pointers * to the shared tables via fork(), and also inherit an image of the locallock * hash table, which they proceed to use. In the EXEC_BACKEND case, each * backend re-executes this code to obtain pointers to the already existing * shared hash tables and to create its locallock hash table. */voidInitLocks(void){ HASHCTL info; int hash_flags; long init_table_size, max_table_size; /* * Compute init/max size to request for lock hashtables. Note these * calculations must agree with LockShmemSize! */ max_table_size = NLOCKENTS(); init_table_size = max_table_size / 2; /* * Allocate hash table for LOCK structs. This stores per-locked-object * information. */ MemSet(&info, 0, sizeof(info)); info.keysize = sizeof(LOCKTAG); info.entrysize = sizeof(LOCK); info.hash = tag_hash; info.num_partitions = NUM_LOCK_PARTITIONS; hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION); LockMethodLockHash = ShmemInitHash("LOCK hash", init_table_size, max_table_size, &info, hash_flags); if (!LockMethodLockHash) elog(FATAL, "could not initialize lock hash table"); /* Assume an average of 2 holders per lock */ max_table_size *= 2; init_table_size *= 2; /* * Allocate hash table for PROCLOCK structs. This stores * per-lock-per-holder information. 
*/ info.keysize = sizeof(PROCLOCKTAG); info.entrysize = sizeof(PROCLOCK); info.hash = proclock_hash; info.num_partitions = NUM_LOCK_PARTITIONS; hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION); LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash", init_table_size, max_table_size, &info, hash_flags); if (!LockMethodProcLockHash) elog(FATAL, "could not initialize proclock hash table"); /* * Allocate non-shared hash table for LOCALLOCK structs. This stores lock * counts and resource owner information. * * The non-shared table could already exist in this process (this occurs * when the postmaster is recreating shared memory after a backend crash). * If so, delete and recreate it. (We could simply leave it, since it * ought to be empty in the postmaster, but for safety let's zap it.) */ if (LockMethodLocalHash) hash_destroy(LockMethodLocalHash); info.keysize = sizeof(LOCALLOCKTAG); info.entrysize = sizeof(LOCALLOCK); info.hash = tag_hash; hash_flags = (HASH_ELEM | HASH_FUNCTION); LockMethodLocalHash = hash_create("LOCALLOCK hash", 128, &info, hash_flags);}/* * Fetch the lock method table associated with a given lock */LockMethodGetLocksMethodTable(const LOCK *lock){ LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock); Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods)); return LockMethods[lockmethodid];}/* * Compute the hash code associated with a LOCKTAG. * * To avoid unnecessary recomputations of the hash code, we try to do this * just once per function, and then pass it around as needed. Aside from * passing the hashcode to hash_search_with_hash_value(), we can extract * the lock partition number from the hashcode. */uint32LockTagHashCode(const LOCKTAG *locktag){ return get_hash_value(LockMethodLockHash, (const void *) locktag);}/* * Compute the hash code associated with a PROCLOCKTAG. 
* * Because we want to use just one set of partition locks for both the * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs * fall into the same partition number as their associated LOCKs. * dynahash.c expects the partition number to be the low-order bits of * the hash code, and therefore a PROCLOCKTAG's hash code must have the * same low-order bits as the associated LOCKTAG's hash code. We achieve * this with this specialized hash function. */static uint32proclock_hash(const void *key, Size keysize){ const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key; uint32 lockhash; Datum procptr; Assert(keysize == sizeof(PROCLOCKTAG)); /* Look into the associated LOCK object, and compute its hash code */ lockhash = LockTagHashCode(&proclocktag->myLock->tag); /* * To make the hash code also depend on the PGPROC, we xor the proc * struct's address into the hash code, left-shifted so that the * partition-number bits don't change. Since this is only a hash, we * don't care if we lose high-order bits of the address; use an * intermediate variable to suppress cast-pointer-to-int warnings. */ procptr = PointerGetDatum(proclocktag->myProc); lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS; return lockhash;}/* * Compute the hash code associated with a PROCLOCKTAG, given the hashcode * for its underlying LOCK. * * We use this just to avoid redundant calls of LockTagHashCode(). */static inline uint32ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode){ uint32 lockhash = hashcode; Datum procptr; /* * This must match proclock_hash()! */ procptr = PointerGetDatum(proclocktag->myProc); lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS; return lockhash;}/* * LockAcquire -- Check for lock conflicts, sleep if conflict found, * set lock if/when no conflicts. 
 *
 * Inputs:
 *	locktag: unique identifier for the lockable object
 *	lockmode: lock mode to acquire
 *	sessionLock: if true, acquire lock for session not current transaction
 *	dontWait: if true, don't wait to acquire lock
 *
 * Returns one of:
 *		LOCKACQUIRE_NOT_AVAIL		lock not available, and dontWait=true
 *		LOCKACQUIRE_OK				lock successfully acquired
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -