⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 lock.c

📁 postgresql8.3.4源码,开源数据库
💻 C
📖 第 1 页 / 共 5 页
字号:
/*
 * GrantLock -- update the lock and proclock data structures to show
 *		the lock request has been granted.
 *
 * NOTE: if proc was blocked, it also needs to be removed from the wait list
 * and have its waitLock/waitProcLock fields cleared.  That's not done here.
 *
 * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
 * table entry; but since we may be awaking some other process, we can't do
 * that here; it's done by GrantLockLocal, instead.
 */
void
GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
{
	lock->nGranted++;
	lock->granted[lockmode]++;
	lock->grantMask |= LOCKBIT_ON(lockmode);
	/* If no one is left waiting for this mode, clear its waitMask bit */
	if (lock->granted[lockmode] == lock->requested[lockmode])
		lock->waitMask &= LOCKBIT_OFF(lockmode);
	proclock->holdMask |= LOCKBIT_ON(lockmode);
	LOCK_PRINT("GrantLock", lock, lockmode);
	Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
	Assert(lock->nGranted <= lock->nRequested);
}

/*
 * UnGrantLock -- opposite of GrantLock.
 *
 * Updates the lock and proclock data structures to show that the lock
 * is no longer held nor requested by the current holder.
 *
 * Returns true if there were any waiters waiting on the lock that
 * should now be woken up with ProcLockWakeup.
 */
static bool
UnGrantLock(LOCK *lock, LOCKMODE lockmode,
			PROCLOCK *proclock, LockMethod lockMethodTable)
{
	bool		wakeupNeeded = false;

	Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
	Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
	Assert(lock->nGranted <= lock->nRequested);

	/*
	 * fix the general lock stats
	 */
	lock->nRequested--;
	lock->requested[lockmode]--;
	lock->nGranted--;
	lock->granted[lockmode]--;

	if (lock->granted[lockmode] == 0)
	{
		/* change the conflict mask.  No more of this lock type. */
		lock->grantMask &= LOCKBIT_OFF(lockmode);
	}

	LOCK_PRINT("UnGrantLock: updated", lock, lockmode);

	/*
	 * We need only run ProcLockWakeup if the released lock conflicts with at
	 * least one of the lock types requested by waiter(s).  Otherwise whatever
	 * conflict made them wait must still exist.  NOTE: before MVCC, we could
	 * skip wakeup if lock->granted[lockmode] was still positive. But that's
	 * not true anymore, because the remaining granted locks might belong to
	 * some waiter, who could now be awakened because he doesn't conflict with
	 * his own locks.
	 */
	if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
		wakeupNeeded = true;

	/*
	 * Now fix the per-proclock state.
	 */
	proclock->holdMask &= LOCKBIT_OFF(lockmode);
	PROCLOCK_PRINT("UnGrantLock: updated", proclock);

	return wakeupNeeded;
}

/*
 * CleanUpLock -- clean up after releasing a lock.	We garbage-collect the
 * proclock and lock objects if possible, and call ProcLockWakeup if there
 * are remaining requests and the caller says it's OK.  (Normally, this
 * should be called after UnGrantLock, and wakeupNeeded is the result from
 * UnGrantLock.)
 *
 * The appropriate partition lock must be held at entry, and will be
 * held at exit.
 */
static void
CleanUpLock(LOCK *lock, PROCLOCK *proclock,
			LockMethod lockMethodTable, uint32 hashcode,
			bool wakeupNeeded)
{
	/*
	 * If this was my last hold on this lock, delete my entry in the proclock
	 * table.
	 */
	if (proclock->holdMask == 0)
	{
		uint32		proclock_hashcode;

		PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
		SHMQueueDelete(&proclock->lockLink);
		SHMQueueDelete(&proclock->procLink);
		proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
		if (!hash_search_with_hash_value(LockMethodProcLockHash,
										 (void *) &(proclock->tag),
										 proclock_hashcode,
										 HASH_REMOVE,
										 NULL))
			elog(PANIC, "proclock table corrupted");
	}

	if (lock->nRequested == 0)
	{
		/*
		 * The caller just released the last lock, so garbage-collect the lock
		 * object.
		 */
		LOCK_PRINT("CleanUpLock: deleting", lock, 0);
		Assert(SHMQueueEmpty(&(lock->procLocks)));
		if (!hash_search_with_hash_value(LockMethodLockHash,
										 (void *) &(lock->tag),
										 hashcode,
										 HASH_REMOVE,
										 NULL))
			elog(PANIC, "lock table corrupted");
	}
	else if (wakeupNeeded)
	{
		/* There are waiters on this lock, so wake them up. */
		ProcLockWakeup(lockMethodTable, lock);
	}
}

/*
 * GrantLockLocal -- update the locallock data structures to show
 *		the lock request has been granted.
 *
 * We expect that LockAcquire made sure there is room to add a new
 * ResourceOwner entry.
 */
static void
GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
{
	LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
	int			i;

	Assert(locallock->numLockOwners < locallock->maxLockOwners);
	/* Count the total */
	locallock->nLocks++;
	/* Count the per-owner lock */
	for (i = 0; i < locallock->numLockOwners; i++)
	{
		if (lockOwners[i].owner == owner)
		{
			lockOwners[i].nLocks++;
			return;
		}
	}
	/*
	 * No existing entry for this owner; claim the first free slot.  Falling
	 * out of the loop leaves i == numLockOwners, which the Assert above
	 * guarantees is a valid index.
	 */
	lockOwners[i].owner = owner;
	lockOwners[i].nLocks = 1;
	locallock->numLockOwners++;
}

/*
 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
 *		WaitOnLock on.
 *
 * proc.c needs this for the case where we are booted off the lock by
 * timeout, but discover that someone granted us the lock anyway.
 *
 * We could just export GrantLockLocal, but that would require including
 * resowner.h in lock.h, which creates circularity.
 */
void
GrantAwaitedLock(void)
{
	GrantLockLocal(awaitedLock, awaitedOwner);
}

/*
 * WaitOnLock -- wait to acquire a lock
 *
 * Caller must have set MyProc->heldLocks to reflect locks already held
 * on the lockable object by this process.
 *
 * The appropriate partition lock must be held at entry.
 */
static void
WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
{
	LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
	LockMethod	lockMethodTable = LockMethods[lockmethodid];
	char	   * volatile new_status = NULL;

	LOCK_PRINT("WaitOnLock: sleeping on lock",
			   locallock->lock, locallock->tag.mode);

	/* Report change to waiting status */
	if (update_process_title)
	{
		const char *old_status;
		int			len;

		old_status = get_ps_display(&len);
		/* room for " waiting" (8 chars) plus terminating NUL */
		new_status = (char *) palloc(len + 8 + 1);
		memcpy(new_status, old_status, len);
		strcpy(new_status + len, " waiting");
		set_ps_display(new_status, false);
		new_status[len] = '\0'; /* truncate off " waiting" */
	}
	pgstat_report_waiting(true);

	awaitedLock = locallock;
	awaitedOwner = owner;

	/*
	 * NOTE: Think not to put any shared-state cleanup after the call to
	 * ProcSleep, in either the normal or failure path.  The lock state must
	 * be fully set by the lock grantor, or by CheckDeadLock if we give up
	 * waiting for the lock.  This is necessary because of the possibility
	 * that a cancel/die interrupt will interrupt ProcSleep after someone else
	 * grants us the lock, but before we've noticed it. Hence, after granting,
	 * the locktable state must fully reflect the fact that we own the lock;
	 * we can't do additional work on return.
	 *
	 * We can and do use a PG_TRY block to try to clean up after failure,
	 * but this still has a major limitation: elog(FATAL) can occur while
	 * waiting (eg, a "die" interrupt), and then control won't come back here.
	 * So all cleanup of essential state should happen in LockWaitCancel,
	 * not here.  We can use PG_TRY to clear the "waiting" status flags,
	 * since doing that is unimportant if the process exits.
	 */
	PG_TRY();
	{
		if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
		{
			/*
			 * We failed as a result of a deadlock, see CheckDeadLock().
			 * Quit now.
			 */
			awaitedLock = NULL;
			LOCK_PRINT("WaitOnLock: aborting on lock",
					   locallock->lock, locallock->tag.mode);
			LWLockRelease(LockHashPartitionLock(locallock->hashcode));

			/*
			 * Now that we aren't holding the partition lock, we can give an
			 * error report including details about the detected deadlock.
			 */
			DeadLockReport();
			/* not reached */
		}
	}
	PG_CATCH();
	{
		/* In this path, awaitedLock remains set until LockWaitCancel */

		/* Report change to non-waiting status */
		pgstat_report_waiting(false);
		if (update_process_title)
		{
			set_ps_display(new_status, false);
			pfree(new_status);
		}

		/* and propagate the error */
		PG_RE_THROW();
	}
	PG_END_TRY();

	awaitedLock = NULL;

	/* Report change to non-waiting status */
	pgstat_report_waiting(false);
	if (update_process_title)
	{
		set_ps_display(new_status, false);
		pfree(new_status);
	}

	LOCK_PRINT("WaitOnLock: wakeup on lock",
			   locallock->lock, locallock->tag.mode);
}

/*
 * Remove a proc from the wait-queue it is on (caller must know it is on one).
 * This is only used when the proc has failed to get the lock, so we set its
 * waitStatus to STATUS_ERROR.
 *
 * Appropriate partition lock must be held by caller.  Also, caller is
 * responsible for signaling the proc if needed.
 *
 * NB: this does not clean up any locallock object that may exist for the lock.
 */
void
RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
{
	LOCK	   *waitLock = proc->waitLock;
	PROCLOCK   *proclock = proc->waitProcLock;
	LOCKMODE	lockmode = proc->waitLockMode;
	LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);

	/* Make sure proc is waiting */
	Assert(proc->waitStatus == STATUS_WAITING);
	Assert(proc->links.next != INVALID_OFFSET);
	Assert(waitLock);
	Assert(waitLock->waitProcs.size > 0);
	Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));

	/* Remove proc from lock's wait queue */
	SHMQueueDelete(&(proc->links));
	waitLock->waitProcs.size--;

	/* Undo increments of request counts by waiting process */
	Assert(waitLock->nRequested > 0);
	Assert(waitLock->nRequested > proc->waitLock->nGranted);
	waitLock->nRequested--;
	Assert(waitLock->requested[lockmode] > 0);
	waitLock->requested[lockmode]--;

	/* don't forget to clear waitMask bit if appropriate */
	if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
		waitLock->waitMask &= LOCKBIT_OFF(lockmode);

	/* Clean up the proc's own state, and pass it the ok/fail signal */
	proc->waitLock = NULL;
	proc->waitProcLock = NULL;
	proc->waitStatus = STATUS_ERROR;

	/*
	 * Delete the proclock immediately if it represents no already-held locks.
	 * (This must happen now because if the owner of the lock decides to
	 * release it, and the requested/granted counts then go to zero,
	 * LockRelease expects there to be no remaining proclocks.) Then see if
	 * any other waiters for the lock can be woken up now.
	 */
	CleanUpLock(waitLock, proclock,
				LockMethods[lockmethodid], hashcode,
				true);
}

/*
 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
 *		Release a session lock if 'sessionLock' is true, else release a
 *		regular transaction lock.
 *
 * Side Effects: find any waiting processes that are now wakable,
 *		grant them their requested locks and awaken them.
 *		(We have to grant the lock here to avoid a race between
 *		the waking process and any new process to
 *		come along and request the lock.)
 */
bool
LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
{
	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
	LockMethod	lockMethodTable;
	LOCALLOCKTAG localtag;
	LOCALLOCK  *locallock;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	LWLockId	partitionLock;
	bool		wakeupNeeded;

	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];
	if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
		elog(ERROR, "unrecognized lock mode: %d", lockmode);

#ifdef LOCK_DEBUG
	if (LOCK_DEBUG_ENABLED(locktag))
		elog(LOG, "LockRelease: lock [%u,%u] %s",
			 locktag->locktag_field1, locktag->locktag_field2,
			 lockMethodTable->lockModeNames[lockmode]);
#endif

	/*
	 * Find the LOCALLOCK entry for this lock and lockmode
	 */
	MemSet(&localtag, 0, sizeof(localtag));		/* must clear padding */
	localtag.lock = *locktag;
	localtag.mode = lockmode;

	locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
										  (void *) &localtag,
										  HASH_FIND, NULL);

	/*
	 * let the caller print its own error message, too. Do not ereport(ERROR).
	 */
	if (!locallock || locallock->nLocks <= 0)
	{
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		return FALSE;
	}

	/*
	 * Decrease the count for the resource owner.
	 */
	{
		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
		ResourceOwner owner;
		int			i;

		/* Session locks are never transactional, else check table */
		if (!sessionLock && lockMethodTable->transactional)
			owner = CurrentResourceOwner;
		else
			owner = NULL;

		for (i = locallock->numLockOwners - 1; i >= 0; i--)
		{
			if (lockOwners[i].owner == owner)
			{
				Assert(lockOwners[i].nLocks > 0);
				if (--lockOwners[i].nLocks == 0)
				{
					/* compact out unused slot */
					locallock->numLockOwners--;
					if (i < locallock->numLockOwners)
						lockOwners[i] = lockOwners[locallock->numLockOwners];
				}
				break;
			}
		}
		if (i < 0)
		{
			/* don't release a lock belonging to another owner */
			elog(WARNING, "you don't own a lock of type %s",
				 lockMethodTable->lockModeNames[lockmode]);
			return FALSE;
		}
	}

	/*
	 * Decrease the total local count.	If we're still holding the lock, we're
	 * done.
	 */
	locallock->nLocks--;

	if (locallock->nLocks > 0)
		return TRUE;

	/*
	 * Otherwise we've got to mess with the shared lock table.
	 */
	partitionLock = LockHashPartitionLock(locallock->hashcode);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -