
lock.c

This is the resip protocol stack (an overseas project).
Language: C
Page 1 of 3
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996-2004
 *	Sleepycat Software.  All rights reserved.
 *
 * $Id: lock.c,v 11.167 2004/10/15 16:59:41 bostic Exp $
 */

#include "db_config.h"

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <string.h>
#endif

#include "db_int.h"
#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
#include "dbinc/log.h"

static int  __lock_freelock __P((DB_LOCKTAB *,
		struct __db_lock *, u_int32_t, u_int32_t));
static int  __lock_getobj
		__P((DB_LOCKTAB *, const DBT *, u_int32_t, int, DB_LOCKOBJ **));
static int  __lock_inherit_locks __P ((DB_LOCKTAB *, u_int32_t, u_int32_t));
static int  __lock_is_parent __P((DB_LOCKTAB *, u_int32_t, DB_LOCKER *));
static int  __lock_put_internal __P((DB_LOCKTAB *,
		struct __db_lock *, u_int32_t,  u_int32_t));
static int  __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, u_int32_t));
static void __lock_remove_waiter __P((DB_LOCKTAB *,
		DB_LOCKOBJ *, struct __db_lock *, db_status_t));
static int __lock_trade __P((DB_ENV *, DB_LOCK *, u_int32_t));

static const char __db_lock_invalid[] = "%s: Lock is no longer valid";
static const char __db_locker_invalid[] = "Locker is not valid";

/*
 * __lock_vec_pp --
 *	DB_ENV->lock_vec pre/post processing.
 *
 * PUBLIC: int __lock_vec_pp __P((DB_ENV *,
 * PUBLIC:     u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
 */
int
__lock_vec_pp(dbenv, locker, flags, list, nlist, elistp)
	DB_ENV *dbenv;
	u_int32_t locker, flags;
	int nlist;
	DB_LOCKREQ *list, **elistp;
{
	int rep_check, ret;

	PANIC_CHECK(dbenv);
	ENV_REQUIRES_CONFIG(dbenv,
	    dbenv->lk_handle, "DB_ENV->lock_vec", DB_INIT_LOCK);

	/* Validate arguments. */
	if ((ret = __db_fchk(dbenv,
	     "DB_ENV->lock_vec", flags, DB_LOCK_NOWAIT)) != 0)
		return (ret);

	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
	if (rep_check)
		__env_rep_enter(dbenv);
	ret = __lock_vec(dbenv, locker, flags, list, nlist, elistp);
	if (rep_check)
		__env_db_rep_exit(dbenv);
	return (ret);
}

/*
 * __lock_vec --
 *	DB_ENV->lock_vec.
 *
 *	Vector lock routine.  This function takes a set of operations
 *	and performs them all at once.  In addition, lock_vec provides
 *	functionality for lock inheritance, releasing all locks for a
 *	given locker (used during transaction commit/abort), releasing
 *	all locks on a given object, and generating debugging information.
 *
 * PUBLIC: int __lock_vec __P((DB_ENV *,
 * PUBLIC:     u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
 */
int
__lock_vec(dbenv, locker, flags, list, nlist, elistp)
	DB_ENV *dbenv;
	u_int32_t locker, flags;
	int nlist;
	DB_LOCKREQ *list, **elistp;
{
	struct __db_lock *lp, *next_lock;
	DB_LOCK lock;
	DB_LOCKER *sh_locker;
	DB_LOCKOBJ *sh_obj;
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;
	DBT *objlist, *np;
	u_int32_t lndx, ndx;
	int did_abort, i, ret, run_dd, upgrade, writes;

	/* Check if locks have been globally turned off. */
	if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
		return (0);

	lt = dbenv->lk_handle;
	region = lt->reginfo.primary;

	run_dd = 0;
	LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
	for (i = 0, ret = 0; i < nlist && ret == 0; i++)
		switch (list[i].op) {
		case DB_LOCK_GET_TIMEOUT:
			LF_SET(DB_LOCK_SET_TIMEOUT);
			/* FALLTHROUGH */
		case DB_LOCK_GET:
			if (IS_RECOVERING(dbenv)) {
				LOCK_INIT(list[i].lock);
				break;
			}
			ret = __lock_get_internal(dbenv->lk_handle,
			    locker, flags, list[i].obj,
			    list[i].mode, list[i].timeout, &list[i].lock);
			break;
		case DB_LOCK_INHERIT:
			ret = __lock_inherit_locks(lt, locker, flags);
			break;
		case DB_LOCK_PUT:
			ret = __lock_put_nolock(dbenv,
			    &list[i].lock, &run_dd, flags);
			break;
		case DB_LOCK_PUT_ALL:
		case DB_LOCK_PUT_READ:
		case DB_LOCK_UPGRADE_WRITE:
			/*
			 * Get the locker and mark it as deleted.  This
			 * allows us to traverse the locker links without
			 * worrying that someone else is deleting locks out
			 * from under us.  Since the locker may hold no
			 * locks (i.e., you could call abort before you've
			 * done any work), it's perfectly reasonable for there
			 * to be no locker; this is not an error.
			 */
			LOCKER_LOCK(lt, region, locker, ndx);
			if ((ret = __lock_getlocker(lt,
			    locker, ndx, 0, &sh_locker)) != 0 ||
			    sh_locker == NULL ||
			    F_ISSET(sh_locker, DB_LOCKER_DELETED))
				/*
				 * If ret is set, then we'll generate an
				 * error.  If it's not set, we have nothing
				 * to do.
				 */
				break;
			upgrade = 0;
			writes = 1;
			if (list[i].op == DB_LOCK_PUT_READ)
				writes = 0;
			else if (list[i].op == DB_LOCK_UPGRADE_WRITE) {
				if (F_ISSET(sh_locker, DB_LOCKER_DIRTY))
					upgrade = 1;
				writes = 0;
			}
			objlist = list[i].obj;
			if (objlist != NULL) {
				/*
				 * We know these should be ilocks,
				 * but they could be something else,
				 * so allocate room for the size too.
				 */
				objlist->size =
				     sh_locker->nwrites * sizeof(DBT);
				if ((ret = __os_malloc(dbenv,
				     objlist->size, &objlist->data)) != 0)
					goto up_done;
				memset(objlist->data, 0, objlist->size);
				np = (DBT *) objlist->data;
			} else
				np = NULL;

			F_SET(sh_locker, DB_LOCKER_DELETED);

			/* Now traverse the locks, releasing each one. */
			for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
			    lp != NULL; lp = next_lock) {
				sh_obj = (DB_LOCKOBJ *)
				    ((u_int8_t *)lp + lp->obj);
				next_lock = SH_LIST_NEXT(lp,
				    locker_links, __db_lock);
				if (writes == 1 ||
				    lp->mode == DB_LOCK_READ ||
				    lp->mode == DB_LOCK_DIRTY) {
					SH_LIST_REMOVE(lp,
					    locker_links, __db_lock);
					sh_obj = (DB_LOCKOBJ *)
					    ((u_int8_t *)lp + lp->obj);
					SHOBJECT_LOCK(lt, region, sh_obj, lndx);
					/*
					 * We are not letting lock_put_internal
					 * unlink the lock, so we'll have to
					 * update counts here.
					 */
					sh_locker->nlocks--;
					if (IS_WRITELOCK(lp->mode))
						sh_locker->nwrites--;
					ret = __lock_put_internal(lt, lp,
					    lndx, DB_LOCK_FREE | DB_LOCK_DOALL);
					if (ret != 0)
						break;
					continue;
				}
				if (objlist != NULL) {
					DB_ASSERT((char *)np <
					     (char *)objlist->data +
					     objlist->size);
					np->data = SH_DBT_PTR(&sh_obj->lockobj);
					np->size = sh_obj->lockobj.size;
					np++;
				}
			}
			if (ret != 0)
				goto up_done;

			if (objlist != NULL)
				if ((ret = __lock_fix_list(dbenv,
				     objlist, sh_locker->nwrites)) != 0)
					goto up_done;
			switch (list[i].op) {
			case DB_LOCK_UPGRADE_WRITE:
				if (upgrade != 1)
					goto up_done;
				for (lp = SH_LIST_FIRST(
				    &sh_locker->heldby, __db_lock);
				    lp != NULL;
				    lp = SH_LIST_NEXT(lp,
					    locker_links, __db_lock)) {
					if (lp->mode != DB_LOCK_WWRITE)
						continue;
					lock.off = R_OFFSET(&lt->reginfo, lp);
					lock.gen = lp->gen;
					F_SET(sh_locker, DB_LOCKER_INABORT);
					if ((ret = __lock_get_internal(lt,
					    locker, flags | DB_LOCK_UPGRADE,
					    NULL, DB_LOCK_WRITE, 0, &lock)) != 0)
						break;
				}
			up_done:
				/* FALLTHROUGH */
			case DB_LOCK_PUT_READ:
			case DB_LOCK_PUT_ALL:
				F_CLR(sh_locker, DB_LOCKER_DELETED);
				break;
			default:
				break;
			}
			break;
		case DB_LOCK_PUT_OBJ:
			/* Remove all the locks associated with an object. */
			OBJECT_LOCK(lt, region, list[i].obj, ndx);
			if ((ret = __lock_getobj(lt, list[i].obj,
			    ndx, 0, &sh_obj)) != 0 || sh_obj == NULL) {
				if (ret == 0)
					ret = EINVAL;
				break;
			}

			/*
			 * Go through both waiters and holders.  Don't bother
			 * to run promotion, because everyone is getting
			 * released.  The processes waiting will still get
			 * awakened as their waiters are released.
			 */
			for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
			    ret == 0 && lp != NULL;
			    lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock))
				ret = __lock_put_internal(lt, lp, ndx,
				    DB_LOCK_UNLINK |
				    DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);

			/*
			 * On the last time around, the object will get
			 * reclaimed by __lock_put_internal, structure the
			 * loop carefully so we do not get bitten.
			 */
			for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
			    ret == 0 && lp != NULL;
			    lp = next_lock) {
				next_lock = SH_TAILQ_NEXT(lp, links, __db_lock);
				ret = __lock_put_internal(lt, lp, ndx,
				    DB_LOCK_UNLINK |
				    DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
			}
			break;
		case DB_LOCK_TIMEOUT:
			ret = __lock_set_timeout_internal(dbenv,
			    locker, 0, DB_SET_TXN_NOW);
			break;
		case DB_LOCK_TRADE:
			/*
			 * INTERNAL USE ONLY.
			 * Change the holder of the lock described in
			 * list[i].lock to the locker-id specified by
			 * the locker parameter.
			 */
			/*
			 * You had better know what you're doing here.
			 * We are trading locker-id's on a lock to
			 * facilitate file locking on open DB handles.
			 * We do not do any conflict checking on this,
			 * so heaven help you if you use this flag under
			 * any other circumstances.
			 */
			ret = __lock_trade(dbenv, &list[i].lock, locker);
			break;
#if defined(DEBUG) && defined(HAVE_STATISTICS)
		case DB_LOCK_DUMP:
			/* Find the locker. */
			LOCKER_LOCK(lt, region, locker, ndx);
			if ((ret = __lock_getlocker(lt,
			    locker, ndx, 0, &sh_locker)) != 0 ||
			    sh_locker == NULL ||
			    F_ISSET(sh_locker, DB_LOCKER_DELETED))
				break;

			for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
			    lp != NULL;
			    lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) {
				__lock_printlock(lt, NULL, lp, 1);
			}
			break;
#endif
		default:
			__db_err(dbenv,
			    "Invalid lock operation: %d", list[i].op);
			ret = EINVAL;
			break;
		}

	if (ret == 0 && region->detect != DB_LOCK_NORUN &&
	     (region->need_dd || LOCK_TIME_ISVALID(&region->next_timeout)))
		run_dd = 1;
	UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);

	if (run_dd)
		(void)__lock_detect(dbenv, region->detect, &did_abort);

	if (ret != 0 && elistp != NULL)
		*elistp = &list[i - 1];

	return (ret);
}

/*
 * __lock_get_pp --
 *	DB_ENV->lock_get pre/post processing.
 *
 * PUBLIC: int __lock_get_pp __P((DB_ENV *,
 * PUBLIC:     u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
 */
int
__lock_get_pp(dbenv, locker, flags, obj, lock_mode, lock)
	DB_ENV *dbenv;
	u_int32_t locker, flags;
	const DBT *obj;
	db_lockmode_t lock_mode;
	DB_LOCK *lock;
{
	int rep_check, ret;

	PANIC_CHECK(dbenv);
	ENV_REQUIRES_CONFIG(dbenv,
	    dbenv->lk_handle, "DB_ENV->lock_get", DB_INIT_LOCK);

	/* Validate arguments. */
	if ((ret = __db_fchk(dbenv, "DB_ENV->lock_get", flags,
	    DB_LOCK_NOWAIT | DB_LOCK_UPGRADE | DB_LOCK_SWITCH)) != 0)
		return (ret);

	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
	if (rep_check)
		__env_rep_enter(dbenv);
	ret = __lock_get(dbenv, locker, flags, obj, lock_mode, lock);
	if (rep_check)
		__env_db_rep_exit(dbenv);
	return (ret);
}

/*
 * __lock_get --
 *	DB_ENV->lock_get.
 *
 * PUBLIC: int __lock_get __P((DB_ENV *,
 * PUBLIC:     u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
 */
int
__lock_get(dbenv, locker, flags, obj, lock_mode, lock)
	DB_ENV *dbenv;
	u_int32_t locker, flags;
	const DBT *obj;
	db_lockmode_t lock_mode;
	DB_LOCK *lock;
{
	int ret;

	if (IS_RECOVERING(dbenv)) {
		LOCK_INIT(*lock);
		return (0);
	}

	LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
	ret = __lock_get_internal(dbenv->lk_handle,
	    locker, flags, obj, lock_mode, 0, lock);
	UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
	return (ret);
}

/*
 * __lock_get_internal --
 *	All the work for lock_get (and for the GET option of lock_vec) is done
 *	inside of lock_get_internal.
 *
 * PUBLIC: int  __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t,
 * PUBLIC:     const DBT *, db_lockmode_t, db_timeout_t, DB_LOCK *));
 */
int
__lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
	DB_LOCKTAB *lt;
	u_int32_t locker, flags;
	const DBT *obj;
	db_lockmode_t lock_mode;
	db_timeout_t timeout;
	DB_LOCK *lock;
{
	struct __db_lock *newl, *lp, *wwrite;
	DB_ENV *dbenv;
	DB_LOCKER *sh_locker;
	DB_LOCKOBJ *sh_obj;
	DB_LOCKREGION *region;
	u_int32_t holder, locker_ndx, obj_ndx;
	int did_abort, ihold, grant_dirty, no_dd, ret, t_ret;

	/*
	 * We decide what action to take based on what locks are already held
	 * and what locks are in the wait queue.
	 */
	enum {
		GRANT,		/* Grant the lock. */
		UPGRADE,	/* Upgrade the lock. */
		HEAD,		/* Wait at head of wait queue. */
		SECOND,		/* Wait as the second waiter. */
		TAIL		/* Wait at tail of the wait queue. */
	} action;

	dbenv = lt->dbenv;
	region = lt->reginfo.primary;

	/* Check if locks have been globally turned off. */
	if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
		return (0);

	no_dd = ret = 0;
	newl = NULL;

	/*
	 * If we are not going to reuse this lock, invalidate it
	 * so that if we fail it will not look like a valid lock.
	 */
	if (!LF_ISSET(DB_LOCK_UPGRADE | DB_LOCK_SWITCH))
		LOCK_INIT(*lock);

	/* Check that the lock mode is valid.  */
	if (lock_mode >= (db_lockmode_t)region->stat.st_nmodes) {
		__db_err(dbenv, "DB_ENV->lock_get: invalid lock mode %lu",
		    (u_long)lock_mode);
		return (EINVAL);
	}

	region->stat.st_nrequests++;

	if (obj == NULL) {
		DB_ASSERT(LOCK_ISSET(*lock));
		lp = R_ADDR(&lt->reginfo, lock->off);
		sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
	} else {
		/* Allocate a shared memory new object. */
		OBJECT_LOCK(lt, region, obj, lock->ndx);
		if ((ret = __lock_getobj(lt, obj, lock->ndx, 1, &sh_obj)) != 0)
			goto err;
	}

	/* Get the locker, we may need it to find our parent. */
	LOCKER_LOCK(lt, region, locker, locker_ndx);
	if ((ret = __lock_getlocker(lt, locker,
	    locker_ndx, locker > DB_LOCK_MAXID ? 1 : 0, &sh_locker)) != 0) {
		/*
		 * XXX
		 * We cannot tell if we created the object or not, so we don't
		 * know if we should free it or not.
		 */
		goto err;
	}
	if (sh_locker == NULL) {
		__db_err(dbenv, "Locker does not exist");
		ret = EINVAL;
		goto err;
	}

	/*
	 * Figure out if we can grant this lock or if it should wait.
	 * By default, we can grant the new lock if it does not conflict with
	 * anyone on the holders list OR anyone on the waiters list.
	 * The reason that we don't grant if there's a conflict is that
	 * this can lead to starvation (a writer waiting on a popularly
	 * read item will never be granted).  The downside of this is that
	 * a waiting reader can prevent an upgrade from reader to writer,
	 * which is not uncommon.
	 *
	 * There are two exceptions to the no-conflict rule.  First, if
	 * a lock is held by the requesting locker AND the new lock does
	 * not conflict with any other holders, then we grant the lock.
	 * The most common place this happens is when the holder has a
	 * WRITE lock and a READ lock request comes in for the same locker.
	 * If we do not grant the read lock, then we guarantee deadlock.
	 * Second, dirty readers are granted if at all possible while
	 * avoiding starvation, see below.
	 *
	 * In case of conflict, we put the new lock on the end of the waiters
	 * list, unless we are upgrading or this is a dirty reader in which
	 * case the locker goes at or near the front of the list.
	 */
	ihold = 0;
	grant_dirty = 0;
	holder = 0;
	wwrite = NULL;

	/*
	 * SWITCH is a special case, used by the queue access method
	 * when we want to get an entry which is past the end of the queue.
	 * We have a DB_READ_LOCK and need to switch it to DB_LOCK_WAIT and
	 * join the waiters queue.  This must be done as a single operation
	 * so that another locker cannot get in and fail to wake us up.
	 */
	if (LF_ISSET(DB_LOCK_SWITCH))
		lp = NULL;
	else
		lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
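
To show how the entry points above are reached, here is a minimal usage sketch (not part of lock.c): DB_ENV->lock_get lands in __lock_get_pp and DB_ENV->lock_vec in __lock_vec_pp, per the pre/post-processing comments above. It assumes a Berkeley DB 4.x build; the environment directory "env_home", the object bytes "example-object", and the function name lock_example are illustrative placeholders, and error handling is abbreviated.

#include <string.h>
#include <db.h>

int
lock_example(void)
{
	DB_ENV *dbenv;
	DB_LOCK lock;
	DB_LOCKREQ reqs[2], *failed;
	DBT obj;
	u_int32_t locker;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	if ((ret = dbenv->open(dbenv,
	    "env_home", DB_CREATE | DB_INIT_LOCK, 0)) != 0)
		goto err;

	/* Each thread of control allocates its own locker ID. */
	if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
		goto err;

	/* A locked object is an arbitrary byte string. */
	memset(&obj, 0, sizeof(obj));
	obj.data = "example-object";
	obj.size = sizeof("example-object") - 1;

	/* Single request: __lock_get_pp/__lock_get run under this call. */
	if ((ret = dbenv->lock_get(dbenv,
	    locker, DB_LOCK_NOWAIT, &obj, DB_LOCK_WRITE, &lock)) != 0)
		goto err;

	/*
	 * Batched requests: __lock_vec_pp/__lock_vec run under this call.
	 * Release the write lock acquired above, then re-acquire the
	 * object for reading, all in one pass through the lock table.
	 */
	memset(reqs, 0, sizeof(reqs));
	reqs[0].op = DB_LOCK_PUT;
	reqs[0].lock = lock;
	reqs[1].op = DB_LOCK_GET;
	reqs[1].mode = DB_LOCK_READ;
	reqs[1].obj = &obj;
	if ((ret = dbenv->lock_vec(dbenv,
	    locker, 0, reqs, 2, &failed)) != 0)
		goto err;

	/* DB_LOCK_PUT_ALL releases everything this locker still holds. */
	memset(reqs, 0, sizeof(reqs));
	reqs[0].op = DB_LOCK_PUT_ALL;
	ret = dbenv->lock_vec(dbenv, locker, 0, reqs, 1, &failed);

	(void)dbenv->lock_id_free(dbenv, locker);
err:	(void)dbenv->close(dbenv, 0);
	return (ret);
}

If a batch fails partway, the elistp out-parameter points at the request that failed, which is exactly what __lock_vec's final "*elistp = &list[i - 1]" assignment provides.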
