📄 lock.c
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996-2002
 *	Sleepycat Software.  All rights reserved.
 */

#include "db_config.h"

#ifndef lint
static const char revid[] = "$Id: lock.c,v 11.108 2002/08/06 06:11:34 bostic Exp $";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <string.h>
#endif

#include "db_int.h"
#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
#include "dbinc/log.h"
#include "dbinc/txn.h"

static int  __lock_checklocker __P((DB_LOCKTAB *,
    struct __db_lock *, u_int32_t, u_int32_t));
static void __lock_expires __P((DB_ENV *, db_timeval_t *, db_timeout_t));
static void __lock_freelocker
    __P((DB_LOCKTAB *, DB_LOCKREGION *, DB_LOCKER *, u_int32_t));
static int  __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t,
    const DBT *, db_lockmode_t, db_timeout_t, DB_LOCK *));
static int  __lock_getobj
    __P((DB_LOCKTAB *, const DBT *, u_int32_t, int, DB_LOCKOBJ **));
static int  __lock_is_parent __P((DB_LOCKTAB *, u_int32_t, DB_LOCKER *));
static int  __lock_put_internal
    __P((DB_LOCKTAB *, struct __db_lock *, u_int32_t, u_int32_t));
static int  __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, u_int32_t));
static void __lock_remove_waiter __P((DB_LOCKTAB *,
    DB_LOCKOBJ *, struct __db_lock *, db_status_t));
static int  __lock_trade __P((DB_ENV *, DB_LOCK *, u_int32_t));

static const char __db_lock_err[] = "Lock table is out of available %s";
static const char __db_lock_invalid[] = "%s: Lock is no longer valid";
static const char __db_locker_invalid[] = "Locker is not valid";

/*
 * __lock_id --
 *	Generate a unique locker id.
 *
 * PUBLIC: int __lock_id __P((DB_ENV *, u_int32_t *));
 */
int
__lock_id(dbenv, idp)
	DB_ENV *dbenv;
	u_int32_t *idp;
{
	DB_LOCKER *lk;
	DB_LOCKTAB *lt;
	DB_LOCKREGION *region;
	u_int32_t *ids, locker_ndx;
	int nids, ret;

	PANIC_CHECK(dbenv);
	ENV_REQUIRES_CONFIG(dbenv,
	    dbenv->lk_handle, "DB_ENV->lock_id", DB_INIT_LOCK);

	lt = dbenv->lk_handle;
	region = lt->reginfo.primary;
	ret = 0;

	/*
	 * Allocate a new lock id.  If we wrap around then we
	 * find the minimum currently in use and make sure we
	 * can stay below that.  This code is similar to code
	 * in __txn_begin_int for recovering txn ids.
	 */
	LOCKREGION(dbenv, lt);
	/*
	 * Our current valid range can span the maximum valid value, so check
	 * for it and wrap manually.
	 */
	if (region->stat.st_id == DB_LOCK_MAXID &&
	    region->stat.st_cur_maxid != DB_LOCK_MAXID)
		region->stat.st_id = DB_LOCK_INVALIDID;
	if (region->stat.st_id == region->stat.st_cur_maxid) {
		if ((ret = __os_malloc(dbenv,
		    sizeof(u_int32_t) * region->stat.st_nlockers, &ids)) != 0)
			goto err;
		nids = 0;
		for (lk = SH_TAILQ_FIRST(&region->lockers, __db_locker);
		    lk != NULL;
		    lk = SH_TAILQ_NEXT(lk, ulinks, __db_locker))
			ids[nids++] = lk->id;
		region->stat.st_id = DB_LOCK_INVALIDID;
		region->stat.st_cur_maxid = DB_LOCK_MAXID;
		if (nids != 0)
			__db_idspace(ids, nids,
			    &region->stat.st_id, &region->stat.st_cur_maxid);
		__os_free(dbenv, ids);
	}
	*idp = ++region->stat.st_id;

	/* Allocate a locker for this id. */
	LOCKER_LOCK(lt, region, *idp, locker_ndx);
	ret = __lock_getlocker(lt, *idp, locker_ndx, 1, &lk);

err:	UNLOCKREGION(dbenv, lt);

	return (ret);
}

/*
 * __lock_id_free --
 *	Free a locker id.
 *
 * PUBLIC: int __lock_id_free __P((DB_ENV *, u_int32_t));
 */
int
__lock_id_free(dbenv, id)
	DB_ENV *dbenv;
	u_int32_t id;
{
	DB_LOCKER *sh_locker;
	DB_LOCKTAB *lt;
	DB_LOCKREGION *region;
	u_int32_t locker_ndx;
	int ret;

	PANIC_CHECK(dbenv);
	ENV_REQUIRES_CONFIG(dbenv,
	    dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK);

	lt = dbenv->lk_handle;
	region = lt->reginfo.primary;

	LOCKREGION(dbenv, lt);
	LOCKER_LOCK(lt, region, id, locker_ndx);
	if ((ret =
	    __lock_getlocker(lt, id, locker_ndx, 0, &sh_locker)) != 0)
		goto err;
	if (sh_locker == NULL) {
		ret = EINVAL;
		goto err;
	}
	if (sh_locker->nlocks != 0) {
		__db_err(dbenv, "Locker still has locks");
		ret = EINVAL;
		goto err;
	}

	__lock_freelocker(lt, region, sh_locker, locker_ndx);

err:	UNLOCKREGION(dbenv, lt);
	return (ret);
}
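/*
 * Illustrative usage sketch -- not part of the original lock.c.  It shows
 * how an application reaches __lock_id/__lock_id_free through the public
 * DB_ENV->lock_id and DB_ENV->lock_id_free methods.  The guard macro and
 * function name are invented for illustration; types come from the headers
 * included above, and the environment is assumed to have been opened with
 * DB_INIT_LOCK.
 */
#ifdef LOCK_ID_USAGE_SKETCH
static int
__lock_id_usage_sketch(dbenv)
	DB_ENV *dbenv;
{
	u_int32_t locker;
	int ret;

	/* Allocate a fresh, unique locker id. */
	if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
		return (ret);

	/* ... acquire and release locks under this locker id ... */

	/*
	 * The id may only be freed once the locker holds no locks;
	 * otherwise __lock_id_free returns EINVAL, as above.
	 */
	return (dbenv->lock_id_free(dbenv, locker));
}
#endif /* LOCK_ID_USAGE_SKETCH */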
/*
 * __lock_vec --
 *	Vector lock routine.  This function takes a set of operations
 *	and performs them all at once.  In addition, lock_vec provides
 *	functionality for lock inheritance, releasing all locks for a
 *	given locker (used during transaction commit/abort), releasing
 *	all locks on a given object, and generating debugging information.
 *
 * PUBLIC: int __lock_vec __P((DB_ENV *,
 * PUBLIC:     u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
 */
int
__lock_vec(dbenv, locker, flags, list, nlist, elistp)
	DB_ENV *dbenv;
	u_int32_t locker, flags;
	int nlist;
	DB_LOCKREQ *list, **elistp;
{
	struct __db_lock *lp, *next_lock;
	DB_LOCK lock;
	DB_LOCKER *sh_locker, *sh_parent;
	DB_LOCKOBJ *obj, *sh_obj;
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;
	u_int32_t lndx, ndx;
	int did_abort, i, ret, run_dd, upgrade, writes;

	PANIC_CHECK(dbenv);
	ENV_REQUIRES_CONFIG(dbenv,
	    dbenv->lk_handle, "DB_ENV->lock_vec", DB_INIT_LOCK);

	/* Check if locks have been globally turned off. */
	if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
		return (0);

	/* Validate arguments. */
	if ((ret = __db_fchk(dbenv, "DB_ENV->lock_vec",
	    flags, DB_LOCK_FREE_LOCKER | DB_LOCK_NOWAIT)) != 0)
		return (ret);

	lt = dbenv->lk_handle;
	region = lt->reginfo.primary;

	run_dd = 0;
	LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
	for (i = 0, ret = 0; i < nlist && ret == 0; i++)
		switch (list[i].op) {
		case DB_LOCK_GET_TIMEOUT:
			LF_SET(DB_LOCK_SET_TIMEOUT);
		case DB_LOCK_GET:
			ret = __lock_get_internal(dbenv->lk_handle,
			    locker, flags, list[i].obj,
			    list[i].mode, list[i].timeout, &list[i].lock);
			break;
		case DB_LOCK_INHERIT:
			/*
			 * Get the committing locker and mark it as deleted.
			 * This allows us to traverse the locker links without
			 * worrying that someone else is deleting locks out
			 * from under us.  However, if the locker doesn't
			 * exist, that just means that the child holds no
			 * locks, so inheritance is easy!
			 */
			LOCKER_LOCK(lt, region, locker, ndx);
			if ((ret = __lock_getlocker(lt,
			    locker, ndx, 0, &sh_locker)) != 0 ||
			    sh_locker == NULL ||
			    F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
				if (ret == 0 && sh_locker != NULL)
					ret = EINVAL;
				__db_err(dbenv, __db_locker_invalid);
				break;
			}

			/* Make sure we are a child transaction. */
			if (sh_locker->parent_locker == INVALID_ROFF) {
				__db_err(dbenv, "Not a child transaction");
				ret = EINVAL;
				break;
			}
			sh_parent = (DB_LOCKER *)
			    R_ADDR(&lt->reginfo, sh_locker->parent_locker);
			F_SET(sh_locker, DB_LOCKER_DELETED);

			/*
			 * Now, lock the parent locker; move locks from
			 * the committing list to the parent's list.
			 */
			LOCKER_LOCK(lt, region, locker, ndx);
			if (F_ISSET(sh_parent, DB_LOCKER_DELETED)) {
				if (ret == 0) {
					__db_err(dbenv,
					    "Parent locker is not valid");
					ret = EINVAL;
				}
				break;
			}

			for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
			    lp != NULL;
			    lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
				SH_LIST_REMOVE(lp, locker_links, __db_lock);
				SH_LIST_INSERT_HEAD(&sh_parent->heldby, lp,
				    locker_links, __db_lock);
				lp->holder = sh_parent->id;

				/* Get the object associated with this lock. */
				obj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);

				(void)__lock_promote(lt, obj,
				    LF_ISSET(DB_LOCK_NOWAITERS));
			}

			/* Transfer child counts to parent. */
			sh_parent->nlocks += sh_locker->nlocks;
			sh_parent->nwrites += sh_locker->nwrites;

			/* Now free the original locker. */
			ret = __lock_checklocker(lt,
			    NULL, locker, DB_LOCK_IGNOREDEL);
			break;
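		/*
		 * Illustrative note -- not in the original source: the
		 * transaction system drives DB_LOCK_INHERIT at child-commit
		 * time with a single-element request vector, along these
		 * lines (child_txnid being the committing child's locker id):
		 *
		 *	DB_LOCKREQ req;
		 *	memset(&req, 0, sizeof(req));
		 *	req.op = DB_LOCK_INHERIT;
		 *	ret = __lock_vec(dbenv, child_txnid, 0, &req, 1, NULL);
		 */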
		case DB_LOCK_PUT:
			ret = __lock_put_nolock(dbenv,
			    &list[i].lock, &run_dd, flags);
			break;
		case DB_LOCK_PUT_ALL:
		case DB_LOCK_PUT_READ:
		case DB_LOCK_UPGRADE_WRITE:
			/*
			 * Get the locker and mark it as deleted.  This
			 * allows us to traverse the locker links without
			 * worrying that someone else is deleting locks out
			 * from under us.  Since the locker may hold no
			 * locks (i.e., you could call abort before you've
			 * done any work), it's perfectly reasonable for there
			 * to be no locker; this is not an error.
			 */
			LOCKER_LOCK(lt, region, locker, ndx);
			if ((ret = __lock_getlocker(lt,
			    locker, ndx, 0, &sh_locker)) != 0 ||
			    sh_locker == NULL ||
			    F_ISSET(sh_locker, DB_LOCKER_DELETED))
				/*
				 * If ret is set, then we'll generate an
				 * error.  If it's not set, we have nothing
				 * to do.
				 */
				break;
			upgrade = 0;
			writes = 1;
			if (list[i].op == DB_LOCK_PUT_READ)
				writes = 0;
			else if (list[i].op == DB_LOCK_UPGRADE_WRITE) {
				if (F_ISSET(sh_locker, DB_LOCKER_DIRTY))
					upgrade = 1;
				writes = 0;
			}

			F_SET(sh_locker, DB_LOCKER_DELETED);

			/* Now traverse the locks, releasing each one. */
			for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
			    lp != NULL;) {
				sh_obj = (DB_LOCKOBJ *)
				    ((u_int8_t *)lp + lp->obj);
				if (writes == 1 ||
				    lp->mode == DB_LOCK_READ) {
					SH_LIST_REMOVE(lp,
					    locker_links, __db_lock);
					sh_obj = (DB_LOCKOBJ *)
					    ((u_int8_t *)lp + lp->obj);
					SHOBJECT_LOCK(lt, region, sh_obj, lndx);
					/*
					 * We are not letting lock_put_internal
					 * unlink the lock, so we'll have to
					 * update counts here.
					 */
					sh_locker->nlocks--;
					if (IS_WRITELOCK(lp->mode))
						sh_locker->nwrites--;
					ret = __lock_put_internal(lt, lp,
					    lndx, DB_LOCK_FREE | DB_LOCK_DOALL);
					if (ret != 0)
						break;
					lp = SH_LIST_FIRST(
					    &sh_locker->heldby, __db_lock);
				} else
					lp = SH_LIST_NEXT(lp,
					    locker_links, __db_lock);
			}
			switch (list[i].op) {
			case DB_LOCK_UPGRADE_WRITE:
				if (upgrade != 1)
					goto up_done;
				for (lp = SH_LIST_FIRST(
				    &sh_locker->heldby, __db_lock);
				    lp != NULL;
				    lp = SH_LIST_NEXT(lp,
					locker_links, __db_lock)) {
					if (ret != 0)
						break;
					lock.off = R_OFFSET(&lt->reginfo, lp);
					lock.gen = lp->gen;
					F_SET(sh_locker, DB_LOCKER_INABORT);
					ret = __lock_get_internal(lt,
					    locker, DB_LOCK_UPGRADE,
					    NULL, DB_LOCK_WRITE, 0, &lock);
				}
			up_done:
				/* FALL THROUGH */
			case DB_LOCK_PUT_READ:
				F_CLR(sh_locker, DB_LOCKER_DELETED);
				break;
			case DB_LOCK_PUT_ALL:
				if (ret == 0)
					ret = __lock_checklocker(lt,
					    NULL, locker, DB_LOCK_IGNOREDEL);
				break;
			default:
				break;
			}
			break;
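		/*
		 * Illustrative note -- not in the original source: releasing
		 * everything a locker holds, as transaction abort does, is a
		 * one-request vector along these lines:
		 *
		 *	DB_LOCKREQ req;
		 *	memset(&req, 0, sizeof(req));
		 *	req.op = DB_LOCK_PUT_ALL;
		 *	ret = dbenv->lock_vec(dbenv, locker_id, 0, &req, 1, NULL);
		 *
		 * DB_LOCK_PUT_READ and DB_LOCK_UPGRADE_WRITE are internal
		 * variants: read locks can be dropped early, while write
		 * locks must be held until commit; the upgrade pass above
		 * additionally re-acquires the survivors as write locks for
		 * lockers with dirty readers.
		 */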
		case DB_LOCK_PUT_OBJ:
			/* Remove all the locks associated with an object. */
			OBJECT_LOCK(lt, region, list[i].obj, ndx);
			if ((ret = __lock_getobj(lt, list[i].obj,
			    ndx, 0, &sh_obj)) != 0 || sh_obj == NULL) {
				if (ret == 0)
					ret = EINVAL;
				break;
			}

			/*
			 * Go through both waiters and holders.  Don't bother
			 * to run promotion, because everyone is getting
			 * released.  The processes waiting will still get
			 * awakened as their waiters are released.
			 */
			for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
			    ret == 0 && lp != NULL;
			    lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock))
				ret = __lock_put_internal(lt, lp, ndx,
				    DB_LOCK_UNLINK |
				    DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);

			/*
			 * On the last time around, the object will get
			 * reclaimed by __lock_put_internal, structure the
			 * loop carefully so we do not get bitten.
			 */
			for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
			    ret == 0 && lp != NULL;
			    lp = next_lock) {
				next_lock = SH_TAILQ_NEXT(lp, links, __db_lock);
				ret = __lock_put_internal(lt, lp, ndx,
				    DB_LOCK_UNLINK |
				    DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
			}
			break;

		case DB_LOCK_TIMEOUT:
			ret = __lock_set_timeout(dbenv,
			    locker, 0, DB_SET_TXN_NOW);
			region->need_dd = 1;
			break;

		case DB_LOCK_TRADE:
			/*
			 * INTERNAL USE ONLY.
			 * Change the holder of the lock described in
			 * list[i].lock to the locker-id specified by
			 * the locker parameter.
			 */
			/*
			 * You had better know what you're doing here.
			 * We are trading locker-id's on a lock to
			 * facilitate file locking on open DB handles.
			 * We do not do any conflict checking on this,
			 * so heaven help you if you use this flag under
			 * any other circumstances.
			 */
			ret = __lock_trade(dbenv, &list[i].lock, locker);
			break;
#ifdef DEBUG
		case DB_LOCK_DUMP:
			/* Find the locker. */
			LOCKER_LOCK(lt, region, locker, ndx);
			if ((ret = __lock_getlocker(lt,
			    locker, ndx, 0, &sh_locker)) != 0 ||
			    sh_locker == NULL ||
			    F_ISSET(sh_locker, DB_LOCKER_DELETED))
				break;

			for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
			    lp != NULL;
			    lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) {
				__lock_printlock(lt, lp, 1);
			}
			break;
#endif
		default:
			__db_err(dbenv,
			    "Invalid lock operation: %d", list[i].op);
			ret = EINVAL;
			break;
		}

	if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN)
		run_dd = 1;
	UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);

	if (run_dd)
		(void)dbenv->lock_detect(dbenv, 0, region->detect, &did_abort);

	if (ret != 0 && elistp != NULL)
		*elistp = &list[i - 1];

	return (ret);
}
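/*
 * Illustrative usage sketch -- not part of the original lock.c.  It
 * exercises the vector interface from application code through the public
 * DB_ENV->lock_vec method: one DB_LOCK_GET request acquires a write lock
 * on an application-defined byte string, and a later DB_LOCK_PUT_ALL
 * request releases whatever the locker still holds.  The guard macro,
 * function name, and object name are invented for illustration.
 */
#ifdef LOCK_VEC_USAGE_SKETCH
static int
__lock_vec_usage_sketch(dbenv, locker)
	DB_ENV *dbenv;
	u_int32_t locker;
{
	DB_LOCKREQ req, *failed;
	DBT obj;
	int ret;

	/* A locked "object" is just an opaque byte string. */
	memset(&obj, 0, sizeof(obj));
	obj.data = "my-resource";
	obj.size = sizeof("my-resource") - 1;

	/* Request a write lock; fail rather than block on conflict. */
	memset(&req, 0, sizeof(req));
	req.op = DB_LOCK_GET;
	req.mode = DB_LOCK_WRITE;
	req.obj = &obj;
	if ((ret = dbenv->lock_vec(dbenv,
	    locker, DB_LOCK_NOWAIT, &req, 1, &failed)) != 0)
		return (ret);

	/* ... operate on the protected resource ... */

	/* Release every lock this locker holds in a single call. */
	memset(&req, 0, sizeof(req));
	req.op = DB_LOCK_PUT_ALL;
	return (dbenv->lock_vec(dbenv, locker, 0, &req, 1, &failed));
}
#endif /* LOCK_VEC_USAGE_SKETCH */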