📄 lock.c
    for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
        if (locker == lp->holder) {
            if (lp->mode == lock_mode && lp->status == DB_LSTAT_HELD) {
                if (LF_ISSET(DB_LOCK_UPGRADE))
                    goto upgrade;

                /*
                 * Lock is held, so we can increment the
                 * reference count and return this lock
                 * to the caller.  We do not count reference
                 * increments towards the locks held by
                 * the locker.
                 */
                lp->refcount++;
                lock->off = R_OFFSET(&lt->reginfo, lp);
                lock->gen = lp->gen;
                lock->mode = lp->mode;
                goto done;
            } else {
                ihold = 1;
                if (lock_mode == DB_LOCK_WRITE &&
                    lp->mode == DB_LOCK_WWRITE)
                    wwrite = lp;
            }
        } else if (__lock_is_parent(lt, lp->holder, sh_locker))
            ihold = 1;
        else if (CONFLICTS(lt, region, lp->mode, lock_mode))
            break;
        else if (lp->mode == DB_LOCK_READ ||
            lp->mode == DB_LOCK_WWRITE) {
            grant_dirty = 1;
            holder = lp->holder;
        }
    }

    /* If we want a write lock and we hold a was-write, upgrade. */
    if (wwrite != NULL)
        LF_SET(DB_LOCK_UPGRADE);

    /*
     * If there are conflicting holders we will have to wait.  An upgrade
     * or dirty reader goes to the head of the queue, everyone else to the
     * back.
     */
    if (lp != NULL) {
        if (LF_ISSET(DB_LOCK_UPGRADE) || lock_mode == DB_LOCK_DIRTY)
            action = HEAD;
        else
            action = TAIL;
    } else {
        if (LF_ISSET(DB_LOCK_SWITCH))
            action = TAIL;
        else if (LF_ISSET(DB_LOCK_UPGRADE))
            action = UPGRADE;
        else if (ihold)
            action = GRANT;
        else {
            /* Look for conflicting waiters. */
            for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
                lp != NULL;
                lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
                if (CONFLICTS(lt, region, lp->mode, lock_mode) &&
                    locker != lp->holder)
                    break;
            }

            /*
             * If there are no conflicting holders or waiters,
             * then we grant.  Normally when we wait, we
             * wait at the end (TAIL).  However, the goal of
             * DIRTY_READ locks is to allow forward progress in the
             * face of updating transactions, so we try to allow
             * all DIRTY_READ requests to proceed as rapidly
             * as possible, so long as we can prevent starvation.
             *
             * When determining how to queue a DIRTY_READ
             * request:
             *
             *  1. If there is a waiting upgrading writer,
             *     then we enqueue the dirty reader BEHIND it
             *     (second in the queue).
             *  2. Else, if the current holders are either
             *     READ or WWRITE, we grant.
             *  3. Else queue SECOND, i.e., behind the first
             *     waiter.
             *
             * The end result is that dirty readers get to run
             * so long as other lockers are blocked.  Once
             * there is a locker that is waiting only on
             * dirty readers, they queue up behind that
             * locker so that it gets to run.  In general
             * this lock will be a WRITE, which will shortly
             * get downgraded to a WWRITE, permitting the
             * DIRTY locks to be granted.
             */
            if (lp == NULL)
                action = GRANT;
            else if (lock_mode == DB_LOCK_DIRTY && grant_dirty) {
                /* An upgrade will be at the head of the queue. */
                lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
                if (lp->mode == DB_LOCK_WRITE && lp->holder == holder)
                    action = SECOND;
                else
                    action = GRANT;
            } else if (lock_mode == DB_LOCK_DIRTY)
                action = SECOND;
            else
                action = TAIL;
        }
    }

    switch (action) {
    case HEAD:
    case TAIL:
    case SECOND:
    case GRANT:
        /* Allocate a new lock from the region's free list. */
        if ((newl =
            SH_TAILQ_FIRST(&region->free_locks, __db_lock)) == NULL)
            return (__lock_nomem(dbenv, "locks"));
        SH_TAILQ_REMOVE(&region->free_locks, newl, links, __db_lock);

        /* Update new-lock statistics. */
        if (++region->stat.st_nlocks > region->stat.st_maxnlocks)
            region->stat.st_maxnlocks = region->stat.st_nlocks;

        newl->holder = locker;
        newl->refcount = 1;
        newl->mode = lock_mode;
        newl->obj = (roff_t)SH_PTR_TO_OFF(newl, sh_obj);
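        /*
         * The new lock is now stamped with the requesting locker, the
         * requested mode, a reference count of one, and a relative
         * offset back to the object it covers.  Whether it goes on the
         * holders queue or the waiters queue is governed by the action
         * chosen above.
         */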
        /*
         * Now, insert the lock onto its locker's list.
         * If the locker does not currently hold any locks,
         * there's no reason to run the deadlock detector;
         * save that information.
         */
        no_dd = sh_locker->master_locker == INVALID_ROFF &&
            SH_LIST_FIRST(&sh_locker->child_locker, __db_locker) == NULL &&
            SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL;

        SH_LIST_INSERT_HEAD(
            &sh_locker->heldby, newl, locker_links, __db_lock);
        break;

    case UPGRADE:
upgrade:
        if (wwrite != NULL) {
            lp = wwrite;
            lp->refcount++;
            lock->off = R_OFFSET(&lt->reginfo, lp);
            lock->gen = lp->gen;
            lock->mode = lock_mode;
        } else
            lp = R_ADDR(&lt->reginfo, lock->off);
        if (IS_WRITELOCK(lock_mode) && !IS_WRITELOCK(lp->mode))
            sh_locker->nwrites++;
        lp->mode = lock_mode;
        goto done;
    }

    switch (action) {
    case UPGRADE:
        DB_ASSERT(0);
        break;
    case GRANT:
        newl->status = DB_LSTAT_HELD;
        SH_TAILQ_INSERT_TAIL(&sh_obj->holders, newl, links);
        break;
    case HEAD:
    case TAIL:
    case SECOND:
        if (LF_ISSET(DB_LOCK_NOWAIT)) {
            ret = DB_LOCK_NOTGRANTED;
            region->stat.st_nnowaits++;
            goto err;
        }
        if ((lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock)) == NULL)
            SH_TAILQ_INSERT_HEAD(&region->dd_objs,
                sh_obj, dd_links, __db_lockobj);
        switch (action) {
        case HEAD:
            SH_TAILQ_INSERT_HEAD(
                &sh_obj->waiters, newl, links, __db_lock);
            break;
        case SECOND:
            SH_TAILQ_INSERT_AFTER(
                &sh_obj->waiters, lp, newl, links, __db_lock);
            break;
        case TAIL:
            SH_TAILQ_INSERT_TAIL(&sh_obj->waiters, newl, links);
            break;
        default:
            DB_ASSERT(0);
        }

        /* If we are switching, drop the lock we had. */
        if (LF_ISSET(DB_LOCK_SWITCH) &&
            (ret = __lock_put_nolock(dbenv,
            lock, &ihold, DB_LOCK_NOWAITERS)) != 0) {
            __lock_remove_waiter(lt, sh_obj, newl, DB_LSTAT_FREE);
            goto err;
        }

        /*
         * This is really a blocker for the thread.  It should be
         * initialized locked, so that when we try to acquire it, we
         * block.
         */
        newl->status = DB_LSTAT_WAITING;
        region->stat.st_nconflicts++;
        region->need_dd = 1;

        /*
         * First check to see if this txn has expired.
         * If not, then see if the lock timeout is past
         * the expiration of the txn; if it is, use
         * the txn expiration time.  lk_expire is passed
         * to avoid an extra call to get the time.
         */
        if (__lock_expired(dbenv,
            &sh_locker->lk_expire, &sh_locker->tx_expire)) {
            newl->status = DB_LSTAT_EXPIRED;
            sh_locker->lk_expire = sh_locker->tx_expire;

            /* We are done. */
            goto expired;
        }

        /*
         * If a timeout was specified in this call then it
         * takes priority.  If a lock timeout has been specified
         * for this transaction then use that, otherwise use
         * the global timeout value.
         */
        if (!LF_ISSET(DB_LOCK_SET_TIMEOUT)) {
            if (F_ISSET(sh_locker, DB_LOCKER_TIMEOUT))
                timeout = sh_locker->lk_timeout;
            else
                timeout = region->lk_timeout;
        }
        if (timeout != 0)
            __lock_expires(dbenv, &sh_locker->lk_expire, timeout);
        else
            LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);

        if (LOCK_TIME_ISVALID(&sh_locker->tx_expire) &&
            (timeout == 0 || __lock_expired(dbenv,
            &sh_locker->lk_expire, &sh_locker->tx_expire)))
            sh_locker->lk_expire = sh_locker->tx_expire;
        if (LOCK_TIME_ISVALID(&sh_locker->lk_expire) &&
            (!LOCK_TIME_ISVALID(&region->next_timeout) ||
            LOCK_TIME_GREATER(
            &region->next_timeout, &sh_locker->lk_expire)))
            region->next_timeout = sh_locker->lk_expire;
        UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);

        /*
         * We are about to wait; before waiting, see if the deadlock
         * detector should be run.
         */
        if (region->detect != DB_LOCK_NORUN && !no_dd)
            (void)__lock_detect(dbenv, region->detect, &did_abort);

        MUTEX_LOCK(dbenv, &newl->mutex);
        LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
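        /*
         * MUTEX_LOCK() above is the actual wait: the mutex in the new
         * lock was initialized in its locked state, so acquiring it
         * blocks this thread until whoever grants, aborts, or expires
         * the lock releases the mutex.  By the time the region lock is
         * reacquired, newl->status records why we woke up.
         */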
        /* Turn off lock timeout. */
        if (newl->status != DB_LSTAT_EXPIRED)
            LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);

        switch (newl->status) {
        case DB_LSTAT_ABORTED:
            ret = DB_LOCK_DEADLOCK;
            goto err;
        case DB_LSTAT_NOTEXIST:
            ret = DB_LOCK_NOTEXIST;
            goto err;
        case DB_LSTAT_EXPIRED:
expired:
            SHOBJECT_LOCK(lt, region, sh_obj, obj_ndx);
            if ((ret = __lock_put_internal(lt, newl,
                obj_ndx, DB_LOCK_UNLINK | DB_LOCK_FREE)) != 0)
                break;
            if (LOCK_TIME_EQUAL(
                &sh_locker->lk_expire, &sh_locker->tx_expire))
                region->stat.st_ntxntimeouts++;
            else
                region->stat.st_nlocktimeouts++;
            return (DB_LOCK_NOTGRANTED);
        case DB_LSTAT_PENDING:
            if (LF_ISSET(DB_LOCK_UPGRADE)) {
                /*
                 * The lock just granted got put on the holders
                 * list.  Since we're upgrading some other lock,
                 * we've got to remove it here.
                 */
                SH_TAILQ_REMOVE(
                    &sh_obj->holders, newl, links, __db_lock);
                /*
                 * Ensure the lock is not believed to be on
                 * the object's lists, if we're traversing by
                 * locker.
                 */
                newl->links.stqe_prev = -1;
                goto upgrade;
            } else
                newl->status = DB_LSTAT_HELD;
            break;
        case DB_LSTAT_FREE:
        case DB_LSTAT_HELD:
        case DB_LSTAT_WAITING:
        default:
            __db_err(dbenv,
                "Unexpected lock status: %d", (int)newl->status);
            ret = __db_panic(dbenv, EINVAL);
            goto err;
        }
    }

    lock->off = R_OFFSET(&lt->reginfo, newl);
    lock->gen = newl->gen;
    lock->mode = newl->mode;
    sh_locker->nlocks++;
    if (IS_WRITELOCK(newl->mode))
        sh_locker->nwrites++;

    return (0);

done:
    ret = 0;
err:
    if (newl != NULL &&
        (t_ret = __lock_freelock(lt, newl, locker,
        DB_LOCK_FREE | DB_LOCK_UNLINK)) != 0 && ret == 0)
        ret = t_ret;

    return (ret);
}

/*
 * __lock_put_pp --
 *	DB_ENV->lock_put pre/post processing.
 *
 * PUBLIC: int __lock_put_pp __P((DB_ENV *, DB_LOCK *));
 */
int
__lock_put_pp(dbenv, lock)
    DB_ENV *dbenv;
    DB_LOCK *lock;
{
    int rep_check, ret;

    PANIC_CHECK(dbenv);
    ENV_REQUIRES_CONFIG(dbenv,
        dbenv->lk_handle, "DB_LOCK->lock_put", DB_INIT_LOCK);

    rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
    if (rep_check)
        __env_rep_enter(dbenv);
    ret = __lock_put(dbenv, lock, 0);
    if (rep_check)
        __env_db_rep_exit(dbenv);
    return (ret);
}

/*
 * __lock_put --
 *	Internal lock_put interface.
 *
 * PUBLIC: int __lock_put __P((DB_ENV *, DB_LOCK *, u_int32_t));
 */
int
__lock_put(dbenv, lock, flags)
    DB_ENV *dbenv;
    DB_LOCK *lock;
    u_int32_t flags;
{
    DB_LOCKTAB *lt;
    int ret, run_dd;

    if (IS_RECOVERING(dbenv))
        return (0);

    lt = dbenv->lk_handle;

    LOCKREGION(dbenv, lt);
    ret = __lock_put_nolock(dbenv, lock, &run_dd, flags);
    UNLOCKREGION(dbenv, lt);

    /*
     * Only run the lock detector if put told us to AND we are running
     * in auto-detect mode.  If we are not running in auto-detect, then
     * a call to lock_detect here will zero the need_dd bit, but will
     * not actually abort anything.
     */
    if (ret == 0 && run_dd)
        (void)__lock_detect(dbenv,
            ((DB_LOCKREGION *)lt->reginfo.primary)->detect, NULL);
    return (ret);
}

static int
__lock_put_nolock(dbenv, lock, runp, flags)
    DB_ENV *dbenv;
    DB_LOCK *lock;
    int *runp;
    u_int32_t flags;
{
    struct __db_lock *lockp;
    DB_LOCKREGION *region;
    DB_LOCKTAB *lt;
    int ret;
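    /*
     * Resolve the caller's DB_LOCK handle back to the shared lock
     * structure, verify its generation number, and either downgrade
     * or release it; *runp reports whether the caller should go on
     * to run the deadlock detector.
     */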
    /* Check if locks have been globally turned off. */
    if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
        return (0);

    lt = dbenv->lk_handle;
    region = lt->reginfo.primary;

    lockp = R_ADDR(&lt->reginfo, lock->off);
    if (lock->gen != lockp->gen) {
        __db_err(dbenv, __db_lock_invalid, "DB_LOCK->lock_put");
        LOCK_INIT(*lock);
        return (EINVAL);
    }

    if (LF_ISSET(DB_LOCK_DOWNGRADE) &&
        lock->mode == DB_LOCK_WRITE && lockp->refcount > 1) {
        ret = __lock_downgrade(dbenv,
            lock, DB_LOCK_WWRITE, DB_LOCK_NOREGION);
        if (ret == 0)
            lockp->refcount--;
    } else
        ret = __lock_put_internal(lt,
            lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE);

    LOCK_INIT(*lock);

    *runp = 0;
    if (ret == 0 && region->detect != DB_LOCK_NORUN &&
        (region->need_dd || LOCK_TIME_ISVALID(&region->next_timeout)))
        *runp = 1;

    return (ret);
}

/*
 * __lock_downgrade --
 *	Used to downgrade locks.  Currently this is used in three places:
 *	1) by the Concurrent Data Store product to downgrade write locks
 *	back to iwrite locks, 2) to downgrade write-handle locks to
 *	read-handle locks at the end of an open/create, and 3) to
 *	downgrade write locks to was_write (WWRITE) to support dirty
 *	reads.
 *
 * PUBLIC: int __lock_downgrade __P((DB_ENV *,
 * PUBLIC:     DB_LOCK *, db_lockmode_t, u_int32_t));
 */
int
__lock_downgrade(dbenv, lock, new_mode, flags)
    DB_ENV *dbenv;
    DB_LOCK *lock;
    db_lockmode_t new_mode;
    u_int32_t flags;
{
    struct __db_lock *lockp;
    DB_LOCKER *sh_locker;
    DB_LOCKOBJ *obj;
    DB_LOCKREGION *region;
    DB_LOCKTAB *lt;
    u_int32_t indx;
    int ret;

    PANIC_CHECK(dbenv);
    ret = 0;

    /* Check if locks have been globally turned off. */
    if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
        return (0);

    lt = dbenv->lk_handle;
    region = lt->reginfo.primary;

    if (!LF_ISSET(DB_LOCK_NOREGION))
        LOCKREGION(dbenv, lt);

    lockp = R_ADDR(&lt->reginfo, lock->off);
    if (lock->gen != lockp->gen) {
        __db_err(dbenv, __db_lock_invalid, "lock_downgrade");
        ret = EINVAL;
        goto out;
    }

    LOCKER_LOCK(lt, region, lockp->holder, indx);

    if ((ret = __lock_getlocker(lt, lockp->holder,
        indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
        if (ret == 0)
            ret = EINVAL;
        __db_err(dbenv, __db_locker_invalid);
        goto out;
    }

    if (IS_WRITELOCK(lockp->mode) && !IS_WRITELOCK(new_mode))
        sh_locker->nwrites--;

    if (new_mode == DB_LOCK_WWRITE)
        F_SET(sh_locker, DB_LOCKER_DIRTY);

    lockp->mode = new_mode;
    lock->mode = new_mode;

    /* Get the object associated with this lock. */
    obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
    (void)__lock_promote(lt, obj, LF_ISSET(DB_LOCK_NOWAITERS));

out:
    if (!LF_ISSET(DB_LOCK_NOREGION))
        UNLOCKREGION(dbenv, lt);

    return (ret);
}

static int
__lock_put_internal(lt, lockp, obj_ndx, flags)
    DB_LOCKTAB *lt;
    struct __db_lock *lockp;
    u_int32_t obj_ndx, flags;
{
    DB_LOCKOBJ *sh_obj;
    DB_LOCKREGION *region;
    int ret, state_changed;

    region = lt->reginfo.primary;
    ret = state_changed = 0;

    if (!OBJ_LINKS_VALID(lockp)) {
        /*
         * Someone removed this lock while we were doing a release
         * by locker id.  We are trying to free this lock, but it's
         * already been done; all we need to do is return it to the
         * free list.
         */
        (void)__lock_freelock(lt, lockp, 0, DB_LOCK_FREE);
        return (0);
    }
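For orientation, below is a minimal sketch of how an application reaches these internals through the public Berkeley DB 4.x locking API: DB_ENV->lock_id allocates a locker, DB_ENV->lock_get drives the grant/wait logic at the top of this listing, and DB_ENV->lock_put goes through __lock_put_pp/__lock_put above. It is not part of lock.c; the object name is a placeholder, the environment is assumed to be already open with DB_INIT_LOCK, and error handling is abbreviated.

#include <string.h>
#include <db.h>

int
lock_example(dbenv)
    DB_ENV *dbenv;      /* Assumed open with DB_INIT_LOCK. */
{
    DB_LOCK lock;
    DBT obj;
    u_int32_t locker;
    int ret;
    char objname[] = "my-object";   /* Placeholder lock object. */

    /* Allocate a locker id for this thread of control. */
    if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
        return (ret);

    memset(&obj, 0, sizeof(obj));
    obj.data = objname;
    obj.size = (u_int32_t)strlen(objname);

    /*
     * Request a write lock without blocking; with DB_LOCK_NOWAIT a
     * conflicting holder yields DB_LOCK_NOTGRANTED instead of putting
     * us on the waiters queue.
     */
    if ((ret = dbenv->lock_get(dbenv,
        locker, DB_LOCK_NOWAIT, &obj, DB_LOCK_WRITE, &lock)) != 0)
        return (ret);

    /* Release the lock; this runs __lock_put_pp/__lock_put. */
    return (dbenv->lock_put(dbenv, &lock));
}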