lock.c
	if (LF_ISSET(DB_LOCK_DOALL))
		region->stat.st_nreleases += lockp->refcount;
	else
		region->stat.st_nreleases++;

	if (!LF_ISSET(DB_LOCK_DOALL) && lockp->refcount > 1) {
		lockp->refcount--;
		return (0);
	}

	/* Increment generation number. */
	lockp->gen++;

	/* Get the object associated with this lock. */
	sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);

	/* Remove this lock from its holders/waitlist. */
	if (lockp->status != DB_LSTAT_HELD && lockp->status != DB_LSTAT_PENDING)
		__lock_remove_waiter(lt, sh_obj, lockp, DB_LSTAT_FREE);
	else {
		SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock);
		lockp->links.stqe_prev = -1;
	}

	if (LF_ISSET(DB_LOCK_NOPROMOTE))
		state_changed = 0;
	else
		state_changed = __lock_promote(lt,
		    sh_obj, LF_ISSET(DB_LOCK_REMOVE | DB_LOCK_NOWAITERS));

	/* Check if object should be reclaimed. */
	if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL &&
	    SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL) {
		HASHREMOVE_EL(lt->obj_tab,
		    obj_ndx, __db_lockobj, links, sh_obj);
		if (sh_obj->lockobj.size > sizeof(sh_obj->objdata))
			__db_shalloc_free(&lt->reginfo,
			    SH_DBT_PTR(&sh_obj->lockobj));
		SH_TAILQ_INSERT_HEAD(
		    &region->free_objs, sh_obj, links, __db_lockobj);
		region->stat.st_nobjects--;
		state_changed = 1;
	}

	/* Free lock. */
	if (LF_ISSET(DB_LOCK_UNLINK | DB_LOCK_FREE))
		ret = __lock_freelock(lt, lockp, lockp->holder, flags);

	/*
	 * If we did not promote anyone; we need to run the deadlock
	 * detector again.
	 */
	if (state_changed == 0)
		region->need_dd = 1;

	return (ret);
}

/*
 * __lock_freelock --
 *	Free a lock.  Unlink it from its locker if necessary.
 *
 */
static int
__lock_freelock(lt, lockp, locker, flags)
	DB_LOCKTAB *lt;
	struct __db_lock *lockp;
	u_int32_t locker, flags;
{
	DB_ENV *dbenv;
	DB_LOCKER *sh_locker;
	DB_LOCKREGION *region;
	u_int32_t indx;
	int ret;

	dbenv = lt->dbenv;
	region = lt->reginfo.primary;
	ret = 0;

	if (LF_ISSET(DB_LOCK_UNLINK)) {
		LOCKER_LOCK(lt, region, locker, indx);

		if ((ret = __lock_getlocker(lt,
		    locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
			if (ret == 0)
				ret = EINVAL;
			__db_err(dbenv, __db_locker_invalid);
			return (ret);
		}

		SH_LIST_REMOVE(lockp, locker_links, __db_lock);
		if (lockp->status == DB_LSTAT_HELD) {
			sh_locker->nlocks--;
			if (IS_WRITELOCK(lockp->mode))
				sh_locker->nwrites--;
		}
	}

	if (LF_ISSET(DB_LOCK_FREE)) {
		lockp->status = DB_LSTAT_FREE;
		SH_TAILQ_INSERT_HEAD(
		    &region->free_locks, lockp, links, __db_lock);
		region->stat.st_nlocks--;
	}

	return (ret);
}

/*
 * __lock_getobj --
 *	Get an object in the object hash table.  The create parameter
 * indicates if the object should be created if it doesn't exist in
 * the table.
 *
 * This must be called with the object bucket locked.
 */
static int
__lock_getobj(lt, obj, ndx, create, retp)
	DB_LOCKTAB *lt;
	const DBT *obj;
	u_int32_t ndx;
	int create;
	DB_LOCKOBJ **retp;
{
	DB_ENV *dbenv;
	DB_LOCKOBJ *sh_obj;
	DB_LOCKREGION *region;
	int ret;
	void *p;

	dbenv = lt->dbenv;
	region = lt->reginfo.primary;

	/* Look up the object in the hash table. */
	HASHLOOKUP(lt->obj_tab,
	    ndx, __db_lockobj, links, obj, sh_obj, __lock_cmp);

	/*
	 * If we found the object, then we can just return it.  If
	 * we didn't find the object, then we need to create it.
	 */
	if (sh_obj == NULL && create) {
		/* Create new object and then insert it into hash table. */
		if ((sh_obj = SH_TAILQ_FIRST(
		    &region->free_objs, __db_lockobj)) == NULL) {
			ret = __lock_nomem(lt->dbenv, "object entries");
			goto err;
		}

		/*
		 * If we can fit this object in the structure, do so instead
		 * of shalloc-ing space for it.
		 */
		if (obj->size <= sizeof(sh_obj->objdata))
			p = sh_obj->objdata;
		else if ((ret =
		    __db_shalloc(&lt->reginfo, obj->size, 0, &p)) != 0) {
			__db_err(dbenv, "No space for lock object storage");
			goto err;
		}

		memcpy(p, obj->data, obj->size);

		SH_TAILQ_REMOVE(
		    &region->free_objs, sh_obj, links, __db_lockobj);
		if (++region->stat.st_nobjects > region->stat.st_maxnobjects)
			region->stat.st_maxnobjects = region->stat.st_nobjects;

		SH_TAILQ_INIT(&sh_obj->waiters);
		SH_TAILQ_INIT(&sh_obj->holders);
		sh_obj->lockobj.size = obj->size;
		sh_obj->lockobj.off =
		    (roff_t)SH_PTR_TO_OFF(&sh_obj->lockobj, p);

		HASHINSERT(lt->obj_tab, ndx, __db_lockobj, links, sh_obj);
	}

	*retp = sh_obj;
	return (0);

err:	return (ret);
}

/*
 * __lock_is_parent --
 *	Given a locker and a transaction, return 1 if the locker is
 * an ancestor of the designated transaction.  This is used to determine
 * if we should grant locks that appear to conflict, but don't because
 * the lock is already held by an ancestor.
 */
static int
__lock_is_parent(lt, locker, sh_locker)
	DB_LOCKTAB *lt;
	u_int32_t locker;
	DB_LOCKER *sh_locker;
{
	DB_LOCKER *parent;

	parent = sh_locker;
	while (parent->parent_locker != INVALID_ROFF) {
		parent = R_ADDR(&lt->reginfo, parent->parent_locker);
		if (parent->id == locker)
			return (1);
	}

	return (0);
}

/*
 * __lock_locker_is_parent --
 *	Determine if "locker" is an ancestor of "child".
 *	*retp == 1 if so, 0 otherwise.
 *
 * PUBLIC: int __lock_locker_is_parent
 * PUBLIC:     __P((DB_ENV *, u_int32_t, u_int32_t, int *));
 */
int
__lock_locker_is_parent(dbenv, locker, child, retp)
	DB_ENV *dbenv;
	u_int32_t locker, child;
	int *retp;
{
	DB_LOCKER *sh_locker;
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;
	u_int32_t locker_ndx;
	int ret;

	lt = dbenv->lk_handle;
	region = lt->reginfo.primary;

	LOCKER_LOCK(lt, region, child, locker_ndx);
	if ((ret =
	    __lock_getlocker(lt, child, locker_ndx, 0, &sh_locker)) != 0) {
		__db_err(dbenv, __db_locker_invalid);
		return (ret);
	}

	/*
	 * The locker may not exist for this transaction, if not then it has
	 * no parents.
	 */
	if (sh_locker == NULL)
		*retp = 0;
	else
		*retp = __lock_is_parent(lt, locker, sh_locker);
	return (0);
}

/*
 * __lock_inherit_locks --
 *	Called on child commit to merge child's locks with parent's.
 */
static int
__lock_inherit_locks(lt, locker, flags)
	DB_LOCKTAB *lt;
	u_int32_t locker;
	u_int32_t flags;
{
	DB_ENV *dbenv;
	DB_LOCKER *sh_locker, *sh_parent;
	DB_LOCKOBJ *obj;
	DB_LOCKREGION *region;
	int ret;
	struct __db_lock *hlp, *lp;
	u_int32_t ndx;

	region = lt->reginfo.primary;
	dbenv = lt->dbenv;

	/*
	 * Get the committing locker and mark it as deleted.
	 * This allows us to traverse the locker links without
	 * worrying that someone else is deleting locks out
	 * from under us.  However, if the locker doesn't
	 * exist, that just means that the child holds no
	 * locks, so inheritance is easy!
	 */
	LOCKER_LOCK(lt, region, locker, ndx);
	if ((ret = __lock_getlocker(lt,
	    locker, ndx, 0, &sh_locker)) != 0 ||
	    sh_locker == NULL ||
	    F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
		if (ret == 0 && sh_locker != NULL)
			ret = EINVAL;
		__db_err(dbenv, __db_locker_invalid);
		goto err;
	}

	/* Make sure we are a child transaction. */
	if (sh_locker->parent_locker == INVALID_ROFF) {
		__db_err(dbenv, "Not a child transaction");
		ret = EINVAL;
		goto err;
	}
	sh_parent = R_ADDR(&lt->reginfo, sh_locker->parent_locker);
	F_SET(sh_locker, DB_LOCKER_DELETED);

	/*
	 * Now, lock the parent locker; move locks from
	 * the committing list to the parent's list.
	 */
	LOCKER_LOCK(lt, region, locker, ndx);
	if (F_ISSET(sh_parent, DB_LOCKER_DELETED)) {
		if (ret == 0) {
			__db_err(dbenv, "Parent locker is not valid");
			ret = EINVAL;
		}
		goto err;
	}

	/*
	 * In order to make it possible for a parent to have
	 * many, many children who lock the same objects, and
	 * not require an inordinate number of locks, we try
	 * to merge the child's locks with its parent's.
	 */
	for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
	    lp != NULL;
	    lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
		SH_LIST_REMOVE(lp, locker_links, __db_lock);

		/* See if the parent already has a lock. */
		obj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
		for (hlp = SH_TAILQ_FIRST(&obj->holders, __db_lock);
		    hlp != NULL;
		    hlp = SH_TAILQ_NEXT(hlp, links, __db_lock))
			if (hlp->holder == sh_parent->id &&
			    lp->mode == hlp->mode)
				break;

		if (hlp != NULL) {
			/* Parent already holds lock. */
			hlp->refcount += lp->refcount;

			/* Remove lock from object list and free it. */
			DB_ASSERT(lp->status == DB_LSTAT_HELD);
			SH_TAILQ_REMOVE(&obj->holders, lp, links, __db_lock);
			(void)__lock_freelock(lt, lp, locker, DB_LOCK_FREE);
		} else {
			/* Just move lock to parent chains. */
			SH_LIST_INSERT_HEAD(&sh_parent->heldby,
			    lp, locker_links, __db_lock);
			lp->holder = sh_parent->id;
		}

		/*
		 * We may need to promote regardless of whether we simply
		 * moved the lock to the parent or changed the parent's
		 * reference count, because there might be a sibling waiting,
		 * who will now be allowed to make forward progress.
		 */
		(void)__lock_promote(lt, obj, LF_ISSET(DB_LOCK_NOWAITERS));
	}

	/* Transfer child counts to parent. */
	sh_parent->nlocks += sh_locker->nlocks;
	sh_parent->nwrites += sh_locker->nwrites;

err:	return (ret);
}

/*
 * __lock_promote --
 *
 * Look through the waiters and holders lists and decide which (if any)
 * locks can be promoted.   Promote any that are eligible.
 *
 * PUBLIC: int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, u_int32_t));
 */
int
__lock_promote(lt, obj, flags)
	DB_LOCKTAB *lt;
	DB_LOCKOBJ *obj;
	u_int32_t flags;
{
	struct __db_lock *lp_w, *lp_h, *next_waiter;
	DB_LOCKER *sh_locker;
	DB_LOCKREGION *region;
	u_int32_t locker_ndx;
	int had_waiters, state_changed;

	region = lt->reginfo.primary;
	had_waiters = 0;

	/*
	 * We need to do lock promotion.  We also need to determine if we're
	 * going to need to run the deadlock detector again.  If we release
	 * locks, and there are waiters, but no one gets promoted, then we
	 * haven't fundamentally changed the lockmgr state, so we may still
	 * have a deadlock and we have to run again.  However, if there were
	 * no waiters, or we actually promoted someone, then we are OK and we
	 * don't have to run it immediately.
	 *
	 * During promotion, we look for state changes so we can return this
	 * information to the caller.
	 */
	for (lp_w = SH_TAILQ_FIRST(&obj->waiters, __db_lock),
	    state_changed = lp_w == NULL;
	    lp_w != NULL;
	    lp_w = next_waiter) {
		had_waiters = 1;
		next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock);

		/* Waiter may have aborted or expired. */
		if (lp_w->status != DB_LSTAT_WAITING)
			continue;

		/* Are we switching locks? */
		if (LF_ISSET(DB_LOCK_NOWAITERS) && lp_w->mode == DB_LOCK_WAIT)
			continue;

		if (LF_ISSET(DB_LOCK_REMOVE)) {
			__lock_remove_waiter(lt, obj, lp_w, DB_LSTAT_NOTEXIST);
			continue;
		}
		for (lp_h = SH_TAILQ_FIRST(&obj->holders, __db_lock);
		    lp_h != NULL;
		    lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
			if (lp_h->holder != lp_w->holder &&
			    CONFLICTS(lt, region, lp_h->mode, lp_w->mode)) {
				LOCKER_LOCK(lt,
				    region, lp_w->holder, locker_ndx);
				if ((__lock_getlocker(lt, lp_w->holder,
				    locker_ndx, 0, &sh_locker)) != 0) {
					DB_ASSERT(0);
					break;
				}
				if (!__lock_is_parent(lt,
				    lp_h->holder, sh_locker))
					break;
			}
		}
		if (lp_h != NULL)	/* Found a conflict. */
			break;

		/* No conflict, promote the waiting lock. */
		SH_TAILQ_REMOVE(&obj->waiters, lp_w, links, __db_lock);
		lp_w->status = DB_LSTAT_PENDING;
		SH_TAILQ_INSERT_TAIL(&obj->holders, lp_w, links);

		/* Wake up waiter. */
		MUTEX_UNLOCK(lt->dbenv, &lp_w->mutex);
		state_changed = 1;
	}

	/*
	 * If this object had waiters and doesn't any more, then we need
	 * to remove it from the dd_obj list.
	 */
	if (had_waiters && SH_TAILQ_FIRST(&obj->waiters, __db_lock) == NULL)
		SH_TAILQ_REMOVE(&region->dd_objs, obj, dd_links, __db_lockobj);
	return (state_changed);
}

/*
 * __lock_remove_waiter --
 *	Any lock on the waitlist has a process waiting for it.  Therefore,
 * we can't return the lock to the freelist immediately.  Instead, we can
 * remove the lock from the list of waiters, set the status field of the
 * lock, and then let the process waking up return the lock to the
 * free list.
 *
 * This must be called with the Object bucket locked.
 */
static void
__lock_remove_waiter(lt, sh_obj, lockp, status)
	DB_LOCKTAB *lt;
	DB_LOCKOBJ *sh_obj;
	struct __db_lock *lockp;
	db_status_t status;
{
	DB_LOCKREGION *region;
	int do_wakeup;

	region = lt->reginfo.primary;

	do_wakeup = lockp->status == DB_LSTAT_WAITING;

	SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
	lockp->links.stqe_prev = -1;
	lockp->status = status;
	if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
		SH_TAILQ_REMOVE(
		    &region->dd_objs, sh_obj, dd_links, __db_lockobj);

	/*
	 * Wake whoever is waiting on this lock.
	 */
	if (do_wakeup)
		MUTEX_UNLOCK(lt->dbenv, &lockp->mutex);
}

/*
 * __lock_trade --
 *
 * Trade locker ids on a lock.  This is used to reassign file locks from
 * a transactional locker id to a long-lived locker id.  This should be
 * called with the region mutex held.
 */
static int
__lock_trade(dbenv, lock, new_locker)
	DB_ENV *dbenv;
	DB_LOCK *lock;
	u_int32_t new_locker;
{
	struct __db_lock *lp;
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;
	DB_LOCKER *sh_locker;
	int ret;
	u_int32_t locker_ndx;

	lt = dbenv->lk_handle;
	region = lt->reginfo.primary;
	lp = R_ADDR(&lt->reginfo, lock->off);

	/* If the lock is already released, simply return. */
	if (lp->gen != lock->gen)
		return (DB_NOTFOUND);

	/* Make sure that we can get new locker and add this lock to it. */
	LOCKER_LOCK(lt, region, new_locker, locker_ndx);
	if ((ret =
	    __lock_getlocker(lt, new_locker, locker_ndx, 0, &sh_locker)) != 0)
		return (ret);

	if (sh_locker == NULL) {
		__db_err(dbenv, "Locker does not exist");
		return (EINVAL);
	}

	/* Remove the lock from its current locker. */
	if ((ret = __lock_freelock(lt, lp, lp->holder, DB_LOCK_UNLINK)) != 0)
		return (ret);

	/* Add lock to its new locker. */
	SH_LIST_INSERT_HEAD(&sh_locker->heldby, lp, locker_links, __db_lock);
	sh_locker->nlocks++;
	if (IS_WRITELOCK(lp->mode))
		sh_locker->nwrites++;
	lp->holder = new_locker;

	return (0);
}
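For readers less familiar with how applications reach these internal routines, here is a minimal sketch (not part of lock.c) that drives the release-and-promotion path above through the public Berkeley DB 4.x locking API. It assumes a DB_ENV handle named dbenv that was opened with DB_INIT_LOCK; the object name and function name are made up for illustration, and error handling is abbreviated.

/*
 * Sketch only: acquire and release one write lock via the public API.
 * Assumes dbenv was opened with DB_INIT_LOCK.
 */
#include <string.h>
#include <db.h>

int
acquire_and_release(DB_ENV *dbenv)
{
	DBT obj;
	DB_LOCK lock;
	u_int32_t locker;
	int ret;

	/* Allocate a locker id; internally this creates a DB_LOCKER. */
	if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
		return (ret);

	/* Name the object being locked (an arbitrary byte string). */
	memset(&obj, 0, sizeof(obj));
	obj.data = "my-resource";
	obj.size = sizeof("my-resource") - 1;

	/* Request a write lock; conflicting requests queue as waiters. */
	if ((ret = dbenv->lock_get(dbenv,
	    locker, 0, &obj, DB_LOCK_WRITE, &lock)) != 0)
		goto err;

	/*
	 * Releasing the lock ends up in __lock_put_internal: the lock is
	 * removed from the object's holder list, waiters may be promoted
	 * by __lock_promote, and the lock structure is returned to the
	 * free list by __lock_freelock.
	 */
	ret = dbenv->lock_put(dbenv, &lock);

err:	(void)dbenv->lock_id_free(dbenv, locker);
	return (ret);
}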
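The child-to-parent merge performed by __lock_inherit_locks is normally triggered when a nested transaction commits. The sketch below (also not part of lock.c, names chosen for illustration) assumes a transactional environment (DB_INIT_TXN, DB_INIT_LOCK, DB_INIT_LOG, DB_INIT_MPOOL) and an already-open database handle dbp; it only illustrates the call sequence, with error handling abbreviated.

/*
 * Sketch only: a child transaction takes locks via a put, and committing
 * it hands those locks to the parent (the __lock_inherit_locks path).
 */
#include <string.h>
#include <db.h>

int
child_commit_example(DB_ENV *dbenv, DB *dbp)
{
	DBT key, data;
	DB_TXN *parent, *child;
	int ret;

	if ((ret = dbenv->txn_begin(dbenv, NULL, &parent, 0)) != 0)
		return (ret);
	if ((ret = dbenv->txn_begin(dbenv, parent, &child, 0)) != 0)
		goto err;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "k";
	key.size = 1;
	data.data = "v";
	data.size = 1;

	/* The child's locker acquires the locks needed for this put. */
	if ((ret = dbp->put(dbp, child, &key, &data, 0)) != 0) {
		(void)child->abort(child);
		goto err;
	}

	/*
	 * Child commit reaches __lock_inherit_locks: each child lock is
	 * either merged with a matching parent lock (bumping refcount) or
	 * moved onto the parent's locker chain, and waiting siblings may
	 * be promoted.
	 */
	if ((ret = child->commit(child, 0)) != 0)
		goto err;

	/* Parent commit finally releases the inherited locks. */
	return (parent->commit(parent, 0));

err:	(void)parent->abort(parent);
	return (ret);
}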