vlocation.c
	spin_unlock(&afs_vlocation_updates_lock);
}

/*
 * lookup volume location
 * - iterate through the VL servers in a cell until one of them admits knowing
 *   about the volume in question
 * - lookup in the local cache if not able to find on the VL server
 * - insert/update in the local cache if did get a VL response
 */
struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
					   struct key *key,
					   const char *name,
					   size_t namesz)
{
	struct afs_vlocation *vl;
	int ret;

	_enter("{%s},{%x},%*.*s,%zu",
	       cell->name, key_serial(key),
	       (int) namesz, (int) namesz, name, namesz);

	if (namesz >= sizeof(vl->vldb.name)) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* see if we have an in-memory copy first */
	down_write(&cell->vl_sem);
	spin_lock(&cell->vl_lock);
	list_for_each_entry(vl, &cell->vl_list, link) {
		if (vl->vldb.name[namesz] != '\0')
			continue;
		if (memcmp(vl->vldb.name, name, namesz) == 0)
			goto found_in_memory;
	}
	spin_unlock(&cell->vl_lock);

	/* not in the cell's in-memory lists - create a new record */
	vl = afs_vlocation_alloc(cell, name, namesz);
	if (!vl) {
		up_write(&cell->vl_sem);
		return ERR_PTR(-ENOMEM);
	}

	afs_get_cell(cell);
	list_add_tail(&vl->link, &cell->vl_list);
	vl->state = AFS_VL_CREATING;
	up_write(&cell->vl_sem);

fill_in_record:
	ret = afs_vlocation_fill_in_record(vl, key);
	if (ret < 0)
		goto error_abandon;
	spin_lock(&vl->lock);
	vl->state = AFS_VL_VALID;
	spin_unlock(&vl->lock);
	wake_up(&vl->waitq);

	/* schedule for regular updates */
	afs_vlocation_queue_for_updates(vl);
	goto success;

found_in_memory:
	/* found in memory */
	_debug("found in memory");
	atomic_inc(&vl->usage);
	spin_unlock(&cell->vl_lock);
	if (!list_empty(&vl->grave)) {
		spin_lock(&afs_vlocation_graveyard_lock);
		list_del_init(&vl->grave);
		spin_unlock(&afs_vlocation_graveyard_lock);
	}
	up_write(&cell->vl_sem);

	/* see if it was an abandoned record that we might try filling in */
	spin_lock(&vl->lock);
	while (vl->state != AFS_VL_VALID) {
		afs_vlocation_state_t state = vl->state;

		_debug("invalid [state %d]", state);

		if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
			vl->state = AFS_VL_CREATING;
			spin_unlock(&vl->lock);
			goto fill_in_record;
		}

		/* must now wait for creation or update by someone else to
		 * complete */
		_debug("wait");

		spin_unlock(&vl->lock);
		ret = wait_event_interruptible(vl->waitq,
					       vl->state == AFS_VL_NEW ||
					       vl->state == AFS_VL_VALID ||
					       vl->state == AFS_VL_NO_VOLUME);
		if (ret < 0)
			goto error;
		spin_lock(&vl->lock);
	}
	spin_unlock(&vl->lock);

success:
	_leave(" = %p", vl);
	return vl;

error_abandon:
	spin_lock(&vl->lock);
	vl->state = AFS_VL_NEW;
	spin_unlock(&vl->lock);
	wake_up(&vl->waitq);
error:
	ASSERT(vl != NULL);
	afs_put_vlocation(vl);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * finish using a volume location record
 */
void afs_put_vlocation(struct afs_vlocation *vl)
{
	if (!vl)
		return;

	_enter("%s", vl->vldb.name);

	ASSERTCMP(atomic_read(&vl->usage), >, 0);

	if (likely(!atomic_dec_and_test(&vl->usage))) {
		_leave("");
		return;
	}

	spin_lock(&afs_vlocation_graveyard_lock);
	if (atomic_read(&vl->usage) == 0) {
		_debug("buried");
		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
		vl->time_of_death = get_seconds();
		schedule_delayed_work(&afs_vlocation_reap,
				      afs_vlocation_timeout * HZ);

		/* suspend updates on this record */
		if (!list_empty(&vl->update)) {
			spin_lock(&afs_vlocation_updates_lock);
			list_del_init(&vl->update);
			spin_unlock(&afs_vlocation_updates_lock);
		}
	}
	spin_unlock(&afs_vlocation_graveyard_lock);
	_leave(" [killed?]");
}
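For context, a usage sketch of how these two functions pair up: afs_vlocation_lookup() returns a record with its usage count raised, and the caller must balance that with afs_put_vlocation(). The function example_resolve_volume() below is hypothetical and not part of kafs; it only illustrates the reference-counting convention.

/* Hypothetical caller (not part of this file): resolve a volume name
 * within a cell, use the cached VLDB entry, then drop the reference
 * taken by afs_vlocation_lookup().
 */
static int example_resolve_volume(struct afs_cell *cell, struct key *key,
				  const char *volname)
{
	struct afs_vlocation *vl;

	vl = afs_vlocation_lookup(cell, key, volname, strlen(volname));
	if (IS_ERR(vl))
		return PTR_ERR(vl);	/* e.g. -ENAMETOOLONG, -ENOMEM */

	/* ... consult vl->vldb for volume IDs and server addresses ... */

	afs_put_vlocation(vl);	/* may move the record to the graveyard */
	return 0;
}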
/*
 * destroy a dead volume location record
 */
static void afs_vlocation_destroy(struct afs_vlocation *vl)
{
	_enter("%p", vl);

#ifdef AFS_CACHING_SUPPORT
	cachefs_relinquish_cookie(vl->cache, 0);
#endif

	afs_put_cell(vl->cell);
	kfree(vl);
}

/*
 * reap dead volume location records
 */
static void afs_vlocation_reaper(struct work_struct *work)
{
	LIST_HEAD(corpses);
	struct afs_vlocation *vl;
	unsigned long delay, expiry;
	time_t now;

	_enter("");

	now = get_seconds();
	spin_lock(&afs_vlocation_graveyard_lock);

	while (!list_empty(&afs_vlocation_graveyard)) {
		vl = list_entry(afs_vlocation_graveyard.next,
				struct afs_vlocation, grave);
		_debug("check %p", vl);

		/* the queue is ordered most dead first */
		expiry = vl->time_of_death + afs_vlocation_timeout;
		if (expiry > now) {
			delay = (expiry - now) * HZ;
			_debug("delay %lu", delay);
			if (!schedule_delayed_work(&afs_vlocation_reap,
						   delay)) {
				cancel_delayed_work(&afs_vlocation_reap);
				schedule_delayed_work(&afs_vlocation_reap,
						      delay);
			}
			break;
		}

		spin_lock(&vl->cell->vl_lock);
		if (atomic_read(&vl->usage) > 0) {
			_debug("no reap");
			list_del_init(&vl->grave);
		} else {
			_debug("reap");
			list_move_tail(&vl->grave, &corpses);
			list_del_init(&vl->link);
		}
		spin_unlock(&vl->cell->vl_lock);
	}

	spin_unlock(&afs_vlocation_graveyard_lock);

	/* now reap the corpses we've extracted */
	while (!list_empty(&corpses)) {
		vl = list_entry(corpses.next, struct afs_vlocation, grave);
		list_del(&vl->grave);
		afs_vlocation_destroy(vl);
	}

	_leave("");
}

/*
 * initialise the VL update process
 */
int __init afs_vlocation_update_init(void)
{
	afs_vlocation_update_worker =
		create_singlethread_workqueue("kafs_vlupdated");
	return afs_vlocation_update_worker ? 0 : -ENOMEM;
}

/*
 * discard all the volume location records for rmmod
 */
void afs_vlocation_purge(void)
{
	afs_vlocation_timeout = 0;

	spin_lock(&afs_vlocation_updates_lock);
	list_del_init(&afs_vlocation_updates);
	spin_unlock(&afs_vlocation_updates_lock);
	cancel_delayed_work(&afs_vlocation_update);
	queue_delayed_work(afs_vlocation_update_worker,
			   &afs_vlocation_update, 0);
	destroy_workqueue(afs_vlocation_update_worker);

	cancel_delayed_work(&afs_vlocation_reap);
	schedule_delayed_work(&afs_vlocation_reap, 0);
}
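This excerpt begins mid-file, so the file-scope objects it manipulates (the delayed work items, the update workqueue, the two lists and their locks, and the timeout variables) are declared earlier in vlocation.c and are not shown above. The sketch below is a reconstruction of what those declarations look like; the names come from the code in this excerpt, but the initial timeout values are assumptions, not verified against this source tree.

/* Sketch of the file-scope state this excerpt assumes (declared earlier
 * in vlocation.c; initial values below are assumptions).
 */
static void afs_vlocation_reaper(struct work_struct *);
static void afs_vlocation_updater(struct work_struct *);

static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper);
static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater);
static struct workqueue_struct *afs_vlocation_update_worker;

static LIST_HEAD(afs_vlocation_updates);	/* records awaiting update */
static LIST_HEAD(afs_vlocation_graveyard);	/* records awaiting reaping */
static DEFINE_SPINLOCK(afs_vlocation_updates_lock);
static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock);

unsigned afs_vlocation_timeout = 10;		/* assumed: secs before reaping */
unsigned afs_vlocation_update_timeout = 600;	/* assumed: secs between updates */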
/*
 * update a volume location
 */
static void afs_vlocation_updater(struct work_struct *work)
{
	struct afs_cache_vlocation vldb;
	struct afs_vlocation *vl, *xvl;
	time_t now;
	long timeout;
	int ret;

	_enter("");

	now = get_seconds();

	/* find a record to update */
	spin_lock(&afs_vlocation_updates_lock);
	for (;;) {
		if (list_empty(&afs_vlocation_updates)) {
			spin_unlock(&afs_vlocation_updates_lock);
			_leave(" [nothing]");
			return;
		}

		vl = list_entry(afs_vlocation_updates.next,
				struct afs_vlocation, update);
		if (atomic_read(&vl->usage) > 0)
			break;
		list_del_init(&vl->update);
	}

	timeout = vl->update_at - now;
	if (timeout > 0) {
		queue_delayed_work(afs_vlocation_update_worker,
				   &afs_vlocation_update, timeout * HZ);
		spin_unlock(&afs_vlocation_updates_lock);
		_leave(" [nothing]");
		return;
	}

	list_del_init(&vl->update);
	atomic_inc(&vl->usage);
	spin_unlock(&afs_vlocation_updates_lock);

	/* we can now perform the update */
	_debug("update %s", vl->vldb.name);
	vl->state = AFS_VL_UPDATING;
	vl->upd_rej_cnt = 0;
	vl->upd_busy_cnt = 0;

	ret = afs_vlocation_update_record(vl, NULL, &vldb);
	spin_lock(&vl->lock);
	switch (ret) {
	case 0:
		afs_vlocation_apply_update(vl, &vldb);
		vl->state = AFS_VL_VALID;
		break;
	case -ENOMEDIUM:
		vl->state = AFS_VL_VOLUME_DELETED;
		break;
	default:
		vl->state = AFS_VL_UNCERTAIN;
		break;
	}
	spin_unlock(&vl->lock);
	wake_up(&vl->waitq);

	/* and then reschedule */
	_debug("reschedule");
	vl->update_at = get_seconds() + afs_vlocation_update_timeout;

	spin_lock(&afs_vlocation_updates_lock);

	if (!list_empty(&afs_vlocation_updates)) {
		/* next update in 10 minutes, but wait at least 1 second more
		 * than the newest record already queued so that we don't spam
		 * the VL server suddenly with lots of requests
		 */
		xvl = list_entry(afs_vlocation_updates.prev,
				 struct afs_vlocation, update);
		if (vl->update_at <= xvl->update_at)
			vl->update_at = xvl->update_at + 1;
		xvl = list_entry(afs_vlocation_updates.next,
				 struct afs_vlocation, update);
		timeout = xvl->update_at - now;
		if (timeout < 0)
			timeout = 0;
	} else {
		timeout = afs_vlocation_update_timeout;
	}

	ASSERT(list_empty(&vl->update));

	list_add_tail(&vl->update, &afs_vlocation_updates);

	_debug("timeout %ld", timeout);
	queue_delayed_work(afs_vlocation_update_worker,
			   &afs_vlocation_update, timeout * HZ);
	spin_unlock(&afs_vlocation_updates_lock);

	afs_put_vlocation(vl);
}