📄 glock.c
		}
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		spin_lock(&gl->gl_spin);
		gl->gl_stamp = jiffies;
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach an LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach an LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (time_before(now, holdtime))
		delay = holdtime - now;

	handle_callback(gl, state, 1, delay);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		down_read(&gfs2_umount_flush_sem);
		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
		up_read(&gfs2_umount_flush_sem);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (!sdp || gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	cond_resched();
	return has_entries;
}
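For context, a typical caller pairs gfs2_glock_nq() with gfs2_glock_dq_uninit() through the gfs2_glock_nq_init() helper declared in glock.h, which combines gfs2_holder_init() with gfs2_glock_nq() and uninitializes the holder on failure. Below is a minimal sketch of that lifecycle; the function name is illustrative only, and it assumes an inode 'ip' whose glock is ip->i_gl:

/* Sketch only: the usual holder lifecycle as seen elsewhere in GFS2.
 * 'example_read_under_glock' is a hypothetical caller, not part of glock.c. */
static int example_read_under_glock(struct gfs2_inode *ip)
{
	struct gfs2_holder gh;
	int error;

	/* gfs2_glock_nq_init() = gfs2_holder_init() + gfs2_glock_nq();
	 * on error the holder has already been uninitialized for us. */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
	if (error)
		return error;

	/* ... read inode state while the shared glock is held ... */

	/* Drop the lock and release the holder in one call. */
	gfs2_glock_dq_uninit(&gh);
	return 0;
}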