/* fs/gfs2/glock.c — GFS2 glock (cluster lock) state machine, excerpt. */
/*
 * NOTE(review): the fragment below is the tail of rq_demote() — its head
 * precedes this excerpt (rq_demote() is called from run_queue() below).
 * Entered with gl->gl_spin held; returns with it held.
 */
	/* Already at (or below) the requested state: just wake the demote
	   waiters and report "not blocked". */
	if (gl->gl_state == gl->gl_demote_state ||
	    gl->gl_state == LM_ST_UNLOCKED) {
		gfs2_demote_wake(gl);
		return 0;
	}
	set_bit(GLF_LOCK, &gl->gl_flags);
	/* Drop the lock entirely, or convert it, via the lock module.
	   The spinlock is released around the (possibly sleeping) call. */
	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
	    gl->gl_state != LM_ST_EXCLUSIVE) {
		spin_unlock(&gl->gl_spin);
		gfs2_glock_drop_th(gl);
	} else {
		spin_unlock(&gl->gl_spin);
		gfs2_glock_xmote_th(gl, NULL);
	}
	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 * Services the wait queues in priority order until something blocks or
 * the queues are empty: mutex waiters (gl_waiters1) first, then a pending
 * demote, then promotion waiters (gl_waiters3).  Called with gl->gl_spin
 * held (see gfs2_glmutex_unlock() / glock_wait_internal() below).
 */
static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		/* Someone already owns the glock mutex; nothing to do. */
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			/* waiters1 should only ever hold mutex requests. */
			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
			blocked = rq_demote(gl);
		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			/* waiters3 should only ever hold promote requests. */
			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives the caller exclusive access to manipulate a glock structure.
 * May sleep (via wait_on_holder()) until GLF_LOCK can be taken.
 */
static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);
	if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
		BUG();

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		/* Busy: queue ourselves; rq_mutex() will wake us later. */
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		/* Got it immediately: record owner for debugging and
		   self-complete the wait. */
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		clear_bit(HIF_WAIT, &gh.gh_iflags);
		/* Order the clear of HIF_WAIT before the wakeup. */
		smp_mb();
		wake_up_bit(&gh.gh_iflags, HIF_WAIT);
	}
	spin_unlock(&gl->gl_spin);

	wait_on_holder(&gh);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Non-blocking variant of gfs2_glmutex_lock().
 *
 * Returns: 1 if the glock is acquired, 0 otherwise
 */
static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 * Clears GLF_LOCK and immediately re-runs the wait queues so the next
 * queued request can proceed.
 */
static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	run_queue(gl);
	/* run_queue() must not have dropped gl_spin behind our back. */
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: non-zero if the request came from a remote node
 * @delay: non-zero to mark the demote pending rather than immediate
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED.
 */
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    int remote, unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	spin_lock(&gl->gl_spin);
	set_bit(bit, &gl->gl_flags);
	/* gl_demote_state == LM_ST_EXCLUSIVE means "no demote currently
	   recorded" here; record the new target and timestamp. */
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
		if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
		    gl->gl_object) {
			gfs2_glock_schedule_for_reclaim(gl);
			spin_unlock(&gl->gl_spin);
			return;
		}
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		/* Conflicting demote targets: fall back to fully unlocked. */
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	spin_unlock(&gl->gl_spin);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 * Takes/drops a glock reference when transitioning to/from LM_ST_UNLOCKED,
 * so a held lock pins the glock structure.
 */
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 * Completion ("bottom half") for gfs2_glock_xmote_th(): records the new
 * state, invalidates cached data where required, and resolves the pending
 * request holder (gl_req_gh), if any.
 */
static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, 0);
	}

	/* Deal with each possible exit condition */

	if (!gh) {
		/* No requesting holder: this was a demote-driven change. */
		gl->gl_stamp = jiffies;
		if (ret & LM_OUT_CANCELED) {
			op_done = 0;
		} else {
			spin_lock(&gl->gl_spin);
			if (gl->gl_state != gl->gl_demote_state) {
				/* Didn't reach the demote target: drop the
				   lock fully instead. */
				gl->gl_req_bh = NULL;
				spin_unlock(&gl->gl_spin);
				gfs2_glock_drop_th(gl);
				gfs2_glock_put(gl);
				return;
			}
			gfs2_demote_wake(gl);
			spin_unlock(&gl->gl_spin);
		}
	} else {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			goto out;
		gh->gh_error = GLR_CANCELED;
		if (ret & LM_OUT_CANCELED)
			goto out;
		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
			/* Success: promote the request to a holder. */
			list_add_tail(&gh->gh_list, &gl->gl_holders);
			gh->gh_error = 0;
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			set_bit(HIF_FIRST, &gh->gh_iflags);
			op_done = 0;
			goto out;
		}
		gh->gh_error = GLR_TRYFAILED;
		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			goto out;
		/* Non-TRY request that didn't get a compatible state:
		   this should not happen. */
		gh->gh_error = -EINVAL;
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
	}

	/* Drop the reference taken by gfs2_glock_xmote_th(). */
	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder driving the request, or NULL for a demote
 *
 * The requested state and flags are taken from @gh when present,
 * otherwise from the recorded demote state.  Completion is handled by
 * xmote_bh(), either synchronously here or asynchronously via gl_req_bh.
 */
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int flags = gh ? gh->gh_flags : 0;
	unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
				 LM_FLAG_ANY | LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	/* Reference dropped in xmote_bh(). */
	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 */
static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	spin_lock(&gl->gl_spin);
	gfs2_demote_wake(gl);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	/* Drops the reference taken by gfs2_glock_drop_th(); the header
	   comment above predates this and looks stale — TODO confirm. */
	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 * Completion is handled by drop_bh(), either synchronously here or
 * asynchronously via gl_req_bh.
 */
static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	if (glops->go_drop_th)
		glops->go_drop_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	/* Reference dropped in drop_bh(). */
	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.  Polls every 100ms until @gh is
 * either the active request, a holder, or off the queue.
 */
static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				(gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * For TRY requests, first attempt to fail fast if the request is still
 * queued.  For the first holder after a state change (HIF_FIRST), run the
 * per-type go_lock hook and release GLF_LOCK when done.
 *
 * Returns: 0 on success
 */
static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			/* Still queued and not the active request: a TRY
			   request fails immediately rather than waiting. */
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_on_holder(gh);
	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

/*
 * find_holder_by_owner - find the holder on @head owned by @pid, if any.
 * Returns NULL when no holder on the list matches.  Caller is expected
 * to hold whatever lock protects the list — presumably gl_spin; the
 * add_to_queue() caller below is visible here only in truncated form,
 * so confirm against the full file.
 */
static inline struct gfs2_holder *find_holder_by_owner(struct list_head *head,
						       pid_t pid)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner_pid == pid)
			return gh;
	}

	return NULL;
}

/*
 * print_dbg - printf-style debug output, to a seq_file when iterating
 * (@gi non-NULL) or to the kernel log otherwise.
 *
 * NOTE(review): vsprintf() into gi->string is unbounded — an oversized
 * message would overflow the buffer; vsnprintf(gi->string, sizeof(...))
 * would be safer.  Also seq_printf(gi->seq, gi->string) passes formatted
 * text as a format string; '%' in the output would be re-interpreted —
 * seq_printf(gi->seq, "%s", gi->string) (or seq_puts) avoids that.
 */
static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (gi) {
		vsprintf(gi->string, fmt, args);
		seq_printf(gi->seq, gi->string);
	} else
		vprintk(fmt, args);
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * For non-flock holders, a second request by the same pid on the same
 * glock (already a holder or already queued) is a recursion bug: dump
 * both call sites and BUG().
 *
 * NOTE(review): this function is truncated at the end of the excerpt;
 * its remainder (queueing the holder) is not visible here.
 */
static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner_pid);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (!(gh->gh_flags & GL_FLOCK)) {
		existing = find_holder_by_owner(&gl->gl_holders,
						gh->gh_owner_pid);
		if (existing) {
			print_symbol(KERN_WARNING "original: %s\n",
				     existing->gh_ip);
			printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
			printk(KERN_INFO "lock type : %d lock state : %d\n",
			       existing->gh_gl->gl_name.ln_type,
			       existing->gh_gl->gl_state);
			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
			printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
			printk(KERN_INFO "lock type : %d lock state : %d\n",
			       gl->gl_name.ln_type, gl->gl_state);
			BUG();
		}

		existing = find_holder_by_owner(&gl->gl_waiters3,
						gh->gh_owner_pid);
		if (existing) {
			print_symbol(KERN_WARNING "original: %s\n",
				     existing->gh_ip);
			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
			BUG();
/* (End of excerpt — non-source code-viewer UI text removed.) */