📄 nfs4proc.c
int
nfs4_proc_async_renew(struct nfs4_client *clp)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = clp->cl_cred,
	};

	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
			renew_done, (void *)jiffies);
}

int
nfs4_proc_renew(struct nfs4_client *clp)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = clp->cl_cred,
	};
	unsigned long now = jiffies;
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
	if (status < 0)
		return status;
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, now))
		clp->cl_last_renewal = now;
	spin_unlock(&clp->cl_lock);
	return 0;
}

/*
 * We will need to arrange for the VFS layer to provide an atomic open.
 * Until then, this open method is prone to inefficiency and race conditions
 * due to the lookup, potential create, and open VFS calls from sys_open()
 * placed on the wire.
 */
static int
nfs4_proc_file_open(struct inode *inode, struct file *filp)
{
	struct dentry *dentry = filp->f_dentry;
	struct nfs_open_context *ctx;
	struct nfs4_state *state = NULL;
	struct rpc_cred *cred;
	int status = -ENOMEM;

	dprintk("nfs4_proc_file_open: starting on (%.*s/%.*s)\n",
			(int)dentry->d_parent->d_name.len,
			dentry->d_parent->d_name.name,
			(int)dentry->d_name.len, dentry->d_name.name);

	/* Find our open stateid */
	cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
	if (unlikely(cred == NULL))
		return -ENOMEM;
	ctx = alloc_nfs_open_context(dentry, cred);
	put_rpccred(cred);
	if (unlikely(ctx == NULL))
		return -ENOMEM;
	status = -EIO; /* ERACE actually */
	state = nfs4_find_state(inode, cred, filp->f_mode);
	if (unlikely(state == NULL))
		goto no_state;
	ctx->state = state;
	nfs4_close_state(state, filp->f_mode);
	ctx->mode = filp->f_mode;
	nfs_file_set_open_context(filp, ctx);
	put_nfs_open_context(ctx);
	if (filp->f_mode & FMODE_WRITE)
		nfs_begin_data_update(inode);
	return 0;
no_state:
	printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__);
	put_nfs_open_context(ctx);
	return status;
}

/*
 * Release our state
 */
static int
nfs4_proc_file_release(struct inode *inode, struct file *filp)
{
	if (filp->f_mode & FMODE_WRITE)
		nfs_end_data_update(inode);
	nfs_file_clear_open_context(filp);
	return 0;
}

static int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server)
{
	struct nfs4_client *clp = server->nfs4_state;

	if (!clp || task->tk_status >= 0)
		return 0;
	switch (task->tk_status) {
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL, NULL);
		nfs4_schedule_state_recovery(clp);
		if (test_bit(NFS4CLNT_OK, &clp->cl_state))
			rpc_wake_up_task(task);
		task->tk_status = 0;
		return -EAGAIN;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		task->tk_status = 0;
		return -EAGAIN;
	case -NFS4ERR_OLD_STATEID:
		task->tk_status = 0;
		return -EAGAIN;
	}
	task->tk_status = nfs4_map_errors(task->tk_status);
	return 0;
}

int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs4_client *clp)
{
	DEFINE_WAIT(wait);
	sigset_t oldset;
	int interruptible, res = 0;

	might_sleep();

	rpc_clnt_sigmask(clnt, &oldset);
	interruptible = TASK_UNINTERRUPTIBLE;
	if (clnt->cl_intr)
		interruptible = TASK_INTERRUPTIBLE;
	prepare_to_wait(&clp->cl_waitq, &wait, interruptible);
	nfs4_schedule_state_recovery(clp);
	if (clnt->cl_intr && signalled())
		res = -ERESTARTSYS;
	else if (!test_bit(NFS4CLNT_OK, &clp->cl_state))
		schedule();
	finish_wait(&clp->cl_waitq, &wait);
	rpc_clnt_sigunmask(clnt, &oldset);
	return res;
}
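/*
 * Back off before retrying an operation: sleep for *timeout jiffies
 * (clamped to NFS4_POLL_RETRY_MIN..NFS4_POLL_RETRY_MAX), interruptibly if
 * the client allows signals, then double the timeout for the next attempt.
 */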
static int
nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	sigset_t oldset;
	int res = 0;

	might_sleep();

	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	rpc_clnt_sigmask(clnt, &oldset);
	if (clnt->cl_intr) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(*timeout);
		if (signalled())
			res = -ERESTARTSYS;
	} else {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(*timeout);
	}
	rpc_clnt_sigunmask(clnt, &oldset);
	*timeout <<= 1;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode,
		struct nfs4_exception *exception)
{
	struct nfs4_client *clp = server->nfs4_state;
	int ret = errorcode;

	exception->retry = 0;
	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
		ret = nfs4_wait_clnt_recover(server->client, clp);
		if (ret == 0)
			exception->retry = 1;
		break;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_DELAY:
		ret = nfs4_delay(server->client, &exception->timeout);
		if (ret == 0)
			exception->retry = 1;
		break;
	case -NFS4ERR_OLD_STATEID:
		if (ret == 0)
			exception->retry = 1;
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
}

int nfs4_proc_setclientid(struct nfs4_client *clp, u32 program,
		unsigned short port)
{
	static nfs4_verifier sc_verifier;
	static int initialized;

	struct nfs4_setclientid setclientid = {
		.sc_verifier = &sc_verifier,
		.sc_prog = program,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
		.rpc_argp = &setclientid,
		.rpc_resp = clp,
		.rpc_cred = clp->cl_cred,
	};

	if (!initialized) {
		struct timespec boot_time;
		u32 *p;

		initialized = 1;
		boot_time = CURRENT_TIME;
		p = (u32 *)sc_verifier.data;
		*p++ = htonl((u32)boot_time.tv_sec);
		*p = htonl((u32)boot_time.tv_nsec);
	}
	setclientid.sc_name_len = scnprintf(setclientid.sc_name,
			sizeof(setclientid.sc_name), "%s/%u.%u.%u.%u",
			clp->cl_ipaddr, NIPQUAD(clp->cl_addr.s_addr));
	setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
			sizeof(setclientid.sc_netid), "tcp");
	setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
			sizeof(setclientid.sc_uaddr), "%s.%d.%d",
			clp->cl_ipaddr, port >> 8, port & 255);

	return rpc_call_sync(clp->cl_rpcclient, &msg, 0);
}

int
nfs4_proc_setclientid_confirm(struct nfs4_client *clp)
{
	struct nfs_fsinfo fsinfo;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
		.rpc_argp = clp,
		.rpc_resp = &fsinfo,
		.rpc_cred = clp->cl_cred,
	};
	unsigned long now;
	int status;

	now = jiffies;
	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
	if (status == 0) {
		spin_lock(&clp->cl_lock);
		clp->cl_lease_time = fsinfo.lease_time * HZ;
		clp->cl_last_renewal = now;
		spin_unlock(&clp->cl_lock);
	}
	return status;
}

static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred,
		const nfs4_stateid *stateid)
{
	struct nfs4_delegreturnargs args = {
		.fhandle = NFS_FH(inode),
		.stateid = stateid,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
		.rpc_argp = &args,
		.rpc_cred = cred,
	};

	return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
}
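/*
 * Return a delegation to the server. On a stale or expired stateid, kick
 * off state recovery; other errors are retried via nfs4_handle_exception().
 */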
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs4_proc_delegreturn(inode, cred, stateid);
		switch (err) {
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_EXPIRED:
			nfs4_schedule_state_recovery(server->nfs4_state);
		case 0:
			return 0;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)

/*
 * sleep, with exponential backoff, and retry the LOCK operation.
 */
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule_timeout(timeout);
	timeout <<= 1;
	if (timeout > NFS4_LOCK_MAXTIMEOUT)
		return NFS4_LOCK_MAXTIMEOUT;
	return timeout;
}

static inline int
nfs4_lck_type(int cmd, struct file_lock *request)
{
	/* set lock type */
	switch (request->fl_type) {
	case F_RDLCK:
		return IS_SETLKW(cmd) ? NFS4_READW_LT : NFS4_READ_LT;
	case F_WRLCK:
		return IS_SETLKW(cmd) ? NFS4_WRITEW_LT : NFS4_WRITE_LT;
	case F_UNLCK:
		return NFS4_WRITE_LT;
	}
	BUG();
	return 0;
}

static inline uint64_t
nfs4_lck_length(struct file_lock *request)
{
	if (request->fl_end == OFFSET_MAX)
		return ~(uint64_t)0;
	return request->fl_end - request->fl_start + 1;
}

static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd,
		struct file_lock *request)
{
	struct inode *inode = state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs_lockargs arg = {
		.fh = NFS_FH(inode),
		.type = nfs4_lck_type(cmd, request),
		.offset = request->fl_start,
		.length = nfs4_lck_length(request),
	};
	struct nfs_lockres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
		.rpc_argp = &arg,
		.rpc_resp = &res,
		.rpc_cred = state->owner->so_cred,
	};
	struct nfs_lowner nlo;
	struct nfs4_lock_state *lsp;
	int status;

	down_read(&clp->cl_sem);
	nlo.clientid = clp->cl_clientid;
	down(&state->lock_sema);
	lsp = nfs4_find_lock_state(state, request->fl_owner);
	if (lsp)
		nlo.id = lsp->ls_id;
	else {
		spin_lock(&clp->cl_lock);
		nlo.id = nfs4_alloc_lockowner_id(clp);
		spin_unlock(&clp->cl_lock);
	}
	arg.u.lockt = &nlo;
	status = rpc_call_sync(server->client, &msg, 0);
	if (!status) {
		request->fl_type = F_UNLCK;
	} else if (status == -NFS4ERR_DENIED) {
		int64_t len, start, end;

		start = res.u.denied.offset;
		len = res.u.denied.length;
		end = start + len - 1;
		if (end < 0 || len == 0)
			request->fl_end = OFFSET_MAX;
		else
			request->fl_end = (loff_t)end;
		request->fl_start = (loff_t)start;
		request->fl_type = F_WRLCK;
		if (res.u.denied.type & 1)
			request->fl_type = F_RDLCK;
		request->fl_pid = 0;
		status = 0;
	}
	if (lsp)
		nfs4_put_lock_state(lsp);
	up(&state->lock_sema);
	up_read(&clp->cl_sem);
	return status;
}

static int nfs4_proc_getlk(struct nfs4_state *state, int cmd,
		struct file_lock *request)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(NFS_SERVER(state->inode),
				_nfs4_proc_getlk(state, cmd, request),
				&exception);
	} while (exception.retry);
	return err;
}
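/*
 * Release a byte-range lock: send LOCKU for the lock state held by this
 * owner (if any), bump the lock seqid, and on success update the local
 * POSIX lock table.
 */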
static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd,
		struct file_lock *request)
{
	struct inode *inode = state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs_lockargs arg = {
		.fh = NFS_FH(inode),
		.type = nfs4_lck_type(cmd, request),
		.offset = request->fl_start,
		.length = nfs4_lck_length(request),
	};
	struct nfs_lockres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
		.rpc_argp = &arg,
		.rpc_resp = &res,
		.rpc_cred = state->owner->so_cred,
	};
	struct nfs4_lock_state *lsp;
	struct nfs_locku_opargs luargs;
	int status = 0;

	down_read(&clp->cl_sem);
	down(&state->lock_sema);
	lsp = nfs4_find_lock_state(state, request->fl_owner);
	if (!lsp)
		goto out;
	/* We might have lost the locks! */
	if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
		luargs.seqid = lsp->ls_seqid;
		memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
		arg.u.locku = &luargs;
		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
		nfs4_increment_lock_seqid(status, lsp);
	}
	if (status == 0) {
		memcpy(&lsp->ls_stateid, &res.u.stateid,
				sizeof(lsp->ls_stateid));
		nfs4_notify_unlck(state, request, lsp);
	}
	nfs4_put_lock_state(lsp);
out:
	up(&state->lock_sema);
	if (status == 0)
		posix_lock_file(request->fl_file, request);
	up_read(&clp->cl_sem);
	return status;
}

static int nfs4_proc_unlck(struct nfs4_state *state, int cmd,
		struct file_lock *request)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(NFS_SERVER(state->inode),
				_nfs4_proc_unlck(state, cmd, request),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_do_setlk(struct nfs4_state *state, int cmd,
		struct file_lock *request, int reclaim)
{
	struct inode *inode = state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_lock_state *lsp;
	struct nfs_