📄 nfs_socket.c
字号:
rep->r_prev->r_next = rep->r_next; rep->r_next->r_prev = rep->r_prev; splx(s); /* * Decrement the outstanding request count. */ if (rep->r_flags & R_SENT) { rep->r_flags &= ~R_SENT; /* paranoia */ nmp->nm_sent -= NFS_CWNDSCALE; } /* * If there was a successful reply and a tprintf msg. * tprintf a response. */ if (!error && (rep->r_flags & R_TPRINTFMSG)) nfs_msg(rep->r_procp, nmp->nm_mountp->mnt_stat.f_mntfromname, "is alive again"); mrep = rep->r_mrep; md = rep->r_md; dpos = rep->r_dpos; if (error) { m_freem(rep->r_mreq); free((caddr_t)rep, M_NFSREQ); return (error); } /* * break down the rpc header and check if ok */ nfsm_dissect(tl, u_long *, 3*NFSX_UNSIGNED); if (*tl++ == rpc_msgdenied) { if (*tl == rpc_mismatch) error = EOPNOTSUPP; else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) { if (*tl == rpc_rejectedcred && failed_auth == 0) { failed_auth++; mheadend->m_next = (struct mbuf *)0; m_freem(mrep); m_freem(rep->r_mreq); goto kerbauth; } else error = EAUTH; } else error = EACCES; m_freem(mrep); m_freem(rep->r_mreq); free((caddr_t)rep, M_NFSREQ); return (error); } /* * skip over the auth_verf, someday we may want to cache auth_short's * for nfs_reqhead(), but for now just dump it */ if (*++tl != 0) { i = nfsm_rndup(fxdr_unsigned(long, *tl)); nfsm_adv(i); } nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); /* 0 == ok */ if (*tl == 0) { nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); if (*tl != 0) { error = fxdr_unsigned(int, *tl); m_freem(mrep); if ((nmp->nm_flag & NFSMNT_NQNFS) && error == NQNFS_TRYLATER) { error = 0; waituntil = time.tv_sec + trylater_delay; while (time.tv_sec < waituntil) (void) tsleep((caddr_t)&lbolt, PSOCK, "nqnfstry", 0); trylater_delay *= nfs_backoff[trylater_cnt]; if (trylater_cnt < 7) trylater_cnt++; goto tryagain; } /* * If the File Handle was stale, invalidate the * lookup cache, just in case. 
*/
			/*
			 * (tail of nfs_request error path, continued from
			 * above)  A stale file handle means cached
			 * name->vnode translations for this file may be
			 * bogus, so flush them just in case.
			 */
			if (error == ESTALE)
				cache_purge(vp);
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}

		/*
		 * For nqnfs, get any lease piggybacked on the reply and
		 * hand it to the client-side lease bookkeeping.
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			if (*tl) {
				np = VTONFS(vp);
				nqlflag = fxdr_unsigned(int, *tl);
				nfsm_dissect(tl, u_long *, 4*NFSX_UNSIGNED);
				cachable = fxdr_unsigned(int, *tl++);
				/* lease duration is relative to request time */
				reqtime += fxdr_unsigned(int, *tl++);
				if (reqtime > time.tv_sec) {
					fxdr_hyper(tl, &frev);
					nqnfs_clientlease(nmp, np, nqlflag,
					    cachable, reqtime, frev);
				}
			}
		}
		/* Success: return the reply mbufs and the parse position. */
		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	/* RPC reply status was not "ok"; nothing usable in the reply. */
	m_freem(mrep);
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	error = EPROTONOSUPPORT;
nfsmout:
	return (error);
}

/*
 * Generate the rpc reply header.
 * The siz arg. is used to decide if adding a cluster is worthwhile.
 * Builds a MSG_DENIED reply for rpc-level failures (version mismatch,
 * rejected Kerberos cred) or a MSG_ACCEPTED reply with the nfs error
 * status otherwise; for nqnfs, a lease may be piggybacked on success.
 * Returns the reply chain head in *mrq, and the mbuf/offset at which the
 * caller should continue appending reply data in *mbp / *bposp.
 */
nfs_rephead(siz, nd, err, cache, frev, mrq, mbp, bposp)
	int siz;		/* estimated size of reply data to follow */
	struct nfsd *nd;	/* request state: xid, nqnfs lease fields */
	int err;		/* error to encode; 0 ok, VNOVAL = no status */
	int cache;		/* nqnfs cachable flag for the lease */
	u_quad_t *frev;		/* nqnfs file revision for the lease */
	struct mbuf **mrq;	/* out: head of reply mbuf chain */
	struct mbuf **mbp;	/* out: mbuf to continue building in */
	caddr_t *bposp;		/* out: current build position in *mbp */
{
	register u_long *tl;
	register struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb, *mb2;

	MGETHDR(mreq, M_WAIT, MT_DATA);
	mb = mreq;
	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= MINCLSIZE) {
		MCLGET(mreq, M_WAIT);
	} else
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_long *);
	/* reserve 6 words; the denied/auth paths shrink this below */
	mreq->m_len = 6*NFSX_UNSIGNED;
	bpos = ((caddr_t)tl)+mreq->m_len;
	*tl++ = nd->nd_retxid;
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || err == NQNFS_AUTHERR) {
		/* MSG_DENIED reply */
		*tl++ = rpc_msgdenied;
		if (err == NQNFS_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = rpc_rejectedcred;
			/* auth errors use one word fewer than reserved */
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			/* low and high acceptable rpc versions, both 2 */
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);
		}
	} else {
		/* MSG_ACCEPTED reply with a null (AUTH_NONE) verifier */
		*tl++ = rpc_msgaccepted;
		*tl++ = 0;
		*tl++ = 0;
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			/* mismatch info: low/high supported versions */
			nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);	/* someday 3 */
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		default:
			*tl = 0;
			/* VNOVAL means the caller wants no status word */
			if (err != VNOVAL) {
				nfsm_build(tl, u_long *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap[err - 1]);
				else
					*tl = 0;
			}
			break;
		};
	}
	/*
	 * For nqnfs, piggyback lease as requested.
	 */
	if (nd->nd_nqlflag != NQL_NOVAL && err == 0) {
		if (nd->nd_nqlflag) {
			/* flag, cachable, duration, 64-bit file revision */
			nfsm_build(tl, u_long *, 5*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(nd->nd_nqlflag);
			*tl++ = txdr_unsigned(cache);
			*tl++ = txdr_unsigned(nd->nd_duration);
			txdr_hyper(frev, tl);
		} else {
			/*
			 * NOTE(review): this branch is reached only when
			 * nd_nqlflag == 0, so the panic below can never
			 * fire -- dead code, kept here unchanged.
			 */
			if (nd->nd_nqlflag != 0)
				panic("nqreph");
			nfsm_build(tl, u_long *, NFSX_UNSIGNED);
			*tl = 0;
		}
	}
	*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != VNOVAL)
		nfsstats.srvrpc_errs++;
	return (0);
}

/*
 * Nfs timer routine
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
nfs_timer(arg)
	void *arg;	/* unused; required by the timeout(9) signature */
{
	register struct nfsreq *rep;
	register struct mbuf *m;
	register struct socket *so;
	register struct nfsmount *nmp;
	register int timeo;
	static long lasttime = 0;
	int s, error;

	s = splnet();
	for (rep = nfsreqh.r_next; rep != &nfsreqh; rep = rep->r_next) {
		nmp = rep->r_nmp;
		/* Skip requests already answered or soft-terminated. */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		/* A pending signal on an intr mount cancels the request. */
		if (nfs_sigintr(nmp, rep, rep->r_procp)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			/*
			 * Request is being timed: bump the tick count and
			 * compare against either the fixed ("dumb") timeout
			 * or the smoothed per-procedure RTO, scaled up by
			 * the mount-wide backoff for repeated timeouts.
			 */
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts < 8)
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		     rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_procp,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		/*
		 * Stream sockets: never retransmit here, just back off the
		 * counter (the transport does its own retransmission).
		 */
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;
		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		   ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		   (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    (struct mbuf *)0, (struct mbuf *)0);
			else
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    nmp->nm_nam, (struct mbuf *)0);
			if (error) {
				/* soft errors on the socket are ignored */
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2
				 * (multiplicative decrease, as in TCP).
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}
	/*
	 * Call the nqnfs server timer once a second to handle leases.
	 */
	if (lasttime != time.tv_sec) {
		lasttime = time.tv_sec;
		nqnfs_serverd();
	}
	splx(s);
	/* reschedule ourselves */
	timeout(nfs_timer, (void *)0, hz / NFS_HZ);
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 * Returns EINTR if the request was soft-terminated or an unmasked,
 * un-ignored interrupt-class signal is pending; 0 otherwise.
 */
nfs_sigintr(nmp, rep, p)
	struct nfsmount *nmp;
	struct nfsreq *rep;
	register struct proc *p;
{

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* non-interruptible mounts never abort on signals */
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (p && p->p_siglist &&
	    (((p->p_siglist & ~p->p_sigmask) & ~p->p_sigignore) &
	    NFSINT_SIGMASK))
		return (EINTR);
	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
*/nfs_sndlock(flagp, rep) register int *flagp; struct nfsreq *rep;{ struct proc *p; int slpflag = 0, slptimeo = 0; if (rep) { p = rep->r_procp; if (rep->r_nmp->nm_flag & NFSMNT_INT) slpflag = PCATCH; } else p = (struct proc *)0; while (*flagp & NFSMNT_SNDLOCK) { if (nfs_sigintr(rep->r_nmp, rep, p)) return (EINTR); *flagp |= NFSMNT_WANTSND; (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1), "nfsndlck", slptimeo); if (slpflag == PCATCH) { slpflag = 0; slptimeo = 2 * hz; } } *flagp |= NFSMNT_SNDLOCK; return (0);}/* * Unlock the stream socket for others. */voidnfs_sndunlock(flagp) register int *flagp;{ if ((*flagp & NFSMNT_SNDLOCK) == 0) panic("nfs sndunlock"); *flagp &= ~NFSMNT_SNDLOCK; if (*flagp & NFSMNT_WANTSND) { *flagp &= ~NFSMNT_WANTSND; wakeup((caddr_t)flagp); }}nfs_rcvlock(rep) register struct nfsreq *rep;{ register int *flagp = &rep->r_nmp->nm_flag; int slpflag, slptimeo = 0; if (*flagp & NFSMNT_INT) slpflag = PCATCH; else slpflag = 0; while (*flagp & NFSMNT_RCVLOCK) { if (nfs_sigintr(rep->r_nmp, rep, rep->r_procp)) return (EINTR); *flagp |= NFSMNT_WANTRCV; (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1), "nfsrcvlk", slptimeo); if (slpflag == PCATCH) { slpflag = 0; slptimeo = 2 * hz; } } *flagp |= NFSMNT_RCVLOCK; return (0);}/* * Unlock the stream socket for others. */voidnfs_rcvunlock(flagp) register int *flagp;{ if ((*flagp & NFSMNT_RCVLOCK) == 0) panic("nfs rcvunlock"); *flagp &= ~NFSMNT_RCVLOCK; if (*flagp & NFSMNT_WANTRCV) { *flagp &= ~NFSMNT_WANTRCV; wakeup((caddr_t)flagp); }}/* * Check for badly aligned mbuf data areas and * realign data in an mbuf list by copying the data areas up, as required. */voidnfs_realign(m, hsiz) register struct mbuf *m; int hsiz;{ register struct mbuf *m2; register int siz, mlen, olen; register caddr_t tcp, fcp; struct mbuf *mnew; while (m) { /* * This never happens for UDP, rarely happens for TCP * but frequently happens for iso transport. 
*/ if ((m->m_len & 0x3) || (mtod(m, int) & 0x3)) { olen = m->m_len; fcp = mtod(m, caddr_t); if ((int)fcp & 0x3) { m->m_flags &= ~M_PKTHDR; if (m->m_flags & M_EXT) m->m_data = m->m_ext.ext_buf + ((m->m_ext.ext_size - olen) & ~0x3); else m->m_data = m->m_dat; } m->m_len = 0; tcp = mtod(m, caddr_t); mnew = m; m2 = m->m_next; /* * If possible, only put the first invariant part * of the RPC header in the first mbuf. */ mlen = M_TRAILINGSPACE(m); if (olen <= hsiz && mlen > hsiz)
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -