svc_kudp.c
	}
	/*
	 * This is completely disgusting. If public is set it is
	 * a pointer to a structure whose first field is the address
	 * of the function to free that structure and any related
	 * stuff. (see rrokfree in nfs_xdr.c).
	 */
	if (xdrs->x_public) {
		(**((int (**)())xdrs->x_public))(xdrs->x_public);
	}
#ifdef RPCDEBUG
	rpc_debug(5, "svckudp_send done\n");
#endif
	return (stat);
}

/*
 * Return transport status.
 */
/*ARGSUSED*/
enum xprt_stat
svckudp_stat(xprt)
	SVCXPRT *xprt;
{
	return (XPRT_IDLE);
}

/*
 * Deserialize arguments.
 */
bool_t
svckudp_getargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	caddr_t args_ptr;
{
	return ((*xdr_args)(&(((struct udp_data *)(xprt->xp_p2))->ud_xdrin),
	    args_ptr));
}

bool_t
svckudp_freeargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	caddr_t args_ptr;
{
	register XDR *xdrs = &(((struct udp_data *)(xprt->xp_p2))->ud_xdrin);
	register struct udp_data *ud = (struct udp_data *)xprt->xp_p2;

	if (ud->ud_inmbuf) {
		m_freem(ud->ud_inmbuf);
	}
	ud->ud_inmbuf = (struct mbuf *)0;
	if (args_ptr) {
		xdrs->x_op = XDR_FREE;
		return ((*xdr_args)(xdrs, args_ptr));
	} else {
		return (TRUE);
	}
}

/*
 * The dup cacheing routines below provide a cache of recent
 * transaction id's. Rpc service routines can use this to detect
 * retransmissions and either ignore them, re-send a non-failure
 * response, or re-process.
 *
 * svckudp_dupenter() is the only routine that performs memory allocation
 *	and entry initialization.
 * svckudp_dupbusy() is the only routine that marks an entry busy
 *	(in-progress).
 * svckudp_dupsave() updates an entry with completion information.
 * svckudp_dup() searches for a duplicate and returns completion
 *	information if found.
 * svckudp_dupdone() is the only routine that makes a busy entry un-busy.
 */
struct dupreq {
	u_long		dr_xid;		/*  0: unique transaction ID */
	u_long		dr_addr;	/*  4: client address */
	u_long		dr_proc;	/*  8: proc within prog, vers */
	u_long		dr_flags;	/* 12: DUP_BUSY, DUP_DONE, DUP_FAIL */
	struct timeval	dr_time;	/* 16: time associated with req */
	struct dupreq	*dr_next;	/* 24: linked list of all entries */
	struct dupreq	*dr_chain;	/* 28: hash chain */
};

/*
 * dupcache_max is the number of cached items.  It is set
 * based on "system size".  It should be large enough to hold
 * transaction history long enough so that a given entry is still
 * around for a few retransmissions of that transaction.
 */
#define	MINDUPREQS	1024
#define	MAXDUPREQS	8192

struct dupreq **drhashtbl;	/* array of heads of hash lists */
int drhashsz;			/* number of hash lists */
int drhashszminus1;		/* hash modulus */

/*
 * cache support functions:
 *	duplicate request -> xid
 *	xid hash function (the remainder function samples all 32 bits of xid)
 *	xid -> head of hash list
 *	duplicate request -> head of hash list
 */
#define	REQTOXID(req)	(((struct udp_data *)((req)->rq_xprt->xp_p2))->ud_xid)
#define	XIDHASH(xid)	((xid) % drhashszminus1)
#define	XIDTOLIST(xid)	((struct dupreq *)(drhashtbl[XIDHASH(xid)]))
#define	REQTOLIST(dr)	XIDTOLIST((dr)->dr_xid)

/* SMP lock for dupreq hash list and dupreq event counters */
struct lock_t lk_rpcdupreq;
int ndupreqs;
int busyreqs;
int secondreqs;
int dupreqs_done;
int dupreqs_busy;
int dupreqs_fail;
int dupcache_max;		/* duplicate cache high water mark */

/* routine to compare dup cache entries */
#define	NOTDUP(dr, xid, req)	(dr->dr_xid != xid || \
				dr->dr_proc != req->rq_proc || \
				dr->dr_addr != \
				req->rq_xprt->xp_raddr.sin_addr.s_addr)

/*
 * drmru points to the head of a circular linked list in lru order.
 * drmru->dr_next == least recently entered (i.e. oldest) entry.
 * entries are not moved on this list when they are modified.
 */
struct dupreq *dupreqcache, *drmru;

svckudp_dupsave(req, transtime, transmark)
	register struct svc_req *req;
	struct timeval transtime;
	int transmark;
{
	register struct dupreq *dr;
	u_long xid;

	xid = REQTOXID(req);
	smp_lock(&lk_rpcdupreq, LK_RETRY);
	dr = XIDTOLIST(xid);
	while (dr != NULL) {
		if (NOTDUP(dr, xid, req)) {
			dr = dr->dr_chain;
			continue;
		}
		break;
	}
	if (dr == NULL)		/* if not there, put it there */
		svckudp_dupenter(req, transtime, transmark);
	else {			/* simply update time and completion status */
		dr->dr_time = transtime;
		dr->dr_flags = DUP_BUSY | transmark;
	}
	smp_unlock(&lk_rpcdupreq);
}

svckudp_dupenter(req, transtime, transmark)
	register struct svc_req *req;
	struct timeval transtime;
	int transmark;
{
	register struct dupreq *dr;

	/*
	 * NB:
	 * This routine must only be called while holding
	 * the lk_rpcdupreq lock.
	 */
	dr = drmru->dr_next;
	unhash(dr);
	drmru = dr;
	dr->dr_xid = REQTOXID(req);
	dr->dr_proc = req->rq_proc;
	dr->dr_addr = req->rq_xprt->xp_raddr.sin_addr.s_addr;
	dr->dr_time = transtime;
	dr->dr_flags = DUP_BUSY | transmark;
	dr->dr_chain = REQTOLIST(dr);
	REQTOLIST(dr) = dr;
}

svckudp_dup(req, ptime, pmark)
	register struct svc_req *req;
	struct timeval *ptime;
	int *pmark;
{
	register struct dupreq *dr;
	u_long xid;

	xid = REQTOXID(req);
	smp_lock(&lk_rpcdupreq, LK_RETRY);
	dr = XIDTOLIST(xid);
	while (dr != NULL) {
		if (NOTDUP(dr, xid, req)) {
			dr = dr->dr_chain;
			continue;
		}
		if (!(dr->dr_flags & DUP_BUSY))
			mprintf("RPC dup cache: lost entry\n");
		if (dr->dr_flags & DUP_DONE) {
			dupreqs_done++;
			*pmark = DUP_DONE;
			*ptime = dr->dr_time;
			smp_unlock(&lk_rpcdupreq);
			return (1);
		}
		if (dr->dr_flags & DUP_BUSY)
			dupreqs_busy++;
		else
			dupreqs_fail++;
		smp_unlock(&lk_rpcdupreq);
		return (0);
	}
	smp_unlock(&lk_rpcdupreq);
	return (0);
}

int avoid_seconds = 1;

svckudp_dupbusy(req)
	register struct svc_req *req;
{
	register struct dupreq *dr;
	register struct dupreq **dt;
	u_long xid;
	extern int prattached;

	xid = REQTOXID(req);
	smp_lock(&lk_rpcdupreq, LK_RETRY);
	/* First time through, allocate and init hash list and cache area */
	if (!dupreqcache) {
		int i;

		dupcache_max = 1024 * nfs_system_size();
		/* Prestoserve allows a server to cycle many more requests */
		if (prattached)		/* Prestoserve is present */
			dupcache_max *= 2;
		dupcache_max = MAX(MINDUPREQS, dupcache_max);
		dupcache_max = MIN(MAXDUPREQS, dupcache_max);
		drhashsz = dupcache_max / 16;
		drhashszminus1 = drhashsz - 1;
		smp_unlock(&lk_rpcdupreq);
		/*
		 * The following assumes that kmem_alloc will block
		 * until success and clear the space.  We give up the
		 * lock in case we block.
		 */
		kmem_alloc(dr, struct dupreq *,
		    sizeof(*dr) * dupcache_max, KM_RPC);
		kmem_alloc(dt, struct dupreq **,
		    sizeof(struct dupreq *) * drhashsz, KM_RPC);
		smp_lock(&lk_rpcdupreq, LK_RETRY);
		if (!dupreqcache) {	/* we got it first */
			for (i = 0; i < dupcache_max; i++)
				dr[i].dr_next = &(dr[i + 1]);
			dr[dupcache_max - 1].dr_next = dr;
			ndupreqs = dupcache_max;
			dupreqcache = dr;
			drmru = dr;
			drhashtbl = dt;
		} else {		/* someone beat us to it */
			kmem_free((caddr_t)dr, KM_RPC);
			kmem_free((caddr_t)dt, KM_RPC);
		}
	}
	dr = XIDTOLIST(xid);
	while (dr != NULL) {
		if (NOTDUP(dr, xid, req)) {
			dr = dr->dr_chain;
			continue;
		}
		if (dr->dr_flags & DUP_BUSY) {
			busyreqs++;
			smp_unlock(&lk_rpcdupreq);
			return (1);
		}
		if (avoid_seconds && (dr->dr_flags & DUP_DONE) &&
		    timepick->tv_sec - dr->dr_time.tv_sec < avoid_seconds) {
			secondreqs++;
			smp_unlock(&lk_rpcdupreq);
			return (1);
		}
		dr->dr_flags |= DUP_BUSY;
		smp_unlock(&lk_rpcdupreq);
		return (0);
	}
	svckudp_dupenter(req, *timepick, DUP_BUSY);
	smp_unlock(&lk_rpcdupreq);
	return (0);
}

svckudp_dupdone(req)
	register struct svc_req *req;
{
	register struct dupreq *dr;
	u_long xid;

	xid = REQTOXID(req);
	smp_lock(&lk_rpcdupreq, LK_RETRY);
	dr = XIDTOLIST(xid);
	while (dr != NULL) {
		if (NOTDUP(dr, xid, req)) {
			dr = dr->dr_chain;
			continue;
		}
		dr->dr_flags &= ~DUP_BUSY;
		smp_unlock(&lk_rpcdupreq);
		return (0);
	}
	smp_unlock(&lk_rpcdupreq);
	return (0);
}

static
unhash(dr)
	struct dupreq *dr;
{
	struct dupreq *drt;
	struct dupreq *drtprev = NULL;

	/*
	 * NB:
	 * This routine must only be called while holding
	 * the lk_rpcdupreq lock.
	 */
	drt = REQTOLIST(dr);
	while (drt != NULL) {
		if (drt == dr) {
			if (drtprev == NULL) {
				REQTOLIST(dr) = drt->dr_chain;
			} else {
				drtprev->dr_chain = drt->dr_chain;
			}
			return;
		}
		drtprev = drt;
		drt = drt->dr_chain;
	}
}
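/*
 * Illustrative caller sketch (not part of the original file): one way an
 * RPC service dispatch path might drive the dup cache routines above.
 * The service routine do_service_work() and its int result are made-up
 * names for illustration only; DUP_BUSY/DUP_DONE/DUP_FAIL, avoid_seconds,
 * and the current-time pointer timepick are used as this file already
 * uses them.
 */
extern struct timeval *timepick;	/* current time, per usage above */
extern int do_service_work();		/* hypothetical service routine */

static void
example_dispatch(req)
	register struct svc_req *req;
{
	struct timeval donetime;
	int mark;

	/*
	 * Retransmission of a request that is still in progress, or that
	 * completed within the last avoid_seconds: drop it silently.
	 */
	if (svckudp_dupbusy(req))
		return;

	/*
	 * A cached entry that already completed successfully could be
	 * answered by re-sending the earlier non-failure reply instead
	 * of re-processing the request.
	 */
	if (svckudp_dup(req, &donetime, &mark) && mark == DUP_DONE) {
		/* re-send the saved reply here */
		svckudp_dupdone(req);
		return;
	}

	/* First pass: do the real work, then record how it came out. */
	if (do_service_work(req) == 0)
		svckudp_dupsave(req, *timepick, DUP_DONE);
	else
		svckudp_dupsave(req, *timepick, DUP_FAIL);

	/* The transaction is no longer in progress. */
	svckudp_dupdone(req);
}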