📄 xprtsock.c
	} else
		base -= xdr->head[0].iov_len;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		remainder -= len;
		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		sent += err;
		base = 0;
	} else
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
		return sent;
	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
out:
	if (sent == 0)
		return err;
	if (err > 0)
		sent += err;
	return sent;
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static void xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
		/* Protect against races with write_space */
		spin_lock_bh(&xprt->transport_lock);

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
			xprt_wait_for_buffer_space(task);

		spin_unlock_bh(&xprt->transport_lock);
	} else
		/* Keep holding the socket if it is blocked */
		rpc_delay(task, HZ>>4);
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_pktdump("packet data:",
			req->rq_svec->iov_base,
			req->rq_svec->iov_len);

	req->rq_xtime = jiffies;
	status = xs_sendpages(transport->sock,
			      xs_addr(xprt),
			      xprt->addrlen, xdr,
			      req->rq_bytes_sent);

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	if (status >= 0) {
		task->tk_bytes_sent += status;
		if (status >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
		status = -EAGAIN;
	}

	switch (status) {
	case -ENETUNREACH:
	case -EPIPE:
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		break;
	case -EAGAIN:
		xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		break;
	}

	return status;
}

static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;

	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;
	unsigned int retry = 0;

	xs_encode_tcp_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
			req->rq_svec->iov_base,
			req->rq_svec->iov_len);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	while (1) {
		req->rq_xtime = jiffies;
		status = xs_sendpages(transport->sock,
					NULL, 0, xdr, req->rq_bytes_sent);

		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
				xdr->len - req->rq_bytes_sent, status);

		if (unlikely(status < 0))
			break;

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += status;
		task->tk_bytes_sent += status;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}

		status = -EAGAIN;
		if (retry++ > XS_SENDMSG_RETRY)
			break;
	}

	switch (status) {
	case -EAGAIN:
		xs_nospace(task);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		status = -ENOTCONN;
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		xprt_disconnect(xprt);
		break;
	}

	return status;
}

/**
 * xs_tcp_release_xprt - clean up after a tcp transmission
 * @xprt: transport
 * @task: rpc task
 *
 * This cleans up if an error causes us to abort the transmission of a request.
 * In this case, the socket may need to be reset in order to avoid confusing
 * the server.
 */
static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	if (task != xprt->snd_task)
		return;
	if (task == NULL)
		goto out_release;
	req = task->tk_rqstp;
	if (req->rq_bytes_sent == 0)
		goto out_release;
	if (req->rq_bytes_sent == req->rq_snd_buf.len)
		goto out_release;
	set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
out_release:
	xprt_release_xprt(xprt, task);
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; i.e., no DRC state remains
 * on the server we want to save.
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;

	if (!sk)
		goto clear_close_wait;

	dprintk("RPC: xs_close xprt %p\n", xprt);

	write_lock_bh(&sk->sk_callback_lock);
	transport->inet = NULL;
	transport->sock = NULL;

	sk->sk_user_data = NULL;
	sk->sk_data_ready = transport->old_data_ready;
	sk->sk_state_change = transport->old_state_change;
	sk->sk_write_space = transport->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
clear_close_wait:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	smp_mb__after_clear_bit();
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC: xs_destroy xprt %p\n", xprt);

	cancel_rearming_delayed_work(&transport->connect_worker);

	xprt_disconnect(xprt);
	xs_close(xprt);
	xs_free_peer_addresses(xprt);
	kfree(xprt->slot);
	kfree(xprt);
	module_put(THIS_MODULE);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid;
	__be32 *xp;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	if (xprt->shutdown)
		goto dropit;

	repsize = skb->len - sizeof(struct udphdr);
	if (repsize < 4) {
		dprintk("RPC: impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
		goto out_unlock;

	/* Something worked... */
	dst_confirm(skb->dst);

	xprt_adjust_cwnd(task, copied);
	xprt_update_rtt(task);
	xprt_complete_rqst(task, copied);

out_unlock:
	spin_unlock(&xprt->transport_lock);
dropit:
	skb_free_datagram(sk, skb);
out:
	read_unlock(&sk->sk_callback_lock);
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	size_t len, used;
	char *p;

	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;

	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
	else
		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
	transport->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(transport->tcp_reclen < 4)) {
		dprintk("RPC: invalid TCP record fragment length\n");
		xprt_disconnect(xprt);
		return;
	}
	dprintk("RPC: reading TCP record fragment of length %d\n",
			transport->tcp_reclen);
}

static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
{
	if (transport->tcp_offset == transport->tcp_reclen) {
		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
		transport->tcp_offset = 0;
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
			transport->tcp_flags |= TCP_RCV_COPY_XID;
			transport->tcp_copied = 0;
		}
	}
}

static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
	dprintk("RPC: reading XID (%Zu bytes)\n", len);
	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;
	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
	transport->tcp_flags |= TCP_RCV_COPY_DATA;
	transport->tcp_copied = 4;
	dprintk("RPC: reading reply for XID %08x\n",
			ntohl(transport->tcp_xid));
	xs_tcp_check_fraghdr(transport);
}

static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
	if (!req) {
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
		dprintk("RPC: XID %08x request not found!\n",
				ntohl(transport->tcp_xid));
		spin_unlock(&xprt->transport_lock);
		return;
	}

	rcvbuf = &req->rq_private_buf;
	len = desc->count;
	if (len > transport->tcp_reclen - transport->tcp_offset) {
		struct xdr_skb_reader my_desc;

		len = transport->tcp_reclen - transport->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  &my_desc, xdr_skb_read_bits);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  desc, xdr_skb_read_bits);

	if (r > 0) {
		transport->tcp_copied += r;
		transport->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off TCP_RCV_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
		dprintk("RPC: XID %08x truncated request\n",
				ntohl(transport->tcp_xid));
		dprintk("RPC: xprt = %p, tcp_copied = %lu, "
				"tcp_offset = %u, tcp_reclen = %u\n",
				xprt, transport->tcp_copied,
				transport->tcp_offset, transport->tcp_reclen);
		goto out;
	}

	dprintk("RPC: XID %08x read %Zd bytes\n",
			ntohl(transport->tcp_xid), r);
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
			transport->tcp_offset, transport->tcp_reclen);

	if (transport->tcp_copied == req->rq_private_buf.buflen)
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	else if (transport->tcp_offset == transport->tcp_reclen) {
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	}

out:
	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
	spin_unlock(&xprt->transport_lock);
	xs_tcp_check_fraghdr(transport);
}
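The framing that xs_encode_tcp_record_marker() writes and xs_tcp_read_fraghdr() parses is the standard ONC RPC record marking: each record fragment on the TCP stream is preceded by a 4-byte big-endian word whose top bit flags the last fragment and whose remaining 31 bits give the fragment length. The short userspace sketch below illustrates just that encoding; the constant and helper names are illustrative stand-ins assumed from the record-marking format, not identifiers taken from this file.

/*
 * Minimal userspace sketch (not kernel code) of RPC record marking:
 * top bit = last-fragment flag, low 31 bits = fragment length.
 * LAST_FRAGMENT and FRAGMENT_SIZE_MASK mirror what this file uses as
 * RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK (assumed values).
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define LAST_FRAGMENT      (1U << 31)
#define FRAGMENT_SIZE_MASK (~LAST_FRAGMENT)

/* Build the on-the-wire marker for a fragment of 'len' bytes. */
static uint32_t encode_marker(uint32_t len, int is_last)
{
	return htonl((is_last ? LAST_FRAGMENT : 0) | (len & FRAGMENT_SIZE_MASK));
}

/* Split a received marker back into fragment length and last-fragment flag. */
static void decode_marker(uint32_t wire, uint32_t *len, int *is_last)
{
	uint32_t host = ntohl(wire);

	*is_last = (host & LAST_FRAGMENT) != 0;
	*len = host & FRAGMENT_SIZE_MASK;
}

int main(void)
{
	uint32_t wire = encode_marker(1432, 1);
	uint32_t len;
	int last;

	decode_marker(wire, &len, &last);
	printf("len=%u last=%d\n", (unsigned) len, last);	/* prints "len=1432 last=1" */
	return 0;
}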
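xs_tcp_send_request() also shows the general pattern for pushing a record into a socket that may accept only part of it: keep a running count of bytes already sent (rq_bytes_sent), resubmit from that offset on the next pass, and treat a blocked socket as "wait and retry" rather than an error. The sketch below is a userspace analogue of that loop under the assumption of a nonblocking stream socket; the function and variable names are hypothetical, not part of the kernel API.

/*
 * Userspace analogue of the send loop in xs_tcp_send_request():
 * '*sent' plays the role of rq_bytes_sent, partial progress is kept
 * across calls, and EAGAIN means the caller should wait for the
 * socket to become writable and call again.
 */
#include <errno.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Returns 0 when the whole record is out, -EAGAIN if the socket is
 * currently full, or a negative errno on a real failure. */
static int send_record(int fd, const char *buf, size_t len, size_t *sent)
{
	while (*sent < len) {
		ssize_t n = send(fd, buf + *sent, len - *sent, MSG_NOSIGNAL);

		if (n < 0) {
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				return -EAGAIN;	/* caller waits for writable */
			return -errno;		/* connection-level error */
		}
		*sent += n;			/* keep partial progress */
	}
	*sent = 0;				/* record done; reset like rq_bytes_sent */
	return 0;
}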