
tcp.c — Linux kernel source code (C), page 1 of 5
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);

		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
			 size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, 0,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_stream_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk->sk_forward_alloc -= copy;
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)
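/*
 * Annotation (not part of the original file): do_tcp_sendpages() above is the
 * zero-copy transmit path.  Instead of copying the caller's data it takes a
 * reference on each page with get_page() and attaches it to the tail skb as a
 * paged fragment, coalescing with the previous fragment when
 * skb_can_coalesce() allows it.  tcp_sendpage() only uses this path when the
 * route supports scatter-gather and hardware checksumming; otherwise it falls
 * back to sock_no_sendpage(), which copies through the ordinary sendmsg path.
 */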
static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size)
{
	struct sock *sk = sock->sk;
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk, select_size(sk),
							   0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					   !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_stream_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
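/*
 * Annotation (not part of the original file): tcp_sendmsg() copies from the
 * user iovec either into the skb's linear area (while skb_tailroom() is
 * non-zero) or into per-socket cached pages (TCP_PAGE/TCP_OFF) attached as
 * fragments; select_size() sizes the linear part so that GSO-capable devices
 * get skbs whose payload lives entirely in page fragments.  tcp_recv_urg()
 * returns at most one byte of urgent data and never blocks, matching the BSD
 * MSG_OOB semantics described in the comment above.
 */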
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
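For orientation, the following is a minimal userspace sketch, not part of tcp.c, showing how the send-side paths above are typically entered: send(2)/sendmsg(2) ends up in tcp_sendmsg(), sendfile(2) on a TCP socket goes through tcp_sendpage()/do_tcp_sendpages(), and one byte written with MSG_OOB is later read back by the peer via recv(..., MSG_OOB), which is served by tcp_recv_urg(). The host 127.0.0.1, port 5001, and the file /etc/hostname are arbitrary example values, not anything from the kernel source.

/* Example userspace client exercising the kernel paths listed above. */
#include <arpa/inet.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	struct stat st;
	int sk, fd;

	sk = socket(AF_INET, SOCK_STREAM, 0);
	if (sk < 0) {
		perror("socket");
		return 1;
	}

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(5001);				/* example port */
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);		/* example host */

	if (connect(sk, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}

	/* Ordinary copy path: tcp_sendmsg().  MSG_MORE corks the first write,
	 * so tcp_push() is reached with TCP_NAGLE_CORK (see the code above). */
	send(sk, "header", 6, MSG_MORE);
	send(sk, "body", 4, 0);

	/* One byte of urgent data; the receiver reads it back with
	 * recv(..., MSG_OOB), handled by tcp_recv_urg(). */
	send(sk, "!", 1, MSG_OOB);

	/* Zero-copy path: sendfile(2) hands page references to
	 * tcp_sendpage()/do_tcp_sendpages() when the route supports
	 * scatter-gather and hardware checksumming. */
	fd = open("/etc/hostname", O_RDONLY);			/* example file */
	if (fd >= 0 && fstat(fd, &st) == 0)
		sendfile(sk, fd, NULL, st.st_size);

	if (fd >= 0)
		close(fd);
	close(sk);
	return 0;
}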
