⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 tcp.c

📁 GNU Hurd 源代码
💻 C
📖 第 1 页 / 共 4 页
字号:
	  	return(0);
	}

	/* NOTE(review): this is the tail of tcp_readable(); its prologue
	 * (declarations of skb/counted/amount/sum and the empty-queue check)
	 * lies above this chunk.  It walks the receive queue and counts how
	 * many bytes a reader could consume right now.
	 */
	counted = sk->tp_pinfo.af_tcp.copied_seq;	/* Where we are at the moment */
	amount = 0;

	/* Do until a push or until we are out of data. */
	do {
		/* Found a hole so stops here. */
		if (before(counted, TCP_SKB_CB(skb)->seq))	/* should not happen */
			break;

		/* Length - header but start from where we are up to
		 * avoid overlaps.
		 */
		sum = skb->len - (counted - TCP_SKB_CB(skb)->seq);
		if (sum >= 0) {
			/* Add it up, move on. */
			amount += sum;
			counted += sum;
			/* A SYN consumes one sequence number but carries no
			 * readable byte, so advance the cursor past it.
			 */
			if (skb->h.th->syn)
				counted++;
		}

		/* Don't count urg data ... but do it in the right place!
		 * Consider: "old_data (ptr is here) URG PUSH data"
		 * The old code would stop at the first push because
		 * it counted the urg (amount==1) and then does amount--
		 * *after* the loop.  This means tcp_readable() always
		 * returned zero if any URG PUSH was in the queue, even
		 * though there was normal data available. If we subtract
		 * the urg data right here, we even get it to work for more
		 * than one URG PUSH skb without normal data.
		 * This means that poll() finally works now with urg data
		 * in the queue.  Note that rlogin was never affected
		 * because it doesn't use poll(); it uses two processes
		 * and a blocking read().  And the queue scan in tcp_read()
		 * was correct.  Mike <pall@rz.uni-karlsruhe.de>
		 */
		/* Don't count urg data. */
		if (skb->h.th->urg)
			amount--;
#if 0
		if (amount && skb->h.th->psh) break;
#endif
		skb = skb->next;
	} while(skb != (struct sk_buff *)&sk->receive_queue);

	SOCK_DEBUG(sk, "got %lu bytes.\n",amount);
	return(amount);
}

/*
 * LISTEN is a special case for poll..
 *
 * Report POLLIN|POLLRDNORM as soon as an established connection is
 * waiting to be accept()ed; the socket lock is held only around the
 * request-queue lookup.
 */
static unsigned int tcp_listen_poll(struct sock *sk, poll_table *wait)
{
	struct open_request *req, *dummy;

	lock_sock(sk);
	req = tcp_find_established(&sk->tp_pinfo.af_tcp, &dummy);
	release_sock(sk);
	if (req)
		return POLLIN | POLLRDNORM;
	return 0;
}

/*
 *	Compute minimal free write space needed to queue new packets.
 */
/* Writable once at least half of the memory currently charged to the
 * send buffer could be refilled.
 */
#define tcp_min_write_space(__sk) \
	(atomic_read(&(__sk)->wmem_alloc) / 2)

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file * file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	poll_wait(file, sk->sleep, wait);
	if (sk->state == TCP_LISTEN)
		return tcp_listen_poll(sk, wait);

	mask = 0;
	if (sk->err)
		mask = POLLERR;
	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 */
	if (sk->shutdown & RCV_SHUTDOWN)
		mask |= POLLHUP;

	/* Connected? */
	if ((1 << sk->state) & ~(TCPF_SYN_SENT|TCPF_SYN_RECV)) {
		/* Readable if unread data exists, unless the only unread
		 * byte is out-of-band urgent data and OOB is not delivered
		 * inline.
		 */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq+1 ||
		     sk->urginline || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->shutdown & SEND_SHUTDOWN)) {
			if (sock_wspace(sk) >= tcp_min_write_space(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				sk->socket->flags |= SO_NOSPACE;
			}
		}

		if (tp->urg_data & URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}

/*
 *	Socket write_space callback.
 *	This (or rather the sock_wake_async) should agree with poll.
 */
void tcp_write_space(struct sock *sk)
{
	if (sk->dead)
		return;

	/* Wake synchronous sleepers unconditionally; async (SIGIO)
	 * notification is only sent once a useful amount of space (see
	 * tcp_min_write_space) is free, so it agrees with tcp_poll().
	 * NOTE(review): the '2' selector presumably means "write space
	 * available" -- confirm against sock_wake_async().
	 */
	wake_up_interruptible(sk->sleep);
	if (sock_wspace(sk) >=
	    tcp_min_write_space(sk))
		sock_wake_async(sk->socket, 2);
}

#ifdef _HURD_

/* Under the Hurd there is no ioctl entry point in this table; the
 * TIOCINQ query is exposed as tcp_tiocinq() instead.
 */
#define tcp_ioctl 0

/* Store the number of readable bytes in *amount; EINVAL on a
 * listening socket.
 */
error_t
tcp_tiocinq(struct sock *sk, mach_msg_type_number_t *amount)
{
  if (sk->state == TCP_LISTEN)
    return EINVAL;

  lock_sock(sk);
  *amount = tcp_readable(sk);
  release_sock(sk);
  return 0;
}

#else

/* Answer the byte-count ioctls (TIOCINQ/SIOCATMARK/TIOCOUTQ) and copy
 * the result out to user space via put_user().
 */
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int answ;

	switch(cmd) {
	case TIOCINQ:
#ifdef FIXME	/* FIXME: */
	case FIONREAD:
#endif
		if (sk->state == TCP_LISTEN)
			return(-EINVAL);
		lock_sock(sk);
		answ = tcp_readable(sk);
		release_sock(sk);
		break;
	case SIOCATMARK:
		{
			struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

			/* At the urgent mark iff urgent data is pending and
			 * the read cursor sits exactly on it.
			 */
			answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
			break;
		}
	case TIOCOUTQ:
		if (sk->state == TCP_LISTEN)
			return(-EINVAL);
		answ = sock_wspace(sk);
		break;
	default:
		return(-ENOIOCTLCMD);
	};

	return put_user(answ, (int *)arg);
}

#endif

/*
 *	Wait for a socket to get into the connected state
 *
 *	Note: must be called with the socket locked.
 */
/* Returns 0 once ESTABLISHED/CLOSE_WAIT is reached; otherwise the
 * pending socket error, -EPIPE (with SIGPIPE unless MSG_NOSIGNAL) if
 * the connection died, -EAGAIN for MSG_DONTWAIT, or -ERESTARTSYS on a
 * signal.
 */
static int wait_for_tcp_connect(struct sock * sk, int flags)
{
	struct task_struct *tsk = current;
	struct wait_queue wait = { tsk, NULL };

	while((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		if(sk->err)
			return sock_error(sk);
		if((1 << sk->state) &
		   ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
			if(sk->keepopen && !(flags&MSG_NOSIGNAL))
				send_sig(SIGPIPE, tsk, 0);
			return -EPIPE;
		}
		if(flags & MSG_DONTWAIT)
			return -EAGAIN;
		if(signal_pending(tsk))
			return -ERESTARTSYS;

		/* Mark ourselves sleeping and queue BEFORE the final
		 * re-check, so a wakeup arriving between the test and
		 * schedule() is not lost.
		 */
		tsk->state = TASK_INTERRUPTIBLE;
		add_wait_queue(sk->sleep, &wait);
		release_sock(sk);

		if (((1 << sk->state) & ~(TCPF_ESTABLISHED|TCPF_CLOSE_WAIT)) &&
		    sk->err == 0)
			schedule();

		tsk->state = TASK_RUNNING;
		remove_wait_queue(sk->sleep, &wait);
		lock_sock(sk);
	}
	return 0;
}

/* True while the bytes charged to this socket's send buffer are below
 * its configured limit.
 */
static inline int tcp_memory_free(struct sock *sk)
{
	return atomic_read(&sk->wmem_alloc) < sk->sndbuf;
}

/*
 *	Wait for more memory for a socket
 *
 *	Drops the socket lock while sleeping; wakes up on free memory,
 *	send shutdown, socket error, or a pending signal.
 */
static void wait_for_tcp_memory(struct sock * sk)
{
	release_sock(sk);
	if (!tcp_memory_free(sk)) {
		struct wait_queue wait = { current, NULL };

		sk->socket->flags &= ~SO_NOSPACE;
		add_wait_queue(sk->sleep, &wait);
		for (;;) {
			if (signal_pending(current))
				break;
			/* Set TASK_INTERRUPTIBLE before re-testing the
			 * conditions so a concurrent wakeup is not missed.
			 */
			current->state = TASK_INTERRUPTIBLE;
			if (tcp_memory_free(sk))
				break;
			if (sk->shutdown & SEND_SHUTDOWN)
				break;
			if (sk->err)
				break;
			schedule();
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(sk->sleep, &wait);
	}
	lock_sock(sk);
}

/*
 * Wait for a buffer.
 *
 * Sleeps for one wakeup on the socket's wait queue with the socket
 * lock dropped; used by tcp_do_sendmsg() to serialize parallel
 * writers appending to a partially built skb.
 */
static int wait_for_buffer(struct sock *sk)
{
	struct wait_queue wait = { current, NULL };

	release_sock(sk);
	add_wait_queue(sk->sleep, &wait);
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	lock_sock(sk);
	return 0;
}

/* When all user supplied data has been queued set the PSH bit */
/* NOTE: expands against the seglen/iovlen locals of its call site. */
#define PSH_NEEDED (seglen == 0 && iovlen == 0)

/*
 *	This routine copies from a user buffer into a socket,
 *	and starts the transmit system.
 *
 *	Note: must be called with the socket locked.
 */
/*
 * NOTE(review): this chunk ends mid-function; the remainder of
 * tcp_do_sendmsg() (the transmit/queue path and the labels
 * out/do_sock_err/do_shutdown/do_fault2/do_interrupted referenced
 * below) lies beyond this view.
 */
int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
{
	struct iovec *iov;
	struct tcp_opt *tp;
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now;
	int err, copied;

	lock_sock(sk);

	err = 0;
	tp = &(sk->tp_pinfo.af_tcp);

	/* Wait for a connection to finish. */
	flags = msg->msg_flags;
	if ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if((err = wait_for_tcp_connect(sk, flags)) != 0)
			goto out;

	/* This should be in poll */
	sk->socket->flags &= ~SO_NOSPACE; /* clear SIGIO XXX */

	mss_now = tcp_current_mss(sk);

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	while(--iovlen >= 0) {
		int seglen=iov->iov_len;
		unsigned char * from=iov->iov_base;

		iov++;

		while(seglen > 0) {
			int copy, tmp, queue_it, psh;

			if (err)
				goto do_fault2;

			/* Stop on errors. */
			if (sk->err)
				goto do_sock_err;

			/* Make sure that we are established. */
			if (sk->shutdown & SEND_SHUTDOWN)
				goto do_shutdown;

			/* Now we need to check if we have a half
			 * built packet we can tack some data onto.
			 */
			if (tp->send_head && !(flags & MSG_OOB)) {
				skb = sk->write_queue.prev;
				copy = skb->len;
				/* If the remote does SWS avoidance we should
				 * queue the best we can if not we should in
				 * fact send multiple packets...
				 * A method for detecting this would be most
				 * welcome.
				 */
				if (skb_tailroom(skb) > 0 &&
				    (mss_now - copy) > 0 &&
				    tp->snd_nxt < TCP_SKB_CB(skb)->end_seq) {
					/* Non-multiple-of-4 tail: the running
					 * checksum cannot be extended, so the
					 * odd path below recomputes it over
					 * the whole skb after copying.
					 */
					int last_byte_was_odd = (copy % 4);

					/*
					 * Check for parallel writers sleeping in user access.
					 * Only one writer may append to a partial skb at a
					 * time; later arrivals sleep in wait_for_buffer()
					 * and retry.
					 */
					if (tp->partial_writers++ > 0) {
						wait_for_buffer(sk);
						tp->partial_writers--;
						continue;
					}

					copy = mss_now - copy;
					if(copy > skb_tailroom(skb))
						copy = skb_tailroom(skb);
					if(copy > seglen)
						copy = seglen;
					if(last_byte_was_odd) {
						if(copy_from_user(skb_put(skb, copy),
								  from, copy))
							err = -EFAULT;
						skb->csum = csum_partial(skb->data,
									 skb->len, 0);
					} else {
						skb->csum =
							csum_and_copy_from_user(
							from, skb_put(skb, copy),
							copy, skb->csum, &err);
					}
					/*
					 * FIXME: the *_user functions should
					 *	  return how much data was
					 *	  copied before the fault
					 *	  occurred and then a partial
					 *	  packet with this data should
					 *	  be sent.  Unfortunately
					 *	  csum_and_copy_from_user doesn't
					 *	  return this information.
					 *	  ATM it might send partly zeroed
					 *	  data in this case.
					 */
					tp->write_seq += copy;
					TCP_SKB_CB(skb)->end_seq += copy;
					from += copy;
					copied += copy;
					seglen -= copy;
					if (PSH_NEEDED)
						TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;

					if (--tp->partial_writers > 0)
						wake_up_interruptible(sk->sleep);

					continue;
				}
			}

			/* We also need to worry about the window.  If
			 * window < 1/2 the maximum window we've seen
			 * from this host, don't use it.  This is
			 * sender side silly window prevention, as
			 * specified in RFC1122.  (Note that this is
			 * different than earlier versions of SWS
			 * prevention, e.g. RFC813.).  What we
			 * actually do is use the whole MSS.  Since
			 * the results in the right edge of the packet
			 * being outside the window, it will be queued
			 * for later rather than sent.
			 */
			psh = 0;
			copy = tp->snd_wnd - (tp->snd_nxt - tp->snd_una);
			if(copy > (tp->max_window >> 1)) {
				copy = min(copy, mss_now);
				psh = 1;
			} else {
				copy = mss_now;
			}
			if(copy > seglen)
				copy = seglen;

			/* Determine how large of a buffer to allocate.
			 */
			tmp = MAX_HEADER + sk->prot->max_header;
			if (copy < min(mss_now, tp->max_window >> 1) &&
			    !(flags & MSG_OOB)) {
				tmp += min(mss_now, tp->max_window);

				/* What is happening here is that we want to
				 * tack on later members of the users iovec
				 * if possible into a single frame.  When we
				 * leave this loop our caller checks to see if
				 * we can send queued frames onto the wire.
				 * See tcp_v[46]_sendmsg() for this.
				 */
				queue_it = 1;
			} else {
				tmp += copy;
				queue_it = 0;
			}
			skb = sock_wmalloc(sk, tmp, 0, GFP_KERNEL);

			/* If we didn't get any memory, we need to sleep. */
			if (skb == NULL) {
				sk->socket->flags |= SO_NOSPACE;
				if (flags&MSG_DONTWAIT) {
					err = -EAGAIN;
					goto do_interrupted;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -