
📄 tcp.c

📁 GNU Hurd source code
💻 C
📖 Page 1 of 4
static unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk, int dead)
{
	int next = (int) new_state[sk->state];
	int ns = (next & TCP_STATE_MASK);

	tcp_set_state(sk, ns);

	/*	This is a (useful) BSD violation of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 3 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *	reset mistake.
	 */
	if (dead)
		tcp_check_fin_timer(sk);

	return (next & TCP_ACTION_FIN);
}
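
/* Editorial note, not in the original source: each new_state[] entry
 * packs the next socket state in the low bits (TCP_STATE_MASK) and the
 * action flag in the high bits, so a single table lookup yields both.
 * For example, a socket in TCP_CLOSE_WAIT maps to
 * TCP_LAST_ACK | TCP_ACTION_FIN: tcp_close_state() moves it to
 * TCP_LAST_ACK and returns nonzero, telling the caller to send a FIN.
 */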

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or set sk->dead.
 */
void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->state) &
	    (TCPF_ESTABLISHED|TCPF_SYN_SENT|TCPF_SYN_RECV|TCPF_CLOSE_WAIT)) {
		lock_sock(sk);

		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk,0))
			tcp_send_fin(sk);

		release_sock(sk);
	}
}

/*
 *	Return 1 if we still have things to send in our buffers.
 */
static inline int closing(struct sock * sk)
{
	return ((1 << sk->state) & (TCPF_FIN_WAIT1|TCPF_CLOSING|TCPF_LAST_ACK));
}

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted. Currently it is only called by
 *	tcp_close, and timeout mirrors the value there.
 */
static void tcp_close_pending (struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct open_request *req = tp->syn_wait_queue;

	while(req) {
		struct open_request *iter;

		if (req->sk)
			tcp_close(req->sk, 0);

		iter = req;
		req = req->dl_next;

		(*iter->class->destructor)(iter);
		tcp_dec_slow_timer(TCP_SLT_SYNACK);
		sk->ack_backlog--;
		tcp_openreq_free(iter);
	}

	tcp_synq_init(tp);
}

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;

	/*
	 * Check whether the socket is locked ... supposedly
	 * it's impossible to tcp_close() a locked socket.
	 */
	if (atomic_read(&sk->sock_readers))
		printk("tcp_close: socket already locked!\n");

	/* We need to grab some memory, and put together a FIN,
	 * and then put it into the queue to be sent.
	 */
	lock_sock(sk);
	if(sk->state == TCP_LISTEN) {
		/* Special case. */
		tcp_set_state(sk, TCP_CLOSE);
		tcp_close_pending(sk);
		release_sock(sk);
		sk->dead = 1;
		return;
	}

	/* It is questionable, what the role of this is now.
	 * In any event either it should be removed, or
	 * increment of SLT_KEEPALIVE be done, this is causing
	 * big problems.  For now I comment it out.  -DaveM
	 */
	/* sk->keepopen = 1; */
	sk->shutdown = SHUTDOWN_MASK;

	if (!sk->dead)
		sk->state_change(sk);

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
	 */
	while((skb=__skb_dequeue(&sk->receive_queue))!=NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  skb->h.th->fin;
		data_was_unread += len;
		kfree_skb(skb);
	}

	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
	 * 3.10, we send a RST here because data was lost.  To
	 * witness the awful effects of the old behavior of always
	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
	 * a bulk GET in an FTP client, suspend the process, wait
	 * for the client to advertise a zero window, then kill -9
	 * the FTP client, wheee...  Note: timeout is always zero
	 * in such a case.
	 */
	if(data_was_unread != 0) {
		/* Unread data was tossed, zap the connection. */
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk);
	} else if (tcp_close_state(sk,1)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */
		tcp_send_fin(sk);
	}

	if (timeout) {
		struct task_struct *tsk = current;
		struct wait_queue wait = { tsk, NULL };

		add_wait_queue(sk->sleep, &wait);
		release_sock(sk);

		while (1) {
			tsk->state = TASK_INTERRUPTIBLE;
			if (!closing(sk))
				break;
			timeout = schedule_timeout(timeout);
			if (signal_pending(tsk) || !timeout)
				break;
		}

		tsk->state = TASK_RUNNING;
		remove_wait_queue(sk->sleep, &wait);

		lock_sock(sk);
	}

	/* Now that the socket is dead, if we are in the FIN_WAIT2 state
	 * we may need to set up a timer.
	 */
	tcp_check_fin_timer(sk);

	release_sock(sk);
	sk->dead = 1;
}
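
/* Editorial note, not in the original source: the timeout block in
 * tcp_close() above is the lingering-close wait.  It sleeps until
 * closing() reports that no FIN remains in flight (FIN_WAIT1, CLOSING,
 * LAST_ACK), or until the timeout expires or a signal arrives; the
 * caller is presumably passing the SO_LINGER time here.
 */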

/*
 *	Wait for an incoming connection, avoid race
 *	conditions. This must be called with the socket locked.
 */
static struct open_request * wait_for_connect(struct sock * sk,
					      struct open_request **pprev)
{
	struct wait_queue wait = { current, NULL };
	struct open_request *req;

	add_wait_queue(sk->sleep, &wait);
	for (;;) {
		current->state = TASK_INTERRUPTIBLE;
		release_sock(sk);
		schedule();
		lock_sock(sk);
		req = tcp_find_established(&(sk->tp_pinfo.af_tcp), pprev);
		if (req)
			break;
		if (signal_pending(current))
			break;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	return req;
}

/*
 *	This will accept the next outstanding connection.
 *
 *	Be careful about race conditions here - this is subtle.
 */
struct sock *tcp_accept(struct sock *sk, int flags)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct open_request *req, *prev;
	struct sock *newsk = NULL;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = EINVAL;
	if (sk->state != TCP_LISTEN)
		goto out;

	/* Find already established connection */
	req = tcp_find_established(tp, &prev);
	if (!req) {
		/* If this is a non blocking socket don't sleep */
		error = EAGAIN;
		if (flags & O_NONBLOCK)
			goto out;

		error = ERESTARTSYS;
		req = wait_for_connect(sk, &prev);
		if (!req)
			goto out;
	}

	tcp_synq_unlink(tp, req, prev);
	newsk = req->sk;
	req->class->destructor(req);
	tcp_openreq_free(req);
	sk->ack_backlog--;
	if(sk->keepopen)
		tcp_inc_slow_timer(TCP_SLT_KEEPALIVE);

	release_sock(sk);
	return newsk;

out:
	/* sk should be in LISTEN state, thus accept can use sk->err for
	 * internal purposes without stomping on anyone's feet.
	 */
	sk->err = error;
	release_sock(sk);
	return newsk;
}
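
/* Editorial note, not in the original source: wait_for_connect() must
 * release the socket lock before calling schedule(); while the socket
 * is locked, incoming packets are backlogged rather than processed, so
 * a completed connection could never appear and wake the sleeper.  The
 * lock is re-taken before rescanning with tcp_find_established().
 */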

/*
 *	Socket option code for TCP.
 */
int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
		   int optlen)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int val;

	if (level != SOL_TCP)
		return tp->af_specific->setsockopt(sk, level, optname,
						   optval, optlen);

	if(optlen<sizeof(int))
		return -EINVAL;

	if (get_user(val, (int *)optval))
		return -EFAULT;

	switch(optname) {
	case TCP_MAXSEG:
		/* Values greater than the interface MTU won't take effect.
		 * However, at the point when this call is done we typically
		 * don't yet know which interface is going to be used.
		 */
		if(val < 1 || val > MAX_WINDOW)
			return -EINVAL;
		tp->user_mss = val;
		return 0;

	case TCP_NODELAY:
		/* You cannot try to use this and TCP_CORK in
		 * tandem, so let the user know.
		 */
		if (sk->nonagle == 2)
			return -EINVAL;
		sk->nonagle = (val == 0) ? 0 : 1;
		return 0;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * You cannot try to use TCP_NODELAY and this mechanism
		 * at the same time, so let the user know.
		 */
		if (sk->nonagle == 1)
			return -EINVAL;

		if (val != 0) {
			sk->nonagle = 2;
		} else {
			sk->nonagle = 0;

			lock_sock(sk);
			tcp_push_pending_frames(sk, tp);
			release_sock(sk);
		}
		return 0;

	default:
		return -ENOPROTOOPT;
	};
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval,
		   int *optlen)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int val, len;

	if(level != SOL_TCP)
		return tp->af_specific->getsockopt(sk, level, optname,
						   optval, optlen);

	if(get_user(len,optlen))
		return -EFAULT;

	len = min(len, sizeof(int));

	switch(optname) {
	case TCP_MAXSEG:
		val = tp->user_mss;
		break;
	case TCP_NODELAY:
		val = (sk->nonagle == 1);
		break;
	case TCP_CORK:
		val = (sk->nonagle == 2);
		break;
	default:
		return -ENOPROTOOPT;
	};

	if(put_user(len, optlen))
		return -EFAULT;
	if(copy_to_user(optval, &val,len))
		return -EFAULT;
	return 0;
}

void tcp_set_keepalive(struct sock *sk, int val)
{
	if (!sk->keepopen && val)
		tcp_inc_slow_timer(TCP_SLT_KEEPALIVE);
	else if (sk->keepopen && !val)
		tcp_dec_slow_timer(TCP_SLT_KEEPALIVE);
}

extern void __skb_cb_too_small_for_tcp(int, int);

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long goal;
	int order;

	if(sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
					       sizeof(struct open_request),
					       0, SLAB_HWCACHE_ALIGN,
					       NULL, NULL);
	if(!tcp_openreq_cachep)
		panic("tcp_init: Cannot alloc open_request cache.");

	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
					      sizeof(struct tcp_bind_bucket),
					      0, SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if(!tcp_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
						sizeof(struct tcp_tw_bucket),
						0, SLAB_HWCACHE_ALIGN,
						NULL, NULL);
	if(!tcp_timewait_cachep)
		panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");

	/* Size and allocate TCP hash tables. */
	goal = num_physpages >> (20 - PAGE_SHIFT);
	for (order = 0; (1UL << order) < goal; order++)
		;
	do {
		tcp_ehash_size = (1UL << order) * PAGE_SIZE /
			sizeof(struct sock *);
		tcp_ehash = (struct sock **)
			__get_free_pages(GFP_ATOMIC, order);
	} while (tcp_ehash == NULL && --order >= 0);

	if (!tcp_ehash)
		panic("Failed to allocate TCP established hash table\n");
	memset(tcp_ehash, 0, tcp_ehash_size * sizeof(struct sock *));

	goal = (((1UL << order) * PAGE_SIZE) / sizeof(struct tcp_bind_bucket *));
	if (goal > (64 * 1024)) {
		/* Don't size the bind-hash larger than the port
		 * space, that is just silly.
		 */
		goal = (((64 * 1024) * sizeof(struct tcp_bind_bucket *)) / PAGE_SIZE);
		for (order = 0; (1UL << order) < goal; order++)
			;
	}
	do {
		tcp_bhash_size = (1UL << order) * PAGE_SIZE /
			sizeof(struct tcp_bind_bucket *);
		tcp_bhash = (struct tcp_bind_bucket **)
			__get_free_pages(GFP_ATOMIC, order);
	} while (tcp_bhash == NULL && --order >= 0);

	if (!tcp_bhash)
		panic("Failed to allocate TCP bind hash table\n");
	memset(tcp_bhash, 0, tcp_bhash_size * sizeof(struct tcp_bind_bucket *));

	printk("TCP: Hash tables configured (ehash %d bhash %d)\n",
	       tcp_ehash_size, tcp_bhash_size);
}
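
A usage note on the TCP_CORK path above (an editorial addition, not part of the file): from user space the option is toggled with setsockopt(), and it is clearing the option that flushes any queued partial frame via tcp_push_pending_frames(). Below is a minimal sketch, assuming a connected TCP socket fd; the helper name send_corked is hypothetical, and IPPROTO_TCP is the same level the kernel code checks as SOL_TCP.

	#include <sys/types.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	/* Cork the socket, queue a header, then uncork to push the
	 * partial frame.  Do not combine with TCP_NODELAY: the
	 * tcp_setsockopt() above returns -EINVAL if the other mode
	 * is already active.
	 */
	static int send_corked(int fd, const void *hdr, size_t hdr_len)
	{
		int on = 1, off = 0;

		if (setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on)) < 0)
			return -1;
		if (send(fd, hdr, hdr_len, 0) < 0)
			return -1;
		/* ... sendfile() of the message body would go here ... */
		return setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
	}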
