
📄 tcp_output.c

📁 GNU Hurd source code
💻 C
📖 Page 1 of 3
		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
		TCP_SKB_CB(skb)->sacked = 0;
		TCP_SKB_CB(skb)->urg_ptr = 0;

		/* FIN eats a sequence byte, write_seq advanced by tcp_send_skb(). */
		TCP_SKB_CB(skb)->seq = tp->write_seq;
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
		tcp_send_skb(sk, skb, 0);
	}
}

/* We get here when a process closes a file descriptor (either due to
 * an explicit close() or as a byproduct of exit()'ing) and there
 * was unread data in the receive queue.  This behavior is recommended
 * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
 */
void tcp_send_active_reset(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct sk_buff *skb;

	/* NOTE: No TCP options attached and we never retransmit this. */
	skb = alloc_skb(MAX_HEADER + sk->prot->max_header, GFP_KERNEL);
	if (!skb)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_HEADER + sk->prot->max_header);
	skb->csum = 0;
	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->urg_ptr = 0;

	/* Send it off. */
	TCP_SKB_CB(skb)->seq = tp->write_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	tcp_transmit_skb(sk, skb);
}

/* WARNING: This routine must only be called when we have already sent
 * a SYN packet that crossed the incoming SYN that caused this routine
 * to get called. If this assumption fails then the initial rcv_wnd
 * and rcv_wscale values will not be correct.
 */
int tcp_send_synack(struct sock *sk)
{
	struct tcp_opt* tp = &(sk->tp_pinfo.af_tcp);
	struct sk_buff* skb;

	skb = sock_wmalloc(sk, (MAX_HEADER + sk->prot->max_header),
			   1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_HEADER + sk->prot->max_header);
	skb->csum = 0;
	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_SYN);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->urg_ptr = 0;

	/* SYN eats a sequence byte. */
	TCP_SKB_CB(skb)->seq = tp->snd_una;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;

	__skb_queue_tail(&sk->write_queue, skb);
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	tp->packets_out++;
	tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
	return 0;
}
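/* Annotation: the tcp_header_size sum in tcp_make_synack() below adds the
 * base header to 4-byte-aligned option lengths.  Assuming the usual 2.2-era
 * values from <net/tcp.h> (TCPOLEN_MSS = 4, TCPOLEN_TSTAMP_ALIGNED = 12,
 * TCPOLEN_WSCALE_ALIGNED = 4, TCPOLEN_SACKPERM_ALIGNED = 4), a SYN-ACK
 * offering MSS, timestamps and window scaling is 20 + 4 + 12 + 4 = 40 bytes.
 * That fits th->doff exactly: doff is a 4-bit field counted in 32-bit words,
 * so this header is 40 >> 2 = 10 words.  SACK_PERM is only counted when
 * timestamps are off because, with timestamps on, it rides in the two NOP
 * padding bytes of the timestamp option.
 */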
/*
 * Prepare a SYN-ACK.
 */
struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				 struct open_request *req, int mss)
{
	struct tcphdr *th;
	int tcp_header_size;
	struct sk_buff *skb;

	skb = sock_wmalloc(sk, MAX_HEADER + sk->prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_HEADER + sk->prot->max_header);

	skb->dst = dst_clone(dst);

	/* Don't offer more than they did.
	 * This way we don't have to memorize who said what.
	 * FIXME: maybe this should be changed for better performance
	 * with syncookies.
	 */
	req->mss = min(mss, req->mss);
	if (req->mss < 8) {
		printk(KERN_DEBUG "initial req->mss below 8\n");
		req->mss = 8;
	}

	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
			   (req->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
			   (req->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
			   /* SACK_PERM is in the place of NOP NOP of TS */
			   ((req->sack_ok && !req->tstamp_ok) ?
			    TCPOLEN_SACKPERM_ALIGNED : 0));
	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);

	memset(th, 0, sizeof(struct tcphdr));
	th->syn = 1;
	th->ack = 1;
	th->source = sk->sport;
	th->dest = req->rmt_port;
	TCP_SKB_CB(skb)->seq = req->snt_isn;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
	th->seq = htonl(TCP_SKB_CB(skb)->seq);
	th->ack_seq = htonl(req->rcv_isn + 1);
	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
		__u8 rcv_wscale;
		/* Set this up on the first call only */
		req->window_clamp = skb->dst->window;
		tcp_select_initial_window(sock_rspace(sk)/2,req->mss,
			&req->rcv_wnd,
			&req->window_clamp,
			req->wscale_ok,
			&rcv_wscale);
		req->rcv_wscale = rcv_wscale;
	}

	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
	th->window = htons(req->rcv_wnd);

	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	tcp_syn_build_options((__u32 *)(th + 1), req->mss, req->tstamp_ok,
			      req->sack_ok, req->wscale_ok, req->rcv_wscale,
			      TCP_SKB_CB(skb)->when,
			      req->ts_recent);

	skb->csum = 0;
	th->doff = (tcp_header_size >> 2);
	tcp_statistics.TcpOutSegs++;
	return skb;
}

void tcp_connect(struct sock *sk, struct sk_buff *buff, int mtu)
{
	struct dst_entry *dst = sk->dst_cache;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	/* Reserve space for headers. */
	skb_reserve(buff, MAX_HEADER + sk->prot->max_header);

	tp->snd_wnd = 0;
	tp->snd_wl1 = 0;
	tp->snd_wl2 = tp->write_seq;
	tp->snd_una = tp->write_seq;
	tp->rcv_nxt = 0;

	sk->err = 0;

	/* We'll fix this up when we get a response from the other end.
	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
	 */
	tp->tcp_header_len = sizeof(struct tcphdr) +
		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

	/* If user gave his TCP_MAXSEG, record it to clamp */
	if (tp->user_mss)
		tp->mss_clamp = tp->user_mss;
	tcp_sync_mss(sk, mtu);

	/* Now unpleasant action: if initial pmtu is too low
	   set lower clamp. I am not sure that it is good.
	   To be more exact, I do not think that clamping at value, which
	   is apparently transient and may improve in future is good idea.
	   It would be better to wait until peer will returns its MSS
	   (probably 65535 too) and now advertise something sort of 65535
	   or at least first hop device mtu. Is it clear, what I mean?

	   We should tell peer what maximal mss we expect to RECEIVE,
	   it has nothing to do with pmtu.

	   I am afraid someone will be confused by such huge value.
	                                                   --ANK (980731)
	 */
	if (tp->mss_cache + tp->tcp_header_len - sizeof(struct tcphdr) < tp->mss_clamp )
		tp->mss_clamp = tp->mss_cache + tp->tcp_header_len - sizeof(struct tcphdr);

	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
	TCP_SKB_CB(buff)->sacked = 0;
	TCP_SKB_CB(buff)->urg_ptr = 0;
	buff->csum = 0;
	TCP_SKB_CB(buff)->seq = tp->write_seq++;
	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
	tp->snd_nxt = TCP_SKB_CB(buff)->end_seq;

	tp->window_clamp = dst->window;
	tcp_select_initial_window(sock_rspace(sk)/2,tp->mss_clamp,
		&tp->rcv_wnd,
		&tp->window_clamp,
		sysctl_tcp_window_scaling,
		&tp->rcv_wscale);

	/* Ok, now lock the socket before we make it visible to
	 * the incoming packet engine.
	 */
	lock_sock(sk);

	/* Socket identity change complete, no longer
	 * in TCP_CLOSE, so enter ourselves into the
	 * hash tables.
	 */
	tcp_set_state(sk,TCP_SYN_SENT);
	sk->prot->hash(sk);

	tp->rto = dst->rtt;
	tcp_init_xmit_timers(sk);
	tp->retransmits = 0;
	tp->fackets_out = 0;
	tp->retrans_out = 0;

	/* Send it off. */
	__skb_queue_tail(&sk->write_queue, buff);
	TCP_SKB_CB(buff)->when = tcp_time_stamp;
	tp->packets_out++;
	tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
	tcp_statistics.TcpActiveOpens++;

	/* Timer for repeating the SYN until an answer. */
	tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);

	/* Now, it is safe to release the socket. */
	release_sock(sk);
}
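/* Annotation: note the sequence accounting in tcp_connect() above.  The SYN
 * consumes exactly one sequence number (seq = write_seq, end_seq is the
 * post-increment result), matching the "SYN eats a sequence byte" rule in
 * tcp_send_synack() earlier in this file.  The SYN skb itself stays on
 * sk->write_queue while only a clone is handed to tcp_transmit_skb(), so the
 * TIME_RETRANS timer armed with tp->rto can resend the original untouched
 * if no SYN-ACK arrives.
 */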
/* Send out a delayed ack, the caller does the policy checking
 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
 * for details.
 */
void tcp_send_delayed_ack(struct tcp_opt *tp, int max_timeout)
{
	unsigned long timeout;

	/* Stay within the limit we were given */
	timeout = tp->ato;
	if (timeout > max_timeout)
		timeout = max_timeout;
	timeout += jiffies;

	/* Use new timeout only if there wasn't a older one earlier. */
	if (!tp->delack_timer.prev) {
		tp->delack_timer.expires = timeout;
		add_timer(&tp->delack_timer);
	} else {
		if (time_before(timeout, tp->delack_timer.expires))
			mod_timer(&tp->delack_timer, timeout);
	}
}

/* This routine sends an ack and also updates the window. */
void tcp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if(!sk->zapped) {
		struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
		struct sk_buff *buff;

		/* We are not putting this on the write queue, so
		 * tcp_transmit_skb() will set the ownership to this
		 * sock.
		 */
		buff = alloc_skb(MAX_HEADER + sk->prot->max_header, GFP_ATOMIC);
		if (buff == NULL) {
			/* Force it to send an ack. We don't have to do this
			 * (ACK is unreliable) but it's much better use of
			 * bandwidth on slow links to send a spare ack than
			 * resend packets.
			 *
			 * This is the one possible way that we can delay an
			 * ACK and have tp->ato indicate that we are in
			 * quick ack mode, so clear it.
			 */
			if(tcp_in_quickack_mode(tp))
				tcp_exit_quickack_mode(tp);
			tcp_send_delayed_ack(tp, HZ/2);
			return;
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(buff, MAX_HEADER + sk->prot->max_header);
		buff->csum = 0;
		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
		TCP_SKB_CB(buff)->sacked = 0;
		TCP_SKB_CB(buff)->urg_ptr = 0;

		/* Send it off, this clears delayed acks for us. */
		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tp->snd_nxt;
		TCP_SKB_CB(buff)->when = tcp_time_stamp;
		tcp_transmit_skb(sk, buff);
	}
}
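/* Annotation: tcp_send_delayed_ack() above only ever moves a pending
 * delayed-ACK timer earlier, never later: mod_timer() runs solely when
 * time_before() says the new expiry is sooner.  As a worked example,
 * assuming HZ = 100: with tp->ato = 40 jiffies and max_timeout = HZ/2,
 * the ACK fires 40 jiffies (400 ms) from now; with tp->ato = 80, the
 * HZ/2 cap wins and it fires after 50 jiffies (500 ms) instead.
 */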
/* This routine sends a packet with an out of date sequence
 * number. It assumes the other end will try to ack it.
 */
void tcp_write_wakeup(struct sock *sk)
{
	/* After a valid reset we can send no more. */
	if (!sk->zapped) {
		struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
		struct sk_buff *skb;

		/* Write data can still be transmitted/retransmitted in the
		 * following states.  If any other state is encountered, return.
		 * [listen/close will never occur here anyway]
		 */
		if ((1 << sk->state) &
		    ~(TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_FIN_WAIT1|
		      TCPF_LAST_ACK|TCPF_CLOSING))
			return;

		if (before(tp->snd_nxt, tp->snd_una + tp->snd_wnd) &&
		    ((skb = tp->send_head) != NULL)) {
			unsigned long win_size;

			/* We are probing the opening of a window
			 * but the window size is != 0
			 * must have been a result SWS avoidance ( sender )
			 */
			win_size = tp->snd_wnd - (tp->snd_nxt - tp->snd_una);
			if (win_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq) {
				if (tcp_fragment(sk, skb, win_size))
					return; /* Let a retransmit get it. */
			}
			update_send_head(sk);
			TCP_SKB_CB(skb)->when = tcp_time_stamp;
			tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
			tp->packets_out++;
			tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
			if (!tcp_timer_is_set(sk, TIME_RETRANS))
				tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
		} else {
			/* We don't queue it, tcp_transmit_skb() sets ownership. */
			skb = alloc_skb(MAX_HEADER + sk->prot->max_header,
					GFP_ATOMIC);
			if (skb == NULL)
				return;

			/* Reserve space for headers and set control bits. */
			skb_reserve(skb, MAX_HEADER + sk->prot->max_header);
			skb->csum = 0;
			TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
			TCP_SKB_CB(skb)->sacked = 0;
			TCP_SKB_CB(skb)->urg_ptr = 0;

			/* Use a previous sequence.  This should cause the other
			 * end to send an ack.  Don't queue or clone SKB, just
			 * send it.
			 */
			TCP_SKB_CB(skb)->seq = tp->snd_nxt - 1;
			TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
			TCP_SKB_CB(skb)->when = tcp_time_stamp;
			tcp_transmit_skb(sk, skb);
		}
	}
}

/* A window probe timeout has occurred.  If window is not closed send
 * a partial packet else a zero probe.
 */
void tcp_send_probe0(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	tcp_write_wakeup(sk);
	tp->pending = TIME_PROBE0;
	tp->backoff++;
	tp->probes_out++;
	tcp_reset_xmit_timer (sk, TIME_PROBE0,
			      min(tp->rto << tp->backoff, 120*HZ));
}
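tcp_send_probe0() above doubles the zero-window probe timeout on every unanswered probe (tp->rto << tp->backoff) and caps it at 120*HZ, i.e. two minutes. Below is a minimal user-space sketch of that backoff schedule, not the kernel's code: it assumes HZ = 100 and an initial RTO of 3*HZ, and uses a local min_ul() helper in place of the kernel's min macro.

#include <stdio.h>

#define HZ 100                          /* assumed 2.2-era x86 tick rate */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long rto = 3 * HZ;     /* assumed initial RTO of 3 s */
	int backoff;

	/* Mirror tcp_send_probe0(): timeout = min(rto << backoff, 120*HZ). */
	for (backoff = 1; backoff <= 8; backoff++) {
		unsigned long timeout = min_ul(rto << backoff, 120 * HZ);
		printf("probe %d fires after %lu jiffies (%.1f s)\n",
		       backoff, timeout, (double)timeout / HZ);
	}
	return 0;
}

Running this prints 6, 12, 24, 48 and 96 seconds for the first five probes, after which the 120-second cap takes over.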
