📄 tcp.h
字号:
struct inet6_skb_parm	h6;	/* IPv6 per-skb control data */
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/

	__u16		urg_ptr;	/* Valid w/URG flags is set.	*/
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

/* Access the TCP private control block stashed in skb->cb[]. */
#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets acknowledged by FACK information" PLUS
 *	"Packets fast retransmitted"
 */
static __inline__ int tcp_packets_in_flight(struct tcp_opt *tp)
{
	return tp->packets_out - tp->fackets_out + tp->retrans_out;
}

/* This checks if the data bearing packet SKB (usually tp->send_head)
 * should be put on the wire right now.  Returns non-zero when it may
 * be sent, zero when it must be queued.
 */
static __inline__ int tcp_snd_test(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int nagle_check = 1;

	/* RFC 1122 - section 4.2.3.4
	 *
	 * We must queue if
	 *
	 * a) The right edge of this frame exceeds the window
	 * b) There are packets in flight and we have a small segment
	 *    [SWS avoidance and Nagle algorithm]
	 *    (part of SWS is done on packetization)
	 * c) We are retransmiting [Nagle]
	 * d) We have too many packets 'in flight'
	 *
	 * Don't use the nagle rule for urgent data (or
	 * for the final FIN -DaveM).
	 *
	 * nonagle == 2 means TCP_CORK: hold back every sub-MSS segment.
	 * nonagle == 0 means classic Nagle: hold a small segment while
	 * earlier data is still unacknowledged.
	 */
	if ((sk->nonagle == 2 && (skb->len < tp->mss_cache)) ||
	    (!sk->nonagle &&
	     skb->len < (tp->mss_cache >> 1) &&
	     tp->packets_out &&
	     !(TCP_SKB_CB(skb)->flags & (TCPCB_FLAG_URG|TCPCB_FLAG_FIN))))
		nagle_check = 0;

	/* Don't be strict about the congestion window for the
	 * final FIN frame.  -DaveM
	 */
	return (nagle_check &&
		((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) &&
		tp->retransmits == 0);
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
static __inline__ void tcp_push_pending_frames(struct sock *sk, struct tcp_opt *tp)
{
	if(tp->send_head) {
		if(tcp_snd_test(sk, tp->send_head))
			tcp_write_xmit(sk);
		else if(tp->packets_out == 0 && !tp->pending) {
			/* We held off on this in tcp_send_skb().
			 * Nothing is in flight to trigger an ACK clock,
			 * so arm the zero-window probe timer instead.
			 */
			tp->pending = TIME_PROBE0;
			tcp_reset_xmit_timer(sk, TIME_PROBE0, tp->rto);
		}
	}
}

/* This tells the input processing path that an ACK should go out
 * right now.  The high bit of tp->ato is used as the quick-ack flag.
 *
 * NOTE(review): (1<<31) left-shifts into the sign bit of a 32-bit
 * signed int, which is formally undefined behavior; (1U<<31) would be
 * safer -- confirm against the type of tp->ato before changing.
 */
#define tcp_enter_quickack_mode(__tp)	((__tp)->ato |= (1<<31))
#define tcp_exit_quickack_mode(__tp)	((__tp)->ato &= ~(1<<31))
#define tcp_in_quickack_mode(__tp)	(((__tp)->ato & (1 << 31)) != 0)

/*
 * List all states of a TCP socket that can be viewed as a "connected"
 * state.
 This now includes TCP_SYN_RECV, although I am not yet fully
 * convinced that this is the solution for the 'getpeername(2)'
 * problem. Thanks to Stephen A. Wood <saw@cebaf.gov> -FvK
 */
extern __inline const int tcp_connected(const int state)
{
	/* Bit-test the state against the mask of "connected" states. */
	return ((1 << state) &
		(TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_FIN_WAIT1|
		 TCPF_FIN_WAIT2|TCPF_SYN_RECV));
}

/*
 * Calculate(/check) TCP checksum.
 * NOTE(review): the 'th' parameter is unused here; the caller is
 * expected to have folded the header/payload sum into 'base'.
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr,
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

/* Debug aid: define STATE_TRACE to log TCP state transitions. */
#undef STATE_TRACE

#ifdef STATE_TRACE
static char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif

/* Move the socket to a new TCP state, keeping the TcpCurrEstab
 * statistic in step and tearing down hash/timers on TCP_CLOSE.
 */
static __inline__ void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->state;

	sk->state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
#endif

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			tcp_statistics.TcpCurrEstab++;
		break;

	case TCP_CLOSE:
	{
		struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

		/* Should be about 2 rtt's */
		net_reset_timer(sk, TIME_DONE, min(tp->srtt * 2, TCP_DONE_TIME));
		sk->prot->unhash(sk);
		/* fall through */
	}
	default:
		if (oldstate==TCP_ESTABLISHED)
			tcp_statistics.TcpCurrEstab--;
	}
}

/* Write the TCP options (timestamp, then any pending SACK blocks)
 * for an established-state segment into the option area at 'ptr'.
 */
static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
{
	if (tp->tstamp_ok) {
		/* NOP NOP TIMESTAMP-kind TIMESTAMP-length, then TSVAL/TSECR. */
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->ts_recent);
	}
	if(tp->sack_ok && tp->num_sacks) {
		int this_sack;

		/* NOP NOP SACK-kind length, length covers all blocks. */
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK << 8) |
					  (TCPOLEN_SACK_BASE +
					   (tp->num_sacks * TCPOLEN_SACK_PERBLOCK)));
		for(this_sack = 0; this_sack < tp->num_sacks; this_sack++) {
			*ptr++ = htonl(tp->selective_acks[this_sack].start_seq);
			*ptr++ = htonl(tp->selective_acks[this_sack].end_seq);
		}
	}
}

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is every changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
extern __inline__ void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
					     int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from sk->mss so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if(sack)
			/* SACK-permitted piggybacked in front of the timestamp. */
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if(sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
extern __inline__ void tcp_select_initial_window(__u32 space, __u16 mss,
						 __u32 *rcv_wnd,
						 __u32 *window_clamp,
						 int wscale_ok,
						 __u8 *rcv_wscale)
{
	/* If no clamp set the clamp to the max possible scaled window
	 * (65535 shifted by the maximum window scale of 14, per RFC 1323).
	 */
	if (*window_clamp == 0)
		(*window_clamp) = (65535<<14);
	space = min(*window_clamp,space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space/mss)*mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_WINDOW);
	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* See RFC1323 for an explanation of the limit to 14 */
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535<<(*rcv_wscale),*window_clamp);
}

/* Remove 'req' from the singly-linked SYN queue; 'prev' is the element
 * before it.  If 'req' was the tail, the tail pointer is rewound to
 * 'prev' (the cast works because dl_next is the first member --
 * NOTE(review): confirm against struct open_request's layout).
 */
extern __inline__ void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req, struct open_request *prev)
{
	if(!req->dl_next)
		tp->syn_wait_last = (struct open_request **)prev;
	prev->dl_next = req->dl_next;
}

/* Append 'req' at the tail of the SYN queue. */
extern __inline__ void tcp_synq_queue(struct tcp_opt *tp, struct open_request *req)
{
	req->dl_next = NULL;
	*tp->syn_wait_last = req;
	tp->syn_wait_last = &req->dl_next;
}

/* Reset the SYN queue to empty; tail pointer aims at the head slot. */
extern __inline__ void tcp_synq_init(struct tcp_opt *tp)
{
	tp->syn_wait_queue = NULL;
	tp->syn_wait_last = &tp->syn_wait_queue;
}

extern void __tcp_inc_slow_timer(struct tcp_sl_timer *slt);

/* Take a reference on a global slow timer, starting it on the
 * zero -> one transition of its user count.
 */
extern __inline__ void tcp_inc_slow_timer(int timer)
{
	struct tcp_sl_timer *slt = &tcp_slt_array[timer];

	if (atomic_read(&slt->count) == 0)
	{
		__tcp_inc_slow_timer(slt);
	}

	atomic_inc(&slt->count);
}

/* Drop a reference on a global slow timer. */
extern __inline__ void tcp_dec_slow_timer(int timer)
{
	struct tcp_sl_timer *slt = &tcp_slt_array[timer];

	atomic_dec(&slt->count);
}

extern const char timer_bug_msg[];

/* Cancel one of the per-socket transmit timers (retransmit, delayed
 * ACK, or zero-window probe).  An unknown 'what' logs timer_bug_msg.
 */
static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct timer_list *timer;

	switch (what) {
	case TIME_RETRANS:
		timer = &tp->retransmit_timer;
		break;
	case TIME_DACK:
		timer = &tp->delack_timer;
		break;
	case TIME_PROBE0:
		timer = &tp->probe_timer;
		break;
	default:
		printk(timer_bug_msg);
		return;
	};

	/* timer->prev != NULL means the timer is currently pending. */
	if(timer->prev != NULL)
		del_timer(timer);
}

/* Report whether the given per-socket transmit timer is pending.
 * An unknown 'what' logs timer_bug_msg and reports 0.
 */
static inline int tcp_timer_is_set(struct sock *sk, int what)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	switch (what) {
	case TIME_RETRANS:
		return tp->retransmit_timer.prev != NULL;
		break;
	case TIME_DACK:
		return tp->delack_timer.prev != NULL;
		break;
	case TIME_PROBE0:
		return tp->probe_timer.prev != NULL;
		break;
	default:
		printk(timer_bug_msg);
	};
	return 0;
}

#endif	/* _TCP_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -