tcp.h
#define tcp_openreq_free(req)	kmem_cache_free(tcp_openreq_cachep, req)

/*
 * Pointers to address related TCP functions
 * (i.e. things that depend on the address family)
 *
 * BUGGG_FUTURE: the whole idea behind this struct is wrong.
 * It mixes socket frontend with transport function.
 * With port sharing between IPv6/v4 it gives the only advantage,
 * only poor IPv6 needs to permanently recheck that it
 * is still IPv6 8)8) It must be cleaned up as soon as possible.
 * --ANK (980802)
 */
struct tcp_func {
	void		(*queue_xmit)		(struct sk_buff *skb);

	void		(*send_check)		(struct sock *sk,
						 struct tcphdr *th,
						 int len,
						 struct sk_buff *skb);

	int		(*rebuild_header)	(struct sock *sk);

	int		(*conn_request)		(struct sock *sk,
						 struct sk_buff *skb,
						 __u32 isn);

	struct sock *	(*syn_recv_sock)	(struct sock *sk,
						 struct sk_buff *skb,
						 struct open_request *req,
						 struct dst_entry *dst);

	struct sock *	(*get_sock)		(struct sk_buff *skb,
						 struct tcphdr *th);

	__u16		net_header_len;

	int		(*setsockopt)		(struct sock *sk,
						 int level,
						 int optname,
						 char *optval,
						 int optlen);

	int		(*getsockopt)		(struct sock *sk,
						 int level,
						 int optname,
						 char *optval,
						 int *optlen);

	void		(*addr2sockaddr)	(struct sock *sk,
						 struct sockaddr *);

	int		sockaddr_len;
};

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

extern __inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}

extern __inline int after(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq2-seq1) < 0;
}

/* is s2<=s1<=s3 ? */
extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

extern struct proto tcp_prot;
extern struct tcp_mib tcp_statistics;

extern void			tcp_put_port(struct sock *sk);
extern void			__tcp_put_port(struct sock *sk);
extern void			tcp_inherit_port(struct sock *sk, struct sock *child);

extern void			tcp_v4_err(struct sk_buff *skb,
					   unsigned char *, int);

extern void			tcp_shutdown(struct sock *sk, int how);

extern int			tcp_v4_rcv(struct sk_buff *skb,
					   unsigned short len);

extern int			tcp_do_sendmsg(struct sock *sk,
					       struct msghdr *msg);

extern int			tcp_ioctl(struct sock *sk,
					  int cmd,
					  unsigned long arg);

extern int			tcp_rcv_state_process(struct sock *sk,
						      struct sk_buff *skb,
						      struct tcphdr *th,
						      unsigned len);

extern int			tcp_rcv_established(struct sock *sk,
						    struct sk_buff *skb,
						    struct tcphdr *th,
						    unsigned len);

enum tcp_tw_status
{
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2
};

extern enum tcp_tw_status	tcp_timewait_state_process(struct tcp_tw_bucket *tw,
							   struct sk_buff *skb,
							   struct tcphdr *th,
							   unsigned len);

extern void			tcp_close(struct sock *sk,
					  long timeout);
extern struct sock *		tcp_accept(struct sock *sk, int flags);
extern unsigned int		tcp_poll(struct file *file, struct socket *sock,
					 struct poll_table_struct *wait);
extern void			tcp_write_space(struct sock *sk);

extern int			tcp_getsockopt(struct sock *sk, int level,
					       int optname, char *optval,
					       int *optlen);
extern int			tcp_setsockopt(struct sock *sk, int level,
					       int optname, char *optval,
					       int optlen);
extern void			tcp_set_keepalive(struct sock *sk, int val);
extern int			tcp_recvmsg(struct sock *sk,
					    struct msghdr *msg,
					    int len, int nonblock,
					    int flags, int *addr_len);

extern void			tcp_parse_options(struct sock *sk, struct tcphdr *th,
						  struct tcp_opt *tp, int no_fancy);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern int			tcp_v4_rebuild_header(struct sock *sk);

extern int			tcp_v4_build_header(struct sock *sk,
						    struct sk_buff *skb);

extern void			tcp_v4_send_check(struct sock *sk,
						  struct tcphdr *th, int len,
						  struct sk_buff *skb);
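/*
 * Editor's sketch (not part of the original tcp.h): a minimal, standalone
 * illustration of why before()/after() above compare sequence numbers via a
 * signed cast of the unsigned difference rather than with a plain "<".
 * The names below (demo_before, the sample sequence values) are invented
 * for this example only.  It is guarded by #if 0 so it never affects the
 * header itself; extracted on its own it compiles as a userspace program.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Mirrors before(): the signed interpretation of the unsigned difference
 * makes the comparison wrap-safe across the 32-bit sequence space. */
static int demo_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t a = 0xfffffff0u;	/* just before the 2^32 wrap */
	uint32_t b = 0x00000010u;	/* just after the wrap */

	printf("a < b            : %d\n", a < b);		/* 0: plain compare is fooled */
	printf("demo_before(a, b): %d\n", demo_before(a, b));	/* 1: a really is earlier */
	return 0;
}
#endif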
extern int			tcp_v4_conn_request(struct sock *sk,
						    struct sk_buff *skb,
						    __u32 isn);

extern struct sock *		tcp_create_openreq_child(struct sock *sk,
							 struct open_request *req,
							 struct sk_buff *skb);

extern struct sock *		tcp_v4_syn_recv_sock(struct sock *sk,
						     struct sk_buff *skb,
						     struct open_request *req,
						     struct dst_entry *dst);

extern int			tcp_v4_do_rcv(struct sock *sk,
					      struct sk_buff *skb);

extern int			tcp_v4_connect(struct sock *sk,
					       struct sockaddr *uaddr,
					       int addr_len);

extern void			tcp_connect(struct sock *sk,
					    struct sk_buff *skb,
					    int est_mss);

extern struct sk_buff *		tcp_make_synack(struct sock *sk,
						struct dst_entry *dst,
						struct open_request *req,
						int mss);

/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void tcp_read_wakeup(struct sock *);
extern void tcp_write_xmit(struct sock *);
extern void tcp_time_wait(struct sock *);
extern int  tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_fack_retransmit(struct sock *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern void tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk);
extern int  tcp_send_synack(struct sock *);
extern void tcp_transmit_skb(struct sock *, struct sk_buff *);
extern void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct tcp_opt *tp, int max_timeout);

/* CONFIG_IP_TRANSPARENT_PROXY */
extern int tcp_chkaddr(struct sk_buff *);

/* tcp_timer.c */
#define     tcp_reset_msl_timer(x,y,z)	net_reset_timer(x,y,z)
extern void tcp_reset_xmit_timer(struct sock *, int, unsigned long);
extern void tcp_init_xmit_timers(struct sock *);
extern void tcp_clear_xmit_timers(struct sock *);

extern void tcp_retransmit_timer(unsigned long);
extern void tcp_delack_timer(unsigned long);
extern void tcp_probe_timer(unsigned long);

extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct open_request *req);

/*
 *	TCP slow timer
 */
extern struct timer_list	tcp_slow_timer;

struct tcp_sl_timer {
	atomic_t	count;
	unsigned long	period;
	unsigned long	last;
	void (*handler)	(unsigned long);
};

#define TCP_SLT_SYNACK		0
#define TCP_SLT_KEEPALIVE	1
#define TCP_SLT_TWKILL		2
#define TCP_SLT_MAX		3

extern struct tcp_sl_timer tcp_slt_array[TCP_SLT_MAX];

extern int tcp_sync_mss(struct sock *sk, u32 pmtu);

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
static __inline__ unsigned int tcp_current_mss(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct dst_entry *dst = sk->dst_cache;
	int mss_now = tp->mss_cache;

	if (dst && dst->pmtu != tp->pmtu_cookie)
		mss_now = tcp_sync_mss(sk, dst->pmtu);

	if (tp->sack_ok && tp->num_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->num_sacks * TCPOLEN_SACK_PERBLOCK));

	return mss_now > 8 ? mss_now : 8;
}
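/*
 * Editor's sketch (not part of the original tcp.h): the option-overhead
 * arithmetic that tcp_current_mss() above applies when SACK blocks are
 * pending.  The two option lengths are assumed to be the usual tcp.h
 * values (TCPOLEN_SACK_BASE_ALIGNED == 4, TCPOLEN_SACK_PERBLOCK == 8),
 * and all names are invented for this example.  Guarded by #if 0 so it
 * never affects the header; extracted on its own it compiles as a
 * userspace program.
 */
#if 0
#include <stdio.h>

#define DEMO_TCPOLEN_SACK_BASE_ALIGNED	4	/* assumed value */
#define DEMO_TCPOLEN_SACK_PERBLOCK	8	/* assumed value */

/* Same shape as the SACK branch of tcp_current_mss(): each pending SACK
 * block eats into the payload space, with an 8-byte floor on the result. */
static unsigned int demo_mss_with_sacks(int mss_cache, int num_sacks)
{
	int mss_now = mss_cache;

	if (num_sacks)
		mss_now -= DEMO_TCPOLEN_SACK_BASE_ALIGNED +
			   num_sacks * DEMO_TCPOLEN_SACK_PERBLOCK;

	return mss_now > 8 ? mss_now : 8;
}

int main(void)
{
	/* 1460-byte MSS with two SACK blocks: 1460 - (4 + 2*8) = 1440 */
	printf("%u\n", demo_mss_with_sacks(1460, 2));
	return 0;
}
#endif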
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32	__tcp_select_window(struct sock *sk);

/* Choose a new window to advertise, update state in tcp_opt for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
extern __inline__ u16 tcp_select_window(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 */
		new_win = cur_win;
	} else {
		tp->rcv_wnd = new_win;
		tp->rcv_wup = tp->rcv_nxt;
	}

	/* RFC1323 scaling applied */
	return new_win >> tp->rcv_wscale;
}

/* See if we can advertise non-zero, and if so how much we
 * can increase our advertisement.  If it becomes more than
 * twice what we are talking about right now, return true.
 */
extern __inline__ int tcp_raise_window(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	return (new_win && (new_win > (cur_win << 1)));
}

/* Recalculate snd_ssthresh, we want to set it to:
 *
 *	one half the current congestion window, but no
 *	less than two segments
 *
 * We must take into account the current send window
 * as well; however, we keep track of that using different
 * units so a conversion is necessary. -DaveM
 */
extern __inline__ __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{
	__u32 snd_wnd_packets = tp->snd_wnd / max(tp->mss_cache, 1);

	return max(min(snd_wnd_packets, tp->snd_cwnd) >> 1, 2);
}

/* TCP timestamps are only 32 bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We deliberately
 * use only the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queueing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines; if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)