mytcp.c
DEFINE_SNMP_STAT(struct tcp_mib, mytcp_statistics) __read_mostly;

static __initdata unsigned long mythash_entries;
extern struct inet_hashinfo mytcp_hashinfo;
extern int mysysctl_local_port_range[2];
static unsigned long ehash_order;
static unsigned long bhash_order;

int mysysctl_tcp_mem[3];
int mysysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int mysysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };

int mytcp_memory_pressure;
atomic_t mytcp_memory_allocated;
atomic_t mytcp_sockets_allocated;
atomic_t mytcp_orphan_count = ATOMIC_INIT(0);
int mysysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;

#define MYTCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define MYTCP_OFF(sk)	(sk->sk_sndmsg_off)

/*
 * Successor state for an active close, indexed by the current sk_state.
 * TCP_ACTION_FIN in the high bits means a FIN must be transmitted.
 */
static const unsigned char mynew_state[16] = {
	TCP_CLOSE,				/* (Invalid)		*/
	TCP_FIN_WAIT1 | TCP_ACTION_FIN,		/* TCP_ESTABLISHED	*/
	TCP_CLOSE,				/* TCP_SYN_SENT		*/
	TCP_FIN_WAIT1 | TCP_ACTION_FIN,		/* TCP_SYN_RECV		*/
	TCP_FIN_WAIT1,				/* TCP_FIN_WAIT1	*/
	TCP_FIN_WAIT2,				/* TCP_FIN_WAIT2	*/
	TCP_CLOSE,				/* TCP_TIME_WAIT	*/
	TCP_CLOSE,				/* TCP_CLOSE		*/
	TCP_LAST_ACK | TCP_ACTION_FIN,		/* TCP_CLOSE_WAIT	*/
	TCP_LAST_ACK,				/* TCP_LAST_ACK		*/
	TCP_CLOSE,				/* TCP_LISTEN		*/
	TCP_CLOSING,				/* TCP_CLOSING		*/
};

unsigned int mytcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	return 0;
}

ssize_t mytcp_sendpage(struct socket *sock, struct page *page, int offset,
		       size_t size, int flags)
{
	return 0;
}

static int mytcp_close_state(struct sock *sk)
{
	int next = (int)mynew_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}

static int mytcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : mysysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	/* Never time out FIN_WAIT2 faster than 3.5 * RTO. */
	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

void mytcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);
		myinet_csk_listen_stop(sk);
		goto adjudge_to_death;
	}

	/*
	 * Throw away all the unread receive data.  An active close on
	 * unread data must become an abort (RFC 1122, 4.2.2.13).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  skb->h.th->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_stream_mem_reclaim(sk);

	if (data_was_unread) {
		MYNET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		mytcp_send_active_reset(sk, GFP_KERNEL);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* SO_LINGER with a zero timeout: abort the connection. */
		sk->sk_prot->disconnect(sk, 0);
		MYNET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
	} else if (mytcp_close_state(sk)) {
		mytcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	release_sock(sk);

	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	sock_hold(sk);
	sock_orphan(sk);

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			mytcp_send_active_reset(sk, GFP_ATOMIC);
			MYNET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = mytcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				myinet_csk_reset_keepalive_timer(sk,
						mytcp_fin_time(sk));
			} else {
				atomic_inc(sk->sk_prot->orphan_count);
				mytcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}

	if (sk->sk_state != TCP_CLOSE) {
		sk_stream_mem_reclaim(sk);
		if (atomic_read(sk->sk_prot->orphan_count) > mysysctl_tcp_max_orphans ||
		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		     atomic_read(&tcp_memory_allocated) > mysysctl_tcp_mem[2])) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many of orphaned sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			mytcp_send_active_reset(sk, GFP_ATOMIC);
			MYNET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
		}
	}
	atomic_inc(sk->sk_prot->orphan_count);

	if (sk->sk_state == TCP_CLOSE)
		myinet_csk_destroy_sock(sk);

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	printk(KERN_INFO "%s:%d: the ref of the module: %d--%d\n",
	       __FUNCTION__, __LINE__, atomic_read(&sk->sk_refcnt),
	       sk->sk_prot_creator->owner->ref[0].count);
	sock_put(sk);
}
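/*
 * Summary of the close path above: unread receive data forces an abort
 * (RST) rather than a graceful close, per RFC 1122 4.2.2.13.  After the
 * socket is orphaned, a FIN_WAIT2 socket whose remaining lifetime fits
 * within TCP_TIMEWAIT_LEN is handed to the timewait machinery, otherwise
 * the keepalive timer reaps it.  Finally, the orphan-count and
 * memory-pressure checks may still convert the close into a reset.
 */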
static inline void mytcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline void mytcp_mark_urg(struct tcp_sock *tp, int flags,
				  struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

/* Force a push once more than half of the largest window ever advertised
 * by the peer sits unpushed in the write queue. */
static inline int myforced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void mytcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
			      int mss_now, int nonagle)
{
	if (sk->sk_send_head) {
		struct sk_buff *skb = sk->sk_write_queue.prev;

		if (!(flags & MSG_MORE) || myforced_push(tp))
			mytcp_mark_push(tp, skb);
		mytcp_mark_urg(tp, flags, skb);
		__mytcp_push_pending_frames(sk, tp, mss_now,
				(flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static inline void mytcp_clear_xmit_timers(struct sock *sk)
{
	myinet_csk_clear_xmit_timers(sk);
}

static inline int mytcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int mytcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	if (old_state == TCP_LISTEN) {
		myinet_csk_listen_stop(sk);
	} else if (mytcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		mytcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	mytcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	sk_stream_writequeue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);

	inet->dport = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	mytcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	sk->sk_send_head = NULL;
	tp->rx_opt.saw_tstamp = 0;
	tcp_sack_reset(&tp->rx_opt);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

int mytcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	return 0;
}

void mytcp_shutdown(struct sock *sk, int how)
{
}

int mytcp_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int optlen)
{
	return 0;
}

int mytcp_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	return 0;
}

/* Pick the size of a fresh skb's linear area: zero for TSO-capable
 * routes (everything goes into fragments), otherwise cap the head so
 * the remainder lands in page fragments. */
static inline int myselect_size(struct sock *sk, struct tcp_sock *tp)
{
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk->sk_route_caps & NETIF_F_TSO)
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

void mytcp_done(struct sock *sk)
{
	tcp_set_state(sk, TCP_CLOSE);
	mytcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		myinet_csk_destroy_sock(sk);
}
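/*
 * The transmit path below fills write-queue skbs in two ways: while the
 * skb's linear area has tailroom, user data is appended there; once it
 * is full, data goes into page fragments, with the current partially
 * filled page cached per socket in MYTCP_PAGE()/MYTCP_OFF() so that
 * successive sends can coalesce into the same page.
 */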
static inline void myskb_entail(struct sock *sk, struct tcp_sock *tp,
				struct sk_buff *skb)
{
	skb->csum = 0;
	TCP_SKB_CB(skb)->seq = tp->write_seq;
	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = 0;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);
	if (!sk->sk_send_head)
		sk->sk_send_head = skb;
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

int mytcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		  size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;
	}

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = mytcp_current_mss(sk, !(flags & MSG_OOB));
	size_goal = tp->xmit_size_goal;

	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = sk->sk_write_queue.prev;

			if (!sk->sk_send_head ||
			    (copy = size_goal - skb->len) <= 0) {
new_segment:
				/* The tail skb is full: allocate a new one. */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk,
						myselect_size(sk, tp), 0,
						sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				if (sk->sk_route_caps &
				    (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
				     NETIF_F_HW_CSUM))
					skb->ip_summed = CHECKSUM_HW;

				myskb_entail(sk, tp, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of the skb. */
			if (copy > seglen)
				copy = seglen;

			if (skb_tailroom(skb) > 0) {
				/* There is room in the linear area. */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = MYTCP_PAGE(sk);
				int off = MYTCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* Append to the tail page fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i && !(sk->sk_route_caps & NETIF_F_SG))) {
					/* No more fragment slots: push what we
					 * have and start a new segment. */
					mytcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						MYTCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_stream_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					if (!MYTCP_PAGE(sk)) {
						MYTCP_PAGE(sk) = page;
						MYTCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size += copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (MYTCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						MYTCP_PAGE(sk) = page;
					}
				}

				MYTCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->tso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (myforced_push(tp)) {
				mytcp_mark_push(tp, skb);
				__mytcp_push_pending_frames(sk, tp, mss_now,
							    TCP_NAGLE_PUSH);
			} else if (skb == sk->sk_send_head)
				mytcp_push_one(sk, mss_now);
			continue;

			/* Exit paths, as in the stock tcp_sendmsg(): block
			 * until buffer space returns, then retry. */
wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				mytcp_push(sk, tp, flags & ~MSG_MORE,
					   mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = mytcp_current_mss(sk, !(flags & MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		mytcp_push(sk, tp, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		/* Drop the empty skb we just entailed. */
		if (sk->sk_send_head == skb)
			sk->sk_send_head = NULL;
		__skb_unlink(skb, &sk->sk_write_queue);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, err, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}
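/*
 * A minimal sketch of how these handlers would typically be wired into a
 * struct proto so the stack dispatches to them.  The name
 * mytcp_prot_example and the exact field set are assumptions for
 * illustration; the module's real registration is not shown in this
 * listing.  mytcp_poll() and mytcp_sendpage() are omitted because they
 * use the proto_ops signatures and belong in an inet_stream_ops-style
 * table instead.
 */
static struct proto mytcp_prot_example = {
	.name		= "MYTCP",
	.owner		= THIS_MODULE,
	.close		= mytcp_close,
	.disconnect	= mytcp_disconnect,
	.ioctl		= mytcp_ioctl,
	.shutdown	= mytcp_shutdown,
	.setsockopt	= mytcp_setsockopt,
	.getsockopt	= mytcp_getsockopt,
	.sendmsg	= mytcp_sendmsg,
	.orphan_count	= &mytcp_orphan_count,
	.obj_size	= sizeof(struct tcp_sock),
};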