/* mytcp_ipv4.c — TCP/IPv4 glue layer (teaching reimplementation of net/ipv4/tcp_ipv4.c). */
	/* Tail of mytcp_v4_connect(): the function opens before this chunk. */
	err = mytcp_connect(sk);
	rt = NULL;	/* cleared so the failure path's ip_rt_put() is a no-op */
	if (err)
		goto failure;
	return 0;
failure:
	/*
	 * Connect failed: fall back to CLOSE, release route state and
	 * forget the destination port so the socket can be reused.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}

extern int myip_queue_xmit(struct sk_buff *skb, int ipfragok);

/*
 * Fill in the TCP checksum of an outgoing segment of @len bytes.
 *
 * With hardware checksum offload (CHECKSUM_HW) only the inverted
 * pseudo-header sum is stored and skb->csum records the offset where
 * the device must place the final checksum; otherwise the complete
 * checksum over the TCP header (folding in the payload sum already
 * accumulated in skb->csum) is computed in software here.
 */
void mytcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}

extern int myinet_sk_rebuild_header(struct sock *sk);

/*
 * Duplicate the IP options carried by an incoming SYN so they can be
 * echoed on the SYN-ACK.  Returns a GFP_ATOMIC allocation (the caller
 * owns it and must kfree() it) or NULL when there are no options or
 * the allocation/copy fails.
 */
static struct ip_options *mytcp_v4_save_options(struct sock *sk,
						struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if( myip_options_echo(dopt, skb) ){
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

extern struct request_sock_ops mytcp_request_sock_ops;

static int mytcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst);

/*
 * Handle an incoming connection request (SYN) on a listening socket:
 * validate the source, allocate a request_sock and (continued on the
 * next source line) answer with a SYN-ACK.  Returns 0 in all cases —
 * dropped SYNs are simply not answered.
 */
int mytcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__u32 saddr = skb->nh.iph->saddr;
	__u32 daddr = skb->nh.iph->daddr;
	/* NOTE(review): 'when' is presumably reused by the caller to carry
	 * a precomputed ISN for recovered requests — confirm at call site. */
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	/* Never accept connection requests to broadcast/multicast routes. */
	if( ((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST) )
		goto drop;

	/* Embryonic-connection queue full: drop (the syncookie fallback
	 * is stubbed out below). */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
//		if (sysctl_tcp_syncookies) {
//			want_cookie = 1;
//		} else
#endif
		goto drop;
	}

	/* Accept backlog full and too few young requests to prune. */
	if( sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc( &mytcp_request_sock_ops );
	if (!req)
		goto drop;

	tcp_clear_options(&tmp_opt);
	/* Continuation of mytcp_v4_conn_request(). */
	tmp_opt.mss_clamp = 536;	/* classic default TCP MSS */
	tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
	mytcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie) {
		/* Cookies cannot encode TCP options: pretend none were seen. */
		tcp_clear_options(&tmp_opt);
		tmp_opt.saw_tstamp = 0;
	}

	/* A timestamp option with a zero tsval is treated as absent. */
	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = mytcp_v4_save_options(sk, skb);	/* echoed on SYN-ACK */

	if (!want_cookie)
		TCP_ECN_create_request(req, skb->h.th);

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
//		syn_flood_warning(skb);
#endif
		//isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		printk(KERN_INFO "%s:%d: isn is zero!\n", __FUNCTION__, __LINE__ );
		struct inet_peer *peer = NULL;

		/*
		 * TIME-WAIT recycling: if the peer's last timestamp is
		 * known and newer than what the SYN carries, reject per
		 * PAWS rather than risk confusing old/new incarnations.
		 */
		if (tmp_opt.saw_tstamp &&
		    mytcp_death_row.sysctl_tw_recycle &&
		    (dst = myinet_csk_route_req(sk, req)) != NULL &&
		    (peer = myrt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW) {
				MYNET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				dst_release(dst);
				goto drop_and_free;
			}
		} else if (!mysysctl_tcp_syncookies &&
			   ( mysysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			     ( mysysctl_max_syn_backlog >> 2)) &&
			   (!peer || !peer->tcp_ts_stamp) &&
			   (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* SYN backlog more than 3/4 full and nothing known
			 * about this peer: drop rather than risk a flood. */
			dst_release(dst);
			goto drop_and_free;
		}

		/* FIXME(review): fixed ISN — the real initial-sequence
		 * generator is commented out, so sequence numbers are
		 * trivially predictable.  Debug/teaching code only. */
		isn = 1981;	//tcp_v4_init_sequence(sk, skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if( mytcp_v4_send_synack(sk, req, dst) ){
		printk(KERN_INFO "%s:%d: tcp_v4_send_synack failed!\n", __FUNCTION__, __LINE__ );
		goto drop_and_free;
	}

	if (want_cookie) {
		/* Cookie mode keeps no per-request state. */
		printk(KERN_INFO "cookie????\n");
		reqsk_free(req);
	} else {
		printk(KERN_INFO "%s:%d: add to reqsk queue hash\n", __FUNCTION__, __LINE__ );
		myinet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	MYTCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
	return 0;
}

struct sock
/*
 * Create the child socket once the 3-way handshake completes (the
 * 'struct sock' return type sits at the end of the previous source
 * line).  Copies addressing and the saved IP options from the request
 * into the child, sizes its MSS from the route, and hashes it in.
 * Returns the new socket, or NULL on overflow/allocation failure.
 */
*mytcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
			struct request_sock *req,
			struct dst_entry *dst )
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;

	if( sk_acceptq_is_full(sk) )
		goto exit_overflow;

	if (!dst && (dst = myinet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = mytcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	sk_setup_caps(newsk, dst);

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	ireq = inet_rsk(req);
	newinet->daddr = ireq->rmt_addr;
	newinet->rcv_saddr = ireq->loc_addr;
	newinet->saddr = ireq->loc_addr;
	newinet->opt = ireq->opt;	/* ownership moves to the child */
	ireq->opt = NULL;
	newinet->mc_index = inet_iif(skb);
	newinet->mc_ttl = skb->nh.iph->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	mytcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	mytcp_initialize_rcv_mss(newsk);

	__inet_hash(&mytcp_hashinfo, newsk, 0);
	__inet_inherit_port(&mytcp_hashinfo, sk, newsk);

	return newsk;

exit_overflow:
	MYNET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	MYNET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}

/*
 * Record the peer's most recent timestamp in the inet_peer cache so
 * later PAWS checks can reject stale segments from a previous
 * incarnation of this connection.  Returns 1 when a peer entry was
 * available (and possibly updated), 0 otherwise.
 */
int mytcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if( !rt || rt->rt_dst != inet->daddr ){
		/* No (matching) cached route: look the peer up directly and
		 * drop the reference when done. */
		peer = myinet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		/* Use the route's bound peer; the route holds the ref. */
		if (!rt->peer)
			myrt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if( peer ){
		/* Only move the cached timestamp forward (or refresh a
		 * sufficiently old entry). */
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			myinet_putpeer(peer);
		return 1;
	}
	return 0;
}

extern int myip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, int optlen);extern int myip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);struct inet_connection_sock_af_ops myipv4_specific = { .queue_xmit = myip_queue_xmit, .send_check = mytcp_v4_send_check, .rebuild_header = myinet_sk_rebuild_header, .conn_request = mytcp_v4_conn_request, .syn_recv_sock = mytcp_v4_syn_recv_sock, .remember_stamp = mytcp_v4_remember_stamp, .net_header_len = sizeof(struct iphdr), .setsockopt = myip_setsockopt, .getsockopt = myip_getsockopt, .addr2sockaddr = myinet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in),};static int mytcp_v4_init_sock(struct sock *sk){ struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); skb_queue_head_init(&tp->out_of_order_queue); mytcp_init_xmit_timers(sk); tcp_prequeue_init(tp); icsk->icsk_rto = TCP_TIMEOUT_INIT; tp->mdev = TCP_TIMEOUT_INIT; tp->snd_cwnd = 2; tp->snd_ssthresh = 0x7fffffff; tp->snd_cwnd_clamp = ~0; tp->mss_cache = 536; tp->reordering = mysysctl_tcp_reordering; icsk->icsk_ca_ops = &mytcp_init_congestion_ops; sk->sk_state = TCP_CLOSE; sk->sk_write_space = sk_stream_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); icsk->icsk_af_ops = &myipv4_specific; icsk->icsk_sync_mss = mytcp_sync_mss; sk->sk_sndbuf = sysctl_tcp_wmem[1]; sk->sk_rcvbuf = sysctl_tcp_rmem[1]; atomic_inc( &mytcp_sockets_allocated ); return 0;}int mytcp_v4_destroy_sock(struct sock *sk){ struct tcp_sock *tp = tcp_sk(sk); mytcp_clear_xmit_timers(sk); mytcp_cleanup_congestion_control(sk); sk_stream_writequeue_purge(sk); __skb_queue_purge(&tp->out_of_order_queue); __skb_queue_purge(&tp->ucopy.prequeue); if (inet_csk(sk)->icsk_bind_hash) myinet_put_port(&mytcp_hashinfo, sk); if (sk->sk_sndmsg_page) { __free_page(sk->sk_sndmsg_page); sk->sk_sndmsg_page = NULL; } atomic_dec(&tcp_sockets_allocated); return 0;}static void mytcp_v4_hash(struct sock *sk){ printk(KERN_INFO "%s:%d\n", __FUNCTION__, __LINE__ ); inet_hash( 
		  &mytcp_hashinfo, sk );
}

/* proto .unhash hook: remove the socket from our hash tables. */
void mytcp_unhash(struct sock *sk)
{
	printk(KERN_INFO "%s:%d\n", __FUNCTION__, __LINE__ );
	inet_unhash( &mytcp_hashinfo, sk );
}

/* proto .get_port hook: bind a local port using our conflict check. */
static int mytcp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return myinet_csk_get_port( &mytcp_hashinfo, sk, snum,
				    myinet_csk_bind_conflict);
}

/*
 * Prepend an IPv4 header (plus any options) to an already-built
 * transport segment and hand it to netfilter/dst_output.  Addresses
 * come from the attached route; @saddr/@daddr are only consulted by
 * the options builder.  Returns the NF_HOOK verdict.
 */
int myip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			    u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	if (opt)
		iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
	else
		iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr));

	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	if( ip_dont_fragment(sk, &rt->u.dst) )
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = myip_select_ttl(inet, &rt->u.dst);
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	/* Map our private protocol number back to the on-wire value. */
	iph->protocol = sk->sk_protocol - ( MY_IPPROTO_UDP - IPPROTO_UDP );
	iph->tot_len = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		myip_options_build(skb, opt, daddr, rt, 0);
	}
	myip_send_check(iph);

	skb->priority = sk->sk_priority;
	return NF_HOOK(MY_PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		       rt->u.dst.dev, dst_output);
}

/*
 * Answer a pending connection request with a SYN-ACK.  Routes the
 * reply if no @dst was supplied, builds and checksums the segment and
 * sends it with the request's echoed IP options.  Releases @dst in
 * all cases; returns 0 on success, negative on failure (the printk
 * continues on the next source line).
 */
static int mytcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	if (!dst && (dst = myinet_csk_route_req(sk, req)) == NULL)
		goto out;

	skb = mytcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v4_check(th, skb->len,
					 ireq->loc_addr, ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));
		printk(KERN_INFO "%s:%d: send synack\n", __FUNCTION__, __LINE__ );
		err = myip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					      ireq->rmt_addr, ireq->opt);
		/* Congestion notification still counts as sent. */
		if (err == NET_XMIT_CN)
			err = 0;
	}
	printk(KERN_INFO "%s:%d: the return err: %d\n", __FUNCTION__,
__LINE__, err );out: dst_release(dst); return err;}static void mytcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req){ mytcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);}static void mytcp_v4_reqsk_destructor(struct request_sock *req){}struct request_sock_ops mytcp_request_sock_ops = { .family = MY_PF_INET, .obj_size = sizeof(struct tcp_request_sock), .rtx_syn_ack = mytcp_v4_send_synack, .send_ack = mytcp_v4_reqsk_send_ack, .destructor = mytcp_v4_reqsk_destructor, .send_reset = mytcp_v4_send_reset,};int mytcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp){ const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw); struct tcp_sock *tp = tcp_sk(sk); if (tcptw->tw_ts_recent_stamp && ( twp == NULL || ( mysysctl_tcp_tw_reuse && xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) { tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; if (tp->write_seq == 0) tp->write_seq = 1; tp->rx_opt.ts_recent = tcptw->tw_ts_recent; tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; sock_hold(sktw); return 1; } return 0;}static struct timewait_sock_ops mytcp_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp_timewait_sock), .twsk_unique = mytcp_twsk_unique,};struct proto mytcp_prot = { .name = "MY_TCP", .owner = THIS_MODULE, .close = mytcp_close, .connect = mytcp_v4_connect, .disconnect = mytcp_disconnect, .accept = myinet_csk_accept, .ioctl = mytcp_ioctl, .init = mytcp_v4_init_sock, .destroy = mytcp_v4_destroy_sock, .shutdown = mytcp_shutdown, .setsockopt = mytcp_setsockopt, .getsockopt = mytcp_getsockopt, .sendmsg = mytcp_sendmsg, .recvmsg = mytcp_recvmsg, .backlog_rcv = mytcp_v4_do_rcv, .hash = mytcp_v4_hash, .unhash = mytcp_unhash, .get_port = mytcp_v4_get_port, .enter_memory_pressure = mytcp_enter_memory_pressure, .sockets_allocated = &mytcp_sockets_allocated, .orphan_count = &mytcp_orphan_count, .memory_allocated = &mytcp_memory_allocated, .memory_pressure = &mytcp_memory_pressure, .sysctl_mem = 
				  mysysctl_tcp_mem,
	.sysctl_wmem		= mysysctl_tcp_wmem,
	.sysctl_rmem		= mysysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.twsk_prot		= &mytcp_timewait_sock_ops,
	.rsk_prot		= &mytcp_request_sock_ops,
};

/*
 * Module init: create the raw control socket used to emit resets and
 * ACKs without an established socket.  Panics on failure.
 */
void __init mytcp_v4_init(void)
{
	int err = sock_create_kern(MY_PF_INET, SOCK_RAW, MY_IPPROTO_TCP,
				   &mytcp_socket);
	if (err < 0)
		panic("Failed to create the TCP control socket.\n");
	/* Drop the module references taken during socket creation so the
	 * control socket does not pin this module; mytcp_v4_exit()
	 * re-takes them before sock_release().  NOTE(review): confirm
	 * exactly two references are acquired here. */
	module_put( mytcp_socket->ops->owner );
	module_put( mytcp_socket->ops->owner );
	mytcp_socket->sk->sk_allocation = GFP_ATOMIC;
	inet_sk( mytcp_socket->sk )->uc_ttl = -1;
	/* The control socket must never appear in the lookup hashes. */
	mytcp_socket->sk->sk_prot->unhash( mytcp_socket->sk );
}

/* Module exit: rebalance the module refs and release the control socket. */
void __exit mytcp_v4_exit(void)
{
	try_module_get( mytcp_socket->ops->owner );
	try_module_get( mytcp_socket->ops->owner );
	sock_release( mytcp_socket );
}