tcp_ipv6.c
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_opt *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_opt *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface. */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->ts_recent_stamp &&
	    ipv6_addr_cmp(&np->daddr, &usin->sin6_addr)) {
		tp->ts_recent = 0;
		tp->ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = tp->ext_header_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		tp->af_specific = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			tp->ext_header_len = exthdrlen;
			tp->af_specific = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
		dst_release(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	ip6_dst_store(sk, dst, NULL);
	sk->sk_route_caps = dst->dev->features &
			    ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	tp->ext_header_len = 0;
	if (np->opt)
		tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
	tp->ext2_header_len = dst->header_len;

	tp->mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
			sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = tcp_v6_hash_connect(sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
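/*
 * The IPV6_ADDR_MAPPED branch above is what lets a single AF_INET6
 * socket reach IPv4 peers: a destination of the form ::ffff:a.b.c.d is
 * handed to tcp_v4_connect() and the socket is rewired to the IPv4
 * receive path. The sketch below is standalone userspace code, not
 * part of this file; the address and port are illustrative (it assumes
 * some listener on 127.0.0.1:80) and it drives exactly that branch.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in6 dst;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_port = htons(80);		/* example port */
	/* IPv4-mapped form of 127.0.0.1 */
	inet_pton(AF_INET6, "::ffff:127.0.0.1", &dst.sin6_addr);

	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	else
		puts("connected via the v4-mapped path");

	close(fd);
	return 0;
}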
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_opt *tp;
	__u32 seq;

	sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source,
			   skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		tcp_tw_put((struct tcp_tw_bucket *)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_opt *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			 * to handle the rthdr case. Ignore this
			 * complexity for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		if (tp->pmtu_cookie > dst_pmtu(dst)) {
			tcp_sync_mss(sk, dst_pmtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */

		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an open_request */
	switch (sk->sk_state) {
		struct open_request *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
					&hdr->saddr, tcp_v6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != req->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		tcp_synq_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/* Wake people up to see the error
			 * (see connect in sock.c) */
			sk->sk_error_report(sk);
			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
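/*
 * The !between(seq, tp->snd_una, tp->snd_nxt) test above is the usual
 * anti-spoofing guard: an ICMP error is only believed if the sequence
 * number it quotes falls inside the current send window, and the
 * comparison has to survive 32-bit wraparound. The standalone sketch
 * below mirrors the kernel's between() helper (as defined in
 * include/net/tcp.h of this era); it is not part of this file.
 */
#include <assert.h>
#include <stdint.h>

/* Wraparound-safe test for seq2 <= seq1 <= seq3 in sequence space. */
static int seq_between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

int main(void)
{
	assert(seq_between(5, 1, 10));			/* plain ordering */
	assert(seq_between(5, 0xfffffff0u, 20));	/* across the 2^32 wrap */
	assert(!seq_between(100, 0xfffffff0u, 20));	/* outside the window */
	return 0;
}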
static int tcp_v6_send_synack(struct sock *sk, struct open_request *req,
			      struct dst_entry *dst)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &req->af.v6_req.loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = req->af.v6_req.iif;
	fl.fl_ip_dport = req->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.srcrt == 2 &&
		    req->af.v6_req.pktopts) {
			struct sk_buff *pktopts = req->af.v6_req.pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk,
					(struct ipv6_rt_hdr *)(pktopts->nh.raw +
							       rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &req->af.v6_req.loc_addr,
					 &req->af.v6_req.rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	dst_release(dst);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	return err;
}

static void tcp_v6_or_free(struct open_request *req)
{
	if (req->af.v6_req.pktopts)
		kfree_skb(req->af.v6_req.pktopts);
}

static struct or_calltable or_ipv6 = {
	.family		= AF_INET6,
	.rtx_syn_ack	= tcp_v6_send_synack,
	.send_ack	= tcp_v6_or_send_ack,
	.destructor	= tcp_v6_or_free,
	.send_reset	= tcp_v6_send_reset
};
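/*
 * tcp_v6_send_synack() above finishes the SYN-ACK by computing the TCP
 * checksum over the IPv6 pseudo-header (source address, destination
 * address, upper-layer length, and next-header value, per RFC 2460)
 * plus the segment itself; that is what tcp_v6_check()/csum_ipv6_magic()
 * do incrementally. The standalone sketch below (not part of this file;
 * function names are illustrative) spells out the same computation in
 * portable C.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Accumulate a byte buffer as big-endian 16-bit words. */
static uint32_t csum_add(uint32_t sum, const void *data, size_t len)
{
	const uint8_t *p = data;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;	/* pad odd tail */
	return sum;
}

/* One's-complement checksum of the RFC 2460 pseudo-header plus the TCP
 * segment; the result goes into th->check in network byte order. */
static uint16_t ipv6_tcp_checksum(const struct in6_addr *src,
				  const struct in6_addr *dst,
				  const void *tcp_seg, uint32_t len)
{
	uint32_t sum = 0;

	sum = csum_add(sum, src, sizeof(*src));
	sum = csum_add(sum, dst, sizeof(*dst));
	sum += len;	/* 32-bit upper-layer packet length */
	sum += 6;	/* 3 zero bytes + next header (IPPROTO_TCP) */
	sum = csum_add(sum, tcp_seg, len);

	while (sum >> 16)	/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	struct in6_addr src, dst;
	uint8_t seg[20] = { 0 };	/* zeroed 20-byte TCP header */

	/* example documentation-prefix addresses */
	inet_pton(AF_INET6, "2001:db8::1", &src);
	inet_pton(AF_INET6, "2001:db8::2", &dst);
	printf("checksum: 0x%04x\n",
	       ipv6_tcp_checksum(&src, &dst, seg, sizeof(seg)));
	return 0;
}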
static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (np->rxopt.all) {
		if ((opt->hop && np->rxopt.bits.hopopts) ||
		    ((IPV6_FLOWINFO_MASK & *(u32 *)skb->nh.raw) &&
		     np->rxopt.bits.rxflow) ||
		    (opt->srcrt && np->rxopt.bits.srcrt) ||
		    ((opt->dst1 || opt->dst0) && np->rxopt.bits.dstopts))
			return 1;
	}
	return 0;
}

static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
			      struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len,
					     IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len,
					    IPPROTO_TCP,
					    csum_partial((char *)th,
							 th->doff << 2,
							 skb->csum));
	}
}

static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */
	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
			 sizeof(struct tcphdr), GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) +
			  sizeof(struct tcphdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1) / 4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				    skb->len - (th->doff << 2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = tcp_v6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if ((xfrm_lookup(&buff->dst, &fl, NULL, 0)) < 0) {
			dst_release(buff->dst);
			return;
		}

		ip6_xmit(NULL, buff, &fl, NULL, 0);
		TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
		TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += 3 * 4;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32 *)(t1 + 1);
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = tcp_v6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if ((xfrm_lookup(&buff->dst, &fl, NULL, 0)) < 0) {
			dst_release(buff->dst);
			return;
		}
		ip6_xmit(NULL, buff, &fl, NULL, 0);
		TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
		return;
	}

	kfree_skb(buff);
}
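/*
 * The seq/ack selection in tcp_v6_send_reset() above follows RFC 793:
 * if the offending segment carried an ACK, the RST reuses that
 * acknowledgment number as its own sequence number; otherwise the RST
 * must ACK everything the segment occupied in sequence space, where
 * SYN and FIN each count as one octet and skb->len - (th->doff << 2)
 * is the payload length. A standalone sketch of that rule follows (not
 * part of this file; the struct and values are illustrative).
 */
#include <stdint.h>
#include <stdio.h>

struct seg {
	uint32_t seq, ack_seq, payload_len;
	unsigned ack:1, syn:1, fin:1;
};

/* Pick the sequence/ack numbers for a RST answering segment 'in'. */
static void rst_numbers(const struct seg *in, uint32_t *seq, uint32_t *ack)
{
	if (in->ack) {
		*seq = in->ack_seq;
		*ack = 0;	/* no ACK flag on this RST */
	} else {
		*seq = 0;
		*ack = in->seq + in->syn + in->fin + in->payload_len;
	}
}

int main(void)
{
	struct seg syn = { .seq = 1000, .syn = 1 };	/* a bare SYN */
	uint32_t seq, ack;

	rst_numbers(&syn, &seq, &ack);
	printf("RST seq=%u ack=%u\n", seq, ack);	/* ack = 1001 */
	return 0;
}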