tcp_ipv6.c
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->bound_dev_if &&
			    sk->bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (sk->bound_dev_if == 0)
			return -EINVAL;
	}

	if (tp->ts_recent_stamp &&
	    ipv6_addr_cmp(&np->daddr, &usin->sin6_addr)) {
		tp->ts_recent = 0;
		tp->ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = tp->ext_header_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		sk->tp_pinfo.af_tcp.af_specific = &ipv6_mapped;
		sk->backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			tp->ext_header_len = exthdrlen;
			sk->tp_pinfo.af_tcp.af_specific = &ipv6_specific;
			sk->backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0,
				      __constant_htonl(0x0000FFFF),
				      sk->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0,
				      __constant_htonl(0x0000FFFF),
				      sk->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	fl.fl6_dst = &np->daddr;
	fl.fl6_src = saddr;
	fl.oif = sk->bound_dev_if;
	fl.uli_u.ports.dport = usin->sin6_port;
	fl.uli_u.ports.sport = sk->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
		fl.nl_u.ip6_u.daddr = rt0->addr;
	}

	dst = ip6_route_output(sk, &fl);

	if ((err = dst->error) != 0) {
		dst_release(dst);
		goto failure;
	}

	ip6_dst_store(sk, dst, NULL);
	sk->route_caps = dst->dev->features & ~NETIF_F_IP_CSUM;

	if (saddr == NULL) {
		err = ipv6_get_saddr(dst, &np->daddr, &saddr_buf);
		if (err)
			goto failure;

		saddr = &saddr_buf;
	}

	/* set the source address */
	ipv6_addr_copy(&np->rcv_saddr, saddr);
	ipv6_addr_copy(&np->saddr, saddr);
	sk->rcv_saddr = LOOPBACK4_IPV6;

	tp->ext_header_len = 0;
	if (np->opt)
		tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
	tp->mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	err = -ENOBUFS;
	buff = alloc_skb(MAX_TCP_HEADER + 15, sk->allocation);

	if (buff == NULL)
		goto failure;

	sk->dport = usin->sin6_port;

	/*
	 *	Init variables
	 */
	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     sk->sport, sk->dport);

	err = tcp_connect(sk, buff);
	if (err == 0)
		return 0;

failure:
	__sk_dst_reset(sk);
	sk->dport = 0;
	sk->route_caps = 0;
	return err;
}
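/*
 * Illustrative sketch, not part of the kernel source: for IPv4-mapped
 * destinations, tcp_v6_connect() above falls back to tcp_v4_connect()
 * and then records the chosen IPv4 source as a mapped IPv6 address,
 * ::ffff:a.b.c.d, via ipv6_addr_set(..., __constant_htonl(0x0000FFFF),
 * sk->saddr). The user-space program below builds the same mapped form
 * by hand; make_mapped() is a hypothetical helper invented for this
 * example.
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Hypothetical helper: write ::ffff:<v4> into out (16 bytes). */
static void make_mapped(unsigned char out[16], struct in_addr v4)
{
	memset(out, 0, 16);
	out[10] = 0xff;			/* bytes 10..11 = 0xffff           */
	out[11] = 0xff;
	memcpy(out + 12, &v4, 4);	/* bytes 12..15 = the IPv4 address */
}

int main(void)
{
	struct in_addr v4;
	unsigned char mapped[16];
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);
	make_mapped(mapped, v4);
	inet_ntop(AF_INET6, mapped, buf, sizeof(buf));
	printf("%s\n", buf);		/* prints ::ffff:192.0.2.1 */
	return 0;
}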
void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	struct in6_addr *saddr = &hdr->saddr;
	struct in6_addr *daddr = &hdr->daddr;
	struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_opt *tp;
	__u32 seq;

	sk = tcp_v6_lookup(daddr, th->dest, saddr, th->source,
			   skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(Icmp6InErrors);
		return;
	}

	if (sk->state == TCP_TIME_WAIT) {
		tcp_tw_put((struct tcp_tw_bucket*)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sk->lock.users)
		NET_INC_STATS_BH(LockDroppedIcmps);

	if (sk->state == TCP_CLOSE)
		goto out;

	tp = &sk->tp_pinfo.af_tcp;
	seq = ntohl(th->seq);
	if (sk->state != TCP_LISTEN && !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(OutOfWindowIcmps);
		goto out;
	}

	np = &sk->net_pinfo.af_inet6;

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sk->lock.users)
			goto out;
		if ((1 << sk->state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			fl.proto = IPPROTO_TCP;
			fl.nl_u.ip6_u.daddr = &np->daddr;
			fl.nl_u.ip6_u.saddr = &np->saddr;
			fl.oif = sk->bound_dev_if;
			fl.uli_u.ports.dport = sk->dport;
			fl.uli_u.ports.sport = sk->sport;

			dst = ip6_route_output(sk, &fl);
		} else
			dst_clone(dst);

		if (dst->error) {
			sk->err_soft = -dst->error;
		} else if (tp->pmtu_cookie > dst->pmtu) {
			tcp_sync_mss(sk, dst->pmtu);
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an open_request */
	switch (sk->state) {
		struct open_request *req, **prev;
		struct ipv6hdr hd;
	case TCP_LISTEN:
		if (sk->lock.users)
			goto out;

		/* Grrrr - fix this later. */
		ipv6_addr_copy(&hd.saddr, saddr);
		ipv6_addr_copy(&hd.daddr, daddr);
		req = tcp_v6_search_req(tp, &hd, th, tcp_v6_iif(skb), &prev);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != req->snt_isn) {
			NET_INC_STATS_BH(OutOfWindowIcmps);
			goto out;
		}

		tcp_synq_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (sk->lock.users == 0) {
			TCP_INC_STATS_BH(TcpAttemptFails);
			sk->err = err;
			sk->error_report(sk);	/* Wake people up to see the error
						 * (see connect in sock.c) */
			tcp_done(sk);
		} else {
			sk->err_soft = err;
		}
		goto out;
	}

	if (sk->lock.users == 0 && np->recverr) {
		sk->err = err;
		sk->error_report(sk);
	} else {
		sk->err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
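/*
 * Illustrative sketch, not kernel code: tcp_v6_err() above drops ICMP
 * errors whose echoed sequence number falls outside [snd_una, snd_nxt].
 * The kernel's between() macro does this with unsigned 32-bit wraparound
 * arithmetic; a minimal user-space restatement with a smoke test:
 */
#include <assert.h>
#include <stdint.h>

/* 1 iff seq lies in the window [lo, hi], modulo 2^32. */
static int seq_between(uint32_t seq, uint32_t lo, uint32_t hi)
{
	return hi - lo >= seq - lo;
}

int main(void)
{
	assert(seq_between(100, 100, 200));	/* lower edge included */
	assert(seq_between(4294967290u,
			   4294967280u, 10u));	/* window wraps past 0 */
	assert(!seq_between(50, 100, 200));	/* below the window    */
	return 0;
}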
static int tcp_v6_send_synack(struct sock *sk, struct open_request *req,
			      struct dst_entry *dst)
{
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct flowi fl;
	int err = -1;

	fl.proto = IPPROTO_TCP;
	fl.nl_u.ip6_u.daddr = &req->af.v6_req.rmt_addr;
	fl.nl_u.ip6_u.saddr = &req->af.v6_req.loc_addr;
	fl.fl6_flowlabel = 0;
	fl.oif = req->af.v6_req.iif;
	fl.uli_u.ports.dport = req->rmt_port;
	fl.uli_u.ports.sport = sk->sport;

	if (dst == NULL) {
		opt = sk->net_pinfo.af_inet6.opt;
		if (opt == NULL &&
		    sk->net_pinfo.af_inet6.rxopt.bits.srcrt == 2 &&
		    req->af.v6_req.pktopts) {
			struct sk_buff *pktopts = req->af.v6_req.pktopts;
			struct inet6_skb_parm *rxopt = (struct inet6_skb_parm *)pktopts->cb;
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk,
					(struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			fl.nl_u.ip6_u.daddr = rt0->addr;
		}

		dst = ip6_route_output(sk, &fl);
		if (dst->error)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &req->af.v6_req.loc_addr,
					 &req->af.v6_req.rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		fl.nl_u.ip6_u.daddr = &req->af.v6_req.rmt_addr;
		err = ip6_xmit(sk, skb, &fl, opt);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	dst_release(dst);
	if (opt && opt != sk->net_pinfo.af_inet6.opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	return err;
}

static void tcp_v6_or_free(struct open_request *req)
{
	if (req->af.v6_req.pktopts)
		kfree_skb(req->af.v6_req.pktopts);
}

static struct or_calltable or_ipv6 = {
	AF_INET6,
	tcp_v6_send_synack,
	tcp_v6_or_send_ack,
	tcp_v6_or_free,
	tcp_v6_send_reset
};

static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;

	if (sk->net_pinfo.af_inet6.rxopt.all) {
		if ((opt->hop && sk->net_pinfo.af_inet6.rxopt.bits.hopopts) ||
		    ((IPV6_FLOWINFO_MASK & *(u32*)skb->nh.raw) &&
		     sk->net_pinfo.af_inet6.rxopt.bits.rxflow) ||
		    (opt->srcrt && sk->net_pinfo.af_inet6.rxopt.bits.srcrt) ||
		    ((opt->dst1 || opt->dst0) &&
		     sk->net_pinfo.af_inet6.rxopt.bits.dstopts))
			return 1;
	}
	return 0;
}

static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
			      struct sk_buff *skb)
{
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len,
					    IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len,
					    IPPROTO_TCP,
					    csum_partial((char *)th,
							 th->doff << 2,
							 skb->csum));
	}
}

static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */
	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr), GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff << 2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	fl.nl_u.ip6_u.daddr = &skb->nh.ipv6h->saddr;
	fl.nl_u.ip6_u.saddr = &skb->nh.ipv6h->daddr;
	fl.fl6_flowlabel = 0;

	t1->check = csum_ipv6_magic(fl.nl_u.ip6_u.saddr,
				    fl.nl_u.ip6_u.daddr,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = tcp_v6_iif(skb);
	fl.uli_u.ports.dport = t1->dest;
	fl.uli_u.ports.sport = t1->source;

	/* sk = NULL, but it is safe for now. RST socket required. */
	buff->dst = ip6_route_output(NULL, &fl);

	if (buff->dst->error == 0) {
		ip6_xmit(NULL, buff, &fl, NULL);
		TCP_INC_STATS_BH(TcpOutSegs);
		TCP_INC_STATS_BH(TcpOutRsts);
		return;
	}

	kfree_skb(buff);
}
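/*
 * Illustrative sketch, not kernel code: csum_ipv6_magic(), used in
 * tcp_v6_send_check() and tcp_v6_send_reset() above, folds the IPv6
 * pseudo-header (source address, destination address, upper-layer
 * length, next header) into the one's-complement sum of the TCP
 * segment. A straightforward user-space equivalent, assuming the
 * segment is handed in as a flat buffer:
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Accumulate 16-bit big-endian words into a running sum. */
static uint32_t sum16(const void *data, size_t len, uint32_t sum)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, zero-padded */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* TCP checksum over the 40-byte IPv6 pseudo-header plus the segment. */
static uint16_t tcp6_checksum(const struct in6_addr *src,
			      const struct in6_addr *dst,
			      const void *seg, uint32_t len)
{
	uint8_t ph[40];
	uint32_t sum;

	memcpy(ph, src, 16);		 /* source address      */
	memcpy(ph + 16, dst, 16);	 /* destination address */
	ph[32] = len >> 24; ph[33] = len >> 16;
	ph[34] = len >> 8;  ph[35] = len;/* upper-layer length  */
	ph[36] = ph[37] = ph[38] = 0;	 /* zero padding        */
	ph[39] = IPPROTO_TCP;		 /* next header         */

	sum = sum16(ph, sizeof(ph), 0);
	sum = sum16(seg, len, sum);
	while (sum >> 16)		 /* fold the carries    */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	struct in6_addr src, dst;
	uint8_t seg[20] = { 0 };	/* a bare 20-byte TCP header */

	inet_pton(AF_INET6, "2001:db8::1", &src);
	inet_pton(AF_INET6, "2001:db8::2", &dst);
	printf("checksum = 0x%04x\n",
	       tcp6_checksum(&src, &dst, seg, sizeof(seg)));
	return 0;
}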
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr), GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr));

	if (ts)
		tot_len += 3*4;

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32*)(t1 + 1);
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	fl.nl_u.ip6_u.daddr = &skb->nh.ipv6h->saddr;
	fl.nl_u.ip6_u.saddr = &skb->nh.ipv6h->daddr;
	fl.fl6_flowlabel = 0;

	t1->check = csum_ipv6_magic(fl.nl_u.ip6_u.saddr,
				    fl.nl_u.ip6_u.daddr,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = tcp_v6_iif(skb);
	fl.uli_u.ports.dport = t1->dest;
	fl.uli_u.ports.sport = t1->source;

	buff->dst = ip6_route_output(NULL, &fl);

	if (buff->dst->error == 0) {
		ip6_xmit(NULL, buff, &fl, NULL);
		TCP_INC_STATS_BH(TcpOutSegs);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

	tcp_v6_send_ack(skb, tw->snd_nxt, tw->rcv_nxt,
			tw->rcv_wnd >> tw->rcv_wscale, tw->ts_recent);

	tcp_tw_put(tw);
}
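/*
 * Illustrative sketch, not kernel code: when a timestamp is echoed,
 * tcp_v6_send_ack() above appends a 12-byte option block -- two NOPs
 * for 32-bit alignment, then kind 8 (timestamp), length 10, TSval,
 * TSecr -- which is exactly the first word it assembles with
 * __constant_htonl(). The same layout, spelled out byte by byte;
 * the constants are from RFC 1323:
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Fill opt[12] with NOP, NOP, TIMESTAMP(kind 8, len 10), TSval, TSecr. */
static void tcp_ts_option(uint8_t opt[12], uint32_t tsval, uint32_t tsecr)
{
	uint32_t v;

	opt[0] = 1;			/* TCPOPT_NOP        */
	opt[1] = 1;			/* TCPOPT_NOP        */
	opt[2] = 8;			/* TCPOPT_TIMESTAMP  */
	opt[3] = 10;			/* TCPOLEN_TIMESTAMP */
	v = htonl(tsval);
	memcpy(opt + 4, &v, 4);		/* TSval: our clock   */
	v = htonl(tsecr);
	memcpy(opt + 8, &v, 4);		/* TSecr: peer's echo */
}

int main(void)
{
	uint8_t opt[12];

	tcp_ts_option(opt, 123456u, 654321u);
	return (opt[2] == 8 && opt[3] == 10) ? 0 : 1;
}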