/* ipsec_tunnel.c — KLIPS IPsec virtual tunnel device: transmit path.
 * (Excerpt; file recovered from a web code-viewer dump.) */
/*
 * Tail of ipsec_tunnel_SAlookup() -- the opening of this function lies
 * before this excerpt.  This fragment copies the destination identity
 * from the matched eroute into the HOLD shunt SA; on allocation failure
 * it drops the packet, releases eroute_lock and reports ERRMEMALLOC.
 */
    {
        len = ixs->ips.ips_ident_d.len * IPSEC_PFKEYv2_ALIGN - sizeof(struct sadb_ident);
        KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                    "klips_debug:ipsec_xmit_SAlookup: "
                    "allocating %d bytes for ident_d shunt SA of HOLD: skb stored in HOLD.\n",
                    len);
        if ((ixs->ips.ips_ident_d.data = kmalloc(len, GFP_ATOMIC)) == NULL) {
            printk(KERN_WARNING
                   "klips_debug:ipsec_xmit_SAlookup: "
                   "Failed, tried to allocate %d bytes for dest ident.\n",
                   len);
            ixs->stats->tx_dropped++;
            spin_unlock(&eroute_lock);
            return IPSEC_XMIT_ERRMEMALLOC;
        }
        memcpy(ixs->ips.ips_ident_d.data,
               ixs->eroute->er_ident_d.data,
               len);
    }
    }
    }
    spin_unlock(&eroute_lock);
    return IPSEC_XMIT_OK;
}

/*
 * ipsec_tunnel_restore_hard_header: after the (possibly recursive)
 * transforms have run, push the previously saved link-layer header back
 * in front of the packet and, when NAT-Traversal is active for this SA,
 * insert a UDP header (and optional non-IKE marker space) around the
 * ESP payload.  Returns IPSEC_XMIT_OK, or an error code after bumping
 * tx_errors; on error the caller drops the skb.
 */
enum ipsec_xmit_value
ipsec_tunnel_restore_hard_header(struct ipsec_xmit_state*ixs)
{
    KLIPS_PRINT(debug_tunnel & DB_TN_CROUT,
                "klips_debug:ipsec_xmit_restore_hard_header: "
                "After recursive xforms -- head,tailroom: %d,%d\n",
                skb_headroom(ixs->skb),
                skb_tailroom(ixs->skb));
    if(ixs->saved_header) {
        /* sanity: there must still be room to push the hard header back */
        if(skb_headroom(ixs->skb) < ixs->hard_header_len) {
            printk(KERN_WARNING
                   "klips_error:ipsec_xmit_restore_hard_header: "
                   "tried to skb_push hhlen=%d, %d available. This should never happen, please report.\n",
                   ixs->hard_header_len,
                   skb_headroom(ixs->skb));
            ixs->stats->tx_errors++;
            return IPSEC_XMIT_PUSHPULLERR;
        }
        skb_push(ixs->skb, ixs->hard_header_len);
        /* byte-wise copy of the saved hard header back into the skb */
        {
            int i;
            for (i = 0; i < ixs->hard_header_len; i++) {
                ixs->skb->data[i] = ixs->saved_header[i];
            }
        }
    }
#ifdef CONFIG_KLIPS_NAT_TRAVERSAL
    if (ixs->natt_type && ixs->natt_head) {
        struct iphdr *ipp = ixs->skb->nh.iph;
        struct udphdr *udp;
        KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                    "klips_debug:ipsec_tunnel_start_xmit: "
                    "encapsuling packet into UDP (NAT-Traversal) (%d %d)\n",
                    ixs->natt_type, ixs->natt_head);
        ixs->iphlen = ipp->ihl << 2;
        /* NOTE(review): tot_len is enlarged BEFORE the tailroom check
         * below; on the (should-never-happen) error path the IP header
         * is left inconsistent, though the packet is dropped anyway. */
        ipp->tot_len = htons(ntohs(ipp->tot_len) + ixs->natt_head);
        if(skb_tailroom(ixs->skb) < ixs->natt_head) {
            printk(KERN_WARNING
                   "klips_error:ipsec_tunnel_start_xmit: "
                   "tried to skb_put %d, %d available. "
                   "This should never happen, please report.\n",
                   ixs->natt_head,
                   skb_tailroom(ixs->skb));
            ixs->stats->tx_errors++;
            return IPSEC_XMIT_ESPUDP;
        }
        skb_put(ixs->skb, ixs->natt_head);
        /* the UDP header goes immediately after the IP header */
        udp = (struct udphdr *)((char *)ipp + ixs->iphlen);
        /* move ESP hdr after UDP hdr */
        memmove((void *)((char *)udp + ixs->natt_head),
                (void *)(udp),
                ntohs(ipp->tot_len) - ixs->iphlen - ixs->natt_head);
        /* clear UDP & Non-IKE Markers (if any) */
        memset(udp, 0, ixs->natt_head);
        /* fill in the UDP header fields */
        udp->source = htons(ixs->natt_sport);
        udp->dest = htons(ixs->natt_dport);
        udp->len = htons(ntohs(ipp->tot_len) - ixs->iphlen);
        /* set protocol */
        ipp->protocol = IPPROTO_UDP;
        /* fix IP checksum */
        ipp->check = 0;
        ipp->check = ip_fast_csum((unsigned char *)ipp, ipp->ihl);
    }
#endif
    KLIPS_PRINT(debug_tunnel & DB_TN_CROUT,
                "klips_debug:ipsec_xmit_restore_hard_header: "
                "With hard_header, final head,tailroom: %d,%d\n",
                skb_headroom(ixs->skb),
                skb_tailroom(ixs->skb));
    return IPSEC_XMIT_OK;
}

/*
 * ipsec_tunnel_send: route the encapsulated packet out through the
 * attached physical device and hand it to the IP output path.  Kernel
 * version differences (2.0 / 2.2 / 2.4 / 2.5) are selected via the
 * NET_21, NETDEV_23 and NETDEV_25 conditionals.  Once the packet has
 * been handed off, ixs->skb is set to NULL so that cleanup will not
 * free it a second time.
 */
enum ipsec_xmit_value
ipsec_tunnel_send(struct ipsec_xmit_state*ixs)
{
#ifdef NETDEV_25
    struct flowi fl;
#endif

#ifdef NET_21 /* 2.2 and 2.4 kernels */
    /* new route/dst cache code from James Morris */
    ixs->skb->dev = ixs->physdev;
#ifdef NETDEV_25
    fl.oif = ixs->physdev->iflink;
    fl.nl_u.ip4_u.daddr = ixs->skb->nh.iph->daddr;
    /* for a pass-through packet, let routing pick the source address */
    fl.nl_u.ip4_u.saddr = ixs->pass ? 0 : ixs->skb->nh.iph->saddr;
    fl.nl_u.ip4_u.tos = RT_TOS(ixs->skb->nh.iph->tos);
    fl.proto = ixs->skb->nh.iph->protocol;
    if ((ixs->error = ip_route_output_key(&ixs->route, &fl))) {
#else
    /*skb_orphan(ixs->skb);*/
    if((ixs->error = ip_route_output(&ixs->route,
                                     ixs->skb->nh.iph->daddr,
                                     ixs->pass ? 0 : ixs->skb->nh.iph->saddr,
                                     RT_TOS(ixs->skb->nh.iph->tos),
                                     /* mcr->rgb: should this be 0 instead? */
                                     ixs->physdev->iflink))) {
#endif
        ixs->stats->tx_errors++;
        /* NOTE(review): ip_route_output just FAILED, yet ixs->route is
         * dereferenced in the debug print below -- it looks stale or
         * uninitialized here; verify against the full source. */
        KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                    "klips_debug:ipsec_xmit_send: "
                    "ip_route_output failed with error code %d, rt->u.dst.dev=%s, dropped\n",
                    ixs->error,
                    ixs->route->u.dst.dev->name);
        return IPSEC_XMIT_ROUTEERR;
    }
    /* routing back to ourselves would loop the packet forever */
    if(ixs->dev == ixs->route->u.dst.dev) {
        ip_rt_put(ixs->route);
        /* This is recursion, drop it. */
        ixs->stats->tx_errors++;
        KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                    "klips_debug:ipsec_xmit_send: "
                    "suspect recursion, dev=rt->u.dst.dev=%s, dropped\n",
                    ixs->dev->name);
        return IPSEC_XMIT_RECURSDETECT;
    }
    dst_release(ixs->skb->dst);
    ixs->skb->dst = &ixs->route->u.dst;
    ixs->stats->tx_bytes += ixs->skb->len;
    /* trim skb->data up to the network header before handing off */
    if(ixs->skb->len < ixs->skb->nh.raw - ixs->skb->data) {
        ixs->stats->tx_errors++;
        printk(KERN_WARNING
               "klips_error:ipsec_xmit_send: "
               "tried to __skb_pull nh-data=%ld, %d available. This should never happen, please report.\n",
               (unsigned long)(ixs->skb->nh.raw - ixs->skb->data),
               ixs->skb->len);
        return IPSEC_XMIT_PUSHPULLERR;
    }
    __skb_pull(ixs->skb, ixs->skb->nh.raw - ixs->skb->data);
#ifdef SKB_RESET_NFCT
    /* drop any conntrack reference so the tunnelled packet is re-tracked */
    if(!ixs->pass) {
        nf_conntrack_put(ixs->skb->nfct);
        ixs->skb->nfct = NULL;
    }
#ifdef CONFIG_NETFILTER_DEBUG
    ixs->skb->nf_debug = 0;
#endif /* CONFIG_NETFILTER_DEBUG */
#endif /* SKB_RESET_NFCT */
    KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                "klips_debug:ipsec_xmit_send: "
                "...done, calling ip_send() on device:%s\n",
                ixs->skb->dev ? ixs->skb->dev->name : "NULL");
    KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->skb->nh.iph);
#ifdef NETDEV_23 /* 2.4 kernels */
    {
        int err;
        /* re-enter the stack through the LOCAL_OUT netfilter hook;
         * ipsec_tunnel_xmit2 finishes the transmit on accept */
        err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, ixs->skb, NULL,
                      ixs->route->u.dst.dev, ipsec_tunnel_xmit2);
        if(err != NET_XMIT_SUCCESS && err != NET_XMIT_CN) {
            if(net_ratelimit())
                printk(KERN_ERR
                       "klips_error:ipsec_xmit_send: "
                       "ip_send() failed, err=%d\n",
                       -err);
            ixs->stats->tx_errors++;
            ixs->stats->tx_aborted_errors++;
            ixs->skb = NULL;
            return IPSEC_XMIT_IPSENDFAILURE;
        }
    }
#else /* NETDEV_23 */
    /* 2.2 kernels */
    ip_send(ixs->skb);
#endif /* NETDEV_23 */
#else /* NET_21 */
    /* 2.0 kernels */
    ixs->skb->arp = 1;
    /* ISDN/ASYNC PPP from Matjaz Godec. */
    /* skb->protocol = htons(ETH_P_IP); */
    KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                "klips_debug:ipsec_xmit_send: "
                "...done, calling dev_queue_xmit() or ip_fragment().\n");
    IP_SEND(ixs->skb, ixs->physdev);
#endif /* NET_21 */
    ixs->stats->tx_packets++;
    /* the skb now belongs to the stack; forget it */
    ixs->skb = NULL;
    return IPSEC_XMIT_OK;
}

/*
 * ipsec_tunnel_cleanup: release everything the xmit state may still own
 * (saved hard header, skbs, identity buffers) and re-enable the device
 * queue.  Called on every exit path of ipsec_tunnel_start_xmit; fields
 * already handed off are NULL and therefore skipped.
 */
void
ipsec_tunnel_cleanup(struct ipsec_xmit_state*ixs)
{
#if defined(HAS_NETIF_QUEUE) || defined (HAVE_NETIF_QUEUE)
    netif_wake_queue(ixs->dev);
#else /* defined(HAS_NETIF_QUEUE) || defined (HAVE_NETIF_QUEUE) */
    ixs->dev->tbusy = 0;
#endif /* defined(HAS_NETIF_QUEUE) || defined (HAVE_NETIF_QUEUE) */
    if(ixs->saved_header) {
        kfree(ixs->saved_header);
    }
    if(ixs->skb) {
        dev_kfree_skb(ixs->skb, FREE_WRITE);
    }
    if(ixs->oskb) {
        dev_kfree_skb(ixs->oskb, FREE_WRITE);
    }
    if (ixs->ips.ips_ident_s.data) {
        kfree(ixs->ips.ips_ident_s.data);
    }
    if (ixs->ips.ips_ident_d.data) {
        kfree(ixs->ips.ips_ident_d.data);
    }
}

/*
 * This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
int
ipsec_tunnel_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ipsec_xmit_state ixs_mem;
    struct ipsec_xmit_state *ixs = &ixs_mem;
    enum ipsec_xmit_value stat;

#ifdef CONFIG_KLIPS_NAT_TRAVERSAL
    /* redundant with the memset below, which zeroes the whole state */
    ixs->natt_type = 0, ixs->natt_head = 0;
    ixs->natt_sport = 0, ixs->natt_dport = 0;
#endif
    memset((caddr_t)ixs, 0, sizeof(*ixs));
    ixs->oskb = NULL;
    ixs->saved_header = NULL; /* saved copy of the hard header */
    ixs->route = NULL;
    memset((caddr_t)&(ixs->ips), 0, sizeof(ixs->ips));
    ixs->dev = dev;
    ixs->skb = skb;

    /* validate device and skb, then strip the hard header and find the
     * SA for this flow; any failure falls through to cleanup */
    stat = ipsec_xmit_sanity_check_dev(ixs);
    if(stat != IPSEC_XMIT_OK) {
        goto cleanup;
    }
    stat = ipsec_xmit_sanity_check_skb(ixs);
    if(stat != IPSEC_XMIT_OK) {
        goto cleanup;
    }
    stat = ipsec_tunnel_strip_hard_header(ixs);
    if(stat != IPSEC_XMIT_OK) {
        goto cleanup;
    }
    stat = ipsec_tunnel_SAlookup(ixs);
    if(stat != IPSEC_XMIT_OK) {
        KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                    "klips_debug:ipsec_tunnel_start_xmit: SAlookup failed: %d\n",
                    stat);
        goto cleanup;
    }
    ixs->innersrc = ixs->iph->saddr;
    /* start encapsulation loop here XXX */
    do {
        stat = ipsec_xmit_encap_bundle(ixs);
        if(stat != IPSEC_XMIT_OK) {
            if(stat == IPSEC_XMIT_PASS) {
                /* %pass eroute: send without transforming */
                goto bypass;
            }
            KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                        "klips_debug:ipsec_tunnel_start_xmit: encap_bundle failed: %d\n",
                        stat);
            goto cleanup;
        }
        /* re-match the (now re-encapsulated) outer packet against the
         * eroute table to detect nested tunnels */
        ixs->matcher.sen_ip_src.s_addr = ixs->iph->saddr;
        ixs->matcher.sen_ip_dst.s_addr = ixs->iph->daddr;
        ixs->matcher.sen_proto = ixs->iph->protocol;
        ipsec_extract_ports(ixs->iph, &ixs->matcher);
        spin_lock(&eroute_lock);
        ixs->eroute = ipsec_findroute(&ixs->matcher);
        if(ixs->eroute) {
            ixs->outgoing_said = ixs->eroute->er_said;
            ixs->eroute_pid = ixs->eroute->er_pid;
            ixs->eroute->er_count++;
            ixs->eroute->er_lasttime = jiffies/HZ;
        }
        spin_unlock(&eroute_lock);
        KLIPS_PRINT((debug_tunnel & DB_TN_XMIT) &&
                    /* ((ixs->orgdst != ixs->newdst) || (ixs->orgsrc != ixs->newsrc)) */
                    (ixs->orgedst != ixs->outgoing_said.dst.u.v4.sin_addr.s_addr) &&
                    ixs->outgoing_said.dst.u.v4.sin_addr.s_addr &&
                    ixs->eroute,
                    "klips_debug:ipsec_tunnel_start_xmit: "
                    "We are recursing here.\n");
    } while(/*((ixs->orgdst != ixs->newdst) || (ixs->orgsrc != ixs->newsrc))*/
            (ixs->orgedst != ixs->outgoing_said.dst.u.v4.sin_addr.s_addr) &&
            ixs->outgoing_said.dst.u.v4.sin_addr.s_addr &&
            ixs->eroute);

    stat = ipsec_tunnel_restore_hard_header(ixs);
    if(stat != IPSEC_XMIT_OK) {
        goto cleanup;
    }
 bypass:
    stat = ipsec_tunnel_send(ixs);
 cleanup:
    ipsec_tunnel_cleanup(ixs);
    /* always report success to the stack; errors were accounted above */
    return 0;
}

/* return the per-device statistics kept in our private structure */
DEBUG_NO_STATIC struct net_device_stats *
ipsec_tunnel_get_stats(struct net_device *dev)
{
    return &(((struct ipsecpriv *)(dev->priv))->mystats);
}

/*
 * Revectored calls.
 * For each of these calls, a field exists in our private structure.
 */

/*
 * ipsec_tunnel_hard_header: build the link-layer header by revectoring
 * to the attached physical device's hard_header routine.  NOTE: this
 * function continues beyond the end of this excerpt; only the argument
 * validation and the start of the non-IPv6 path are visible here.
 */
DEBUG_NO_STATIC int
ipsec_tunnel_hard_header(struct sk_buff *skb, struct net_device *dev,
                         unsigned short type, void *daddr, void *saddr,
                         unsigned len)
{
    struct ipsecpriv *prv = dev->priv;
    /* tmp/ret are presumably used in the unseen remainder of this
     * function -- do not remove without checking the full source */
    struct net_device *tmp;
    int ret;
    struct net_device_stats *stats; /* This device's statistics */

    if(skb == NULL) {
        KLIPS_PRINT(debug_tunnel & DB_TN_REVEC,
                    "klips_debug:ipsec_tunnel_hard_header: "
                    "no skb...\n");
        return -ENODATA;
    }
    if(dev == NULL) {
        KLIPS_PRINT(debug_tunnel & DB_TN_REVEC,
                    "klips_debug:ipsec_tunnel_hard_header: "
                    "no device...\n");
        return -ENODEV;
    }
    KLIPS_PRINT(debug_tunnel & DB_TN_REVEC,
                "klips_debug:ipsec_tunnel_hard_header: "
                "skb->dev=%s dev=%s.\n",
                skb->dev ? skb->dev->name : "NULL",
                dev->name);
    if(prv == NULL) {
        KLIPS_PRINT(debug_tunnel & DB_TN_REVEC,
                    "klips_debug:ipsec_tunnel_hard_header: "
                    "no private space associated with dev=%s\n",
                    dev->name ? dev->name : "NULL");
        return -ENODEV;
    }
    stats = (struct net_device_stats *) &(prv->mystats);
    if(prv->dev == NULL) {
        KLIPS_PRINT(debug_tunnel & DB_TN_REVEC,
                    "klips_debug:ipsec_tunnel_hard_header: "
                    "no physical device associated with dev=%s\n",
                    dev->name ? dev->name : "NULL");
        stats->tx_dropped++;
        return -ENODEV;
    }
    /* check if we have to send a IPv6 packet. It might be a Router
       Solicitation, where the building of the packet happens in reverse
       order: 1. ll hdr, 2. IPv6 hdr, 3. ICMPv6 hdr -> skb->nh.raw is
       still uninitialized when this function is called!!  If this is no
       IPv6 packet, we can print debugging messages, otherwise we skip
       all debugging messages and just build the ll header */
    if(type != ETH_P_IPV6) {
        /* execute this only, if we don't have to build the header for a
           IPv6 packet */
        if(!prv->hard_header) {
            KLIPS_PRINT(debug_tunnel & DB_TN_REVEC,
                        "klips_debug:ipsec_tunnel_hard_header: "
                        "physical device has been detached, packet dropped 0p%p->0p%p len=%d type=%d dev=%s->NULL ",
                        saddr,
                        daddr,
                        len,
                        type,
                        dev->name);
#ifdef NET_21
            KLIPS_PRINTMORE(debug_tunnel & DB_TN_REVEC,
                            "ip=%08x->%08x\n",
                            (__u32)ntohl(skb->nh.iph->saddr),
                            (__u32)ntohl(skb->nh.iph->daddr) );
#else /* NET_21 */
            KLIPS_PRINTMORE(debug_tunnel & DB_TN_REVEC,
                            "ip=%08x->%08x\n",
                            (__u32)ntohl(skb->ip_hdr->saddr),
                            (__u32)ntohl(skb->ip_hdr->daddr) );
#endif /* NET_21 */
            stats->tx_dropped++;
            return -ENODEV;
        }

#define da ((struct net_device *)(prv->dev))->dev_addr
        KLIPS_PRINT(debug_tunnel & DB_TN_REVEC,
                    "klips_debug:ipsec_tunnel_hard_header: "
                    "Revectored 0p%p->0p%p len=%d type=%d dev=%s->%s dev_addr=%02x:%02x:%02x:%02x:%02x:%02x ",