📄 ipsec_tunnel.c
}
#endif /* CONFIG_IPSEC_DEBUG */

#ifdef REAL_LOCKING_P
/*
 * Locking
 */
#if 0
DEBUG_NO_STATIC int
ipsec_tunnel_lock(struct ipsecpriv *prv)
{
        unsigned long flags;

        save_flags(flags);
        cli();
        /*
         * Locking in an interrupt may fail
         */
        if(prv->locked && in_interrupt()) {
                restore_flags(flags);
                return 0;
        }
        while(prv->locked)
                sleep_on(&prv->wait_queue);
        prv->locked = 1;
        restore_flags(flags);
        return 1;
}
#endif

#if 0
DEBUG_NO_STATIC void
ipsec_tunnel_unlock(struct ipsecpriv *prv)
{
        prv->locked = 0;
        wake_up(&prv->wait_queue);
}
#endif
#endif /* REAL_LOCKING_P */

DEBUG_NO_STATIC int
ipsec_tunnel_open(struct device *dev)
{
        struct ipsecpriv *prv = dev->priv;

        /*
         * Can't open until attached.
         */
        KLIPS_PRINT(debug_tunnel & DB_TN_INIT,
                    "klips_debug:ipsec_tunnel_open: "
                    "dev = %s, prv->dev = %s\n",
                    dev->name, prv->dev ? prv->dev->name : "NONE");

        if (prv->dev == NULL)
                return -ENODEV;

        MOD_INC_USE_COUNT;
        return 0;
}

DEBUG_NO_STATIC int
ipsec_tunnel_close(struct device *dev)
{
        MOD_DEC_USE_COUNT;
        return 0;
}

#ifdef MSS_HACK
/*
 * Issues:
 * 1) Fragments arriving in the tunnel should probably be rejected.
 * 2) How does this affect syncookies, mss_cache, dst cache ?
 * 3) Path MTU discovery handling needs to be reviewed.  For example,
 *    if we receive an ICMP 'packet too big' message from an intermediate
 *    router specifying its next hop MTU, our stack may process this and
 *    adjust the MSS without taking our AH/ESP overheads into account.
 */

/*
 * Recalculate checksum using differences between changed datum,
 * borrowed from netfilter.
 */
DEBUG_NO_STATIC u_int16_t
ipsec_fast_csum(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck)
{
        u_int32_t diffs[] = { oldvalinv, newval };
        return csum_fold(csum_partial((char *)diffs, sizeof(diffs),
                                      oldcheck ^ 0xFFFF));
}

/*
 * Determine effective MSS.
 *
 * Note that we assume that there is always an MSS option for our own
 * SYN segments, which is mentioned in tcp_syn_build_options(), kernel 2.2.x.
 * This could change, and we should probably parse TCP options instead.
 *
 */
DEBUG_NO_STATIC u_int8_t
ipsec_adjust_mss(struct sk_buff *skb, struct tcphdr *tcph, u_int16_t mtu)
{
        u_int16_t oldmss, newmss;
        u_int32_t *mssp;
        struct sock *sk = skb->sk;

        newmss = tcp_sync_mss(sk, mtu);
        printk(KERN_INFO "klips: setting mss to %u\n", newmss);
        mssp = (u_int32_t *)tcph + sizeof(struct tcphdr) / sizeof(u_int32_t);
        oldmss = ntohl(*mssp) & 0x0000FFFF;
        *mssp = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | newmss);
        tcph->check = ipsec_fast_csum(htons(~oldmss),
                                      htons(newmss), tcph->check);
        return 1;
}
#endif /* MSS_HACK */
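/*
 * Illustrative sketch added by the editor -- NOT part of the original
 * ipsec_tunnel.c.  It demonstrates, in plain userspace C, the two tricks the
 * MSS_HACK code above relies on: (1) the TCP MSS option packed into a single
 * 32-bit word (kind TCPOPT_MSS = 2, length TCPOLEN_MSS = 4, then the 16-bit
 * MSS value), and (2) the RFC 1624 style incremental checksum update
 * HC' = ~(~HC + ~m + m') that ipsec_fast_csum() expresses with the kernel's
 * csum_partial()/csum_fold().  The helper names (fold32, cksum16,
 * cksum_update16) are invented for this sketch only.  It is wrapped in
 * "#if 0" so it cannot affect a kernel build; extract it and compile it as
 * ordinary C to run the self-check.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TCPOPT_MSS  2
#define TCPOLEN_MSS 4

/* fold a 32-bit accumulator down to a 16-bit one's-complement sum */
static uint16_t fold32(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xFFFF) + (sum >> 16);
        return (uint16_t)sum;
}

/* full (from scratch) Internet checksum over n 16-bit words */
static uint16_t cksum16(const uint16_t *w, size_t n)
{
        uint32_t sum = 0;
        while (n--)
                sum += *w++;
        return (uint16_t)~fold32(sum);
}

/* incremental update when one 16-bit word changes from oldval to newval */
static uint16_t cksum_update16(uint16_t oldcheck, uint16_t oldval,
                               uint16_t newval)
{
        uint32_t sum = (uint16_t)~oldcheck;     /* ~HC  */
        sum += (uint16_t)~oldval;               /* + ~m */
        sum += newval;                          /* + m' */
        return (uint16_t)~fold32(sum);
}

int main(void)
{
        /* (1) an MSS option word shaped the way ipsec_adjust_mss() writes it */
        uint16_t newmss = 1400;
        uint32_t optword = ((uint32_t)TCPOPT_MSS << 24)
                         | ((uint32_t)TCPOLEN_MSS << 16) | newmss;
        printf("MSS option word 0x%08x (kind=%u len=%u mss=%u)\n",
               (unsigned)optword, (unsigned)(optword >> 24),
               (unsigned)((optword >> 16) & 0xFF), (unsigned)(optword & 0xFFFF));

        /* (2) patch one word of a dummy header and fix the checksum
         * incrementally; the result must match a full recomputation */
        uint16_t hdr[6] = { 0x1234, 0xabcd, 0x0f0f, 0x0000, 0x5a5a, 0x05b4 };
        uint16_t oldval, inc;

        hdr[3] = cksum16(hdr, 6);               /* fill the checksum slot */
        oldval = hdr[5];
        hdr[5] = newmss;                        /* 1460 -> 1400 */
        inc = cksum_update16(hdr[3], oldval, newmss);

        hdr[3] = 0;                             /* recompute from scratch */
        assert(inc == cksum16(hdr, 6));
        printf("incremental checksum update OK: 0x%04x\n", (unsigned)inc);
        return 0;
}
#endif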
#ifdef NETDEV_23
static inline int ipsec_tunnel_xmit2(struct sk_buff *skb)
{
        return ip_send(skb);
}
#endif /* NETDEV_23 */

/*
 * This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
int
ipsec_tunnel_start_xmit(struct sk_buff *skb, struct device *dev)
{
        struct ipsecpriv *prv;          /* Our device's private space */
        struct sk_buff *oskb = NULL;    /* Original skb pointer */
        struct net_device_stats *stats; /* This device's statistics */
        struct iphdr *iph;              /* Our new IP header */
        __u32 newdst;                   /* The other SG's IP address */
        __u32 orgdst;                   /* Original IP destination address */
        __u32 orgedst;                  /* 1st SG's IP address */
        __u32 newsrc;                   /* The new source SG's IP address */
        __u32 orgsrc;                   /* Original IP source address */
        __u32 innersrc;                 /* Innermost IP source address */
        int iphlen;                     /* IP header length */
        int pyldsz;                     /* upper protocol payload size */
        int headroom;
        int tailroom;
        int max_headroom = 0;           /* The extra header space needed */
        int max_tailroom = 0;           /* The extra stuffing needed */
        int ll_headroom;                /* The extra link layer hard_header space needed */
        int tot_headroom = 0;           /* The total header space needed */
        int tot_tailroom = 0;           /* The total stuffing needed */
        __u8 *saved_header = NULL;      /* saved copy of the hard header */
        int i;
        unsigned short sport, dport;

        struct sockaddr_encap matcher;  /* eroute search key */
        struct eroute *er;
        struct ipsec_sa *tdbp, *tdbq;   /* Tunnel Descriptor Block pointers */
        char sa[SATOA_BUF];
        size_t sa_len;
        int hard_header_stripped = 0;   /* has the hard header been removed yet? */
        int hard_header_len = 0;
        struct device *physdev;
/*      struct device *virtdev; */
        short physmtu;
        short mtudiff;
#ifdef NET_21
        struct rtable *rt = NULL;
#endif /* NET_21 */
        struct sa_id outgoing_said;
#ifdef NET_21
        int pass = 0;
#endif /* NET_21 */
        int error = 0;
        uint32_t eroute_pid = 0;
        struct ipsec_sa tdb;

        dport = sport = 0;

        memset((char*)&tdb, 0, sizeof(struct ipsec_sa));

        /*
         * Return if there is nothing to do. (Does this ever happen?) XXX
         */
        if (skb == NULL) {
                KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                            "klips_error:ipsec_tunnel_start_xmit: "
                            "Nothing to do!\n" );
                goto cleanup;
        }
        if (dev == NULL) {
                KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                            "klips_error:ipsec_tunnel_start_xmit: "
                            "No device associated with skb!\n" );
                goto cleanup;
        }

        prv = dev->priv;
        if (prv == NULL) {
                KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                            "klips_error:ipsec_tunnel_start_xmit: "
                            "Device has no private structure!\n" );
                goto cleanup;
        }

        physdev = prv->dev;
        if (physdev == NULL) {
                KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                            "klips_error:ipsec_tunnel_start_xmit: "
                            "Device is not attached to physical device!\n" );
                goto cleanup;
        }

        physmtu = physdev->mtu;

        stats = (struct net_device_stats *) &(prv->mystats);

#ifdef NET_21
        /* if skb was cloned (most likely due to a packet sniffer such as
           tcpdump being momentarily attached to the interface), make
           a copy of our own to modify */
        if(skb_cloned(skb)) {
                if
#ifdef SKB_COW_NEW
                  (skb_cow(skb, skb_headroom(skb)) != 0)
#else /* SKB_COW_NEW */
                  ((skb = skb_cow(skb, skb_headroom(skb))) == NULL)
#endif /* SKB_COW_NEW */
                {
                        KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                                    "klips_error:ipsec_tunnel_start_xmit: "
                                    "skb_cow failed to allocate buffer, dropping.\n" );
                        stats->tx_dropped++;
                        goto cleanup;
                }
        }
#endif /* NET_21 */

#ifdef NET_21
        iph = skb->nh.iph;
#else /* NET_21 */
        iph = skb->ip_hdr;
#endif /* NET_21 */

        /* sanity check for IP version as we can't handle IPv6 right now */
        if (iph->version != 4) {
                KLIPS_PRINT(debug_tunnel,
                            "klips_debug:ipsec_tunnel_start_xmit: "
                            "found IP Version %d but cannot process other IP versions than v4.\n",
                            iph->version); /* XXX */
                stats->tx_dropped++;
                goto cleanup;
        }

        /* physdev->hard_header_len is unreliable and should not be used */
        /* this is the MAC header length: 16 here (the 14-byte Ethernet
           header becomes 16 after alignment) */
        hard_header_len = (unsigned char *)iph - skb->data;
        if(hard_header_len < 0) {
                KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                            "klips_error:ipsec_tunnel_start_xmit: "
                            "Negative hard_header_len (%d)?!\n", hard_header_len);
                stats->tx_dropped++;
                goto cleanup;
        }
        if(hard_header_len == 0) { /* no hard header present */
                hard_header_stripped = 1;
        }

#ifdef CONFIG_IPSEC_DEBUG
        if (debug_tunnel & DB_TN_XMIT) {
                int i;
                char c;

                printk(KERN_INFO "klips_debug:ipsec_tunnel_start_xmit: "
                       ">>> skb->len=%ld hard_header_len:%d",
                       (unsigned long int)skb->len, hard_header_len);
                c = ' ';
                for (i = 0; i < hard_header_len; i++) {
                        printk("%c%02x", c, skb->data[i]);
                        c = ':';
                }
                printk(" \n");
        }
#endif /* CONFIG_IPSEC_DEBUG */

        KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, iph);

        /*
         * Sanity checks
         */
        /* the IP header length (ihl) is counted in 4-byte units */
        if ((iph->ihl << 2) != sizeof (struct iphdr)) {
                KLIPS_PRINT(debug_tunnel,
                            "klips_debug:ipsec_tunnel_start_xmit: "
                            "cannot process IP header options yet. May be mal-formed packet.\n"); /* XXX */
                stats->tx_dropped++;
                goto cleanup;
        }

#ifndef NET_21
        /* TTL decrement code (on the way out!) borrowed from ip_forward.c */
        if(0) {
                unsigned long checksum = iph->check;
                iph->ttl--;
        /*
         *      Re-compute the IP header checksum.
         *      This is efficient. We know what has happened to the header
         *      and can thus adjust the checksum as Phil Karn does in KA9Q
         *      except we do this in "network byte order".
         */
                checksum += htons(0x0100);
                /* carry overflow? */
                checksum += checksum >> 16;
                iph->check = checksum;
        }
        if (iph->ttl <= 0) {
                /* Tell the sender its packet died... */
                ICMP_SEND(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, physdev);

                KLIPS_PRINT(debug_tunnel, "klips_debug:ipsec_tunnel_start_xmit: "
                            "TTL=0, too many hops!\n");
                stats->tx_dropped++;
                goto cleanup;
        }
#endif /* !NET_21 */
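        /*
         * Editor's note, not in the original source: the eroute table that is
         * consulted below is KLIPS's security policy database.  The lookup
         * key is built from the outgoing packet's addresses, protocol and
         * (for TCP/UDP) ports, and the result is either a real SA to apply or
         * one of the "shunt" SPIs handled further down: %pass (send in the
         * clear), %drop (discard), %hold (keep the most recent packet while
         * keying is in progress), or %trap/%trapsubnet (signal the keying
         * daemon that an SA is needed, then hold).
         */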
        /*
         * First things first -- look us up in the erouting tables.
         */
        matcher.sen_len = sizeof (struct sockaddr_encap);
        matcher.sen_family = AF_ENCAP;
        matcher.sen_type = SENT_IP4;
        matcher.sen_ip_src.s_addr = iph->saddr;
        matcher.sen_ip_dst.s_addr = iph->daddr;
        matcher.sen_proto = iph->protocol;
        extract_ports(iph, &matcher);

        /*
         * The spinlock is to prevent any other process from accessing or
         * deleting the eroute while we are using and updating it.
         */
        spin_lock(&eroute_lock);

        er = ipsec_findroute(&matcher);

        if(iph->protocol == IPPROTO_UDP) {
                if(skb->sk) {
                        sport = ntohs(skb->sk->sport);
                        dport = ntohs(skb->sk->dport);
                /* fragmented, and this is the last fragment??? */
                } else if((ntohs(iph->frag_off) & IP_OFFSET) == 0 && /* IP_OFFSET = 0x1FFF */
                          iph->ihl << 2 > sizeof(struct iphdr) + sizeof(struct udphdr)) {
                        sport = ntohs(((struct udphdr*)((caddr_t)iph + (iph->ihl<<2)))->source);
                        dport = ntohs(((struct udphdr*)((caddr_t)iph + (iph->ihl<<2)))->dest);
                } else {
                        sport = 0;
                        dport = 0;
                }
        }

        /* default to a %drop eroute */
        outgoing_said.proto = IPPROTO_INT;
        outgoing_said.spi = htonl(SPI_DROP);
        outgoing_said.dst.s_addr = INADDR_ANY;
        KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                    "klips_debug:ipsec_tunnel_start_xmit: "
                    "checking for local udp/500 IKE packet "
                    "saddr=%x, er=%p, daddr=%x, er_dst=%x, proto=%d sport=%d dport=%d\n",
                    ntohl((unsigned int)iph->saddr),
                    er,
                    ntohl((unsigned int)iph->daddr),
                    er ? ntohl((unsigned int)er->er_said.dst.s_addr) : 0,
                    iph->protocol,
                    sport,
                    dport);
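        /*
         * Editor's note, not in the original source: iph->frag_off packs the
         * 13-bit fragment offset (mask IP_OFFSET, in units of 8 bytes)
         * together with the IP_MF ("more fragments") and IP_DF flags.  An
         * offset of zero means "first fragment or not fragmented at all",
         * which is the only case in which the UDP/TCP port numbers are
         * actually present in this packet -- hence the offset test in the
         * port extraction above and the IP_MF warning just below.
         */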
        /*
         * Quick cheat for now...are we udp/500? If so, let it through
         * without interference since it is most likely an IKE packet.
         */
        if (ip_chk_addr((unsigned long)iph->saddr) == IS_MYADDR
            && (!er
                || iph->daddr == er->er_said.dst.s_addr
                || INADDR_ANY == er->er_said.dst.s_addr)
            && (sport == 500)) {
                /* Whatever the eroute, this is an IKE message
                 * from us (i.e. not being forwarded).
                 * Furthermore, if there is a tunnel eroute,
                 * the destination is the peer for this eroute.
                 * So %pass the packet: modify the default %drop.
                 */
                outgoing_said.spi = htonl(SPI_PASS);
                if(!(skb->sk) && ((ntohs(iph->frag_off) & IP_MF) != 0)) {
                        KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                                    "klips_debug:ipsec_tunnel_start_xmit: "
                                    "local UDP/500 (probably IKE) passthrough: base fragment, rest of fragments will probably get filtered.\n");
                }
        } else if (er) {
                er->er_count++;
                er->er_lasttime = jiffies/HZ;
                if(er->er_said.proto==IPPROTO_INT
                   && er->er_said.spi==htonl(SPI_HOLD)) {
                        KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
                                    "klips_debug:ipsec_tunnel_start_xmit: "
                                    "shunt SA of HOLD: skb stored in HOLD.\n");
                        if(er->er_last != NULL) {
                                kfree_skb(er->er_last);
                        }
                        er->er_last = skb;
                        skb = NULL;
                        stats->tx_dropped++;
                        spin_unlock(&eroute_lock);
                        goto cleanup;
                }
                outgoing_said = er->er_said;
                eroute_pid = er->er_pid;
                /* Copy of the ident for the TRAP/TRAPSUBNET eroutes */
                if(outgoing_said.proto==IPPROTO_INT
                   && (outgoing_said.spi==htonl(SPI_TRAP)
                       || (outgoing_said.spi==htonl(SPI_TRAPSUBNET)))) {
                        int len;

                        tdb.tdb_ident_s.type = er->er_ident_s.type;
                        tdb.tdb_ident_s.id = er->er_ident_s.id;
                        tdb.tdb_ident_s.len = er->er_ident_s.len;
                        if (tdb.tdb_ident_s.len) {
                                len = tdb.tdb_ident_s.len * IPSEC_PFKEYv2_ALIGN - sizeof(struct sadb_ident);
                                if ((tdb.tdb_ident_s.data = kmalloc(len, GFP_ATOMIC)) == NULL) {
                                        printk(KERN_WARNING "klips_debug:ipsec_tunnel_start_xmit: "