📄 ipsec_rcv.c
                struct tcphdr *tcp = skb->h.th;
                if (natt_oa) {
                    __u32 buff[2] = { ~natt_oa, ipp->saddr };
                    KLIPS_PRINT(debug_rcv,
                                "klips_debug:ipsec_rcv: "
                                "NAT-T & TRANSPORT: "
                                "fix TCP checksum using NAT-OA\n");
                    tcp->check = csum_fold(
                        csum_partial((unsigned char *)buff, sizeof(buff),
                                     tcp->check^0xffff));
                } else {
                    KLIPS_PRINT(debug_rcv,
                                "klips_debug:ipsec_rcv: "
                                "NAT-T & TRANSPORT: recalc TCP checksum\n");
                    if (pkt_len > (ntohs(ipp->tot_len)))
                        data_len -= (pkt_len - ntohs(ipp->tot_len));
                    tcp->check = 0;
                    tcp->check = csum_tcpudp_magic(ipp->saddr, ipp->daddr,
                                                   data_len, IPPROTO_TCP,
                                                   csum_partial((unsigned char *)tcp,
                                                                data_len, 0));
                }
            } else {
                KLIPS_PRINT(debug_rcv,
                            "klips_debug:ipsec_rcv: "
                            "NAT-T & TRANSPORT: can't fix TCP checksum\n");
            }
            break;
        case IPPROTO_UDP:
            if (data_len >= sizeof(struct udphdr)) {
                struct udphdr *udp = skb->h.uh;
                if (udp->check == 0) {
                    KLIPS_PRINT(debug_rcv,
                                "klips_debug:ipsec_rcv: "
                                "NAT-T & TRANSPORT: UDP checksum already 0\n");
                } else if (natt_oa) {
                    __u32 buff[2] = { ~natt_oa, ipp->saddr };
                    KLIPS_PRINT(debug_rcv,
                                "klips_debug:ipsec_rcv: "
                                "NAT-T & TRANSPORT: "
                                "fix UDP checksum using NAT-OA\n");
                    udp->check = csum_fold(
                        csum_partial((unsigned char *)buff, sizeof(buff),
                                     udp->check^0xffff));
                } else {
                    KLIPS_PRINT(debug_rcv,
                                "klips_debug:ipsec_rcv: "
                                "NAT-T & TRANSPORT: zero UDP checksum\n");
                    udp->check = 0;
                }
            } else {
                KLIPS_PRINT(debug_rcv,
                            "klips_debug:ipsec_rcv: "
                            "NAT-T & TRANSPORT: can't fix UDP checksum\n");
            }
            break;
        default:
            KLIPS_PRINT(debug_rcv,
                        "klips_debug:ipsec_rcv: "
                        "NAT-T & TRANSPORT: non TCP/UDP packet -- do nothing\n");
            break;
        }
    }
#endif
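    /*
     * The NAT-OA fix-ups above adjust the transport checksum incrementally
     * rather than recomputing it: the peer built the TCP/UDP checksum over
     * its original source address (conveyed as NAT-OA), while the
     * pseudo-header now contains the NATed address in ipp->saddr.  Seeding
     * csum_partial() with the un-complemented old sum (check ^ 0xffff),
     * adding { ~old, new } and re-folding implements the RFC 1624 update
     * HC' = ~(~HC + ~m + m').
     */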
    /*
     * XXX this needs to be locked from when it was first looked
     * up in the decapsulation loop.  Perhaps it is better to put
     * the IPIP decap inside the loop.
     */
    if(ipsnext) {
        ipsp = ipsnext;
        irs->sa_len = satot(&irs->said, 0, irs->sa, sizeof(irs->sa));
        if((ipp->protocol != IPPROTO_IPIP) &&
           (ipp->protocol != IPPROTO_ATT_HEARTBEAT)) {  /* AT&T heartbeats to SIG/GIG */
            spin_unlock(&tdb_lock);
            KLIPS_PRINT(debug_rcv,
                        "klips_debug:ipsec_rcv: "
                        "SA:%s, Hey! How did this get through? Dropped.\n",
                        irs->sa_len ? irs->sa : " (error)");
            if(irs->stats) {
                irs->stats->rx_dropped++;
            }
            goto rcvleave;
        }
        if(sysctl_ipsec_inbound_policy_check) {
            struct sockaddr_in *psin = (struct sockaddr_in*)(ipsp->ips_addr_s);
            if((ipsnext = ipsp->ips_inext)) {
                char sa2[SATOT_BUF];
                size_t sa_len2;

                sa_len2 = satot(&ipsnext->ips_said, 0, sa2, sizeof(sa2));
                spin_unlock(&tdb_lock);
                KLIPS_PRINT(debug_rcv,
                            "klips_debug:ipsec_rcv: "
                            "unexpected SA:%s after IPIP SA:%s\n",
                            sa_len2 ? sa2 : " (error)",
                            irs->sa_len ? irs->sa : " (error)");
                if(irs->stats) {
                    irs->stats->rx_dropped++;
                }
                goto rcvleave;
            }
            if(ipp->saddr != psin->sin_addr.s_addr) {
                spin_unlock(&tdb_lock);
                KLIPS_PRINT(debug_rcv,
                            "klips_debug:ipsec_rcv: "
                            "SA:%s, src=%s(%08x) does not match expected 0x%08x.\n",
                            irs->sa_len ? irs->sa : " (error)",
                            irs->ipsaddr_txt,
                            ipp->saddr, psin->sin_addr.s_addr);
                if(irs->stats) {
                    irs->stats->rx_dropped++;
                }
                goto rcvleave;
            }
        }

        if(ipp->protocol == IPPROTO_IPIP)  /* added to support AT&T heartbeats to SIG/GIG */
        {
            /*
             * XXX this needs to be locked from when it was first looked
             * up in the decapsulation loop.  Perhaps it is better to put
             * the IPIP decap inside the loop.
             */
            ipsp->ips_life.ipl_bytes.ipl_count += skb->len;
            ipsp->ips_life.ipl_bytes.ipl_last = skb->len;

            if(!ipsp->ips_life.ipl_usetime.ipl_count) {
                ipsp->ips_life.ipl_usetime.ipl_count = jiffies / HZ;
            }
            ipsp->ips_life.ipl_usetime.ipl_last = jiffies / HZ;
            ipsp->ips_life.ipl_packets.ipl_count += 1;

            if(skb->len < irs->iphlen) {
                spin_unlock(&tdb_lock);
                printk(KERN_WARNING "klips_debug:ipsec_rcv: "
                       "tried to skb_pull iphlen=%d, %d available. This should never happen, please report.\n",
                       irs->iphlen,
                       (int)(skb->len));
                goto rcvleave;
            }

            /*
             * we need to pull up by size of IP header,
             * options, but also by any UDP/ESP encap there might
             * have been, and this deals with all cases.
             */
            skb_pull(skb, (skb->h.raw - skb->nh.raw));

            /* new L3 header is where L4 payload was */
            skb->nh.raw = skb->h.raw;

            /* now setup new L4 payload location */
            ipp = (struct iphdr *)skb->nh.raw;
            skb->h.raw = skb->nh.raw + (ipp->ihl << 2);

            /* remove any saved options that we might have,
             * since we have a new IP header. */
            memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));

#if 0
            KLIPS_PRINT(debug_rcv, "csum: %d\n", ip_fast_csum((u8 *)ipp, ipp->ihl));
#endif

            /* re-do any strings for debugging */
            ipsaddr.s_addr = ipp->saddr;
            addrtoa(ipsaddr, 0, irs->ipsaddr_txt, sizeof(irs->ipsaddr_txt));
            ipdaddr.s_addr = ipp->daddr;
            addrtoa(ipdaddr, 0, irs->ipdaddr_txt, sizeof(irs->ipdaddr_txt));

            skb->protocol = htons(ETH_P_IP);
            skb->ip_summed = 0;
            KLIPS_PRINT(debug_rcv & DB_RX_PKTRX,
                        "klips_debug:ipsec_rcv: "
                        "IPIP tunnel stripped.\n");
            KLIPS_IP_PRINT(debug_rcv & DB_RX_PKTRX, ipp);
        }

        if(sysctl_ipsec_inbound_policy_check
           /*
            * Note: "xor" (^) logically replaces "not equal" (!=) and
            * "bitwise or" (|) logically replaces "boolean or" (||).
            * This is done to speed up execution by doing only bitwise
            * operations and no branch operations
            */
           && (((ipp->saddr & ipsp->ips_mask_s.u.v4.sin_addr.s_addr)
                ^ ipsp->ips_flow_s.u.v4.sin_addr.s_addr)
               | ((ipp->daddr & ipsp->ips_mask_d.u.v4.sin_addr.s_addr)
                  ^ ipsp->ips_flow_d.u.v4.sin_addr.s_addr)))
        {
            char sflow_txt[SUBNETTOA_BUF], dflow_txt[SUBNETTOA_BUF];

            subnettoa(ipsp->ips_flow_s.u.v4.sin_addr,
                      ipsp->ips_mask_s.u.v4.sin_addr,
                      0, sflow_txt, sizeof(sflow_txt));
            subnettoa(ipsp->ips_flow_d.u.v4.sin_addr,
                      ipsp->ips_mask_d.u.v4.sin_addr,
                      0, dflow_txt, sizeof(dflow_txt));
            spin_unlock(&tdb_lock);
            KLIPS_PRINT(debug_rcv,
                        "klips_debug:ipsec_rcv: "
                        "SA:%s, inner tunnel policy [%s -> %s] does not agree with pkt contents [%s -> %s].\n",
                        irs->sa_len ? irs->sa : " (error)",
                        sflow_txt, dflow_txt,
                        irs->ipsaddr_txt, irs->ipdaddr_txt);
            if(irs->stats) {
                irs->stats->rx_dropped++;
            }
            goto rcvleave;
        }
#ifdef CONFIG_NETFILTER
        skb->nfmark = (skb->nfmark & (~(IPsecSAref2NFmark(IPSEC_SA_REF_TABLE_MASK))))
            | IPsecSAref2NFmark(IPsecSA2SAref(ipsp));
        KLIPS_PRINT(debug_rcv & DB_RX_PKTRX,
                    "klips_debug:ipsec_rcv: "
                    "IPIP SA sets skb->nfmark=0x%x.\n",
                    (unsigned)skb->nfmark);
#endif /* CONFIG_NETFILTER */
    }

    spin_unlock(&tdb_lock);

    if(irs->stats) {
        irs->stats->rx_bytes += skb->len;
    }
    if(skb->dst) {
        dst_release(skb->dst);
        skb->dst = NULL;
    }
    skb->pkt_type = PACKET_HOST;
    if(irs->hard_header_len &&
       (skb->mac.raw != (skb->nh.raw - irs->hard_header_len)) &&
       (irs->hard_header_len <= skb_headroom(skb))) {
        /* copy back original MAC header */
        memmove(skb->nh.raw - irs->hard_header_len,
                skb->mac.raw,
                irs->hard_header_len);
        skb->mac.raw = skb->nh.raw - irs->hard_header_len;
    }

#ifdef CONFIG_KLIPS_IPCOMP
    if(ipp->protocol == IPPROTO_COMP) {
        unsigned int flags = 0;

        if(sysctl_ipsec_inbound_policy_check) {
            KLIPS_PRINT(debug_rcv & DB_RX_PKTRX,
                        "klips_debug:ipsec_rcv: "
                        "inbound policy checking enabled, IPCOMP follows IPIP, dropped.\n");
            if (irs->stats) {
                irs->stats->rx_errors++;
            }
            goto rcvleave;
        }
        /*
         * XXX need a ipsec_sa for updating ratio counters but it is not
         * following policy anyways so it is not a priority
         */
        skb = skb_decompress(skb, NULL, &flags);
        if (!skb || flags) {
            KLIPS_PRINT(debug_rcv & DB_RX_PKTRX,
                        "klips_debug:ipsec_rcv: "
                        "skb_decompress() returned error flags: %d, dropped.\n",
                        flags);
            if (irs->stats) {
                irs->stats->rx_errors++;
            }
            goto rcvleave;
        }
    }
#endif /* CONFIG_KLIPS_IPCOMP */

    /*
     * make sure that data now starts at IP header, since we are going
     * to pass this back to ip_input (aka netif_rx).  Rules for where the
     * pointers wind up are different for 2.6 vs 2.4, so we just fudge it here.
     */
#ifdef NET_26
    skb->data = skb_push(skb, skb->h.raw - skb->nh.raw);
#else
    skb->data = skb->nh.raw;
    {
        struct iphdr *iph = skb->nh.iph;
        int len = ntohs(iph->tot_len);
        skb->len = len;
    }
#endif

#ifdef SKB_RESET_NFCT
    nf_conntrack_put(skb->nfct);
    skb->nfct = NULL;
#if defined(CONFIG_NETFILTER_DEBUG) && defined(HAVE_SKB_NF_DEBUG)
    skb->nf_debug = 0;
#endif /* CONFIG_NETFILTER_DEBUG */
#endif /* SKB_RESET_NFCT */

    KLIPS_PRINT(debug_rcv & DB_RX_PKTRX,
                "klips_debug:ipsec_rcv: "
                "netif_rx() called.\n");
    netif_rx(skb);
    skb = NULL;

rcvleave:
    if(skb) {
        ipsec_kfree_skb(skb);
    }
    /* KLIPS_DEC_USE; Artifact from refactor? bug # 454 */
    return(0);
}
struct sk_buff *ipsec_rcv_unclone(struct sk_buff *skb,
                                  struct ipsec_rcv_state *irs)
{
    /* if skb was cloned (most likely due to a packet sniffer such as
       tcpdump being momentarily attached to the interface), make
       a copy of our own to modify */
    if(skb_cloned(skb)) {
        /* include any mac header while copying.. */
        if(skb_headroom(skb) < irs->hard_header_len) {
            printk(KERN_WARNING "klips_error:ipsec_rcv: "
                   "tried to skb_push hhlen=%d, %d available. This should never happen, please report.\n",
                   irs->hard_header_len,
                   skb_headroom(skb));
            goto rcvleave;
        }
        skb_push(skb, irs->hard_header_len);
        if
#ifdef SKB_COW_NEW
           (skb_cow(skb, skb_headroom(skb)) != 0)
#else /* SKB_COW_NEW */
           ((skb = skb_cow(skb, skb_headroom(skb))) == NULL)
#endif /* SKB_COW_NEW */
        {
            goto rcvleave;
        }
        if(skb->len < irs->hard_header_len) {
            printk(KERN_WARNING "klips_error:ipsec_rcv: "
                   "tried to skb_pull hhlen=%d, %d available. This should never happen, please report.\n",
                   irs->hard_header_len,
                   skb->len);
            goto rcvleave;
        }
        skb_pull(skb, irs->hard_header_len);
    }
    return skb;

rcvleave:
    ipsec_kfree_skb(skb);
    return NULL;
}

#if !defined(NET_26) && defined(CONFIG_IPSEC_NAT_TRAVERSAL)
/*
 * decapsulate a UDP encapsulated ESP packet
 */
struct sk_buff *ipsec_rcv_natt_decap(struct sk_buff *skb,
                                     struct ipsec_rcv_state *irs,
                                     int *udp_decap_ret_p)
{
    *udp_decap_ret_p = 0;
    if (skb->sk && skb->nh.iph && skb->nh.iph->protocol == IPPROTO_UDP) {
        /**
         * Packet comes from udp_queue_rcv_skb so it is already defrag,
         * checksum verified, ... (ie safe to use)
         *
         * If the packet is not for us, return -1 and udp_queue_rcv_skb
         * will continue to handle it (do not kfree skb !!).
         */
#ifndef UDP_OPT_IN_SOCK
        struct udp_opt {
            __u32 esp_in_udp;
        };
        struct udp_opt *tp = (struct udp_opt *)&(skb->sk->tp_pinfo.af_tcp);
#else
        struct udp_opt *tp = &(skb->sk->tp_pinfo.af_udp);
#endif
        struct iphdr *ip = (struct iphdr *)skb->nh.iph;
        struct udphdr *udp = (struct udphdr *)((__u32 *)ip + ip->ihl);
        __u8 *udpdata = (__u8 *)udp + sizeof(struct udphdr);
        __u32 *udpdata32 = (__u32 *)udpdata;

        irs->natt_sport = ntohs(udp->source);
        irs->natt_dport = ntohs(udp->dest);

        KLIPS_PRINT(debug_rcv,
                    "klips_debug:ipsec_rcv: "
                    "suspected ESPinUDP packet (NAT-Traversal) [%d].\n",
                    tp->esp_in_udp);
        KLIPS_IP_PRINT(debug_rcv, ip);

        if (udpdata < skb->tail) {
            unsigned int len = skb->tail - udpdata;
            if ((len == 1) && (udpdata[0] == 0xff)) {
                KLIPS_PRINT(debug_rcv,
                            "klips_debug:ipsec_rcv: "
                            /* not IPv6 compliant message */
                            "NAT-keepalive from %d.%d.%d.%d.\n",
                            NIPQUAD(ip->saddr));
                *udp_decap_ret_p = 0;
                return NULL;
            } else if ((tp->esp_in_udp == ESPINUDP_WITH_NON_IKE) &&
                       (len > (2 * sizeof(__u32) + sizeof(struct esphdr))) &&
                       (udpdata32[0] == 0) && (udpdata32[1] == 0)) {
                /* ESP Packet with Non-IKE header */
                KLIPS_PRINT(debug_rcv,
                            "klips_debug:ipsec_rcv: "
                            "ESPinUDP pkt with Non-IKE - spi=0x%x\n",
                            ntohl(udpdata32[2]));
                irs->natt_type = ESPINUDP_WITH_NON_IKE;
                irs->natt_len = sizeof(struct udphdr) + (2 * sizeof(__u32));
            } else if ((tp->esp_in_udp == ESPINUDP_WITH_NON_ESP) &&
                       (len > sizeof(struct esphdr)) &&
                       (udpdata32[0] != 0)) {
                /* ESP Packet without Non-ESP header */
                irs->natt_type = ESPINUDP_WITH_NON_ESP;
                irs->natt_len = sizeof(struct udphdr);
                KLIPS_PRINT(debug_rcv,
                            "klips_debug:ipsec_rcv: "
                            "ESPinUDP pkt without Non-ESP - spi=0x%x\n",
                            ntohl(udpdata32[0]));
            } else {
                KLIPS_PRINT(debug_rcv,
                            "klips_debug:ipsec_rcv: "
                            "IKE packet - not handled here\n");
                *udp_decap_ret_p = -1;
                return NULL;
            }
        } else {
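For reference, here is a minimal user-space sketch of the incremental one's-complement fix-up used in the NAT-OA branches of the transport-mode checksum code near the top of this listing. It is not part of ipsec_rcv.c: ocsum() and ocsum_fold() are simplified stand-ins for the kernel's csum_partial() and csum_fold(), and the addresses are made-up example values.

/* natt_csum_demo.c -- standalone illustration, not kernel code */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 16-bit one's-complement sum of a buffer, continuing from an initial sum */
static uint32_t ocsum(const void *data, size_t len, uint32_t sum)
{
    const uint8_t *p = data;

    while (len > 1) {
        uint16_t word;
        memcpy(&word, p, sizeof(word));
        sum += word;
        p += 2;
        len -= 2;
    }
    if (len)
        sum += *p;
    return sum;
}

/* fold the 32-bit accumulator to 16 bits and complement it */
static uint16_t ocsum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint32_t old_saddr = 0x0a000001;    /* stand-in for the NAT-OA address */
    uint32_t new_saddr = 0xc0a80001;    /* stand-in for ipp->saddr after NAT */
    uint32_t pkt[4] = { old_saddr, 0x11112222, 0x33334444, 0x00005555 };

    /* checksum as the sender computed it, covering the original address */
    uint16_t check = ocsum_fold(ocsum(pkt, sizeof(pkt), 0));

    /* incremental fix-up, mirroring the NAT-OA branch: seed with the
     * un-complemented old sum and fold in { ~old, new } */
    uint32_t buff[2] = { ~old_saddr, new_saddr };
    uint16_t fixed = ocsum_fold(ocsum(buff, sizeof(buff),
                                      (uint32_t)(check ^ 0xffff)));

    /* full recomputation over the rewritten packet, for comparison */
    pkt[0] = new_saddr;
    uint16_t recomputed = ocsum_fold(ocsum(pkt, sizeof(pkt), 0));

    printf("incremental=0x%04x recomputed=0x%04x -> %s\n",
           fixed, recomputed, fixed == recomputed ? "match" : "MISMATCH");
    return fixed == recomputed ? 0 : 1;
}

Built with an ordinary C99 compiler, the sketch prints the incrementally adjusted checksum next to a full recomputation; the two should agree, which is the property the NAT-OA shortcut relies on.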