
ip6_output.c

Linux Kernel 2.6.9 for OMAP1710
Language: C
Page 1 of 2
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	airthmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *	Imran Patel	: 	frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *			:       add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**));

static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static spinlock_t ip6_id_lock = SPIN_LOCK_UNLOCKED;

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}

static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}

static int ip6_output2(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
				&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish);
}

int ip6_output(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if ((skb->len > dst_pmtu(skb->dst) || skb_shinfo(skb)->frag_list))
		return ip6_fragment(pskb, ip6_output2);
	else
		return ip6_output2(pskb);
}

#ifdef CONFIG_NETFILTER
int ip6_route_me_harder(struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb->nh.ipv6h;
	struct dst_entry *dst;
	struct flowi fl = {
		.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
		.nl_u =
		{ .ip6_u =
		  { .daddr = iph->daddr,
		    .saddr = iph->saddr, } },
		.proto = iph->nexthdr,
	};

	dst = ip6_route_output(skb->sk, &fl);

	if (dst->error) {
		IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
		LIMIT_NETDEBUG(
			printk(KERN_DEBUG "ip6_route_me_harder: No more route.\n"));
		dst_release(dst);
		return -EINVAL;
	}

	/* Drop old route. */
	dst_release(skb->dst);

	skb->dst = dst;
	return 0;
}
#endif

static inline int ip6_maybe_reroute(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
	if (skb->nfcache & NFC_ALTERED){
		if (ip6_route_me_harder(skb) != 0){
			kfree_skb(skb);
			return -EINVAL;
		}
	}
#endif /* CONFIG_NETFILTER */
	return dst_output(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = sk ? inet6_sk(sk) : NULL;
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				return -ENOBUFS;
			}
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	*(u32*)hdr = htonl(0x60000000) | fl->fl6_flowlabel;
	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	mtu = dst_pmtu(dst);
	if ((skb->len <= mtu) || ipfragok) {
		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

/*
 *	To avoid extra problems ND packets are send through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is for us performance critical)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(u32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}

int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without ane WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_pmtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_pmtu(dst), skb->dev);
		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */
	hdr->hop_limit--;

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);

error:
	IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->security = from->security;
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#ifdef CONFIG_NETFILTER_DEBUG
	to->nf_debug = from->nf_debug;
#endif
#endif
}

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;
	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
		case NEXTHDR_ROUTING:
		case NEXTHDR_DEST:
			if (**nexthdr == NEXTHDR_ROUTING) found_rhdr = 1;
			if (**nexthdr == NEXTHDR_DEST && found_rhdr) return offset;
			offset += ipv6_optlen(exthdr);
			*nexthdr = &exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
			break;
		default :
			return offset;
		}
	}

	return offset;
}

static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**))
{
	struct net_device *dev;
	struct sk_buff *frag, *skb = *pskb;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	u32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_pmtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
			    goto slow_path;

			/* Correct socket ownership. */
			if (frag->sk == NULL)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		*prevhdr = NEXTHDR_FRAGMENT;
		memcpy(tmp_hdr, skb->nh.raw, hlen);
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
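The fragmentation path above sizes each fragment so that all but the last carry a payload that is a multiple of 8 octets, after reserving room for the unfragmentable header part (hlen) and the 8-byte Fragment header. Below is a minimal userspace sketch of that arithmetic only; frag_payload_len() and FRAG_HDR_LEN are made-up names for this illustration, not kernel APIs.

/* Illustrative sketch (not part of ip6_output.c): per-fragment payload
 * sizing as used conceptually by ip6_fragment(). */
#include <stdio.h>

#define FRAG_HDR_LEN 8	/* on-the-wire size of the IPv6 Fragment header */

/* Largest per-fragment payload for a given path MTU and unfragmentable
 * header length, rounded down to a multiple of 8 octets, since fragment
 * offsets are expressed in 8-octet units. */
static unsigned int frag_payload_len(unsigned int path_mtu, unsigned int hlen)
{
	return (path_mtu - hlen - FRAG_HDR_LEN) & ~7u;
}

int main(void)
{
	/* e.g. a 1500-byte MTU with a bare 40-byte IPv6 header leaves
	 * 1452 bytes, trimmed to 1448 (a multiple of 8) per fragment. */
	printf("%u\n", frag_payload_len(1500, 40));
	return 0;
}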

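In ip6_xmit() and ip6_nd_hdr(), the first 32-bit word of the IPv6 header is written as htonl(0x60000000), optionally OR-ed with fl->fl6_flowlabel: version 6 in the top nibble, a zero traffic class, and a zero flow label. The standalone sketch below shows that bit layout; pack_ipv6_word0() is a made-up helper for this illustration, not a kernel function.

/* Illustrative sketch (not part of ip6_output.c): layout of the first
 * word of an IPv6 header. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Pack version (4 bits), traffic class (8 bits) and flow label (20 bits)
 * into the first IPv6 header word, returned in network byte order. */
static uint32_t pack_ipv6_word0(uint8_t tclass, uint32_t flowlabel)
{
	uint32_t word = (6u << 28) | ((uint32_t)tclass << 20) | (flowlabel & 0xFFFFFu);
	return htonl(word);
}

int main(void)
{
	/* Version 6, traffic class 0, flow label 0 gives 0x60000000, the
	 * same constant that appears in ip6_xmit() and ip6_nd_hdr(). */
	printf("0x%08x\n", ntohl(pack_ipv6_word0(0, 0)));
	return 0;
}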