⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ip6_output.c

📁 嵌入式系统设计与实验教材二源码linux内核移植与编译
💻 C
📖 第 1 页 / 共 2 页
字号:
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.33 2001/09/20 00:35:35 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/route.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>

/*
 * Fill in the Identification field of an IPv6 fragment header.
 *
 * Draws the next value from a single global 32-bit counter under a
 * spinlock (bottom-half safe, since this runs in the transmit path),
 * skipping 0 on wraparound, and stores it in network byte order
 * ("frag id should be in NBO" -- see changelog above).
 */
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static spinlock_t ip6_id_lock = SPIN_LOCK_UNLOCKED;

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	/* 0 is never handed out; restart the counter at 1 on wrap. */
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}

/*
 * Final step of output: prepend the link-layer header and hand the
 * packet to the device layer.
 *
 * If the destination has a cached hardware header (hh_cache), copy it
 * in under the read lock and transmit via hh_output(); otherwise fall
 * back to the neighbour's output function (which will resolve the
 * link-layer address).  With neither available, the packet is dropped.
 *
 * NOTE(review): the 16-byte copy target (skb->data - 16) matches the
 * fixed hh_data staging area of the 2.4-era struct hh_cache -- confirm
 * against the kernel headers if that layout ever changes.
 *
 * Returns the device-layer result, or -EINVAL when there is no way to
 * emit the packet (the skb is freed in that case).
 */
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		read_lock_bh(&hh->hh_lock);
		memcpy(skb->data - 16, hh->hh_data, 16);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	kfree_skb(skb);
	return -EINVAL;
}

/*
 * dev_loopback_xmit for use with netfilter: loop a cloned multicast
 * packet back to the local stack after the POST_ROUTING hook.
 */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	/* Strip everything before the network header before re-RX. */
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	/* Locally looped data was never on the wire: skip checksumming. */
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}

/*
 * Main IPv6 output entry: deliver an skb (with dst already attached)
 * through the NF_IP6_POST_ROUTING netfilter hook to ip6_output_finish().
 *
 * For multicast destinations the packet is additionally looped back to
 * the local machine when the sending socket has mc_loop set (or there
 * is no socket) and the device has the group joined -- unless the
 * device itself is the loopback.  A hop limit of 0 on a multicast
 * packet means "local delivery only": the clone has been looped, so
 * the original is dropped without error.
 */
int ip6_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = __constant_htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		if (!(dev->flags&IFF_LOOPBACK) &&
		    (skb->sk == NULL || skb->sk->net_pinfo.af_inet6.mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(Ip6OutMcastPkts);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

#ifdef CONFIG_NETFILTER
/*
 * Re-route an skb after netfilter may have rewritten its addresses
 * (the IPv6 analogue of ip_route_me_harder): build a flow from the
 * current header, look the route up again, and swap in the new dst.
 *
 * Returns 0 on success, -EINVAL when no route is found (the old dst
 * is left untouched in that case).
 */
static int route6_me_harder(struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb->nh.ipv6h;
	struct dst_entry *dst;
	struct flowi fl;

	fl.proto = iph->nexthdr;
	fl.fl6_dst = &iph->daddr;
	fl.fl6_src = &iph->saddr;
	fl.oif = skb->sk ? skb->sk->bound_dev_if : 0;
	fl.fl6_flowlabel = 0;
	fl.uli_u.ports.dport = 0;
	fl.uli_u.ports.sport = 0;

	dst = ip6_route_output(skb->sk, &fl);

	if (dst->error) {
		if (net_ratelimit())
			printk(KERN_DEBUG "route6_me_harder: No more route.\n");
		return -EINVAL;
	}

	/* Drop old route. */
	dst_release(skb->dst);

	skb->dst = dst;
	return 0;
}
#endif

/*
 * Called on the LOCAL_OUT path: if a netfilter hook altered the packet
 * (NFC_ALTERED in nfcache), re-do the routing decision before handing
 * the skb to its dst output function.  Frees the skb and returns
 * -EINVAL when re-routing fails.
 */
static inline int ip6_maybe_reroute(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
	if (skb->nfcache & NFC_ALTERED){
		if (route6_me_harder(skb) != 0){
			kfree_skb(skb);
			return -EINVAL;
		}
	}
#endif /* CONFIG_NETFILTER */
	return skb->dst->output(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 *
 *	Pushes extension headers (from opt) and the IPv6 header onto the
 *	skb, reallocating headroom if the extension headers do not fit,
 *	then sends through NF_IP6_LOCAL_OUT.  The hop limit comes from
 *	the socket when set (>= 0), otherwise from the route.  If the
 *	final packet exceeds the path MTU, a PKT_TOOBIG ICMP is sent to
 *	ourselves (so the socket learns the MTU) and -EMSGSIZE returned.
 */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt)
{
	struct ipv6_pinfo * np = sk ? &sk->net_pinfo.af_inet6 : NULL;
	struct in6_addr *first_hop = fl->nl_u.ip6_u.daddr;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	/* Captured before headers are pushed: becomes payload_len below. */
	int seg_len = skb->len;
	int hlimit;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		/* Hard header length rounded up to a 16-byte boundary. */
		head_room += sizeof(struct ipv6hdr) + ((dst->dev->hard_header_len + 15)&~15);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL)
				return -ENOBUFS;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			/* May redirect first_hop (e.g. via a routing header). */
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	/* Version 6 in the top nibble, flow label in the low bits. */
	*(u32*)hdr = __constant_htonl(0x60000000) | fl->fl6_flowlabel;
	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ((struct rt6_info*)dst)->rt6i_hoplimit;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, fl->nl_u.ip6_u.saddr);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	if (skb->len <= dst->pmtu) {
		IP6_INC_STATS(Ip6OutRequests);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       ip6_maybe_reroute);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst->pmtu, skb->dev);
	kfree_skb(skb);
	return -EMSGSIZE;
}

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is for us performance critical)
 *
 *	Builds a bare IPv6 header for a Neighbour Discovery packet:
 *	no extension headers, no netfilter traversal, hop limit taken
 *	from the socket.  Always returns 0.
 */
int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
	struct ipv6hdr *hdr;
	/* NOTE(review): totlen is computed but never read below. */
	int totlen;

	skb->protocol = __constant_htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	/* Version 6, zero traffic class and flow label. */
	*(u32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}

/*
 * Append and fill an IPv6 header for a single (non-fragmented) packet
 * described by fl.  payload_len is derived from the total pktlength.
 * (skb->nh is a union, so nh.raw set by skb_put aliases nh.ipv6h.)
 * Returns a pointer to the header just written.
 */
static struct ipv6hdr * ip6_bld_1(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
				  int hlimit, unsigned pktlength)
{
	struct ipv6hdr *hdr;

	skb->nh.raw = skb_put(skb, sizeof(struct ipv6hdr));
	hdr = skb->nh.ipv6h;

	*(u32*)hdr = fl->fl6_flowlabel | htonl(0x60000000);

	hdr->payload_len = htons(pktlength - sizeof(struct ipv6hdr));
	hdr->hop_limit = hlimit;
	hdr->nexthdr = fl->proto;

	ipv6_addr_copy(&hdr->saddr, fl->nl_u.ip6_u.saddr);
	ipv6_addr_copy(&hdr->daddr, fl->nl_u.ip6_u.daddr);
	return hdr;
}

/*
 * Append a Fragment extension header to skb and splice it into the
 * header chain: the fragment header inherits the previous next-header
 * value, and *prev_hdr is rewritten to NEXTHDR_FRAGMENT.  The frag_off
 * field is set from the caller-supplied offset and the Identification
 * field is filled by ipv6_select_ident().
 *
 * Returns a pointer to the new header's nexthdr byte, so the caller
 * can keep chaining.  (The local reassignment of prev_hdr has no
 * effect outside this function -- the return value carries it.)
 */
static __inline__ u8 * ipv6_build_fraghdr(struct sk_buff *skb, u8* prev_hdr, unsigned offset)
{
	struct frag_hdr *fhdr;

	fhdr = (struct frag_hdr *) skb_put(skb, sizeof(struct frag_hdr));

	fhdr->nexthdr  = *prev_hdr;
	*prev_hdr = NEXTHDR_FRAGMENT;
	prev_hdr = &fhdr->nexthdr;

	fhdr->reserved = 0;
	fhdr->frag_off = htons(offset);
	ipv6_select_ident(skb, fhdr);
	return &fhdr->nexthdr;
}

static int ip6_frag_xmit(struct sock *sk, inet_getfrag_t getfrag,
			 const void *data, struct dst_entry *dst,
			 struct flowi *fl, struct ipv6_txoptions *opt,
			 struct in6_addr *final_dst,
			 int
hlimit, int flags, unsigned length, int mtu){	struct ipv6hdr *hdr;	struct sk_buff *last_skb;	u8 *prev_hdr;	int unfrag_len;	int frag_len;	int last_len;	int nfrags;	int fhdr_dist;	int frag_off;	int data_off;	int err;	/*	 *	Fragmentation	 *	 *	Extension header order:	 *	Hop-by-hop -> Dest0 -> Routing -> Fragment -> Auth -> Dest1 -> rest (...)	 *		 *	We must build the non-fragmented part that	 *	will be in every packet... this also means	 *	that other extension headers (Dest, Auth, etc)	 *	must be considered in the data to be fragmented	 */	unfrag_len = sizeof(struct ipv6hdr) + sizeof(struct frag_hdr);	last_len = length;	if (opt) {		unfrag_len += opt->opt_nflen;		last_len += opt->opt_flen;	}	/*	 *	Length of fragmented part on every packet but 	 *	the last must be an:	 *	"integer multiple of 8 octects".	 */	frag_len = (mtu - unfrag_len) & ~0x7;	/* Unfragmentable part exceeds mtu. */	if (frag_len <= 0) {		ipv6_local_error(sk, EMSGSIZE, fl, mtu);		return -EMSGSIZE;	}	nfrags = last_len / frag_len;	/*	 *	We must send from end to start because of 	 *	UDP/ICMP checksums. We do a funny trick:	 *	fill the last skb first with the fixed	 *	header (and its data) and then use it	 *	to create the following segments and send it	 *	in the end. If the peer is checking the M_flag	 *	to trigger the reassembly code then this 	 *	might be a good idea.	 */	frag_off = nfrags * frag_len;	last_len -= frag_off;	if (last_len == 0) {		last_len = frag_len;		frag_off -= frag_len;		nfrags--;	}	data_off = frag_off;	/* And it is implementation problem: for now we assume, that	   all the exthdrs will fit to the first fragment.	 */	if (opt) {		if (frag_len < opt->opt_flen) {			ipv6_local_error(sk, EMSGSIZE, fl, mtu);			return -EMSGSIZE;		}		data_off = frag_off - opt->opt_flen;	}	if (flags&MSG_PROBE)		return 0;	last_skb = sock_alloc_send_skb(sk, unfrag_len + frag_len +				       dst->dev->hard_header_len + 15,				       flags & MSG_DONTWAIT, &err);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -