
📄 ip6_output.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	airthmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}

static int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}

static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;

	return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
	       skb->dst->dev->mtu : dst_mtu(skb->dst);
}

int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
				dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
				dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);

/*
 *	To avoid extra problems ND packets are send through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is for us performance critical)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	*(__be32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}

static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without ane WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 &&
	    !skb->sp) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
			goto error;
		}
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
