⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ipmr.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	/* Queue a packet whose (origin, group) pair has no route yet:
	 * park it on the unresolved-cache list and report it to the
	 * userspace mroute daemon for resolution.  Consumes skb on every
	 * path.  Returns 0 on success or a negative errno. */
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	/* Look for an existing unresolved entry for this (saddr, daddr). */
	for (c=mfc_unres_queue; c; c=c->next) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr)
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */
		/* Cap the unresolved queue at 10 entries to bound the
		 * memory held for traffic the daemon has not resolved. */
		if (atomic_read(&cache_resolve_queue_len)>=10 ||
		    (c=ipmr_cache_alloc_unres())==NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mfc_parent	= -1;	/* no parent vif known yet */
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/*
		 *	Reflect first query at mrouted.
		 */
		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		/* Link the new entry at the head of the unresolved list
		 * and (re)arm the expiry timer for it. */
		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/*
	 *	See if we can append the packet
	 */
	/* At most 4 packets are held per unresolved entry (qlen > 3
	 * drops the newcomer). */
	if (c->mfc_un.unres.unresolved.qlen>3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC cache manipulation by user space mroute daemon
 */

/* Delete the MFC entry matching mfc's (origin, group) from its hash
 * chain.  Returns 0 on success, -ENOENT if no such entry exists. */
static int ipmr_mfc_delete(struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			/* Unlink under mrt_lock so readers never see a
			 * half-removed entry; free outside the lock. */
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}

/* Add or update an MFC entry.  mrtsock is non-zero when the request
 * comes from the mroute daemon's own socket; otherwise the entry is
 * flagged MFC_STATIC (and so survives mroute_clean_tables()). */
static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
{
	int line;
	struct mfc_cache *uc, *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	/* First see whether an entry for this (origin, group) exists. */
	for
(cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
			break;
	}

	if (c != NULL) {
		/* Existing entry: just refresh parent vif and TTL
		 * thresholds in place. */
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	/* New routes are only accepted for multicast group addresses. */
	if (!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c=ipmr_cache_alloc();
	if (c==NULL)
		return -ENOMEM;

	c->mfc_origin=mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent=mfc->mfcc_parent;
	ipmr_update_thresholds(c, mfc->mfcc_ttls);

	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	/* Publish the entry at the head of its hash chain. */
	write_lock_bh(&mrt_lock);
	c->next = mfc_cache_array[line];
	mfc_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
	     cp = &uc->next) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			*cp = uc->next;
			/* Last unresolved entry gone: the expiry timer
			 * has nothing left to police. */
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	/* uc is NULL here when the loop above ran to completion
	 * without a match. */
	if (uc) {
		ipmr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	/* Static vifs are deliberately preserved. */
	for (i=0; i<maxvif; i++) {
		if (!(vif_table[i].flags&VIFF_STATIC))
			vif_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i=0;i<MFC_LINES;i++) {
		struct mfc_cache *c, **cp;

		cp = &mfc_cache_array[i];
		while ((c = *cp) != NULL) {
			/* Keep static entries: step past them. */
			if (c->mfc_flags&MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	/* Drain the unresolved queue as well, if anything is pending. */
	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc_cache *c;
		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			/* NOTE(review): the lock is dropped around
			 * ipmr_destroy_unres(); presumably that call may
			 * sleep or take other locks - confirm before
			 * restructuring this loop. */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* Destructor run when the mroute socket goes away; registered via
 * ip_ra_control() in the MRT_INIT handler below.  Disables multicast
 * forwarding and flushes the non-static tables. */
static void mrtsock_destruct(struct sock *sk)
{
	rtnl_lock();
	if (sk == mroute_socket) {
		IPV4_DEVCONF_ALL(MC_FORWARDING)--;

		write_lock_bh(&mrt_lock);
		mroute_socket=NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	}
	rtnl_unlock();
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;

	/* Everything except MRT_INIT requires either ownership of the
	 * mroute socket or CAP_NET_ADMIN. */
	if (optname != MRT_INIT) {
		if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		/* Only a raw IGMP socket may become the mroute socket,
		 * and only one such socket may exist at a time. */
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen!=sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (mroute_socket) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		/* Register mrtsock_destruct to tear everything down when
		 * the socket is closed. */
		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			write_lock_bh(&mrt_lock);
			mroute_socket=sk;
			write_unlock_bh(&mrt_lock);

			IPV4_DEVCONF_ALL(MC_FORWARDING)++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk!=mroute_socket)
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen!=sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif,optval,sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname==MRT_ADD_VIF) {
			ret = vif_add(&vif, sk==mroute_socket);
		} else {
			ret = vif_delete(vif.vifc_vifi);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen!=sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc,optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname==MRT_DEL_MFC)
			ret = ipmr_mfc_delete(&mfc);
		else
			ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		mroute_do_assert=(v)?1:0;	/* normalize to 0/1 */
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		/* NOTE(review): this inner `ret` shadows the function-scope
		 * `ret`; harmless here because it is returned directly. */
		int v, ret;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		v = (v)?1:0;
		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			/* Enabling PIM implies assert processing. */
			mroute_do_pim = v;
			mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
			/* Register/unregister the PIM protocol handler to
			 * match the new mode. */
			if (mroute_do_pim)
				ret = inet_add_protocol(&pim_protocol,
							IPPROTO_PIM);
			else
				ret = inet_del_protocol(&pim_protocol,
							IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
#endif
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
{
	int olr;
	int val;

	if (optname!=MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	   optname!=MRT_PIM &&
#endif
	   optname!=MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	/* Copy back at most sizeof(int) bytes, honouring a smaller
	 * user-supplied length. */
	olr = min_t(unsigned int, olr, sizeof(int));
	/* NOTE(review): after the unsigned min_t clamp, olr can no longer
	 * be negative, so this check appears unreachable - confirm the
	 * intended handling of negative user lengths. */
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr,optlen))
		return -EFAULT;
	if (optname==MRT_VERSION)
		val=0x0305;	/* mroute ABI version reported to userspace */
#ifdef CONFIG_IP_PIMSM
	else if (optname==MRT_PIM)
		val=mroute_do_pim;
#endif
	else
		val=mroute_do_assert;
	if (copy_to_user(optval,&val,olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch (cmd) {
	case SIOCGETVIFCNT:
		/* Return per-vif packet/byte counters. */
		if (copy_from_user(&vr,arg,sizeof(vr)))
			return -EFAULT;
		if (vr.vifi>=maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif=&vif_table[vr.vifi];
		if (VIF_EXISTS(vr.vifi))	{
			vr.icount=vif->pkt_in;
			vr.ocount=vif->pkt_out;
			vr.ibytes=vif->bytes_in;
			vr.obytes=vif->bytes_out;
			read_unlock(&mrt_lock);

			/* copy_to_user() happens after the lock is
			 * released - it may fault/sleep. */
			if (copy_to_user(arg,&vr,sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		/* Return per-(source, group) forwarding statistics. */
		if (copy_from_user(&sr,arg,sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg,&sr,sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

/* Netdevice notifier: when a device is unregistered, delete any vifs
 * bound to it so the vif table never references a dead device. */
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct vif_device *v;
	int ct;

	/* Only the initial network namespace is handled here. */
	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v=&vif_table[0];
	for (ct=0;ct<maxvif;ct++,v++) {
		if (v->dev==dev)
			vif_delete(ct);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier={
	.notifier_call = ipmr_device_event,
};

/*
 * 	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	struct iphdr *old_iph = ip_hdr(skb);

	/* Prepend a fresh outer header and make it the network header;
	 * the original header becomes the transport header. */
	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* Outer header inherits tos/ttl from the inner one. */
	iph->version	= 	4;
	iph->tos	=	old_iph->tos;
	iph->ttl	=	old_iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;	/* no options: 20-byte header */
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(iph, skb->dst, NULL);
	ip_send_check(iph);

	/* Inner IP options must not leak into the outer header's cb. */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

/* Final output step after the NF_IP_FORWARD hook: bump forwarding
 * stats, handle IP options, and hand off to the route's output. */
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options * opt	= &(IPCB(skb)->opt);

	IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}

/*
 *	Processing handlers for ipmr_forward
 */

/* Transmit one copy of skb on vif `vifi`.  Consumes skb on all paths. */
static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	/* Register vifs never transmit: the whole packet is handed to
	 * the daemon as an IGMPMSG_WHOLEPKT report instead. */
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out+=skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		kfree_skb(skb);
		return;
	}
#endif

	/* Route either towards the tunnel endpoint (IPIP encapsulation
	 * will be added below) or directly to the multicast destination. */
	if (vif->flags&VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
		encap = sizeof(struct iphdr);	/* room for the outer header */
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
	}

	dev =
rt->u.dst.dev;

	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow to send ICMP, so that packets will disappear
		   to blackhole.
		 */

		IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	/* Headroom needed: link-layer header plus any dst header, on top
	 * of the encapsulation space computed above. */
	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out+=skb->len;

	/* Swap in the new route before decrementing the TTL. */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++;
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
	return;
}

/* Find the vif index bound to `dev`, scanning from the highest slot
 * down.  Returns -1 when no vif uses the device. */
static int ipmr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct=maxvif-1; ct>=0; ct--) {
		if (vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -