
📄 ipmr.c

📁 Linux kernel source code, distributed as a compressed archive; this is the source code accompanying the book "The Linux Kernel"
💻 C
📖 Page 1 of 3
	msg->im_msgtype = assert;
	igmp->code	=	0;
	skb->nh.iph->tot_len=htons(skb->len);			/* Fix the length */
	skb->h.raw = skb->nh.raw;
	}

	if (mroute_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to mrouted
	 */
	if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	int err;
	struct mfc_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c=mfc_unres_queue; c; c=c->next) {
		if (c->mfc_mcastgrp == skb->nh.iph->daddr &&
		    c->mfc_origin == skb->nh.iph->saddr)
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */
		if (atomic_read(&cache_resolve_queue_len)>=10 ||
		    (c=ipmr_cache_alloc_unres())==NULL) {
			spin_unlock_bh(&mfc_unres_lock);
			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mfc_parent=-1;
		c->mfc_origin=skb->nh.iph->saddr;
		c->mfc_mcastgrp=skb->nh.iph->daddr;

		/*
		 *	Reflect first query at mrouted.
		 */
		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);
			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen>3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC cache manipulation by user space mroute daemon
 */

int ipmr_mfc_delete(struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}
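/*
 *	Editorial note (not part of the original ipmr.c): ipmr_mfc_delete
 *	above uses the pointer-to-pointer idiom -- cp walks the *links* of
 *	the hash chain rather than the nodes, so "*cp = c->next" splices a
 *	node out without tracking a separate "previous" pointer. The same
 *	idiom reappears in ipmr_mfc_add and mroute_clean_tables below. A
 *	minimal userspace sketch, with a hypothetical node type:
 */
#if 0	/* illustrative sketch only, not compiled with ipmr.c */
#include <stdlib.h>

struct node { int key; struct node *next; };

static int list_delete(struct node **head, int key)
{
	struct node *c, **cp;

	/* cp always points at the link that currently points at c, so
	   writing *cp = c->next unlinks c whether it is the first,
	   a middle, or the last element of the chain. */
	for (cp = head; (c = *cp) != NULL; cp = &c->next) {
		if (c->key == key) {
			*cp = c->next;
			free(c);
			return 0;
		}
	}
	return -1;	/* not found, like ipmr_mfc_delete's -ENOENT */
}
#endif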
int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
{
	int line;
	struct mfc_cache *uc, *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_threshoulds(c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if(!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c=ipmr_cache_alloc();
	if (c==NULL)
		return -ENOMEM;

	c->mfc_origin=mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent=mfc->mfcc_parent;
	ipmr_update_threshoulds(c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc_cache_array[line];
	mfc_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
	     cp = &uc->next) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ipmr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for(i=0; i<maxvif; i++) {
		if (!(vif_table[i].flags&VIFF_STATIC))
			vif_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i=0;i<MFC_LINES;i++) {
		struct mfc_cache *c, **cp;

		cp = &mfc_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags&MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static void mrtsock_destruct(struct sock *sk)
{
	rtnl_lock();
	if (sk == mroute_socket) {
		ipv4_devconf.mc_forwarding--;

		write_lock_bh(&mrt_lock);
		mroute_socket=NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	}
	rtnl_unlock();
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk,int optname,char *optval,int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;

	if(optname!=MRT_INIT)
	{
		if(sk!=mroute_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch(optname)
	{
		case MRT_INIT:
			if(sk->type!=SOCK_RAW || sk->num!=IPPROTO_IGMP)
				return -EOPNOTSUPP;
			if(optlen!=sizeof(int))
				return -ENOPROTOOPT;

			rtnl_lock();
			if (mroute_socket) {
				rtnl_unlock();
				return -EADDRINUSE;
			}

			ret = ip_ra_control(sk, 1, mrtsock_destruct);
			if (ret == 0) {
				write_lock_bh(&mrt_lock);
				mroute_socket=sk;
				write_unlock_bh(&mrt_lock);

				ipv4_devconf.mc_forwarding++;
			}
			rtnl_unlock();
			return ret;

		case MRT_DONE:
			if (sk!=mroute_socket)
				return -EACCES;
			return ip_ra_control(sk, 0, NULL);

		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
			if(optlen!=sizeof(vif))
				return -EINVAL;
			if (copy_from_user(&vif,optval,sizeof(vif)))
				return -EFAULT;
			if(vif.vifc_vifi >= MAXVIFS)
				return -ENFILE;
			rtnl_lock();
			if (optname==MRT_ADD_VIF) {
				ret = vif_add(&vif, sk==mroute_socket);
			} else {
				ret = vif_delete(vif.vifc_vifi);
			}
			rtnl_unlock();
			return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
			if(optlen!=sizeof(mfc))
				return -EINVAL;
			if (copy_from_user(&mfc,optval, sizeof(mfc)))
				return -EFAULT;
			rtnl_lock();
			if (optname==MRT_DEL_MFC)
				ret = ipmr_mfc_delete(&mfc);
			else
				ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
			rtnl_unlock();
			return ret;

		/*
		 *	Control PIM assert.
		 */
		case MRT_ASSERT:
		{
			int v;
			if(get_user(v,(int *)optval))
				return -EFAULT;
			mroute_do_assert=(v)?1:0;
			return 0;
		}
#ifdef CONFIG_IP_PIMSM
		case MRT_PIM:
		{
			int v;
			if(get_user(v,(int *)optval))
				return -EFAULT;
			v = (v)?1:0;
			rtnl_lock();
			if (v != mroute_do_pim) {
				mroute_do_pim = v;
				mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
				if (mroute_do_pim)
					inet_add_protocol(&pim_protocol);
				else
					inet_del_protocol(&pim_protocol);
#endif
			}
			rtnl_unlock();
			return 0;
		}
#endif
		/*
		 *	Spurious command, or MRT_VERSION which you cannot
		 *	set.
		 */
		default:
			return -ENOPROTOOPT;
	}
}
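/*
 *	Editorial note (not part of the original ipmr.c): the setsockopt
 *	interface above is driven from userspace by a multicast routing
 *	daemon such as mrouted. A minimal sketch of the handshake the code
 *	enforces -- MRT_INIT only succeeds on a raw IGMP socket, and vifs
 *	are added through MRT_ADD_VIF. The address 192.0.2.1 is a
 *	hypothetical local interface address; running this needs root.
 */
#if 0	/* illustrative userspace sketch only, not compiled with ipmr.c */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/mroute.h>

int main(void)
{
	int s, on = 1;
	struct vifctl vc;

	/* Must be SOCK_RAW/IPPROTO_IGMP, per the MRT_INIT checks above. */
	s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
	if (s < 0 || setsockopt(s, IPPROTO_IP, MRT_INIT, &on, sizeof(on)) < 0) {
		perror("MRT_INIT");
		return 1;
	}

	/* Register virtual interface 0 on a local address. */
	memset(&vc, 0, sizeof(vc));
	vc.vifc_vifi = 0;
	vc.vifc_threshold = 1;	/* minimum TTL forwarded on this vif */
	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
	if (setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc)) < 0)
		perror("MRT_ADD_VIF");

	/* Tear down; triggers mrtsock_destruct() -> mroute_clean_tables(). */
	setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);
	return 0;
}
#endif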
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk,int optname,char *optval,int *optlen)
{
	int olr;
	int val;

	if(optname!=MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	   optname!=MRT_PIM &&
#endif
	   optname!=MRT_ASSERT)
		return -ENOPROTOOPT;

	if(get_user(olr, optlen))
		return -EFAULT;

	olr=min(olr,sizeof(int));
	if(put_user(olr,optlen))
		return -EFAULT;
	if(optname==MRT_VERSION)
		val=0x0305;
#ifdef CONFIG_IP_PIMSM
	else if(optname==MRT_PIM)
		val=mroute_do_pim;
#endif
	else
		val=mroute_do_assert;
	if(copy_to_user(optval,&val,olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch(cmd)
	{
		case SIOCGETVIFCNT:
			if (copy_from_user(&vr,(void *)arg,sizeof(vr)))
				return -EFAULT;
			if(vr.vifi>=maxvif)
				return -EINVAL;
			read_lock(&mrt_lock);
			vif=&vif_table[vr.vifi];
			if(VIF_EXISTS(vr.vifi)) {
				vr.icount=vif->pkt_in;
				vr.ocount=vif->pkt_out;
				vr.ibytes=vif->bytes_in;
				vr.obytes=vif->bytes_out;
				read_unlock(&mrt_lock);

				if (copy_to_user((void *)arg,&vr,sizeof(vr)))
					return -EFAULT;
				return 0;
			}
			read_unlock(&mrt_lock);
			return -EADDRNOTAVAIL;
		case SIOCGETSGCNT:
			if (copy_from_user(&sr,(void *)arg,sizeof(sr)))
				return -EFAULT;
			read_lock(&mrt_lock);
			c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
			if (c) {
				sr.pktcnt = c->mfc_un.res.pkt;
				sr.bytecnt = c->mfc_un.res.bytes;
				sr.wrong_if = c->mfc_un.res.wrong_if;
				read_unlock(&mrt_lock);

				if (copy_to_user((void *)arg,&sr,sizeof(sr)))
					return -EFAULT;
				return 0;
			}
			read_unlock(&mrt_lock);
			return -EADDRNOTAVAIL;
		default:
			return -ENOIOCTLCMD;
	}
}
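/*
 *	Editorial note (not part of the original ipmr.c): the SIOCGETVIFCNT
 *	branch above copies per-vif packet/byte counters back to userspace.
 *	A sketch of the caller's side, assuming 'fd' is the mrouted control
 *	socket from the earlier sketch; dump_vif_counters is a hypothetical
 *	helper name.
 */
#if 0	/* illustrative userspace sketch only, not compiled with ipmr.c */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute.h>

static void dump_vif_counters(int fd, int vifi)
{
	struct sioc_vif_req vr;

	vr.vifi = vifi;
	if (ioctl(fd, SIOCGETVIFCNT, &vr) == 0)
		printf("vif %d: in %lu pkts/%lu bytes, out %lu pkts/%lu bytes\n",
		       vifi, vr.icount, vr.ibytes, vr.ocount, vr.obytes);
	else
		perror("SIOCGETVIFCNT");	/* e.g. EADDRNOTAVAIL if unused */
}
#endif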
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v=&vif_table[0];
	for(ct=0;ct<maxvif;ct++,v++) {
		if (v->dev==ptr)
			vif_delete(ct);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier={
	ipmr_device_event,
	NULL,
	0
};

/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, u32 saddr, u32 daddr)
{
	struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));

	iph->version	=	4;
	iph->tos	=	skb->nh.iph->tos;
	iph->ttl	=	skb->nh.iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(iph, skb->dst);
	ip_send_check(iph);

	skb->h.ipiph = skb->nh.iph;
	skb->nh.iph = iph;
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
}

static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (skb->len <= dst->pmtu)
		return dst->output(skb);
	else
		return ip_fragment(skb, dst->output);
}

/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c,
			   int vifi, int last)
{
	struct iphdr *iph = skb->nh.iph;
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int    encap = 0;
	struct sk_buff *skb2;

	if (vif->dev == NULL)
		return;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out+=skb->len;
		((struct net_device_stats*)vif->dev->priv)->tx_bytes += skb->len;
		((struct net_device_stats*)vif->dev->priv)->tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		return;
	}
#endif

	if (vif->flags&VIFF_TUNNEL) {
		if (ip_route_output(&rt, vif->remote, vif->local, RT_TOS(iph->tos), vif->link))
			return;
		encap = sizeof(struct iphdr);
	} else {
		if (ip_route_output(&rt, iph->daddr, 0, RT_TOS(iph->tos), vif->link))
			return;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > rt->u.dst.pmtu && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow to send ICMP, so that packets will disappear
		   to blackhole.
		 */
		IP_INC_STATS_BH(IpFragFails);
		ip_rt_put(rt);
		return;
	}

	encap += dev->hard_header_len;

	if (skb_headroom(skb) < encap || skb_cloned(skb) || !last)
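/*
 *	Editorial note (not part of the original ipmr.c): the listing breaks
 *	off mid-statement here -- this is page 1 of 3 on the source site and
 *	ipmr_queue_xmit continues on the next page. For reference, a sketch
 *	of the IPIP prepend that ip_encap() performs with skb_push():
 *	ipip_encap is a hypothetical userspace helper operating on a plain
 *	byte buffer with header room in front, not kernel API.
 */
#if 0	/* illustrative userspace sketch only, not compiled with ipmr.c */
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <linux/ip.h>

/* 'pkt' holds an inner IPv4 packet of 'len' bytes and must have at least
   sizeof(struct iphdr) bytes of headroom before it. Returns the new outer
   header; checksum and IP ID are left for a separate routine, where the
   kernel uses ip_send_check() and ip_select_ident(). */
static struct iphdr *ipip_encap(unsigned char *pkt, size_t len,
				uint32_t saddr, uint32_t daddr)
{
	struct iphdr *inner = (struct iphdr *)pkt;
	struct iphdr *outer = (struct iphdr *)(pkt - sizeof(struct iphdr));

	outer->version	= 4;
	outer->ihl	= 5;			/* no options: 20-byte header */
	outer->tos	= inner->tos;		/* copy TOS and TTL from the */
	outer->ttl	= inner->ttl;		/* inner header, as ip_encap does */
	outer->frag_off	= 0;
	outer->tot_len	= htons(len + sizeof(struct iphdr));
	outer->id	= 0;
	outer->protocol	= IPPROTO_IPIP;		/* payload is another IPv4 packet */
	outer->saddr	= saddr;
	outer->daddr	= daddr;
	outer->check	= 0;
	return outer;
}
#endif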
