
📄 ipmr.c

📁 GNU Hurd source code
💻 C
📖 Page 1 of 3
		}

		/*
		 *	Fill in the new cache entry
		 */
		cache->mfc_parent=ALL_VIFS;
		cache->mfc_origin=skb->nh.iph->saddr;
		cache->mfc_mcastgrp=skb->nh.iph->daddr;
		cache->mfc_flags=MFC_QUEUED;

		/*
		 *	Link to the unresolved list
		 */
		ipmr_cache_insert(cache);
		cache_resolve_queue_len++;

		/*
		 *	Fire off the expiry timer
		 */
		cache->mfc_timer.expires=jiffies+10*HZ;
		add_timer(&cache->mfc_timer);

		/*
		 *	Reflect first query at mrouted.
		 */
		if(mroute_socket)
		{
			/* If the report failed throw the cache entry
			   out - Brad Parker

			   OK, OK, Brad. Only do not forget to free skb
			   and return :-) --ANK
			 */
			if (ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE)<0) {
				ipmr_cache_delete(cache);
				kfree_skb(skb);
				return -ENOBUFS;
			}
		}
	}

	/*
	 *	See if we can append the packet
	 */
	if(cache->mfc_queuelen>3)
	{
		kfree_skb(skb);
		return -ENOBUFS;
	}
	cache->mfc_queuelen++;
	skb_queue_tail(&cache->mfc_unresolved,skb);
	return 0;
}

/*
 *	MFC cache manipulation by user space mroute daemon
 */

int ipmr_mfc_modify(int action, struct mfcctl *mfc)
{
	struct mfc_cache *cache;

	if(!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	/*
	 *	Find the cache line
	 */
	start_bh_atomic();

	cache=ipmr_cache_find(mfc->mfcc_origin.s_addr,mfc->mfcc_mcastgrp.s_addr);

	/*
	 *	Delete an entry
	 */
	if(action==MRT_DEL_MFC)
	{
		if(cache)
		{
			ipmr_cache_delete(cache);
			end_bh_atomic();
			return 0;
		}
		end_bh_atomic();
		return -ENOENT;
	}
	if(cache)
	{
		/*
		 *	Update the cache, see if it frees a pending queue
		 */
		cache->mfc_flags|=MFC_RESOLVED;
		cache->mfc_parent=mfc->mfcc_parent;
		ipmr_update_threshoulds(cache, mfc->mfcc_ttls);

		/*
		 *	Check to see if we resolved a queued list. If so we
		 *	need to send on the frames and tidy up.
		 */
		if(cache->mfc_flags&MFC_QUEUED)
			ipmr_cache_resolve(cache);	/* Unhook & send the frames */
		end_bh_atomic();
		return 0;
	}

	/*
	 *	Unsolicited update - that's ok, add anyway.
	 */
	cache=ipmr_cache_alloc(GFP_ATOMIC);
	if(cache==NULL)
	{
		end_bh_atomic();
		return -ENOMEM;
	}
	cache->mfc_flags=MFC_RESOLVED;
	cache->mfc_origin=mfc->mfcc_origin.s_addr;
	cache->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
	cache->mfc_parent=mfc->mfcc_parent;
	ipmr_update_threshoulds(cache, mfc->mfcc_ttls);
	ipmr_cache_insert(cache);
	end_bh_atomic();
	return 0;
}

static void mrtsock_destruct(struct sock *sk)
{
	if (sk == mroute_socket) {
		ipv4_devconf.mc_forwarding = 0;

		mroute_socket=NULL;
		synchronize_bh();

		mroute_close(sk);
	}
}
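The MFC cache above is maintained from user space through the MRT_* socket options handled next. As a rough, hypothetical sketch of that kernel/user symbiosis (the addresses and VIF numbers are invented, and it assumes a Linux-style <linux/mroute.h> is available), an mrouted-like daemon would install a resolved (S,G) entry roughly like this:

/* Hypothetical user-space sketch: install one (S,G) entry via MRT_ADD_MFC.
 * The 10.0.0.1 / 224.1.2.3 addresses and VIF numbers 0 and 1 are made up,
 * and the VIFs are assumed to have been added already. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
	struct mfcctl mfc;

	/* Only one mroute socket may exist; a second MRT_INIT gets EADDRINUSE. */
	if (fd < 0 || setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0)
		return 1;

	memset(&mfc, 0, sizeof(mfc));
	inet_pton(AF_INET, "10.0.0.1", &mfc.mfcc_origin);	/* source S (example) */
	inet_pton(AF_INET, "224.1.2.3", &mfc.mfcc_mcastgrp);	/* group G (example) */
	mfc.mfcc_parent = 0;		/* incoming VIF */
	mfc.mfcc_ttls[1] = 1;		/* forward out VIF 1 if TTL >= 1 */

	/* Ends up in ipmr_mfc_modify(MRT_ADD_MFC, &mfc) shown above. */
	if (setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mfc, sizeof(mfc)) < 0)
		perror("MRT_ADD_MFC");
	return 0;
}

The MRT_ADD_MFC case of ip_mroute_setsockopt() below copies the structure in with copy_from_user() and hands it to ipmr_mfc_modify().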
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk,int optname,char *optval,int optlen)
{
	struct vifctl vif;
	struct mfcctl mfc;

	if(optname!=MRT_INIT)
	{
		if(sk!=mroute_socket)
			return -EACCES;
	}

	switch(optname)
	{
		case MRT_INIT:
			if(sk->type!=SOCK_RAW || sk->num!=IPPROTO_IGMP)
				return -EOPNOTSUPP;
			if(optlen!=sizeof(int))
				return -ENOPROTOOPT;
			{
				int opt;
				if (get_user(opt,(int *)optval))
					return -EFAULT;
				if (opt != 1)
					return -ENOPROTOOPT;
			}
			if(mroute_socket)
				return -EADDRINUSE;
			mroute_socket=sk;
			ipv4_devconf.mc_forwarding = 1;
			if (ip_ra_control(sk, 1, mrtsock_destruct) == 0)
				return 0;
			mrtsock_destruct(sk);
			return -EADDRINUSE;
		case MRT_DONE:
			return ip_ra_control(sk, 0, NULL);
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
			if(optlen!=sizeof(vif))
				return -EINVAL;
			if (copy_from_user(&vif,optval,sizeof(vif)))
				return -EFAULT;
			if(vif.vifc_vifi >= MAXVIFS)
				return -ENFILE;
			if(optname==MRT_ADD_VIF)
			{
				struct vif_device *v=&vif_table[vif.vifc_vifi];
				struct device *dev;
				struct in_device *in_dev;

				/* Is vif busy ? */
				if (vifc_map&(1<<vif.vifc_vifi))
					return -EADDRINUSE;

				switch (vif.vifc_flags) {
#ifdef CONFIG_IP_PIMSM
				case VIFF_REGISTER:
				/*
				 * Special Purpose VIF in PIM
				 * All the packets will be sent to the daemon
				 */
					if (reg_vif_num >= 0)
						return -EADDRINUSE;
					reg_vif_num = vif.vifc_vifi;
					dev = ipmr_reg_vif(&vif);
					if (!dev) {
						reg_vif_num = -1;
						return -ENOBUFS;
					}
					break;
#endif
				case VIFF_TUNNEL:
					dev = ipmr_new_tunnel(&vif);
					if (!dev)
						return -ENOBUFS;
					break;
				case 0:
					dev=ip_dev_find(vif.vifc_lcl_addr.s_addr);
					if (!dev)
						return -EADDRNOTAVAIL;
					break;
				default:
#if 0
					printk(KERN_DEBUG "ipmr_add_vif: flags %02x\n", vif.vifc_flags);
#endif
					return -EINVAL;
				}

				if ((in_dev = dev->ip_ptr) == NULL)
					return -EADDRNOTAVAIL;
				if (in_dev->cnf.mc_forwarding)
					return -EADDRINUSE;
				in_dev->cnf.mc_forwarding = 1;
				dev_set_allmulti(dev, +1);
				ip_rt_multicast_event(in_dev);

				/*
				 *	Fill in the VIF structures
				 */
				start_bh_atomic();
				v->rate_limit=vif.vifc_rate_limit;
				v->local=vif.vifc_lcl_addr.s_addr;
				v->remote=vif.vifc_rmt_addr.s_addr;
				v->flags=vif.vifc_flags;
				v->threshold=vif.vifc_threshold;
				v->dev=dev;
				v->bytes_in = 0;
				v->bytes_out = 0;
				v->pkt_in = 0;
				v->pkt_out = 0;
				v->link = dev->ifindex;
				if (vif.vifc_flags&(VIFF_TUNNEL|VIFF_REGISTER))
					v->link = dev->iflink;
				vifc_map|=(1<<vif.vifc_vifi);
				if (vif.vifc_vifi+1 > maxvif)
					maxvif = vif.vifc_vifi+1;
				end_bh_atomic();
				return 0;
			} else {
				int ret;
				rtnl_lock();
				ret = vif_delete(vif.vifc_vifi);
				rtnl_unlock();
				return ret;
			}

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
			if(optlen!=sizeof(mfc))
				return -EINVAL;
			if (copy_from_user(&mfc,optval, sizeof(mfc)))
				return -EFAULT;
			return ipmr_mfc_modify(optname, &mfc);

		/*
		 *	Control PIM assert.
		 */
		case MRT_ASSERT:
		{
			int v;
			if(get_user(v,(int *)optval))
				return -EFAULT;
			mroute_do_assert=(v)?1:0;
			return 0;
		}
#ifdef CONFIG_IP_PIMSM
		case MRT_PIM:
		{
			int v;
			if(get_user(v,(int *)optval))
				return -EFAULT;
			v = (v)?1:0;
			if (v != mroute_do_pim) {
				mroute_do_pim = v;
				mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
				if (mroute_do_pim)
					inet_add_protocol(&pim_protocol);
				else
					inet_del_protocol(&pim_protocol);
#endif
			}
			return 0;
		}
#endif
		/*
		 *	Spurious command, or MRT_VERSION which you cannot
		 *	set.
		 */
		default:
			return -ENOPROTOOPT;
	}
}
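For completeness, a similarly hypothetical user-space sketch of the MRT_ADD_VIF path exercised above; the 192.168.1.1 local address is an example value, and the call fails with EADDRINUSE if the VIF slot or interface is already in use, just as in the kernel code.

/* Hypothetical user-space sketch: register a physical interface as VIF 0.
 * Assumes the MRT_INIT'ed IGMP socket from the previous sketch. */
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute.h>

int add_vif0(int mrt_fd)
{
	struct vifctl vc;

	memset(&vc, 0, sizeof(vc));
	vc.vifc_vifi = 0;		/* slot in vif_table[] */
	vc.vifc_flags = 0;		/* plain interface, not VIFF_TUNNEL/VIFF_REGISTER */
	vc.vifc_threshold = 1;		/* minimum TTL to forward */
	vc.vifc_rate_limit = 0;
	inet_pton(AF_INET, "192.168.1.1", &vc.vifc_lcl_addr);	/* example address */

	/* Handled by the MRT_ADD_VIF case above; with vifc_flags == 0 the
	 * kernel resolves the local address with ip_dev_find(). */
	return setsockopt(mrt_fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
}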
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk,int optname,char *optval,int *optlen)
{
	int olr;
	int val;

	if(sk!=mroute_socket)
		return -EACCES;
	if(optname!=MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	   optname!=MRT_PIM &&
#endif
	   optname!=MRT_ASSERT)
		return -ENOPROTOOPT;

	if(get_user(olr, optlen))
		return -EFAULT;

	olr=min(olr,sizeof(int));
	if(put_user(olr,optlen))
		return -EFAULT;
	if(optname==MRT_VERSION)
		val=0x0305;
#ifdef CONFIG_IP_PIMSM
	else if(optname==MRT_PIM)
		val=mroute_do_pim;
#endif
	else
		val=mroute_do_assert;
	if(copy_to_user(optval,&val,olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch(cmd)
	{
		case SIOCGETVIFCNT:
			if (copy_from_user(&vr,(void *)arg,sizeof(vr)))
				return -EFAULT;
			if(vr.vifi>=maxvif)
				return -EINVAL;
			vif=&vif_table[vr.vifi];
			if(vifc_map&(1<<vr.vifi))
			{
				vr.icount=vif->pkt_in;
				vr.ocount=vif->pkt_out;
				vr.ibytes=vif->bytes_in;
				vr.obytes=vif->bytes_out;
				if (copy_to_user((void *)arg,&vr,sizeof(vr)))
					return -EFAULT;
				return 0;
			}
			return -EADDRNOTAVAIL;
		case SIOCGETSGCNT:
			if (copy_from_user(&sr,(void *)arg,sizeof(sr)))
				return -EFAULT;
			for (c = mfc_cache_array[MFC_HASH(sr.grp.s_addr, sr.src.s_addr)];
			     c; c = c->next) {
				if (sr.grp.s_addr == c->mfc_mcastgrp &&
				    sr.src.s_addr == c->mfc_origin) {
					sr.pktcnt = c->mfc_pkt;
					sr.bytecnt = c->mfc_bytes;
					sr.wrong_if = c->mfc_wrong_if;
					if (copy_to_user((void *)arg,&sr,sizeof(sr)))
						return -EFAULT;
					return 0;
				}
			}
			return -EADDRNOTAVAIL;
		default:
			return -ENOIOCTLCMD;
	}
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

void mroute_close(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	rtnl_lock();
	for(i=0; i<maxvif; i++)
		vif_delete(i);
	rtnl_unlock();

	/*
	 *	Wipe the cache
	 */
	for(i=0;i<MFC_LINES;i++)
	{
		start_bh_atomic();
		while(mfc_cache_array[i]!=NULL)
			ipmr_cache_delete(mfc_cache_array[i]);
		end_bh_atomic();
	}
}

static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v=&vif_table[0];
	for(ct=0;ct<maxvif;ct++) {
		if (vifc_map&(1<<ct) && v->dev==ptr)
			vif_delete(ct);
		v++;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier={
	ipmr_device_event,
	NULL,
	0
};
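The per-VIF counters that ipmr_ioctl() exports can be read from user space with SIOCGETVIFCNT. A minimal hypothetical sketch, assuming the same mroute socket as in the earlier sketches and that VIF 0 has been installed:

/* Hypothetical user-space sketch: query VIF 0's packet/byte counters via
 * the SIOCGETVIFCNT ioctl served by ipmr_ioctl() above. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <linux/mroute.h>

void dump_vif0_counters(int mrt_fd)
{
	struct sioc_vif_req vr;

	memset(&vr, 0, sizeof(vr));
	vr.vifi = 0;			/* which vif_table[] slot to query (example) */

	if (ioctl(mrt_fd, SIOCGETVIFCNT, &vr) == 0)
		printf("vif0: %lu pkts in, %lu pkts out, %lu bytes in, %lu bytes out\n",
		       vr.icount, vr.ocount, vr.ibytes, vr.obytes);
	else
		perror("SIOCGETVIFCNT");	/* EADDRNOTAVAIL if the VIF is not installed */
}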
/*
 * 	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, u32 saddr, u32 daddr)
{
	struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));

	iph->version	=	4;
	iph->tos	=	skb->nh.iph->tos;
	iph->ttl	=	skb->nh.iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;
	iph->tot_len	=	htons(skb->len);
	iph->id		=	htons(ip_id_count++);
	ip_send_check(iph);

	skb->h.ipiph = skb->nh.iph;
	skb->nh.iph = iph;
}

/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c,
			   int vifi, int last)
{
	struct iphdr *iph = skb->nh.iph;
	struct vif_device *vif = &vif_table[vifi];
	struct device *dev;
	struct rtable *rt;
	int    encap = 0;
	struct sk_buff *skb2;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out+=skb->len;
		((struct net_device_stats*)vif->dev->priv)->tx_bytes += skb->len;
		((struct net_device_stats*)vif->dev->priv)->tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		return;
	}
#endif

	if (vif->flags&VIFF_TUNNEL) {
		if (ip_route_output(&rt, vif->remote, vif->local, RT_TOS(iph->tos), vif->link))
			return;
		encap = sizeof(struct iphdr);
	} else {
		if (ip_route_output(&rt, iph->daddr, 0, RT_TOS(iph->tos), vif->link))
			return;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > rt->u.dst.pmtu && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow to send ICMP, so that packets will disappear
		   to blackhole.
		 */
		ip_statistics.IpFragFails++;
		ip_rt_put(rt);
		return;
	}

	encap += dev->hard_header_len;

	if (skb_headroom(skb) < encap || skb_cloned(skb) || !last)
		skb2 = skb_realloc_headroom(skb, (encap + 15)&~15);
	else if (atomic_read(&skb->users) != 1)
		skb2 = skb_clone(skb, GFP_ATOMIC);
