/*
 * mcast.c -- excerpt from Linux kernel 2.6.9 (OMAP1710 port),
 * IPv6 multicast / MLD support (net/ipv6/mcast.c).
 * C source, 2,496 lines in the full file; this is part 1 of 4.
 */
	/* NOTE(review): tail of a function whose start is on a previous page
	 * (the source-list membership check, presumably inet6_mc_check --
	 * confirm against part 0). The closing brace below ends its
	 * if/else over psl.
	 */
	if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
			rv = 0;
	}
	read_unlock(&ipv6_sk_mc_lock);
	return rv;
}

/*
 * Drop one reference on a group entry; on the last reference, release
 * the hold on the inet6_dev and free the entry.
 */
static void ma_put(struct ifmcaddr6 *mc)
{
	if (atomic_dec_and_test(&mc->mca_refcnt)) {
		in6_dev_put(mc->idev);
		kfree(mc);
	}
}

/*
 * A group was added on this interface: install the link-layer multicast
 * filter (once, guarded by MAF_LOADED) and, unless the group is
 * unreportable or the device is down, announce membership -- via an
 * MLDv1 join if a v1 router has been seen, otherwise by scheduling an
 * MLDv2 interface-change report.
 */
static void igmp6_group_added(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	spin_lock_bh(&mc->mca_lock);
	if (!(mc->mca_flags&MAF_LOADED)) {
		mc->mca_flags |= MAF_LOADED;
		/* map the IPv6 group to a link-layer address and add it
		 * to the device's hardware multicast filter */
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_add(dev, buf, dev->addr_len, 0);
	}
	spin_unlock_bh(&mc->mca_lock);

	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
		return;

	if (MLD_V1_SEEN(mc->idev)) {
		igmp6_join_group(mc);
		return;
	}
	/* else v2 */
	mc->mca_crcount = mc->idev->mc_qrv;
	mld_ifc_event(mc->idev);
}

/*
 * A group was dropped from this interface: remove the link-layer filter
 * entry, send an MLD leave (unless the group is unreportable or the
 * device is being destroyed), cancel any pending report timer (dropping
 * the refcount the timer held), and clear the source-filter lists.
 * Note the lock is released around igmp6_leave_group(), which transmits.
 */
static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	spin_lock_bh(&mc->mca_lock);
	if (mc->mca_flags&MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_delete(dev, buf, dev->addr_len, 0);
	}

	if (mc->mca_flags & MAF_NOREPORT)
		goto done;
	spin_unlock_bh(&mc->mca_lock);

	if (!mc->idev->dead)
		igmp6_leave_group(mc);

	spin_lock_bh(&mc->mca_lock);
	if (del_timer(&mc->mca_timer))
		atomic_dec(&mc->mca_refcnt);
done:
	ip6_mc_clear_src(mc);
	spin_unlock_bh(&mc->mca_lock);
}

/*
 * deleted ifmcaddr6 manipulation
 */

/*
 * Record a just-deleted group on the inet6_dev "tomb" list so that the
 * required MLDv2 change reports can still be sent for it.  Takes over
 * the source lists from 'im' when it was in INCLUDE mode.
 */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc;

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = (struct ifmcaddr6 *)kmalloc(sizeof(*pmc), GFP_ATOMIC);
	if (!pmc)
		return;		/* best effort: no memory, no tombstone */
	memset(pmc, 0, sizeof(*pmc));
	spin_lock_bh(&im->mca_lock);
	pmc->mca_lock = SPIN_LOCK_UNLOCKED;
	/* NOTE(review): the hold is taken on 'idev' while the stored
	 * pointer is im->idev; presumably they are always the same
	 * device here -- confirm against callers. */
	pmc->idev = im->idev;
	in6_dev_hold(idev);
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		/* steal the source lists; 'im' gives them up */
		pmc->mca_tomb = im->mca_tomb;
		pmc->mca_sources = im->mca_sources;
		im->mca_tomb = im->mca_sources = NULL;
		for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
			psf->sf_crcount = pmc->mca_crcount;
	}
	spin_unlock_bh(&im->mca_lock);

	write_lock_bh(&idev->mc_lock);
	pmc->next = idev->mc_tomb;
	idev->mc_tomb = pmc;
	write_unlock_bh(&idev->mc_lock);
}

/*
 * Remove the tombstone for group 'pmca' (if any) from idev's tomb list
 * and free it together with its pending-deletion source records.
 * Called when the group is re-added before its change reports finish.
 */
static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
{
	struct ifmcaddr6 *pmc, *pmc_prev;
	struct ip6_sf_list *psf, *psf_next;

	write_lock_bh(&idev->mc_lock);
	pmc_prev = NULL;
	for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
		if (ipv6_addr_cmp(&pmc->mca_addr, pmca) == 0)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		/* unlink from the singly-linked tomb list */
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			idev->mc_tomb = pmc->next;
	}
	write_unlock_bh(&idev->mc_lock);
	/* free outside the lock */
	if (pmc) {
		for (psf=pmc->mca_tomb; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}
}

/*
 * Discard every tombstoned group on 'idev', and additionally free the
 * pending-deletion (tomb) source records of all live groups.  Invoked
 * when an MLDv1 router is detected, since v2 change reports are then
 * no longer sent.
 */
static void mld_clear_delrec(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *nextpmc;

	/* detach the whole tomb list under the lock, free it outside */
	write_lock_bh(&idev->mc_lock);
	pmc = idev->mc_tomb;
	idev->mc_tomb = NULL;
	write_unlock_bh(&idev->mc_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}

	/* clear dead sources, too */
	read_lock_bh(&idev->lock);
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		struct ip6_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->mca_lock);
		psf = pmc->mca_tomb;
		pmc->mca_tomb = NULL;
		spin_unlock_bh(&pmc->mca_lock);
		for (; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	read_unlock_bh(&idev->lock);
}

/*
 *	device multicast group inc (add if not found)
 */
int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr)
{
	struct ifmcaddr6 *mc;
	struct inet6_dev *idev;

	idev = in6_dev_get(dev);

	if (idev == NULL)
		return -EINVAL;

	write_lock_bh(&idev->lock);
	if (idev->dead) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENODEV;
	}

	/* already joined on this device: just bump the user count and
	 * record one more (EXCLUDE, empty) source reference */
	for (mc = idev->mc_list; mc; mc = mc->next) {
		if (ipv6_addr_cmp(&mc->mca_addr, addr) == 0) {
			mc->mca_users++;
			write_unlock_bh(&idev->lock);
			ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
				NULL, 0);
			in6_dev_put(idev);
			return 0;
		}
	}

	/*
	 *	not found: create a new one.
	 */

	mc = kmalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);

	if (mc == NULL) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENOMEM;
	}

	memset(mc, 0, sizeof(struct ifmcaddr6));
	init_timer(&mc->mca_timer);
	mc->mca_timer.function = igmp6_timer_handler;
	mc->mca_timer.data = (unsigned long) mc;

	ipv6_addr_copy(&mc->mca_addr, addr);
	mc->idev = idev;		/* reference from in6_dev_get() is kept here */
	mc->mca_users = 1;
	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	/* refcnt 2: one for the mc_list, one for this function's use;
	 * the latter is dropped by ma_put() below */
	atomic_set(&mc->mca_refcnt, 2);
	mc->mca_lock = SPIN_LOCK_UNLOCKED;

	/* initial mode is (EX, empty) */
	mc->mca_sfmode = MCAST_EXCLUDE;
	mc->mca_sfcount[MCAST_EXCLUDE] = 1;

	/* link-local all-nodes and smaller-than-link-local scopes are
	 * never reported (MAF_NOREPORT) */
	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	mc->next = idev->mc_list;
	idev->mc_list = mc;
	write_unlock_bh(&idev->lock);

	/* cancel any pending delete-record for this address, then report */
	mld_del_delrec(idev, &mc->mca_addr);
	igmp6_group_added(mc);
	ma_put(mc);
	return 0;
}

/*
 *	device multicast group del
 */

/*
 * Drop one user of group 'addr' on 'idev'; when the last user goes,
 * unlink the entry, send the leave, and release the list's reference.
 * Returns 0 on success, -ENOENT if the group is not on the device.
 */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr)
{
	struct ifmcaddr6 *ma, **map;

	write_lock_bh(&idev->lock);
	for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
		if (ipv6_addr_cmp(&ma->mca_addr, addr) == 0) {
			if (--ma->mca_users == 0) {
				*map = ma->next;
				write_unlock_bh(&idev->lock);

				igmp6_group_dropped(ma);

				ma_put(ma);
				return 0;
			}
			write_unlock_bh(&idev->lock);
			return 0;
		}
	}
	write_unlock_bh(&idev->lock);

	return -ENOENT;
}

/*
 * Public wrapper: resolve the inet6_dev from 'dev', then delegate to
 * __ipv6_dev_mc_dec().  Returns -ENODEV when 'dev' has no IPv6 state.
 */
int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr)
{
	struct inet6_dev *idev = in6_dev_get(dev);
	int err;

	if (!idev)
		return -ENODEV;

	err = __ipv6_dev_mc_dec(idev, addr);

	in6_dev_put(idev);

	return err;
}

/*
 * identify MLD packets for MLD filter exceptions
 */
int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
{
	struct icmp6hdr *pic;

	if (nexthdr != IPPROTO_ICMPV6)
		return 0;

	if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
		return 0;

	pic = (struct icmp6hdr *)skb->h.raw;

	/* only MLD message types count; everything else is ordinary ICMPv6 */
	switch (pic->icmp6_type) {
	case ICMPV6_MGM_QUERY:
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MLD2_REPORT:
		return 1;
	default:
		break;
	}
	return 0;
}

/*
 *	check if the interface/address pair is valid
 */

/*
 * Return nonzero when 'group' is joined on 'dev' and, if 'src_addr' is
 * given and specified, the group's source filter accepts that source
 * (INCLUDE lists it, or EXCLUDE does not exclude it for every user).
 */
int ipv6_chk_mcast_addr(struct net_device *dev, struct in6_addr *group,
	struct in6_addr *src_addr)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;
	int rv = 0;

	idev = in6_dev_get(dev);
	if (idev) {
		read_lock_bh(&idev->lock);
		for (mc = idev->mc_list; mc; mc=mc->next) {
			if (ipv6_addr_cmp(&mc->mca_addr, group) == 0)
				break;
		}
		if (mc) {
			if (src_addr && !ipv6_addr_any(src_addr)) {
				struct ip6_sf_list *psf;

				spin_lock_bh(&mc->mca_lock);
				for (psf=mc->mca_sources;psf;psf=psf->sf_next) {
					if (ipv6_addr_cmp(&psf->sf_addr,
					    src_addr) == 0)
						break;
				}
				if (psf)
					/* accept when some user INCLUDEs it,
					 * or not every EXCLUDE user excludes it */
					rv = psf->sf_count[MCAST_INCLUDE] ||
						psf->sf_count[MCAST_EXCLUDE] !=
						mc->mca_sfcount[MCAST_EXCLUDE];
				else
					/* unlisted source: accepted only in
					 * EXCLUDE mode */
					rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
				spin_unlock_bh(&mc->mca_lock);
			} else
				rv = 1; /* don't filter unspecified source */
		}
		read_unlock_bh(&idev->lock);
		in6_dev_put(idev);
	}
	return rv;
}

/*
 * Start the general-query response timer with a random delay bounded by
 * mc_maxdelay; a reference on idev is taken only when the timer was not
 * already pending (mod_timer() returned 0).
 */
static void mld_gq_start_timer(struct inet6_dev *idev)
{
	int tv = net_random() % idev->mc_maxdelay;

	idev->mc_gq_running = 1;
	if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

/*
 * Start the interface-change report timer with a random delay bounded
 * by 'delay' (same refcount convention as mld_gq_start_timer()).
 */
static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
{
	int tv = net_random() % delay;

	if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

/*
 *	IGMP handling (alias multicast ICMPv6 messages)
 */

/*
 * A query was received for group 'ma': (re)arm its report timer with a
 * random delay no larger than 'resptime', keeping an earlier deadline
 * if one is already pending.  mca_lock is held by the caller.
 */
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	/* Do not start timer for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);
		/* keep the remaining time of the cancelled timer */
		delay = ma->mca_timer.expires - jiffies;
	}

	if (delay >= resptime) {
		if (resptime)
			delay = net_random() % resptime;
		else
			delay = 1;
	}

	ma->mca_timer.expires = jiffies + delay;
	/* timer holds a reference on the group while pending */
	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING;
}

/*
 * For a group-and-source-specific query, flag (sf_gsresp) each of the
 * group's sources that appears in the query's source list 'srcs', so
 * the response reports only those.  mca_lock is held by the caller.
 */
static void mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
	struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)	/* all queried sources matched */
			break;
		for (i=0; i<nsrcs; i++)
			if (ipv6_addr_cmp(&srcs[i], &psf->sf_addr) == 0) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
	}
}

/*
 * Process a received MLD query (v1 or v2, distinguished by length:
 * 24 bytes = MLDv1, >= 28 = MLDv2).  Validates the source address,
 * tracks MLDv1-router presence, and schedules report timers for the
 * queried group(s).  Returns 0 or a negative errno.
 */
int igmp6_event_query(struct sk_buff *skb)
{
	struct mld2_query *mlh2 = (struct mld2_query *) skb->h.raw;
	struct ifmcaddr6 *ma;
	struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct icmp6hdr *hdr;
	int group_type;
	int mark = 0;
	int len;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		return -EINVAL;

	/* compute payload length excluding extension headers */
	len = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr);
	len -= (char *)skb->h.raw - (char *)skb->nh.ipv6h; 

	/* Drop queries with not link local source */
	if (!(ipv6_addr_type(&skb->nh.ipv6h->saddr)&IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	idev = in6_dev_get(skb->dev);

	if (idev == NULL)
		return 0;

	hdr = (struct icmp6hdr *) skb->h.raw;
	group = (struct in6_addr *) (hdr + 1);
	group_type = ipv6_addr_type(group);

	/* group must be unspecified (general query) or multicast */
	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type&IPV6_ADDR_MULTICAST)) {
		in6_dev_put(idev);
		return -EINVAL;
	}

	if (len == 24) {
		int switchback;
		/* MLDv1 router present */

		/* Translate milliseconds to jiffies */
		max_delay = (ntohs(hdr->icmp6_maxdelay)*HZ)/1000;

		/* stay in v1-compatibility mode for (qrv+1)*max_delay */
		switchback = (idev->mc_qrv + 1) * max_delay;
		idev->mc_v1_seen = jiffies + switchback;

		/* cancel the interface change timer */
		idev->mc_ifc_count = 0;
		if (del_timer(&idev->mc_ifc_timer))
			__in6_dev_put(idev);
		/* clear deleted report items */
		mld_clear_delrec(idev);
	} else if (len >= 28) {
		max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000;
		if (!max_delay)
			max_delay = 1;
		idev->mc_maxdelay = max_delay;
		if (mlh2->qrv)
			idev->mc_qrv = mlh2->qrv;
		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->nsrcs) {
				in6_dev_put(idev);
				return -EINVAL; /* no sources allowed */
			}
			mld_gq_start_timer(idev);
			in6_dev_put(idev);
			return 0;
		}
		/* mark sources to include, if group & source-specific */
		mark = mlh2->nsrcs != 0;
	} else {
		/* neither a valid v1 nor v2 query length */
		in6_dev_put(idev);
		return -EINVAL;
	}

	read_lock_bh(&idev->lock);
	if (group_type == IPV6_ADDR_ANY) {
		/* general query: schedule a response for every group */
		for (ma = idev->mc_list; ma; ma=ma->next) {
			spin_lock_bh(&ma->mca_lock);
			igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
		}
	} else {
		for (ma = idev->mc_list; ma; ma=ma->next) {
			if (group_type != IPV6_ADDR_ANY &&
			    ipv6_addr_cmp(group, &ma->mca_addr) != 0)
				continue;
			spin_lock_bh(&ma->mca_lock);
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
				if (!mark)
					ma->mca_flags &= ~MAF_GSQUERY;
			} else {
				/* gsquery <- mark */
				if (mark)
					ma->mca_flags |= MAF_GSQUERY;
				else
					ma->mca_flags &= ~MAF_GSQUERY;
			}
			if (ma->mca_flags & MAF_GSQUERY)
				mld_marksources(ma, ntohs(mlh2->nsrcs),
					mlh2->srcs);
			igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
			if (group_type != IPV6_ADDR_ANY)
				break;
		}
	}
	read_unlock_bh(&idev->lock);
	in6_dev_put(idev);

	return 0;
}

/*
 * Process a received MLD report: another host answered for the group,
 * so cancel our own pending report timer for it (report suppression)
 * and clear the last-reporter flag.  Returns 0 or a negative errno.
 */
int igmp6_event_report(struct sk_buff *skb)
{
	struct ifmcaddr6 *ma;
	struct in6_addr *addrp;
	struct inet6_dev *idev;
	struct icmp6hdr *hdr;
	int addr_type;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		return 0;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		return -EINVAL;

	hdr = (struct icmp6hdr*) skb->h.raw;

	/* Drop reports with not link local source */
	addr_type = ipv6_addr_type(&skb->nh.ipv6h->saddr);
	if (addr_type != IPV6_ADDR_ANY && 
	    !(addr_type&IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	addrp = (struct in6_addr *) (hdr + 1);

	idev = in6_dev_get(skb->dev);
	if (idev == NULL)
		return -ENODEV;

	/*
	 *	Cancel the timer for this group
	 */

	read_lock_bh(&idev->lock);
	for (ma = idev->mc_list; ma; ma=ma->next) {
		if (ipv6_addr_cmp(&ma->mca_addr, addrp) == 0) {
			spin_lock(&ma->mca_lock);
			if (del_timer(&ma->mca_timer))
				atomic_dec(&ma->mca_refcnt);
			ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
			spin_unlock(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);
	in6_dev_put(idev);
	return 0;
}

/*
 * Decide whether source 'psf' of group 'pmc' must be listed in an
 * MLDv2 record of the given 'type'.  'gdeleted'/'sdeleted' indicate
 * that the group/source entry comes from a deletion (tomb) list.
 * Returns nonzero when the source belongs in the record.
 */
static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
	int gdeleted, int sdeleted)
{
	switch (type) {
	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		/* for a group-and-source query, only marked sources */
		return !((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp);
	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return 0;
		/* listed only when every EXCLUDE user excludes it */
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return 0;
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return 0;
}

/*
 * (Code-viewer UI text, not part of the kernel source — keyboard
 * shortcuts: Copy: Ctrl+C · Search: Ctrl+F · Fullscreen: F11 ·
 * Larger font: Ctrl+= · Smaller font: Ctrl+- · Show shortcuts: ?)
 */