⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 mcast.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
		/* NOTE(review): this is the tail of a source-filter *set*
		 * function (presumably ip6_mc_msfilter); its signature lies
		 * above this excerpt — confirm against the full file.
		 */
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (gsf->gf_numsrc) {
		/* Build the replacement source list from the user-supplied
		 * gf_slist before touching the old one.
		 */
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
							  GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
		for (i=0; i<newpsl->sl_count; ++i) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
			newpsl->sl_addr[i] = psin6->sin6_addr;
		}
		/* Register the new sources with the interface state first;
		 * on failure the old filter is left untouched.
		 */
		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
			newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		/* Empty source list: just record the filter mode. */
		newpsl = NULL;
		(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
	}
	/* Swap in the new per-socket list and drop the old one. */
	write_lock_bh(&pmc->sflock);
	psl = pmc->sflist;
	if (psl) {
		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
	} else
		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
	pmc->sflist = newpsl;
	pmc->sfmode = gsf->gf_fmode;
	write_unlock_bh(&pmc->sflock);
	err = 0;
done:
	/* Common exit: release locks/refs taken earlier (above this
	 * excerpt) in reverse order.
	 */
	read_unlock_bh(&ipv6_sk_mc_lock);
	read_unlock_bh(&idev->lock);
	in6_dev_put(idev);
	dev_put(dev);
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
	return err;
}

/* MCAST_MSFILTER getsockopt helper: copy this socket's source-filter
 * state for gsf->gf_group on gsf->gf_interface back to user space
 * (@optval/@optlen).  Returns 0 on success, or a negative errno:
 * -EINVAL (not a multicast group), -ENODEV (no matching device),
 * -EADDRNOTAVAIL (no prior join), -EFAULT (user copy failed).
 */
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct net_device *dev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	/* ip6_mc_find_dev() returns with idev->lock read-held —
	 * presumably; the done: path below unlocks it.  TODO confirm.
	 */
	idev = ip6_mc_find_dev(group, gsf->gf_interface);

	if (!idev)
		return -ENODEV;

	dev = idev->dev;

	err = -EADDRNOTAVAIL;
	/*
	 * changes to the ipv6_mc_list require the socket lock and
	 * a read lock on ipv6_sk_mc_lock.  We have the socket lock,
	 * so reading the list is safe.
	 */

	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(group, &pmc->addr))
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = pmc->sflist;
	count = psl ? psl->sl_count : 0;
	/* Device state no longer needed; drop lock and references before
	 * the (possibly faulting) user-space copies below.
	 */
	read_unlock_bh(&idev->lock);
	in6_dev_put(idev);
	dev_put(dev);

	/* Copy at most the number of entries the caller provided room
	 * for, but report the full count via gf_numsrc.
	 */
	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	/* changes to psl require the socket lock, a read lock on
	 * ipv6_sk_mc_lock and a write lock on pmc->sflock. We
	 * have the socket lock, so reading here is safe.
	 */
	for (i=0; i<copycount; i++) {
		struct sockaddr_in6 *psin6;
		struct sockaddr_storage ss;

		/* Zero-fill a sockaddr_storage per entry so no kernel
		 * stack bytes leak to user space.
		 */
		psin6 = (struct sockaddr_in6 *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin6->sin6_family = AF_INET6;
		psin6->sin6_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	read_unlock_bh(&idev->lock);
	in6_dev_put(idev);
	dev_put(dev);
	return err;
}

/* Decide whether a packet to multicast group @mc_addr from @src_addr
 * should be delivered to socket @sk, according to the socket's
 * per-group source filter.  Returns 1 (deliver) when the socket has no
 * entry for the group or its filter admits the source, 0 (drop)
 * otherwise.
 */
int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr,
	struct in6_addr *src_addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc;
	struct ip6_sf_socklist *psl;
	int rv = 1;

	read_lock(&ipv6_sk_mc_lock);
	for (mc = np->ipv6_mc_list; mc; mc = mc->next) {
		if (ipv6_addr_equal(&mc->addr, mc_addr))
			break;
	}
	if (!mc) {
		/* Socket never joined this group: do not filter here. */
		read_unlock(&ipv6_sk_mc_lock);
		return 1;
	}
	read_lock(&mc->sflock);
	psl = mc->sflist;
	if (!psl) {
		/* No source list: EXCLUDE-nothing accepts everything,
		 * INCLUDE-nothing accepts nothing.
		 */
		rv = mc->sfmode == MCAST_EXCLUDE;
	} else {
		int i;

		for (i=0; i<psl->sl_count; i++) {
			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
				break;
		}
		/* INCLUDE: drop unless the source is listed;
		 * EXCLUDE: drop when the source is listed.
		 */
		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
			rv = 0;
		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
			rv = 0;
	}
	read_unlock(&mc->sflock);
	read_unlock(&ipv6_sk_mc_lock);

	return rv;
}

static void
ma_put(struct ifmcaddr6 *mc){	if (atomic_dec_and_test(&mc->mca_refcnt)) {		in6_dev_put(mc->idev);		kfree(mc);	}}static void igmp6_group_added(struct ifmcaddr6 *mc){	struct net_device *dev = mc->idev->dev;	char buf[MAX_ADDR_LEN];	spin_lock_bh(&mc->mca_lock);	if (!(mc->mca_flags&MAF_LOADED)) {		mc->mca_flags |= MAF_LOADED;		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)			dev_mc_add(dev, buf, dev->addr_len, 0);	}	spin_unlock_bh(&mc->mca_lock);	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))		return;	if (MLD_V1_SEEN(mc->idev)) {		igmp6_join_group(mc);		return;	}	/* else v2 */	mc->mca_crcount = mc->idev->mc_qrv;	mld_ifc_event(mc->idev);}static void igmp6_group_dropped(struct ifmcaddr6 *mc){	struct net_device *dev = mc->idev->dev;	char buf[MAX_ADDR_LEN];	spin_lock_bh(&mc->mca_lock);	if (mc->mca_flags&MAF_LOADED) {		mc->mca_flags &= ~MAF_LOADED;		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)			dev_mc_delete(dev, buf, dev->addr_len, 0);	}	if (mc->mca_flags & MAF_NOREPORT)		goto done;	spin_unlock_bh(&mc->mca_lock);	if (!mc->idev->dead)		igmp6_leave_group(mc);	spin_lock_bh(&mc->mca_lock);	if (del_timer(&mc->mca_timer))		atomic_dec(&mc->mca_refcnt);done:	ip6_mc_clear_src(mc);	spin_unlock_bh(&mc->mca_lock);}/* * deleted ifmcaddr6 manipulation */static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im){	struct ifmcaddr6 *pmc;	/* this is an "ifmcaddr6" for convenience; only the fields below	 * are actually used. In particular, the refcnt and users are not	 * used for management of the delete list. Using the same structure	 * for deleted items allows change reports to use common code with	 * non-deleted or query-response MCA's.	 
*/	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);	if (!pmc)		return;	spin_lock_bh(&im->mca_lock);	spin_lock_init(&pmc->mca_lock);	pmc->idev = im->idev;	in6_dev_hold(idev);	pmc->mca_addr = im->mca_addr;	pmc->mca_crcount = idev->mc_qrv;	pmc->mca_sfmode = im->mca_sfmode;	if (pmc->mca_sfmode == MCAST_INCLUDE) {		struct ip6_sf_list *psf;		pmc->mca_tomb = im->mca_tomb;		pmc->mca_sources = im->mca_sources;		im->mca_tomb = im->mca_sources = NULL;		for (psf=pmc->mca_sources; psf; psf=psf->sf_next)			psf->sf_crcount = pmc->mca_crcount;	}	spin_unlock_bh(&im->mca_lock);	write_lock_bh(&idev->mc_lock);	pmc->next = idev->mc_tomb;	idev->mc_tomb = pmc;	write_unlock_bh(&idev->mc_lock);}static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca){	struct ifmcaddr6 *pmc, *pmc_prev;	struct ip6_sf_list *psf, *psf_next;	write_lock_bh(&idev->mc_lock);	pmc_prev = NULL;	for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {		if (ipv6_addr_equal(&pmc->mca_addr, pmca))			break;		pmc_prev = pmc;	}	if (pmc) {		if (pmc_prev)			pmc_prev->next = pmc->next;		else			idev->mc_tomb = pmc->next;	}	write_unlock_bh(&idev->mc_lock);	if (pmc) {		for (psf=pmc->mca_tomb; psf; psf=psf_next) {			psf_next = psf->sf_next;			kfree(psf);		}		in6_dev_put(pmc->idev);		kfree(pmc);	}}static void mld_clear_delrec(struct inet6_dev *idev){	struct ifmcaddr6 *pmc, *nextpmc;	write_lock_bh(&idev->mc_lock);	pmc = idev->mc_tomb;	idev->mc_tomb = NULL;	write_unlock_bh(&idev->mc_lock);	for (; pmc; pmc = nextpmc) {		nextpmc = pmc->next;		ip6_mc_clear_src(pmc);		in6_dev_put(pmc->idev);		kfree(pmc);	}	/* clear dead sources, too */	read_lock_bh(&idev->lock);	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {		struct ip6_sf_list *psf, *psf_next;		spin_lock_bh(&pmc->mca_lock);		psf = pmc->mca_tomb;		pmc->mca_tomb = NULL;		spin_unlock_bh(&pmc->mca_lock);		for (; psf; psf=psf_next) {			psf_next = psf->sf_next;			kfree(psf);		}	}	read_unlock_bh(&idev->lock);}/* *	device multicast group inc (add if not found) */int ipv6_dev_mc_inc(struct 
net_device *dev, struct in6_addr *addr){	struct ifmcaddr6 *mc;	struct inet6_dev *idev;	idev = in6_dev_get(dev);	if (idev == NULL)		return -EINVAL;	write_lock_bh(&idev->lock);	if (idev->dead) {		write_unlock_bh(&idev->lock);		in6_dev_put(idev);		return -ENODEV;	}	for (mc = idev->mc_list; mc; mc = mc->next) {		if (ipv6_addr_equal(&mc->mca_addr, addr)) {			mc->mca_users++;			write_unlock_bh(&idev->lock);			ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,				NULL, 0);			in6_dev_put(idev);			return 0;		}	}	/*	 *	not found: create a new one.	 */	mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);	if (mc == NULL) {		write_unlock_bh(&idev->lock);		in6_dev_put(idev);		return -ENOMEM;	}	init_timer(&mc->mca_timer);	mc->mca_timer.function = igmp6_timer_handler;	mc->mca_timer.data = (unsigned long) mc;	ipv6_addr_copy(&mc->mca_addr, addr);	mc->idev = idev;	mc->mca_users = 1;	/* mca_stamp should be updated upon changes */	mc->mca_cstamp = mc->mca_tstamp = jiffies;	atomic_set(&mc->mca_refcnt, 2);	spin_lock_init(&mc->mca_lock);	/* initial mode is (EX, empty) */	mc->mca_sfmode = MCAST_EXCLUDE;	mc->mca_sfcount[MCAST_EXCLUDE] = 1;	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)		mc->mca_flags |= MAF_NOREPORT;	mc->next = idev->mc_list;	idev->mc_list = mc;	write_unlock_bh(&idev->lock);	mld_del_delrec(idev, &mc->mca_addr);	igmp6_group_added(mc);	ma_put(mc);	return 0;}/* *	device multicast group del */int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr){	struct ifmcaddr6 *ma, **map;	write_lock_bh(&idev->lock);	for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {		if (ipv6_addr_equal(&ma->mca_addr, addr)) {			if (--ma->mca_users == 0) {				*map = ma->next;				write_unlock_bh(&idev->lock);				igmp6_group_dropped(ma);				ma_put(ma);				return 0;			}			write_unlock_bh(&idev->lock);			return 0;		}	}	write_unlock_bh(&idev->lock);	return -ENOENT;}int ipv6_dev_mc_dec(struct net_device *dev, struct 
in6_addr *addr){	struct inet6_dev *idev = in6_dev_get(dev);	int err;	if (!idev)		return -ENODEV;	err = __ipv6_dev_mc_dec(idev, addr);	in6_dev_put(idev);	return err;}/* * identify MLD packets for MLD filter exceptions */int ipv6_is_mld(struct sk_buff *skb, int nexthdr){	struct icmp6hdr *pic;	if (nexthdr != IPPROTO_ICMPV6)		return 0;	if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))		return 0;	pic = icmp6_hdr(skb);	switch (pic->icmp6_type) {	case ICMPV6_MGM_QUERY:	case ICMPV6_MGM_REPORT:	case ICMPV6_MGM_REDUCTION:	case ICMPV6_MLD2_REPORT:		return 1;	default:		break;	}	return 0;}/* *	check if the interface/address pair is valid */int ipv6_chk_mcast_addr(struct net_device *dev, struct in6_addr *group,	struct in6_addr *src_addr){	struct inet6_dev *idev;	struct ifmcaddr6 *mc;	int rv = 0;	idev = in6_dev_get(dev);	if (idev) {		read_lock_bh(&idev->lock);		for (mc = idev->mc_list; mc; mc=mc->next) {			if (ipv6_addr_equal(&mc->mca_addr, group))				break;		}		if (mc) {			if (src_addr && !ipv6_addr_any(src_addr)) {				struct ip6_sf_list *psf;				spin_lock_bh(&mc->mca_lock);				for (psf=mc->mca_sources;psf;psf=psf->sf_next) {					if (ipv6_addr_equal(&psf->sf_addr, src_addr))						break;				}				if (psf)					rv = psf->sf_count[MCAST_INCLUDE] ||						psf->sf_count[MCAST_EXCLUDE] !=						mc->mca_sfcount[MCAST_EXCLUDE];				else					rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;				spin_unlock_bh(&mc->mca_lock);			} else				rv = 1; /* don't filter unspecified source */		}		read_unlock_bh(&idev->lock);		in6_dev_put(idev);	}	return rv;}static void mld_gq_start_timer(struct inet6_dev *idev){	int tv = net_random() % idev->mc_maxdelay;	idev->mc_gq_running = 1;	if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))		in6_dev_hold(idev);}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -