
📄 ipoib_multicast.c

📁 Linux kernel source
💻 C
📖 Page 1 of 2
		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}

void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr))
			priv->local_lid = attr.lid;
		else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
		IPOIB_ENCAP_LEN;

	if (!ipoib_cm_admin_enabled(dev))
		dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_MCAST_STARTED, &priv->flags);
	spin_unlock_irq(&priv->lock);

	return 0;
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	spin_lock_irq(&priv->lock);
	clear_bit(IPOIB_MCAST_STARTED, &priv->flags);
	spin_unlock_irq(&priv->lock);

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}

static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));

		/* Remove ourselves from the multicast group */
		ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid);
		if (ret)
			ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);
	}

	return 0;
}

void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
	    !priv->broadcast ||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for "
				IPOIB_GID_FMT "\n", IPOIB_GID_RAW_ARG(mgid));

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		if (skb->dst &&
		    skb->dst->neighbour &&
		    !*to_ipoib_neigh(skb->dst->neighbour)) {
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour,
								      skb->dev);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah = mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
	}

unlock:
	spin_unlock(&priv->lock);
}

void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct dev_mc_list *mclist;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;
	struct ib_sa_mcmember_rec rec;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_tx_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed.
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
		union ib_gid mgid;

		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

		/* Add in the P_Key */
		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
		mgid.raw[5] = priv->pkey & 0xff;

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* ignore group which is directly joined by userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid "
						IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));
				continue;
			}

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid "
					IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_tx_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid      = mcast->mcmember.mgid;
			iter->created   = mcast->created;
			iter->queuelen  = skb_queue_len(&mcast->pkt_queue);
			iter->complete  = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid      = iter->mgid;
	*created   = iter->created;
	*queuelen  = iter->queuelen;
	*complete  = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
