ipoib_multicast.c
void ipoib_mcast_join_task(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
                return;

        if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
                ipoib_warn(priv, "ib_gid_entry_get() failed\n");
        else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
                       sizeof (union ib_gid));

        {
                struct ib_port_attr attr;

                if (!ib_query_port(priv->ca, priv->port, &attr)) {
                        priv->local_lid  = attr.lid;
                        priv->local_rate = attr.active_speed *
                                ib_width_enum_to_int(attr.active_width);
                } else
                        ipoib_warn(priv, "ib_query_port failed\n");
        }

        if (!priv->broadcast) {
                priv->broadcast = ipoib_mcast_alloc(dev, 1);
                if (!priv->broadcast) {
                        ipoib_warn(priv, "failed to allocate broadcast group\n");
                        down(&mcast_mutex);
                        if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                                queue_delayed_work(ipoib_workqueue,
                                                   &priv->mcast_task, HZ);
                        up(&mcast_mutex);
                        return;
                }

                memcpy(priv->broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
                       sizeof (union ib_gid));

                spin_lock_irq(&priv->lock);
                __ipoib_mcast_add(dev, priv->broadcast);
                spin_unlock_irq(&priv->lock);
        }

        if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
                ipoib_mcast_join(dev, priv->broadcast, 0);
                return;
        }

        while (1) {
                struct ipoib_mcast *mcast = NULL;

                spin_lock_irq(&priv->lock);
                list_for_each_entry(mcast, &priv->multicast_list, list) {
                        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
                            && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
                            && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                                /* Found the next unjoined group */
                                break;
                        }
                }
                spin_unlock_irq(&priv->lock);

                if (&mcast->list == &priv->multicast_list) {
                        /* All done */
                        break;
                }

                ipoib_mcast_join(dev, mcast, 1);
                return;
        }

        priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
                IPOIB_ENCAP_LEN;
        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

        clear_bit(IPOIB_MCAST_RUN, &priv->flags);
        netif_carrier_on(dev);
}

int ipoib_mcast_start_thread(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg_mcast(priv, "starting multicast thread\n");

        down(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
                queue_work(ipoib_workqueue, &priv->mcast_task);
        up(&mcast_mutex);

        return 0;
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_mcast *mcast;

        ipoib_dbg_mcast(priv, "stopping multicast thread\n");

        down(&mcast_mutex);
        clear_bit(IPOIB_MCAST_RUN, &priv->flags);
        cancel_delayed_work(&priv->mcast_task);
        up(&mcast_mutex);

        if (flush)
                flush_workqueue(ipoib_workqueue);

        if (priv->broadcast && priv->broadcast->query) {
                ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
                priv->broadcast->query = NULL;
                ipoib_dbg_mcast(priv, "waiting for bcast\n");
                wait_for_completion(&priv->broadcast->done);
        }

        list_for_each_entry(mcast, &priv->multicast_list, list) {
                if (mcast->query) {
                        ib_sa_cancel_query(mcast->query_id, mcast->query);
                        mcast->query = NULL;
                        ipoib_dbg_mcast(priv, "waiting for MGID " IPOIB_GID_FMT "\n",
                                        IPOIB_GID_ARG(mcast->mcmember.mgid));
                        wait_for_completion(&mcast->done);
                }
        }

        return 0;
}

static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_sa_mcmember_rec rec = {
                .join_state = 1
        };
        int ret = 0;

        if (!test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags))
                return 0;

        ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
                        IPOIB_GID_ARG(mcast->mcmember.mgid));

        rec.mgid     = mcast->mcmember.mgid;
        rec.port_gid = priv->local_gid;
        rec.pkey     = cpu_to_be16(priv->pkey);

        /* Remove ourselves from the multicast group */
        ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
                                 &mcast->mcmember.mgid);
        if (ret)
                ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);

        /*
         * Just make one shot at leaving and don't wait for a reply;
         * if we fail, too bad.
         */
        ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
                                        IB_SA_MCMEMBER_REC_MGID |
                                        IB_SA_MCMEMBER_REC_PORT_GID |
                                        IB_SA_MCMEMBER_REC_PKEY |
                                        IB_SA_MCMEMBER_REC_JOIN_STATE,
                                        0, GFP_ATOMIC, NULL,
                                        mcast, &mcast->query);
        if (ret < 0)
                ipoib_warn(priv, "ib_sa_mcmember_rec_delete failed "
                           "for leave (result = %d)\n", ret);

        return 0;
}

void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
                      struct sk_buff *skb)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_mcast *mcast;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        mcast = __ipoib_mcast_find(dev, mgid);
        if (!mcast) {
                /* Let's create a new send only group now */
                ipoib_dbg_mcast(priv, "setting up send only multicast group for "
                                IPOIB_GID_FMT "\n", IPOIB_GID_ARG(*mgid));

                mcast = ipoib_mcast_alloc(dev, 0);
                if (!mcast) {
                        ipoib_warn(priv, "unable to allocate memory for "
                                   "multicast structure\n");
                        dev_kfree_skb_any(skb);
                        goto out;
                }

                set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
                mcast->mcmember.mgid = *mgid;
                __ipoib_mcast_add(dev, mcast);
                list_add_tail(&mcast->list, &priv->multicast_list);
        }

        if (!mcast->ah) {
                if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
                        skb_queue_tail(&mcast->pkt_queue, skb);
                else
                        dev_kfree_skb_any(skb);

                if (mcast->query)
                        ipoib_dbg_mcast(priv, "no address vector, "
                                        "but multicast join already started\n");
                else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
                        ipoib_mcast_sendonly_join(mcast);

                /*
                 * If lookup completes between here and out:, don't
                 * want to send packet twice.
                 */
                mcast = NULL;
        }

out:
        if (mcast && mcast->ah) {
                if (skb->dst            &&
                    skb->dst->neighbour &&
                    !*to_ipoib_neigh(skb->dst->neighbour)) {
                        struct ipoib_neigh *neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);

                        if (neigh) {
                                kref_get(&mcast->ah->ref);
                                neigh->ah        = mcast->ah;
                                neigh->neighbour = skb->dst->neighbour;
                                *to_ipoib_neigh(skb->dst->neighbour) = neigh;
                                list_add_tail(&neigh->list, &mcast->neigh_list);
                        }
                }

                ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
        }

        spin_unlock(&priv->lock);
}

void ipoib_mcast_dev_flush(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        LIST_HEAD(remove_list);
        struct ipoib_mcast *mcast, *tmcast, *nmcast;
        unsigned long flags;

        ipoib_dbg_mcast(priv, "flushing multicast list\n");

        spin_lock_irqsave(&priv->lock, flags);
        list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
                nmcast = ipoib_mcast_alloc(dev, 0);
                if (nmcast) {
                        nmcast->flags =
                                mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);

                        nmcast->mcmember.mgid = mcast->mcmember.mgid;

                        /* Add the new group in before the to-be-destroyed group */
                        list_add_tail(&nmcast->list, &mcast->list);
                        list_del_init(&mcast->list);

                        rb_replace_node(&mcast->rb_node, &nmcast->rb_node,
                                        &priv->multicast_tree);

                        list_add_tail(&mcast->list, &remove_list);
                } else {
                        ipoib_warn(priv, "could not reallocate multicast group "
                                   IPOIB_GID_FMT "\n",
                                   IPOIB_GID_ARG(mcast->mcmember.mgid));
                }
        }

        if (priv->broadcast) {
                nmcast = ipoib_mcast_alloc(dev, 0);
                if (nmcast) {
                        nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;

                        rb_replace_node(&priv->broadcast->rb_node,
                                        &nmcast->rb_node,
                                        &priv->multicast_tree);

                        list_add_tail(&priv->broadcast->list, &remove_list);
                }

                priv->broadcast = nmcast;
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(dev, mcast);
                ipoib_mcast_free(mcast);
        }
}

void ipoib_mcast_dev_down(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned long flags;

        /* Delete broadcast since it will be recreated */
        if (priv->broadcast) {
                ipoib_dbg_mcast(priv, "deleting broadcast group\n");

                spin_lock_irqsave(&priv->lock, flags);
                rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_mcast_leave(dev, priv->broadcast);
                ipoib_mcast_free(priv->broadcast);
                priv->broadcast = NULL;
        }
}

void ipoib_mcast_restart_task(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct dev_mc_list *mclist;
        struct ipoib_mcast *mcast, *tmcast;
        LIST_HEAD(remove_list);
        unsigned long flags;

        ipoib_dbg_mcast(priv, "restarting multicast task\n");

        ipoib_mcast_stop_thread(dev, 0);

        spin_lock_irqsave(&priv->lock, flags);

        /*
         * Unfortunately, the networking core only gives us a list of all of
         * the multicast hardware addresses. We need to figure out which ones
         * are new and which ones have been removed.
         */

        /* Clear out the found flag */
        list_for_each_entry(mcast, &priv->multicast_list, list)
                clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

        /* Mark all of the entries that are found or don't exist */
        for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
                union ib_gid mgid;

                memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

                /* Add in the P_Key */
                mgid.raw[4] = (priv->pkey >> 8) & 0xff;
                mgid.raw[5] = priv->pkey & 0xff;

                mcast = __ipoib_mcast_find(dev, &mgid);
                if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
                        struct ipoib_mcast *nmcast;

                        /* Not found or send-only group, let's add a new entry */
                        ipoib_dbg_mcast(priv, "adding multicast entry for mgid "
                                        IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));

                        nmcast = ipoib_mcast_alloc(dev, 0);
                        if (!nmcast) {
                                ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
                                continue;
                        }

                        set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

                        nmcast->mcmember.mgid = mgid;

                        if (mcast) {
                                /* Destroy the send only entry */
                                list_del(&mcast->list);
                                list_add_tail(&mcast->list, &remove_list);

                                rb_replace_node(&mcast->rb_node,
                                                &nmcast->rb_node,
                                                &priv->multicast_tree);
                        } else
                                __ipoib_mcast_add(dev, nmcast);

                        list_add_tail(&nmcast->list, &priv->multicast_list);
                }

                if (mcast)
                        set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
        }

        /* Remove all of the entries that don't exist anymore */
        list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
                if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
                    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
                        ipoib_dbg_mcast(priv, "deleting multicast group " IPOIB_GID_FMT "\n",
                                        IPOIB_GID_ARG(mcast->mcmember.mgid));

                        rb_erase(&mcast->rb_node, &priv->multicast_tree);

                        /* Move to the remove list */
                        list_del(&mcast->list);
                        list_add_tail(&mcast->list, &remove_list);
                }
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(mcast->dev, mcast);
                ipoib_mcast_free(mcast);
        }

        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
        struct ipoib_mcast_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->mgid.raw, 0, 16);

        if (ipoib_mcast_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_mcast *mcast;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->multicast_tree);

        while (n) {
                mcast = rb_entry(n, struct ipoib_mcast, rb_node);

                if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->mgid      = mcast->mcmember.mgid;
                        iter->created   = mcast->created;
                        iter->queuelen  = skb_queue_len(&mcast->pkt_queue);
                        iter->complete  = !!mcast->ah;
                        iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

                        ret = 0;

                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
                           union ib_gid *mgid,
                           unsigned long *created,
                           unsigned int *queuelen,
                           unsigned int *complete,
                           unsigned int *send_only)
{
        *mgid      = iter->mgid;
        *created   = iter->created;
        *queuelen  = iter->queuelen;
        *complete  = iter->complete;
        *send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
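The lookup and insert helpers __ipoib_mcast_find and __ipoib_mcast_add used throughout this listing are defined earlier in ipoib_multicast.c and are not shown here. As a rough sketch of how the MGID-keyed lookup works (assuming the usual red-black tree walk over priv->multicast_tree; the real helper may differ in detail):

/*
 * Sketch only: the actual __ipoib_mcast_find lives earlier in this file.
 * The tree is ordered by the raw 16-byte MGID, so a plain memcmp() over
 * the GID decides which subtree to descend into.
 */
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev,
                                              union ib_gid *mgid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->multicast_tree.rb_node;

        while (n) {
                struct ipoib_mcast *mcast;
                int ret;

                mcast = rb_entry(n, struct ipoib_mcast, rb_node);

                ret = memcmp(mgid->raw, mcast->mcmember.mgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return mcast;   /* exact MGID match */
        }

        return NULL;
}

This is the same ordering that ipoib_mcast_iter_next relies on above: because the tree is sorted by raw MGID, the iterator can resume from the last GID it reported by taking the first entry that compares greater.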