ipoib_ib.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

#define IPOIB_OP_RECV (1ul << 31)

static DECLARE_MUTEX(pkey_sem);

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
		ipoib_dbg(priv, "Freeing ah %p\n", ah->ah);
		ib_destroy_ah(ah->ah);
		kfree(ah);
	} else {
		spin_lock_irqsave(&priv->lock, flags);
		list_add_tail(&ah->list, &priv->dead_ahs);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
}
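/*
 * Illustrative note (not in the original file): tx_head/tx_tail and
 * ah->last_send are free-running 32-bit counters, so the signed
 * difference (int) tx_tail - (int) last_send in ipoib_free_ah() above
 * stays correct across counter wraparound.  Worked example: with
 * last_send = 0xfffffffe and tx_tail = 3 (the counter has wrapped),
 * the subtraction evaluates to 3 - (-2) = 5 >= 0, meaning every send
 * posted with this AH has completed and it can be destroyed right
 * away; otherwise the AH is parked on priv->dead_ahs until the
 * outstanding completions drain.
 */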
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sge list;
	struct ib_recv_wr param;
	struct ib_recv_wr *bad_wr;
	int ret;

	list.addr     = priv->rx_ring[id].mapping;
	list.length   = IPOIB_BUF_SIZE;
	list.lkey     = priv->mr->lkey;

	param.next    = NULL;
	param.wr_id   = id | IPOIB_OP_RECV;
	param.sg_list = &list;
	param.num_sge = 1;

	ret = ib_post_recv(priv->qp, &param, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		dma_unmap_single(priv->ca->dma_device,
				 priv->rx_ring[id].mapping,
				 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	dma_addr_t addr;

	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
	if (!skb)
		return -ENOMEM;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	addr = dma_map_single(priv->ca->dma_device,
			      skb->data, IPOIB_BUF_SIZE,
			      DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(addr))) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	priv->rx_ring[id].skb     = skb;
	priv->rx_ring[id].mapping = addr;

	return 0;
}
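/*
 * Illustrative note (not in the original file) spelling out the
 * alignment arithmetic in ipoib_alloc_rx_skb() above: after
 * skb_reserve(skb, 4), the hardware deposits the 40-byte GRH followed
 * by the 4-byte IPoIB encapsulation header, so the IP header begins
 * 4 + 40 + 4 = 48 bytes past the start of the data buffer.  Since 48
 * is a multiple of 16, the IP header keeps the buffer's 16-byte
 * alignment, which is why both the allocation size and the reserve
 * carry the extra "+ 4".
 */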
static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < IPOIB_RX_RING_SIZE; ++i) {
		if (ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

static void ipoib_ib_handle_wc(struct net_device *dev,
			       struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;

	ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
		       wr_id, wc->opcode, wc->status);

	if (wr_id & IPOIB_OP_RECV) {
		wr_id &= ~IPOIB_OP_RECV;

		if (wr_id < IPOIB_RX_RING_SIZE) {
			struct sk_buff *skb  = priv->rx_ring[wr_id].skb;
			dma_addr_t      addr = priv->rx_ring[wr_id].mapping;

			if (unlikely(wc->status != IB_WC_SUCCESS)) {
				if (wc->status != IB_WC_WR_FLUSH_ERR)
					ipoib_warn(priv, "failed recv event "
						   "(status=%d, wrid=%d vend_err %x)\n",
						   wc->status, wr_id, wc->vendor_err);
				dma_unmap_single(priv->ca->dma_device, addr,
						 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				priv->rx_ring[wr_id].skb = NULL;
				return;
			}

			/*
			 * If we can't allocate a new RX buffer, dump
			 * this packet and reuse the old buffer.
			 */
			if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
				++priv->stats.rx_dropped;
				goto repost;
			}

			ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
				       wc->byte_len, wc->slid);

			dma_unmap_single(priv->ca->dma_device, addr,
					 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);

			skb_put(skb, wc->byte_len);
			skb_pull(skb, IB_GRH_BYTES);

			if (wc->slid != priv->local_lid ||
			    wc->src_qp != priv->qp->qp_num) {
				skb->protocol = ((struct ipoib_header *) skb->data)->proto;
				skb->mac.raw = skb->data;
				skb_pull(skb, IPOIB_ENCAP_LEN);

				dev->last_rx = jiffies;
				++priv->stats.rx_packets;
				priv->stats.rx_bytes += skb->len;

				skb->dev = dev;
				/* XXX get correct PACKET_ type here */
				skb->pkt_type = PACKET_HOST;
				netif_rx_ni(skb);
			} else {
				ipoib_dbg_data(priv, "dropping loopback packet\n");
				dev_kfree_skb_any(skb);
			}

		repost:
			if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
				ipoib_warn(priv, "ipoib_ib_post_receive failed "
					   "for buf %d\n", wr_id);
		} else
			ipoib_warn(priv, "completion event with wrid %d\n",
				   wr_id);

	} else {
		struct ipoib_tx_buf *tx_req;
		unsigned long flags;

		if (wr_id >= IPOIB_TX_RING_SIZE) {
			ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
				   wr_id, IPOIB_TX_RING_SIZE);
			return;
		}

		ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);

		tx_req = &priv->tx_ring[wr_id];

		dma_unmap_single(priv->ca->dma_device,
				 pci_unmap_addr(tx_req, mapping),
				 tx_req->skb->len,
				 DMA_TO_DEVICE);

		++priv->stats.tx_packets;
		priv->stats.tx_bytes += tx_req->skb->len;

		dev_kfree_skb_any(tx_req->skb);

		spin_lock_irqsave(&priv->tx_lock, flags);
		++priv->tx_tail;
		if (netif_queue_stopped(dev) &&
		    priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2)
			netif_wake_queue(dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);

		if (wc->status != IB_WC_SUCCESS &&
		    wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed send event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
	}
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = (struct net_device *) dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i)
			ipoib_ib_handle_wc(dev, priv->ibwc + i);
	} while (n == IPOIB_NUM_WC);
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    dma_addr_t addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge.addr            = addr;
	priv->tx_sge.length          = len;

	priv->tx_wr.wr_id            = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah         = address;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	dma_addr_t addr;

	if (skb->len > dev->mtu + INFINIBAND_ALEN) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, dev->mtu + INFINIBAND_ALEN);
		++priv->stats.tx_dropped;
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (IPOIB_TX_RING_SIZE - 1)];
	tx_req->skb = skb;
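	/*
	 * The listing is cut off at this point.  The remainder of
	 * ipoib_send() below is a reconstruction from contemporaneous
	 * versions of this driver, as a sketch rather than a verbatim
	 * copy: map the skb for DMA, post it, and on success advance
	 * tx_head, stopping the net queue once the TX ring is full.
	 */
	addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
			      DMA_TO_DEVICE);
	pci_unmap_addr_set(tx_req, mapping, addr);

	if (unlikely(post_send(priv, priv->tx_head & (IPOIB_TX_RING_SIZE - 1),
			       address->ah, qpn, addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
		dma_unmap_single(priv->ca->dma_device, addr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;

		/* Record the serial number so ipoib_free_ah() can tell
		 * when this AH's last send has completed. */
		address->last_send = priv->tx_head;
		++priv->tx_head;

		if (priv->tx_head - priv->tx_tail == IPOIB_TX_RING_SIZE) {
			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
			netif_stop_queue(dev);
		}
	}
}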