📄 myri10ge.c
字号:
/*
 * Graft received page fragment(s) onto a freshly allocated skb.
 *
 * Copies the frag descriptors in @rx_frags into the skb's shared info,
 * then moves the first @hlen bytes from @va into the skb's linear area
 * (eth_type_trans() needs the header in skb_headlen(), and
 * pskb_may_pull() cannot be used in IRQ context), and finally strips
 * the MXGEFW_PAD bytes the firmware prepends to each frame.
 */
static inline void
myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
		      struct skb_frag_struct *rx_frags, int len, int hlen)
{
	struct skb_frag_struct *skb_frags;

	skb->len = skb->data_len = len;
	skb->truesize = len + sizeof(struct sk_buff);
	/* attach the page(s) */

	skb_frags = skb_shinfo(skb)->frags;
	while (len > 0) {
		memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
		len -= rx_frags->size;
		skb_frags++;
		rx_frags++;
		skb_shinfo(skb)->nr_frags++;
	}

	/* pskb_may_pull is not available in irq context, but
	 * skb_pull() (for ether_pad and eth_type_trans()) requires
	 * the beginning of the packet in skb_headlen(), move it
	 * manually */
	skb_copy_to_linear_data(skb, va, hlen);
	/* the first frag lost its leading hlen bytes to the linear area */
	skb_shinfo(skb)->frags[0].page_offset += hlen;
	skb_shinfo(skb)->frags[0].size -= hlen;
	skb->data_len -= hlen;
	skb->tail += hlen;
	/* drop the firmware's pad so skb->data points at the ethernet hdr */
	skb_pull(skb, MXGEFW_PAD);
}

/*
 * Refill the receive ring @rx with page-backed buffers of @bytes each.
 *
 * Buffers are carved out of MYRI10GE_ALLOC_SIZE pages; a page is reused
 * (with an extra get_page() reference) until it is exhausted, then a
 * new one is allocated and DMA-mapped.  Descriptors are pushed to the
 * NIC in groups of 8.  On allocation failure with a nearly-empty ring
 * the watchdog is armed to retry later; @watchdog distinguishes the
 * watchdog's own refill calls from normal-path ones.
 */
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
			int bytes, int watchdog)
{
	struct page *page;
	int idx;

	/* once the watchdog owns refilling, normal-path calls back off */
	if (unlikely(rx->watchdog_needed && !watchdog))
		return;

	/* try to refill entire ring */
	while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
		idx = rx->fill_cnt & rx->mask;
		if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
			/* we can use part of previous page */
			get_page(rx->page);
		} else {
			/* we need a new page */
			page =
			    alloc_pages(GFP_ATOMIC | __GFP_COMP,
					MYRI10GE_ALLOC_ORDER);
			if (unlikely(page == NULL)) {
				/* ring nearly empty: arm the watchdog */
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}
			rx->page = page;
			rx->page_offset = 0;
			rx->bus = pci_map_page(mgp->pdev, page, 0,
					       MYRI10GE_ALLOC_SIZE,
					       PCI_DMA_FROMDEVICE);
		}
		rx->info[idx].page = rx->page;
		rx->info[idx].page_offset = rx->page_offset;
		/* note that this is the address of the start of the
		 * page */
		pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
		rx->shadow[idx].addr_low =
		    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
		rx->shadow[idx].addr_high =
		    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));

		/* start next packet on a cacheline boundary */
		rx->page_offset += SKB_DATA_ALIGN(bytes);

#if MYRI10GE_ALLOC_SIZE > 4096
		/* don't cross a 4KB boundary */
		if ((rx->page_offset >> 12) !=
		    ((rx->page_offset + bytes - 1) >> 12))
			rx->page_offset = (rx->page_offset + 4096) & ~4095;
#endif
		rx->fill_cnt++;

		/* copy 8 descriptors to the firmware at a time */
		if ((idx & 7) == 7) {
			if (rx->wc_fifo == NULL)
				myri10ge_submit_8rx(&rx->lanai[idx - 7],
						    &rx->shadow[idx - 7]);
			else {
				/* order descriptor writes before the
				 * write-combining PIO copy */
				mb();
				myri10ge_pio_copy(rx->wc_fifo,
						  &rx->shadow[idx - 7], 64);
			}
		}
	}
}

/*
 * Drop the DMA mapping for a received buffer's page, but only when this
 * buffer is the last (or only) slice carved from that page; earlier
 * slices leave the mapping alive for their siblings.
 */
static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
		       struct myri10ge_rx_buffer_state *info, int bytes)
{
	/* unmap the recvd page if we're the only or last user of it */
	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
		/* the stored bus address is masked back to the start of
		 * the page, since that is what was mapped */
		pci_unmap_page(pdev,
			       (pci_unmap_addr(info, bus)
				& ~(MYRI10GE_ALLOC_SIZE - 1)),
			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
	}
}

#define MYRI10GE_HLEN 64	/* The number of bytes to copy from a
				 * page into an skb */

/*
 * Hand one received frame (length @len, buffers of @bytes each in ring
 * @rx) up the stack.  The frame's fragments are first gathered into
 * rx_frags[]; if hardware checksumming and LRO are enabled the frags go
 * straight to the LRO engine, otherwise an skb is built around them.
 * Returns 1 if a packet was delivered, 0 if it was dropped (skb
 * allocation failure).
 */
static inline int
myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
		 int bytes, int len, __wsum csum)
{
	struct sk_buff *skb;
	struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
	int i, idx, hlen, remainder;
	struct pci_dev *pdev = mgp->pdev;
	struct net_device *dev = mgp->dev;
	u8 *va;

	/* the firmware's pad bytes are part of what was received */
	len += MXGEFW_PAD;
	idx = rx->cnt & rx->mask;
	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
	prefetch(va);
	/* Fill skb_frag_struct(s) with data from our receive */
	for (i = 0, remainder = len; remainder > 0; i++) {
		myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
		rx_frags[i].page = rx->info[idx].page;
		rx_frags[i].page_offset = rx->info[idx].page_offset;
		if (remainder < MYRI10GE_ALLOC_SIZE)
			rx_frags[i].size = remainder;
		else
			rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		remainder -= MYRI10GE_ALLOC_SIZE;
	}

	if (mgp->csum_flag && myri10ge_lro) {
		/* LRO path: strip the pad in the frag descriptor itself */
		rx_frags[0].page_offset += MXGEFW_PAD;
		rx_frags[0].size -= MXGEFW_PAD;
		len -= MXGEFW_PAD;
		lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags,
				  len, len, (void *)(unsigned long)csum, csum);
		return 1;
	}

	hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;

	/* allocate an skb to attach the page(s) to. This is done
	 * after trying LRO, so as to avoid skb allocation overheads */

	skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
	if (unlikely(skb == NULL)) {
		mgp->stats.rx_dropped++;
		/* release the page references taken for each frag */
		do {
			i--;
			put_page(rx_frags[i].page);
		} while (i != 0);
		return 0;
	}

	/* Attach the pages to the skb, and trim off any padding */
	myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
	/* a frame that fit entirely into the linear area leaves frag 0
	 * empty; drop its page reference and the frag itself */
	if (skb_shinfo(skb)->frags[0].size <= 0) {
		put_page(skb_shinfo(skb)->frags[0].page);
		skb_shinfo(skb)->nr_frags = 0;
	}
	skb->protocol = eth_type_trans(skb, dev);

	if (mgp->csum_flag) {
		if ((skb->protocol == htons(ETH_P_IP)) ||
		    (skb->protocol == htons(ETH_P_IPV6))) {
			skb->csum = csum;
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else
			/* non-IP (e.g. VLAN-tagged): let the helper decide
			 * what to do with the hardware checksum */
			myri10ge_vlan_ip_csum(skb, csum);
	}
	netif_receive_skb(skb);
	dev->last_rx = jiffies;
	return 1;
}

/*
 * Reap transmit completions up to firmware index @mcp_index: free the
 * skbs, undo their DMA mappings (single mapping for the slot holding
 * the skb, page mappings for continuation slots), update stats, and
 * wake the queue once at least half the ring has drained.
 * Runs in IRQ context (uses dev_kfree_skb_irq).
 */
static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
{
	struct pci_dev *pdev = mgp->pdev;
	struct myri10ge_tx_buf *tx = &mgp->tx;
	struct sk_buff *skb;
	int idx, len;

	while (tx->pkt_done != mcp_index) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		if (tx->info[idx].last) {
			tx->pkt_done++;
			tx->info[idx].last = 0;
		}
		tx->done++;
		len = pci_unmap_len(&tx->info[idx], len);
		pci_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			mgp->stats.tx_bytes += skb->len;
			mgp->stats.tx_packets++;
			dev_kfree_skb_irq(skb);
			if (len)
				pci_unmap_single(pdev,
						 pci_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			/* slot without an skb holds a page fragment of a
			 * multi-slot packet */
			if (len)
				pci_unmap_page(pdev,
					       pci_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}
	/* start the queue if we've stopped it */
	if (netif_queue_stopped(mgp->dev)
	    && tx->req - tx->done < (tx->mask >> 1)) {
		mgp->wake_queue++;
		netif_wake_queue(mgp->dev);
	}
}

/*
 * Drain the firmware's receive-completion ring, delivering at most
 * @budget frames via myri10ge_rx_done() (small or big ring chosen by
 * frame length).  Flushes LRO when enabled and restocks either receive
 * ring that has fallen below the fill threshold.  Returns the number of
 * completion entries processed (the NAPI work count).
 */
static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget)
{
	struct myri10ge_rx_done *rx_done = &mgp->rx_done;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long rx_ok;

	int idx = rx_done->idx;
	int cnt = rx_done->cnt;
	int work_done = 0;
	u16 length;
	__wsum checksum;

	/* a zero length marks an empty slot; the firmware writes a
	 * nonzero length when a frame completes */
	while (rx_done->entry[idx].length != 0 && work_done < budget) {
		length = ntohs(rx_done->entry[idx].length);
		rx_done->entry[idx].length = 0;
		checksum = csum_unfold(rx_done->entry[idx].checksum);
		if (length <= mgp->small_bytes)
			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small,
						 mgp->small_bytes,
						 length, checksum);
		else
			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
						 mgp->big_bytes,
						 length, checksum);
		/* rx_ok is 0 on drop, so stats only count delivered frames */
		rx_packets += rx_ok;
		rx_bytes += rx_ok * (unsigned long)length;
		cnt++;
		idx = cnt & (myri10ge_max_intr_slots - 1);
		work_done++;
	}
	rx_done->idx = idx;
	rx_done->cnt = cnt;
	mgp->stats.rx_packets += rx_packets;
	mgp->stats.rx_bytes += rx_bytes;
	if (myri10ge_lro)
		lro_flush_all(&rx_done->lro_mgr);

	/* restock receive rings if needed */
	if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
	return work_done;
}

/*
 * Process the firmware's asynchronous status block: propagate link
 * up/down/mismatch transitions to the net core, warn when the NIC's
 * RDMA tag count changes (RDMA timeout), and wake anyone waiting in
 * down_wq on a link-down event.
 */
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
	struct mcp_irq_data *stats = mgp->fw_stats;

	if (unlikely(stats->stats_updated)) {
		unsigned link_up = ntohl(stats->link_up);
		if (mgp->link_state != link_up) {
			mgp->link_state = link_up;
			if (mgp->link_state == MXGEFW_LINK_UP) {
				if (netif_msg_link(mgp))
					printk(KERN_INFO
					       "myri10ge: %s: link up\n",
					       mgp->dev->name);
				netif_carrier_on(mgp->dev);
				mgp->link_changes++;
			} else {
				if (netif_msg_link(mgp))
					printk(KERN_INFO
					       "myri10ge: %s: link %s\n",
					       mgp->dev->name,
					       (link_up == MXGEFW_LINK_MYRINET ?
						"mismatch (Myrinet detected)" :
						"down"));
				netif_carrier_off(mgp->dev);
				mgp->link_changes++;
			}
		}
		if (mgp->rdma_tags_available !=
		    ntohl(mgp->fw_stats->rdma_tags_available)) {
			mgp->rdma_tags_available =
			    ntohl(mgp->fw_stats->rdma_tags_available);
			printk(KERN_WARNING "myri10ge: %s: RDMA timed out! "
			       "%d tags left\n", mgp->dev->name,
			       mgp->rdma_tags_available);
		}
		mgp->down_cnt += stats->link_down;
		if (stats->link_down)
			wake_up(&mgp->down_wq);
	}
}

/*
 * NAPI poll callback: clean up to @budget receive completions; if the
 * ring drained before the budget was exhausted, leave polling mode and
 * tell the NIC it may raise interrupts again via irq_claim.
 * (The magic value 3 written to irq_claim is a firmware handshake
 * token -- presumably "claim both rx and tx events"; confirm against
 * the MCP firmware interface docs.)
 */
static int myri10ge_poll(struct napi_struct *napi, int budget)
{
	struct myri10ge_priv *mgp =
	    container_of(napi, struct myri10ge_priv, napi);
	struct net_device *netdev = mgp->dev;
	int work_done;

	/* process as many rx events as NAPI will allow */
	work_done = myri10ge_clean_rx_done(mgp, budget);

	if (work_done < budget) {
		netif_rx_complete(netdev, napi);
		put_be32(htonl(3), mgp->irq_claim);
	}
	return work_done;
}

/*
 * Interrupt handler.  stats->valid (DMA'd by the NIC) proves the IRQ is
 * ours; its low bit signals pending receives, which are deferred to the
 * NAPI poller.  Transmit completions and the status block are handled
 * inline here.  For legacy INTx the handler spins (bounded by
 * myri10ge_max_irq_loops) until the firmware clears stats->valid,
 * scheduling the watchdog if the line appears stuck.
 */
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
	struct myri10ge_priv *mgp = arg;
	struct mcp_irq_data *stats = mgp->fw_stats;
	struct myri10ge_tx_buf *tx = &mgp->tx;
	u32 send_done_count;
	int i;

	/* make sure it is our IRQ, and that the DMA has finished */
	if (unlikely(!stats->valid))
		return (IRQ_NONE);

	/* low bit indicates receives are present, so schedule
	 * napi poll handler */
	if (stats->valid & 1)
		netif_rx_schedule(mgp->dev, &mgp->napi);

	if (!mgp->msi_enabled) {
		/* INTx: explicitly deassert the interrupt line */
		put_be32(0, mgp->irq_deassert);
		if (!myri10ge_deassert_wait)
			stats->valid = 0;
		mb();
	} else
		stats->valid = 0;

	/* Wait for IRQ line to go low, if using INTx */
	i = 0;
	while (1) {
		i++;
		/* check for transmit completes and receives */
		send_done_count = ntohl(stats->send_done_count);
		if (send_done_count != tx->pkt_done)
			myri10ge_tx_done(mgp, (int)send_done_count);
		if (unlikely(i > myri10ge_max_irq_loops)) {
			printk(KERN_WARNING "myri10ge: %s: irq stuck?\n",
			       mgp->dev->name);
			stats->valid = 0;
			schedule_work(&mgp->watchdog_work);
		}
		if (likely(stats->valid == 0))
			break;
		cpu_relax();
		barrier();
	}

	myri10ge_check_statblock(mgp);

	/* acknowledge the interrupt to the NIC (second irq_claim word) */
	put_be32(htonl(3), mgp->irq_claim + 1);
	return (IRQ_HANDLED);
}

/* ethtool: the link is always 10 Gb/s full duplex, no autonegotiation */
static int
myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->speed = SPEED_10000;
	cmd->duplex = DUPLEX_FULL;
	return 0;
}

/* ethtool: report driver name/version, firmware version and PCI slot */
static void
myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	strlcpy(info->driver, "myri10ge", sizeof(info->driver));
	strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
	strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}

/* ethtool: read the current rx interrupt coalescing delay (usecs) */
static int
myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	coal->rx_coalesce_usecs = mgp->intr_coal_delay;
	return 0;
}

/* ethtool: set the rx interrupt coalescing delay and push it to the NIC */
static int
myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	mgp->intr_coal_delay = coal->rx_coalesce_usecs;
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
	return 0;
}

/* ethtool: rx and tx pause are controlled by the single mgp->pause flag */
static void
myri10ge_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	pause->autoneg = 0;
	pause->rx_pause = mgp->pause;
	pause->tx_pause = mgp->pause;
}

/*
 * ethtool: change the pause setting.
 * NOTE(review): the rx_pause branch also passes pause->tx_pause to
 * myri10ge_change_pause -- presumably intentional because the hardware
 * has a single combined pause control (see get_pauseparam), but worth
 * confirming.
 * NOTE(review): SOURCE is truncated here -- the remainder of this
 * function past the autoneg check is not visible in this chunk.
 */
static int
myri10ge_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	if (pause->tx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->tx_pause);
	if (pause->rx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->tx_pause);
	if (pause->autoneg != 0)
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -