fealnx.c
来自「linux 内核源代码」· C语言 代码 · 共 1,989 行 · 第 1/4 页
C
1,989 行
np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; /* crc error */ np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; } if (intr_status & (RI | RBU)) { if (intr_status & RI) netdev_rx(dev); else { stop_nic_rx(ioaddr, np->crvalue); reset_rx_descriptors(dev); iowrite32(np->crvalue, ioaddr + TCRRCR); } } while (np->really_tx_count) { long tx_status = np->cur_tx->status; long tx_control = np->cur_tx->control; if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */ struct fealnx_desc *next; next = np->cur_tx->next_desc_logical; tx_status = next->status; tx_control = next->control; } if (tx_status & TXOWN) break; if (!(np->crvalue & CR_W_ENH)) { if (tx_status & (CSL | LC | EC | UDF | HF)) { np->stats.tx_errors++; if (tx_status & EC) np->stats.tx_aborted_errors++; if (tx_status & CSL) np->stats.tx_carrier_errors++; if (tx_status & LC) np->stats.tx_window_errors++; if (tx_status & UDF) np->stats.tx_fifo_errors++; if ((tx_status & HF) && np->mii.full_duplex == 0) np->stats.tx_heartbeat_errors++; } else { np->stats.tx_bytes += ((tx_control & PKTSMask) >> PKTSShift); np->stats.collisions += ((tx_status & NCRMask) >> NCRShift); np->stats.tx_packets++; } } else { np->stats.tx_bytes += ((tx_control & PKTSMask) >> PKTSShift); np->stats.tx_packets++; } /* Free the original skb. 
*/ pci_unmap_single(np->pci_dev, np->cur_tx->buffer, np->cur_tx->skbuff->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(np->cur_tx->skbuff); np->cur_tx->skbuff = NULL; --np->really_tx_count; if (np->cur_tx->control & TXLD) { np->cur_tx = np->cur_tx->next_desc_logical; ++np->free_tx_count; } else { np->cur_tx = np->cur_tx->next_desc_logical; np->cur_tx = np->cur_tx->next_desc_logical; np->free_tx_count += 2; } num_tx++; } /* end of for loop */ if (num_tx && np->free_tx_count >= 2) netif_wake_queue(dev); /* read transmit status for enhanced mode only */ if (np->crvalue & CR_W_ENH) { long data; data = ioread32(ioaddr + TSR); np->stats.tx_errors += (data & 0xff000000) >> 24; np->stats.tx_aborted_errors += (data & 0xff000000) >> 24; np->stats.tx_window_errors += (data & 0x00ff0000) >> 16; np->stats.collisions += (data & 0x0000ffff); } if (--boguscnt < 0) { printk(KERN_WARNING "%s: Too much work at interrupt, " "status=0x%4.4x.\n", dev->name, intr_status); if (!np->reset_timer_armed) { np->reset_timer_armed = 1; np->reset_timer.expires = RUN_AT(HZ/2); add_timer(&np->reset_timer); stop_nic_rxtx(ioaddr, 0); netif_stop_queue(dev); /* or netif_tx_disable(dev); ?? */ /* Prevent other paths from enabling tx,rx,intrs */ np->crvalue_sv = np->crvalue; np->imrvalue_sv = np->imrvalue; np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */ np->imrvalue = 0; } break; } } while (1); /* read the tally counters */ /* missed pkts */ np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; /* crc error */ np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; if (debug) printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", dev->name, ioread32(ioaddr + ISR)); iowrite32(np->imrvalue, ioaddr + IMR); spin_unlock(&np->lock); return IRQ_RETVAL(handled);}/* This routine is logically part of the interrupt handler, but separated for clarity and better register allocation. 
*/
/*
 * netdev_rx - drain completed RX descriptors and hand packets to the stack.
 *
 * Walks the logical RX ring from np->cur_rx while the hardware has released
 * the descriptor (RXOWN clear) and an skb is still attached.  Good frames
 * are either copied into a fresh small skb (below rx_copybreak) or the ring
 * skb itself is unmapped and passed up.  Error frames update the relevant
 * rx_* counters; a frame spanning descriptors without a visible RXLSD, or a
 * missing RXLSD altogether, triggers a full RX stop/reset of the ring.
 * Returns 0 always; callers ignore the value.
 *
 * NOTE(review): runs from interrupt context (called by the ISR), hence
 * netif_rx() and no sleeping calls — confirm against the ISR caller.
 */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
		s32 rx_status = np->cur_rx->status;

		/* No descriptors currently owned by hardware side-processing. */
		if (np->really_rx_count == 0)
			break;

		if (debug)
			printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);

		/* A good single-descriptor frame has both RXFSD (first) and
		   RXLSD (last) set and no ErrorSummary.  Anything else is an
		   error or a multi-descriptor (oversized) frame. */
		if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
		    (rx_status & ErrorSummary)) {
			if (rx_status & ErrorSummary) {	/* there was a fatal error */
				if (debug)
					printk(KERN_DEBUG
					       "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, rx_status);

				np->stats.rx_errors++;	/* end of a packet. */
				if (rx_status & (LONG | RUNT))
					np->stats.rx_length_errors++;
				if (rx_status & RXER)
					np->stats.rx_frame_errors++;
				if (rx_status & CRC)
					np->stats.rx_crc_errors++;
			} else {
				int need_to_reset = 0;
				int desno = 0;

				if (rx_status & RXFSD) {	/* this pkt is too long, over one rx buffer */
					struct fealnx_desc *cur;

					/* check this packet is received completely?
					   Scan forward for a CPU-owned descriptor
					   carrying RXLSD; desno counts how many
					   descriptors the frame occupies. */
					cur = np->cur_rx;
					while (desno <= np->really_rx_count) {
						++desno;
						if ((!(cur->status & RXOWN)) &&
						    (cur->status & RXLSD))
							break;
						/* goto next rx descriptor */
						cur = cur->next_desc_logical;
					}
					/* Last descriptor never found: frame is
					   incomplete, recover by resetting RX. */
					if (desno > np->really_rx_count)
						need_to_reset = 1;
				} else	/* RXLSD did not find, something error */
					need_to_reset = 1;

				if (need_to_reset == 0) {
					int i;

					np->stats.rx_length_errors++;

					/* free all rx descriptors related this long pkt:
					   return each one to hardware ownership. */
					for (i = 0; i < desno; ++i) {
						if (!np->cur_rx->skbuff) {
							printk(KERN_DEBUG
							       "%s: I'm scared\n",
							       dev->name);
							break;
						}
						np->cur_rx->status = RXOWN;
						np->cur_rx = np->cur_rx->next_desc_logical;
					}
					continue;
				} else {	/* rx error, need to reset this chip */
					stop_nic_rx(ioaddr, np->crvalue);
					reset_rx_descriptors(dev);
					iowrite32(np->crvalue, ioaddr + TCRRCR);
				}
				break;	/* exit the while loop */
			}
		} else {	/* this received pkt is ok */

			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
			if (debug)
				printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, rx_status);
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Sync the buffer for CPU access before copying
				   out of the still-mapped ring skb. */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    np->cur_rx->buffer,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */

#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb,
					np->cur_rx->skbuff->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					np->cur_rx->skbuff->data, pkt_len);
#endif
				/* Hand the buffer back to the device; the ring
				   skb stays mapped and attached for reuse. */
				pci_dma_sync_single_for_device(np->pci_dev,
							       np->cur_rx->buffer,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Pass the ring skb itself upstream: unmap it
				   and detach it so allocate_rx_buffers() will
				   refill this slot later. */
				pci_unmap_single(np->pci_dev,
						 np->cur_rx->buffer,
						 np->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->cur_rx->skbuff, pkt_len);
				np->cur_rx->skbuff = NULL;
				--np->really_rx_count;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}

		np->cur_rx = np->cur_rx->next_desc_logical;
	}			/* end of while loop */

	/* allocate skb for rx buffers: refill any slots emptied above. */
	allocate_rx_buffers(dev);

	return 0;
}

/*
 * get_stats - return the device statistics block.
 *
 * Folds the chip's TALLY register (missed-frame count in the low 15 bits,
 * CRC-error count in bits 16-30) into np->stats before returning it, but
 * only while the interface is running (the register is only read then).
 * NOTE(review): TALLY is read twice here, once per field — presumably the
 * counters clear on read; confirm against the chip datasheet.
 */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* The chip only need report frame silently dropped. */
	if (netif_running(dev)) {
		np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
		np->stats.rx_crc_errors +=
		    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
	}

	return &np->stats;
}

/* for dev->set_multicast_list: locked wrapper around __set_rx_mode(). */
static void set_rx_mode(struct net_device *dev)
{
	spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
	unsigned long flags;

	spin_lock_irqsave(lp, flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(lp, flags);
}

/*
 * __set_rx_mode - program the RX filter (promiscuous / all-multi / hash).
 *
 * Take lock before calling.  Builds a 64-bit multicast hash from the CRC of
 * each multicast address, stops RX/TX, writes the hash to MAR0/MAR1, then
 * rewrites TCRRCR with the new mode bits (which restarts the engines with
 * the cached np->crvalue).
 */
static void __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_AB | CR_W_AM;
	} else {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			unsigned int bit;
			/* Hash index: top 6 bits of the Ethernet CRC, inverted. */
			bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
			mc_filter[bit >> 5] |= (1 << bit);
		}
		rx_mode = CR_W_AB | CR_W_AM;
	}

	stop_nic_rxtx(ioaddr, np->crvalue);

	iowrite32(mc_filter[0], ioaddr + MAR0);
	iowrite32(mc_filter[1], ioaddr + MAR1);
	np->crvalue &= ~CR_W_RXMODEMASK;
	np->crvalue |= rx_mode;
	iowrite32(np->crvalue, ioaddr + TCRRCR);
}

/* ethtool: report driver name, version and PCI bus location. */
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

/* ethtool: read link settings from the MII PHY (under np->lock). */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

/* ethtool: write link settings to the MII PHY (under np->lock). */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

/* ethtool: restart PHY autonegotiation. */
static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

/* ethtool: report current link state from the PHY. */
static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

/* ethtool: the driver-wide 'debug' module parameter doubles as msglevel. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};

/* SIOCGMIIxxx/SIOCSMIIxxx ioctls, forwarded to the generic MII helper.
   Returns -EINVAL when the interface is down. */
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}

/*
 * netdev_close - bring the interface down.
 *
 * Stops the TX queue, masks all chip interrupts, halts the RX/TX engines,
 * kills both driver timers, releases the IRQ, then unmaps and frees every
 * skb still attached to the RX and TX rings.  Always returns 0.
 */
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	netif_stop_queue(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x0000, ioaddr + IMR);

	/* Stop the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	del_timer_sync(&np->timer);
	del_timer_sync(&np->reset_timer);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	/* Free any skbs still pending on the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}

/* PCI IDs handled by this driver; the last field indexes the board type. */
static struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{}			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);

static struct pci_driver fealnx_driver = {
	.name		= "fealnx",
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= __devexit_p(fealnx_remove_one),
};

/* Module entry point: register the PCI driver. */
static int __init fealnx_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&fealnx_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit fealnx_exit(void)
{
	pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?