📄 via-rhine.c
字号:
/*
 * NOTE(review): the fragment below is the tail of a function whose head lies
 * before this chunk — it refills empty rx-ring slots with fresh skbs, maps
 * them for DMA, returns descriptor ownership to the NIC, and returns a count.
 * Presumably the end of rhine_rx(); confirm against the full file.
 */
if (rp->rx_skbuff[entry] == NULL) {
        skb = dev_alloc_skb(rp->rx_buf_sz);
        rp->rx_skbuff[entry] = skb;
        if (skb == NULL)
                break;  /* Better luck next round. */
        skb->dev = dev; /* Mark as being used by this device. */
        /* Map the new buffer for DMA and give its bus address to the
         * rx descriptor. */
        rp->rx_skbuff_dma[entry] =
                pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
                               PCI_DMA_FROMDEVICE);
        rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
}
/* Hand the descriptor back to the chip. */
rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
}
return count;
}

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
        iowrite32(0, ioaddr + RxMissed);
        ioread16(ioaddr + RxCRCErrs);
        ioread16(ioaddr + RxMissed);
}

/*
 * Restart transmission after a Tx error: repoint the chip at the first
 * dirty descriptor and kick the transmitter — but only if no Tx error
 * is still pending (in that case the ISR will come back here anyway).
 */
static void rhine_restart_tx(struct net_device *dev) {
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        int entry = rp->dirty_tx % TX_RING_SIZE;
        u32 intr_status;

        /*
         * If new errors occurred, we need to sort them out before doing Tx.
         * In that case the ISR will be back here RSN anyway.
         */
        intr_status = get_intr_status(dev);

        if ((intr_status & IntrTxErrSummary) == 0) {
                /* We know better than the chip where it should continue. */
                iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
                          ioaddr + TxRingPtr);

                iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
                         ioaddr + ChipCmd);
                iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
                         ioaddr + ChipCmd1);
                IOSYNC;
        } else {
                /* This should never happen */
                if (debug > 1)
                        printk(KERN_WARNING "%s: rhine_restart_tx() "
                               "Another error occured %8.8x.\n",
                               dev->name, intr_status);
        }
}

/*
 * Handle error/slow-path interrupts: link change, stats overflow, Tx
 * aborts/underruns/descriptor races. Bumps the Tx FIFO threshold on
 * underrun and restarts the transmitter when needed.
 * Called with interrupts disabled (plain spin_lock, not _irq/_irqsave
 * — presumably from the ISR; confirm against the caller).
 */
static void rhine_error(struct net_device *dev, int intr_status)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        spin_lock(&rp->lock);

        if (intr_status & IntrLinkChange)
                rhine_check_media(dev, 0);
        if (intr_status & IntrStatsMax) {
                /* Hardware tally counters are about to overflow; fold them
                 * into the software stats and reset them. */
                rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
                rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
                clear_tally_counters(ioaddr);
        }
        if (intr_status & IntrTxAborted) {
                if (debug > 1)
                        printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
                               dev->name, intr_status);
        }
        if (intr_status & IntrTxUnderrun) {
                /* Raise the Tx FIFO threshold in 0x20 steps, capped at 0xE0,
                 * to make underruns less likely next time. */
                if (rp->tx_thresh < 0xE0)
                        iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
                if (debug > 1)
                        printk(KERN_INFO "%s: Transmitter underrun, Tx "
                               "threshold now %2.2x.\n",
                               dev->name, rp->tx_thresh);
        }
        if (intr_status & IntrTxDescRace) {
                if (debug > 2)
                        printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
                               dev->name);
        }
        /* Tx error with no more specific cause reported: same threshold
         * bump as for an underrun. */
        if ((intr_status & IntrTxError) &&
            (intr_status & (IntrTxAborted | IntrTxUnderrun |
                            IntrTxDescRace)) == 0) {
                if (rp->tx_thresh < 0xE0) {
                        iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
                }
                if (debug > 1)
                        printk(KERN_INFO "%s: Unspecified error. Tx "
                               "threshold now %2.2x.\n",
                               dev->name, rp->tx_thresh);
        }
        if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
                           IntrTxError))
                rhine_restart_tx(dev);

        /* Anything left over is unexpected — log it. */
        if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
                            IntrTxError | IntrTxAborted | IntrNormalSummary |
                            IntrTxDescRace)) {
                if (debug > 1)
                        printk(KERN_ERR "%s: Something Wicked happened! "
                               "%8.8x.\n", dev->name, intr_status);
        }

        spin_unlock(&rp->lock);
}

/*
 * dev->get_stats hook: fold the hardware tally counters into the
 * software stats (under the lock, IRQ-safe since this runs in process
 * context) and return the accumulated net_device_stats.
 */
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        unsigned long flags;

        spin_lock_irqsave(&rp->lock, flags);
        rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
        rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
        clear_tally_counters(ioaddr);
        spin_unlock_irqrestore(&rp->lock, flags);

        return &rp->stats;
}

/*
 * dev->set_multicast_list hook: program the receive filter.
 * Promiscuous mode opens the filter completely (rx_mode 0x1C); too many
 * multicast addresses (or IFF_ALLMULTI) accepts all multicasts;
 * otherwise a 64-bit CRC hash filter is built from the mc list.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        u32 mc_filter[2];       /* Multicast hash filter */
        u8 rx_mode;             /* Note: 0x02=accept runt, 0x01=accept errs */

        if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
                rx_mode = 0x1C;
                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
        } else if ((dev->mc_count > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many to match, or accept all multicasts. */
                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
                rx_mode = 0x0C;
        } else {
                struct dev_mc_list *mclist;
                int i;
                memset(mc_filter, 0, sizeof(mc_filter));
                for (i = 0, mclist = dev->mc_list;
                     mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        /* Top 6 bits of the Ethernet CRC select the hash
                         * bit, as on many 10/100 NICs of this family. */
                        int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                }
                iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
                iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
                rx_mode = 0x0C;
        }
        iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}

/* ethtool: report driver name/version and PCI bus location. */
static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        struct rhine_private *rp = netdev_priv(dev);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(rp->pdev));
}

/* ethtool: get link settings via the generic MII helper (locked). */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct rhine_private *rp = netdev_priv(dev);
        int rc;

        spin_lock_irq(&rp->lock);
        rc = mii_ethtool_gset(&rp->mii_if, cmd);
        spin_unlock_irq(&rp->lock);

        return rc;
}

/* ethtool: set link settings via MII, then refresh carrier state. */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct rhine_private *rp = netdev_priv(dev);
        int rc;

        spin_lock_irq(&rp->lock);
        rc = mii_ethtool_sset(&rp->mii_if, cmd);
        spin_unlock_irq(&rp->lock);
        rhine_set_carrier(&rp->mii_if);

        return rc;
}

/* ethtool: restart MII autonegotiation. */
static int netdev_nway_reset(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);

        return mii_nway_restart(&rp->mii_if);
}

/* ethtool: report MII link status. */
static u32 netdev_get_link(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);

        return mii_link_ok(&rp->mii_if);
}

/* ethtool: the module-wide 'debug' level doubles as the msglevel. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
        return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
        debug = value;
}

/* ethtool: report Wake-on-LAN capabilities (WOL-capable chips only). */
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct rhine_private *rp = netdev_priv(dev);

        if (!(rp->quirks & rqWOL))
                return;

        spin_lock_irq(&rp->lock);
        wol->supported = WAKE_PHY | WAKE_MAGIC |
                         WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;  /* Untested */
        wol->wolopts = rp->wolopts;
        spin_unlock_irq(&rp->lock);
}

/*
 * ethtool: store the requested WOL options; they are actually
 * programmed into the chip by rhine_shutdown(). Returns -EINVAL for
 * non-WOL chips or unsupported option bits.
 */
static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct rhine_private *rp = netdev_priv(dev);
        u32 support = WAKE_PHY | WAKE_MAGIC |
                      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;     /* Untested */

        if (!(rp->quirks & rqWOL))
                return -EINVAL;

        if (wol->wolopts & ~support)
                return -EINVAL;

        spin_lock_irq(&rp->lock);
        rp->wolopts = wol->wolopts;
        spin_unlock_irq(&rp->lock);

        return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo    = netdev_get_drvinfo,
        .get_settings   = netdev_get_settings,
        .set_settings   = netdev_set_settings,
        .nway_reset     = netdev_nway_reset,
        .get_link       = netdev_get_link,
        .get_msglevel   = netdev_get_msglevel,
        .set_msglevel   = netdev_set_msglevel,
        .get_wol        = rhine_get_wol,
        .set_wol        = rhine_set_wol,
};

/* dev->do_ioctl hook: forward MII ioctls, then refresh carrier state. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct rhine_private *rp = netdev_priv(dev);
        int rc;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irq(&rp->lock);
        rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
        spin_unlock_irq(&rp->lock);
        rhine_set_carrier(&rp->mii_if);

        return rc;
}

/*
 * dev->stop hook: quiesce the chip (loopback, mask interrupts, stop
 * Tx/Rx) under the lock, then release the IRQ and all ring buffers.
 */
static int rhine_close(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        spin_lock_irq(&rp->lock);

        netif_stop_queue(dev);
#ifdef CONFIG_VIA_RHINE_NAPI
        napi_disable(&rp->napi);
#endif

        if (debug > 1)
                printk(KERN_DEBUG "%s: Shutting down ethercard, "
                       "status was %4.4x.\n",
                       dev->name, ioread16(ioaddr + ChipCmd));

        /* Switch to loopback mode to avoid hardware races. */
        iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

        /* Disable interrupts by clearing the interrupt mask. */
        iowrite16(0x0000, ioaddr + IntrEnable);

        /* Stop the chip's Tx and Rx processes. */
        iowrite16(CmdStop, ioaddr + ChipCmd);

        spin_unlock_irq(&rp->lock);

        free_irq(rp->pdev->irq, dev);
        free_rbufs(dev);
        free_tbufs(dev);
        free_ring(dev);

        return 0;
}

/* PCI remove hook: tear down the netdev and release PCI resources. */
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);

        unregister_netdev(dev);

        pci_iounmap(pdev, rp->base);
        pci_release_regions(pdev);

        free_netdev(dev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

/*
 * PCI shutdown hook: arm the Wake-on-LAN events selected earlier via
 * rhine_set_wol() and (unless avoid_D3) drop the chip into D3 sleep.
 * No-op for chips without WOL support.
 */
static void rhine_shutdown (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        if (!(rp->quirks & rqWOL))
                return; /* Nothing to do for non-WOL adapters */

        rhine_power_init(dev);

        /* Make sure we use pattern 0, 1 and not 4, 5 */
        if (rp->quirks & rq6patterns)
                iowrite8(0x04, ioaddr + 0xA7);

        if (rp->wolopts & WAKE_MAGIC) {
                iowrite8(WOLmagic, ioaddr + WOLcrSet);
                /*
                 * Turn EEPROM-controlled wake-up back on -- some hardware may
                 * not cooperate otherwise.
                 */
                iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
        }

        if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
                iowrite8(WOLbmcast, ioaddr + WOLcgSet);

        if (rp->wolopts & WAKE_PHY)
                iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

        if (rp->wolopts & WAKE_UCAST)
                iowrite8(WOLucast, ioaddr + WOLcrSet);

        if (rp->wolopts) {
                /* Enable legacy WOL (for old motherboards) */
                iowrite8(0x01, ioaddr + PwcfgSet);
                iowrite8(ioread8(ioaddr + StickyHW) | 0x04,
                         ioaddr + StickyHW);
        }

        /* Hit power state D3 (sleep) */
        if (!avoid_D3)
                iowrite8(ioread8(ioaddr + StickyHW) | 0x03,
                         ioaddr + StickyHW);

        /* TODO: Check use of pci_enable_wake() */
}

#ifdef CONFIG_PM
/*
 * PM suspend hook: detach the interface, save PCI state, arm WOL /
 * power down via rhine_shutdown(), and release the IRQ.
 */
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
        unsigned long flags;

        if (!netif_running(dev))
                return 0;

#ifdef CONFIG_VIA_RHINE_NAPI
        napi_disable(&rp->napi);
#endif

        netif_device_detach(dev);
        pci_save_state(pdev);

        spin_lock_irqsave(&rp->lock, flags);
        rhine_shutdown(pdev);
        spin_unlock_irqrestore(&rp->lock, flags);

        free_irq(dev->irq, dev);
        return 0;
}

/*
 * PM resume hook: reacquire the IRQ, bring the device back to D0,
 * restore PCI state, rebuild the Tx/Rx rings from scratch and
 * reprogram the chip registers, then reattach the interface.
 */
static int rhine_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
        unsigned long flags;
        int ret;

        if (!netif_running(dev))
                return 0;

        /* NOTE(review): request_irq failure is only logged, not propagated
         * — the resume continues without an IRQ. */
        if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED,
                        dev->name, dev))
                printk(KERN_ERR "via-rhine %s: request_irq failed\n",
                       dev->name);

        ret = pci_set_power_state(pdev, PCI_D0);
        if (debug > 1)
                printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
                       dev->name, ret ? "failed" : "succeeded", ret);

        pci_restore_state(pdev);

        spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
        enable_mmio(rp->pioaddr, rp->quirks);
#endif
        rhine_power_init(dev);
        /* Discard stale rings and rebuild them in a known-good state. */
        free_tbufs(dev);
        free_rbufs(dev);
        alloc_tbufs(dev);
        alloc_rbufs(dev);
        init_registers(dev);
        spin_unlock_irqrestore(&rp->lock, flags);

        netif_device_attach(dev);

        return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver rhine_driver = {
        .name           = DRV_NAME,
        .id_table       = rhine_pci_tbl,
        .probe          = rhine_init_one,
        .remove         = __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
        .suspend        = rhine_suspend,
        .resume         = rhine_resume,
#endif /* CONFIG_PM */
        .shutdown       = rhine_shutdown,
};

/* Boards whose BIOS fails at PXE boot if the chip is left in D3;
 * matching systems get avoid_D3 forced on in rhine_init(). */
static struct dmi_system_id __initdata rhine_dmi_table[] = {
        {
                .ident = "EPIA-M",
                .matches = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
                },
        },
        {
                .ident = "KV7",
                .matches = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
                },
        },
        { NULL }
};

/* Module entry point: apply the DMI blacklist, register the PCI driver. */
static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
        printk(version);
#endif
        if (dmi_check_system(rhine_dmi_table)) {
                /* these BIOSes fail at PXE boot if chip is in D3 */
                avoid_D3 = 1;
                printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
                       "enabled.\n", DRV_NAME);
        }
        else if (avoid_D3)
                printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);

        return pci_register_driver(&rhine_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit rhine_cleanup(void)
{
        pci_unregister_driver(&rhine_driver);
}

module_init(rhine_init);
module_exit(rhine_cleanup);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -