/* forcedeth.c */
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}
	return retval;
}

static int phy_reset(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	miicontrol |= BMCR_RESET;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
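/*
 * Illustrative sketch, not part of the original driver: mii_rw() does no
 * locking itself ("Caller must guarantee serialization" above). A read
 * returns the register value, or -1 on timeout; a write returns 0 or -1.
 * A typical serialized read-modify-write through it, here forcing the PHY
 * to 100 Mbit full duplex with autonegotiation off, might look like the
 * hypothetical helper below (the function name is made up; the BMCR_* bits
 * are the standard ones from linux/mii.h).
 */
static int example_force_100_full(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int bmcr;

	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	if (bmcr == -1)			/* read timed out */
		return -1;
	bmcr &= ~BMCR_ANENABLE;		/* autonegotiation off */
	bmcr |= BMCR_SPEED100 | BMCR_FULLDPLX;
	return mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
}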
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
			dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	u8 *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	u8 *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	u8 *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(0, base + NvRegUnknownTransmitterReg);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl);
	pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}
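/*
 * Illustrative sketch, not part of the original driver: the rx/tx helpers
 * above are meant to be used in matched pairs - stop both engines, pulse
 * the reset bit via nv_txrx_reset(), then restart. The hypothetical helper
 * below shows that ordering; the driver's open/close and timeout paths do
 * the equivalent with np->lock held and the rings in a known-good state.
 */
static void example_restart_engines(struct net_device *dev)
{
	nv_stop_tx(dev);	/* waits for NVREG_XMITSTAT_BUSY to clear */
	nv_stop_rx(dev);	/* waits for NVREG_RCVSTAT_BUSY to clear */
	nv_txrx_reset(dev);	/* pulses NVREG_TXRXCTL_RESET */
	nv_start_rx(dev);
	nv_start_tx(dev);
}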
static int nv_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 *base = get_hwbase(dev);
	u32 ethcmd;

	if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO:
	{
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
		strcpy(info.driver, "forcedeth");
		strcpy(info.version, FORCEDETH_VERSION);
		strcpy(info.bus_info, pci_name(np->pci_dev));
		if (copy_to_user(useraddr, &info, sizeof (info)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_GLINK:
	{
		struct ethtool_value edata = { ETHTOOL_GLINK };

		edata.data = !!netif_carrier_ok(dev);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_GWOL:
	{
		struct ethtool_wolinfo wolinfo;
		memset(&wolinfo, 0, sizeof(wolinfo));
		wolinfo.supported = WAKE_MAGIC;

		spin_lock_irq(&np->lock);
		if (np->wolenabled)
			wolinfo.wolopts = WAKE_MAGIC;
		spin_unlock_irq(&np->lock);

		if (copy_to_user(useraddr, &wolinfo, sizeof(wolinfo)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SWOL:
	{
		struct ethtool_wolinfo wolinfo;
		if (copy_from_user(&wolinfo, useraddr, sizeof(wolinfo)))
			return -EFAULT;

		spin_lock_irq(&np->lock);
		if (wolinfo.wolopts == 0) {
			writel(0, base + NvRegWakeUpFlags);
			np->wolenabled = 0;
		}
		if (wolinfo.wolopts & WAKE_MAGIC) {
			writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
			np->wolenabled = 1;
		}
		spin_unlock_irq(&np->lock);
		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

/*
 * nv_ioctl: dev->do_ioctl function
 * Called with rtnl_lock held.
 */
static int nv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	switch(cmd) {
	case SIOCETHTOOL:
		return nv_ethtool_ioctl(dev, rq->ifr_data);

	default:
		return -EOPNOTSUPP;
	}
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	unsigned int refill_rx = np->refill_rx;
	int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % RX_RING;
		if (np->rx_skbuff[nr] == NULL) {
			skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
			if (!skb)
				break;

			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
						PCI_DMA_FROMDEVICE);
		np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
		wmb();
		np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
				dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == RX_RING)
		return 1;
	return 0;
}

static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = get_nvpriv(dev);

	disable_irq(dev->irq);
	if (nv_alloc_rx(dev)) {
		spin_lock(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock(&np->lock);
	}
	enable_irq(dev->irq);
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < TX_RING; i++)
		np->tx_ring[i].FlagLen = 0;

	np->cur_rx = RX_RING;
	np->refill_rx = 0;
	for (i = 0; i < RX_RING; i++)
		np->rx_ring[i].FlagLen = 0;

	return nv_alloc_rx(dev);
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	for (i = 0; i < TX_RING; i++) {
		np->tx_ring[i].FlagLen = 0;
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->tx_dma[i],
					np->tx_skbuff[i]->len,
					PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	for (i = 0; i < RX_RING; i++) {
		np->rx_ring[i].FlagLen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
					np->rx_skbuff[i]->len,
					PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
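/*
 * Illustrative sketch, not part of the original driver: nv_do_rx_refill()
 * above is written as an old-style timer callback - it receives the device
 * as an unsigned long and re-arms itself via mod_timer(&np->oom_kick, ...)
 * whenever nv_alloc_rx() reports that skb allocation failed and the ring
 * has no Available descriptors. Assuming np->oom_kick is a struct
 * timer_list, wiring it up during probe would look roughly like this
 * hypothetical helper:
 */
static void example_setup_oom_timer(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;	/* handed back as 'data' */
	np->oom_kick.function = &nv_do_rx_refill;	/* callback defined above */
}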
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with dev->xmit_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int nr = np->next_tx % TX_RING;

	np->tx_skbuff[nr] = skb;
	np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
					PCI_DMA_TODEVICE);

	np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);

	spin_lock_irq(&np->lock);
	wmb();
	np->tx_ring[nr].FlagLen = cpu_to_le32((skb->len-1) | np->tx_flags);
	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d queued for transmission.\n",
			dev->name, np->next_tx);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j % 16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char *)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx++;

	dev->trans_start = jiffies;
	if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
		netif_stop_queue(dev);
	spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK | np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return 0;
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 Flags;
	int i;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % TX_RING;

		Flags = le32_to_cpu(np->tx_ring[i].FlagLen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
				dev->name, np->nic_tx, Flags);
		if (Flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
					NV_TX_UNDERFLOW|NV_TX_ERROR)) {
				if (Flags & NV_TX_UNDERFLOW)
					np->stats.tx_fifo_errors++;
				if (Flags & NV_TX_CARRIERLOST)
					np->stats.tx_carrier_errors++;
				np->stats.tx_errors++;
			} else {
				np->stats.tx_packets++;
				np->stats.tx_bytes += np->tx_skbuff[i]->len;
			}
		} else {
			if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
					NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
				if (Flags & NV_TX2_UNDERFLOW)
					np->stats.tx_fifo_errors++;
				if (Flags & NV_TX2_CARRIERLOST)
					np->stats.tx_carrier_errors++;
				np->stats.tx_errors++;
			} else {
				np->stats.tx_packets++;
				np->stats.tx_bytes += np->tx_skbuff[i]->len;
			}
		}