📄 forcedeth.c
字号:
/* (tail of a msecs-to-jiffies conversion helper; its definition starts
 * before this chunk — presumably nv_msecs_to_jiffies(), which nv_msleep()
 * below calls.  TODO confirm against the full file.) */
#if HZ <= 1000 && !(1000 % HZ)
	/* HZ divides 1000 evenly: round msecs up to whole jiffies. */
	return (m + (1000 / HZ) - 1) / (1000 / HZ);
#elif HZ > 1000 && !(HZ % 1000)
	/* More than one jiffy per msec: exact multiply. */
	return m * (HZ / 1000);
#else
	/* Generic case: scale by HZ/1000, rounding up. */
	return (m * HZ + 999) / 1000;
#endif
}
#endif

/*
 * Sleep for at least @msecs milliseconds.
 * Newer kernels use msleep(); older ones (NVVER <= SLES9) emulate it
 * with an uninterruptible schedule_timeout() loop.
 */
static void nv_msleep(unsigned int msecs)
{
#if NVVER > SLES9
	msleep(msecs);
#else
	unsigned long timeout = nv_msecs_to_jiffies(msecs);

	while (timeout) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		timeout = schedule_timeout(timeout);
	}
#endif
}

/* Fetch the driver-private data for @dev (kernel-version compat wrapper). */
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
#if NVVER > RHES3
	return netdev_priv(dev);
#else
	return (struct fe_priv *) dev->priv;
#endif
}

static void __init quirk_nforce_network_class(struct pci_dev *pdev)
{
	/* Some implementations of the nVidia network controllers
	 * show up as bridges, when we need to see them as network
	 * devices.
	 */
	/* If this is already known as a network ctlr, do nothing. */
	if ((pdev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET)
		return;
	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_OTHER) {
		char c;

		/* Clearing bit 6 of the register at 0xf8
		 * selects Ethernet device class
		 */
		pci_read_config_byte(pdev, 0xf8, &c);
		c &= 0xbf;
		pci_write_config_byte(pdev, 0xf8, c);

		/* sysfs needs pdev->class to be set correctly */
		pdev->class &= 0x0000ff;
		pdev->class |= (PCI_CLASS_NETWORK_ETHERNET << 8);
	}
}

/* Return the ioremapped register base for @dev. */
static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)get_nvpriv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

/* Length field of a legacy descriptor; the mask depends on desc version @v. */
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen) & ((v == DESC_VER_1) ?
LEN_MASK_V1 : LEN_MASK_V2);
}

/* Length field of an extended (64-bit address) descriptor. */
static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
}

/*
 * Poll register @offset until (value & @mask) == @target.
 * Busy-waits in @delay-usec steps, giving up after @delaymax usecs.
 * Returns 0 on success, 1 on timeout (printing @msg if non-NULL).
 * NOTE(review): printk(msg) passes a non-literal format string —
 * callers must only pass trusted, literal messages here.
 */
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

/*
 * Program the rx/tx descriptor ring base addresses into the NIC.
 * Both rings live in one DMA allocation: tx follows rx, so the tx base
 * is ring_addr + rx_ring_size descriptors.  Legacy (v1/v2) descriptors
 * take 32-bit addresses only; extended descriptors also get the high
 * 32 bits programmed.
 */
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

/*
 * Release the descriptor ring DMA memory and the rx/tx skb bookkeeping
 * arrays.  Each free is guarded, so this is safe if allocation never
 * happened or only partially succeeded.
 */
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if(np->rx_ring.orig)
			pci_free_consistent(np->pci_dev,
					    sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev,
					    sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if
(np->tx_skb)
		kfree(np->tx_skb);
}

/*
 * Return 1 when the device uses separate MSI-X vectors for rx/tx/other,
 * 0 when a single interrupt (legacy INTx, MSI, or one-vector MSI-X)
 * serves everything.
 */
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

/* Re-enable the device's interrupt line(s) at the kernel IRQ level. */
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* Mask the device's interrupt line(s) at the kernel IRQ level. */
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);
	struct fe_priv *np = get_nvpriv(dev);

	writel(mask, base + NvRegIrqMask);
	if (np->msi_flags & NV_MSI_ENABLED)
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
}

/*
 * Mask interrupts in the hardware irq-mask registers.  In MSI-X mode
 * the mask register acts as XOR (see note above), so @mask is written;
 * otherwise the registers are simply cleared to zero.
 */
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

/* Sentinel value: tells mii_rw() to read the register instead of writing. */
#define MII_READ (-1)
/* mii_rw:
read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 *
 * @addr:   PHY address on the MII bus
 * @miireg: register number within the PHY
 * @value:  value to write, or MII_READ to perform a read
 *
 * Returns the value read, 0 on a successful write, or -1 on
 * timeout/error.
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	/* Clear stale read/write status bits before a new transaction. */
	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		/* Abort a still-pending transaction by writing INUSE back. */
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}
	return retval;
}

/*
 * Save the three Marvell LED control registers into np->led_stats[]
 * so they can be restored after a PHY reset (see nv_restore_LED_stats).
 */
static void nv_save_LED_stats(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 reg=0;
	u32 value=0;
	int i=0;

	/* Select the Marvell register page holding the LED registers. */
	reg = Mv_Page_Address;
	value = 3;
	mii_rw(dev,np->phyaddr,reg,value);
	udelay(5);

	reg = Mv_LED_Control;
	for(i=0;i<3;i++){
		np->led_stats[i]=mii_rw(dev,np->phyaddr,reg+i,MII_READ);
		dprintk(KERN_DEBUG "%s: save LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
	}
}

/*
 * Write the LED control registers saved by nv_save_LED_stats() back
 * to the Marvell PHY.
 */
static void nv_restore_LED_stats(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 reg=0;
	u32 value=0;
	int i=0;

	/* Select the Marvell register page holding the LED registers. */
	reg = Mv_Page_Address;
	value = 3;
	mii_rw(dev,np->phyaddr,reg,value);
	udelay(5);

	reg = Mv_LED_Control;
	for(i=0;i<3;i++){
mii_rw(dev,np->phyaddr,reg+i,np->led_stats[i]);
		udelay(1);
		dprintk(KERN_DEBUG "%s: restore LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
	}
}

/* Set the Marvell PHY LED control register to dual LED mode 3. */
static void nv_LED_on(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 reg=0;
	u32 value=0;

	/* Select the Marvell register page holding the LED registers. */
	reg = Mv_Page_Address;
	value = 3;
	mii_rw(dev,np->phyaddr,reg,value);
	udelay(5);

	reg = Mv_LED_Control;
	mii_rw(dev,np->phyaddr,reg,Mv_LED_DUAL_MODE3);
}

/* Force the Marvell PHY LEDs off. */
static void nv_LED_off(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 reg=0;
	u32 value=0;

	/* Select the Marvell register page holding the LED registers. */
	reg = Mv_Page_Address;
	value = 3;
	mii_rw(dev,np->phyaddr,reg,value);
	udelay(5);

	reg = Mv_LED_Control;
	mii_rw(dev,np->phyaddr,reg,Mv_LED_FORCE_OFF);
	udelay(1);
}

/*
 * Reset the PHY by writing BMCR_RESET | @bmcr_setup, then poll until
 * the reset bit self-clears (500 ms initial wait plus up to 100 polls
 * of 10 ms each).  LED register state is saved before and restored
 * after the reset on Marvell E1011 PHYs.
 * Returns 0 on success, -1 on write failure or timeout.
 */
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
	if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
		nv_save_LED_stats(dev);
	}
	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	nv_msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		nv_msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
		nv_restore_LED_stats(dev);
	}
	return 0;
}

/*
 * Initialize the PHY: apply model-specific errata fixups, then program
 * the autoneg advertisement register from np->speed_duplex and the
 * pause-frame request flags.  (Definition continues past this chunk.)
 */
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;

	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n",
pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		/* Realtek PHY init: vendor-defined register write sequence;
		 * any failed write aborts initialization. */
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	if (np->speed_duplex == NV_SPEED_DUPLEX_AUTO)
		reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL);
	if (np->speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
		reg |= ADVERTISE_10HALF;
	if (np->speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
		reg |= ADVERTISE_10FULL;
	if (np->speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
		reg |= ADVERTISE_100HALF;
	if (np->speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
		reg |= ADVERTISE_100FULL;
	if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
		reg |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
		reg |= ADVERTISE_PAUSE_ASYM;
	np->fixed_mode = reg;
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		/* advertise 1000FULL only on RGMII when the requested
		 * speed/duplex and autoneg settings allow it */
		if (phyinterface & PHY_RGMII &&
		    (np->speed_duplex == NV_SPEED_DUPLEX_AUTO ||
		     (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_ENABLE)))
			mii_control_1000 |= ADVERTISE_1000FULL;
		else {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -