sungem.c
}

        if (netif_msg_link(gp))
                printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
                        gp->dev->name, speed, (full_duplex ? "full" : "half"));

        val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
        if (full_duplex) {
                val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
        } else {
                /* MAC_TXCFG_NBO must be zero. */
        }
        writel(val, gp->regs + MAC_TXCFG);

        val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
        if (!full_duplex &&
            (gp->phy_type == phy_mii_mdio0 ||
             gp->phy_type == phy_mii_mdio1)) {
                val |= MAC_XIFCFG_DISE;
        } else if (full_duplex) {
                val |= MAC_XIFCFG_FLED;
        }
        if (speed == 1000)
                val |= (MAC_XIFCFG_GMII);
        writel(val, gp->regs + MAC_XIFCFG);

        /* If gigabit and half-duplex, enable carrier extension
         * mode.  Else, disable it.
         */
        if (speed == 1000 && !full_duplex) {
                val = readl(gp->regs + MAC_TXCFG);
                writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

                val = readl(gp->regs + MAC_RXCFG);
                writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
        } else {
                val = readl(gp->regs + MAC_TXCFG);
                writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

                val = readl(gp->regs + MAC_RXCFG);
                writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
        }

        if (gp->phy_type == phy_serialink ||
            gp->phy_type == phy_serdes) {
                u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

                if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
                        pause = 1;
        }

        if (!full_duplex)
                writel(512, gp->regs + MAC_STIME);
        else
                writel(64, gp->regs + MAC_STIME);

        val = readl(gp->regs + MAC_MCCFG);
        if (pause)
                val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
        else
                val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
        writel(val, gp->regs + MAC_MCCFG);

        gem_start_dma(gp);
}

static int gem_mdio_link_not_up(struct gem *gp)
{
        u16 val;

        if (gp->lstate == link_force_ret) {
                if (netif_msg_link(gp))
                        printk(KERN_INFO "%s: Autoneg failed again, keeping"
                                " forced mode\n", gp->dev->name);
                phy_write(gp, MII_BMCR, gp->link_fcntl);
                gp->timer_ticks = 5;
                gp->lstate = link_force_ok;
        } else if (gp->lstate == link_aneg) {
                val = phy_read(gp, MII_BMCR);

                if (netif_msg_link(gp))
                        printk(KERN_INFO "%s: switching to forced 100bt\n",
                                gp->dev->name);

                /* Try forced modes. */
                val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
                val &= ~(BMCR_FULLDPLX);
                val |= BMCR_SPEED100;
                phy_write(gp, MII_BMCR, val);
                gp->timer_ticks = 5;
                gp->lstate = link_force_try;
        } else {
                /* Downgrade from 100 to 10 Mbps if necessary.
                 * If already at 10Mbps, warn user about the
                 * situation every 10 ticks.
                 */
                val = phy_read(gp, MII_BMCR);
                if (val & BMCR_SPEED100) {
                        val &= ~BMCR_SPEED100;
                        phy_write(gp, MII_BMCR, val);
                        gp->timer_ticks = 5;
                        if (netif_msg_link(gp))
                                printk(KERN_INFO "%s: switching to forced 10bt\n",
                                        gp->dev->name);
                } else
                        return 1;
        }
        return 0;
}

static void gem_init_rings(struct gem *, int);
static void gem_init_hw(struct gem *, int);
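/*
 * Illustrative sketch, not part of the driver: how the fallback ladder in
 * gem_mdio_link_not_up() above maps onto MII_BMCR bits.  Clearing
 * BMCR_ANENABLE and BMCR_ANRESTART puts the PHY into forced mode;
 * BMCR_SPEED100 then selects 100 vs 10 Mbps and BMCR_FULLDPLX selects the
 * duplex (the driver always forces half duplex here).  The helper name is
 * hypothetical.
 */
static inline u16 mii_forced_bmcr_example(u16 bmcr, int speed100, int full_duplex)
{
        bmcr &= ~(BMCR_ANENABLE | BMCR_ANRESTART);      /* disable autoneg */
        bmcr &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
        if (speed100)
                bmcr |= BMCR_SPEED100;                  /* else forced 10 Mbps */
        if (full_duplex)
                bmcr |= BMCR_FULLDPLX;
        return bmcr;
}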
static void gem_reset_task(void *data)
{
        struct gem *gp = (struct gem *) data;

        /* The link went down, we reset the ring, but keep
         * DMA stopped.  Todo: Use this function for reset
         * on error as well.
         */
        if (gp->hw_running && gp->opened) {
                /* Make sure we don't get interrupts or tx packets */
                spin_lock_irq(&gp->lock);

                netif_stop_queue(gp->dev);

                writel(0xffffffff, gp->regs + GREG_IMASK);

                spin_unlock_irq(&gp->lock);

                /* Reset the chip & rings */
                gem_stop(gp);
                gem_init_rings(gp, 0);
                gem_init_hw(gp, (gp->reset_task_pending == 2));

                netif_wake_queue(gp->dev);
        }
        gp->reset_task_pending = 0;
}

static void gem_link_timer(unsigned long data)
{
        struct gem *gp = (struct gem *) data;

        if (!gp->hw_running)
                return;

        spin_lock_irq(&gp->lock);

        /* If the reset task is still pending, we just
         * reschedule the link timer.  (Taking the lock first,
         * since the restart path below drops it.)
         */
        if (gp->reset_task_pending)
                goto restart;

        if (gp->phy_type == phy_mii_mdio0 ||
            gp->phy_type == phy_mii_mdio1) {
                u16 val = phy_read(gp, MII_BMSR);
                u16 cntl = phy_read(gp, MII_BMCR);
                int up;

                /* When using autoneg, we really wait for ANEGCOMPLETE or we may
                 * get a "transient" incorrect link state
                 */
                if (cntl & BMCR_ANENABLE)
                        up = (val & (BMSR_ANEGCOMPLETE | BMSR_LSTATUS)) ==
                             (BMSR_ANEGCOMPLETE | BMSR_LSTATUS);
                else
                        up = (val & BMSR_LSTATUS) != 0;
                if (up) {
                        /* Ok, here we got a link.  If we had it due to a forced
                         * fallback, and we were configured for autoneg, we do
                         * retry a short autoneg pass.  If you know your hub is
                         * broken, use ethtool ;)
                         */
                        if (gp->lstate == link_force_try &&
                            (gp->link_cntl & BMCR_ANENABLE)) {
                                gp->lstate = link_force_ret;
                                gp->link_fcntl = phy_read(gp, MII_BMCR);
                                gp->timer_ticks = 5;
                                if (netif_msg_link(gp))
                                        printk(KERN_INFO "%s: Got link after fallback, retrying"
                                                " autoneg once...\n", gp->dev->name);
                                phy_write(gp, MII_BMCR,
                                          gp->link_fcntl | BMCR_ANENABLE | BMCR_ANRESTART);
                        } else if (gp->lstate != link_up) {
                                gp->lstate = link_up;
                                if (gp->opened)
                                        gem_set_link_modes(gp);
                        }
                } else {
                        int restart = 0;

                        /* If the link was previously up, we restart the
                         * whole process
                         */
                        if (gp->lstate == link_up) {
                                gp->lstate = link_down;
                                if (netif_msg_link(gp))
                                        printk(KERN_INFO "%s: Link down\n",
                                                gp->dev->name);
                                gp->reset_task_pending = 1;
                                schedule_task(&gp->reset_task);
                                restart = 1;
                        } else if (++gp->timer_ticks > 10)
                                restart = gem_mdio_link_not_up(gp);

                        if (restart) {
                                spin_unlock_irq(&gp->lock);
                                gem_begin_auto_negotiation(gp, NULL);
                                return;
                        }
                }
        } else {
                u32 val = readl(gp->regs + PCS_MIISTAT);

                /* Link status is latched on link-down; read it twice
                 * to get the current state.
                 */
                if (!(val & PCS_MIISTAT_LS))
                        val = readl(gp->regs + PCS_MIISTAT);

                if ((val & PCS_MIISTAT_LS) != 0) {
                        gp->lstate = link_up;
                        if (gp->opened)
                                gem_set_link_modes(gp);
                }
        }

restart:
        gp->link_timer.expires = jiffies + ((12 * HZ) / 10);
        add_timer(&gp->link_timer);
        spin_unlock_irq(&gp->lock);
}

static void gem_clean_rings(struct gem *gp)
{
        struct gem_init_block *gb = gp->init_block;
        struct sk_buff *skb;
        int i;
        dma_addr_t dma_addr;

        for (i = 0; i < RX_RING_SIZE; i++) {
                struct gem_rxd *rxd;

                rxd = &gb->rxd[i];
                if (gp->rx_skbs[i] != NULL) {
                        skb = gp->rx_skbs[i];
                        dma_addr = le64_to_cpu(rxd->buffer);
                        pci_unmap_page(gp->pdev, dma_addr,
                                       RX_BUF_ALLOC_SIZE(gp),
                                       PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        gp->rx_skbs[i] = NULL;
                }
                rxd->status_word = 0;
                rxd->buffer = 0;
        }

        for (i = 0; i < TX_RING_SIZE; i++) {
                if (gp->tx_skbs[i] != NULL) {
                        struct gem_txd *txd;
                        int frag;

                        skb = gp->tx_skbs[i];
                        gp->tx_skbs[i] = NULL;

                        for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
                                txd = &gb->txd[i];
                                dma_addr = le64_to_cpu(txd->buffer);
                                pci_unmap_page(gp->pdev, dma_addr,
                                               le64_to_cpu(txd->control_word) &
                                               TXDCTRL_BUFSZ,
                                               PCI_DMA_TODEVICE);

                                if (frag != skb_shinfo(skb)->nr_frags)
                                        i++;
                        }
                        dev_kfree_skb_any(skb);
                }
        }
}
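/*
 * Illustrative sketch, not part of the driver: arming the 2.4-style
 * struct timer_list that drives gem_link_timer() above.  The poll
 * interval of (12 * HZ) / 10 jiffies works out to 1.2 seconds.  The
 * helper name is hypothetical; the fields follow the 2.4 timer API.
 */
static void gem_arm_link_timer_example(struct gem *gp)
{
        init_timer(&gp->link_timer);
        gp->link_timer.expires = jiffies + ((12 * HZ) / 10);    /* 1.2 s */
        gp->link_timer.data = (unsigned long) gp;
        gp->link_timer.function = gem_link_timer;
        add_timer(&gp->link_timer);
}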
static void gem_init_rings(struct gem *gp, int from_irq)
{
        struct gem_init_block *gb = gp->init_block;
        struct net_device *dev = gp->dev;
        int i, gfp_flags = GFP_KERNEL;
        dma_addr_t dma_addr;

        if (from_irq)
                gfp_flags = GFP_ATOMIC;

        gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

        gem_clean_rings(gp);

        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                struct gem_rxd *rxd = &gb->rxd[i];

                skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), gfp_flags);
                if (!skb) {
                        rxd->buffer = 0;
                        rxd->status_word = 0;
                        continue;
                }

                gp->rx_skbs[i] = skb;
                skb->dev = dev;
                skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET));
                dma_addr = pci_map_page(gp->pdev,
                                        virt_to_page(skb->data),
                                        ((unsigned long) skb->data &
                                         ~PAGE_MASK),
                                        RX_BUF_ALLOC_SIZE(gp),
                                        PCI_DMA_FROMDEVICE);
                rxd->buffer = cpu_to_le64(dma_addr);
                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
                skb_reserve(skb, RX_OFFSET);
        }

        for (i = 0; i < TX_RING_SIZE; i++) {
                struct gem_txd *txd = &gb->txd[i];

                txd->control_word = 0;
                txd->buffer = 0;
        }
}

static int gem_reset_one_mii_phy(struct gem *gp, int phy_addr)
{
        u16 val;
        int limit = 10000;

        val = __phy_read(gp, MII_BMCR, phy_addr);
        val &= ~BMCR_ISOLATE;
        val |= BMCR_RESET;
        __phy_write(gp, MII_BMCR, val, phy_addr);

        udelay(100);

        while (limit--) {
                val = __phy_read(gp, MII_BMCR, phy_addr);
                if ((val & BMCR_RESET) == 0)
                        break;
                udelay(10);
        }
        if ((val & BMCR_ISOLATE) && limit > 0)
                __phy_write(gp, MII_BMCR, val & ~BMCR_ISOLATE, phy_addr);

        return (limit <= 0);
}

static void gem_init_bcm5201_phy(struct gem *gp)
{
        u16 data;

        data = phy_read(gp, MII_BCM5201_MULTIPHY);
        data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE;
        phy_write(gp, MII_BCM5201_MULTIPHY, data);
}

static void gem_init_bcm5400_phy(struct gem *gp)
{
        u16 data;

        /* Configure for gigabit full duplex */
        data = phy_read(gp, MII_BCM5400_AUXCONTROL);
        data |= MII_BCM5400_AUXCONTROL_PWR10BASET;
        phy_write(gp, MII_BCM5400_AUXCONTROL, data);

        data = phy_read(gp, MII_BCM5400_GB_CONTROL);
        data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
        phy_write(gp, MII_BCM5400_GB_CONTROL, data);

        mdelay(10);

        /* Reset and configure cascaded 10/100 PHY */
        gem_reset_one_mii_phy(gp, 0x1f);

        data = __phy_read(gp, MII_BCM5201_MULTIPHY, 0x1f);
        data |= MII_BCM5201_MULTIPHY_SERIALMODE;
        __phy_write(gp, MII_BCM5201_MULTIPHY, data, 0x1f);

        data = phy_read(gp, MII_BCM5400_AUXCONTROL);
        data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
        phy_write(gp, MII_BCM5400_AUXCONTROL, data);
}

static void gem_init_bcm5401_phy(struct gem *gp)
{
        u16 data;
        int rev;

        rev = phy_read(gp, MII_PHYSID2) & 0x000f;
        if (rev == 0 || rev == 3) {
                /* Some revisions of the 5401 appear to need this
                 * initialisation sequence to disable, according
                 * to OF, "tap power management"
                 *
                 * WARNING ! OF and Darwin don't agree on the
                 * register addresses.  OF seems to interpret the
                 * register numbers below as decimal
                 */
                phy_write(gp, 0x18, 0x0c20);
                phy_write(gp, 0x17, 0x0012);
                phy_write(gp, 0x15, 0x1804);
                phy_write(gp, 0x17, 0x0013);
                phy_write(gp, 0x15, 0x1204);
                phy_write(gp, 0x17, 0x8006);
                phy_write(gp, 0x15, 0x0132);
                phy_write(gp, 0x17, 0x8006);
                phy_write(gp, 0x15, 0x0232);
                phy_write(gp, 0x17, 0x201f);
                phy_write(gp, 0x15, 0x0a20);
        }

        /* Configure for gigabit full duplex */
        data = phy_read(gp, MII_BCM5400_GB_CONTROL);
        data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
        phy_write(gp, MII_BCM5400_GB_CONTROL, data);

        mdelay(1);

        /* Reset and configure cascaded 10/100 PHY */
        gem_reset_one_mii_phy(gp, 0x1f);

        data = __phy_read(gp, MII_BCM5201_MULTIPHY, 0x1f);
        data |= MII_BCM5201_MULTIPHY_SERIALMODE;
        __phy_write(gp, MII_BCM5201_MULTIPHY, data, 0x1f);
}
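/*
 * Illustrative sketch, not part of the driver: the bounded-poll pattern
 * that gem_reset_one_mii_phy() above relies on.  BMCR_RESET is
 * self-clearing, so the loop spins until the PHY drops the bit or the
 * retry budget runs out.  The helper name is hypothetical.
 */
static int mii_wait_reset_done_example(struct gem *gp, int phy_addr)
{
        int limit = 10000;
        u16 val;

        do {
                val = __phy_read(gp, MII_BMCR, phy_addr);
                if (!(val & BMCR_RESET))
                        return 0;               /* reset completed */
                udelay(10);
        } while (--limit > 0);

        return -1;                              /* PHY stuck in reset */
}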
static void gem_init_bcm5411_phy(struct gem *gp)
{
        u16 data;

        /* Here's some more Apple black magic to set up
         * some voltage settings.
         */
        phy_write(gp, 0x1c, 0x8c23);
        phy_write(gp, 0x1c, 0x8ca3);
        phy_write(gp, 0x1c, 0x8c23);

        /* Here, Apple seems to want to reset it, do
         * it as well
         */
        phy_write(gp, MII_BMCR, BMCR_RESET);

        /* Start autoneg */
        phy_write(gp, MII_BMCR,
                  (BMCR_ANENABLE | BMCR_FULLDPLX |
                   BMCR_ANRESTART | BMCR_SPD2));

        data = phy_read(gp, MII_BCM5400_GB_CONTROL);
        data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
        phy_write(gp, MII_BCM5400_GB_CONTROL, data);
}

static void gem_init_phy(struct gem *gp)
{
        u32 mifcfg;

        if (!gp->wake_on_lan && gp->phy_mod == phymod_bcm5201)
                phy_write(gp, MII_BCM5201_INTERRUPT, 0);

        /* Revert MIF CFG setting done on stop_phy */
        mifcfg = readl(gp->regs + MIF_CFG);
        mifcfg &= ~MIF_CFG_BBMODE;
        writel(mifcfg, gp->regs + MIF_CFG);

#ifdef CONFIG_ALL_PPC
        if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
                int i;

                pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
                for (i = 0; i < 32; i++) {
                        gp->mii_phy_addr = i;
                        if (phy_read(gp, MII_BMCR) != 0xffff)
                                break;
                }
                if (i == 32) {
                        printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
                               gp->dev->name);
                        return;
                }
        }
#endif /* CONFIG_ALL_PPC */

        if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
            gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
                u32 val;

                /* Init datapath mode register. */
                if (gp->phy_type == phy_mii_mdio0 ||
                    gp->phy_type == phy_mii_mdio1) {
                        val = PCS_DMODE_MGM;
                } else if (gp->phy_type == phy_serialink) {
                        val = PCS_DMODE_SM | PCS_DMODE_GMOE;
                } else {
                        val = PCS_DMODE_ESM;
                }

                writel(val, gp->regs + PCS_DMODE);
        }

        if (gp->phy_type == phy_mii_mdio0 ||
            gp->phy_type == phy_mii_mdio1) {
                u32 phy_id;
                u16 val;

                /* Take PHY out of isolate mode and reset it. */
                gem_reset_one_mii_phy(gp, gp->mii_phy_addr);

                phy_id = (phy_read(gp, MII_PHYSID1) << 16 |
                          phy_read(gp, MII_PHYSID2)) & 0xfffffff0;
                printk(KERN_INFO "%s: MII PHY ID: %x ", gp->dev->name, phy_id);
                switch(phy_id) {
                case 0x406210:
                        gp->phy_mod = phymod_bcm5201;
                        gem_init_bcm5201_phy(gp);
                        printk("BCM 5201\n");
                        break;

                case 0x4061e0:
                        printk("BCM 5221\n");
                        gp->phy_mod = phymod_bcm5221;
                        break;

                case 0x206040:
                        printk("BCM 5400\n");
                        gp->phy_mod = phymod_bcm5400;
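/*
 * Illustrative sketch, not part of the driver (the listing above is
 * truncated mid-switch): how gem_init_phy() builds the value its switch
 * statement matches on.  MII_PHYSID1 holds the high 16 bits of the PHY
 * identifier and MII_PHYSID2 the low 16; the bottom four bits of
 * MII_PHYSID2 carry the silicon revision, so they are masked off with
 * 0xfffffff0 to let one case cover every revision.  The helper name is
 * hypothetical.
 */
static u32 gem_read_phy_id_example(struct gem *gp)
{
        u32 id;

        id  = (u32) phy_read(gp, MII_PHYSID1) << 16;
        id |= phy_read(gp, MII_PHYSID2);

        return id & 0xfffffff0;                 /* drop the revision nibble */
}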