gianfar.c
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_magic_packet &&
		(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
	int wake_phy = priv->wol_wake_phy &&
		(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_PHY);

	if (!netif_running(dev)) {
		netif_device_attach(dev);
		return 0;
	}

	if (!magic_packet && !wake_phy && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	if (magic_packet) {
		tempval = gfar_read(&priv->regs->maccfg2);
		tempval &= ~MACCFG2_MPEN;
		gfar_write(&priv->regs->maccfg2, tempval);
	}

	gfar_start(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	netif_device_attach(dev);

#ifdef CONFIG_GFAR_NAPI
	netif_poll_enable(dev);
#endif

	return 0;
}
#else
#define gfar_suspend NULL
#define gfar_resume NULL
#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 ecntrl = gfar_read(&priv->regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->einfo->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
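/*
 * The interface type returned above is passed straight to phy_connect();
 * for SGMII links, init_phy() below additionally brings up the on-chip
 * TBI/SerDes interface via gfar_configure_serdes().
 */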
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id,
			priv->einfo->phy_id);

	interface = gfar_get_interface(dev);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	if (interface == PHY_INTERFACE_MODE_SGMII) {
#ifdef CONFIG_SGMII_RISING_CARD
		if (phydev->addr < 0x10)
			phydev->addr += PHY_SGMII_ADDR_OFFSET;
#endif
		gfar_configure_serdes(dev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_mii __iomem *regs =
			(void __iomem *)&priv->regs->gfar_mii_regs;

	/* Initialise TBI i/f to communicate with serdes (lynx phy) */

	/* Single clk mode, mii mode off (for serdes communication) */
	gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);

	/* Supported pause and full-duplex, no half-duplex */
	gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	/* ANEG enable, restart ANEG, full duplex mode, speed[1] set */
	gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

	/* Assign the TBI an address which won't conflict with the PHYs */
	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}
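/*
 * Graceful stop: gfar_halt_nodisable() below sets DMACTRL_GRS/DMACTRL_GTS
 * to request a graceful receive/transmit stop, then spins until the
 * controller signals completion by setting IEVENT_GRSC/IEVENT_GTSC.
 */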
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent)
			 & (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(NULL,
			sizeof(struct txbd8) * priv->tx_ring_size +
			sizeof(struct rxbd8) * priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

#ifdef CONFIG_GFAR_SKBUFF_RECYCLING
/*
 * function: gfar_reset_skb_handler
 * Reset the skb handler spin lock entry during driver initialization.
 * Executed only once.
 */
static void gfar_reset_skb_handler(struct gfar_skb_handler *sh)
{
	spin_lock_init(&sh->lock);
	sh->recycle_max = GFAR_DEFAULT_RECYCLE_MAX;
	sh->recycle_count = 0;
	sh->recycle_queue = NULL;
	printk(KERN_INFO "GFAR: SKB Handler initialized at CPU#%d"
	       " (max=%d)\n", smp_processor_id(), sh->recycle_max);
}

/*
 * function: gfar_free_recycle_queue
 * Reset the SKB handler structure and free any existing socket buffers
 * and data buffers left in the recycling queue.
 */
void gfar_free_recycle_queue(struct gfar_skb_handler *sh, int lock_flag)
{
	unsigned long flags = 0;
	struct sk_buff *clist = NULL;
	struct sk_buff *skb;

	/* Get recycling queue */
	/* just for making sure there is recycle_queue */
	if (lock_flag)
		spin_lock_irqsave(&sh->lock, flags);
	if (sh->recycle_queue) {
		/* pick one from head; most recent one */
		clist = sh->recycle_queue;
		sh->recycle_count = 0;
		sh->recycle_queue = NULL;
	}
	if (lock_flag)
		spin_unlock_irqrestore(&sh->lock, flags);

	while (clist) {
		skb = clist;
		BUG_TRAP(!atomic_read(&skb->users));
		clist = clist->next;
		dev_kfree_skb_any(skb);
	}
}
#endif
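/*
 * When CONFIG_GFAR_SKBUFF_RECYCLING is enabled, free_skb_resources() below
 * first drains the per-device recycle list (taking the handler lock) and
 * only then unmaps and frees the skbs still attached to the rings.
 */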
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

#ifdef CONFIG_GFAR_SKBUFF_RECYCLING
	/* 1: spinlocking of skb_handler is required */
	gfar_free_recycle_queue(&priv->skb_handler, 1);
#endif

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {
		if (priv->tx_skbuff[i]) {
			dma_unmap_single(NULL, txbdp->bufPtr, txbdp->length,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}

		txbdp++;
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if (priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(NULL, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}
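/*
 * Descriptor ring layout set up by startup_gfar() below: one
 * dma_alloc_coherent() region holds the tx ring immediately followed by
 * the rx ring. tbase0 is written with the bus address of the tx ring, and
 * rbase0 with the bus address where the rx ring starts (tx ring start
 * plus tx_ring_size descriptors).
 */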
rx_skbuff\n",