gianfar.c
				dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb = NULL;

		rxbdp->status = 0;

		skb = gfar_new_skb(dev, rxbdp);

		priv->rx_skbuff[i] = skb;

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one. */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptTransmit);

			err = -1;
			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
					dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptTransmit);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}
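/*
 * A note on the rings initialized above: the last descriptor in each ring
 * carries a hardware WRAP flag, so walking a ring never needs a modulo.
 * The helper below is a sketch, not part of the original driver -- a
 * hypothetical next_txbd() showing the advance-and-wrap pattern that
 * gfar_start_xmit() open-codes further down when it moves priv->cur_tx.
 */
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base)
{
	/* Restart at the ring base once the hardware WRAP bit is seen;
	 * otherwise the next descriptor is simply adjacent in memory. */
	return (bdp->status & TXBD_WRAP) ? base : bdp + 1;
}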
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	int err;

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err)
		return err;

	err = startup_gfar(dev);

	netif_start_queue(dev);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb,
		struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is,
	 * and provide the already-calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
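/*
 * Worked example for gfar_tx_checksum() above -- a sketch, assuming a plain
 * untagged IPv4/TCP frame with no IP options and the usual 8-byte FCB:
 * after gfar_add_fcb() has pushed the FCB, skb_network_offset() is
 * ETH_HLEN + GMAC_FCB_LEN = 14 + 8 = 22, so l3os = 22 - 8 = 14 (the
 * Ethernet header), and l4os = skb_network_header_len() = 20 (the IPv4
 * header).  The controller uses these two offsets to locate the TCP
 * header whose checksum it completes from the provided pseudo-header sum.
 */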
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	priv->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_PARTIAL == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers. */
	eieio();

	txbdp->status = status;

	/* If this was the last BD in the ring, the next one
	 * is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		priv->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* Returns a net_device_stats structure pointer */
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return &(priv->stats);
}

/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
}
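/*
 * gfar_change_mtu() below sizes RX buffers by rounding the worst-case
 * frame size up to the next multiple of INCREMENTAL_BUFFER_SIZE.  Worked
 * example -- a sketch, assuming the customary value of 512: a 1500-byte
 * MTU with no VLAN, FCB, or padding gives frame_size = 1500 + 14 = 1514,
 * and (1514 & ~511) + 512 = 1024 + 512 = 1536, so mrblr is programmed to
 * 1536 bytes.  Note the expression always rounds up to the *next*
 * multiple, even when frame_size is already aligned.
 */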
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

#ifdef CONFIG_GFAR_SKBUFF_RECYCLING
	gfar_skbr_register_truesize(priv);
	printk(KERN_INFO "%s: MTU = %d (frame size=%d, truesize=%d)\n",
			dev->name, dev->mtu, frame_size,
			priv->rx_skbuff_truesize);
#endif /* CONFIG_GFAR_SKBUFF_RECYCLING */

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and