📄 gianfar.c
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_PARTIAL == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers. */
	eieio();
	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}
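/*
 * Editorial illustration -- not part of the original gianfar.c.  The index
 * arithmetic in gfar_start_xmit() above only works because the TX ring size
 * is a power of two: assuming TX_RING_MOD_MASK(size) expands to ((size) - 1),
 * as gianfar.h defines it, "(index + 1) & mask" wraps back to slot 0 without
 * a conditional.  A minimal stand-alone sketch, with illustrative names only:
 */
#if 0	/* illustration only; never compiled as part of the driver */
#include <stdio.h>

#define DEMO_RING_SIZE	64			/* must be a power of two */
#define DEMO_RING_MASK	(DEMO_RING_SIZE - 1)	/* plays the role of TX_RING_MOD_MASK() */

int main(void)
{
	unsigned int idx = DEMO_RING_SIZE - 1;	/* last slot in the ring */

	idx = (idx + 1) & DEMO_RING_MASK;	/* 64 & 63 == 0: wrapped to the start */
	printf("next index: %u\n", idx);	/* prints 0 */
	return 0;
}
#endif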
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

#ifdef CONFIG_GFAR_NAPI
	napi_disable(&priv->napi);
#endif

	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_ETH_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
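/*
 * Editorial illustration -- not part of the original gianfar.c.
 * gfar_change_mtu() above rounds the worst-case frame size up to the next
 * multiple of INCREMENTAL_BUFFER_SIZE (assumed here to be 512, its value in
 * gianfar.h); note that the mask-and-add expression always adds at least one
 * full increment.  A stand-alone sketch of the arithmetic, illustrative names:
 */
#if 0	/* illustration only; never compiled as part of the driver */
#include <stdio.h>

#define DEMO_INCREMENT	512	/* stand-in for INCREMENTAL_BUFFER_SIZE */

int main(void)
{
	/* e.g. new_mtu = 1500, ETH_HLEN = 14, no VLAN tag, FCB or padding */
	int frame_size = 1500 + 14;
	int tempsize = (frame_size & ~(DEMO_INCREMENT - 1)) + DEMO_INCREMENT;

	printf("frame_size=%d -> rx_buffer_size=%d\n", frame_size, tempsize);
	/* prints: frame_size=1514 -> rx_buffer_size=1536 */
	return 0;
}
#endif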
/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
	dev->stats.tx_errors++;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_schedule(dev);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp;

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->txlock);
	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the */
		/* ring is empty or full now (it could only be full in the beginning, */
		/* obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		dev->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit, */
		/* but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (priv->txcoalescing)
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&priv->regs->txic, 0);

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}

struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned int timeout = SKB_ALLOC_TIMEOUT;

	/* We have to allocate the skb, so keep trying till we succeed */
	while ((!skb) && timeout--)
		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (NULL == skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly */
	skb_reserve(skb, alignamount);

	bdp->bufPtr = dma_map_single(NULL, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	bdp->length = 0;

	/* Mark the buffer empty */
	eieio();
	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

	return skb;
}
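/*
 * Editorial illustration -- not part of the original gianfar.c.
 * gfar_new_skb() above over-allocates by RXBUF_ALIGNMENT bytes (assumed here
 * to be 64, its value in gianfar.h) and then reserves just enough headroom to
 * push skb->data up to the next alignment boundary.  A stand-alone sketch of
 * the calculation, with illustrative names and addresses:
 */
#if 0	/* illustration only; never compiled as part of the driver */
#include <stdio.h>

#define DEMO_ALIGNMENT	64	/* stand-in for RXBUF_ALIGNMENT */

int main(void)
{
	unsigned long data = 0x100a;	/* pretend skb->data, 10 bytes past a boundary */
	unsigned long alignamount = DEMO_ALIGNMENT - (data & (DEMO_ALIGNMENT - 1));

	printf("reserve %lu bytes -> data at 0x%lx\n", alignamount, data + alignamount);
	/* prints: reserve 54 bytes -> data at 0x1040 (64-byte aligned) */
	return 0;
}
#endif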
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;
		estats->rx_trunc++;
		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
	u32 tempval;
#else
	unsigned long flags;
#endif

	/* Clear IEVENT, so rx interrupt isn't called again
	 * because of this interrupt */
	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev, &priv->napi);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
				dev->name, gfar_read(&priv->regs->ievent),
				gfar_read(&priv->regs->imask));
	}
#else
	spin_lock_irqsave(&priv->rxlock, flags);
	gfar_clean_rx_ring(dev, priv->rx_ring_size);

	/* If we are coalescing interrupts, update the timer */
	/* Otherwise, clear it */
	if (priv->rxcoalescing)
		gfar_write(&priv->regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&priv->regs->rxic, 0);

	spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

	return IRQ_HANDLED;
}

static inline int gfar_rx_vlan(struct sk_buff *skb,
		struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
	return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
	return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}

static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!!.\n",
					dev->name);
		dev->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
			ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
		else
			ret = RECEIVE(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}
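/*
 * Editorial illustration -- not part of the original gianfar.c.  The test in
 * gfar_rx_checksum() above, (fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP |
 * RXFCB_CTU), accepts a frame only when exactly the two "checksum verified"
 * bits are set within the mask; any other bit covered by the mask makes the
 * comparison fail.  A stand-alone sketch with made-up bit values (the real
 * RXFCB_* definitions live in gianfar.h):
 */
#if 0	/* illustration only; never compiled as part of the driver */
#include <stdio.h>

#define DEMO_CIP	0x0800	/* hypothetical: IP header checksum verified */
#define DEMO_CTU	0x0400	/* hypothetical: TCP/UDP checksum verified */
#define DEMO_ERR	0x0200	/* hypothetical: some other bit inside the mask */
#define DEMO_CSUM_MASK	(DEMO_CIP | DEMO_CTU | DEMO_ERR)

static int demo_csum_ok(unsigned short flags)
{
	/* same shape as the gfar_rx_checksum() test */
	return (flags & DEMO_CSUM_MASK) == (DEMO_CIP | DEMO_CTU);
}

int main(void)
{
	printf("%d\n", demo_csum_ok(DEMO_CIP | DEMO_CTU));		/* 1: both verified */
	printf("%d\n", demo_csum_ok(DEMO_CIP));				/* 0: payload not verified */
	printf("%d\n", demo_csum_ok(DEMO_CIP | DEMO_CTU | DEMO_ERR));	/* 0: extra mask bit set */
	return 0;
}
#endif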