
📄 gianfar.c

📁 Driver for the enhanced Three-Speed Ethernet Controller (eTSEC) on PowerPC MPC85xx network processors
💻 C
📖 Page 1 of 5
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->stats.tx_errors++;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_schedule(dev);
}

/* Transmit ring clean up routine */
int gfar_clean_tx_ring(struct net_device *dev)
{
	struct txbd8 *bdp;
	struct gfar_private *priv = netdev_priv(dev);
	int howmany = 0;

	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the
		 * ring is empty or full now (it could only be full in the
		 * beginning, obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		howmany++;

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			priv->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
#ifdef CONFIG_GFAR_SKBUFF_RECYCLING
		priv->extra_stats.rx_skbr_free +=
			gfar_kfree_skb(priv->tx_skbuff[priv->skb_dirtytx]);
#else
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
#endif
		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* Clean BD length for empty detection */
		bdp->length = 0;

		/* Update bdp to point at the next bd in the ring
		 * (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	priv->stats.tx_packets += howmany;

	return howmany;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->txlock);

	gfar_clean_tx_ring(dev);

	/* If we are coalescing the interrupts, reset the timer;
	 * otherwise, clear it */
	if (likely(priv->txcoalescing)) {
		gfar_write(&priv->regs->txic, 0);
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	}

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}
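/* Editor's note: a minimal sketch (not driver code) of the ring-index
 * arithmetic used in gfar_clean_tx_ring() above.  It assumes
 * TX_RING_MOD_MASK(size) expands to (size - 1) and that the ring size is a
 * power of two, so the AND acts as a cheap modulo that wraps the index back
 * to slot 0 at the end of the ring. */
static inline unsigned int ring_advance(unsigned int idx,
					unsigned int ring_size)
{
	/* e.g. ring_size = 256: (255 + 1) & 255 == 0, wrapping to the start */
	return (idx + 1) & (ring_size - 1);
}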
#ifdef CONFIG_GFAR_SKBUFF_RECYCLING
static unsigned int skbuff_truesize(unsigned int buffer_size)
{
	return SKB_DATA_ALIGN(buffer_size + RXBUF_ALIGNMENT +
			      NET_SKB_PAD) + sizeof(struct sk_buff);
}

static void gfar_skbr_register_truesize(struct gfar_private *priv)
{
	priv->rx_skbuff_truesize = skbuff_truesize(priv->rx_buffer_size);
}

/* CONFIG_GFAR_SKBUFF_RECYCLING: recycling version of gfar_new_skb() */
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned int timeout = SKB_ALLOC_TIMEOUT;
	unsigned long flags = 0;
	struct gfar_skb_handler *sh = &priv->skb_handler;
	unsigned int size;
	unsigned int truesize;
	unsigned int alignamount;

	/* Take the lock for atomic access to the gfar_skb_handler */
	spin_lock_irqsave(&sh->lock, flags);
	if (sh->recycle_queue) {
		/* A recyclable sk_buff is available; pick the one at the
		 * head of the list, i.e. the most recently freed one */
		skb = sh->recycle_queue;
		sh->recycle_queue = skb->next;
		sh->recycle_count--;
		spin_unlock_irqrestore(&sh->lock, flags);
		/* Re-initialization:
		 * we are not going to touch the buffer size, so
		 * skb->truesize can be used as the truesize again */
		truesize = skb->truesize;
		size = truesize - sizeof(struct sk_buff);
		/* Clear the structure up to &tail */
		memset(skb, 0, offsetof(struct sk_buff, tail));
		atomic_set(&skb->users, 1);
		/* Reset data and tail pointers */
		skb->data = skb->head + NET_SKB_PAD;
		skb_reset_tail_pointer(skb);
		/* Shared info clean up */
		atomic_set(&(skb_shinfo(skb)->dataref), 1);
		priv->extra_stats.rx_skbr++;
	} else {
		/* No recyclable sk_buff available; allocate a fresh one.
		 * We have to allocate the skb, so keep trying till we
		 * succeed */
		spin_unlock_irqrestore(&sh->lock, flags);
		while ((!skb) && timeout--)
			skb = dev_alloc_skb(priv->rx_buffer_size +
					    RXBUF_ALIGNMENT);
		if (skb == NULL)
			return NULL;	/* return error */
	}

	/* We need the data buffer to be aligned properly.  We will
	 * reserve as many bytes as needed to align the data properly */
	alignamount = ((unsigned)skb->data) & (RXBUF_ALIGNMENT - 1);
	skb_reserve(skb, RXBUF_ALIGNMENT - alignamount);

	skb->dev = dev;
#ifdef CONFIG_GFAR_SKBUFF_RECYCLING
	/* Keep incoming device pointer for recycling */
	skb->skb_owner = dev;
#endif
	bdp->length = 0;
	bdp->bufPtr = dma_map_single(NULL, skb->data,
				     priv->rx_buffer_size,
				     DMA_FROM_DEVICE);

	/* Mark the buffer empty */
	eieio();
	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

	return skb;
}

static int gfar_kfree_skb(struct sk_buff *skb)
{
	unsigned long int flags;
	struct gfar_private *priv;
	struct gfar_skb_handler *sh;
	int ret = 0;

	if ((skb_cloned(skb)) ||
	    (skb->skb_owner == NULL) ||
	    (skb->destructor) ||
	    (skb_shinfo(skb)->frag_list != NULL))
		goto _normal_free;

	priv = netdev_priv(skb->skb_owner);
	if (skb->truesize == priv->rx_skbuff_truesize) {
		sh = &priv->skb_handler;
		spin_lock_irqsave(&sh->lock, flags);
		if (likely(sh->recycle_count < sh->recycle_max)) {
			if (atomic_dec_and_test(&skb->users)) {
				dst_release(skb->dst);
				skb->next = sh->recycle_queue;
				sh->recycle_queue = skb;
				sh->recycle_count++;
				ret = 1;
			}
			spin_unlock_irqrestore(&sh->lock, flags);
			return ret;
		}
		spin_unlock_irqrestore(&sh->lock, flags);
	}

_normal_free:
	/* skb is not recyclable */
	dev_kfree_skb_irq(skb);
	return 0;
}

#else
/*
 * normal new skb routine
 */
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned int timeout = SKB_ALLOC_TIMEOUT;

	/* We have to allocate the skb, so keep trying till we succeed */
	while ((!skb) && timeout--)
		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (NULL == skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will
	 * reserve as many bytes as needed to align the data properly */
	skb_reserve(skb, alignamount);

	bdp->bufPtr = dma_map_single(NULL, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	bdp->length = 0;

	/* Mark the buffer empty */
	eieio();
	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

	return skb;
}
#endif /* CONFIG_GFAR_SKBUFF_RECYCLING */
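/* Editor's note: a worked example (not driver code) of the alignment
 * arithmetic shared by both gfar_new_skb() variants above.  It assumes
 * RXBUF_ALIGNMENT is a power of two (64 in typical gianfar configurations).
 * If skb->data starts at 0x1010, then 0x1010 & 63 == 16, so reserving
 * 64 - 16 == 48 bytes moves skb->data to 0x1040, a 64-byte boundary the
 * eTSEC DMA engine can use. */
static inline unsigned int rxbuf_align_pad(unsigned long data_addr,
					   unsigned int alignment)
{
	/* Bytes to reserve so the buffer lands on an alignment boundary;
	 * like the driver, an already-aligned address reserves a full
	 * alignment unit rather than zero bytes */
	return alignment - (data_addr & (alignment - 1));
}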
static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
	struct net_device_stats *stats = &priv->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;
		estats->rx_trunc++;
		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
	u32 tempval;
#else
	unsigned long flags;
#endif

	/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
	/* Clear IEVENT, so the rx interrupt isn't called again because of
	 * this interrupt.  The RX/TX/BSY bits of IEVENT are
	 * write-1-to-clear. */
	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);

	if (netif_rx_schedule_prep(dev)) {
		/* Mask off the RX and TX interrupt enables */
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RTX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		/* Defer the rest of the work to the softirq bottom half */
		__netif_rx_schedule(dev);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
				dev->name, gfar_read(&priv->regs->ievent),
				gfar_read(&priv->regs->imask));
	}
#else
	/* Clear IEVENT, so the rx interrupt isn't called again because of
	 * this interrupt */
	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	spin_lock_irqsave(&priv->rxlock, flags);
	gfar_clean_rx_ring(dev, priv->rx_ring_size);

	/* If we are coalescing interrupts, update the timer;
	 * otherwise, clear it */
	if (likely(priv->rxcoalescing)) {
		gfar_write(&priv->regs->rxic, 0);
		gfar_write(&priv->regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	}
	spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

	return IRQ_HANDLED;
}

static inline int gfar_rx_vlan(struct sk_buff *skb,
		struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
	return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
	return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums were verified,
	 * then we tell the kernel that no checksumming is necessary.
	 * Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}

static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}
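/* Editor's note: two clarifications (sketches, not driver code) for the FCB
 * helpers above.  First, gfar_get_fcb() relies on the controller prepending
 * a frame control block (struct rxfcb: flags, vlctl, ...) of GMAC_FCB_LEN
 * bytes to each received frame; skb_pull() advances skb->data past it, so
 * the stack only ever sees the Ethernet frame while the returned pointer
 * keeps the FCB readable.  Second, the '==' in gfar_rx_checksum() is
 * deliberate: assuming RXFCB_CSUM_MASK covers both the "checksum checked"
 * bits (RXFCB_CIP, RXFCB_CTU) and the corresponding error bits (RXFCB_EIP,
 * RXFCB_ETU), as in gianfar.h, the equality holds only when both sums were
 * checked and neither error bit is set; a checked-but-bad sum fails it. */
static inline int gfar_csum_verified(u16 flags)
{
	/* checked, with no error bits inside the mask */
	return (flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU);
}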
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
		priv->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		/* skb is non-NULL */
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
			ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
		else
			ret = RECEIVE(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled. */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;	/* struct rxbd8 *cur_rx */

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		rmb();
		skb = priv->rx_skbuff[priv->skb_currx];

		if (!(bdp->status &
		      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
		       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))
		    && (bdp->status & RXBD_LAST)) {
			/* Increment the number of packets */
			priv->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			priv->stats.rx_bytes += pkt_len;
		} else {
			count_errors(bdp->status, priv);

			if (skb)
				dev_kfree_skb_any(skb);

			priv->rx_skbuff[priv->skb_currx] = NULL;
		}

		dev->last_rx = jiffies;

		/* Clear the status flags for this buffer */
		bdp->status &= ~RXBD_STATS;

		/* Add another skb for the future */
		skb = gfar_new_skb(dev, bdp);
		priv->rx_skbuff[priv->skb_currx] = skb;

		/* Update to the next pointer */
