
📄 fs_enet-main.c

📁 linux-2.6.15.6
💻 C
📖 Page 1 of 2
/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

MODULE_PARM(fs_enet_debug, "i");
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

int fs_enet_debug = -1;		/* -1 == use FS_ENET_DEF_MSG_ENABLE as value */

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

/* NAPI receive function */
static int fs_enet_rx_napi(struct net_device *dev, int *budget)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	int rx_work_limit = 0;	/* pacify gcc */

	rx_work_limit = min(dev->quota, *budget);

	if (!netif_running(dev))
		return 0;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi*/
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			/* napi, got packet but no quota */
			if (--rx_work_limit < 0)
				break;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					memcpy(skbn->data, skb->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	dev->quota -= received;
	*budget -= received;

	if (rx_work_limit < 0)
		return 1;	/* not done */

	/* done */
	netif_rx_complete(dev);

	(*fep->ops->napi_enable_rx)(dev);

	return 0;
}

/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					memcpy(skbn->data, skb->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}

static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->lock);

	if (do_wake)
		netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {

		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = netif_rx_schedule_prep(dev);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode    */
				/* to submit a spurious interrupt while in poll  */
				if (napi_ok)
					__netif_rx_schedule(dev);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}

void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s Memory squeeze, unable to allocate skb\n",
			       dev->name);
			break;
		}
		fep->rx_skbuff[i] = skb;
		skb->dev = dev;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fillup remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}

void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
			L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}

/**********************************************************************************/

static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s tx queue full!.\n", dev->name);
		return NETDEV_TX_BUSY;
	}
