fs_enet-main.c

From the Linux kernel source tree · C code · 1,528 lines total · part 1 of 3

C
1,528
Font size
/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#ifdef CONFIG_PPC_CPM_NEW_BINDING
#include <asm/of_platform.h>
#endif

#include "fs_enet.h"

/*************************************************/

#ifndef CONFIG_PPC_CPM_NEW_BINDING
/* Printable driver banner; only built for the legacy (non-OF) binding. */
static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
#endif

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: bitmap of NETIF_MSG_* debug categories. */
static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

/*
 * ndo set_multicast_list hook: delegate to the MAC-specific operations
 * table (FEC/SCC/FCC variants each provide their own implementation).
 */
static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

/*
 * Advance skb->data to the next 'align'-byte boundary (align must be a
 * power of two) by reserving the difference as headroom.
 */
static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}

/*
 * NAPI receive function: reap up to 'budget' completed RX buffer
 * descriptors, pushing good frames up via netif_receive_skb().
 * Returns the number of packets delivered; if fewer than 'budget',
 * polling is finished and RX interrupts are re-enabled.
 */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	/* Walk descriptors until we hit one still owned by the hardware. */
	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			/* NOTE(review): overrun is counted in rx_crc_errors
			 * rather than rx_fifo_errors — presumably inherited
			 * from the original FEC driver; confirm intentional. */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			/* Errored buffer: unmap and recycle it as-is. */
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* Small frame: copy into a fresh skb and
				 * keep the original buffer on the ring. */
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				/* Large frame: hand the buffer up and put a
				 * newly allocated one on the ring. */
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				/* Allocation failed: drop the frame and
				 * recycle the old buffer. */
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		/* Re-arm the descriptor with whichever buffer we kept. */
		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		netif_rx_complete(dev, napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}

/*
 * non NAPI receive function: same descriptor-reaping logic as the NAPI
 * path but with no budget and delivery via netif_rx() from IRQ context.
 */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			/* NOTE(review): overrun counted in rx_crc_errors,
			 * same as the NAPI path above — confirm intentional. */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}

/*
 * TX completion handler (called from the interrupt handler): reap
 * transmitted descriptors, account per-error statistics, unmap and free
 * the associated skbs, and wake the queue if the ring had been full.
 * Runs under fep->tx_lock.
 */
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		/* Entire ring already reclaimed — nothing left to reap. */
		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			/* These three error kinds stall the transmitter and
			 * require a controller restart. */
			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	/* Loop until the controller reports no pending events. */
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		/* In NAPI mode the RX event is cleared separately below,
		 * after the poll has been scheduled. */
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = napi_schedule_prep(&fep->napi);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode    */
				/* to submit a spurious interrupt while in poll  */
				if (napi_ok)
					__netif_rx_schedule(dev, &fep->napi);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}

/*
 * (Re)initialize the TX and RX buffer-descriptor rings: tear down any
 * existing buffers first, then reset ring pointers and counters.
 */
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Fullscreen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?