⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ibm_emac_core.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 4 页
字号:
/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision maybe
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.                --ebs
 */

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 */
/* Bitmap of PHY addresses already claimed by some EMAC instance. */
static u32 busy_phy_map;

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */

/* Route EMAC 'idx' RX clock from the TX clock (PHY RX clock workaround).
 * 405EP: set per-EMAC bit in CPC0_EPCTL (DCR 0xf3); 440EP/440GR: set the
 * per-EMAC bit in SDR0_MFR. IRQs are masked around the DCR/SDR
 * read-modify-write so the update is not torn by an interrupt handler.
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);
#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif
	local_irq_restore(flags);
}

/* Undo EMAC_RX_CLK_TX(): restore the default RX clock source for EMAC 'idx'
 * by clearing the same CPC0_EPCTL / SDR0_MFR bit, again with IRQs masked.
 */
static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);
#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif
	local_irq_restore(flags);
}
#else
/* No clock fix configured: these become no-ops. */
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than 440EP case, because it's a global
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif

/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
/* Report a MAC stop/reset timeout: debug-only message when the PHY RX clock
 * fix is in play (timeouts are then expected), rate-limited KERN_ERR otherwise.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);

/* True if 'phy_mode' is one of the PHY interface modes capable of gigabit. */
static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

/* True if 'phy_mode' uses the EMAC-internal GPCS (TBI/RTBI) PHY. */
static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

/* Set MR0[TXE] to enable the transmitter, unless it is already set.
 * Runs with IRQs masked around the MR0 read-modify-write.
 */
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}

static void
emac_tx_disable(struct ocp_enet_private *dev)
{
	/* Clear MR0[TXE], then poll MR0[TXI] for up to dev->stop_timeout
	 * microseconds for the transmitter-idle indication; report on timeout.
	 * IRQs masked for the whole disable sequence.
	 */
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}

/* Enable the receiver (MR0[RXE]) unless RX is administratively stopped
 * (dev->commac.rx_stopped). If a previous asynchronous disable is still in
 * flight (MR0[RXI] not yet set), wait up to dev->stop_timeout us first.
 */
static void emac_rx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);
	if (unlikely(dev->commac.rx_stopped))
		goto out;

	DBG("%d: rx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			/* NOTE(review): '&' binds tighter than '=', so r is
			 * assigned (mr0 & EMAC_MR0_RXI), i.e. 0 or the RXI bit,
			 * not the full MR0 value; the out_be32 below then
			 * writes only RXI|RXE rather than preserving the other
			 * MR0 bits read earlier. This matches the upstream
			 * 2.6.15 source but looks suspicious — confirm intent
			 * against the EMAC MR0 register definition.
			 */
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
      out:
	local_irq_restore(flags);
}

/* Synchronous RX disable: clear MR0[RXE] and poll MR0[RXI] for up to
 * dev->stop_timeout us for receiver-idle; report on timeout.
 */
static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}

/* Asynchronous RX disable: clear MR0[RXE] but do NOT wait for RXI;
 * emac_rx_enable() handles the still-in-progress case later.
 */
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}

static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs *p =
dev->emacp;
	unsigned long flags;
	int n = 20;	/* bounded busy-wait iterations for SRST to self-clear */

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	/* Skip the graceful channel stop if the last reset already timed out:
	 * dev->reset_failed latches that state.
	 */
	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	/* Soft-reset the MAC and poll MR0[SRST] until the core clears it. */
	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}

/* Program the four group-address hash tables (GAHT1..4) from the device's
 * multicast list: bit index is derived from the top 6 bits of the Ethernet
 * CRC of each multicast address.
 */
static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}

/* Translate net_device interface flags (IFF_*) into an EMAC RMR value:
 * promiscuous -> PME; allmulti or >32 multicast entries -> PMME (accept all
 * multicast); any multicast -> MAE (use hash tables).
 */
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
	    EMAC_RMR_BASE;
	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}

/* OPB bus frequency rounded to the nearest whole MHz. */
static inline int emac_opb_mhz(void)
{
	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}

/* BHs disabled */
/* Full MAC (re)configuration after reset: mode register (speed/duplex/FIFO
 * sizes/flow control), MAC address, VLAN TPID, receive mode, FIFO thresholds,
 * PAUSE watermarks and IRQ mask. Returns 0 or -ETIMEDOUT if reset failed.
 */
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if
(dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE;
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			r |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		gige = 0;
		break;
	}

	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		mii_reset_phy(&dev->phy);

	return 0;
}

/* BHs disabled */
/* Reconfigure the MAC and, if that succeeded, re-enable TX and RX. */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
	DBG("%d: reinitialize" NL, dev->def->index);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
}

/* BHs disabled */
/* Full TX path recovery: stop TX (MAC + MAL channel), drop everything in the
 * TX ring, reset ring indices, reconfigure the MAC, then restart TX/RX and
 * wake the netdev queue.
 */
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
netif_wake_queue(ndev);
}

/* Low-level MDIO register read for PHY 'id', register 'reg'.
 * NOTE(review): function continues beyond this view — only the visible
 * portion (MDIO port selection and start of the idle wait) is documented.
 */
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
	struct emac_regs *p = dev->emacp;
	u32 r;
	int n;

	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to become idle */
	n = 10;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -