
📄 sunqe.c

📁 Combined Linux and 2410 development; can be used to generate the zImage file the 2410 needs
💻 C
📖 Page 1 of 2
/* $Id: sunqe.c,v 1.52.2.1 2001/12/21 00:52:47 davem Exp $
 * sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
 */

static char version[] =
        "sunqe.c:v2.9 9/11/99 David S. Miller (davem@redhat.com)\n";

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "sunqe.h"

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(unsigned long gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
	unsigned long cregs = qep->qcregs;
	unsigned long mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	unsigned long cregs = qep->qcregs;
	unsigned long mregs = qep->mregs;
	unsigned long gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, inter frame space nor throttle seems to be necessary. */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (tries--) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		qep->net_stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		qep->net_stats.tx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_dropped++;
		qep->net_stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		qep->net_stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;
			qep->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if (skb == NULL) {
				drops++;
				qep->net_stats.rx_dropped++;
			} else {
				skb->dev = qep->dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);
				eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
						 len, 0);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->dev->last_rx = jiffies;
				qep->net_stats.rx_packets++;
				qep->net_stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static void qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
		next:
			;
		}
		qec_status >>= 4;
		channel++;
	}
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
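The header comment's point about making the controller "look like a LANCE" is visible in qe_rx() above: driver and hardware share a descriptor ring, and a single OWN bit in each descriptor's flags word says who may touch it. The hardware clears OWN after filling a slot; the driver consumes slots until it meets one that is still owned, then hands each slot back by setting OWN again. Below is a minimal, stand-alone user-space sketch of that handshake, not driver code; RING_SIZE, DESC_OWN, next_idx(), hw_receive() and drain() are illustrative names invented for this example.

/* Stand-alone sketch of the LANCE-style OWN-bit descriptor ring. */
#include <stdio.h>

#define RING_SIZE 8                     /* power of two, like RX_RING_SIZE */
#define DESC_OWN  0x80000000u           /* set => slot owned by the NIC    */
#define DESC_LEN  0x000007ffu           /* low bits carry the frame length */

struct desc {
	unsigned int flags;             /* OWN bit + length, like rx_flags */
};

static struct desc ring[RING_SIZE];

/* Advance an index with wrap-around; cheap because RING_SIZE is a
 * power of two, the same masking trick NEXT_RX() relies on. */
static int next_idx(int i)
{
	return (i + 1) & (RING_SIZE - 1);
}

/* "Hardware" side: deposit a frame into a slot it owns, then clear
 * OWN to pass the slot to the driver. */
static void hw_receive(int idx, unsigned int len)
{
	ring[idx].flags = len & DESC_LEN;       /* OWN cleared: CPU's now */
}

/* "Driver" side, mirroring the qe_rx() loop: consume slots until we hit
 * one the hardware still owns, returning each one with OWN set. */
static int drain(int rx_new)
{
	while (!(ring[rx_new].flags & DESC_OWN)) {
		printf("frame of %u bytes at slot %d\n",
		       ring[rx_new].flags & DESC_LEN, rx_new);
		ring[rx_new].flags = DESC_OWN;  /* give slot back to NIC */
		rx_new = next_idx(rx_new);
	}
	return rx_new;                          /* remember where we stopped */
}

int main(void)
{
	int i, rx_new = 0;

	for (i = 0; i < RING_SIZE; i++)         /* NIC owns everything at boot */
		ring[i].flags = DESC_OWN;

	hw_receive(0, 64);                      /* pretend two frames arrived */
	hw_receive(1, 1500);
	rx_new = drain(rx_new);
	printf("rx_new is now %d\n", rx_new);
	return 0;
}

The power-of-two ring size is what makes the wrap-around a single AND instead of a modulo, which is why qe_init_rings() and qe_rx() can index buffers with `elem & (RX_RING_SIZE - 1)`.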

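qec_global_reset() and qe_stop() both use the same bounded reset-poll idiom: write a self-clearing reset bit, then poll it with a short delay until the device deasserts it or a retry budget runs out, so a wedged chip cannot hang the kernel forever. Here is a self-contained sketch of the idiom with a simulated register; write_reg(), read_reg(), delay_us(), CTRL_RESET and RESET_TRIES are stand-ins for this example, not real kernel APIs.

/* Stand-alone sketch of the bounded reset-poll idiom. */
#include <stdio.h>

#define CTRL_RESET  0x1u
#define RESET_TRIES 200

static unsigned int fake_ctrl_reg;      /* stands in for an MMIO register */

static void write_reg(unsigned int v) { fake_ctrl_reg = v; }

static unsigned int read_reg(void)
{
	/* Simulate the hardware clearing the bit after a few reads. */
	static int countdown = 3;

	if (countdown && (fake_ctrl_reg & CTRL_RESET))
		if (--countdown == 0)
			fake_ctrl_reg &= ~CTRL_RESET;
	return fake_ctrl_reg;
}

static void delay_us(unsigned int us) { (void) us; /* udelay() stand-in */ }

/* Returns 0 on success, -1 if the device never came out of reset,
 * exactly the shape of qec_global_reset() above. */
static int global_reset(void)
{
	int tries = RESET_TRIES;

	write_reg(CTRL_RESET);
	while (--tries) {
		if (read_reg() & CTRL_RESET) {
			delay_us(20);
			continue;
		}
		break;
	}
	return tries ? 0 : -1;
}

int main(void)
{
	printf("reset %s\n", global_reset() == 0 ? "ok" : "failed");
	return 0;
}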
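One more pattern worth isolating: qec_interrupt() services all four channels from a single latched status word, with each channel's pending bits packed into one nibble, so the handler just shifts the word right four bits per channel. A short sketch of that demultiplexing follows; NCHANNELS, service_channel() and demux() are illustrative names for this example only.

/* Stand-alone sketch of the nibble-per-channel status demux. */
#include <stdio.h>

#define NCHANNELS 4

/* Placeholder for the real per-channel service work. */
static void service_channel(int channel, unsigned int nibble)
{
	printf("channel %d: status 0x%x\n", channel, nibble);
}

static void demux(unsigned int status)
{
	int channel = 0;

	while (channel < NCHANNELS) {
		if (status & 0xf)               /* anything pending here? */
			service_channel(channel, status & 0xf);
		status >>= 4;                   /* next channel's nibble */
		channel++;
	}
}

int main(void)
{
	demux(0xa003u);  /* channel 0 has status 0x3, channel 3 has 0xa */
	return 0;
}

Latching the status register once and then walking the local copy, as qec_interrupt() does, keeps the handler consistent even if new events arrive while earlier channels are being serviced.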