sunqe.c

来自「powerpc内核mpc8241linux系统下net驱动程序」· C语言 代码 · 共 1,244 行 · 第 1/3 页

C
1,244
字号
/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

/* Driver version banner (printed at probe time elsewhere in this file). */
static char *version =
        "sunqe.c:v1.1 8/Nov/96 David S. Miller (davem@caipfs.rutgers.edu)\n";

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "sunqe.h"

#ifdef MODULE
/* Head of the list of probed QEC controllers, used for module unload. */
static struct sunqec *root_qec_dev = NULL;
#endif

/* Maximum number of 20us polls to wait for a self-clearing reset bit. */
#define QEC_RESET_TRIES 200

/* Reset the global QEC controller.
 *
 * Writes the reset bit into the global control register and polls
 * (20us per iteration, up to QEC_RESET_TRIES times) until the
 * hardware clears it again.
 *
 * Returns 0 on success, -1 if the reset bit never clears.
 */
static inline int qec_global_reset(struct qe_globreg *gregs)
{
	int tries = QEC_RESET_TRIES;

	gregs->ctrl = GLOB_CTRL_RESET;
	while(--tries) {
		if(gregs->ctrl & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if(tries)
		return 0;
	printk("QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

/* Poll budgets for the per-channel MACE and QE resets below. */
#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

/* Quiesce one QE channel: reset its AMD MACE first, then the QEC
 * channel registers.  Both reset bits are self-clearing and are
 * polled the same way as in qec_global_reset() above.
 *
 * Returns 0 on success, -1 if either reset times out.
 */
static inline int qe_stop(struct sunqe *qep)
{
	struct qe_creg *cregs = qep->qcregs;
	struct qe_mregs *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. 
 */
	mregs->bconfig = MREGS_BCONFIG_RESET;
	tries = MACE_RESET_RETRIES;
	while(--tries) {
		if(mregs->bconfig & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if(!tries) {
		printk("QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	/* MACE is quiet, now reset the QEC channel itself. */
	cregs->ctrl = CREG_CTRL_RESET;
	tries = QE_RESET_RETRIES;
	while(--tries) {
		if(cregs->ctrl & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if(!tries) {
		printk("QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

/* Release every sk_buff still attached to the rx and tx rings,
 * NULLing each slot so a later call cannot free it twice.
 */
static inline void qe_clean_rings(struct sunqe *qep)
{
	int i;

	for(i = 0; i < RX_RING_SIZE; i++) {
		if(qep->rx_skbs[i] != NULL) {
			dev_kfree_skb(qep->rx_skbs[i]);
			qep->rx_skbs[i] = NULL;
		}
	}

	for(i = 0; i < TX_RING_SIZE; i++) {
		if(qep->tx_skbs[i] != NULL) {
			dev_kfree_skb(qep->tx_skbs[i]);
			qep->tx_skbs[i] = NULL;
		}
	}
}

/* (Re)build the rx/tx descriptor rings for the non-sun4c case.
 *
 * Old skbs are freed, fresh DMA-capable skbs are allocated for each
 * rx slot, and ownership of the rx descriptors is handed to the chip
 * (RXD_OWN).  Tx descriptors are simply zeroed.
 *
 * @from_irq: non-zero when called from interrupt context, which (like
 *            in_interrupt()) forces GFP_ATOMIC allocations.
 */
static void qe_init_rings(struct sunqe *qep, int from_irq)
{
	struct qe_init_block *qb = qep->qe_block;
	struct device *dev = qep->dev;
	int i, gfp_flags = GFP_KERNEL;

	if(from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;

	qe_clean_rings(qep);

	for(i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = qe_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags | GFP_DMA);
		if(!skb)
			continue;	/* slot stays empty; chip never owns it */
		qep->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, ETH_FRAME_LEN);
		/* 34-byte headroom to match the hardware's buffer layout —
		 * presumably alignment for the chip's DMA; TODO confirm
		 * against qe_alloc_skb() and the QEC docs.
		 */
		skb_reserve(skb, 34);
		qb->qe_rxd[i].rx_addr = sbus_dvma_addr(skb->data);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
	}

	for(i = 0; i < TX_RING_SIZE; i++)
		qb->qe_txd[i].tx_flags = qb->qe_txd[i].tx_addr = 0;
}

/* sun4c variant of ring setup: instead of per-skb DMA buffers, rx
 * descriptors point into a single pre-allocated sunqe_buffers area
 * (qep->sun4c_buffers / s4c_buf_dvma).  Only SUN4C_RX_RING_SIZE rx
 * slots are armed; the remaining rx and all tx descriptors are zeroed.
 */
static void sun4c_qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->sun4c_buffers;
	__u32 qbufs_dvma = qep->s4c_buf_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;

	memset(qbufs, 0, sizeof(struct sunqe_buffers));

	for(i = 0; i < RX_RING_SIZE; i++)
		qb->qe_rxd[i].rx_flags = qb->qe_rxd[i].rx_addr = 0;

	for(i = 0; i < SUN4C_RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((SUN4C_RX_BUFF_SIZE) & RXD_LENGTH));
	}

	for(i = 0; i < TX_RING_SIZE; i++)
		qb->qe_txd[i].tx_flags = qb->qe_txd[i].tx_addr = 0;
}

/* Bring one QE channel fully up: stop it, program the QEC channel
 * registers (descriptor ring bases, irq masks, FIFO pointers), then
 * configure the AMD MACE (address, filter, PHY) and enable rx/tx.
 *
 * The register writes below are order-sensitive MMIO pokes — do not
 * reorder them.
 *
 * Returns 0 on success, -EAGAIN if the channel cannot be stopped.
 */
static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	struct qe_creg *cregs = qep->qcregs;
	struct qe_mregs *mregs = qep->mregs;
	struct qe_globreg *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	volatile unsigned char garbage;	/* sink for a clear-on-read register */
	int i;

	/* Shut it up. */
	if(qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	cregs->rxds = qep->qblock_dvma + qib_offset(qe_rxd, 0);
	cregs->txds = qep->qblock_dvma + qib_offset(qe_txd, 0);

	/* Enable the various irq's. */
	cregs->rimask = 0;
	cregs->timask = 0;
	cregs->qmask = 0;
	cregs->mmask = CREG_MMASK_RXCOLL;

	/* Setup the FIFO pointers into QEC local memory.  Each channel
	 * gets its own msize-sized slice of the shared local memory.
	 */
	cregs->rxwbufptr = cregs->rxrbufptr = qep->channel * gregs->msize;
	cregs->txwbufptr = cregs->txrbufptr = cregs->rxrbufptr + gregs->rsize;

	/* Clear the channel collision counter. */
	cregs->ccnt = 0;

	/* For 10baseT, inter frame space nor throttle seems to be necessary. */
	cregs->pipg = 0;

	/* Now dork with the AMD MACE. */
	mregs->txfcntl = MREGS_TXFCNTL_AUTOPAD; /* Save us some tx work. */
	mregs->rxfcntl = 0;

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	mregs->imask = (MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ);
	mregs->bconfig = (MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS);
	mregs->fconfig = (MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
			  MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU);

	/* Only usable interface on QuadEther is twisted pair. */
	mregs->plsconfig = (MREGS_PLSCONFIG_TP);

	/* Tell MACE we are changing the ether address. 
 */
	mregs->iaconfig = (MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET);
	/* Six sequential writes to the same register — ethaddr looks like
	 * an auto-incrementing port into the MACE's address RAM; verify
	 * against the MACE datasheet.
	 */
	mregs->ethaddr = e[0];
	mregs->ethaddr = e[1];
	mregs->ethaddr = e[2];
	mregs->ethaddr = e[3];
	mregs->ethaddr = e[4];
	mregs->ethaddr = e[5];

	/* Clear out the address filter. */
	mregs->iaconfig = (MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET);
	for(i = 0; i < 8; i++) mregs->filter = 0;

	/* Address changes are now complete. */
	mregs->iaconfig = 0;

	/* Ring setup differs on sun4c (shared static buffers vs per-skb DMA). */
	if(sparc_cpu_model == sun4c)
		sun4c_qe_init_rings(qep);
	else
		qe_init_rings(qep, from_irq);

	/* Wait a little bit for the link to come up... */
	if(!(mregs->phyconfig & MREGS_PHYCONFIG_LTESTDIS)) {
		mdelay(5);
		if(!(mregs->phyconfig & MREGS_PHYCONFIG_LSTAT))
			printk("%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	garbage = mregs->mpcnt;

	/* Turn on the MACE receiver and transmitter. */
	mregs->mconfig = (MREGS_MCONFIG_TXENAB | MREGS_MCONFIG_RXENAB);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
*/static int qe_is_bolixed(struct sunqe *qep, unsigned int qe_status){	struct device *dev = qep->dev;	int mace_hwbug_workaround = 0;	if(qe_status & CREG_STAT_EDEFER) {		printk("%s: Excessive transmit defers.\n", dev->name);		qep->net_stats.tx_errors++;	}	if(qe_status & CREG_STAT_CLOSS) {		printk("%s: Carrier lost, link down?\n", dev->name);		qep->net_stats.tx_errors++;		qep->net_stats.tx_carrier_errors++;	}	if(qe_status & CREG_STAT_ERETRIES) {		printk("%s: Excessive transmit retries (more than 16).\n", dev->name);		qep->net_stats.tx_errors++;		mace_hwbug_workaround = 1;	}	if(qe_status & CREG_STAT_LCOLL) {		printk("%s: Late transmit collision.\n", dev->name);		qep->net_stats.tx_errors++;		qep->net_stats.collisions++;		mace_hwbug_workaround = 1;	}	if(qe_status & CREG_STAT_FUFLOW) {		printk("%s: Transmit fifo underflow, driver bug.\n", dev->name);		qep->net_stats.tx_errors++;		mace_hwbug_workaround = 1;	}	if(qe_status & CREG_STAT_JERROR) {		printk("%s: Jabber error.\n", dev->name);	}	if(qe_status & CREG_STAT_BERROR) {		printk("%s: Babble error.\n", dev->name);	}	if(qe_status & CREG_STAT_CCOFLOW) {		qep->net_stats.tx_errors += 256;		qep->net_stats.collisions += 256;	}	if(qe_status & CREG_STAT_TXDERROR) {		printk("%s: Transmit descriptor is bogus, driver bug.\n", dev->name);		qep->net_stats.tx_errors++;		qep->net_stats.tx_aborted_errors++;		mace_hwbug_workaround = 1;	}	if(qe_status & CREG_STAT_TXLERR) {		printk("%s: Transmit late error.\n", dev->name);		qep->net_stats.tx_errors++;		mace_hwbug_workaround = 1;	}	if(qe_status & CREG_STAT_TXPERR) {		printk("%s: Transmit DMA parity error.\n", dev->name);		qep->net_stats.tx_errors++;		qep->net_stats.tx_aborted_errors++;		mace_hwbug_workaround = 1;	}	if(qe_status & CREG_STAT_TXSERR) {		printk("%s: Transmit DMA sbus error ack.\n", dev->name);		qep->net_stats.tx_errors++;		qep->net_stats.tx_aborted_errors++;		mace_hwbug_workaround = 1;	}	if(qe_status & CREG_STAT_RCCOFLOW) {		qep->net_stats.rx_errors += 256;		
qep->net_stats.collisions += 256;	}	if(qe_status & CREG_STAT_RUOFLOW) {		qep->net_stats.rx_errors += 256;		qep->net_stats.rx_over_errors += 256;	}	if(qe_status & CREG_STAT_MCOFLOW) {		qep->net_stats.rx_errors += 256;		qep->net_stats.rx_missed_errors += 256;	}	if(qe_status & CREG_STAT_RXFOFLOW) {		printk("%s: Receive fifo overflow.\n", dev->name);		qep->net_stats.rx_errors++;		qep->net_stats.rx_over_errors++;	}	if(qe_status & CREG_STAT_RLCOLL) {		printk("%s: Late receive collision.\n", dev->name);		qep->net_stats.rx_errors++;		qep->net_stats.collisions++;	}	if(qe_status & CREG_STAT_FCOFLOW) {		qep->net_stats.rx_errors += 256;		qep->net_stats.rx_frame_errors += 256;	}	if(qe_status & CREG_STAT_CECOFLOW) {		qep->net_stats.rx_errors += 256;		qep->net_stats.rx_crc_errors += 256;	}	if(qe_status & CREG_STAT_RXDROP) {		printk("%s: Receive packet dropped.\n", dev->name);		qep->net_stats.rx_errors++;		qep->net_stats.rx_dropped++;		qep->net_stats.rx_missed_errors++;	}	if(qe_status & CREG_STAT_RXSMALL) {		printk("%s: Receive buffer too small, driver bug.\n", dev->name);		qep->net_stats.rx_errors++;		qep->net_stats.rx_length_errors++;	}	if(qe_status & CREG_STAT_RXLERR) {		printk("%s: Receive late error.\n", dev->name);		qep->net_stats.rx_errors++;		mace_hwbug_workaround = 1;	}	if(qe_status & CREG_STAT_RXPERR) {		printk("%s: Receive DMA parity error.\n", dev->name);		qep->net_stats.rx_errors++;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?