declance.c

From "Linux kernel source code" · C code · 1,382 lines total · page 1/3

C
1,382
Font size
/* --- tail of lance_init_ring(): the function opens earlier in the file --- */
	/* Last 16-bit word of the station address (bytes 4 and 5). */
	*lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
					       dev->dev_addr[4];

	/* Setup the initialization block */

	/* Setup rx descriptor pointer: the LANCE sees ring addresses as
	 * offsets into the init block; the ring order (log2 of the ring
	 * size) is packed into the top 3 bits of the length word.
	 */
	leptr = offsetof(struct lance_init_block, brx_ring);
	*lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
					 (leptr >> 16);
	*lib_ptr(ib, rx_ptr, lp->type) = leptr;
	if (ZERO)
		printk("RX ptr: %8.8x(%8.8x)\n",
		       leptr, lib_off(brx_ring, lp->type));

	/* Setup tx descriptor pointer (same packing as the RX side). */
	leptr = offsetof(struct lance_init_block, btx_ring);
	*lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
					 (leptr >> 16);
	*lib_ptr(ib, tx_ptr, lp->type) = leptr;
	if (ZERO)
		printk("TX ptr: %8.8x(%8.8x)\n",
		       leptr, lib_off(btx_ring, lp->type));

	if (ZERO)
		printk("TX rings:\n");

	/* Setup the Tx ring entries: host owns them all (no LE_T1_OWN),
	 * tmd0/tmd1 carry the 24-bit LANCE-side buffer address.
	 */
	for (i = 0; i < TX_RING_SIZE; i++) {
		leptr = lp->tx_buf_ptr_lnc[i];
		*lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
		*lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
							   0xff;
		*lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
						/* The ones required by tmd2 */
		*lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
		if (i < 3 && ZERO)
			printk("%d: 0x%8.8x(0x%8.8x)\n",
			       i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
	}

	/* Setup the Rx ring entries: handed to the chip (LE_R1_OWN set),
	 * length is the two's complement of the buffer size with the
	 * mandatory top four bits set.
	 */
	if (ZERO)
		printk("RX rings:\n");
	for (i = 0; i < RX_RING_SIZE; i++) {
		leptr = lp->rx_buf_ptr_lnc[i];
		*lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
		*lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
							    0xff) |
							   LE_R1_OWN;
		*lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
							     0xf000;
		*lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
		if (i < 3 && ZERO)
			printk("%d: 0x%8.8x(0x%8.8x)\n",
			       i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
	}
	/* Flush the descriptor writes out to the shared buffer memory
	 * before the chip is started.
	 */
	iob();
}

/*
 * Kick the LANCE with the INIT command and wait for it to finish reading
 * the initialization block, then start it and enable interrupts.
 *
 * Returns 0 on success, -1 if initialization did not complete (timeout
 * or CSR0 error bit set).
 */
static int init_restart_lance(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	int i;

	writereg(&ll->rap, LE_CSR0);
	writereg(&ll->rdp, LE_C0_INIT);

	/* Wait for the lance to complete initialization: poll IDON in
	 * CSR0 for up to 100 * 10us.
	 */
	for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
		udelay(10);
	}
	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
		       i, ll->rdp);
		return -1;
	}
	/* NOTE(review): this second check looks redundant with the one
	 * above; since rdp is a volatile register read it does catch an
	 * error raised between the two reads — confirm intent before
	 * removing.
	 */
	if ((ll->rdp & LE_C0_ERR)) {
		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
		       i, ll->rdp);
		return -1;
	}

	/* Ack IDON, then start the chip and enable its interrupts. */
	writereg(&ll->rdp, LE_C0_IDON);
	writereg(&ll->rdp, LE_C0_STRT);
	writereg(&ll->rdp, LE_C0_INEA);

	return 0;
}

/*
 * Receive path: drain every descriptor the chip has handed back to the
 * host (LE_R1_OWN clear), copy each good frame into a fresh skb and pass
 * it up the stack, then return the descriptor to the chip.
 *
 * Called from the interrupt handler on RINT.  Always returns 0.
 */
static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	volatile u16 *rd;
	unsigned short bits;
	int entry, len;
	struct sk_buff *skb;

#ifdef TEST_HITS
	/* Debug aid: dump ring ownership, marking the current rx slot. */
	{
		int i;

		printk("[");
		for (i = 0; i < RX_RING_SIZE; i++) {
			if (i == lp->rx_new)
				printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
						      lp->type) &
					     LE_R1_OWN ? "_" : "X");
			else
				printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
						      lp->type) &
					     LE_R1_OWN ? "." : "1");
		}
		printk("]");
	}
#endif

	/* Walk the ring until we reach a descriptor still owned by the
	 * chip.  rmd1 is re-read each iteration into 'bits'.
	 */
	for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
	     !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
	     rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
		entry = lp->rx_new;

		/* We got an incomplete frame? (STP+ENP not both set) */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			/* Good frame: length from mblength minus the
			 * 4-byte FCS.
			 */
			len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
			skb = dev_alloc_skb(len + 2);

			if (skb == 0) {
				/* Out of memory: drop the frame but hand
				 * the descriptor straight back to the chip
				 * and bail out of the loop.
				 */
				printk("%s: Memory squeeze, deferring packet.\n",
				       dev->name);
				dev->stats.rx_dropped++;
				*rds_ptr(rd, mblength, lp->type) = 0;
				*rds_ptr(rd, rmd1, lp->type) =
					((lp->rx_buf_ptr_lnc[entry] >> 16) &
					 0xff) | LE_R1_OWN;
				lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
				return 0;
			}
			dev->stats.rx_bytes += len;

			skb_reserve(skb, 2);	/* 16 byte align */
			skb_put(skb, len);	/* make room */

			cp_from_buf(lp->type, skb->data,
				    (char *)lp->rx_buf_ptr_cpu[entry], len);

			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			dev->stats.rx_packets++;
		}

		/* Return the packet to the pool: clear the byte count,
		 * rewrite the length word, and give ownership back to
		 * the chip.
		 */
		*rds_ptr(rd, mblength, lp->type) = 0;
		*rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
		*rds_ptr(rd, rmd1, lp->type) =
			((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
		lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
	}
	return 0;
}

/*
 * Transmit-completion path: reap finished descriptors between tx_old and
 * tx_new, account errors/collisions, and restart the whole adapter on the
 * fatal error classes (carrier lost, buffer error, underflow).
 *
 * Called from the interrupt handler on TINT; takes lp->lock.
 */
static void lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	volatile struct lance_regs *ll = lp->ll;
	volatile u16 *td;
	int i, j;
	int status;

	j = lp->tx_old;

	spin_lock(&lp->lock);

	for (i = j; i != lp->tx_new; i = j) {
		td = lib_ptr(ib, btx_ring[i], lp->type);
		/* If we hit a packet not owned by us, stop */
		if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
			break;

		if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
			status = *tds_ptr(td, misc, lp->type);

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				printk("%s: Carrier Lost\n", dev->name);
				/* Stop the lance, reinitialize the rings
				 * and restart it.  Note: tx_old is NOT
				 * updated on this path (the ring is
				 * rebuilt from scratch).
				 */
				writereg(&ll->rap, LE_CSR0);
				writereg(&ll->rdp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				goto out;
			}
			/* Buffer errors and underflows turn off the
			 * transmitter, restart the adapter.
			 */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
				       dev->name);
				/* Stop the lance */
				writereg(&ll->rap, LE_CSR0);
				writereg(&ll->rdp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				goto out;
			}
		} else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
			   LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			*tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & TX_RING_MOD_MASK;
	}
	lp->tx_old = j;
out:
	/* Reaping may have freed ring space: wake the queue if it was
	 * stopped and descriptors are now available.
	 */
	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL > 0)
		netif_wake_queue(dev);

	spin_unlock(&lp->lock);
}

/*
 * I/O ASIC DMA memory-error interrupt: just log it; recovery is left to
 * the normal error paths.
 */
static irqreturn_t lance_dma_merr_int(const int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	printk("%s: DMA error\n", dev->name);
	return IRQ_HANDLED;
}

/*
 * Main LANCE interrupt handler: read CSR0, ack the interrupt sources,
 * dispatch to the RX/TX handlers, account babble/miss errors, and fully
 * reinitialize the chip on a memory error.
 */
static irqreturn_t lance_interrupt(const int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int csr0;

	writereg(&ll->rap, LE_CSR0);
	csr0 = ll->rdp;

	/* Acknowledge all the interrupt sources ASAP */
	writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
			 LE_C0_CERR | LE_C0_MERR);
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;

	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;

	if (csr0 & LE_C0_MERR) {
		printk("%s: Memory error, status %04x\n", dev->name, csr0);

		/* Stop, rebuild the rings, and restart the chip. */
		writereg(&ll->rdp, LE_C0_STOP);

		lance_init_ring(dev);
		load_csrs(lp);
		init_restart_lance(lp);
		netif_wake_queue(dev);
	}

	/* NOTE(review): INEA is written twice here; presumably deliberate
	 * for this hardware (e.g. posting the write) — confirm before
	 * simplifying.
	 */
	writereg(&ll->rdp, LE_C0_INEA);
	writereg(&ll->rdp, LE_C0_INEA);
	return IRQ_HANDLED;
}

/* Last device opened; updated by lance_open() below. */
struct net_device *last_dev = 0;

/*
 * ndo_open: stop the chip, program mode and multicast filter, build the
 * rings, claim the IRQs (and enable I/O ASIC DMA where present), then
 * initialize and start the LANCE.
 *
 * Returns 0 on success, -EAGAIN if an IRQ cannot be claimed, or the
 * init_restart_lance() result.
 */
static int lance_open(struct net_device *dev)
{
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status = 0;

	last_dev = dev;

	/* Stop the Lance */
	writereg(&ll->rap, LE_CSR0);
	writereg(&ll->rdp, LE_C0_STOP);

	/* Set mode and clear multicast filter only at device open,
	 * so that lance_init_ring() called at any error will not
	 * forget multicast filters.
	 *
	 * BTW it is common bug in all lance drivers! --ANK
	 */
	*lib_ptr(ib, mode, lp->type) = 0;
	*lib_ptr(ib, filter[0], lp->type) = 0;
	*lib_ptr(ib, filter[1], lp->type) = 0;
	*lib_ptr(ib, filter[2], lp->type) = 0;
	*lib_ptr(ib, filter[3], lp->type) = 0;

	lance_init_ring(dev);
	load_csrs(lp);

	netif_start_queue(dev);

	/* Associate IRQ with lance_interrupt */
	if (request_irq(dev->irq, &lance_interrupt, 0, "lance", dev)) {
		printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (lp->dma_irq >= 0) {
		/* Boards with an I/O ASIC also have a DMA-error IRQ; on
		 * failure give back the main IRQ claimed above.
		 */
		unsigned long flags;

		if (request_irq(lp->dma_irq, &lance_dma_merr_int, 0,
				"lance error", dev)) {
			free_irq(dev->irq, dev);
			printk("%s: Can't get DMA IRQ %d\n", dev->name,
				lp->dma_irq);
			return -EAGAIN;
		}

		spin_lock_irqsave(&ioasic_ssr_lock, flags);

		fast_mb();
		/* Enable I/O ASIC LANCE DMA.  */
		ioasic_write(IO_REG_SSR,
			     ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);

		fast_mb();
		spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
	}

	status = init_restart_lance(lp);
	return status;
}

/*
 * ndo_stop: quiesce the queue and multicast timer, stop the chip,
 * disable I/O ASIC DMA where present, and release the IRQs.
 */
static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	writereg(&ll->rap, LE_CSR0);
	writereg(&ll->rdp, LE_C0_STOP);

	if (lp->dma_irq >= 0) {
		unsigned long flags;

		spin_lock_irqsave(&ioasic_ssr_lock, flags);

		fast_mb();
		/* Disable I/O ASIC LANCE DMA.  */
		ioasic_write(IO_REG_SSR,
			     ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);

		fast_iob();
		spin_unlock_irqrestore(&ioasic_ssr_lock, flags);

		free_irq(lp->dma_irq, dev);
	}
	free_irq(dev->irq, dev);
	return 0;
}

/*
 * Full adapter reset: stop, rebuild the rings, reload the CSRs, refresh
 * trans_start, and restart the chip.  Returns init_restart_lance()'s
 * status.
 */
static inline int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	writereg(&ll->rap, LE_CSR0);
	writereg(&ll->rdp, LE_C0_STOP);

	lance_init_ring(dev);
	load_csrs(lp);
	dev->trans_start = jiffies;
	status = init_restart_lance(lp);
	return status;
}

/*
 * ndo_tx_timeout: log the stuck CSR0 value, reset the adapter, and wake
 * the transmit queue.
 */
static void lance_tx_timeout(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
		dev->name, ll->rdp);
	lance_reset(dev);
	netif_wake_queue(dev);
}

/* --- lance_start_xmit(): the remainder of this function is on the next
 * page of this capture ---
 *
 * ndo_start_xmit: pad short frames to ETH_ZLEN, fill the next tx
 * descriptor, copy the frame into the chip-visible buffer, then hand the
 * descriptor to the LANCE.
 */
static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	int entry, len;

	len = skb->len;

	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return 0;
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;

	entry = lp->tx_new;
	/* Descriptor length is the two's complement of the frame length. */
	*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
	*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;

	cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);

	/* Now, give the packet to the lance */
	*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?