📄 eepro100_cb.c

📁 pcmcia source code
💻 C
📖 Page 1 of 5
        /* We haven't received a packet in a Long Time.  We might have been
           bitten by the receiver hang bug.  This can be cleared by sending
           a set multicast list command. */
        set_rx_mode(dev);
    }
    /* We must continue to monitor the media. */
    sp->timer.expires = jiffies + 2*HZ;
    add_timer(&sp->timer);
}

static void speedo_show_state(struct net_device *dev)
{
    struct speedo_private *sp = (struct speedo_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int phy_num = sp->phy[0] & 0x1f;
    int i;

    /* Print a few items for debugging. */
    if (speedo_debug > 0) {
        int i;
        printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %d / %d:\n", dev->name,
               sp->cur_tx, sp->dirty_tx);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk(KERN_DEBUG "%s: %c%c%d %8.8x.\n", dev->name,
                   i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
                   i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
                   i, sp->tx_ring[i].status);
    }
    printk(KERN_DEBUG "%s: Printing Rx ring (next to receive into %d).\n",
           dev->name, sp->cur_rx);
    for (i = 0; i < RX_RING_SIZE; i++)
        printk(KERN_DEBUG "  Rx ring entry %d  %8.8x.\n",
               i, (int)sp->rx_ringp[i]->status);
    for (i = 0; i < 16; i++) {
        if (i == 6) i = 21;
        printk(KERN_DEBUG "  PHY index %d register %d is %4.4x.\n",
               phy_num, i, mdio_read(ioaddr, phy_num, i));
    }
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct net_device *dev)
{
    struct speedo_private *sp = (struct speedo_private *)dev->priv;
    struct RxFD *rxf, *last_rxf = NULL;
    int i;

    sp->cur_rx = 0;
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb;
        skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
        sp->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;              /* OK.  Just initially short of Rx bufs. */
        skb->dev = dev;         /* Mark as being used by this device. */
        rxf = (struct RxFD *)skb->tail;
        sp->rx_ringp[i] = rxf;
        skb_reserve(skb, sizeof(struct RxFD));
        if (last_rxf)
            last_rxf->link = virt_to_le32desc(rxf);
        last_rxf = rxf;
        rxf->status = cpu_to_le32(0x00000001);  /* '1' is flag value only. */
        rxf->link = 0;                          /* None yet. */
        /* This field unused by i82557, we use it as a consistency check. */
#ifdef final_version
        rxf->rx_buf_addr = 0xffffffff;
#else
        rxf->rx_buf_addr = virt_to_bus(skb->tail);
#endif
        rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
    }
    sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
    /* Mark the last entry as end-of-list.  Guard against the pathological
       case where not even the first Rx buffer could be allocated, which
       would otherwise dereference a NULL last_rxf. */
    if (last_rxf) {
        last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
        sp->last_rxf = last_rxf;
    }
}
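
/* Transmit-timeout recovery.  The SCB status decides between two paths in
   speedo_tx_timeout() below: if only the command unit has stopped, a CUStart
   aimed at the first un-reaped Tx descriptor would suffice (note the trailing
   "&& 0" keeps that branch compiled out); otherwise the chip gets a full
   PortReset followed by speedo_resume().  In either case the MII transceiver
   is then reset and restored to its previous BMCR/advertising settings. */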

static void speedo_tx_timeout(struct net_device *dev)
{
    struct speedo_private *sp = (struct speedo_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int status = inw(ioaddr + SCBStatus);

    printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
           " %4.4x at %d/%d commands %8.8x %8.8x %8.8x.\n",
           dev->name, status, inw(ioaddr + SCBCmd),
           sp->dirty_tx, sp->cur_tx,
           sp->tx_ring[(sp->dirty_tx+0) % TX_RING_SIZE].status,
           sp->tx_ring[(sp->dirty_tx+1) % TX_RING_SIZE].status,
           sp->tx_ring[(sp->dirty_tx+2) % TX_RING_SIZE].status);

    /* Trigger a stats dump to give time before the reset. */
    speedo_get_stats(dev);
    {
        int stat;
        for (stat = 0; stat < sizeof(struct speedo_stats); stat += 4)
            printk("%d ", *(int *)((int)&sp->lstats + stat));
        printk("\n");
    }
    /* speedo_show_state(dev); */
    if ((status & 0x00C0) != 0x0080
        &&  (status & 0x003C) == 0x0010  &&  0) {
        /* Only the command unit has stopped. */
        printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
               dev->name);
        outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
             ioaddr + SCBPointer);
        outw(CUStart, ioaddr + SCBCmd);
    } else {
        /* Reset the Tx and Rx units. */
        outl(PortReset, ioaddr + SCBPort);
        /* if (speedo_debug > 0)
               speedo_show_state(dev); */
        udelay(10);
        speedo_resume(dev);
    }
    /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
    if ((sp->phy[0] & 0x8000) == 0) {
        int phy_addr = sp->phy[0] & 0x1f;
        int advertising = mdio_read(ioaddr, phy_addr, 4);
        int mii_bmcr = mdio_read(ioaddr, phy_addr, 0);
        mdio_write(ioaddr, phy_addr, 0, 0x0400);
        mdio_write(ioaddr, phy_addr, 1, 0x0000);
        mdio_write(ioaddr, phy_addr, 4, 0x0000);
        mdio_write(ioaddr, phy_addr, 0, 0x8000);
#ifdef honor_default_port
        mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
#else
        mdio_read(ioaddr, phy_addr, 0);
        mdio_write(ioaddr, phy_addr, 0, mii_bmcr);
        mdio_write(ioaddr, phy_addr, 4, advertising);
#endif
    }
    sp->stats.tx_errors++;
    dev->trans_start = jiffies;
    return;
}

static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct speedo_private *sp = (struct speedo_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int entry;

    tx_timeout_check(dev, speedo_tx_timeout);

    /* Caution: the write order is important here, set the base address
       with the "ownership" bits last. */

    {   /* Prevent interrupts from changing the Tx ring from underneath us. */
        unsigned long flags;

        spin_lock_irqsave(&sp->lock, flags);
        /* Calculate the Tx descriptor entry. */
        entry = sp->cur_tx % TX_RING_SIZE;

        sp->tx_skbuff[entry] = skb;
        /* Todo: be a little more clever about setting the interrupt bit. */
        sp->tx_ring[entry].status =
            cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
        sp->cur_tx++;
        sp->tx_ring[entry].link =
            virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
        /* We may nominally release the lock here. */
        sp->tx_ring[entry].tx_desc_addr =
            virt_to_le32desc(&sp->tx_ring[entry].tx_buf_addr0);
        /* The data region is always in one buffer descriptor. */
        sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
        sp->tx_ring[entry].tx_buf_addr0 = virt_to_le32desc(skb->data);
        sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
        /* Todo: perhaps leave the interrupt bit set if the Tx queue is more
           than half full.  Argument against: we should be receiving packets
           and scavenging the queue.  Argument for: if so, it shouldn't
           matter. */
        {
            struct descriptor *last_cmd = sp->last_cmd;
            sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
            clear_suspend(last_cmd);
        }
        if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT)
            sp->tx_full = 1;
        else
            netif_start_queue(dev);
        spin_unlock_irqrestore(&sp->lock, flags);
    }
    wait_for_cmd_done(ioaddr + SCBCmd);
    outb(CUResume, ioaddr + SCBCmd);
    dev->trans_start = jiffies;
    return 0;
}
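
/* Note on the Tx queueing above: every command is linked into the ring with
   CmdSuspend set, then the suspend bit of the previously last command is
   cleared and a CUResume is issued.  The command unit therefore always parks
   on the newest command and never runs off the end of the ring. */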

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct speedo_private *sp;
    long ioaddr, boguscnt = max_interrupt_work;
    unsigned short status;

#ifndef final_version
    if (dev == NULL) {
        printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }
#endif

    ioaddr = dev->base_addr;
    sp = (struct speedo_private *)dev->priv;
    do {
        status = inw(ioaddr + SCBStatus);
        /* Acknowledge all of the current interrupt sources ASAP. */
        outw(status & 0xfc00, ioaddr + SCBStatus);

        if (speedo_debug > 4)
            printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
                   dev->name, status);

        if ((status & 0xfc00) == 0)
            break;

        if (status & 0x5000)     /* Packet received, or Rx error. */
            speedo_rx(dev);

        if (status & 0x1000) {
            if ((status & 0x003c) == 0x0028) /* No more Rx buffers. */
                outb(RxResumeNoResources, ioaddr + SCBCmd);
            else if ((status & 0x003c) == 0x0008) { /* No resources (why?!) */
                /* No idea of what went wrong.  Restart the receiver. */
                outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
                     ioaddr + SCBPointer);
                outb(RxStart, ioaddr + SCBCmd);
            }
            sp->stats.rx_errors++;
        }

        /* User interrupt, Command/Tx unit interrupt or CU not active. */
        if (status & 0xA400) {
            unsigned int dirty_tx;
            /* We should nominally not need this lock. */
            spin_lock(&sp->lock);

            dirty_tx = sp->dirty_tx;
            while (sp->cur_tx - dirty_tx > 0) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = le32_to_cpu(sp->tx_ring[entry].status);

                if (speedo_debug > 5)
                    printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
                           entry, status);
                if ((status & StatusComplete) == 0) {
                    /* Special case error check: look for descriptor that the
                       chip skipped(?). */
                    if (sp->cur_tx - dirty_tx > 2  &&
                        (sp->tx_ring[(dirty_tx+1) % TX_RING_SIZE].status
                         & cpu_to_le32(StatusComplete))) {
                        printk(KERN_ERR "%s: Command unit failed to mark "
                               "command %8.8x as complete at %d.\n",
                               dev->name, status, dirty_tx);
                    } else
                        break;          /* It still hasn't been processed. */
                }
                if (status & TxUnderrun)
                    if (sp->tx_threshold < 0x01e08000)
                        sp->tx_threshold += 0x00040000;
                /* Free the original skb. */
                if (sp->tx_skbuff[entry]) {
                    sp->stats.tx_packets++; /* Count only user packets. */
#if LINUX_VERSION_CODE > 0x20127
                    sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
#endif
                    dev_kfree_skb_irq(sp->tx_skbuff[entry]);
                    sp->tx_skbuff[entry] = 0;
                } else if ((status & 0x70000) == CmdNOp)
                    sp->mc_setup_busy = 0;
                dirty_tx++;
            }

#ifndef final_version
            if (sp->cur_tx - dirty_tx > TX_RING_SIZE) {
                printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
                       " full=%d.\n",
                       dirty_tx, sp->cur_tx, sp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            sp->dirty_tx = dirty_tx;
            if (sp->tx_full
                &&  sp->cur_tx - dirty_tx < TX_QUEUE_UNFULL) {
                /* The ring is no longer full, clear tbusy. */
                sp->tx_full = 0;
                spin_unlock(&sp->lock);
                netif_wake_queue(dev);
            } else
                spin_unlock(&sp->lock);
        }

        if (--boguscnt < 0) {
            printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
                   dev->name, status);
            /* Clear all interrupt sources. */
            outl(0xfc00, ioaddr + SCBStatus);
            break;
        }
    } while (1);

    if (speedo_debug > 3)
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, inw(ioaddr + SCBStatus));
    return;
}
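
/* Rx strategy note: in speedo_rx() below, frames shorter than rx_copybreak
   are copied into a freshly allocated, correctly sized skbuff so the ring
   buffer stays in place; longer frames surrender the ring skbuff itself
   (sp->rx_skbuff[entry] is cleared) and are passed up without copying. */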

static int
speedo_rx(struct net_device *dev)
{
    struct speedo_private *sp = (struct speedo_private *)dev->priv;
    int entry = sp->cur_rx % RX_RING_SIZE;
    int status;
    int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;

    if (speedo_debug > 4)
        printk(KERN_DEBUG " In speedo_rx().\n");
    /* If we own the next entry, it's a new packet. Send it up. */
    while (sp->rx_ringp[entry] != NULL &&
           (status = le32_to_cpu(sp->rx_ringp[entry]->status)) & RxComplete) {
        int pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;

        if (--rx_work_limit < 0)
            break;
        if (speedo_debug > 4)
            printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
                   pkt_len);
        if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
            if (status & RxErrTooBig)
                printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
                       "status %8.8x!\n", dev->name, status);
            else if (!(status & RxOK)) {
                /* There was a fatal error.  This *should* be impossible. */
                sp->stats.rx_errors++;
                printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
                       "status %8.8x.\n", dev->name, status);
            }
        } else {
            struct sk_buff *skb;

            /* Check if the packet is long enough to just accept without
               copying to a properly sized skbuff. */
            if (pkt_len < rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
                skb->dev = dev;
                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                /* 'skb_put()' points to the start of sk_buff data area. */
#if 1 || USE_IP_CSUM
                /* Packet is in one chunk -- we can copy + cksum. */
                eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
                       pkt_len);
#endif
            } else {
                void *temp;
                /* Pass up the already-filled skbuff. */
                skb = sp->rx_skbuff[entry];
                if (skb == NULL) {
                    printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
                           dev->name);
                    break;
                }
                sp->rx_skbuff[entry] = NULL;
                temp = skb_put(skb, pkt_len);
#if !defined(final_version) && !defined(__powerpc__)
                if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp)
