
eepro100.c
Linux kernel source code (C)
Page 1 of 5
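Note: this page's excerpt opens partway through the interrupt handler, speedo_interrupt().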
		sp->in_interrupt = 0;	/* Avoid halting machine. */
		return IRQ_NONE;
	}
#endif

	do {
		status = ioread16(ioaddr + SCBStatus);
		/* Acknowledge all of the current interrupt sources ASAP. */
		/* Will change from 0xfc00 to 0xff00 when we start handling
		   FCP and ER interrupts --Dragan */
		iowrite16(status & 0xfc00, ioaddr + SCBStatus);

		if (netif_msg_intr(sp))
			printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
				   dev->name, status);

		if ((status & 0xfc00) == 0)
			break;
		handled = 1;

		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
			(sp->rx_ring_state & (RrNoMem|RrPostponed)) == RrPostponed)
									/* Need to gather the postponed packet. */
			speedo_rx(dev);

		/* Always check if all rx buffers are allocated.  --SAW */
		speedo_refill_rx_buffers(dev, 0);

		spin_lock(&sp->lock);
		/*
		 * The chip may have suspended reception for various reasons.
		 * Check for that, and re-prime it should this be the case.
		 */
		switch ((status >> 2) & 0xf) {
		case 0: /* Idle */
			break;
		case 1:	/* Suspended */
		case 2:	/* No resources (RxFDs) */
		case 9:	/* Suspended with no more RBDs */
		case 10: /* No resources due to no RBDs */
		case 12: /* Ready with no RBDs */
			speedo_rx_soft_reset(dev);
			break;
		case 3:  case 5:  case 6:  case 7:  case 8:
		case 11:  case 13:  case 14:  case 15:
			/* these are all reserved values */
			break;
		}

		/* User interrupt, Command/Tx unit interrupt or CU not active. */
		if (status & 0xA400) {
			speedo_tx_buffer_gc(dev);
			if (sp->tx_full
				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
				/* The ring is no longer full. */
				sp->tx_full = 0;
				netif_wake_queue(dev); /* Attention: under a spinlock.  --SAW */
			}
		}

		spin_unlock(&sp->lock);

		if (--boguscnt < 0) {
			printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
				   dev->name, status);
			/* Clear all interrupt sources. */
			/* Will change from 0xfc00 to 0xff00 when we start handling
			   FCP and ER interrupts --Dragan */
			iowrite16(0xfc00, ioaddr + SCBStatus);
			break;
		}
	} while (1);

	if (netif_msg_intr(sp))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));

	clear_bit(0, (void*)&sp->in_interrupt);
	return IRQ_RETVAL(handled);
}

static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rxf;
	struct sk_buff *skb;
	/* Get a fresh skbuff to replace the consumed one. */
	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
	if (skb)
		rx_align(skb);		/* Align IP on 16 byte boundary */
	sp->rx_skbuff[entry] = skb;
	if (skb == NULL) {
		sp->rx_ringp[entry] = NULL;
		return NULL;
	}
	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
	sp->rx_ring_dma[entry] =
		pci_map_single(sp->pdev, rxf,
			       PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
	skb->dev = dev;
	skb_reserve(skb, sizeof(struct RxFD));
	rxf->rx_buf_addr = 0xffffffff;
	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
				       sizeof(struct RxFD), PCI_DMA_TODEVICE);
	return rxf;
}

static inline void speedo_rx_link(struct net_device *dev, int entry,
				  struct RxFD *rxf, dma_addr_t rxf_dma)
{
	struct speedo_private *sp = netdev_priv(dev);
	rxf->status = cpu_to_le32(0xC0000001);	/* '1' for driver use only. */
	rxf->link = 0;			/* None yet. */
	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
	sp->last_rxf->link = cpu_to_le32(rxf_dma);
	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
	pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
				       sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = rxf;
	sp->last_rxf_dma = rxf_dma;
}
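/*
 * Note on the two helpers above: speedo_rx_alloc() lays the RxFD header
 * inside the skb's own data area (the skb is over-allocated by
 * sizeof(struct RxFD)), so a single DMA mapping covers both the
 * descriptor and the packet buffer.  speedo_rx_link() then appends the
 * fresh descriptor to the hardware list: it points the old tail's link
 * at rxf_dma and clears the tail's top two status bits (the 0xC0000000
 * mask, presumably the i8255x end-of-list/suspend bits) so the receive
 * unit can advance, then syncs the updated tail back to the device.
 */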
static int speedo_refill_rx_buf(struct net_device *dev, int force)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry;
	struct RxFD *rxf;

	entry = sp->dirty_rx % RX_RING_SIZE;
	if (sp->rx_skbuff[entry] == NULL) {
		rxf = speedo_rx_alloc(dev, entry);
		if (rxf == NULL) {
			unsigned int forw;
			int forw_entry;
			if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
				printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
					dev->name, force);
				sp->rx_ring_state |= RrOOMReported;
			}
			speedo_show_state(dev);
			if (!force)
				return -1;	/* Better luck next time!  */
			/* Borrow an skb from one of next entries. */
			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
					break;
			if (forw == sp->cur_rx)
				return -1;
			forw_entry = forw % RX_RING_SIZE;
			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
			sp->rx_skbuff[forw_entry] = NULL;
			rxf = sp->rx_ringp[forw_entry];
			sp->rx_ringp[forw_entry] = NULL;
			sp->rx_ringp[entry] = rxf;
		}
	} else {
		rxf = sp->rx_ringp[entry];
	}
	speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
	sp->dirty_rx++;
	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
	return 0;
}

static void speedo_refill_rx_buffers(struct net_device *dev, int force)
{
	struct speedo_private *sp = netdev_priv(dev);

	/* Refill the RX ring. */
	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
			speedo_refill_rx_buf(dev, force) != -1);
}
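/*
 * Note on refilling: speedo_refill_rx_buf() normally allocates a fresh
 * skb for the dirty slot.  If allocation fails and force is set, it
 * instead borrows the skb from the nearest still-filled entry further
 * along the ring, so the hardware list never runs completely dry even
 * though some slots stay empty until allocations succeed again.
 */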
static int
speedo_rx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry = sp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
	int alloc_ok = 1;
	int npkts = 0;

	if (netif_msg_intr(sp))
		printk(KERN_DEBUG " In speedo_rx().\n");
	/* If we own the next entry, it's a new packet. Send it up. */
	while (sp->rx_ringp[entry] != NULL) {
		int status;
		int pkt_len;

		pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
					    sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
		status = le32_to_cpu(sp->rx_ringp[entry]->status);
		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;

		if (!(status & RxComplete))
			break;

		if (--rx_work_limit < 0)
			break;

		/* Check for a rare out-of-memory case: the current buffer is
		   the last buffer allocated in the RX ring.  --SAW */
		if (sp->last_rxf == sp->rx_ringp[entry]) {
			/* Postpone the packet.  It'll be reaped at an interrupt when this
			   packet is no longer the last packet in the ring. */
			if (netif_msg_rx_err(sp))
				printk(KERN_DEBUG "%s: RX packet postponed!\n",
					   dev->name);
			sp->rx_ring_state |= RrPostponed;
			break;
		}

		if (netif_msg_rx_status(sp))
			printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
				   pkt_len);
		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
			if (status & RxErrTooBig)
				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
					   "status %8.8x!\n", dev->name, status);
			else if (! (status & RxOK)) {
				/* There was a fatal error.  This *should* be impossible. */
				sp->stats.rx_errors++;
				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
					   "status %8.8x.\n",
					   dev->name, status);
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
							    sizeof(struct RxFD) + pkt_len,
							    PCI_DMA_FROMDEVICE);

#if 1 || USE_IP_CSUM
				/* Packet is in one chunk -- we can copy + cksum. */
				skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_copy_from_linear_data(sp->rx_skbuff[entry],
							  skb_put(skb, pkt_len),
							  pkt_len);
#endif
				pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
							       sizeof(struct RxFD) + pkt_len,
							       PCI_DMA_FROMDEVICE);
				npkts++;
			} else {
				/* Pass up the already-filled skbuff. */
				skb = sp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
						   dev->name);
					break;
				}
				sp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				npkts++;
				sp->rx_ringp[entry] = NULL;
				pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
						 PKT_BUF_SZ + sizeof(struct RxFD),
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			sp->stats.rx_packets++;
			sp->stats.rx_bytes += pkt_len;
		}
		entry = (++sp->cur_rx) % RX_RING_SIZE;
		sp->rx_ring_state &= ~RrPostponed;
		/* Refill the recently taken buffers.
		   Do it one-by-one to handle traffic bursts better. */
		if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
			alloc_ok = 0;
	}

	/* Try hard to refill the recently taken buffers. */
	speedo_refill_rx_buffers(dev, 1);

	if (npkts)
		sp->last_rx_time = jiffies;

	return 0;
}
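/*
 * Note on the receive path above: frames shorter than rx_copybreak are
 * copied into a freshly allocated, right-sized skb so the ring buffer
 * can be reused at once; longer frames are passed up in place and the
 * ring slot is unmapped for later refill.  A frame sitting in the last
 * allocated descriptor is postponed (RrPostponed) rather than reaped,
 * since consuming it would leave the chip without a valid tail.
 */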
static int
speedo_close(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int i;

	netdevice_stop(dev);
	netif_stop_queue(dev);

	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer_sync(&sp->timer);

	iowrite16(SCBMaskAll, ioaddr + SCBCmd);

	/* Shutting down the chip nicely fails to disable flow control. So.. */
	iowrite32(PortPartialReset, ioaddr + SCBPort);
	ioread32(ioaddr + SCBPort); /* flush posted write */
	/*
	 * The chip requires a 10 microsecond quiet period.  Wait here!
	 */
	udelay(10);

	free_irq(dev->irq, dev);

	speedo_show_state(dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = NULL;
		/* Clear the Rx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 sp->rx_ring_dma[i],
					 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = NULL;
		/* Clear the Tx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
		}
	}

	/* Free multicast setting blocks. */
	for (i = 0; sp->mc_setup_head != NULL; i++) {
		struct speedo_mc_block *t;
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);

	pci_set_power_state(sp->pdev, PCI_D2);

	return 0;
}

/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip.  It takes an unpredictable length of time
   for the dump-stats command to complete.  To avoid a busy-wait loop we
   update the stats with the previous dump results, and then trigger a
   new dump.

   Oh, and incoming frames are dropped while executing dump-stats!
   */
static struct net_device_stats *
speedo_get_stats(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	/* Update only if the previous dump finished. */
	if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
		/*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
		sp->stats.collisions += le32_to_c
