
starfire.c
Collection: Drivers for various network cards under Linux
Language: C
Page 1 of 4
		if (jiffies - last_msg > 10*HZ) {
			last_msg = jiffies;
			printk(KERN_ERR "%s: The Starfire chip is missing!\n",
				   dev->name);
		}
	} else if (np->polling) {
		if (status & IntrPCIPin) {
			intr_handler(dev->irq, dev, 0);
			if (jiffies - last_msg > 10*HZ) {
				printk(KERN_ERR "%s: IRQ %d is still blocked!\n",
					   dev->name, dev->irq);
				last_msg = jiffies;
			}
		} else if (jiffies - last_msg > 10*HZ)
			np->polling = 0;
		np->timer.expires = jiffies + 2;
	} else if (status & IntrPCIPin) {
		int new_status = readl(ioaddr + IntrStatus);
		/* Bogus hardware IRQ mapping: Fake an interrupt handler call. */
		if (new_status & IntrPCIPin) {
			printk(KERN_ERR "%s: IRQ %d is not raising an interrupt! "
				   "Status %8.8x/%8.8x.\n",
				   dev->name, dev->irq, status, new_status);
			intr_handler(dev->irq, dev, 0);
			np->timer.expires = jiffies + 2;
			np->polling = 1;
		}
	} else if (netif_queue_paused(dev) &&
			   np->cur_tx - np->dirty_tx > 1 &&
			   (jiffies - dev->trans_start) > TX_TIMEOUT) {
		/* This will not catch tbusy incorrectly set when the queue is empty,
		 * but that state should never occur. */
		tx_timeout(dev);
	}
	check_duplex(dev);
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));

#if defined(__i386__)
	if (np->msg_level & NETIF_MSG_TX_ERR) {
		int i;
		printk("\n" KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x", np->tx_ring[i].status);
		printk("\n" KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].rxaddr);
		printk("\n");
	}
#endif

	/* If a specific problem is reported, reinitialize the hardware here. */
	dev->if_port = 0;
	/* Stop and restart the chip's Tx processes. */
	writel(0, ioaddr + GenCtrl);
	/* Enable the Rx and Tx units. */
	writel(TxEnable|RxEnable, ioaddr + GenCtrl);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}
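/* Illustrative sketch, not part of the driver: the stall test that the
 * media timer above applies before forcing tx_timeout().  jiffies is a
 * free-running counter, so the unsigned subtraction stays correct across
 * wraparound.  The helper name is hypothetical. */
static inline int tx_watchdog_expired(unsigned long trans_start)
{
	return jiffies - trans_start > TX_TIMEOUT;
}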
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	np->tx_full = 0;
	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;

	np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ :
					 (dev->mtu + 14 + 3) & ~3);	/* Round to word. */

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		/* Grrr, we cannot offset to correctly align the IP header. */
		np->rx_ring[i].rxaddr =
			virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
	}
	writew(i - 1, dev->base_addr + RxDescQIdx);
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Clear the remainder of the Rx buffer ring. */
	for ( ; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = 0;
		np->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing);

	/* Clear the completion rings. */
	for (i = 0; i < DONE_Q_SIZE; i++) {
		np->rx_done_q[i].status = 0;
		np->tx_done_q[i].status = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = 0;
	}
	return;
}

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned entry;

	/* Block a timer-based transmit from overlapping.  This happens when
	   packets are presumed lost, and we use this to check the Tx status. */
	if (netif_pause_tx_queue(dev) != 0) {
		/* This watchdog code is redundant with the media monitor timer. */
		if (jiffies - dev->trans_start > TX_TIMEOUT)
			tx_timeout(dev);
		return 1;
	}

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
	/* Add "| TxDescIntr" to generate Tx-done interrupts. */
	np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
#if 1
	if (entry >= TX_RING_SIZE-1) {		/* Wrap ring */
		np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
		entry = -1;
	}
#endif
	/* On some architectures better performance results by explicitly
	   flushing cache lines: pci_flush_virt(skb->data, skb->len); */
	np->cur_tx++;
	/* Update the producer index. */
	writel(++entry, dev->base_addr + TxProducerIdx);

	/* cf. using TX_QUEUE_LEN instead of TX_RING_SIZE here. */
	if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
		np->tx_full = 1;
		/* Check for the rare case of a just-cleared queue. */
		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
			< TX_RING_SIZE - 2) {
			np->tx_full = 0;
			netif_unpause_tx_queue(dev);
		} else
			netif_stop_tx_queue(dev);
	} else
		netif_unpause_tx_queue(dev);	/* Typical path */

	dev->trans_start = jiffies;
	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
		printk(KERN_DEBUG "%s: Tx frame #%d slot %d  %8.8x %8.8x.\n",
			   dev->name, np->cur_tx, entry,
			   np->tx_ring[entry].status, np->tx_ring[entry].addr);
	}
	return 0;
}
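/* Illustrative sketch, not part of the driver: cur_tx and dirty_tx above
 * are free-running unsigned counters, reduced with "% TX_RING_SIZE" only
 * when indexing the ring.  Their difference is the number of descriptors
 * still outstanding, and it remains correct after either counter wraps
 * because unsigned subtraction is modulo 2^32.  Helper names are
 * hypothetical. */
static inline unsigned int tx_in_flight(unsigned int cur_tx,
					unsigned int dirty_tx)
{
	return cur_tx - dirty_tx;	/* modulo-2^32 arithmetic */
}

static inline int tx_ring_nearly_full(unsigned int cur_tx,
				      unsigned int dirty_tx)
{
	/* Mirrors the "TX_RING_SIZE - 1" high-water mark tested above. */
	return tx_in_flight(cur_tx, dirty_tx) >= TX_RING_SIZE - 1;
}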
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int boguscnt;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk(KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
			   "device.\n", irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	np = (struct netdev_private *)dev->priv;
	boguscnt = np->max_interrupt_work;

	do {
		u32 intr_status = readl(ioaddr + IntrClear);
		if (np->msg_level & NETIF_MSG_INTR)
			printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
				   dev->name, intr_status);
		if (intr_status == 0 || intr_status == 0xffffffff)
			break;
		if (intr_status & IntrRxDone)
			netdev_rx(dev);

		/* Scavenge the skbuff list based on the Tx-done queue.
		   There are redundant checks here that may be cleaned up
		   after the driver has proven to be reliable. */
		{
			int consumer = readl(ioaddr + TxConsumerIdx);
			int tx_status;
			if (np->msg_level & NETIF_MSG_INTR)
				printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
					   dev->name, consumer);
#if 0
			if (np->tx_done >= 250 || np->tx_done == 0)
				printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, "
					   "%d is %8.8x.\n", dev->name,
					   np->tx_done, np->tx_done_q[np->tx_done].status,
					   (np->tx_done+1) & (DONE_Q_SIZE-1),
					   np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status);
#endif
			while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status))
				   != 0) {
				if (np->msg_level & NETIF_MSG_TX_DONE)
					printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
						   dev->name, np->tx_done, tx_status);
				if ((tx_status & 0xe0000000) == 0xa0000000) {
					np->stats.tx_packets++;
				} else if ((tx_status & 0xe0000000) == 0x80000000) {
					u16 entry = tx_status;	/* Implicit truncate */
					entry >>= 3;
					/* Scavenge the descriptor. */
					if (np->tx_skbuff[entry]) {
						dev_free_skb_irq(np->tx_skbuff[entry]);
					} else
						printk(KERN_WARNING "%s: Null skbuff at entry %d!!!\n",
							   dev->name, entry);
					np->tx_skbuff[entry] = 0;
					np->dirty_tx++;
				}
				np->tx_done_q[np->tx_done].status = 0;
				np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
			}
			writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
		}
		if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
			/* The ring is no longer full, allow new TX entries. */
			np->tx_full = 0;
			netif_resume_tx_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n",
				   dev->name, intr_status);
			writel(0x0021, ioaddr + IntrTimerCtrl);
			break;
		}
	} while (1);

	if (np->msg_level & NETIF_MSG_INTR)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus));
	return;
}
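/* Illustrative sketch, not part of the driver: how the scavenge loop
 * above decodes a 32-bit Tx completion-queue word.  The top three bits
 * select the entry type (0xa0000000 reports a transmitted packet,
 * 0x80000000 carries a descriptor to reclaim), and the ring index sits
 * in the low 16 bits, shifted right by 3.  This layout is inferred from
 * the handler itself, not from a datasheet; the helper name is
 * hypothetical. */
static inline int tx_done_entry_index(u32 tx_status)
{
	if ((tx_status & 0xe0000000) != 0x80000000)
		return -1;			/* no descriptor to reclaim */
	return ((u16)tx_status) >> 3;		/* implicit truncate, as above */
}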
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	u32 desc_status;

	if (np->rx_done_q == 0) {
		printk(KERN_ERR "%s:  rx_done_q is NULL!  rx_done is %d. %p.\n",
			   dev->name, np->rx_done, np->tx_done_q);
		return 0;
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
		if (np->msg_level & NETIF_MSG_RX_STATUS)
			printk(KERN_DEBUG "  netdev_rx() status of %d was %8.8x.\n",
				   np->rx_done, desc_status);
		if (--boguscnt < 0)
			break;
		if (!(desc_status & RxOK)) {
			/* There was an error. */
			if (np->msg_level & NETIF_MSG_RX_ERR)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   desc_status);
			np->stats.rx_errors++;
			if (desc_status & RxFIFOErr)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;
			u16 pkt_len = desc_status;	/* Implicit truncate */
			int entry = (desc_status >> 16) & 0x7ff;

#ifndef final_version
			if (np->msg_level & NETIF_MSG_RX_STATUS)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if HAS_IP_COPYSUM			/* Call copy + cksum if available. */
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
			} else {
				char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
#ifndef final_version			/* Remove after testing. */
				if (le32desc_to_virt(np->rx_ring[entry].rxaddr & ~3) != temp)
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						   "do not match in netdev_rx: %p vs. %p / %p.\n",
						   dev->name,
						   le32desc_to_virt(np->rx_ring[entry].rxaddr),
						   skb->head, temp);
#endif
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef full_rx_status
			if (np->rx_done_q[np->rx_done].status2 & cpu_to_le32(0x01000000))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
		}
		np->cur_rx++;
		np->rx_done_q[np->rx_done].status = 0;
		np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1);
	}
	writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			np->rx_ring[entry].rxaddr =
				virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
		}
		if (entry == RX_RING_SIZE - 1)
			np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing);
		/* We could defer this until later... */
		writew(entry, dev->base_addr + RxDescQIdx);
	}
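	/* Illustrative note, not part of the driver (the function continues
	 * on the next page of this listing): the rx_copybreak test above
	 * trades a memcpy for buffer economy.  Frames shorter than the
	 * threshold are copied into a freshly allocated, right-sized skb so
	 * the full-sized ring buffer stays in place for reuse; longer frames
	 * are passed up directly, and the refill loop above replaces the
	 * vacated ring slot with a new dev_alloc_skb() buffer. */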
