
starfire.c

Linux kernel source code, distributed as a compressed archive: the source code accompanying the book <<Linux内核>>.
Language: C
Page 1 of 3
		np->medialock = 1;
	}
	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
		np->full_duplex = 1;

	if (np->full_duplex)
		np->medialock = 1;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &mii_ioctl;

	if (mtu)
		dev->mtu = mtu;

	if (drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
			int mii_status = mdio_read(dev, phy, 1);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->advertising = mdio_read(dev, phy, 4);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, phy, mii_status, np->advertising);
			}
		}
		np->mii_cnt = phy_idx;
	}

	return 0;

err_out_free_res:
	release_mem_region (ioaddr, io_size);
err_out_free_netdev:
	unregister_netdev (dev);
	kfree (dev);
	return -ENODEV;
}

/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
	int result, boguscnt = 1000;
	/* ??? Should we add a busy-wait here? */
	do
		result = readl(mdio_addr);
	while ((result & 0xC0000000) != 0x80000000 && --boguscnt >= 0);
	return result & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
	writel(value, mdio_addr);
	/* The busy-wait will occur before a read. */
	return;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i, retval;

	/* Do we ever need to reset the chip??? */

	MOD_INC_USE_COUNT;

	retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (retval) {
		MOD_DEC_USE_COUNT;
		return retval;
	}

	/* Disable the Rx and Tx, and reset the chip. */
	writel(0, ioaddr + GenCtrl);
	writel(1, ioaddr + PCIDeviceConfig);
	if (debug > 1)
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);

	/* Allocate the various queues, failing gracefully. */
	if (np->tx_done_q == 0)
		np->tx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_done_q_dma);
	if (np->rx_done_q == 0)
		np->rx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_done_q_dma);
	if (np->tx_ring == 0)
		np->tx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_ring_dma);
	if (np->rx_ring == 0)
		np->rx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_ring_dma);
	if (np->tx_done_q == 0  ||  np->rx_done_q == 0
		|| np->rx_ring == 0 ||  np->tx_ring == 0) {
		if (np->tx_done_q)
			pci_free_consistent(np->pci_dev, PAGE_SIZE,
								np->tx_done_q, np->tx_done_q_dma);
		if (np->rx_done_q)
			pci_free_consistent(np->pci_dev, PAGE_SIZE,
								np->rx_done_q, np->rx_done_q_dma);
		if (np->tx_ring)
			pci_free_consistent(np->pci_dev, PAGE_SIZE,
								np->tx_ring, np->tx_ring_dma);
		if (np->rx_ring)
			pci_free_consistent(np->pci_dev, PAGE_SIZE,
								np->rx_ring, np->rx_ring_dma);
		MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}

	init_ring(dev);

	/* Set the size of the Rx buffers. */
	writel((np->rx_buf_sz<<16) | 0xA000, ioaddr + RxDescQCtrl);
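
	/* Note: the queue base addresses programmed into the chip below are
	   the bus/DMA addresses returned by pci_alloc_consistent() above,
	   not kernel virtual addresses; on 64-bit platforms the upper 32
	   bits go into the separate *HiAddr registers. */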
	/* Set Tx descriptor to type 1 and padding to 0 bytes. */
	writel(0x02000401, ioaddr + TxDescCtrl);

#if defined(ADDR_64BITS) && defined(__alpha__)
	/* XXX We really need a 64-bit PCI dma interfaces too... -DaveM */
	writel(np->rx_ring_dma >> 32, ioaddr + RxDescQHiAddr);
	writel(np->tx_ring_dma >> 32, ioaddr + TxRingHiAddr);
#else
	writel(0, ioaddr + RxDescQHiAddr);
	writel(0, ioaddr + TxRingHiAddr);
	writel(0, ioaddr + CompletionHiAddr);
#endif
	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);
	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
	writel(np->rx_done_q_dma, ioaddr + RxCompletionAddr);

	if (debug > 1)
		printk(KERN_DEBUG "%s:  Filling in the station address.\n", dev->name);

	/* Fill both the unused Tx SA register and the Rx perfect filter. */
	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + StationAddr + 5-i);
	for (i = 0; i < 16; i++) {
		u16 *eaddrs = (u16 *)dev->dev_addr;
		long setup_frm = ioaddr + 0x56000 + i*16;
		writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
		writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
		writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
	}

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */
	np->tx_mode = 0;			/* Initialized when TxMode set. */
	np->tx_threshold = 4;
	writel(np->tx_threshold, ioaddr + TxThreshold);
	writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	netif_start_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s:  Setting the Rx and Tx modes.\n", dev->name);
	set_rx_mode(dev);

	np->advertising = mdio_read(dev, np->phys[0], 4);
	check_duplex(dev, 1);

	/* Set the interrupt mask and enable PCI interrupts. */
	writel(IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
		   IntrTxDone | IntrTxEmpty | IntrTxPCIErr |
		   StatsMax | LinkChange | IntrNormalSummary | IntrAbnormalSummary
		   | 0x0010, ioaddr + IntrEnable);
	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
		   ioaddr + PCIDeviceConfig);

	/* Enable the Rx and Tx units. */
	writel(0x000F, ioaddr + GenCtrl);

	if (debug > 2)
		printk(KERN_DEBUG "%s: Done netdev_open().\n",
			   dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;				/* timer handler */
	add_timer(&np->timer);

	return 0;
}

static void check_duplex(struct net_device *dev, int startup)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int new_tx_mode;

	new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800 : 0)
		| (np->rx_flowctrl ? 0x0400 : 0);
	if (np->medialock) {
		if (np->full_duplex)
			new_tx_mode |= 2;
	} else {
		int mii_reg5 = mdio_read(dev, np->phys[0], 5);
		int negotiated = mii_reg5 & np->advertising;
		int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

		if (duplex)
			new_tx_mode |= 2;
		if (np->full_duplex != duplex) {
			np->full_duplex = duplex;
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
					   " negotiated capability %4.4x.\n", dev->name,
					   duplex ? "full" : "half", np->phys[0], negotiated);
		}
	}
	if (new_tx_mode != np->tx_mode) {
		np->tx_mode = new_tx_mode;
		writel(np->tx_mode | 0x8000, ioaddr + TxMode);
		writel(np->tx_mode, ioaddr + TxMode);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 60*HZ;		/* Check before driver release. */
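
	/* Note: this timer periodically re-checks the negotiated duplex
	   and, in non-final builds, detects interrupts that appear stuck
	   in IntrStatus; it re-arms itself at the end of each run. */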
	if (debug > 3) {
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus));
	}
	check_duplex(dev, 0);

#if ! defined(final_version)
	/* This is often falsely triggered. */
	if (readl(ioaddr + IntrStatus) & 1) {
		int new_status = readl(ioaddr + IntrStatus);
		/* Bogus hardware IRQ: Fake an interrupt handler call. */
		if (new_status & 1) {
			printk(KERN_ERR "%s: Interrupt blocked, status %8.8x/%8.8x.\n",
				   dev->name, new_status, (int)readl(ioaddr + IntrStatus));
			intr_handler(dev->irq, dev, 0);
		}
	}
#endif

	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));

#ifndef __alpha__
	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)le32_to_cpu(np->rx_ring[i].rxaddr));
		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x", le32_to_cpu(np->tx_ring[i].status));
		printk("\n");
	}
#endif

	/* Perhaps we should reinitialize the hardware here. */
	dev->if_port = 0;
	/* Stop and restart the chip's Tx processes. */

	/* Trigger an immediate transmit demand. */

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	np->tx_full = 0;
	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_info[i].skb = skb;
		if (skb == NULL)
			break;
		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		skb->dev = dev;			/* Mark as being used by this device. */
		/* Grrr, we cannot offset to correctly align the IP header. */
		np->rx_ring[i].rxaddr = cpu_to_le32(np->rx_info[i].mapping | RxDescValid);
	}
	writew(i - 1, dev->base_addr + RxDescQIdx);
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Clear the remainder of the Rx buffer ring. */
	for (  ; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = 0;
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing);

	/* Clear the completion rings. */
	for (i = 0; i < DONE_Q_SIZE; i++) {
		np->rx_done_q[i].status = 0;
		np->tx_done_q[i].status = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_info[i].skb = NULL;
		np->tx_info[i].mapping = 0;
		np->tx_ring[i].status = 0;
	}
	return;
}

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */
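
	/* Note: np->cur_tx is a free-running frame counter; taking it
	   modulo TX_RING_SIZE below yields the descriptor slot to fill,
	   and the cur_tx/dirty_tx difference measures ring occupancy. */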
	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_info[entry].skb = skb;
	np->tx_info[entry].mapping =
		pci_map_single(np->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	np->tx_ring[entry].addr = cpu_to_le32(np->tx_info[entry].mapping);
	/* Add  "| TxDescIntr" to generate Tx-done interrupts. */
	np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
	if (debug > 5) {
		printk(KERN_DEBUG "%s: Tx #%d slot %d  %8.8x %8.8x.\n",
			   dev->name, np->cur_tx, entry,
			   le32_to_cpu(np->tx_ring[entry].status),
			   le32_to_cpu(np->tx_ring[entry].addr));
	}
	np->cur_tx++;
#if 1
	if (entry >= TX_RING_SIZE-1) {		/* Wrap ring */
		np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
		entry = -1;
	}
#endif

	/* Non-x86: explicitly flush descriptor cache lines here. */

	/* Update the producer index. */
	writel(++entry, dev->base_addr + TxProducerIdx);

	if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
		np->tx_full = 1;
		netif_stop_queue(dev);
	}
	dev->trans_start = jiffies;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int boguscnt = max_interrupt_work;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
				"device.\n", irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	np = (struct netdev_private *)dev->priv;

	do {
		u32 intr_status = readl(ioaddr + IntrClear);
		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
				   dev->name, intr_status);
		if (intr_status == 0)
			break;
		if (intr_status & IntrRxDone)
			netdev_rx(dev);

		/* Scavenge the skbuff list based on the Tx-done queue.
		   There are redundant checks here that may be cleaned up
		   after the driver has proven to be reliable. */
		{
			int consumer = readl(ioaddr + TxConsumerIdx);
			int tx_status;
			if (debug > 4)
				printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
					   dev->name, consumer);
#if 0
			if (np->tx_done >= 250  || np->tx_done == 0)
				printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, "
					   "%d is %8.8x.\n", dev->name,
					   np->tx_done, le32_to_cpu(np->tx_done_q[np->tx_done].status),
					   (np->tx_done+1) & (DONE_Q_SIZE-1),
					   le32_to_cpu(np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status));
#endif
			while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status))
				   != 0) {
				if (debug > 4)
					printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
						   dev->name, np->tx_done, tx_status);
				if ((tx_status & 0xe0000000) == 0xa0000000) {
					np->stats.tx_packets++;
				} else if ((tx_status & 0xe0000000) == 0x80000000) {
					struct sk_buff *skb;
					u16 entry = tx_status;		/* Implicit truncate */
					entry >>= 3;

					skb = np->tx_info[entry].skb;
					pci_unmap_single(np->pci_dev,
							 np->tx_info[entry].mapping,
							 skb->len, PCI_DMA_TODEVICE);
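					/* Note: for 0x8xxxxxxx completions the low 16
					   bits, shifted right by 3 above, index the Tx
					   ring slot whose skb and DMA mapping are
					   reclaimed here. */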
					/* Scavenge the descriptor. */
					dev_kfree_skb_irq(skb);
					np->tx_info[entry].skb = NULL;
					np->tx_info[entry].mapping = 0;
					np->dirty_tx++;
				}
				np->tx_done_q[np->tx_done].status = 0;
				np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
			}
			writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
		}
		if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
			/* The ring is no longer full, wake the queue. */
			np->tx_full = 0;
			netif_wake_queue(dev);
		}
