
winbond-840.c
linux-2.6.15.6 / C
Page 1 of 4
	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr + sizeof(struct w840_rx_desc) * RX_RING_SIZE,
		np->base_addr + TxRingPtr);
}

static void free_rxtx_rings(struct netdev_private *np)
{
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->rx_addr[i],
					 np->rx_skbuff[i]->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->tx_addr[i],
					 np->tx_skbuff[i]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1 << 20);		/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04 << 2);	/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14		Burst length 13:8
		0000	<not allowed>		0000 align to cache	0800 8 longwords
		4000	8 longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32 longwords		0400 4 longwords */
#if defined(__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
		       "alignment to 8 longwords.\n", dev->name);
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(__sparc__)
	i |= 0x4800;
#else
#warning Processor architecture undefined
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold; Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);
	iowrite32(0, ioaddr + RxStartDemand);
}
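/*
 * Illustrative sketch (not part of winbond-840.c): the PCIBusCfg table in
 * the comment above can be decoded mechanically. Bits 15:14 select the
 * cache alignment and bits 13:8 hold a one-hot burst length whose value
 * equals the burst size in longwords (0 = burst to the alignment
 * boundary). decode_pci_bus_cfg() is a hypothetical host-side helper,
 * not driver code.
 */
#include <stdio.h>
#include <stdint.h>

static void decode_pci_bus_cfg(uint32_t v)
{
	/* index 0 stands for the "<not allowed>" alignment encoding */
	static const int align[4] = { 0, 8, 16, 32 };
	unsigned burst = (v >> 8) & 0x3f;

	printf("0x%04x: cache alignment %d longwords, burst %u longwords\n",
	       (unsigned)v, align[(v >> 14) & 3], burst);
}

int main(void)
{
	decode_pci_bus_cfg(0xE000);	/* Pentium+ setting chosen above */
	decode_pci_bus_cfg(0x4800);	/* '486 fallback setting */
	return 0;
}
/* End of sketch. */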
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
	       " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n" KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh.\n", ioread32(ioaddr + 0x4C));

	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */
	iowrite32(1, np->base_addr + PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	netif_wake_queue(dev);
	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc) * RX_RING_SIZE +
			sizeof(struct w840_tx_desc) * TX_RING_SIZE,
			&np->ring_dma_addr);
	if (!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc) * RX_RING_SIZE +
			sizeof(struct w840_tx_desc) * TX_RING_SIZE,
			np->rx_ring, np->ring_dma_addr);
}

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data, skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry] + TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if (entry == TX_RING_SIZE - 1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwn:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwn is cleared.
	 * - If DescOwn is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwn;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;

	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, np->cur_tx, entry);
	}
	return 0;
}
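/*
 * Illustrative sketch (not part of winbond-840.c): a userspace model of
 * the publish order start_tx() documents above. All descriptor fields and
 * the producer index must be visible before DescOwn is set; here a C11
 * release store stands in for the driver's wmb() before the status write.
 * mock_desc, mock_ring and queue_frame are invented for this sketch.
 */
#include <stdatomic.h>
#include <stdint.h>

#define MOCK_DESC_OWN	0x80000000u
#define MOCK_RING_SIZE	16

struct mock_desc {
	uint32_t length;
	uint32_t buffer1;
	uint32_t buffer2;
	_Atomic uint32_t status;	/* stands in for the DMA descriptor word */
};

static struct mock_desc mock_ring[MOCK_RING_SIZE];
static unsigned mock_cur_tx;

static void queue_frame(uint32_t dma_addr, uint32_t len)
{
	struct mock_desc *d = &mock_ring[mock_cur_tx % MOCK_RING_SIZE];

	d->buffer1 = dma_addr;		/* fill the descriptor first */
	d->length = len;
	mock_cur_tx++;			/* then advance the producer index */
	/* release ordering: all writes above become visible before DescOwn */
	atomic_store_explicit(&d->status, MOCK_DESC_OWN, memory_order_release);
}

int main(void)
{
	queue_frame(0x1000, 64);	/* hypothetical mapped address/length */
	return 0;
}
/* End of sketch. */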
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
				       dev->name, tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
				       dev->name, entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev, np->tx_addr[entry],
				 np->tx_skbuff[entry]->len,
				 PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
		np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
			       dev->name, intr_status);

		if ((intr_status & (NormalIntr | AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (IntrRxDone | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxIdle | IntrTxDone) &&
			np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
				   TimerInt | IntrTxStopped))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + IntrStatus));

	return IRQ_RETVAL(handled);
}
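/*
 * Illustrative sketch (not part of winbond-840.c): the error branch in
 * netdev_tx_done() above folds the Tx status word into counters using
 * these masks. explain_tx_status() is a hypothetical host-side decoder
 * that simply restates them.
 */
#include <stdio.h>
#include <stdint.h>

static void explain_tx_status(uint32_t s)
{
	if (!(s & 0x8000)) {
		printf("ok, %u collisions\n", (unsigned)((s >> 3) & 15));
		return;
	}
	if (s & 0x0104) printf("counted as tx_aborted_errors\n");
	if (s & 0x0C80) printf("counted as tx_carrier_errors\n");
	if (s & 0x0200) printf("counted as tx_window_errors\n");
	if (s & 0x0002) printf("counted as tx_fifo_errors\n");
	if (s & 0x0080) printf("tx_heartbeat_errors (half duplex only)\n");
}

int main(void)
{
	explain_tx_status(0x8002);	/* error frame: FIFO underflow bit */
	explain_tx_status(0x0018);	/* clean frame with 3 collisions */
	return 0;
}
/* End of sketch. */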
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
		       entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
			       status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					       "multiple buffers, entry %#x status %4.4x!\n",
					       dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				np->stats.rx_errors++;	/* end of a packet. */
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev, np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev, np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev, np->rx_addr[entry],
						 np->rx_skbuff[entry]->len,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
				       "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
				       "%d.%d.%d.%d.\n",
				       skb->data[0], skb->data[1], skb->data[2], skb->data[3],
