
epic100.c

From a package on Linux development combined with the 2410; it can be used to generate the zImage file the 2410 needs.
Language: C
Page 1 of 4
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

#if defined(__powerpc__) || defined(__sparc__)		/* Big endian */
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif

	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	for (i = 0; i < 3; i++)
		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);
	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | TxDone | TxEmpty
		 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
		 ioaddr + INTMASK);

	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
		   " interrupt %4.4x.\n",
		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
		   (int)inl(ioaddr + INTSTAT));
	return;
}

static void check_media(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.duplex_lock)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
			   " partner capability of %4.4x.\n", dev->name,
			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	}
}

static void epic_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
			   dev->name, (int)inl(ioaddr + TxSTAT));
		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
			   "IntStatus %4.4x RxStatus %4.4x.\n",
			   dev->name, (int)inl(ioaddr + INTMASK),
			   (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
	}

	check_media(dev);

	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}

static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;

	if (debug > 0) {
		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
			   "Tx status %4.4x.\n",
			   dev->name, (int)inw(ioaddr + TxSTAT));
		if (debug > 1) {
			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
				   dev->name, ep->dirty_tx, ep->cur_tx);
		}
	}
	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		ep->stats.tx_fifo_errors++;
		outl(RestartTx, ioaddr + COMMAND);
	} else {
		epic_restart(dev);
		outl(TxQueued, dev->base_addr + COMMAND);
	}

	dev->trans_start = jiffies;
	ep->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}
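/*
 * Illustrative note (not part of the original driver): the Rx and Tx
 * rings built below live in DMA-visible memory shared with the EPIC,
 * and the DescOwn bit arbitrates ownership -- the chip may only DMA to
 * or from a descriptor while DescOwn is set, and the CPU may only touch
 * it after the chip has cleared the bit.  Under those assumptions, a
 * minimal consumer of such a ring looks like the sketch here; `ring'
 * and consume() are hypothetical, the other names match this file.
 */
#if 0	/* sketch only, never compiled */
	int i = 0;
	for (;;) {
		/* Wait until the chip hands descriptor i back to the CPU. */
		while (le32_to_cpu(ring[i].rxstatus) & DescOwn)
			;
		consume(&ring[i]);		/* hypothetical: process the frame */
		/* Re-arm last, so the chip never sees a half-updated entry. */
		ring[i].rxstatus = cpu_to_le32(DescOwn);
		i = (i + 1) % RX_RING_SIZE;
	}
#endif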
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int i;

	ep->tx_full = 0;
	ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;			/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
	}
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = 0;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
	return;
}

static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int entry, free_count;
	u32 ctrl_word;
	long flags;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
						    skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {	/* Typical path */
		ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
		| cpu_to_le32(DescOwn);

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);

	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	dev->trans_start = jiffies;
	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
			   "flag %2.2x Tx status %8.8x.\n",
			   dev->name, (int)skb->len, entry, ctrl_word,
			   (int)inl(dev->base_addr + TxSTAT));

	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int status, boguscnt = max_interrupt_work;

	do {
		status = inl(ioaddr + INTSTAT);
		/* Acknowledge all of the current interrupt sources ASAP. */
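		/* Editorial comment, inferred from the write-back below: the
		   acknowledge works as write-one-to-clear, so only the bits
		   just read above are cleared and any event raised after the
		   read survives for the next pass of this loop. */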
		outl(status & 0x00007fff, ioaddr + INTSTAT);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
				   "intstat=%#8.8x.\n",
				   dev->name, status, (int)inl(ioaddr + INTSTAT));

		if ((status & IntrSummary) == 0)
			break;

		if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
			epic_rx(dev);

		if (status & (TxEmpty | TxDone)) {
			unsigned int dirty_tx, cur_tx;

			/* Note: if this lock becomes a problem we can narrow the locked
			   region at the cost of occasionally grabbing the lock more
			   times. */
			spin_lock(&ep->lock);
			cur_tx = ep->cur_tx;
			dirty_tx = ep->dirty_tx;
			for (; cur_tx - dirty_tx > 0; dirty_tx++) {
				struct sk_buff *skb;
				int entry = dirty_tx % TX_RING_SIZE;
				int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);

				if (txstatus & DescOwn)
					break;			/* It still hasn't been Txed */

				if ( ! (txstatus & 0x0001)) {
					/* There was a major error, log it. */
#ifndef final_version
					if (debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, txstatus);
#endif
					ep->stats.tx_errors++;
					if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
					if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
					if (txstatus & 0x0040) ep->stats.tx_window_errors++;
					if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
					if (txstatus & 0x1000) ep->stats.collisions16++;
#endif
				} else {
#ifdef ETHER_STATS
					if ((txstatus & 0x0002) != 0) ep->stats.tx_deferred++;
#endif
					ep->stats.collisions += (txstatus >> 8) & 15;
					ep->stats.tx_packets++;
					ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
				}

				/* Free the original skb. */
				skb = ep->tx_skbuff[entry];
				pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
						 skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);
				ep->tx_skbuff[entry] = 0;
			}

#ifndef final_version
			if (cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, cur_tx, ep->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif
			ep->dirty_tx = dirty_tx;
			if (ep->tx_full
				&& cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
				/* The ring is no longer full, allow new TX entries. */
				ep->tx_full = 0;
				spin_unlock(&ep->lock);
				netif_wake_queue(dev);
			} else
				spin_unlock(&ep->lock);
		}

		/* Check uncommon events all at once. */
		if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
					  PCIBusErr170 | PCIBusErr175)) {
			if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
				break;
			/* Always update the error counts to avoid overhead later. */
			ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
			ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
			ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

			if (status & TxUnderrun) { /* Tx FIFO underflow. */
				ep->stats.tx_fifo_errors++;
				outl(ep->tx_threshold += 128, ioaddr + TxThresh);
				/* Restart the transmit process. */
				outl(RestartTx, ioaddr + COMMAND);
			}
			if (status & RxOverflow) {		/* Missed a Rx frame. */
				ep->stats.rx_errors++;
			}
			if (status & (RxOverflow | RxFull))
				outw(RxQueued, ioaddr + COMMAND);
			if (status & PCIBusErr170) {
				printk(KERN_ERR "%s: PCI Bus Error!  EPIC status %4.4x.\n",
					   dev->name, status);
				epic_pause(dev);
				epic_restart(dev);
			}
			/* Clear all error sources. */
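			/* Editorial comment: 0x7f18 looks to be the mask of the
			   error-status bits handled in this block; writing them
			   back uses the same write-one-to-clear acknowledge as
			   the INTSTAT write at the top of the loop. */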
			outl(status & 0x7f18, ioaddr + INTSTAT);
		}
		if (--boguscnt < 0) {
			printk(KERN_ERR "%s: Too much work at interrupt, "
				   "IntrStatus=0x%8.8x.\n",
				   dev->name, status);
			/* Clear all interrupt sources. */
			outl(0x0001ffff, ioaddr + INTSTAT);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
			   dev->name, status);
	return;
}

static int epic_rx(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);

		if (debug > 4)
			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				ep->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				ep->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			pci_dma_sync_single(ep->pci_dev, ep->rx_ring[entry].bufaddr,
					    ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
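
The excerpt ends here, midway through epic_rx() (this is page 1 of 4). The comment it breaks off on introduces the usual 2.4-era "copybreak" receive path: packets below a size threshold are copied into a small, freshly allocated skb so the full-sized ring buffer can be reused in place, while larger packets hand the ring buffer itself upstream. A minimal sketch of that pattern, not the file's actual continuation -- the RX_COPYBREAK constant is hypothetical and the other names follow this file:

	/* Hypothetical copybreak threshold; not defined in this excerpt. */
	#define RX_COPYBREAK 200

			if (pkt_len < RX_COPYBREAK
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header. */
				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
			} else {
				/* Pass the ring buffer itself upstream; the refill
				   loop re-arms this slot with a fresh buffer. */
				skb = ep->rx_skbuff[entry];
				skb_put(skb, pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);

Either branch ends with the frame queued to the network stack via netif_rx(); the copy branch trades one memcpy for keeping the large DMA buffer on the ring, which is the whole point of the threshold.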
