/*
 * epic100.c — SMC83C170/175 "EPIC/100" Ethernet driver.
 * Source: Linux Kernel 2.6.9 (OMAP1710 tree), C, 1,688 lines total;
 * this is excerpt page 1 of 4.
 */
	}	/* Remove the packets on the Rx queue. */	epic_rx(dev, RX_RING_SIZE);}static void epic_restart(struct net_device *dev){	long ioaddr = dev->base_addr;	struct epic_private *ep = dev->priv;	int i;	/* Soft reset the chip. */	outl(0x4001, ioaddr + GENCTL);	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);	udelay(1);	/* This magic is documented in SMSC app note 7.15 */	for (i = 16; i > 0; i--)		outl(0x0008, ioaddr + TEST1);#if defined(__powerpc__) || defined(__sparc__)		/* Big endian */	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);#else	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);#endif	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);	if (ep->chip_flags & MII_PWRDWN)		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);	for (i = 0; i < 3; i++)		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);	ep->tx_threshold = TX_FIFO_THRESH;	outl(ep->tx_threshold, ioaddr + TxThresh);	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);	/* Start the chip's Rx process. */	set_rx_mode(dev);	outl(StartRx | RxQueued, ioaddr + COMMAND);	/* Enable interrupts by setting the interrupt mask. */	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)		 | CntFull | TxUnderrun		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"		   " interrupt %4.4x.\n",		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),		   (int)inl(ioaddr + INTSTAT));	return;}static void check_media(struct net_device *dev){	struct epic_private *ep = dev->priv;	long ioaddr = dev->base_addr;	int mii_lpa = ep->mii_phy_cnt ? 
mdio_read(dev, ep->phys[0], MII_LPA) : 0;	int negotiated = mii_lpa & ep->mii.advertising;	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;	if (ep->mii.force_media)		return;	if (mii_lpa == 0xffff)		/* Bogus read */		return;	if (ep->mii.full_duplex != duplex) {		ep->mii.full_duplex = duplex;		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"			   " partner capability of %4.4x.\n", dev->name,			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);	}}static void epic_timer(unsigned long data){	struct net_device *dev = (struct net_device *)data;	struct epic_private *ep = dev->priv;	long ioaddr = dev->base_addr;	int next_tick = 5*HZ;	if (debug > 3) {		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",			   dev->name, (int)inl(ioaddr + TxSTAT));		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "			   "IntStatus %4.4x RxStatus %4.4x.\n",			   dev->name, (int)inl(ioaddr + INTMASK),			   (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));	}	check_media(dev);	ep->timer.expires = jiffies + next_tick;	add_timer(&ep->timer);}static void epic_tx_timeout(struct net_device *dev){	struct epic_private *ep = dev->priv;	long ioaddr = dev->base_addr;	if (debug > 0) {		printk(KERN_WARNING "%s: Transmit timeout using MII device, "			   "Tx status %4.4x.\n",			   dev->name, (int)inw(ioaddr + TxSTAT));		if (debug > 1) {			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",				   dev->name, ep->dirty_tx, ep->cur_tx);		}	}	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */		ep->stats.tx_fifo_errors++;		outl(RestartTx, ioaddr + COMMAND);	} else {		epic_restart(dev);		outl(TxQueued, dev->base_addr + COMMAND);	}	dev->trans_start = jiffies;	ep->stats.tx_errors++;	if (!ep->tx_full)		netif_wake_queue(dev);}/* Initialize the Rx and Tx rings, along with various 'dev' bits. 
*/static void epic_init_ring(struct net_device *dev){	struct epic_private *ep = dev->priv;	int i;	ep->tx_full = 0;	ep->dirty_tx = ep->cur_tx = 0;	ep->cur_rx = ep->dirty_rx = 0;	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);	/* Initialize all Rx descriptors. */	for (i = 0; i < RX_RING_SIZE; i++) {		ep->rx_ring[i].rxstatus = 0;		ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);		ep->rx_ring[i].next = ep->rx_ring_dma + 				      (i+1)*sizeof(struct epic_rx_desc);		ep->rx_skbuff[i] = NULL;	}	/* Mark the last entry as wrapping the ring. */	ep->rx_ring[i-1].next = ep->rx_ring_dma;	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */	for (i = 0; i < RX_RING_SIZE; i++) {		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);		ep->rx_skbuff[i] = skb;		if (skb == NULL)			break;		skb->dev = dev;			/* Mark as being used by this device. */		skb_reserve(skb, 2);	/* 16 byte align the IP header. */		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, 			skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);		ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);	}	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);	/* The Tx buffer descriptor is filled in as needed, but we	   do need to clear the ownership bit. */	for (i = 0; i < TX_RING_SIZE; i++) {		ep->tx_skbuff[i] = NULL;		ep->tx_ring[i].txstatus = 0x0000;		ep->tx_ring[i].next = ep->tx_ring_dma + 			(i+1)*sizeof(struct epic_tx_desc);	}	ep->tx_ring[i-1].next = ep->tx_ring_dma;	return;}static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev){	struct epic_private *ep = dev->priv;	int entry, free_count;	u32 ctrl_word;	unsigned long flags;		if (skb->len < ETH_ZLEN) {		skb = skb_padto(skb, ETH_ZLEN);		if (skb == NULL)			return 0;	}	/* Caution: the write order is important here, set the field with the	   "ownership" bit last. */	/* Calculate the next Tx descriptor entry. 
*/	spin_lock_irqsave(&ep->lock, flags);	free_count = ep->cur_tx - ep->dirty_tx;	entry = ep->cur_tx % TX_RING_SIZE;	ep->tx_skbuff[entry] = skb;	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data, 		 			            skb->len, PCI_DMA_TODEVICE);	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */		ctrl_word = cpu_to_le32(0x100000); /* No interrupt */	} else if (free_count == TX_QUEUE_LEN/2) {		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */	} else if (free_count < TX_QUEUE_LEN - 1) {		ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */	} else {		/* Leave room for an additional entry. */		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */		ep->tx_full = 1;	}	ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);	ep->tx_ring[entry].txstatus =		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)		| cpu_to_le32(DescOwn);	ep->cur_tx++;	if (ep->tx_full)		netif_stop_queue(dev);	spin_unlock_irqrestore(&ep->lock, flags);	/* Trigger an immediate transmit demand. */	outl(TxQueued, dev->base_addr + COMMAND);	dev->trans_start = jiffies;	if (debug > 4)		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "			   "flag %2.2x Tx status %8.8x.\n",			   dev->name, (int)skb->len, entry, ctrl_word,			   (int)inl(dev->base_addr + TxSTAT));	return 0;}static void epic_tx_error(struct net_device *dev, struct epic_private *ep,			  int status){	struct net_device_stats *stats = &ep->stats;#ifndef final_version	/* There was an major error, log it. 
*/	if (debug > 1)		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",		       dev->name, status);#endif	stats->tx_errors++;	if (status & 0x1050)		stats->tx_aborted_errors++;	if (status & 0x0008)		stats->tx_carrier_errors++;	if (status & 0x0040)		stats->tx_window_errors++;	if (status & 0x0010)		stats->tx_fifo_errors++;}static void epic_tx(struct net_device *dev, struct epic_private *ep){	unsigned int dirty_tx, cur_tx;	/*	 * Note: if this lock becomes a problem we can narrow the locked	 * region at the cost of occasionally grabbing the lock more times.	 */	cur_tx = ep->cur_tx;	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {		struct sk_buff *skb;		int entry = dirty_tx % TX_RING_SIZE;		int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);		if (txstatus & DescOwn)			break;	/* It still hasn't been Txed */		if (likely(txstatus & 0x0001)) {			ep->stats.collisions += (txstatus >> 8) & 15;			ep->stats.tx_packets++;			ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;		} else			epic_tx_error(dev, ep, txstatus);		/* Free the original skb. */		skb = ep->tx_skbuff[entry];		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, 				 skb->len, PCI_DMA_TODEVICE);		dev_kfree_skb_irq(skb);		ep->tx_skbuff[entry] = NULL;	}#ifndef final_version	if (cur_tx - dirty_tx > TX_RING_SIZE) {		printk(KERN_WARNING		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",		       dev->name, dirty_tx, cur_tx, ep->tx_full);		dirty_tx += TX_RING_SIZE;	}#endif	ep->dirty_tx = dirty_tx;	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {		/* The ring is no longer full, allow new TX entries. */		ep->tx_full = 0;		netif_wake_queue(dev);	}}/* The interrupt handler does all of the Rx thread work and cleans up   after the Tx thread. 
*/static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs){	struct net_device *dev = dev_instance;	struct epic_private *ep = dev->priv;	long ioaddr = dev->base_addr;	unsigned int handled = 0;	int status;	status = inl(ioaddr + INTSTAT);	/* Acknowledge all of the current interrupt sources ASAP. */	outl(status & EpicNormalEvent, ioaddr + INTSTAT);	if (debug > 4) {		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "				   "intstat=%#8.8x.\n", dev->name, status,				   (int)inl(ioaddr + INTSTAT));	}	if ((status & IntrSummary) == 0)		goto out;	handled = 1;	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {		spin_lock(&ep->napi_lock);		if (netif_rx_schedule_prep(dev)) {			epic_napi_irq_off(dev, ep);			__netif_rx_schedule(dev);		} else			ep->reschedule_in_poll++;		spin_unlock(&ep->napi_lock);	}	status &= ~EpicNapiEvent;	/* Check uncommon events all at once. */	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {		if (status == EpicRemoved)			goto out;		/* Always update the error counts to avoid overhead later. */		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);		if (status & TxUnderrun) { /* Tx FIFO underflow. */			ep->stats.tx_fifo_errors++;			outl(ep->tx_threshold += 128, ioaddr + TxThresh);			/* Restart the transmit process. */			outl(RestartTx, ioaddr + COMMAND);		}		if (status & PCIBusErr170) {			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",					 dev->name, status);			epic_pause(dev);			epic_restart(dev);		}		/* Clear all error sources. 
*/		outl(status & 0x7f18, ioaddr + INTSTAT);	}out:	if (debug > 3) {		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",				   dev->name, status);	}	return IRQ_RETVAL(handled);}static int epic_rx(struct net_device *dev, int budget){	struct epic_private *ep = dev->priv;	int entry = ep->cur_rx % RX_RING_SIZE;	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;	int work_done = 0;	if (debug > 4)		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,			   ep->rx_ring[entry].rxstatus);	if (rx_work_limit > budget)		rx_work_limit = budget;	/* If we own the next entry, it's a new packet. Send it up. */	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);		if (debug > 4)			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);		if (--rx_work_limit < 0)			break;		if (status & 0x2006) {			if (debug > 2)				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",					   dev->name, status);			if (status & 0x2000) {				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "					   "multiple buffers, status %4.4x!\n", dev->name, status);				ep->stats.rx_length_errors++;			} else if (status & 0x0006)				/* Rx Frame errors are counted in hardware. */				ep->stats.rx_errors++;		} else {			/* Malloc up new buffer, compatible with net-2e. */			/* Omit the four octet CRC from the length. */			short pkt_len = (status >> 16) - 4;			struct sk_buff *skb;			if (pkt_len > PKT_BUF_SZ - 4) {				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "					   "%d bytes.\n",					   dev->name, status, pkt_len);				pkt_len = 1514;			}
