⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 r8169.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 第 1 页 / 共 4 页
字号:
/*
 * Tail of rtl8169_tx_clear() — the function signature precedes this chunk.
 * Drops every outstanding Tx skb: unmaps its DMA buffer, frees it, counts
 * it as tx_dropped, and resets the Tx producer index.
 */
{
	int i;

	tp->cur_tx = 0;
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (skb) {
			rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + i,
					     tp->TxDescArray + i);
			dev_kfree_skb(skb);
			tp->stats.tx_dropped++;
		}
	}
}

/*
 * Tx watchdog handler, invoked by the stack when transmission stalls.
 * Stops the transmitter, masks interrupts, flushes the Tx ring under the
 * lock (so a shared irq cannot reclaim concurrently), then re-initializes
 * the hardware and restarts the queue.
 */
static void
rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	u8 tmp8;

	printk(KERN_INFO "%s: TX Timeout\n", dev->name);

	/* disable Tx, if not already */
	tmp8 = RTL_R8(ChipCmd);
	if (tmp8 & CmdTxEnb)
		RTL_W8(ChipCmd, tmp8 & ~CmdTxEnb);

	/* Disable interrupts by clearing the interrupt mask. */
	RTL_W16(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	rtl8169_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything */
	rtl8169_hw_start(dev);

	netif_wake_queue(dev);
}

/*
 * Queue one frame for transmission.  Always returns 0: a frame that
 * cannot be sent is dropped (counted in tx_dropped), never requeued.
 *
 * Runt frames are padded to ETH_ZLEN.  NOTE(review): the err_update_stats
 * path does not free the skb, so this assumes skb_padto() frees it on
 * failure — confirm against this kernel version's skb_padto().
 *
 * The descriptor is handed to the NIC by setting OWNbit in status; the
 * EORbit term tags only the last ring entry (entry + 1 == NUM_TX_DESC).
 */
static int
rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	u32 len = skb->len;

	if (unlikely(skb->len < ETH_ZLEN)) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (!skb)
			goto err_update_stats;
		len = ETH_ZLEN;
	}

	if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
		dma_addr_t mapping;
		u32 status;

		mapping = pci_map_single(tp->pci_dev, skb->data, len,
					 PCI_DMA_TODEVICE);

		tp->Tx_skbuff[entry] = skb;
		tp->TxDescArray[entry].addr = cpu_to_le64(mapping);

		/* anti gcc 2.95.3 bugware */
		status = OWNbit | FSbit | LSbit | len |
			 (EORbit * !((entry + 1) % NUM_TX_DESC));
		tp->TxDescArray[entry].status = cpu_to_le32(status);

		RTL_W8(TxPoll, 0x40);	//set polling bit

		dev->trans_start = jiffies;

		tp->cur_tx++;
		smp_wmb();
	} else
		goto err_drop;

	/*
	 * Ring full: stop the queue, then re-check dirty_tx — if the irq
	 * handler reclaimed descriptors in the window before the stop,
	 * wake the queue again so it does not stall.
	 */
	if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
		u32 dirty = tp->dirty_tx;

		netif_stop_queue(dev);
		smp_rmb();
		if (dirty != tp->dirty_tx)
			netif_wake_queue(dev);
	}

out:
	return 0;

err_drop:
	dev_kfree_skb(skb);
err_update_stats:
	tp->stats.tx_dropped++;
	goto out;
}

/*
 * Reclaim completed Tx descriptors: for every descriptor the NIC has
 * released (OWNbit clear), update stats, unmap and free the skb, and
 * advance dirty_tx.  Wakes the queue if start_xmit had stopped it.
 */
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void *ioaddr)
{
	unsigned int dirty_tx, tx_left;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct sk_buff *skb = tp->Tx_skbuff[entry];
		u32 status;

		rmb();	/* read descriptor status after the index */
		status = le32_to_cpu(tp->TxDescArray[entry].status);
		if (status & OWNbit)
			break;	/* NIC still owns it — stop reclaiming */

		/* FIXME: is it really accurate for TxErr ? */
		tp->stats.tx_bytes += skb->len >= ETH_ZLEN ?
				      skb->len : ETH_ZLEN;
		tp->stats.tx_packets++;
		rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + entry,
				     tp->TxDescArray + entry);
		dev_kfree_skb_irq(skb);
		tp->Tx_skbuff[entry] = NULL;
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();	/* publish dirty_tx before waking the queue */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}
}

/*
 * Copybreak helper: for packets shorter than rx_copybreak, copy the data
 * into a freshly allocated skb (2-byte reserve keeps the IP header
 * aligned) and return the original receive buffer to the NIC.
 * Returns 0 when the packet was copied, -1 otherwise — the caller then
 * passes the original buffer upstream and unmaps it instead.
 */
static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				      struct RxDesc *desc,
				      struct net_device *dev)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + 2);
		if (skb) {
			skb->dev = dev;
			skb_reserve(skb, 2);
			eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
			*sk_buff = skb;
			rtl8169_return_to_asic(desc);
			ret = 0;
		}
	}
	return ret;
}

/*
 * Rx processing: walk the Rx ring while descriptors are CPU-owned and
 * within quota.  Error frames only bump counters; good frames are either
 * copied (copybreak) or handed upstream directly, selected by switching
 * pci_action between sync-for-device (buffer reused by the NIC) and
 * pci_unmap_single (skb leaves the driver).  The ring is then refilled.
 * Returns the number of descriptors consumed.
 */
static int
rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void *ioaddr)
{
	unsigned int cur_rx, rx_left, count;
	int delta;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);

	while (rx_left > 0) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		u32 status;

		rmb();	/* read status only after the NIC's ownership flip */
		status = le32_to_cpu(tp->RxDescArray[entry].status);

		if (status & OWNbit)
			break;
		if (status & RxRES) {
			printk(KERN_INFO "%s: Rx ERROR!!!\n", dev->name);
			tp->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				tp->stats.rx_length_errors++;
			if (status & RxCRC)
				tp->stats.rx_crc_errors++;
		} else {
			struct RxDesc *desc = tp->RxDescArray + entry;
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			/* low 13 status bits hold the frame length; strip
			 * the trailing 4-byte CRC */
			int pkt_size = (status & 0x00001FFF) - 4;
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le64_to_cpu(desc->addr), RX_BUF_SIZE,
				PCI_DMA_FROMDEVICE);

			if (rtl8169_try_rx_copy(&skb, pkt_size, desc, dev)) {
				/* no copy: the skb itself goes upstream,
				 * so unmap it and drop our reference */
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
			}

			pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
				   RX_BUF_SIZE, PCI_DMA_FROMDEVICE);

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);
			rtl8169_rx_skb(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_bytes += pkt_size;
			tp->stats.rx_packets++;
		}

		cur_rx++;
		rx_left--;
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (delta < 0) {
		printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
		delta = 0;
	}
	tp->dirty_rx += delta;

	/*
	 * FIXME: until there is periodic timer to try and refill the ring,
	 * a temporary shortage may definitely kill the Rx process.
	 * - disable the asic to try and avoid an overflow and kick it again
	 *   after refill ?
	 * - how do others driver handle this condition (Uh oh...).
	 */
	if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
		printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);

	return count;
}

/* The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 *
 * Loops at most max_interrupt_work times: acks the status (folding
 * RxFIFOOver into RxOverflow), bails on hotplug (0xFFFF), shared-irq
 * noise, or SYSErr (device is disabled), then either schedules NAPI
 * polling with rx/tx events masked, or services Rx/Tx inline.
 */
static irqreturn_t
rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int boguscnt = max_interrupt_work;
	void *ioaddr = tp->mmio_addr;
	int status = 0;
	int handled = 0;

	do {
		status = RTL_R16(IntrStatus);

		/* hotplug/major error/no more work/shared irq */
		if ((status == 0xFFFF) || !status)
			break;

		handled = 1;

		status &= tp->intr_mask;
		RTL_W16(IntrStatus,
			(status & RxFIFOOver) ? (status | RxOverflow) : status);

		if (!(status & rtl8169_intr_mask))
			break;

		if (unlikely(status & SYSErr)) {
			printk(KERN_ERR PFX "%s: PCI error (status: 0x%04x)."
			       " Device disabled.\n", dev->name, status);
			RTL_W8(ChipCmd, 0x00);
			RTL_W16(IntrMask, 0x0000);
			RTL_R16(IntrMask);	/* flush the posted write */
			break;
		}

		if (status & LinkChg)
			rtl8169_check_link_status(dev, tp, ioaddr);

#ifdef CONFIG_R8169_NAPI
		/* Mask rx/tx events until the poll routine re-enables them */
		RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
		tp->intr_mask = ~rtl8169_napi_event;

		if (likely(netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
		else {
			printk(KERN_INFO "%s: interrupt %x taken in poll\n",
			       dev->name, status);
		}
		break;
#else
		// Rx interrupt
		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
			rtl8169_rx_interrupt(dev, tp, ioaddr);
		}
		// Tx interrupt
		if (status & (TxOK | TxErr))
			rtl8169_tx_interrupt(dev, tp, ioaddr);
#endif

		boguscnt--;
	} while (boguscnt > 0);

	if (boguscnt <= 0) {
		printk(KERN_WARNING "%s: Too much work at interrupt!\n",
		       dev->name);
		/* Clear all interrupt sources. */
		RTL_W16(IntrStatus, 0xffff);
	}

	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_R8169_NAPI
/*
 * NAPI poll routine: services Rx (budgeted) and Tx reclaim, and when all
 * pending work is done (or the device went down) completes the poll and
 * unmasks the interrupt events the irq handler disabled.
 * Returns nonzero while more work remains.
 */
static int rtl8169_poll(struct net_device *dev, int *budget)
{
	unsigned int work_done, work_to_do = min(*budget, dev->quota);
	struct rtl8169_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;

	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
	rtl8169_tx_interrupt(dev, tp, ioaddr);

	*budget -= work_done;
	dev->quota -= work_done;

	if ((work_done < work_to_do) || !netif_running(dev)) {
		netif_rx_complete(dev);
		tp->intr_mask = 0xffff;
		/*
		 * 20040426: the barrier is not strictly required but the
		 * behavior of the irq handler could be less predictable
		 * without it. Btw, the lack of flush for the posted pci
		 * write is safe - FR
		 */
		smp_wmb();
		RTL_W16(IntrMask, rtl8169_intr_mask);
	}

	return (work_done >= work_to_do);
}
#endif

/*
 * Bring the interface down: stop the queue and timer, quiesce the chip
 * and mask interrupts under the lock, fold the missed-frame counter into
 * the stats, release the irq, and free both descriptor rings.
 */
static int
rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	void *ioaddr = tp->mmio_addr;

	netif_stop_queue(dev);

	rtl8169_delete_timer(dev);

	spin_lock_irq(&tp->lock);

	/* Stop the chip's Tx and Rx DMA processes. */
	RTL_W8(ChipCmd, 0x00);

	/* Disable interrupts by clearing the interrupt mask. */
	RTL_W16(IntrMask, 0x0000);

	/* Update the error counts. */
	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	return 0;
}

/*
 * Program the receive filter from dev->flags and the multicast list:
 * promiscuous accepts everything, an oversized/ALLMULTI list accepts all
 * multicasts, otherwise a 64-bit hash filter is built from the upper 6
 * CRC bits of each multicast address.  Registers are written under the
 * lock, preserving the chip-specific bits RxConfigMask selects.
 */
static void
rtl8169_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	tmp = rtl8169_rx_config | rx_mode |
	      (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);

	RTL_W32(RxConfig, tmp);
	RTL_W32(MAR0 + 0, mc_filter[0]);
	RTL_W32(MAR0 + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}

/**
 *  rtl8169_get_stats - Get rtl8169 read/write statistics
 *  @dev: The Ethernet Device to get statistics for
 *
 *  Get TX/RX statistics for rtl8169
 */
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	unsigned long flags;

	/* The hardware missed counter is only readable while the
	 * interface is up; fold it into the software stats and reset it. */
	if (netif_running(dev)) {
		spin_lock_irqsave(&tp->lock, flags);
		tp->stats.rx_missed_errors += RTL_R32(RxMissed);
		RTL_W32(RxMissed, 0);
		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return &tp->stats;
}

/* PCI driver glue: probe/remove plus optional power-management hooks. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl8169_init_one,
	.remove		= __devexit_p(rtl8169_remove_one),
#ifdef CONFIG_PM
	.suspend	= rtl8169_suspend,
	.resume		= rtl8169_resume,
#endif
};

/* Module entry point: register the PCI driver. */
static int __init
rtl8169_init_module(void)
{
	return pci_module_init(&rtl8169_pci_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit
rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}

module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -