⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ns83820.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
				/* (chunk starts mid-function: this is the tail of the rx
				 * receive path, rx_irq) deliver with the hw-extracted tag */
				tag = ntohs(extsts & EXTSTS_VTG_MASK);
				rx_rc = vlan_hwaccel_rx(skb,dev->vlgrp,tag);
			} else {
				rx_rc = netif_rx(skb);
			}
#else
			/* VLAN acceleration not compiled in: plain receive */
			rx_rc = netif_rx(skb);
#endif
			if (NET_RX_DROP == rx_rc) {
netdev_mangle_me_harder_failed:
				/* stack dropped the packet (or mangling above failed) */
				dev->stats.rx_dropped ++;
			}
		} else {
			/* error completion: throw the buffer away */
			kfree_skb(skb);
		}
		nr++;
		next_rx = info->next_rx;
		desc = info->descs + (DESC_SIZE * next_rx);
	}
	/* remember where the hardware will complete the next receive */
	info->next_rx = next_rx;
	info->next_rx_desc = info->descs + (DESC_SIZE * next_rx);
out:
	/* NOTE(review): dead debug code — the "0 &&" disables it permanently */
	if (0 && !nr) {
		Dprintk("dazed: cmdsts_f: %08x\n", cmdsts);
	}
	spin_unlock_irqrestore(&info->lock, flags);
}

/* Deferred rx processing (the unsigned-long device argument suggests a
 * tasklet callback — TODO confirm against the tasklet_init caller).
 * Drain the rx ring, re-enable the rx-descriptor interrupt, then drain
 * once more to close the window where packets arrived while ISR_RXDESC
 * was masked, and finally replenish the ring. */
static void rx_action(unsigned long _dev)
{
	struct net_device *ndev = (void *)_dev;
	struct ns83820 *dev = PRIV(ndev);
	rx_irq(ndev);
	/* reprogram the interrupt holdoff register; "ihr" is file-scope
	 * state not visible in this chunk */
	writel(ihr, dev->base + IHR);

	/* IMR_cache is shared state: update it and the hardware IMR
	 * together under misc_lock */
	spin_lock_irq(&dev->misc_lock);
	dev->IMR_cache |= ISR_RXDESC;
	writel(dev->IMR_cache, dev->base + IMR);
	spin_unlock_irq(&dev->misc_lock);

	rx_irq(ndev);
	ns83820_rx_kick(ndev);
}

/* Packet Transmit code */

/* Tell the chip to (re)start its transmit state machine. */
static inline void kick_tx(struct ns83820 *dev)
{
	dprintk("kick_tx(%p): tx_idx=%d free_idx=%d\n",
		dev, dev->tx_idx, dev->tx_free_idx);
	writel(CR_TXE, dev->base + CR);
}

/* No spinlock needed on the transmit irq path as the interrupt handler is
 * serialized.
*/

/* Reap completed transmit descriptors.  Runs from the interrupt path
 * (no extra locking — see the comment above): walk the ring from
 * tx_done_idx until we catch up with tx_free_idx or hit a descriptor
 * the hardware still owns (CMDSTS_OWN), unmapping DMA, freeing skbs
 * and accounting stats as we go. */
static void do_tx_done(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	u32 cmdsts, tx_done_idx;
	__le32 *desc;

	dprintk("do_tx_done(%p)\n", ndev);

	tx_done_idx = dev->tx_done_idx;
	desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);

	dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
		tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
	/* note: the loop condition loads cmdsts as a side effect */
	while ((tx_done_idx != dev->tx_free_idx) &&
	       !(CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) ) {
		struct sk_buff *skb;
		unsigned len;
		dma_addr_t addr;

		if (cmdsts & CMDSTS_ERR)
			dev->stats.tx_errors ++;
		/* NOTE(review): the two CMDSTS_OK tests could be merged */
		if (cmdsts & CMDSTS_OK)
			dev->stats.tx_packets ++;
		if (cmdsts & CMDSTS_OK)
			dev->stats.tx_bytes += cmdsts & 0xffff;

		dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
			tx_done_idx, dev->tx_free_idx, cmdsts);
		/* a NULL slot means this descriptor carried a page fragment
		 * rather than the skb head (see ns83820_hard_start_xmit,
		 * which records the skb only against the last descriptor) */
		skb = dev->tx_skbs[tx_done_idx];
		dev->tx_skbs[tx_done_idx] = NULL;
		dprintk("done(%p)\n", skb);

		len = cmdsts & CMDSTS_LEN_MASK;
		addr = desc_addr_get(desc + DESC_BUFPTR);
		if (skb) {
			pci_unmap_single(dev->pci_dev,
					addr,
					len,
					PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			atomic_dec(&dev->nr_tx_skbs);
		} else
			pci_unmap_page(dev->pci_dev,
					addr,
					len,
					PCI_DMA_TODEVICE);

		tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
		dev->tx_done_idx = tx_done_idx;
		/* release the descriptor; the full barrier orders the clear
		 * before any subsequent reuse of the slot */
		desc[DESC_CMDSTS] = cpu_to_le32(0);
		mb();
		desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
	}

	/* Allow network stack to resume queueing packets after we've
	 * finished transmitting at least 1/4 of the packets in the queue.
	 */
	if (netif_queue_stopped(ndev) && start_tx_okay(dev)) {
		dprintk("start_queue(%p)\n", ndev);
		netif_start_queue(ndev);
		netif_wake_queue(ndev);
	}
}

/* Release every outstanding tx skb and wipe the descriptor ring.
 * NOTE(review): slots holding page fragments (tx_skbs[i] == NULL in
 * do_tx_done's scheme) are skipped here, so their DMA mappings are
 * never released on this path — confirm that is acceptable for
 * teardown. */
static void ns83820_cleanup_tx(struct ns83820 *dev)
{
	unsigned i;

	for (i=0; i<NR_TX_DESC; i++) {
		struct sk_buff *skb = dev->tx_skbs[i];
		dev->tx_skbs[i] = NULL;
		if (skb) {
			__le32 *desc = dev->tx_descs + (i * DESC_SIZE);
			pci_unmap_single(dev->pci_dev,
					desc_addr_get(desc + DESC_BUFPTR),
					le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
					PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			atomic_dec(&dev->nr_tx_skbs);
		}
	}

	/* DESC_SIZE is counted in 32-bit words, hence the "* 4" for bytes */
	memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4);
}

/* transmit routine.  This code relies on the network layer serializing
 * its calls in, but will run happily in parallel with the interrupt
 * handler.  This code currently has provisions for fragmenting tx buffers
 * while trying to track down a bug in either the zero copy code or
 * the tx fifo (hence the MAX_FRAG_LEN).
 */
static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	u32 free_idx, cmdsts, extsts;
	int nr_free, nr_frags;
	unsigned tx_done_idx, last_idx;
	dma_addr_t buf;
	unsigned len;
	skb_frag_t *frag;
	int stopped = 0;
	int do_intr = 0;
	volatile __le32 *first_desc;

	dprintk("ns83820_hard_start_xmit\n");

	nr_frags = skb_shinfo(skb)->nr_frags;
again:
	/* CFG_LNKSTS set in CFG_cache is treated as "cannot transmit"
	 * here (polarity handled elsewhere — TODO confirm); returning 1
	 * asks the stack to requeue (pre-NETDEV_TX_BUSY convention) */
	if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
		netif_stop_queue(ndev);
		if (unlikely(dev->CFG_cache & CFG_LNKSTS))
			return 1;
		netif_start_queue(ndev);
	}

	last_idx = free_idx = dev->tx_free_idx;
	tx_done_idx = dev->tx_done_idx;
	/* free descriptors, keeping slack so free_idx never catches up
	 * with tx_done_idx */
	nr_free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC;
	nr_free -= 1;
	if (nr_free <= nr_frags) {
		dprintk("stop_queue - not enough(%p)\n", ndev);
		netif_stop_queue(ndev);

		/* Check again: we may have raced with a tx done irq */
		if (dev->tx_done_idx != tx_done_idx) {
			dprintk("restart queue(%p)\n", ndev);
			netif_start_queue(ndev);
			goto again;
		}
		return 1;
	}

	/* request a completion interrupt roughly every NR_TX_DESC/4 packets */
	if (free_idx == dev->tx_intr_idx) {
		do_intr
= 1;
		dev->tx_intr_idx = (dev->tx_intr_idx + NR_TX_DESC/4) % NR_TX_DESC;
	}

	nr_free -= nr_frags;
	if (nr_free < MIN_TX_DESC_FREE) {
		dprintk("stop_queue - last entry(%p)\n", ndev);
		netif_stop_queue(ndev);
		stopped = 1;
	}

	frag = skb_shinfo(skb)->frags;
	if (!nr_frags)
		frag = NULL;
	/* build the per-packet extended status word: checksum-offload
	 * request flags first */
	extsts = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		extsts |= EXTSTS_IPPKT;
		if (IPPROTO_TCP == ip_hdr(skb)->protocol)
			extsts |= EXTSTS_TCPPKT;
		else if (IPPROTO_UDP == ip_hdr(skb)->protocol)
			extsts |= EXTSTS_UDPPKT;
	}

#ifdef NS83820_VLAN_ACCEL_SUPPORT
	if(vlan_tx_tag_present(skb)) {
		/* fetch the vlan tag info out of the
		 * ancillary data if the vlan code
		 * is using hw vlan acceleration
		 */
		short tag = vlan_tx_tag_get(skb);
		extsts |= (EXTSTS_VPKT | htons(tag));
	}
#endif

	/* map the linear part only; fragments are mapped in the loop below */
	len = skb->len;
	if (nr_frags)
		len -= skb->data_len;

	buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Chain one descriptor per fragment.  Every descriptor except the
	 * first gets CMDSTS_OWN immediately; the first is flipped last,
	 * under tx_lock below, so the chip never sees a half-built chain. */
	first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
	for (;;) {
		volatile __le32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);

		dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
			(unsigned long long)buf);
		last_idx = free_idx;
		free_idx = (free_idx + 1) % NR_TX_DESC;
		desc[DESC_LINK] = cpu_to_le32(dev->tx_phy_descs + (free_idx * DESC_SIZE * 4));
		desc_addr_set(desc + DESC_BUFPTR, buf);
		desc[DESC_EXTSTS] = cpu_to_le32(extsts);

		/* more fragments coming -> CMDSTS_MORE; on the final
		 * descriptor, optionally request a completion interrupt */
		cmdsts = ((nr_frags) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
		cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN;
		cmdsts |= len;
		desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);

		if (!nr_frags)
			break;

		buf = pci_map_page(dev->pci_dev, frag->page,
				   frag->page_offset,
				   frag->size, PCI_DMA_TODEVICE);
		dprintk("frag: buf=%08Lx  page=%08lx offset=%08lx\n",
			(long long)buf, (long) page_to_pfn(frag->page),
			frag->page_offset);
		len = frag->size;
		frag++;
		nr_frags--;
	}
	dprintk("done pkt\n");

	spin_lock_irq(&dev->tx_lock);
	/* the skb is recorded against the LAST descriptor of the chain
	 * (do_tx_done relies on NULL slots meaning "fragment") */
	dev->tx_skbs[last_idx] = skb;
	/* hand the whole chain to the hardware in one store */
	first_desc[DESC_CMDSTS] |= cpu_to_le32(CMDSTS_OWN);
	dev->tx_free_idx = free_idx;
	atomic_inc(&dev->nr_tx_skbs);
	spin_unlock_irq(&dev->tx_lock);

	kick_tx(dev);

	/* Check again: we may have raced with a tx done irq */
	if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
		netif_start_queue(ndev);

	/* set the transmit start time to catch transmit timeouts */
	ndev->trans_start = jiffies;
	return 0;
}

/* Fold the chip's hardware statistics counters into dev->stats.  The
 * one caller visible in this chunk (ns83820_get_stats) holds misc_lock
 * around the call. */
static void ns83820_update_stats(struct ns83820 *dev)
{
	u8 __iomem *base = dev->base;

	/* the DP83820 will freeze counters, so we need to read all of them */
	dev->stats.rx_errors		+= readl(base + 0x60) & 0xffff;
	dev->stats.rx_crc_errors	+= readl(base + 0x64) & 0xffff;
	dev->stats.rx_missed_errors	+= readl(base + 0x68) & 0xffff;
	dev->stats.rx_frame_errors	+= readl(base + 0x6c) & 0xffff;
	/*dev->stats.rx_symbol_errors +=*/ readl(base + 0x70);
	dev->stats.rx_length_errors	+= readl(base + 0x74) & 0xffff;
	dev->stats.rx_length_errors	+= readl(base + 0x78) & 0xffff;
	/*dev->stats.rx_badopcode_errors += */ readl(base + 0x7c);
	/*dev->stats.rx_pause_count += */  readl(base + 0x80);
	/*dev->stats.tx_pause_count += */  readl(base + 0x84);
	dev->stats.tx_carrier_errors	+= readl(base + 0x88) & 0xff;
}

/* net_device get_stats hook: refresh from hardware under misc_lock and
 * hand back the cached structure. */
static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);

	/* somewhat overkill */
	spin_lock_irq(&dev->misc_lock);
	ns83820_update_stats(dev);
	spin_unlock_irq(&dev->misc_lock);
	return &dev->stats;
}

/* Let ethtool retrieve info */
static int ns83820_get_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
	struct ns83820 *dev = PRIV(ndev);
	u32 cfg, tanar, tbicr;
	int have_optical = 0;
	int fullduplex   = 0;

	/*
	 * Here's the list of available ethtool commands from other drivers:
	 *	cmd->advertising =
	 *	cmd->speed =
	 *	cmd->duplex =
	 *	cmd->port = 0;
	 *	cmd->phy_address =
	 *	cmd->transceiver = 0;
	 *	cmd->autoneg =
	 *	cmd->maxtxpkt = 0;
	 *	cmd->maxrxpkt = 0;
	 */

	/* read current configuration; the SPDSTS_POLARITY xor normalizes
	 * the status bits (same idiom as ns83820_get_link) */
	cfg   = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
	tanar = readl(dev->base + TANAR);
	tbicr = readl(dev->base + TBICR);

	/* NOTE(review): both branches compute fullduplex identically from
	 * CFG_DUPSTS; the optical branch likely meant to consult TANAR as
	 * ns83820_set_settings does — confirm */
	if (dev->CFG_cache & CFG_TBI_EN) {
		/* we have an optical interface */
		have_optical = 1;
		fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;

	} else {
		/* We have copper */
		fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
	}

	cmd->supported = SUPPORTED_Autoneg;

	/* we have optical interface */
	if (dev->CFG_cache & CFG_TBI_EN) {
		cmd->supported |= SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full |
					SUPPORTED_FIBRE;
		cmd->port       = PORT_FIBRE;
	} /* TODO: else copper related support */

	cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
	/* dividing by CFG_SPDSTS0 shifts the speed-status field down to
	 * bits 1:0 ("/" binds tighter than "&"); assumes CFG_SPDSTS0 is
	 * the low bit of the two-bit field — TODO confirm */
	switch (cfg / CFG_SPDSTS0 & 3) {
	case 2:
		cmd->speed = SPEED_1000;
		break;
	case 1:
		cmd->speed = SPEED_100;
		break;
	default:
		cmd->speed = SPEED_10;
		break;
	}
	cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 1 : 0;

	return 0;
}

/* Let ethtool change settings */
static int ns83820_set_settings(struct net_device *ndev,
				struct ethtool_cmd *cmd)
{
	struct ns83820 *dev = PRIV(ndev);
	u32 cfg, tanar;
	int have_optical = 0;
	int fullduplex   = 0;

	/* read current configuration */
	cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
	tanar = readl(dev->base + TANAR);

	if (dev->CFG_cache & CFG_TBI_EN) {
		/* we have optical */
		have_optical = 1;
		fullduplex   = (tanar & TANAR_FULL_DUP);

	} else {
		/* we have copper */
		fullduplex = cfg & CFG_DUPSTS;
	}

	spin_lock_irq(&dev->misc_lock);
	spin_lock(&dev->tx_lock);

	/* Set duplex.
	 * NOTE(review): fullduplex holds a raw register mask here while
	 * cmd->duplex is DUPLEX_HALF/DUPLEX_FULL (0/1), so this inequality
	 * is nearly always true when the mask bit isn't exactly 1 — verify
	 * against the register bit definitions */
	if (cmd->duplex != fullduplex) {
		if (have_optical) {
			/* set full duplex */
			if (cmd->duplex == DUPLEX_FULL) {
				/* force full duplex */
				writel(readl(dev->base + TXCFG)
					| TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
					dev->base + TXCFG);
				writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
					dev->base + RXCFG);
				/* Light up full duplex LED */
				writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
					dev->base + GPIOR);
			} else {
				/* TODO: set half duplex */
			}

		} else {
			/* we have copper */
			/* TODO: Set duplex for copper cards */
		}
		printk(KERN_INFO "%s: Duplex set via ethtool\n",
		ndev->name);
	}

	/* Set autonegotiation.
	 * NOTE(review): "if (1)" is a dead conditional left over from
	 * development */
	if (1) {
		if (cmd->autoneg == AUTONEG_ENABLE) {
			/* restart auto negotiation: pulse RESTART_AN, then
			 * leave autoneg enabled */
			writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN,
				dev->base + TBICR);
			writel(TBICR_MR_AN_ENABLE, dev->base + TBICR);
			dev->linkstate = LINK_AUTONEGOTIATE;

			printk(KERN_INFO "%s: autoneg enabled via ethtool\n",
				ndev->name);
		} else {
			/* disable auto negotiation */
			writel(0x00000000, dev->base + TBICR);
		}

		printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name,
				cmd->autoneg ?
"ENABLED" : "DISABLED");	}	phy_intr(ndev);	spin_unlock(&dev->tx_lock);	spin_unlock_irq(&dev->misc_lock);	return 0;}/* end ethtool get/set support -df */static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info){	struct ns83820 *dev = PRIV(ndev);	strcpy(info->driver, "ns83820");	strcpy(info->version, VERSION);	strcpy(info->bus_info, pci_name(dev->pci_dev));}static u32 ns83820_get_link(struct net_device *ndev){	struct ns83820 *dev = PRIV(ndev);	u32 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;	return cfg & CFG_LNKSTS ? 1 : 0;}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -