📄 ns83820.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
	return netdev_priv(dev);
}

#define __kick_rx(dev)	writel(CR_RXE, dev->base + CR)

static inline void kick_rx(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	dprintk("kick_rx: maybe kicking\n");
	if (test_and_clear_bit(0, &dev->rx_info.idle)) {
		dprintk("actually kicking\n");
		writel(dev->rx_info.phy_descs +
			(4 * DESC_SIZE * dev->rx_info.next_rx),
		       dev->base + RXDP);
		if (dev->rx_info.next_rx == dev->rx_info.next_empty)
			printk(KERN_DEBUG "%s: uh-oh: next_rx == next_empty???\n",
				ndev->name);
		__kick_rx(dev);
	}
}

//free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC
#define start_tx_okay(dev)	\
	(((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE)

#ifdef NS83820_VLAN_ACCEL_SUPPORT
static void ns83820_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ns83820 *dev = PRIV(ndev);

	spin_lock_irq(&dev->misc_lock);
	spin_lock(&dev->tx_lock);
	dev->vlgrp = grp;
	spin_unlock(&dev->tx_lock);
	spin_unlock_irq(&dev->misc_lock);
}
#endif

/* Packet Receiver
 *
 * The hardware supports linked lists of receive descriptors for
 * which ownership is transfered back and forth by means of an
 * ownership bit.  While the hardware does support the use of a
 * ring for receive descriptors, we only make use of a chain in
 * an attempt to reduce bus traffic under heavy load scenarios.
 * This will also make bugs a bit more obvious.  The current code
 * only makes use of a single rx chain; I hope to implement
 * priority based rx for version 1.0.  Goal: even under overload
 * conditions, still route realtime traffic with as low jitter as
 * possible.
 */
static inline void build_rx_desc(struct ns83820 *dev, __le32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts)
{
	desc_addr_set(desc + DESC_LINK, link);
	desc_addr_set(desc + DESC_BUFPTR, buf);
	desc[DESC_EXTSTS] = cpu_to_le32(extsts);
	mb();
	desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
}

#define nr_rx_empty(dev) ((NR_RX_DESC-2 + dev->rx_info.next_rx - dev->rx_info.next_empty) % NR_RX_DESC)

static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
{
	unsigned next_empty;
	u32 cmdsts;
	__le32 *sg;
	dma_addr_t buf;

	next_empty = dev->rx_info.next_empty;

	/* don't overrun last rx marker */
	if (unlikely(nr_rx_empty(dev) <= 2)) {
		kfree_skb(skb);
		return 1;
	}

#if 0
	dprintk("next_empty[%d] nr_used[%d] next_rx[%d]\n",
		dev->rx_info.next_empty,
		dev->rx_info.nr_used,
		dev->rx_info.next_rx
		);
#endif

	sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
	BUG_ON(NULL != dev->rx_info.skbs[next_empty]);
	dev->rx_info.skbs[next_empty] = skb;

	dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;

	cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
	buf = pci_map_single(dev->pci_dev, skb->data,
			     REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
	build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
	/* update link of previous rx */
	if (likely(next_empty != dev->rx_info.next_rx))
		dev->rx_info.descs[((NR_RX_DESC + next_empty - 1) % NR_RX_DESC) * DESC_SIZE] = cpu_to_le32(dev->rx_info.phy_descs + (next_empty * DESC_SIZE * 4));

	return 0;
}

static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
{
	struct ns83820 *dev = PRIV(ndev);
	unsigned i;
	unsigned long flags = 0;

	if (unlikely(nr_rx_empty(dev) <= 2))
		return 0;

	dprintk("rx_refill(%p)\n", ndev);
	if (gfp == GFP_ATOMIC)
		spin_lock_irqsave(&dev->rx_info.lock, flags);
	for (i=0; i<NR_RX_DESC; i++) {
		struct sk_buff *skb;
		long res;
		/* extra 16 bytes for alignment */
		skb = __dev_alloc_skb(REAL_RX_BUF_SIZE+16, gfp);
		if (unlikely(!skb))
			break;

		res = (long)skb->data & 0xf;
		res = 0x10 - res;
		res &= 0xf;
		skb_reserve(skb, res);

		if (gfp != GFP_ATOMIC)
			spin_lock_irqsave(&dev->rx_info.lock, flags);
		res = ns83820_add_rx_skb(dev, skb);
		if (gfp != GFP_ATOMIC)
			spin_unlock_irqrestore(&dev->rx_info.lock, flags);
		if (res) {
			i = 1;
			break;
		}
	}
	if (gfp == GFP_ATOMIC)
		spin_unlock_irqrestore(&dev->rx_info.lock, flags);

	return i ? 0 : -ENOMEM;
}

static void FASTCALL(rx_refill_atomic(struct net_device *ndev));
static void fastcall rx_refill_atomic(struct net_device *ndev)
{
	rx_refill(ndev, GFP_ATOMIC);
}

/* REFILL */
static inline void queue_refill(struct work_struct *work)
{
	struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
	struct net_device *ndev = dev->ndev;

	rx_refill(ndev, GFP_KERNEL);
	if (dev->rx_info.up)
		kick_rx(ndev);
}

static inline void clear_rx_desc(struct ns83820 *dev, unsigned i)
{
	build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0);
}

static void FASTCALL(phy_intr(struct net_device *ndev));
static void fastcall phy_intr(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
	u32 cfg, new_cfg;
	u32 tbisr, tanar, tanlpar;
	int speed, fullduplex, newlinkstate;

	cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;

	if (dev->CFG_cache & CFG_TBI_EN) {
		/* we have an optical transceiver */
		tbisr = readl(dev->base + TBISR);
		tanar = readl(dev->base + TANAR);
		tanlpar = readl(dev->base + TANLPAR);
		dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n",
			tbisr, tanar, tanlpar);

		if ( (fullduplex = (tanlpar & TANAR_FULL_DUP)
		      && (tanar & TANAR_FULL_DUP)) ) {
			/* both of us are full duplex */
			writel(readl(dev->base + TXCFG)
			       | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
			       dev->base + RXCFG);
			/* Light up full duplex LED */
			writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
			       dev->base + GPIOR);
		} else if(((tanlpar & TANAR_HALF_DUP)
			   && (tanar & TANAR_HALF_DUP))
			|| ((tanlpar & TANAR_FULL_DUP)
			    && (tanar & TANAR_HALF_DUP))
			|| ((tanlpar & TANAR_HALF_DUP)
			    && (tanar & TANAR_FULL_DUP))) {
			/* one or both of us are half duplex */
			writel((readl(dev->base + TXCFG)
				& ~(TXCFG_CSI | TXCFG_HBI)) | TXCFG_ATP,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD,
			       dev->base + RXCFG);
			/* Turn off full duplex LED */
			writel(readl(dev->base + GPIOR) & ~GPIOR_GP1_OUT,
			       dev->base + GPIOR);
		}

		speed = 4; /* 1000F */

	} else {
		/* we have a copper transceiver */
		new_cfg = dev->CFG_cache & ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS);

		if (cfg & CFG_SPDSTS1)
			new_cfg |= CFG_MODE_1000;
		else
			new_cfg &= ~CFG_MODE_1000;

		speed = ((cfg / CFG_SPDSTS0) & 3);
		fullduplex = (cfg & CFG_DUPSTS);

		if (fullduplex) {
			new_cfg |= CFG_SB;
			writel(readl(dev->base + TXCFG)
					| TXCFG_CSI | TXCFG_HBI,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
			       dev->base + RXCFG);
		} else {
			writel(readl(dev->base + TXCFG)
					& ~(TXCFG_CSI | TXCFG_HBI),
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD),
			       dev->base + RXCFG);
		}

		if ((cfg & CFG_LNKSTS) &&
		    ((new_cfg ^ dev->CFG_cache) != 0)) {
			writel(new_cfg, dev->base + CFG);
			dev->CFG_cache = new_cfg;
		}

		dev->CFG_cache &= ~CFG_SPDSTS;
		dev->CFG_cache |= cfg & CFG_SPDSTS;
	}

	newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN;

	if (newlinkstate & LINK_UP
	    && dev->linkstate != newlinkstate) {
		netif_start_queue(ndev);
		netif_wake_queue(ndev);
		printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n",
			ndev->name,
			speeds[speed],
			fullduplex ? "full" : "half");
	} else if (newlinkstate & LINK_DOWN
		   && dev->linkstate != newlinkstate) {
		netif_stop_queue(ndev);
		printk(KERN_INFO "%s: link now down.\n", ndev->name);
	}

	dev->linkstate = newlinkstate;
}

static int ns83820_setup_rx(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	unsigned i;
	int ret;

	dprintk("ns83820_setup_rx(%p)\n", ndev);

	dev->rx_info.idle = 1;
	dev->rx_info.next_rx = 0;
	dev->rx_info.next_rx_desc = dev->rx_info.descs;
	dev->rx_info.next_empty = 0;

	for (i=0; i<NR_RX_DESC; i++)
		clear_rx_desc(dev, i);

	writel(0, dev->base + RXDP_HI);
	writel(dev->rx_info.phy_descs, dev->base + RXDP);

	ret = rx_refill(ndev, GFP_KERNEL);
	if (!ret) {
		dprintk("starting receiver\n");
		/* prevent the interrupt handler from stomping on us */
		spin_lock_irq(&dev->rx_info.lock);

		writel(0x0001, dev->base + CCSR);
		writel(0, dev->base + RFCR);
		writel(0x7fc00000, dev->base + RFCR);
		writel(0xffc00000, dev->base + RFCR);

		dev->rx_info.up = 1;

		phy_intr(ndev);

		/* Okay, let it rip */
		spin_lock_irq(&dev->misc_lock);
		dev->IMR_cache |= ISR_PHY;
		dev->IMR_cache |= ISR_RXRCMP;
		//dev->IMR_cache |= ISR_RXERR;
		//dev->IMR_cache |= ISR_RXOK;
		dev->IMR_cache |= ISR_RXORN;
		dev->IMR_cache |= ISR_RXSOVR;
		dev->IMR_cache |= ISR_RXDESC;
		dev->IMR_cache |= ISR_RXIDLE;
		dev->IMR_cache |= ISR_TXDESC;
		dev->IMR_cache |= ISR_TXIDLE;

		writel(dev->IMR_cache, dev->base + IMR);
		writel(1, dev->base + IER);
		spin_unlock(&dev->misc_lock);

		kick_rx(ndev);

		spin_unlock_irq(&dev->rx_info.lock);
	}
	return ret;
}

static void ns83820_cleanup_rx(struct ns83820 *dev)
{
	unsigned i;
	unsigned long flags;

	dprintk("ns83820_cleanup_rx(%p)\n", dev);

	/* disable receive interrupts */
	spin_lock_irqsave(&dev->misc_lock, flags);
	dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR | ISR_RXEARLY | ISR_RXIDLE);
	writel(dev->IMR_cache, dev->base + IMR);
	spin_unlock_irqrestore(&dev->misc_lock, flags);

	/* synchronize with the interrupt handler and kill it */
	dev->rx_info.up = 0;
	synchronize_irq(dev->pci_dev->irq);

	/* touch the pci bus... */
	readl(dev->base + IMR);

	/* assumes the transmitter is already disabled and reset */
	writel(0, dev->base + RXDP_HI);
	writel(0, dev->base + RXDP);

	for (i=0; i<NR_RX_DESC; i++) {
		struct sk_buff *skb = dev->rx_info.skbs[i];
		dev->rx_info.skbs[i] = NULL;
		clear_rx_desc(dev, i);
		if (skb)
			kfree_skb(skb);
	}
}

static void FASTCALL(ns83820_rx_kick(struct net_device *ndev));
static void fastcall ns83820_rx_kick(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	/*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ {
		if (dev->rx_info.up) {
			rx_refill_atomic(ndev);
			kick_rx(ndev);
		}
	}

	if (dev->rx_info.up && nr_rx_empty(dev) > NR_RX_DESC*3/4)
		schedule_work(&dev->tq_refill);
	else
		kick_rx(ndev);
	if (dev->rx_info.idle)
		printk(KERN_DEBUG "%s: BAD\n", ndev->name);
}

/* rx_irq
 *
 */
static void FASTCALL(rx_irq(struct net_device *ndev));
static void fastcall rx_irq(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	struct rx_info *info = &dev->rx_info;
	unsigned next_rx;
	int rx_rc, len;
	u32 cmdsts;
	__le32 *desc;
	unsigned long flags;
	int nr = 0;

	dprintk("rx_irq(%p)\n", ndev);
	dprintk("rxdp: %08x, descs: %08lx next_rx[%d]: %p next_empty[%d]: %p\n",
		readl(dev->base + RXDP),
		(long)(dev->rx_info.phy_descs),
		(int)dev->rx_info.next_rx,
		(dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)),
		(int)dev->rx_info.next_empty,
		(dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty))
		);

	spin_lock_irqsave(&info->lock, flags);
	if (!info->up)
		goto out;

	dprintk("walking descs\n");
	next_rx = info->next_rx;
	desc = info->next_rx_desc;
	while ((CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) &&
	       (cmdsts != CMDSTS_OWN)) {
		struct sk_buff *skb;
		u32 extsts = le32_to_cpu(desc[DESC_EXTSTS]);
		dma_addr_t bufptr = desc_addr_get(desc + DESC_BUFPTR);

		dprintk("cmdsts: %08x\n", cmdsts);
		dprintk("link: %08x\n", cpu_to_le32(desc[DESC_LINK]));
		dprintk("extsts: %08x\n", extsts);

		skb = info->skbs[next_rx];
		info->skbs[next_rx] = NULL;
		info->next_rx = (next_rx + 1) % NR_RX_DESC;

		mb();
		clear_rx_desc(dev, next_rx);

		pci_unmap_single(dev->pci_dev, bufptr,
				 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		len = cmdsts & CMDSTS_LEN_MASK;
#ifdef NS83820_VLAN_ACCEL_SUPPORT
		/* NH: As was mentioned below, this chip is kinda
		 * brain dead about vlan tag stripping.  Frames
		 * that are 64 bytes with a vlan header appended
		 * like arp frames, or pings, are flagged as Runts
		 * when the tag is stripped and hardware.  This
		 * also means that the OK bit in the descriptor
		 * is cleared when the frame comes in so we have
		 * to do a specific length check here to make sure
		 * the frame would have been ok, had we not stripped
		 * the tag.
		 */
		if (likely((CMDSTS_OK & cmdsts) ||
			((cmdsts & CMDSTS_RUNT) && len >= 56))) {
#else
		if (likely(CMDSTS_OK & cmdsts)) {
#endif
			skb_put(skb, len);
			if (unlikely(!skb))
				goto netdev_mangle_me_harder_failed;
			if (cmdsts & CMDSTS_DEST_MULTI)
				dev->stats.multicast ++;
			dev->stats.rx_packets ++;
			dev->stats.rx_bytes += len;
			if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}
			skb->protocol = eth_type_trans(skb, ndev);
#ifdef NS83820_VLAN_ACCEL_SUPPORT
			if(extsts & EXTSTS_VPKT) {
				unsigned short tag;
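The free-descriptor bookkeeping in the listing above (the "//free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC" comment, start_tx_okay() and nr_rx_empty()) is plain modular ring arithmetic: the distance from the producer index back around to the consumer index, minus a two-descriptor reserve so the producer never catches up with the descriptor the hardware may still own (the "don't overrun last rx marker" check in ns83820_add_rx_skb). A minimal standalone C sketch of that arithmetic follows; RING_SIZE, ring_free and the index values are made-up illustration names, not symbols from the driver.

/*
 * Standalone sketch (not part of ns83820.c): the free-slot arithmetic
 * behind start_tx_okay() and nr_rx_empty(), with made-up values.
 */
#include <stdio.h>

#define RING_SIZE 64	/* stand-in for NR_TX_DESC / NR_RX_DESC */

/*
 * Slots the producer may still fill: distance from the producer index
 * back around to the consumer index, minus a reserve of two descriptors.
 */
static unsigned ring_free(unsigned consumer, unsigned producer)
{
	return (RING_SIZE - 2 + consumer - producer) % RING_SIZE;
}

int main(void)
{
	/* consumer (tx_done_idx / next_rx) = 5, producer (tx_free_idx / next_empty) = 60:
	 * (62 + 5 - 60) % 64 = 7 slots left before the refill/transmit path must stop. */
	printf("free: %u\n", ring_free(5, 60));

	/* producer == consumer means the ring is empty: 62 usable slots. */
	printf("free: %u\n", ring_free(0, 0));
	return 0;
}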
