
ns83820.c

Linux Kernel 2.6.9 for OMAP1710
C source, page 1 of 4
	);
#endif
	sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
	if (unlikely(NULL != dev->rx_info.skbs[next_empty]))
		BUG();
	dev->rx_info.skbs[next_empty] = skb;

	dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
	cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
	buf = pci_map_single(dev->pci_dev, skb->tail,
			     REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
	build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
	/* update link of previous rx */
	if (likely(next_empty != dev->rx_info.next_rx))
		dev->rx_info.descs[((NR_RX_DESC + next_empty - 1) % NR_RX_DESC) * DESC_SIZE] =
			cpu_to_le32(dev->rx_info.phy_descs + (next_empty * DESC_SIZE * 4));

	return 0;
}

static inline int rx_refill(struct net_device *ndev, int gfp)
{
	struct ns83820 *dev = PRIV(ndev);
	unsigned i;
	unsigned long flags = 0;

	if (unlikely(nr_rx_empty(dev) <= 2))
		return 0;

	dprintk("rx_refill(%p)\n", ndev);
	if (gfp == GFP_ATOMIC)
		spin_lock_irqsave(&dev->rx_info.lock, flags);
	for (i=0; i<NR_RX_DESC; i++) {
		struct sk_buff *skb;
		long res;
		/* extra 16 bytes for alignment */
		skb = __dev_alloc_skb(REAL_RX_BUF_SIZE+16, gfp);
		if (unlikely(!skb))
			break;

		res = (long)skb->tail & 0xf;
		res = 0x10 - res;
		res &= 0xf;
		skb_reserve(skb, res);

		skb->dev = ndev;
		if (gfp != GFP_ATOMIC)
			spin_lock_irqsave(&dev->rx_info.lock, flags);
		res = ns83820_add_rx_skb(dev, skb);
		if (gfp != GFP_ATOMIC)
			spin_unlock_irqrestore(&dev->rx_info.lock, flags);
		if (res) {
			i = 1;
			break;
		}
	}
	if (gfp == GFP_ATOMIC)
		spin_unlock_irqrestore(&dev->rx_info.lock, flags);

	return i ? 0 : -ENOMEM;
}

static void FASTCALL(rx_refill_atomic(struct net_device *ndev));
static void fastcall rx_refill_atomic(struct net_device *ndev)
{
	rx_refill(ndev, GFP_ATOMIC);
}

/* REFILL */
static inline void queue_refill(void *_dev)
{
	struct net_device *ndev = _dev;
	struct ns83820 *dev = PRIV(ndev);

	rx_refill(ndev, GFP_KERNEL);
	if (dev->rx_info.up)
		kick_rx(ndev);
}

static inline void clear_rx_desc(struct ns83820 *dev, unsigned i)
{
	build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0);
}
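/*
 * Editor's sketch (not part of the original driver): the alignment
 * arithmetic used by rx_refill() above, isolated for clarity.  For a
 * pointer value p, (0x10 - (p & 0xf)) & 0xf is the number of bytes needed
 * to round p up to the next 16-byte boundary, and is 0 when p is already
 * aligned.  This is why rx_refill() over-allocates each skb by 16 bytes
 * and then skb_reserve()s the slack.  The helper name is hypothetical.
 */
static inline unsigned long demo_pad_to_16(unsigned long p)
{
	return (0x10 - (p & 0xf)) & 0xf;	/* 0..15 bytes of padding */
}
/* e.g. demo_pad_to_16(0x1000) == 0, demo_pad_to_16(0x1001) == 15,
 * demo_pad_to_16(0x100f) == 1 */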
static void FASTCALL(phy_intr(struct net_device *ndev));
static void fastcall phy_intr(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
	u32 cfg, new_cfg;
	u32 tbisr, tanar, tanlpar;
	int speed, fullduplex, newlinkstate;

	cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;

	if (dev->CFG_cache & CFG_TBI_EN) {
		/* we have an optical transceiver */
		tbisr = readl(dev->base + TBISR);
		tanar = readl(dev->base + TANAR);
		tanlpar = readl(dev->base + TANLPAR);
		dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n",
			tbisr, tanar, tanlpar);

		if ( (fullduplex = (tanlpar & TANAR_FULL_DUP)
		      && (tanar & TANAR_FULL_DUP)) ) {
			/* both of us are full duplex */
			writel(readl(dev->base + TXCFG)
			       | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
			       dev->base + RXCFG);
			/* Light up full duplex LED */
			writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
			       dev->base + GPIOR);
		} else if (((tanlpar & TANAR_HALF_DUP)
			    && (tanar & TANAR_HALF_DUP))
			   || ((tanlpar & TANAR_FULL_DUP)
			       && (tanar & TANAR_HALF_DUP))
			   || ((tanlpar & TANAR_HALF_DUP)
			       && (tanar & TANAR_FULL_DUP))) {
			/* one or both of us are half duplex */
			writel((readl(dev->base + TXCFG)
				& ~(TXCFG_CSI | TXCFG_HBI)) | TXCFG_ATP,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD,
			       dev->base + RXCFG);
			/* Turn off full duplex LED */
			writel(readl(dev->base + GPIOR) & ~GPIOR_GP1_OUT,
			       dev->base + GPIOR);
		}
		speed = 4; /* 1000F */
	} else {
		/* we have a copper transceiver */
		new_cfg = dev->CFG_cache & ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS);

		if (cfg & CFG_SPDSTS1)
			new_cfg |= CFG_MODE_1000;
		else
			new_cfg &= ~CFG_MODE_1000;

		speed = ((cfg / CFG_SPDSTS0) & 3);
		fullduplex = (cfg & CFG_DUPSTS);

		if (fullduplex)
			new_cfg |= CFG_SB;

		if ((cfg & CFG_LNKSTS) &&
		    ((new_cfg ^ dev->CFG_cache) & CFG_MODE_1000)) {
			writel(new_cfg, dev->base + CFG);
			dev->CFG_cache = new_cfg;
		}

		dev->CFG_cache &= ~CFG_SPDSTS;
		dev->CFG_cache |= cfg & CFG_SPDSTS;
	}

	newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN;

	if (newlinkstate & LINK_UP
	    && dev->linkstate != newlinkstate) {
		netif_start_queue(ndev);
		netif_wake_queue(ndev);
		printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n",
			ndev->name,
			speeds[speed],
			fullduplex ? "full" : "half");
	} else if (newlinkstate & LINK_DOWN
		   && dev->linkstate != newlinkstate) {
		netif_stop_queue(ndev);
		printk(KERN_INFO "%s: link now down.\n", ndev->name);
	}

	dev->linkstate = newlinkstate;
}

static int ns83820_setup_rx(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	unsigned i;
	int ret;

	dprintk("ns83820_setup_rx(%p)\n", ndev);

	dev->rx_info.idle = 1;
	dev->rx_info.next_rx = 0;
	dev->rx_info.next_rx_desc = dev->rx_info.descs;
	dev->rx_info.next_empty = 0;

	for (i=0; i<NR_RX_DESC; i++)
		clear_rx_desc(dev, i);

	writel(0, dev->base + RXDP_HI);
	writel(dev->rx_info.phy_descs, dev->base + RXDP);

	ret = rx_refill(ndev, GFP_KERNEL);
	if (!ret) {
		dprintk("starting receiver\n");
		/* prevent the interrupt handler from stomping on us */
		spin_lock_irq(&dev->rx_info.lock);

		writel(0x0001, dev->base + CCSR);
		writel(0, dev->base + RFCR);
		writel(0x7fc00000, dev->base + RFCR);
		writel(0xffc00000, dev->base + RFCR);

		dev->rx_info.up = 1;

		phy_intr(ndev);

		/* Okay, let it rip */
		spin_lock_irq(&dev->misc_lock);
		dev->IMR_cache |= ISR_PHY;
		dev->IMR_cache |= ISR_RXRCMP;
		//dev->IMR_cache |= ISR_RXERR;
		//dev->IMR_cache |= ISR_RXOK;
		dev->IMR_cache |= ISR_RXORN;
		dev->IMR_cache |= ISR_RXSOVR;
		dev->IMR_cache |= ISR_RXDESC;
		dev->IMR_cache |= ISR_RXIDLE;
		dev->IMR_cache |= ISR_TXDESC;
		dev->IMR_cache |= ISR_TXIDLE;

		writel(dev->IMR_cache, dev->base + IMR);
		writel(1, dev->base + IER);
		spin_unlock_irq(&dev->misc_lock);

		kick_rx(ndev);

		spin_unlock_irq(&dev->rx_info.lock);
	}
	return ret;
}
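/*
 * Editor's sketch (not part of the original driver): phy_intr() above
 * reprograms TXCFG, RXCFG and GPIOR with read-modify-write accesses of
 * the form writel(readl(reg) | bits, reg).  The hypothetical helpers
 * below name that pattern, assuming <asm/io.h>-style readl()/writel().
 */
static inline void demo_mmio_set_bits(void __iomem *reg, u32 bits)
{
	writel(readl(reg) | bits, reg);		/* read, OR in bits, write back */
}

static inline void demo_mmio_clear_bits(void __iomem *reg, u32 bits)
{
	writel(readl(reg) & ~bits, reg);	/* read, clear bits, write back */
}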
static void ns83820_cleanup_rx(struct ns83820 *dev)
{
	unsigned i;
	unsigned long flags;

	dprintk("ns83820_cleanup_rx(%p)\n", dev);

	/* disable receive interrupts */
	spin_lock_irqsave(&dev->misc_lock, flags);
	dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR | ISR_RXEARLY | ISR_RXIDLE);
	writel(dev->IMR_cache, dev->base + IMR);
	spin_unlock_irqrestore(&dev->misc_lock, flags);

	/* synchronize with the interrupt handler and kill it */
	dev->rx_info.up = 0;
	synchronize_irq(dev->pci_dev->irq);

	/* touch the pci bus... */
	readl(dev->base + IMR);

	/* assumes the transmitter is already disabled and reset */
	writel(0, dev->base + RXDP_HI);
	writel(0, dev->base + RXDP);
	for (i=0; i<NR_RX_DESC; i++) {
		struct sk_buff *skb = dev->rx_info.skbs[i];
		dev->rx_info.skbs[i] = NULL;
		clear_rx_desc(dev, i);
		if (skb)
			kfree_skb(skb);
	}
}

static void FASTCALL(ns83820_rx_kick(struct net_device *ndev));
static void fastcall ns83820_rx_kick(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	/*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ {
		if (dev->rx_info.up) {
			rx_refill_atomic(ndev);
			kick_rx(ndev);
		}
	}

	if (dev->rx_info.up && nr_rx_empty(dev) > NR_RX_DESC*3/4)
		schedule_work(&dev->tq_refill);
	else
		kick_rx(ndev);
	if (dev->rx_info.idle)
		printk(KERN_DEBUG "%s: BAD\n", ndev->name);
}

/* rx_irq
 */
static void FASTCALL(rx_irq(struct net_device *ndev));
static void fastcall rx_irq(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	struct rx_info *info = &dev->rx_info;
	unsigned next_rx;
	u32 cmdsts, *desc;
	unsigned long flags;
	int nr = 0;

	dprintk("rx_irq(%p)\n", ndev);
	dprintk("rxdp: %08x, descs: %08lx next_rx[%d]: %p next_empty[%d]: %p\n",
		readl(dev->base + RXDP),
		(long)(dev->rx_info.phy_descs),
		(int)dev->rx_info.next_rx,
		(dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)),
		(int)dev->rx_info.next_empty,
		(dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty))
		);

	spin_lock_irqsave(&info->lock, flags);
	if (!info->up)
		goto out;

	dprintk("walking descs\n");
	next_rx = info->next_rx;
	desc = info->next_rx_desc;
	while ((CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) &&
	       (cmdsts != CMDSTS_OWN)) {
		struct sk_buff *skb;
		u32 extsts = le32_to_cpu(desc[DESC_EXTSTS]);
		dma_addr_t bufptr = desc_addr_get(desc + DESC_BUFPTR);

		dprintk("cmdsts: %08x\n", cmdsts);
		dprintk("link: %08x\n", cpu_to_le32(desc[DESC_LINK]));
		dprintk("extsts: %08x\n", extsts);

		skb = info->skbs[next_rx];
		info->skbs[next_rx] = NULL;
		info->next_rx = (next_rx + 1) % NR_RX_DESC;

		mb();
		clear_rx_desc(dev, next_rx);

		pci_unmap_single(dev->pci_dev, bufptr,
				 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		if (likely(CMDSTS_OK & cmdsts)) {
			int len = cmdsts & 0xffff;
			/* test the skb before touching it; the original code
			 * called skb_put() first and checked for NULL after */
			if (unlikely(!skb))
				goto netdev_mangle_me_harder_failed;
			skb_put(skb, len);
			if (cmdsts & CMDSTS_DEST_MULTI)
				dev->stats.multicast ++;
			dev->stats.rx_packets ++;
			dev->stats.rx_bytes += len;
			if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}
			skb->protocol = eth_type_trans(skb, ndev);
			if (NET_RX_DROP == netif_rx(skb)) {
netdev_mangle_me_harder_failed:
				dev->stats.rx_dropped ++;
			}
		} else {
			kfree_skb(skb);
		}

		nr++;
		next_rx = info->next_rx;
		desc = info->descs + (DESC_SIZE * next_rx);
	}
	info->next_rx = next_rx;
	info->next_rx_desc = info->descs + (DESC_SIZE * next_rx);

out:
	if (0 && !nr) {
		Dprintk("dazed: cmdsts_f: %08x\n", cmdsts);
	}

	spin_unlock_irqrestore(&info->lock, flags);
}

static void rx_action(unsigned long _dev)
{
	struct net_device *ndev = (void *)_dev;
	struct ns83820 *dev = PRIV(ndev);

	rx_irq(ndev);
	writel(ihr, dev->base + IHR);

	spin_lock_irq(&dev->misc_lock);
	dev->IMR_cache |= ISR_RXDESC;
	writel(dev->IMR_cache, dev->base + IMR);
	spin_unlock_irq(&dev->misc_lock);

	rx_irq(ndev);
	ns83820_rx_kick(ndev);
}

/* Packet Transmit code
 */
static inline void kick_tx(struct ns83820 *dev)
{
	dprintk("kick_tx(%p): tx_idx=%d free_idx=%d\n",
		dev, dev->tx_idx, dev->tx_free_idx);
	writel(CR_TXE, dev->base + CR);
}
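/*
 * Editor's sketch (not part of the original driver): the ring-consumer
 * pattern rx_irq() above follows.  The device sets CMDSTS_OWN in a
 * descriptor once it has filled the buffer; the CPU walks the ring from
 * its next_rx index, stops at the first descriptor the device still owns,
 * and advances modulo the ring size.  `struct demo_desc' and
 * demo_consume() are hypothetical; unmapping, the empty-slot marker and
 * error handling are omitted.
 */
struct demo_desc {
	u32 cmdsts;	/* device sets CMDSTS_OWN here when the slot is filled */
};

static void demo_consume(struct demo_desc *ring, unsigned *next_rx)
{
	while (le32_to_cpu(ring[*next_rx].cmdsts) & CMDSTS_OWN) {
		/* ... hand the completed buffer at *next_rx to the stack ... */
		ring[*next_rx].cmdsts = cpu_to_le32(0);	/* recycle the slot */
		*next_rx = (*next_rx + 1) % NR_RX_DESC;
	}
}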
/* No spinlock needed on the transmit irq path as the interrupt
 * handler is serialized.
 */
static void do_tx_done(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	u32 cmdsts, tx_done_idx, *desc;

	spin_lock_irq(&dev->tx_lock);

	dprintk("do_tx_done(%p)\n", ndev);
	tx_done_idx = dev->tx_done_idx;
	desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);

	dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
		tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
	while ((tx_done_idx != dev->tx_free_idx) &&
	       !(CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) ) {
		struct sk_buff *skb;
		unsigned len;
		dma_addr_t addr;

		if (cmdsts & CMDSTS_ERR)
			dev->stats.tx_errors ++;
		if (cmdsts & CMDSTS_OK)
			dev->stats.tx_packets ++;
		if (cmdsts & CMDSTS_OK)
			dev->stats.tx_bytes += cmdsts & 0xffff;

		dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
			tx_done_idx, dev->tx_free_idx, cmdsts);
		skb = dev->tx_skbs[tx_done_idx];
		dev->tx_skbs[tx_done_idx] = NULL;
		dprintk("done(%p)\n", skb);

		len = cmdsts & CMDSTS_LEN_MASK;
		addr = desc_addr_get(desc + DESC_BUFPTR);
		if (skb) {
			pci_unmap_single(dev->pci_dev,
					addr,
					len,
					PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			atomic_dec(&dev->nr_tx_skbs);
		} else
			pci_unmap_page(dev->pci_dev,
					addr,
					len,
					PCI_DMA_TODEVICE);

		tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
		dev->tx_done_idx = tx_done_idx;
		desc[DESC_CMDSTS] = cpu_to_le32(0);
		mb();
		desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
	}

	/* Allow network stack to resume queueing packets after we've
	 * finished transmitting at least 1/4 of the packets in the queue.
	 */
	if (netif_queue_stopped(ndev) && start_tx_okay(dev)) {
		dprintk("start_queue(%p)\n", ndev);
		netif_start_queue(ndev);
		netif_wake_queue(ndev);
	}
	spin_unlock_irq(&dev->tx_lock);
}

static void ns83820_cleanup_tx(struct ns83820 *dev)
{
	unsigned i;

	for (i=0; i<NR_TX_DESC; i++) {
		struct sk_buff *skb = dev->tx_skbs[i];
		dev->tx_skbs[i] = NULL;
		if (skb) {
			u32 *desc = dev->tx_descs + (i * DESC_SIZE);
			pci_unmap_single(dev->pci_dev,
					desc_addr_get(desc + DESC_BUFPTR),
					le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
					PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			atomic_dec(&dev->nr_tx_skbs);
		}
	}

	memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4);
}
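/*
 * Editor's sketch (not part of the original driver): the free-slot
 * arithmetic used by ns83820_hard_start_xmit() below.  tx_free_idx (the
 * producer) chases tx_done_idx (the consumer) around a ring of NR_TX_DESC
 * descriptors; the usable gap is computed modulo the ring size, with a
 * two-slot margin so the producer can never catch the consumer and make a
 * full ring indistinguishable from an empty one.  The helper name is
 * hypothetical.
 */
static inline int demo_tx_slots_free(unsigned tx_done_idx, unsigned tx_free_idx)
{
	/* mirrors: (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC */
	return (tx_done_idx + NR_TX_DESC - 2 - tx_free_idx) % NR_TX_DESC;
}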
/* transmit routine.  This code relies on the network layer serializing
 * its calls in, but will run happily in parallel with the interrupt
 * handler.  This code currently has provisions for fragmenting tx buffers
 * while trying to track down a bug in either the zero copy code or
 * the tx fifo (hence the MAX_FRAG_LEN).
 */
static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	u32 free_idx, cmdsts, extsts;
	int nr_free, nr_frags;
	unsigned tx_done_idx, last_idx;
	dma_addr_t buf;
	unsigned len;
	skb_frag_t *frag;
	int stopped = 0;
	int do_intr = 0;
	volatile u32 *first_desc;

	dprintk("ns83820_hard_start_xmit\n");

	nr_frags = skb_shinfo(skb)->nr_frags;
again:
	if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
		netif_stop_queue(ndev);
		if (unlikely(dev->CFG_cache & CFG_LNKSTS))
			return 1;
		netif_start_queue(ndev);
	}

	last_idx = free_idx = dev->tx_free_idx;
	tx_done_idx = dev->tx_done_idx;
	nr_free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC;
	nr_free -= 1;
	if (nr_free <= nr_frags) {
		dprintk("stop_queue - not enough(%p)\n", ndev);
		netif_stop_queue(ndev);

		/* Check again: we may have raced with a tx done irq */
		if (dev->tx_done_idx != tx_done_idx) {
			dprintk("restart queue(%p)\n", ndev);
			netif_start_queue(ndev);
			goto again;
		}
		return 1;
	}

	if (free_idx == dev->tx_intr_idx) {
