📄 forcedeth.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}

static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	} else {
		return 0;
	}
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc* put_tx;
	struct ring_desc* start_tx;
	struct ring_desc* prev_tx;
	struct nv_skb_map* prev_tx_ctx;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag  */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irq(&np->lock);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irq(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex* put_tx;
	struct ring_desc_ex* start_tx;
	struct ring_desc_ex* prev_tx;
	struct nv_skb_map* prev_tx_ctx;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.ex;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
		put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
			put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag  */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irq(&np->lock);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irq(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc* orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
					dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}

static void nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (limit-- > 0)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized
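The nv_get_empty_tx_slots() helper in the listing above is plain ring-buffer arithmetic: the number of free descriptors is the ring size minus the occupied span between the producer pointer (put_tx_ctx) and the consumer pointer (get_tx_ctx), taken modulo the ring size. Below is a minimal, self-contained sketch of the same calculation on plain integer indices; the ring size, the function name empty_tx_slots, and the example positions are illustrative only and are not part of the driver.

#include <stdio.h>

/* Illustrative ring size; the driver uses np->tx_ring_size. */
#define RING_SIZE 16

/* Same arithmetic as nv_get_empty_tx_slots(), on signed slot indices:
 * free slots = ring size minus the occupied span between the producer
 * (put) and the consumer (get), reduced modulo the ring size.
 * When put == get the ring is empty and all RING_SIZE slots are free. */
static unsigned int empty_tx_slots(int put, int get)
{
	return (unsigned int)(RING_SIZE - ((RING_SIZE + (put - get)) % RING_SIZE));
}

int main(void)
{
	/* Hypothetical positions: producer at slot 10, consumer at slot 4;
	 * 6 descriptors are in flight, so 10 of the 16 slots are free. */
	printf("%u\n", empty_tx_slots(10, 4));	/* prints 10 */

	/* Producer wrapped past the end of the ring: put=2, get=10;
	 * 8 descriptors in flight, 8 free. */
	printf("%u\n", empty_tx_slots(2, 10));	/* prints 8 */
	return 0;
}

Both nv_start_xmit() and nv_start_xmit_optimized() compare this count against the number of descriptors the packet needs and, when empty_slots <= entries, stop the queue (np->tx_stop = 1) and return NETDEV_TX_BUSY; nv_tx_done() wakes the queue again once descriptors have been reclaimed.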
