⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 netdev.c

📁 DELL755 Intel 网卡驱动
💻 C
📖 第 1 页 / 共 5 页
字号:
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 *
 * Releases every software-owned transmit buffer, zeroes the ring's
 * bookkeeping and descriptor memory, and resets the ring's hardware
 * head/tail registers.
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* hand each buffer back; e1000_put_txbuf() (defined elsewhere in
	 * this file) performs the actual skb/DMA release */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	/* wipe the software bookkeeping array and the descriptor memory */
	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	/* zero the ring's head and tail registers in hardware */
	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources: the vmalloc'ed per-buffer state
 * array and the DMA-coherent descriptor area.
 **/
void e1000_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	/* reclaim all outstanding buffers before freeing the backing store */
	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 * @work_done: (NAPI builds only) running count of descriptors processed
 * @work_to_do: (NAPI builds only) NAPI budget; processing stops once reached
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
#ifdef CONFIG_E1000E_NAPI
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
#else
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
#endif
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	/* walk descriptors until one without the DD status bit is seen */
	while (staterr & E1000_RXD_STAT_DD) {
#ifdef CONFIG_E1000E_NAPI
		/* honour the NAPI budget */
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
#endif
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		/* fetch the next descriptor early (ring wraps at count) */
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		/* unmap the bsize0 (header) buffer now that hardware is done */
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			e_dbg("Packet Split buffers didn't pick up the full"
			      " packet\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* drop frames the hardware flagged with errors */
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple"
			      " descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

#ifdef CONFIG_E1000E_NAPI
		{
		/*
		 * this looks ugly, but it seems compiler issues make it
		 * more efficient than reusing j
		 */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/*
		 * page alloc/put takes too long and effects small packet
		 * throughput, so unsplit small packets and save the alloc/put
		 * only valid in softirq (napi) context to call kmap_*
		 */
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/*
			 * there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long
			 */
			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			/* hand the (unmodified) page back to the device */
			pci_dma_sync_single_for_device(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);

			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}
#endif

		/* attach each non-empty split page to the skb as a fragment;
		 * page ownership transfers to the skb */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

#ifdef CONFIG_E1000E_NAPI
copydone:
#endif
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		/* count frames where hardware reported a split-off header */
		if (rx_desc->wb.upper.header_status &
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		/* clear the low status byte (incl. DD) so stale status is not
		 * seen when the ring wraps back to this descriptor */
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	/* replenish whatever the hardware can now reuse */
	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

#ifdef CONFIG_E1000E_NAPI
/* NOTE: these new jumbo frame routines rely on NAPI because of the
 * pskb_may_pull call, which eventually must call kmap_atomic which you cannot
 * call from hard irq context
 */

/**
 *
e1000_consume_page - helper function
 **/
/*
 * Account a page-sized fragment against @skb and drop the buffer's page
 * pointer: after this the page belongs to the skb, not the rx buffer.
 */
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
                               u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: running count of descriptors processed this poll
 * @work_to_do: NAPI budget; processing stops once reached
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = FALSE;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* walk descriptors until one without the DD status bit is seen */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		/* honour the NAPI budget */
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		/* fetch the next descriptor early (ring wraps at count) */
		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = TRUE;
		cleaned_count++;
		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
		               PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the window
				 * too */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
		}

/* rxtop: shorthand for the partially-assembled multi-descriptor skb,
 * kept in the ring so a chain can span calls */
#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
				                   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
					                   KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
					              KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					/* large frame: attach the page as a
					 * fragment, ownership moves to skb */
					skb_fill_page_desc(skb, 0,
					                   buffer_info->page, 0,
				                           length);
					e1000_consume_page(buffer_info, skb,
					                   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
		                  (u32)(status) |
		                  ((u32)(rx_desc->errors) << 24),
		                  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
		                  rx_desc->special);

next_desc:
		/* clear status so the descriptor is not re-processed before
		 * hardware rewrites it */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* replenish whatever the hardware can now reuse */
	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
#endif /* CONFIG_E1000E_NAPI */

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 *
 * Unmaps and frees every receive buffer still held in software, picking
 * the unmap flavour that matches the active clean_rx routine.
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			/* unmap size/kind depends on which receive path
			 * mapped the buffer */
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
#ifdef CONFIG_E1000E_NAPI
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				pci_unmap_page(pdev, buffer_info->dma,
				               PAGE_SIZE,
     PCI_DMA_FROMDEVICE);#endif			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)				pci_unmap_single(pdev, buffer_info->dma,						 adapter->rx_ps_bsize0,						 PCI_DMA_FROMDEVICE);			buffer_info->dma = 0;		}		if (buffer_info->page) {			put_page(buffer_info->page);			buffer_info->page = NULL;		}		if (buffer_info->skb) {			dev_kfree_skb(buffer_info->skb);			buffer_info->skb = NULL;		}		for (j = 0; j < PS_PAGE_BUFFERS; j++) {			ps_page = &buffer_info->ps_pages[j];			if (!ps_page->page)				break;			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,				       PCI_DMA_FROMDEVICE);			ps_page->dma = 0;			put_page(ps_page->page);			ps_page->page = NULL;		}	}#ifdef CONFIG_E1000E_NAPI	/* there also may be some cached data from a chained receive */	if (rx_ring->rx_skb_top) {		dev_kfree_skb(rx_ring->rx_skb_top);		rx_ring->rx_skb_top = NULL;	}#endif	/* Zero out the descriptor ring */	memset(rx_ring->desc, 0, rx_ring->size);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -