
📄 netdev.c

📁 DELL755 Intel NIC driver
💻 C
📖 Page 1 of 5
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] = ~0;
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = pci_map_page(pdev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
				cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		if (!(i--))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		/*
		 * Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
	}
}

#ifdef CONFIG_E1000E_NAPI
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 -
			     16 /* for skb_reserve */ -
			     NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = pci_map_page(pdev,
							buffer_info->page, 0,
							PAGE_SIZE,
							PCI_DMA_FROMDEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
#endif /* CONFIG_E1000E_NAPI */

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
#ifdef CONFIG_E1000E_NAPI
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
#else
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter)
#endif
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

#ifdef CONFIG_E1000E_NAPI
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
#endif

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet, also make sure the frame isn't just CRC only */
		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

/* Unmap the DMA mapping and free the skb attached to a Tx buffer_info slot. */
static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			       buffer_info->length, PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* detected Tx unit hang */
	e_err("Detected Tx Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status);
}

/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	spin_lock_init(&adapter->tx_queue_lock);

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}
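The refill and clean paths above hinge on the ring-index bookkeeping around next_to_use, next_to_clean, and the tail write. The standalone sketch below (not driver code) models that arithmetic: desc_unused() is an assumption about what the driver's e1000_desc_unused() computes, since its body is not on this page, and tail_index() mirrors the wrap-then-step-back-one logic done just before the writel() calls above.

/*
 * Illustrative, self-contained model of the e1000e Rx ring index arithmetic.
 * Compile with: cc -std=c99 -Wall ring_sketch.c
 */
#include <assert.h>
#include <stdio.h>

struct ring {
	unsigned int count;         /* number of descriptors in the ring      */
	unsigned int next_to_use;   /* next slot software will hand to the hw */
	unsigned int next_to_clean; /* next slot software will reclaim        */
};

/* Descriptors software may refill without running into next_to_clean.
 * Assumed equivalent of e1000_desc_unused(); one slot is always left empty
 * so "use == clean" unambiguously means an empty ring. */
static unsigned int desc_unused(const struct ring *r)
{
	if (r->next_to_clean > r->next_to_use)
		return r->next_to_clean - r->next_to_use - 1;
	return r->count + r->next_to_clean - r->next_to_use - 1;
}

/* Tail value written after a refill: one slot behind next_to_use, wrapped,
 * matching the "if (!(i--)) i = count - 1;" step before the writel(). */
static unsigned int tail_index(const struct ring *r)
{
	return (r->next_to_use == 0) ? r->count - 1 : r->next_to_use - 1;
}

int main(void)
{
	struct ring r = { .count = 256, .next_to_use = 0, .next_to_clean = 10 };
	unsigned int refill = desc_unused(&r);   /* 9 free slots */

	/* Advance next_to_use with wraparound, as the while (cleaned_count--)
	 * refill loops above do for each buffer they post. */
	for (unsigned int n = 0; n < refill; n++)
		if (++r.next_to_use == r.count)
			r.next_to_use = 0;

	printf("unused=%u tail=%u\n", desc_unused(&r), tail_index(&r));
	/* The packet-split path writes tail_index(&r) << 1 instead, because
	 * its descriptors are 32 bytes while hardware steps in 16-byte units. */
	assert(desc_unused(&r) == 0 && tail_index(&r) == 8);
	return 0;
}

The deliberate one-slot gap is why both desc_unused() variants subtract 1: hardware is never told about the very last free descriptor, so the head and tail pointers can only be equal when the ring is empty.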
