⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 netdev.c

📁 grub源码分析文档
💻 C
📖 第 1 页 / 共 5 页
字号:
		 * packet, also make sure the frame isn't just CRC only */
		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				/*
				 * Copy header + payload into the fresh skb so
				 * the large DMA buffer can be recycled below.
				 */
				skb_copy_to_linear_data_offset(new_skb,
				                               -NET_IP_ALIGN,
				                               (skb->data -
				                                NET_IP_ALIGN),
				                               (length +
				                                NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* replenish any descriptors the loop consumed but did not refill */
	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

/*
 * e1000_put_txbuf - release the DMA mapping and skb held by one Tx buffer_info
 * entry, zeroing the fields so the slot can be reused by the ring.
 *
 * NOTE(review): pci_unmap_page() is used unconditionally here; if any Tx
 * buffer is mapped with pci_map_single() elsewhere in this file, verify the
 * map/unmap pairing is still correct on this kernel's DMA API.
 */
static void e1000_put_txbuf(struct e1000_adapter *adapter,
			     struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			       buffer_info->length, PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		/* safe from any context, including hard irq */
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/*
 * e1000_print_tx_hang - dump Tx ring state (hardware head/tail registers and
 * the next-to-clean descriptor) to the log when a transmit hang is detected.
 * Diagnostic only; does not modify ring state.
 */
static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* detected Tx unit hang */
	e_err("Detected Tx Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	bool cleaned = 1;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* walk completed frames: hardware sets DD on the end-of-packet desc */
	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		/* free every descriptor of the frame up to and incl. eop */
		for (cleaned = 0; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
#ifdef NETIF_F_TSO
				unsigned int segs, bytecount;
				/* a TSO skb counts as gso_segs wire packets */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
#else
				total_tx_packets++;
				total_tx_bytes += skb->len;
#endif
			}
			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			/* bail if we have swept the whole ring in one pass */
			count++;
			if (count == tx_ring->count) {
				cleaned = 0;
				goto done_cleaning;
			}
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

done_cleaning:
	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/*
		 * Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		/* hang = mapped buffer aged past timeout while link not paused */
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
			       + (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			e1000_print_tx_hang(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	adapter->net_stats.tx_bytes += total_tx_bytes;
	adapter->net_stats.tx_packets += total_tx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		/* honour the NAPI budget handed to us by the caller */
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		/* unmap the header buffer before the CPU touches it */
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			e_dbg("Packet Split buffers didn't pick up the full"
			      " packet\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple"
			      " descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

#ifdef CONFIG_E1000_NAPI
		{
		/*
		 * this looks ugly, but it seems compiler issues make it
		 * more efficient than reusing j
		 */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/*
		 * page alloc/put takes too long and effects small packet
		 * throughput, so unsplit small packets and save the alloc/put
		 * only valid in softirq (napi) context to call kmap_*
		 */
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/*
			 * there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long
			 */
			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			pci_dma_sync_single_for_device(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);

			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}
#endif

		/* attach each non-empty split page as an skb fragment */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

#ifdef CONFIG_E1000_NAPI
copydone:
#endif
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		/* clear the status byte so we don't re-process this desc */
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 *
 * Detach the page from @bi (it now belongs to @skb's frag list) and grow the
 * skb's length accounting by @length.
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
                               u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = FALSE;
	unsigned int total_rx_bytes=0, total_rx_packets=0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = TRUE;
		cleaned_count++;
		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
		               PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the window
				 * too */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -