
📄 netdev.c

📁 Intel 82546 series LAN driver source code
💻 C
📖 Page 1 of 5
		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			e_dbg("Packet Split buffers didn't pick up the full "
			      "packet\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple "
			      "descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

#ifdef CONFIG_E1000E_NAPI
		{
		/*
		 * this looks ugly, but it seems compiler issues make it
		 * more efficient than reusing j
		 */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/*
		 * page alloc/put takes too long and effects small packet
		 * throughput, so unsplit small packets and save the alloc/put
		 * only valid in softirq (napi) context to call kmap_*
		 */
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/*
			 * there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long
			 */
			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			pci_dma_sync_single_for_device(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* remove the CRC */
			if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
				l1 -= 4;

			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}
#endif

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

#ifdef CONFIG_E1000E_NAPI
copydone:
#endif
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

#ifdef CONFIG_E1000E_NAPI
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
                               u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes=0, total_rx_packets=0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
		               PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the window
				 * too */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
				                   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
					                   KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
					              KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
					                   buffer_info->page, 0,
					                   length);
					e1000_consume_page(buffer_info, skb,
					                   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
		                  (u32)(status) |
		                  ((u32)(rx_desc->errors) << 24),
		                  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
		                  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
#endif /* CONFIG_E1000E_NAPI */

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
#ifdef CONFIG_E1000E_NAPI
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				pci_unmap_page(pdev, buffer_info->dma,
				               PAGE_SIZE,
				               PCI_DMA_FROMDEVICE);
#endif
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

#ifdef CONFIG_E1000E_NAPI
	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}
#endif

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

#ifndef CONFIG_E1000E_NAPI
static void e1000_set_itr(struct e1000_adapter *adapter);
#endif

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
#ifndef CONFIG_E1000E_NAPI
	int i;
#endif
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

#ifdef CONFIG_E1000E_NAPI
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	}
#else
	adapter->total_tx_bytes = 0;
	adapter->total_rx_bytes = 0;
	adapter->total_tx_packets = 0;
	adapter->total_rx_packets = 0;

	for (i = 0; i < E1000_MAX_INTR; i++) {
		int rx_cleaned = adapter->clean_rx(adapter);
		int tx_cleaned_complete = e1000_clean_tx_irq(adapter);
		if (!rx_cleaned && tx_cleaned_complete)
			break;
	}
