
📄 netdev.c

📁 grub source code analysis documents
💻 C
📖 Page 1 of 5
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
				                   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
					                    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
					              KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
					                   buffer_info->page, 0,
					                   length);
					e1000_consume_page(buffer_info, skb,
					                   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
		                  (u32)(status) |
		                  ((u32)(rx_desc->errors) << 24),
		                  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
		                  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				pci_unmap_page(pdev, buffer_info->dma,
				               PAGE_SIZE,
				               PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	}

	return IRQ_HANDLED;
}

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int irq_flags = IRQF_SHARED;
	int err;

	if (adapter->flags & FLAG_HAS_MSI) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->flags |= FLAG_MSI_ENABLED;
			irq_flags = 0;
		}
	}

	err = request_irq(adapter->pdev->irq, (adapter->flags & FLAG_HAS_MSI) ?
			  &e1000_intr_msi : &e1000_intr, irq_flags,
			  netdev->name, netdev);
	if (err) {
		if (adapter->flags & FLAG_MSI_ENABLED) {
			pci_disable_msi(adapter->pdev);
			adapter->flags &= ~FLAG_MSI_ENABLED;
		}
		e_err("Unable to allocate interrupt, Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	e1e_flush();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 * For AMT version (only with 82573) of the f/w this means that the
 * network i/f is closed.
 *
 **/
static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware taken over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	spin_lock_init(&adapter->tx_queue_lock);

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}
