📄 netdev.c
#endif
		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}
#endif /* CONFIG_E1000E_MSIX */

	free_irq(adapter->pdev->irq, netdev);
#ifndef CONFIG_E1000E_MSIX
	if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
#endif
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
#ifdef CONFIG_E1000E_MSIX
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
#endif /* CONFIG_E1000E_MSIX */
	e1e_flush();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

#ifdef CONFIG_E1000E_MSIX
	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
#else
	ew32(IMS, IMS_ENABLE_MASK);
#endif /* CONFIG_E1000E_MSIX */
	e1e_flush();
}
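/*
 * The two routines above rely on IMS/IMC being a set/clear register
 * pair: writing 1s to IMC masks the corresponding interrupt causes
 * (ew32(IMC, ~0) therefore disables them all in a single write), while
 * writing 1s to IMS unmasks causes without disturbing bits that are
 * already set.  That is why neither routine needs a read-modify-write
 * cycle on the interrupt mask register.
 */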
/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/
static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	spin_lock_init(&adapter->tx_queue_lock);

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}
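/*
 * Sizing example for the two setup routines above (assuming the
 * driver's default ring of 256 descriptors): a legacy Tx descriptor
 * (struct e1000_tx_desc) is 16 bytes, so tx_ring->size is
 * 256 * 16 = 4096 and the ALIGN(..., 4096) is a no-op; a packet-split
 * Rx descriptor (union e1000_rx_desc_packet_split) is 32 bytes, giving
 * 8192.  A smaller count, e.g. 80 Tx descriptors, would have
 * 80 * 16 = 1280 rounded up to a full 4096-byte page.
 */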
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	int i;

	e1000_clean_rx_ring(adapter);

	for (i = 0; i < rx_ring->count; i++) {
		kfree(rx_ring->buffer_info[i].ps_pages);
	}

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}
/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.  This functionality is controlled
 * by the InterruptThrottleRate module parameter.
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets,
				     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512)) {
			retval = low_latency;
		}
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35) {
				retval = low_latency;
			}
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
					   adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
					   adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
#ifdef CONFIG_E1000E_MSIX
		adapter->rx_ring->itr_val = new_itr;
		if (adapter->msix_entries)
			adapter->rx_ring->set_itr = 1;
		else
			ew32(ITR, 1000000000 / (new_itr * 256));
#else
		ew32(ITR, 1000000000 / (new_itr * 256));
#endif
	}
}
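/*
 * Worked example of the conversion above: new_itr is expressed in
 * interrupts/s, while the ITR register holds the minimum gap between
 * interrupts in 256 ns units, hence reg = 10^9 / (new_itr * 256).
 * For the low_latency value of 20000 ints/s this gives
 * 1000000000 / (20000 * 256) ~= 195 (the "hwitr = ~200" note above),
 * i.e. an inter-interrupt gap of 195 * 256 ns ~= 50 us, matching the
 * "50 usec aka 20000 ints/s" annotation in e1000_update_itr().
 */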
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	return 0;
err:
	e_err("Unable to allocate memory for queues\n");
	kfree(adapter->rx_ring);
	kfree(adapter->tx_ring);
	return -ENOMEM;
}

#ifdef CONFIG_E1000E_NAPI
/**
 * e1000_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: number of packets driver is allowed to process this poll
 **/
static int e1000_poll(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi,
					struct e1000_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int tx_clean_complete = 1, work_done = 0;
#ifdef CONFIG_E1000E_MSIX
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries &&
	    !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
		goto clean_rx;
#endif

	/*
	 * e1000_poll is called per-cpu.  This lock protects
	 * tx_ring from being cleaned by multiple cpus
	 * simultaneously.  A failure obtaining the lock means
	 * tx_ring is currently being cleaned anyway.
	 */
	if (spin_trylock(&adapter->tx_queue_lock)) {
		tx_clean_complete &= e1000_clean_tx_irq(adapter);
		spin_unlock(&adapter->tx_queue_lock);
	}

#ifdef CONFIG_E1000E_MSIX
clean_rx:
#endif
	adapter->clean_rx(adapter, &work_done, budget);

	/* If Tx completed and all Rx work don