netdev.c
	desc_len = sizeof(union e1000_rx_desc_packet_split);
	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	int i;

	e1000_clean_rx_ring(adapter);

	for (i = 0; i < rx_ring->count; i++)
		kfree(rx_ring->buffer_info[i].ps_pages);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}
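/*
 * Illustrative sketch (not part of the driver): the Rx setup path above
 * sizes the descriptor ring as count * descriptor size, rounded up to a
 * 4 KiB boundary, which is what the kernel's ALIGN() does for power-of-two
 * alignments.  A standalone C equivalent, using a hypothetical ring of
 * 256 descriptors of 32 bytes each:
 *
 *	#include <assert.h>
 *	#include <stddef.h>
 *
 *	// same arithmetic as ALIGN(x, a) for power-of-two a
 *	static size_t align_up(size_t x, size_t a)
 *	{
 *		return (x + a - 1) & ~(a - 1);
 *	}
 *
 *	int main(void)
 *	{
 *		assert(align_up(256 * 32, 4096) == 8192);   // already aligned
 *		assert(align_up(300 * 32, 4096) == 12288);  // 9600 -> 12288
 *		return 0;
 *	}
 */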
/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packet and byte counts during the last
 * interrupt.  The advantage of per-interrupt computation is faster updates
 * and a more accurate ITR for the current traffic pattern.  Constants in
 * this function were computed based on theoretical maximum wire speed, and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c).
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency: /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if (packets > 35)
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}
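/*
 * Illustrative sketch (not part of the driver): the ITR register holds the
 * minimum inter-interrupt interval in 256 ns units, while new_itr above is
 * an interrupt rate in interrupts/sec, hence the 1000000000 / (rate * 256)
 * conversion in e1000_set_itr().  A standalone check of that arithmetic:
 *
 *	#include <assert.h>
 *
 *	static unsigned int itr_reg_from_rate(unsigned int ints_per_sec)
 *	{
 *		// 1e9 ns per second, register unit is 256 ns
 *		return 1000000000u / (ints_per_sec * 256u);
 *	}
 *
 *	int main(void)
 *	{
 *		assert(itr_reg_from_rate(20000) == 195); // "aka hwitr = ~200"
 *		assert(itr_reg_from_rate(4000) == 976);  // bulk: 250 usec
 *		assert(itr_reg_from_rate(70000) == 55);  // lowest latency
 *		return 0;
 *	}
 */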
/**
 * e1000_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int e1000_poll(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi,
					struct e1000_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int tx_clean_complete = 1, work_done = 0;

	/*
	 * e1000_poll is called per-cpu.  This lock protects tx_ring from
	 * being cleaned by multiple cpus simultaneously.  A failure
	 * obtaining the lock means tx_ring is currently being cleaned
	 * anyway.
	 */
	if (spin_trylock(&adapter->tx_queue_lock)) {
		tx_clean_complete &= e1000_clean_tx_irq(adapter);
		spin_unlock(&adapter->tx_queue_lock);
	}

	adapter->clean_rx(adapter, &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if ((tx_clean_complete && (work_done == 0)) ||
	    !netif_running(netdev)) {
		netif_rx_complete(netdev, napi);
		if (adapter->itr_setting & 3)
			e1000_set_itr(adapter);
		if (!test_bit(__E1000_DOWN, &adapter->state))
			e1000_irq_enable(adapter);
		return 0;
	}

	if (!tx_clean_complete)
		work_done = budget;

	return work_done;
}

static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;
	struct net_device *v_netdev;

	/* don't update vlan cookie if already programmed */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000e_write_vfta(hw, index, vfta);

	/*
	 * Copy feature flags from netdev to the vlan netdev for this vid.
	 * This allows things like TSO to bubble down to our vlan device.
	 */
	v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
	v_netdev->features |= adapter->netdev->features;
	vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
}

static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->state))
		e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	if (!test_bit(__E1000_DOWN, &adapter->state))
		e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000e_write_vfta(hw, index, vfta);
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!adapter->vlgrp)
		return;

	if (!vlan_group_get_device(adapter->vlgrp, vid)) {
		adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		if (adapter->hw.mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		}

		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !vlan_group_get_device(adapter->vlgrp, old_vid))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
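/*
 * Illustrative sketch (not part of the driver): the VLAN filter table
 * (VFTA) used by the functions above is a 128-entry array of 32-bit
 * registers, one bit per VLAN ID (128 * 32 = 4096 IDs).  Bits 5..11 of the
 * VID select the register and bits 0..4 select the bit within it, which is
 * what (vid >> 5) & 0x7F and vid & 0x1F compute.  Standalone check:
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		unsigned int vid = 100;                 // example VLAN ID
 *		unsigned int index = (vid >> 5) & 0x7F; // dword 3
 *		unsigned int bit = vid & 0x1F;          // bit 4
 *		assert(index == 3 && bit == 4);
 *		assert(index * 32 + bit == vid);        // round-trips
 *		return 0;
 *	}
 */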
static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	if (!test_bit(__E1000_DOWN, &adapter->state))
		e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
			/* enable VLAN receive filtering */
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_VFE;
			rctl &= ~E1000_RCTL_CFIEN;
			ew32(RCTL, rctl);
			e1000_update_mng_vlan(adapter);
		}
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
			/* disable VLAN receive filtering */
			rctl = er32(RCTL);
			rctl &= ~E1000_RCTL_VFE;
			ew32(RCTL, rctl);

			if (adapter->mng_vlan_id !=
			    (u16)E1000_MNG_VLAN_NONE) {
				e1000_vlan_rx_kill_vid(netdev,
						       adapter->mng_vlan_id);
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}
		}
	}

	if (!test_bit(__E1000_DOWN, &adapter->state))
		e1000_irq_enable(adapter);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (!adapter->vlgrp)
		return;

	for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
		if (!vlan_group_get_device(adapter->vlgrp, vid))
			continue;
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 manc, manc2h;

	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
		return;

	manc = er32(MANC);

	/*
	 * enable receiving management packets to the host. this will probably
	 * generate destination unreachable messages from the host OS, but
	 * the packets will be handled on SMBUS
	 */
	manc |= E1000_MANC_EN_MNG2HOST;
	manc2h = er32(MANC2H);
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
	manc2h |= E1000_MNG2HOST_PORT_623;
	manc2h |= E1000_MNG2HOST_PORT_664;
	ew32(MANC2H, manc2h);
	ew32(MANC, manc);
}

/**
 * e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 tdlen, tctl, tipg, tarc;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = tx_ring->dma;
	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
	ew32(TDBAL, (tdba & DMA_32BIT_MASK));
	ew32(TDBAH, (tdba >> 32));
	ew32(TDLEN, tdlen);
	ew32(TDH, 0);
	ew32(TDT, 0);
	tx_ring->head = E1000_TDH;
	tx_ring->tail = E1000_TDT;

	/* Set the default values for the Tx Inter Packet Gap timer */
	tipg = DEFAULT_82543_TIPG_IPGT_COPPER;		/*  8  */
	ipgr1 = DEFAULT_82543_TIPG_IPGR1;		/*  8  */
	ipgr2 = DEFAULT_82543_TIPG_IPGR2;		/*  6  */

	if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;	/*  7  */
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);
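/*
 * Illustrative sketch (not part of the driver): e1000_configure_tx() above
 * programs the 64-bit ring base address into two 32-bit registers, TDBAL
 * (low dword, via a 32-bit mask) and TDBAH (high dword, via a 32-bit
 * shift).  Standalone check of the split and recombination, using a
 * hypothetical DMA address:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	int main(void)
 *	{
 *		uint64_t tdba = 0x0000000123456000ull;  // hypothetical addr
 *		uint32_t tdbal = (uint32_t)(tdba & 0xFFFFFFFFull);
 *		uint32_t tdbah = (uint32_t)(tdba >> 32);
 *		assert(tdbal == 0x23456000u && tdbah == 0x1u);
 *		assert(((uint64_t)tdbah << 32 | tdbal) == tdba);
 *		return 0;
 *	}
 */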