📄 e1000_main.c
                        sizeof(struct e1000_rx_desc);
        adapter->clean_rx = e1000_clean_rx_irq;
        adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
    }

    /* disable receives while setting up the descriptors */
    rctl = E1000_READ_REG(hw, RCTL);
    E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);

    /* set the Receive Delay Timer Register */
    E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

    if (hw->mac_type >= e1000_82540) {
        E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
        if (adapter->itr > 1)
            E1000_WRITE_REG(hw, ITR,
                1000000000 / (adapter->itr * 256));
    }

    if (hw->mac_type >= e1000_82571) {
        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
        /* Reset delay timers after every interrupt */
        ctrl_ext |= E1000_CTRL_EXT_CANC;
#ifdef CONFIG_E1000_NAPI
        /* Auto-Mask interrupts upon ICR read. */
        ctrl_ext |= E1000_CTRL_EXT_IAME;
#endif
        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
        E1000_WRITE_REG(hw, IAM, ~0);
        E1000_WRITE_FLUSH(hw);
    }

    /* Setup the HW Rx Head and Tail Descriptor Pointers and
     * the Base and Length of the Rx Descriptor Ring */
    switch (adapter->num_rx_queues) {
#ifdef CONFIG_E1000_MQ
    case 2:
        rdba = adapter->rx_ring[1].dma;
        E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
        E1000_WRITE_REG(hw, RDLEN1, rdlen);
        E1000_WRITE_REG(hw, RDH1, 0);
        E1000_WRITE_REG(hw, RDT1, 0);
        adapter->rx_ring[1].rdh = E1000_RDH1;
        adapter->rx_ring[1].rdt = E1000_RDT1;
        /* Fall Through */
#endif
    case 1:
    default:
        rdba = adapter->rx_ring[0].dma;
        E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
        E1000_WRITE_REG(hw, RDLEN, rdlen);
        E1000_WRITE_REG(hw, RDH, 0);
        E1000_WRITE_REG(hw, RDT, 0);
        adapter->rx_ring[0].rdh = E1000_RDH;
        adapter->rx_ring[0].rdt = E1000_RDT;
        break;
    }

#ifdef CONFIG_E1000_MQ
    if (adapter->num_rx_queues > 1) {
        uint32_t random[10];

        get_random_bytes(&random[0], 40);

        if (hw->mac_type <= e1000_82572) {
            E1000_WRITE_REG(hw, RSSIR, 0);
            E1000_WRITE_REG(hw, RSSIM, 0);
        }

        switch (adapter->num_rx_queues) {
        case 2:
        default:
            reta = 0x00800080;
            mrqc = E1000_MRQC_ENABLE_RSS_2Q;
            break;
        }

        /* Fill out redirection table */
        for (i = 0; i < 32; i++)
            E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
        /* Fill out hash function seeds */
        for (i = 0; i < 10; i++)
            E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);

        mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
                 E1000_MRQC_RSS_FIELD_IPV4_TCP);
        E1000_WRITE_REG(hw, MRQC, mrqc);
    }

    /* Multiqueue and packet checksumming are mutually exclusive. */
    if (hw->mac_type >= e1000_82571) {
        rxcsum = E1000_READ_REG(hw, RXCSUM);
        rxcsum |= E1000_RXCSUM_PCSD;
        E1000_WRITE_REG(hw, RXCSUM, rxcsum);
    }
#else
    /* Enable 82543 Receive Checksum Offload for TCP and UDP */
    if (hw->mac_type >= e1000_82543) {
        rxcsum = E1000_READ_REG(hw, RXCSUM);
        if (adapter->rx_csum == TRUE) {
            rxcsum |= E1000_RXCSUM_TUOFL;

            /* Enable 82571 IPv4 payload checksum for UDP fragments
             * Must be used in conjunction with packet-split. */
            if ((hw->mac_type >= e1000_82571) &&
                (adapter->rx_ps_pages)) {
                rxcsum |= E1000_RXCSUM_IPPCSE;
            }
        } else {
            rxcsum &= ~E1000_RXCSUM_TUOFL;
            /* don't need to clear IPPCSE as it defaults to 0 */
        }
        E1000_WRITE_REG(hw, RXCSUM, rxcsum);
    }
#endif /* CONFIG_E1000_MQ */

    if (hw->mac_type == e1000_82573)
        E1000_WRITE_REG(hw, ERT, 0x0100);

    /* Enable Receives */
    E1000_WRITE_REG(hw, RCTL, rctl);
}

/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/

static void
e1000_free_tx_resources(struct e1000_adapter *adapter,
                        struct e1000_tx_ring *tx_ring)
{
    struct pci_dev *pdev = adapter->pdev;

    e1000_clean_tx_ring(adapter, tx_ring);

    vfree(tx_ring->buffer_info);
    tx_ring->buffer_info = NULL;

    pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

    tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_tx_queues; i++)
        e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

static inline void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                                 struct e1000_buffer *buffer_info)
{
    if (buffer_info->dma) {
        pci_unmap_page(adapter->pdev,
                       buffer_info->dma,
                       buffer_info->length,
                       PCI_DMA_TODEVICE);
    }
    if (buffer_info->skb)
        dev_kfree_skb_any(buffer_info->skb);
    memset(buffer_info, 0, sizeof(struct e1000_buffer));
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/

static void
e1000_clean_tx_ring(struct e1000_adapter *adapter,
                    struct e1000_tx_ring *tx_ring)
{
    struct e1000_buffer *buffer_info;
    unsigned long size;
    unsigned int i;

    /* Free all the Tx ring sk_buffs */
    for (i = 0; i < tx_ring->count; i++) {
        buffer_info = &tx_ring->buffer_info[i];
        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
    }

    size = sizeof(struct e1000_buffer) * tx_ring->count;
    memset(tx_ring->buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(tx_ring->desc, 0, tx_ring->size);

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;
    tx_ring->last_tx_tso = 0;

    writel(0, adapter->hw.hw_addr + tx_ring->tdh);
    writel(0, adapter->hw.hw_addr + tx_ring->tdt);
}

/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/

static void
e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_tx_queues; i++)
        e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/

static void
e1000_free_rx_resources(struct e1000_adapter *adapter,
                        struct e1000_rx_ring *rx_ring)
{
    struct pci_dev *pdev = adapter->pdev;

    e1000_clean_rx_ring(adapter, rx_ring);

    vfree(rx_ring->buffer_info);
    rx_ring->buffer_info = NULL;
    kfree(rx_ring->ps_page);
    rx_ring->ps_page = NULL;
    kfree(rx_ring->ps_page_dma);
    rx_ring->ps_page_dma = NULL;

    pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

    rx_ring->desc = NULL;
}

/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_rx_queues; i++)
        e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/

static void
e1000_clean_rx_ring(struct e1000_adapter *adapter,
                    struct e1000_rx_ring *rx_ring)
{
    struct e1000_buffer *buffer_info;
    struct e1000_ps_page *ps_page;
    struct e1000_ps_page_dma *ps_page_dma;
    struct pci_dev *pdev = adapter->pdev;
    unsigned long size;
    unsigned int i, j;

    /* Free all the Rx ring sk_buffs */
    for (i = 0; i < rx_ring->count; i++) {
        buffer_info = &rx_ring->buffer_info[i];
        if (buffer_info->skb) {
            pci_unmap_single(pdev,
                             buffer_info->dma,
                             buffer_info->length,
                             PCI_DMA_FROMDEVICE);

            dev_kfree_skb(buffer_info->skb);
            buffer_info->skb = NULL;
        }
        ps_page = &rx_ring->ps_page[i];
        ps_page_dma = &rx_ring->ps_page_dma[i];
        for (j = 0; j < adapter->rx_ps_pages; j++) {
            if (!ps_page->ps_page[j])
                break;
            pci_unmap_page(pdev,
                           ps_page_dma->ps_page_dma[j],
                           PAGE_SIZE, PCI_DMA_FROMDEVICE);
            ps_page_dma->ps_page_dma[j] = 0;
            put_page(ps_page->ps_page[j]);
            ps_page->ps_page[j] = NULL;
        }
    }

    size = sizeof(struct e1000_buffer) * rx_ring->count;
    memset(rx_ring->buffer_info, 0, size);
    size = sizeof(struct e1000_ps_page) * rx_ring->count;
    memset(rx_ring->ps_page, 0, size);
    size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
    memset(rx_ring->ps_page_dma, 0, size);

    /* Zero out the descriptor ring */
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    writel(0, adapter->hw.hw_addr + rx_ring->rdh);
    writel(0, adapter->hw.hw_addr + rx_ring->rdt);
}

/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/

static void
e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_rx_queues; i++)
        e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void
e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    uint32_t rctl;

    e1000_pci_clear_mwi(&adapter->hw);

    rctl = E1000_READ_REG(&adapter->hw, RCTL);
    rctl |= E1000_RCTL_RST;
    E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
    E1000_WRITE_FLUSH(&adapter->hw);
    mdelay(5);

    if (netif_running(netdev))
        e1000_clean_all_rx_rings(adapter);
}

static void
e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    uint32_t rctl;

    rctl = E1000_READ_REG(&adapter->hw, RCTL);
    rctl &= ~E1000_RCTL_RST;
    E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
    E1000_WRITE_FLUSH(&adapter->hw);
    mdelay(5);

    if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
        e1000_pci_set_mwi(&adapter->hw);

    if (netif_running(netdev)) {
        /* No need to loop, because 82542 supports only 1 queue */
        struct e1000_rx_ring *ring = &adapter->rx_ring[0];
        e1000_configure_rx(adapter);
        adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
    }
}

/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_set_mac(struct net_device *netdev, void *p)
{
    struct e1000_adapter *adapter = netdev_priv(netdev);
    struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    /* 82542 2.0 needs to be in reset to write receive address registers */
    if (adapter->hw.mac_type == e1000_82542_rev2_0)
        e1000_enter_82542_rst(adapter);

    memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
    memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

    e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);

    /* With 82571 controllers, LAA may be overwritten (with the default)
     * due to controller reset from the other port. */
    if (adapter->hw.mac_type == e1000_82571) {
        /* activate the work around */
        adapter->hw.laa_is_present = 1;

        /* Hold a copy of the LAA in RAR[14] This is done so that
         * between the time RAR[0] gets clobbered and the time it
         * gets fixed (in e1000_watchdog), the actual LAA is in one
         * of the RARs and no incoming packets directed to this port
         * are dropped. Eventaully the LAA will be in RAR[0] and
         * RAR[14] */
        e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
                      E1000_RAR_ENTRIES - 1);
    }

    if (adapter->hw.mac_type == e1000_82542_rev2_0)
        e1000_leave_82542_rst(adapter);

    return 0;
}

/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point