e1000_main.c
	memset(rxdr->ps_page, 0, size);

	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page_dma) {
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page_dma, 0, size);

	if (adapter->hw.mac_type <= e1000_82547_rev_2)
		desc_len = sizeof(struct e1000_rx_desc);
	else
		desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	E1000_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the receive descriptor ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		kfree(rxdr->ps_page_dma);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
				     "at %p\n", rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
					    rxdr->dma);
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/

int
e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Rx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}
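/*
 * Illustrative sketch (not part of the driver): the errata-23 retry logic
 * above depends on e1000_check_64k_bound(), which is defined elsewhere in
 * this file. The underlying test is simple: a buffer crosses a 64 KB
 * boundary exactly when its first and last bytes differ in the address bits
 * above bit 15. A minimal standalone version of that check might look like
 * this; the name crosses_64k_boundary() is invented for illustration and
 * len is assumed to be at least 1.
 */
#include <stdbool.h>
#include <stdint.h>

static bool crosses_64k_boundary(const void *start, unsigned long len)
{
        uintptr_t begin = (uintptr_t)start;
        uintptr_t end   = begin + len - 1;      /* address of the last byte */

        /* XOR keeps only the differing address bits; any difference at
         * bit 16 or above means the span straddles a 64 KB line. */
        return ((begin ^ end) >> 16) != 0;
}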
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
	uint32_t rctl, rfctl;
	uint32_t psrctl = 0;
#ifdef CONFIG_E1000_PACKET_SPLIT
	uint32_t pages = 0;
#endif

	rctl = E1000_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (adapter->hw.tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	if (adapter->hw.mac_type >= e1000_82571) {
		/* We can now specify buffers in 1K increments.
		 * BSIZE and BSEX are ignored in this case. */
		rctl |= adapter->rx_buffer_len << 0x11;
	} else {
		rctl &= ~E1000_RCTL_SZ_4096;
		rctl |= E1000_RCTL_BSEX;
		switch (adapter->rx_buffer_len) {
		case E1000_RXBUFFER_2048:
		default:
			rctl |= E1000_RCTL_SZ_2048;
			rctl &= ~E1000_RCTL_BSEX;
			break;
		case E1000_RXBUFFER_4096:
			rctl |= E1000_RCTL_SZ_4096;
			break;
		case E1000_RXBUFFER_8192:
			rctl |= E1000_RCTL_SZ_8192;
			break;
		case E1000_RXBUFFER_16384:
			rctl |= E1000_RCTL_SZ_16384;
			break;
		}
	}

#ifdef CONFIG_E1000_PACKET_SPLIT
	/* 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers. Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
	    PAGE_SIZE <= 16384)
		adapter->rx_ps_pages = pages;
	else
		adapter->rx_ps_pages = 0;
#endif
	if (adapter->rx_ps_pages) {
		/* Configure extra packet-split registers */
		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/* disable IPv6 packet split support */
		rfctl |= E1000_RFCTL_IPV6_DIS;
		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);

		rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;

		psrctl |= adapter->rx_ps_bsize0 >>
			E1000_PSRCTL_BSIZE0_SHIFT;

		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE3_SHIFT;
			/* fall through */
		case 2:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE2_SHIFT;
			/* fall through */
		case 1:
			psrctl |= PAGE_SIZE >>
				E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
	}

	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
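/*
 * Illustrative sketch (not part of the driver): PAGE_USE_COUNT() above is a
 * ceiling division of the MTU by the page size. With 4 KB pages, a 9000-byte
 * jumbo MTU needs (9000 >> 12) + 1 = 3 pages, which still satisfies the
 * "pages <= 3" packet-split limit; a standard 1500-byte MTU needs 1 page.
 * A self-contained userspace equivalent, with the SKETCH_ names invented
 * here and PAGE_SHIFT assumed to be 12:
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_USE_COUNT(S) (((S) >> SKETCH_PAGE_SHIFT) + \
				  (((S) & (SKETCH_PAGE_SIZE - 1)) ? 1 : 0))

int main(void)
{
        printf("mtu 1500 -> %lu page(s)\n", SKETCH_PAGE_USE_COUNT(1500UL)); /* 1 */
        printf("mtu 9000 -> %lu page(s)\n", SKETCH_PAGE_USE_COUNT(9000UL)); /* 3 */
        return 0;
}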
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
	uint64_t rdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t rdlen, rctl, rxcsum, ctrl_ext;
#ifdef CONFIG_E1000_MQ
	uint32_t reta, mrqc;
	int i;
#endif

	if (adapter->rx_ps_pages) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = E1000_READ_REG(hw, RCTL);
	E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
		if (adapter->itr > 1)
			E1000_WRITE_REG(hw, ITR,
				1000000000 / (adapter->itr * 256));
	}

	if (hw->mac_type >= e1000_82571) {
		/* Reset delay timers after every interrupt */
		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_CANC;
		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
		E1000_WRITE_FLUSH(hw);
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_queues) {
#ifdef CONFIG_E1000_MQ
	case 2:
		rdba = adapter->rx_ring[1].dma;
		E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
		E1000_WRITE_REG(hw, RDLEN1, rdlen);
		E1000_WRITE_REG(hw, RDH1, 0);
		E1000_WRITE_REG(hw, RDT1, 0);
		adapter->rx_ring[1].rdh = E1000_RDH1;
		adapter->rx_ring[1].rdt = E1000_RDT1;
		/* Fall Through */
#endif
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
		E1000_WRITE_REG(hw, RDLEN, rdlen);
		E1000_WRITE_REG(hw, RDH, 0);
		E1000_WRITE_REG(hw, RDT, 0);
		adapter->rx_ring[0].rdh = E1000_RDH;
		adapter->rx_ring[0].rdt = E1000_RDT;
		break;
	}

#ifdef CONFIG_E1000_MQ
	if (adapter->num_queues > 1) {
		uint32_t random[10];

		get_random_bytes(&random[0], 40);

		if (hw->mac_type <= e1000_82572) {
			E1000_WRITE_REG(hw, RSSIR, 0);
			E1000_WRITE_REG(hw, RSSIM, 0);
		}

		switch (adapter->num_queues) {
		case 2:
		default:
			reta = 0x00800080;
			mrqc = E1000_MRQC_ENABLE_RSS_2Q;
			break;
		}

		/* Fill out redirection table */
		for (i = 0; i < 32; i++)
			E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);

		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
		E1000_WRITE_REG(hw, MRQC, mrqc);
	}

	/* Multiqueue and packet checksumming are mutually exclusive. */
	if (hw->mac_type >= e1000_82571) {
		rxcsum = E1000_READ_REG(hw, RXCSUM);
		rxcsum |= E1000_RXCSUM_PCSD;
		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
	}

#else

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = E1000_READ_REG(hw, RXCSUM);
		if (adapter->rx_csum == TRUE) {
			rxcsum |= E1000_RXCSUM_TUOFL;

			/* Enable 82571 IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			if ((hw->mac_type >= e1000_82571) &&
			    (adapter->rx_ps_pages)) {
				rxcsum |= E1000_RXCSUM_IPPCSE;
			}
		} else {
			rxcsum &= ~E1000_RXCSUM_TUOFL;
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
	}
#endif /* CONFIG_E1000_MQ */

	if (hw->mac_type == e1000_82573)
		E1000_WRITE_REG(hw, ERT, 0x0100);

	/* Enable Receives */
	E1000_WRITE_REG(hw, RCTL, rctl);
}
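/*
 * Illustrative sketch (not part of the driver): the ITR write above converts
 * a target interrupt rate into the register's units. The ITR register holds
 * the minimum gap between interrupts in 256 ns increments, so for a rate of
 * `itr` interrupts per second the gap is (1e9 / itr) ns, which is
 * 1000000000 / (itr * 256) register units. For example, itr = 8000 gives
 * 1000000000 / 2048000 = 488 units, roughly 125 us between interrupts.
 * A standalone check of that arithmetic (the itr value is just an example):
 */
#include <stdio.h>

int main(void)
{
        unsigned int itr = 8000;                        /* target interrupts/sec */
        unsigned int reg = 1000000000u / (itr * 256u);  /* 488 units of 256 ns   */

        printf("ITR=%u -> %u units (%u ns between interrupts)\n",
               itr, reg, reg * 256u);
        return 0;
}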
/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/

static void
e1000_free_tx_resources(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

static inline void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
				 struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
			       buffer_info->dma,
			       buffer_info->length,
			       PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/

static void
e1000_clean_tx_ring(struct e1000_adapter *adapter,
		    struct e1000_tx_ring *tx_ring)
{
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
		e1000_unmap_and_free_tx_resource(adapter,
				&tx_ring->previous_buffer_info);
	}

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
}

/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/

static void
e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
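/*
 * Illustrative sketch (not part of the driver): e1000_clean_tx_ring() above
 * follows a common ring-teardown shape: walk every slot, release the
 * per-slot DMA mapping and sk_buff, zero the bookkeeping array, and reset
 * the head/tail indices. The same shape in a self-contained userspace
 * analogue, with all sketch_ names invented and plain free() standing in
 * for the unmap-and-free pair:
 */
#include <stdlib.h>
#include <string.h>

struct sketch_slot { void *buf; };

struct sketch_ring {
        struct sketch_slot *slots;
        unsigned int count;
        unsigned int head, tail;
};

static void sketch_clean_ring(struct sketch_ring *ring)
{
        unsigned int i;

        for (i = 0; i < ring->count; i++) {     /* release every slot's buffer */
                free(ring->slots[i].buf);
                ring->slots[i].buf = NULL;
        }
        /* zero the bookkeeping array wholesale, mirroring the driver's
         * memset() after the per-slot pass */
        memset(ring->slots, 0, sizeof(*ring->slots) * ring->count);
        ring->head = ring->tail = 0;            /* ring is empty again */
}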