ixgbe_main.c
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBE_RXBUFFER_2048 >>
				  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= adapter->rx_buf_len >>
				  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
		adapter->rx_ring[i].head = IXGBE_RDH(i);
		adapter->rx_ring[i].tail = IXGBE_RDT(i);
	}

	if (adapter->num_rx_queues > 1) {
		/* Random 40bytes used as random key in RSS hash function */
		get_random_bytes(&random[0], 40);

		switch (adapter->num_rx_queues) {
		case 8:
		case 4:
			/* Bits [3:0] in each byte refers the Rx queue no */
			reta = 0x00010203;
			break;
		case 2:
			reta = 0x00010001;
			break;
		default:
			reta = 0x00000000;
			break;
		}

		/* Fill out redirection table */
		for (i = 0; i < 32; i++) {
			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta);
			if (adapter->num_rx_queues > 4) {
				i++;
				IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i,
						      0x04050607);
			}
		}

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i,
					      random[i]);

		mrqc = IXGBE_MRQC_RSSEN
		    /* Perform hash on these packet types */
		       | IXGBE_MRQC_RSS_FIELD_IPV4
		       | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX
		       | IXGBE_MRQC_RSS_FIELD_IPV6
		       | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/* Multiqueue and packet checksumming are mutually exclusive. */
		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
		rxcsum |= IXGBE_RXCSUM_PCSD;
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
	} else {
		/* Enable Receive Checksum Offload for TCP and UDP */
		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
		if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
			/* Enable IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split.
			 */
			rxcsum |= IXGBE_RXCSUM_IPPCSE;
		} else {
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
}

static void ixgbe_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;

	ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	}

	ixgbe_irq_enable(adapter);
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* add VID to filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, false);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

/**
 * ixgbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void ixgbe_set_multi(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u8 *mta_list;
	u32 fctrl;
	int i;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
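	/* UPE and MPE are the unicast- and multicast-promiscuous enables in
	 * FCTRL: both are set for promiscuous mode, only MPE for allmulti,
	 * and neither when normal address filtering is in effect. */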
	if (netdev->flags & IFF_PROMISC) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	} else if (netdev->flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else {
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (netdev->mc_count) {
		mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
		if (!mta_list)
			return;

		/* Shared function expects packed array of only addresses.
		 */
		mc_ptr = netdev->mc_list;
		for (i = 0; i < netdev->mc_count; i++) {
			if (!mc_ptr)
				break;
			memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
			       ETH_ALEN);
			mc_ptr = mc_ptr->next;
		}

		ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
		kfree(mta_list);
	} else {
		ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
	}
}

static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbe_set_multi(netdev);

	ixgbe_restore_vlan(adapter);

	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
				       (adapter->rx_ring[i].count - 1));
}

static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;
	u32 gpie = 0;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl, rxdctl, mhadd;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
			      IXGBE_FLAG_MSI_ENABLED)) {
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
		} else {
			/* MSI only */
			gpie = (IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT);
		}
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
		gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	}

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
	}
	/* enable all receives */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	clear_bit(__IXGBE_DOWN, &adapter->state);
	napi_enable(&adapter->napi);
	ixgbe_irq_enable(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}

int ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	return ixgbe_up_complete(adapter);
}

void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	if (ixgbe_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");

	/* reprogram the RAR[0] in case user changed it.
	 */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err, num_rx_queues = adapter->num_rx_queues;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
				"suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = ixgbe_request_irq(adapter, &num_rx_queues);
		if (err)
			return err;
	}

	ixgbe_reset(adapter);

	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len,
					 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			dev_kfree_skb(rx_buffer_info->skb);
			rx_buffer_info->skb = NULL;
		}
		if (!rx_buffer_info->page)
			continue;
		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 rxctrl;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);
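	/* The interrupt handler, NAPI poll and watchdog re-arm themselves
	 * only while this bit is clear, so setting it first keeps them from
	 * racing with the hardware teardown below. */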
	/* disable receives */
	rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
			rxctrl & ~IXGBE_RXCTRL_RXEN);

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */

	/* flush both disables */
	IXGBE_WRITE_FLUSH(&adapter->hw);
	msleep(10);