e1000_main.c
	if ((err = register_netdev(netdev)))
		goto err_register;

	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t manc;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	flush_scheduled_work();

	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if (manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
		}
	}

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	unregister_netdev(netdev);
#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++)
		__dev_put(&adapter->polling_netdev[i]);
#endif

	if (!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

#ifdef CONFIG_E1000_MQ
	free_percpu(adapter->cpu_netdev);
	free_percpu(adapter->cpu_tx_ring);
#endif

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */

	if (e1000_set_mac_type(hw)) {
		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
		return -EIO;
	}

	/* initialize eeprom parameters */

	if (e1000_init_eeprom_params(hw)) {
		E1000_ERR("EEPROM initialization failed\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);

	hw->wait_autoneg_complete = FALSE;
	hw->tbi_compatibility_en = TRUE;
	hw->adaptive_ifs = TRUE;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = FALSE;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

#ifdef CONFIG_E1000_MQ
	/* Number of supported queues */
	switch (hw->mac_type) {
	case e1000_82571:
	case e1000_82572:
		/* These controllers support 2 tx queues, but with a single
		 * qdisc implementation, multiple tx queues aren't quite as
		 * interesting.  If we can find a logical way of mapping
		 * flows to a queue, then perhaps we can up the num_tx_queue
		 * count back to its default.  Until then, we run the risk of
		 * terrible performance due to SACK overload. */
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 2;
		break;
	default:
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		break;
	}
	adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
	adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
	DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
		adapter->num_rx_queues,
		((adapter->num_rx_queues == 1)
		 ? ((num_online_cpus() > 1)
			? "(due to unsupported feature in current adapter)"
			: "(due to unsupported system configuration)")
		 : ""));
	DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
		adapter->num_tx_queues);
#else
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;
#endif

	if (e1000_alloc_queues(adapter)) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->polling_netdev[i].priv = adapter;
		adapter->polling_netdev[i].poll = &e1000_clean;
		adapter->polling_netdev[i].weight = 64;
		dev_hold(&adapter->polling_netdev[i]);
		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
	}
	spin_lock_init(&adapter->tx_queue_lock);
#endif

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->stats_lock);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/

static int __devinit
e1000_alloc_queues(struct e1000_adapter *adapter)
{
	int size;

	size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;
	memset(adapter->tx_ring, 0, size);

	size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}
	memset(adapter->rx_ring, 0, size);

#ifdef CONFIG_E1000_NAPI
	size = sizeof(struct net_device) * adapter->num_rx_queues;
	adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
	if (!adapter->polling_netdev) {
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		return -ENOMEM;
	}
	memset(adapter->polling_netdev, 0, size);
#endif

#ifdef CONFIG_E1000_MQ
	adapter->rx_sched_call_data.func = e1000_rx_schedule;
	adapter->rx_sched_call_data.info = adapter->netdev;

	adapter->cpu_netdev = alloc_percpu(struct net_device *);
	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
#endif

	return E1000_SUCCESS;
}

#ifdef CONFIG_E1000_MQ
static void __devinit
e1000_setup_queue_mapping(struct e1000_adapter *adapter)
{
	int i, cpu;

	adapter->rx_sched_call_data.func = e1000_rx_schedule;
	adapter->rx_sched_call_data.info = adapter->netdev;
	cpus_clear(adapter->rx_sched_call_data.cpumask);

	adapter->cpu_netdev = alloc_percpu(struct net_device *);
	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);

	lock_cpu_hotplug();
	i = 0;
	for_each_online_cpu(cpu) {
		*per_cpu_ptr(adapter->cpu_tx_ring, cpu) =
			&adapter->tx_ring[i % adapter->num_tx_queues];
		/* This is incomplete because we'd like to assign separate
		 * physical cpus to these netdev polling structures and
		 * avoid saturating a subset of cpus. */
		if (i < adapter->num_rx_queues) {
			*per_cpu_ptr(adapter->cpu_netdev, cpu) =
				&adapter->polling_netdev[i];
			adapter->rx_ring[i].cpu = cpu;
			cpu_set(cpu, adapter->cpumask);
		} else
			*per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;

		i++;
	}
	unlock_cpu_hotplug();
}
#endif

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	if ((err = e1000_setup_all_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */
	if ((err = e1000_setup_all_rx_resources(adapter)))
		goto err_setup_rx;

	if ((err = e1000_up(adapter)))
		goto err_up;
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now open */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return E1000_SUCCESS;

err_up:
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_down(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now closed */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_release_hw_control(adapter);

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static inline boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
		      void *start, unsigned long len)
{
	unsigned long begin = (unsigned long) start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (adapter->hw.mac_type == e1000_82545 ||
	    adapter->hw.mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
	}

	return TRUE;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int
e1000_setup_tx_resources(struct e1000_adapter *adapter,
			 struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
	if (!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
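/*
 * A minimal standalone sketch (not part of e1000_main.c) of the errata 23
 * boundary test used by e1000_check_64k_bound() above: XOR-ing the first
 * and last byte addresses leaves a bit set at or above bit 16 exactly when
 * the two addresses fall in different 64 kB regions, i.e. when the buffer
 * crosses a 64 kB boundary.  The function name and values below are
 * illustrative assumptions only.
 */
#include <stdio.h>

static int crosses_64k_boundary(unsigned long start, unsigned long len)
{
	unsigned long begin = start;
	unsigned long end = begin + len;

	/* Non-zero high bits in the XOR mean begin and end - 1 differ in
	 * their 64 kB page number, so the region crosses a boundary. */
	return ((begin ^ (end - 1)) >> 16) != 0;
}

int main(void)
{
	/* 0x1F000..0x20FFF spans the 0x20000 boundary -> prints 1 */
	printf("%d\n", crosses_64k_boundary(0x1F000, 0x2000));
	/* 0x10000..0x10FFF stays within one 64 kB region -> prints 0 */
	printf("%d\n", crosses_64k_boundary(0x10000, 0x1000));
	return 0;
}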