📄 cxgb2.c
		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
			       &val);
		data->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
				data->val_in);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;

	if (!mac->ops->set_mtu)
		return -EOPNOTSUPP;
	if (new_mtu < 68)
		return -EINVAL;
	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct sockaddr *addr = p;

	if (!mac->ops->macaddress_set)
		return -EOPNOTSUPP;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mac->ops->macaddress_set(mac, dev->dev_addr);
	return 0;
}

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	if (adapter->vlan_grp)
		adapter->vlan_grp->vlan_devices[vid] = NULL;
	spin_unlock_irq(&adapter->async_lock);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->priv;

	local_irq_save(flags);
	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
	local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(void *data)
{
	int i;
	struct adapter *adapter = data;

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(void *data)
{
	struct adapter *adapter = data;

	elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	CH_ALERT("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		CH_ERR("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			CH_ERR("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			adapter = netdev->priv;
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				CH_ERR("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			init_MUTEX(&adapter->mib_mutex);
			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task, adapter);
			INIT_WORK(&adapter->stats_update_task, mac_stats_task,
				  adapter);
#ifdef work_struct
			init_timer(&adapter->stats_update_timer);
			adapter->stats_update_timer.function = mac_stats_timer;
			adapter->stats_update_timer.data =
				(unsigned long)adapter;
#endif

			pci_set_drvdata(pdev, netdev);
		}

		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		netdev->features |= NETIF_F_LLTX;

		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
			adapter->flags |= VLAN_ACCEL_CAPABLE;
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
			netdev->vlan_rx_register = vlan_rx_register;
			netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif
			adapter->flags |= TSO_CAPABLE;
			netdev->features |= NETIF_F_TSO;
		}

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t1_start_xmit;
		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
		netdev->get_stats = t1_get_stats;
		netdev->set_multicast_list = t1_set_rxmode;
		netdev->do_ioctl = t1_ioctl;
		netdev->change_mtu = t1_change_mtu;
		netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = t1_netpoll;
#endif
		netdev->weight = 64;

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			CH_WARN("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;
			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		CH_ERR("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);
	return 0;

 out_release_adapter_res:
	t1_free_sw_modules(adapter);
 out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev) {
				cxgb_proc_cleanup(adapter, proc_root_driver);
				kfree(adapter->port[i].dev);
			}
	}
	pci_release_regions(pdev);
 out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i].dev);

		t1_free_sw_modules(adapter);
		iounmap(adapter->regs);
		while (--i >= 0)
			if (adapter->port[i].dev) {
				cxgb_proc_cleanup(adapter, proc_root_driver);
				kfree(adapter->port[i].dev);
			}
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		t1_sw_reset(pdev);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
	return pci_module_init(&driver);
}

static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);