/*
 * qla3xxx.c — QLogic QLA3xxx Gigabit Ethernet driver.
 * (Listing captured from a source-viewer page; original header read:
 *  "from the Linux kernel source code · C · 2,557 lines · page 1 of 5".
 *  Preserved here as a comment so the file remains valid C.)
 */
linkState = LS_DOWN; if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Link is down.\n", qdev->ndev->name); } return linkState;}static int ql_port_start(struct ql3_adapter *qdev){ if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { printk(KERN_ERR "%s: Could not get hw lock for GIO\n", qdev->ndev->name); return -1; } if (ql_is_fiber(qdev)) { ql_petbi_init(qdev); } else { /* Copper port */ ql_phy_init_ex(qdev); } ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); return 0;}static int ql_finish_auto_neg(struct ql3_adapter *qdev){ if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) return -1; if (!ql_auto_neg_error(qdev)) { if (test_bit(QL_LINK_MASTER,&qdev->flags)) { /* configure the MAC */ if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX "%s: Configuring link.\n", qdev->ndev-> name); ql_mac_cfg_soft_reset(qdev, 1); ql_mac_cfg_gig(qdev, (ql_get_link_speed (qdev) == SPEED_1000)); ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup (qdev)); ql_mac_cfg_pause(qdev, ql_is_neg_pause (qdev)); ql_mac_cfg_soft_reset(qdev, 0); /* enable the MAC */ if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX "%s: Enabling mac.\n", qdev->ndev-> name); ql_mac_enable(qdev, 1); } if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX "%s: Change port_link_state LS_DOWN to LS_UP.\n", qdev->ndev->name); qdev->port_link_state = LS_UP; netif_start_queue(qdev->ndev); netif_carrier_on(qdev->ndev); if (netif_msg_link(qdev)) printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", qdev->ndev->name, ql_get_link_speed(qdev), ql_is_link_full_dup(qdev) ? "full" : "half"); } else { /* Remote error detected */ if (test_bit(QL_LINK_MASTER,&qdev->flags)) { if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX "%s: Remote error detected. " "Calling ql_port_start().\n", qdev->ndev-> name); /* * ql_port_start() is shared code and needs * to lock the PHY on it's own. 
*/ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); if(ql_port_start(qdev)) {/* Restart port */ return -1; } else return 0; } } ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); return 0;}static void ql_link_state_machine_work(struct work_struct *work){ struct ql3_adapter *qdev = container_of(work, struct ql3_adapter, link_state_work.work); u32 curr_link_state; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); curr_link_state = ql_get_link_state(qdev); if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) { if (netif_msg_link(qdev)) printk(KERN_INFO PFX "%s: Reset in progress, skip processing link " "state.\n", qdev->ndev->name); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); /* Restart timer on 2 second interval. */ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);\ return; } switch (qdev->port_link_state) { default: if (test_bit(QL_LINK_MASTER,&qdev->flags)) { ql_port_start(qdev); } qdev->port_link_state = LS_DOWN; /* Fall Through */ case LS_DOWN: if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX "%s: port_link_state = LS_DOWN.\n", qdev->ndev->name); if (curr_link_state == LS_UP) { if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX "%s: curr_link_state = LS_UP.\n", qdev->ndev->name); if (ql_is_auto_neg_complete(qdev)) ql_finish_auto_neg(qdev); if (qdev->port_link_state == LS_UP) ql_link_down_detect_clear(qdev); } break; case LS_UP: /* * See if the link is currently down or went down and came * back up */ if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) { if (netif_msg_link(qdev)) printk(KERN_INFO PFX "%s: Link is down.\n", qdev->ndev->name); qdev->port_link_state = LS_DOWN; } break; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); /* Restart timer on 2 second interval. */ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);}/* * Caller must take hw_lock and QL_PHY_GIO_SEM. 
*/static void ql_get_phy_owner(struct ql3_adapter *qdev){ if (ql_this_adapter_controls_port(qdev)) set_bit(QL_LINK_MASTER,&qdev->flags); else clear_bit(QL_LINK_MASTER,&qdev->flags);}/* * Caller must take hw_lock and QL_PHY_GIO_SEM. */static void ql_init_scan_mode(struct ql3_adapter *qdev){ ql_mii_enable_scan_mode(qdev); if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { if (ql_this_adapter_controls_port(qdev)) ql_petbi_init_ex(qdev); } else { if (ql_this_adapter_controls_port(qdev)) ql_phy_init_ex(qdev); }}/* * MII_Setup needs to be called before taking the PHY out of reset so that the * management interface clock speed can be set properly. It would be better if * we had a way to disable MDC until after the PHY is out of reset, but we * don't have that capability. */static int ql_mii_setup(struct ql3_adapter *qdev){ u32 reg; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) return -1; if (qdev->device_id == QL3032_DEVICE_ID) ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 0x0f00000); /* Divide 125MHz clock by 28 to meet PHY timing requirements */ reg = MAC_MII_CONTROL_CLK_SEL_DIV28; ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); return 0;}static u32 ql_supported_modes(struct ql3_adapter *qdev){ u32 supported; if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | SUPPORTED_Autoneg; } else { supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP; } return supported;}static int ql_get_auto_cfg_status(struct ql3_adapter *qdev){ int status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if(ql_sem_spinlock(qdev, 
QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } status = ql_is_auto_cfg(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return status;}static u32 ql_get_speed(struct ql3_adapter *qdev){ u32 status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } status = ql_get_link_speed(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return status;}static int ql_get_full_dup(struct ql3_adapter *qdev){ int status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } status = ql_is_link_full_dup(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return status;}static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd){ struct ql3_adapter *qdev = netdev_priv(ndev); ecmd->transceiver = XCVR_INTERNAL; ecmd->supported = ql_supported_modes(qdev); if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { ecmd->port = PORT_FIBRE; } else { ecmd->port = PORT_TP; ecmd->phy_address = qdev->PHYAddr; } ecmd->advertising = ql_supported_modes(qdev); ecmd->autoneg = ql_get_auto_cfg_status(qdev); ecmd->speed = ql_get_speed(qdev); ecmd->duplex = ql_get_full_dup(qdev); return 0;}static void ql_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo){ struct ql3_adapter *qdev = netdev_priv(ndev); strncpy(drvinfo->driver, ql3xxx_driver_name, 32); strncpy(drvinfo->version, ql3xxx_driver_version, 32); strncpy(drvinfo->fw_version, "N/A", 32); strncpy(drvinfo->bus_info, 
pci_name(qdev->pdev), 32); drvinfo->regdump_len = 0; drvinfo->eedump_len = 0;}static u32 ql_get_msglevel(struct net_device *ndev){ struct ql3_adapter *qdev = netdev_priv(ndev); return qdev->msg_enable;}static void ql_set_msglevel(struct net_device *ndev, u32 value){ struct ql3_adapter *qdev = netdev_priv(ndev); qdev->msg_enable = value;}static void ql_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause){ struct ql3_adapter *qdev = netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 reg; if(qdev->mac_index == 0) reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); else reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); pause->autoneg = ql_get_auto_cfg_status(qdev); pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;}static const struct ethtool_ops ql3xxx_ethtool_ops = { .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = ql_get_msglevel, .set_msglevel = ql_set_msglevel, .get_pauseparam = ql_get_pauseparam,};static int ql_populate_free_queue(struct ql3_adapter *qdev){ struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; dma_addr_t map; int err; while (lrg_buf_cb) { if (!lrg_buf_cb->skb) { lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { printk(KERN_DEBUG PFX "%s: Failed netdev_alloc_skb().\n", qdev->ndev->name); break; } else { /* * We save some space to copy the ethhdr from * first buffer */ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); map = pci_map_single(qdev->pdev, lrg_buf_cb->skb->data, qdev->lrg_buffer_len - QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; break; } lrg_buf_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map)); 
lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); pci_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); --qdev->lrg_buf_skb_check; if (!qdev->lrg_buf_skb_check) return 1; } } lrg_buf_cb = lrg_buf_cb->next; } return 0;}/* * Caller holds hw_lock. */static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev){ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if (qdev->small_buf_release_cnt >= 16) { while (qdev->small_buf_release_cnt >= 16) { qdev->small_buf_q_producer_index++; if (qdev->small_buf_q_producer_index == NUM_SBUFQ_ENTRIES) qdev->small_buf_q_producer_index = 0; qdev->small_buf_release_cnt -= 8; } wmb(); writel(qdev->small_buf_q_producer_index, &port_regs->CommonRegs.rxSmallQProducerIndex); }}/* * Caller holds hw_lock. */static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev){ struct bufq_addr_element *lrg_buf_q_ele; int i; struct ql_rcv_buf_cb *lrg_buf_cb; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if ((qdev->lrg_buf_free_count >= 8) && (qdev->lrg_buf_release_cnt >= 16)) { if (qdev->lrg_buf_skb_check) if (!ql_populate_free_queue(qdev)) return; lrg_buf_q_ele = qdev->lrg_buf_next_free; while ((qdev->lrg_buf_release_cnt >= 16) && (qdev->lrg_buf_free_count >= 8)) { for (i = 0; i < 8; i++) { lrg_buf_cb = ql_get_from_lrg_buf_free_list(qdev); lrg_buf_q_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; lrg_buf_q_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; lrg_buf_q_ele++; qdev->lrg_buf_release_cnt--; } qdev->lrg_buf_q_producer_index++; if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries) qdev->lrg_buf_q_producer_index = 0; if (qdev->lrg_buf_q_producer_index == (qdev->num_lbufq_entries - 1)) { lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; } } wmb(); qdev->lrg_buf_next_free = lrg_buf_q_ele; writel(qdev->lrg_buf_q_producer_index, &port_regs->CommonRegs.rxLargeQProducerIndex); }}static void 
ql_process_mac_tx_intr(struct ql3_adapter *qdev, struct ob_mac_iocb_rsp *mac_rsp){ struct ql_tx_buf_cb *tx_cb; int i; int retval = 0;
/*
 * (Viewer chrome from the hosting site, not part of the driver source —
 *  keyboard-shortcut help, translated: Copy code: Ctrl+C; Search code:
 *  Ctrl+F; Full-screen mode: F11; Increase font size: Ctrl+=; Decrease
 *  font size: Ctrl+-; Show shortcuts: ?.  Preserved as a comment so the
 *  file remains valid C.)
 */