e100_main.c
	for (i = 0; i < 100; i++) {
		if (!readb(&bdp->scb->scb_cmd_low))
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 100 + 1);
	}

	/* Wait for TCO request bit in PMDR register to be clear */
	for (i = 0; i < 50; i++) {
		if (!(readb(&bdp->scb->scb_ext.d101m_scb.scb_pmdr) & BIT_1))
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 100 + 1);
	}
}

/**
 * e100_hw_init - initialize the hardware
 * @bdp: adapter's private data struct
 *
 * This routine performs a reset on the adapter, and configures the adapter.
 * This includes configuring the 82557 LAN controller, validating and setting
 * the node address, detecting and configuring the Phy chip on the adapter,
 * and initializing all of the on chip counters.
 *
 * Returns:
 *      true - If the adapter was initialized
 *      false - If the adapter failed initialization
 */
unsigned char __devinit
e100_hw_init(struct e100_private *bdp)
{
	if (!e100_phy_init(bdp))
		return false;

	e100_sw_reset(bdp, PORT_SELECTIVE_RESET);

	/* Only 82559 or above needs TCO workaround */
	if (bdp->rev_id >= D101MA_REV_ID)
		e100_tco_workaround(bdp);

	/* Load the CU BASE (set to 0, because we use linear mode) */
	if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
		return false;

	if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
		return false;

	/* Load interrupt microcode */
	if (e100_load_microcode(bdp)) {
		bdp->flags |= DF_UCODE_LOADED;
	}

	e100_config_init(bdp);
	if (!e100_config(bdp)) {
		return false;
	}

	if (!e100_setup_iaaddr(bdp, bdp->device->dev_addr))
		return false;

	/* Clear the internal counters */
	if (!e100_clr_cntrs(bdp))
		return false;

	/* Change for 82558 enhancement */
	/* If 82558/9 and if the user has enabled flow control, set up the
	 * Flow Control Reg. in the CSR */
	if ((bdp->flags & IS_BACHELOR)
	    && (bdp->params.b_params & PRM_FC)) {
		writeb(DFLT_FC_THLD, &bdp->scb->scb_ext.d101_scb.scb_fc_thld);
		writeb(DFLT_FC_CMD,
		       &bdp->scb->scb_ext.d101_scb.scb_fc_xon_xoff);
	}

	return true;
}
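/*
 * Illustrative sketch: one plausible order in which the allocation and
 * initialization routines in this file could be called from a simplified
 * probe/open path. The wrapper name e100_bringup_sketch() and its error
 * codes are hypothetical; only the e100_* routines it calls are real.
 */
#if 0				/* documentation only */
static int
e100_bringup_sketch(struct e100_private *bdp)
{
	/* DMA-able selftest/statistics block first */
	if (e100_alloc_space(bdp))
		return -ENOMEM;

	/* reset and configure the MAC, PHY and microcode */
	if (!e100_hw_init(bdp))
		return -ENODEV;

	/* transmit side: allocate the TCBs, then link them into a ring */
	if (!e100_alloc_tcb_pool(bdp))
		return -ENOMEM;
	e100_setup_tcb_pool((tcb_t *) bdp->tcb_pool.data,
			    bdp->params.TxDescriptors, bdp);

	/* receive side: rx_list_elem pool plus the initial skbs */
	if (!e100_alloc_rfd_pool(bdp))
		return -ENOMEM;

	return 0;
}
#endif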
/**
 * e100_setup_tcb_pool - setup TCB circular list
 * @head: Pointer to head of the allocated TCBs
 * @qlen: Number of elements in the queue
 * @bdp: adapter's private data struct
 *
 * This routine arranges the contiguously allocated TCBs in a circular list.
 * It also does the one-time initialization of the TCBs.
 */
static void
e100_setup_tcb_pool(tcb_t *head, unsigned int qlen, struct e100_private *bdp)
{
	int ele_no;
	tcb_t *pcurr_tcb;	/* point to current tcb */
	u32 next_phys;		/* the next phys addr */
	u16 txcommand = CB_S_BIT | CB_TX_SF_BIT;

	bdp->tx_count = 0;
	if (bdp->flags & USE_IPCB) {
		txcommand |= CB_IPCB_TRANSMIT | CB_CID_DEFAULT;
	} else if (bdp->flags & IS_BACHELOR) {
		txcommand |= CB_TRANSMIT | CB_CID_DEFAULT;
	} else {
		txcommand |= CB_TRANSMIT;
	}

	for (ele_no = 0, next_phys = bdp->tcb_phys, pcurr_tcb = head;
	     ele_no < qlen; ele_no++, pcurr_tcb++) {

		/* set the phys addr for this TCB, next_phys has not been
		 * incremented yet */
		pcurr_tcb->tcb_phys = next_phys;
		next_phys += sizeof (tcb_t);

		/* set the link to next tcb */
		if (ele_no == (qlen - 1))
			pcurr_tcb->tcb_hdr.cb_lnk_ptr =
				cpu_to_le32(bdp->tcb_phys);
		else
			pcurr_tcb->tcb_hdr.cb_lnk_ptr = cpu_to_le32(next_phys);

		pcurr_tcb->tcb_hdr.cb_status = 0;
		pcurr_tcb->tcb_hdr.cb_cmd = cpu_to_le16(txcommand);
		pcurr_tcb->tcb_cnt = 0;
		pcurr_tcb->tcb_thrshld = bdp->tx_thld;
		if (ele_no < 2) {
			pcurr_tcb->tcb_hdr.cb_status =
				cpu_to_le16(CB_STATUS_COMPLETE);
		}
		pcurr_tcb->tcb_tbd_num = 1;

		if (bdp->flags & IS_BACHELOR) {
			pcurr_tcb->tcb_tbd_ptr =
				__constant_cpu_to_le32(0xFFFFFFFF);
		} else {
			pcurr_tcb->tcb_tbd_ptr =
				cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
		}

		if (bdp->flags & IS_BACHELOR) {
			pcurr_tcb->tcb_tbd_expand_ptr =
				cpu_to_le32(pcurr_tcb->tcb_phys + 0x20);
		} else {
			pcurr_tcb->tcb_tbd_expand_ptr =
				cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
		}
		pcurr_tcb->tcb_tbd_dflt_ptr = pcurr_tcb->tcb_tbd_ptr;

		if (bdp->flags & USE_IPCB) {
			pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[1]);
			pcurr_tcb->tcbu.ipcb.ip_activation_high =
				IPCB_IP_ACTIVATION_DEFAULT;
			pcurr_tcb->tcbu.ipcb.vlan = 0;
		} else {
			pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[0]);
		}

		pcurr_tcb->tcb_skb = NULL;
	}

	wmb();
}
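/*
 * Note on the layout produced by e100_setup_tcb_pool(): with qlen TCBs
 * allocated contiguously starting at physical address bdp->tcb_phys, each
 * cb_lnk_ptr holds the physical address of the following TCB and the last
 * entry links back to bdp->tcb_phys, so the controller sees one closed ring:
 *
 *   TCB[0] -> TCB[1] -> ... -> TCB[qlen-1] -> TCB[0]
 *
 * The first two entries start out with CB_STATUS_COMPLETE already set (the
 * ele_no < 2 case above), i.e. they look like descriptors the hardware has
 * finished with.
 */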
/*****************************************************************************/
/*****************************************************************************/
/*                       Memory Management Routines                          */
/*****************************************************************************/

/**
 * e100_alloc_space - allocate private driver data
 * @bdp: adapter's private data struct
 *
 * This routine allocates memory for the driver. Memory allocated is for the
 * selftest and statistics structures.
 *
 * Returns:
 *      0: if the operation was successful
 *      %-ENOMEM: if memory allocation failed
 */
unsigned char __devinit
e100_alloc_space(struct e100_private *bdp)
{
	unsigned long off;

	/* allocate all the dma-able structures in one call:
	 * selftest results, adapter stats, and non-tx cb commands */
	if (!(bdp->dma_able =
	      pci_alloc_consistent(bdp->pdev, sizeof (bd_dma_able_t),
				   &(bdp->dma_able_phys)))) {
		goto err;
	}

	/* now assign the various pointers into the struct we've just allocated */
	off = offsetof(bd_dma_able_t, selftest);

	bdp->selftest = (self_test_t *) (bdp->dma_able + off);
	bdp->selftest_phys = bdp->dma_able_phys + off;

	off = offsetof(bd_dma_able_t, stats_counters);

	bdp->stats_counters = (max_counters_t *) (bdp->dma_able + off);
	bdp->stat_cnt_phys = bdp->dma_able_phys + off;

	return 0;

err:
	printk(KERN_ERR "e100: Failed to allocate memory\n");
	return -ENOMEM;
}

/**
 * e100_alloc_tcb_pool - allocate TCB circular list
 * @bdp: adapter's private data struct
 *
 * This routine allocates memory for the circular list of transmit descriptors.
 *
 * Returns:
 *      0: if allocation has failed.
 *      1: Otherwise.
 */
int
e100_alloc_tcb_pool(struct e100_private *bdp)
{
	int stcb = sizeof (tcb_t) * bdp->params.TxDescriptors;

	/* allocate space for the TCBs */
	if (!(bdp->tcb_pool.data =
	      pci_alloc_consistent(bdp->pdev, stcb, &bdp->tcb_phys)))
		return 0;

	memset(bdp->tcb_pool.data, 0x00, stcb);

	return 1;
}

void
e100_free_tcb_pool(struct e100_private *bdp)
{
	pci_free_consistent(bdp->pdev,
			    sizeof (tcb_t) * bdp->params.TxDescriptors,
			    bdp->tcb_pool.data, bdp->tcb_phys);
	bdp->tcb_phys = 0;
}

static void
e100_dealloc_space(struct e100_private *bdp)
{
	if (bdp->dma_able) {
		pci_free_consistent(bdp->pdev, sizeof (bd_dma_able_t),
				    bdp->dma_able, bdp->dma_able_phys);
	}

	bdp->selftest_phys = 0;
	bdp->stat_cnt_phys = 0;
	bdp->dma_able_phys = 0;
	bdp->dma_able = 0;
}

static void
e100_free_rfd_pool(struct e100_private *bdp)
{
	struct rx_list_elem *rx_struct;

	while (!list_empty(&(bdp->active_rx_list))) {

		rx_struct = list_entry(bdp->active_rx_list.next,
				       struct rx_list_elem, list_elem);
		list_del(&(rx_struct->list_elem));
		pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
				 sizeof (rfd_t), PCI_DMA_TODEVICE);
		dev_kfree_skb(rx_struct->skb);
		kfree(rx_struct);
	}

	while (!list_empty(&(bdp->rx_struct_pool))) {
		rx_struct = list_entry(bdp->rx_struct_pool.next,
				       struct rx_list_elem, list_elem);
		list_del(&(rx_struct->list_elem));
		kfree(rx_struct);
	}
}

/**
 * e100_alloc_rfd_pool - allocate RFDs
 * @bdp: adapter's private data struct
 *
 * Allocates the initial pool of skbs, each of which holds both the rfd and
 * the data, and returns a pointer to the head of the list.
 */
static int
e100_alloc_rfd_pool(struct e100_private *bdp)
{
	struct rx_list_elem *rx_struct;
	int i;

	INIT_LIST_HEAD(&(bdp->active_rx_list));
	INIT_LIST_HEAD(&(bdp->rx_struct_pool));
	bdp->skb_req = bdp->params.RxDescriptors;

	for (i = 0; i < bdp->skb_req; i++) {
		rx_struct = kmalloc(sizeof (struct rx_list_elem), GFP_ATOMIC);
		list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
	}

	e100_alloc_skbs(bdp);
	return !list_empty(&(bdp->active_rx_list));
}

void
e100_clear_pools(struct e100_private *bdp)
{
	bdp->last_tcb = NULL;
	e100_free_rfd_pool(bdp);
	e100_free_tcb_pool(bdp);
}
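/*
 * Note on the receive bookkeeping above: bdp->rx_struct_pool holds spare
 * struct rx_list_elem entries with no skb attached, while
 * bdp->active_rx_list holds the entries whose RFD-bearing skbs are mapped
 * for the hardware. e100_alloc_skbs() (defined elsewhere in this file) moves
 * entries from the spare pool onto the active list, which is why
 * e100_alloc_rfd_pool() reports success by checking that active_rx_list is
 * non-empty afterwards. The hypothetical helper below only illustrates how
 * the active list can be walked; it is not part of the driver.
 */
#if 0				/* documentation only */
static int
e100_count_active_rfds_sketch(struct e100_private *bdp)
{
	struct list_head *pos;
	int count = 0;

	/* count the receive buffers currently posted to the hardware */
	list_for_each(pos, &bdp->active_rx_list)
		count++;

	return count;
}
#endif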
/*****************************************************************************/
/*****************************************************************************/
/*                           Run Time Functions                              */
/*****************************************************************************/

/**
 * e100_watchdog
 * @dev: adapter's net_device struct
 *
 * This routine runs every 2 seconds, updates our statistics and link state,
 * and refreshes the txthld value.
 */
void
e100_watchdog(struct net_device *dev)
{
	struct e100_private *bdp = dev->priv;

#ifdef E100_CU_DEBUG
	if (e100_cu_unknown_state(bdp)) {
		printk(KERN_ERR "e100: %s: CU unknown state in e100_watchdog\n",
		       dev->name);
	}
#endif
	if (!netif_running(dev)) {
		return;
	}

	/* check if link state has changed */
	if (e100_phy_check(bdp)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_ERR
			       "e100: %s NIC Link is Up %d Mbps %s duplex\n",
			       bdp->device->name, bdp->cur_line_speed,
			       (bdp->cur_dplx_mode == HALF_DUPLEX) ?
			       "Half" : "Full");

			e100_config_fc(bdp);
			e100_config(bdp);

		} else {
			printk(KERN_ERR "e100: %s NIC Link is Down\n",
			       bdp->device->name);
		}
	}

	// toggle the tx queue according to link status
	// this also resolves a race condition between tx & non-cu cmd flows
	if (netif_carrier_ok(dev)) {
		if (netif_running(dev))
			netif_wake_queue(dev);
	} else {
		if (netif_running(dev))
			netif_stop_queue(dev);
		/* When changing to non-autoneg, device may lose  */
		/* link with some switches. e100 will try to      */
		/* recover link by sending command to PHY layer   */
		if (bdp->params.e100_speed_duplex != E100_AUTONEG)
			e100_force_speed_duplex_to_phy(bdp);
	}

	rmb();

	if (e100_update_stats(bdp)) {
		/* Check if a change in the IFS parameter is needed,
		   and configure the device accordingly */
		if (bdp->params.b_params & PRM_IFS)
			e100_manage_adaptive_ifs(bdp);

		/* Now adjust our dynamic tx threshold value */
		e100_refresh_txthld(bdp);

		/* Now if we are on a 557 and we haven't received any frames
		 * then we should issue a multicast command to reset the RU */
		if (bdp->rev_id < D101A4_REV_ID) {
			if (!(bdp->stats_counters->basic_stats.rcv_gd_frames)) {
				e100_set_multi(dev);
			}
		}

		/* Update the statistics needed by the upper interface */
		/* This should be the last statistic related command
		 * as it's async. now */
		e100_dump_stats_cntrs(bdp);
	}

	wmb();

	/* relaunch watchdog timer in 2 sec */
	mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));

	if (list_empty(&bdp->active_rx_list))
		e100_trigger_SWI(bdp);
}

/**
 * e100_manage_adaptive_ifs
 * @bdp: adapter's private data struct
 *
 * This routine manages the adaptive Inter-Frame Spacing algorithm
 * using a state machine.
 */
void
e100_manage_adaptive_ifs(struct e100_private *bdp)
{
	static u16 state_table[9][4] = {	// rows are states
		{2, 0, 0, 0},	// state0   // column0: next state if increasing
		{2, 0, 5, 30},	// state1   // column1: next state if decreasing
		{5, 1, 5, 30},	// state2   // column2: IFS value for 100 mbit
		{5, 3, 0, 0},	// state3   // column3: IFS value for 10 mbit
		{5, 3, 10, 60},	// state4
		{8, 4, 10, 60},	// state5
		{8, 6, 0, 0},	// state6
		{8, 6, 20, 60},	// state7
		{8, 7, 20, 60}	// state8
	};

	u32 transmits =
		le32_to_cpu(bdp->stats_counters->basic_stats.xmt_gd_frames);
	u32 collisions =
		le32_to_cpu(bdp->stats_counters->basic_stats.xmt_ttl_coll);
	u32 state = bdp->ifs_state;
	u32 old_value = bdp->ifs_value;
	int next_col;
	u32 min_transmits;

	if (bdp->cur_dplx_mode == FULL_DUPLEX) {
		bdp->ifs_state = 0;
		bdp->ifs_value = 0;

	} else {		/* Half Duplex */
		/* Set speed specific parameters */
		if (bdp->cur_line_speed == 100) {
			next_col = 2;
			min_transmits = MIN_NUMBER_OF_TRANSMITS_100;

		} else {	/* 10 Mbps */
			next_col = 3;
			min_transmits = MIN_NUMBER_OF_TRANSMITS_10;