📄 e100_main.c
	u32 st_result = 0;

	e100_sw_init(bdp);

	if (!e100_selftest(bdp, &st_timeout, &st_result)) {
		if (st_timeout) {
			printk(KERN_ERR "e100: selftest timeout\n");
		} else {
			printk(KERN_ERR "e100: selftest failed. Results: %x\n",
			       st_result);
		}
		return false;
	} else
		printk(KERN_DEBUG "e100: selftest OK.\n");

	/* read the MAC address from the EEPROM */
	e100_rd_eaddr(bdp);
	if (!is_valid_ether_addr(bdp->device->dev_addr)) {
		printk(KERN_ERR "e100: Invalid Ethernet address\n");
		return false;
	}

	/* read NIC's part number */
	e100_rd_pwa_no(bdp);

	if (!e100_hw_init(bdp))
		return false;

	/* Interrupts are enabled after device reset */
	e100_disable_clear_intr(bdp);

	return true;
}

/**
 * e100_sw_init - initialize software structs
 * @bdp: adapter's private data struct
 *
 * This routine initializes all software structures. Sets up the
 * circular structures for the RFDs & TCBs. Allocates the per-board
 * structure for storing adapter information. The CSR is also memory
 * mapped in this routine.
 *
 * Returns:
 *      true: if S/W was successfully initialized
 *      false: otherwise
 */
static unsigned char __devinit
e100_sw_init(struct e100_private *bdp)
{
	bdp->next_cu_cmd = START_WAIT;	// init the next cu state

	/*
	 * Set the value for # of good xmits per underrun. The value assigned
	 * here is an intelligent suggested default. Nothing magical about it.
	 */
	bdp->tx_per_underrun = DEFAULT_TX_PER_UNDERRUN;

	/* get the default transmit threshold value */
	bdp->tx_thld = TX_THRSHLD;

	/* get the EEPROM size */
	bdp->eeprom_size = e100_eeprom_size(bdp);

	/* Initialize our spinlocks */
	spin_lock_init(&(bdp->bd_lock));
	spin_lock_init(&(bdp->bd_non_tx_lock));
	spin_lock_init(&(bdp->config_lock));
	spin_lock_init(&(bdp->mdi_access_lock));

	/* Initialize configuration data */
	e100_config_init(bdp);

	return 1;
}

static void
e100_tco_workaround(struct e100_private *bdp)
{
	int i;

	/* Do software reset */
	e100_sw_reset(bdp, PORT_SOFTWARE_RESET);

	/* Do a dummy LOAD CU BASE command. */
	/* This gets us out of pre-driver to post-driver. */
	e100_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE);

	/* Wait 20 msec for reset to take effect */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ / 50 + 1);

	/* disable interrupts since they are enabled */
	/* after device reset */
	e100_disable_clear_intr(bdp);

	/* Wait for command to be cleared up to 1 sec */
	for (i = 0; i < 100; i++) {
		if (!readb(&bdp->scb->scb_cmd_low))
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 100 + 1);
	}

	/* Wait for TCO request bit in PMDR register to be clear */
	for (i = 0; i < 50; i++) {
		if (!(readb(&bdp->scb->scb_ext.d101m_scb.scb_pmdr) & BIT_1))
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 100 + 1);
	}
}

/**
 * e100_hw_init - initialize the hardware
 * @bdp: adapter's private data struct
 *
 * This routine performs a reset on the adapter, and configures the adapter.
 * This includes configuring the 82557 LAN controller, validating and setting
 * the node address, detecting and configuring the Phy chip on the adapter,
 * and initializing all of the on-chip counters.
 *
 * Returns:
 *      true - If the adapter was initialized
 *      false - If the adapter failed initialization
 */
unsigned char
e100_hw_init(struct e100_private *bdp)
{
	if (!e100_phy_init(bdp))
		goto err;

	e100_sw_reset(bdp, PORT_SELECTIVE_RESET);

	/* Only 82559 or above needs TCO workaround */
	if (bdp->rev_id >= D101MA_REV_ID)
		e100_tco_workaround(bdp);

	/* Load the CU BASE (set to 0, because we use linear mode) */
	if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
		goto err;

	if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
		goto err;

	/* Load interrupt microcode */
	if (e100_load_microcode(bdp)) {
		bdp->flags |= DF_UCODE_LOADED;
	}

	if ((u8) bdp->rev_id < D101A4_REV_ID)
		e100_config_init_82557(bdp);

	if (!e100_config(bdp))
		goto err;

	if (!e100_setup_iaaddr(bdp, bdp->device->dev_addr))
		goto err;

	/* Clear the internal counters */
	if (!e100_clr_cntrs(bdp))
		goto err;

	/* Change for 82558 enhancement */
	/* If 82558/9 and if the user has enabled flow control, set up the
	 * Flow Control Reg. in the CSR */
	if ((bdp->flags & IS_BACHELOR)
	    && (bdp->params.b_params & PRM_FC)) {
		writeb(DFLT_FC_THLD, &bdp->scb->scb_ext.d101_scb.scb_fc_thld);
		writeb(DFLT_FC_CMD,
		       &bdp->scb->scb_ext.d101_scb.scb_fc_xon_xoff);
	}

	return true;

err:
	printk(KERN_ERR "e100: hw init failed\n");
	return false;
}

/**
 * e100_setup_tcb_pool - setup TCB circular list
 * @head: Pointer to head of the allocated TCBs
 * @qlen: Number of elements in the queue
 * @bdp: adapter's private data struct
 *
 * This routine arranges the contiguously allocated TCBs in a circular list.
 * Also does the one-time initialization of the TCBs.
 */
static void
e100_setup_tcb_pool(tcb_t *head, unsigned int qlen, struct e100_private *bdp)
{
	int ele_no;
	tcb_t *pcurr_tcb;	/* point to current tcb */
	u32 next_phys;		/* the next phys addr */
	u16 txcommand = CB_S_BIT | CB_TX_SF_BIT;

	bdp->tx_count = 0;
	if (bdp->flags & USE_IPCB) {
		txcommand |= CB_IPCB_TRANSMIT | CB_CID_DEFAULT;
	} else if (bdp->flags & IS_BACHELOR) {
		txcommand |= CB_TRANSMIT | CB_CID_DEFAULT;
	} else {
		txcommand |= CB_TRANSMIT;
	}

	for (ele_no = 0, next_phys = bdp->tcb_phys, pcurr_tcb = head;
	     ele_no < qlen; ele_no++, pcurr_tcb++) {

		/* set the phys addr for this TCB, next_phys has not incr.
		 * yet */
		pcurr_tcb->tcb_phys = next_phys;
		next_phys += sizeof (tcb_t);

		/* set the link to next tcb */
		if (ele_no == (qlen - 1))
			pcurr_tcb->tcb_hdr.cb_lnk_ptr =
				cpu_to_le32(bdp->tcb_phys);
		else
			pcurr_tcb->tcb_hdr.cb_lnk_ptr = cpu_to_le32(next_phys);

		pcurr_tcb->tcb_hdr.cb_status = 0;
		pcurr_tcb->tcb_hdr.cb_cmd = cpu_to_le16(txcommand);
		pcurr_tcb->tcb_cnt = 0;
		pcurr_tcb->tcb_thrshld = bdp->tx_thld;
		if (ele_no < 2) {
			pcurr_tcb->tcb_hdr.cb_status =
				cpu_to_le16(CB_STATUS_COMPLETE);
		}
		pcurr_tcb->tcb_tbd_num = 1;

		if (bdp->flags & IS_BACHELOR) {
			pcurr_tcb->tcb_tbd_ptr =
				__constant_cpu_to_le32(0xFFFFFFFF);
		} else {
			pcurr_tcb->tcb_tbd_ptr =
				cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
		}

#ifdef MAX_SKB_FRAGS
		if (bdp->flags & IS_BACHELOR) {
			pcurr_tcb->tcb_tbd_expand_ptr =
				cpu_to_le32(pcurr_tcb->tcb_phys + 0x20);
		} else {
			pcurr_tcb->tcb_tbd_expand_ptr =
				cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
		}
		pcurr_tcb->tcb_tbd_dflt_ptr = pcurr_tcb->tcb_tbd_ptr;
#endif

		if (bdp->flags & USE_IPCB) {
			pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[1]);
			pcurr_tcb->tcbu.ipcb.ip_activation_high =
				IPCB_IP_ACTIVATION_DEFAULT;
			pcurr_tcb->tcbu.ipcb.vlan = 0;
		} else {
			pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[0]);
		}

		pcurr_tcb->tcb_skb = NULL;
	}

	wmb();
}

/***************************************************************************/
/***************************************************************************/
/*       Memory Management Routines                                        */
/***************************************************************************/

/**
 * e100_alloc_space - allocate private driver data
 * @bdp: adapter's private data struct
 *
 * This routine allocates memory for the driver. Memory allocated is for the
 * selftest and statistics structures.
 *
 * Returns:
 *      0: if the operation was successful
 *      %-ENOMEM: if memory allocation failed
 */
unsigned char __devinit
e100_alloc_space(struct e100_private *bdp)
{
	unsigned long off;

	/* allocate all the dma-able structures in one call:
	 * selftest results, adapter stats, and non-tx cb commands */
	if (!(bdp->dma_able =
	      pci_alloc_consistent(bdp->pdev, sizeof (bd_dma_able_t),
				   &(bdp->dma_able_phys)))) {
		goto err;
	}

	/* now assign the various pointers into the struct we've just allocated */
	off = offsetof(bd_dma_able_t, selftest);
	bdp->selftest = (self_test_t *) (bdp->dma_able + off);
	bdp->selftest_phys = bdp->dma_able_phys + off;

	off = offsetof(bd_dma_able_t, stats_counters);
	bdp->stats_counters = (max_counters_t *) (bdp->dma_able + off);
	bdp->stat_cnt_phys = bdp->dma_able_phys + off;

	return 0;

err:
	printk(KERN_ERR "e100: Failed to allocate memory\n");
	return -ENOMEM;
}

/**
 * e100_alloc_tcb_pool - allocate TCB circular list
 * @bdp: adapter's private data struct
 *
 * This routine allocates memory for the circular list of transmit descriptors.
 *
 * Returns:
 *      0: if allocation has failed.
 *      1: Otherwise.
 */
int
e100_alloc_tcb_pool(struct e100_private *bdp)
{
	int stcb = sizeof (tcb_t) * bdp->params.TxDescriptors;

	/* allocate space for the TCBs */
	if (!(bdp->tcb_pool.data =
	      pci_alloc_consistent(bdp->pdev, stcb, &bdp->tcb_phys)))
		return 0;

	memset(bdp->tcb_pool.data, 0x00, stcb);

	return 1;
}

void
e100_free_tcb_pool(struct e100_private *bdp)
{
	tcb_t *tcb;
	int i;

	/* Return tx skbs */
	for (i = 0; i < bdp->params.TxDescriptors; i++) {
		tcb = bdp->tcb_pool.data;
		tcb += bdp->tcb_pool.head;
		e100_tx_skb_free(bdp, tcb);
		if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail)
			break;
		bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
	}

	pci_free_consistent(bdp->pdev,
			    sizeof (tcb_t) * bdp->params.TxDescriptors,
			    bdp->tcb_pool.data, bdp->tcb_phys);

	bdp->tcb_pool.head = 0;
	bdp->tcb_pool.tail = 1;
	bdp->tcb_phys = 0;
}

static void
e100_dealloc_space(struct e100_private *bdp)
{
	if (bdp->dma_able) {
		pci_free_consistent(bdp->pdev, sizeof (bd_dma_able_t),
				    bdp->dma_able, bdp->dma_able_phys);
	}

	bdp->selftest_phys = 0;
	bdp->stat_cnt_phys = 0;
	bdp->dma_able_phys = 0;
	bdp->dma_able = 0;
}

static void
e100_free_rfd_pool(struct e100_private *bdp)
{
	struct rx_list_elem *rx_struct;

	while (!list_empty(&(bdp->active_rx_list))) {
		rx_struct = list_entry(bdp->active_rx_list.next,
				       struct rx_list_elem, list_elem);
		list_del(&(rx_struct->list_elem));
		pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
				 sizeof (rfd_t), PCI_DMA_TODEVICE);
		dev_kfree_skb(rx_struct->skb);
		kfree(rx_struct);
	}

	while (!list_empty(&(bdp->rx_struct_pool))) {
		rx_struct = list_entry(bdp->rx_struct_pool.next,
				       struct rx_list_elem, list_elem);
		list_del(&(rx_struct->list_elem));
		kfree(rx_struct);
	}
}

/**
 * e100_alloc_rfd_pool - allocate RFDs
 * @bdp: adapter's private data struct
 *
 * Allocates the initial pool of skbs, each of which holds both an rfd and
 * data, and returns whether the active receive list is non-empty.
 */
static int
e100_alloc_rfd_pool(struct e100_private *bdp)
{
	struct rx_list_elem *rx_struct;
	int i;

	INIT_LIST_HEAD(&(bdp->active_rx_list));
	INIT_LIST_HEAD(&(bdp->rx_struct_pool));
	bdp->skb_req = bdp->params.RxDescriptors;

	for (i = 0; i < bdp->skb_req; i++) {
		rx_struct = kmalloc(sizeof (struct rx_list_elem), GFP_ATOMIC);
		list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
	}

	e100_alloc_skbs(bdp);
	return !list_empty(&(bdp->active_rx_list));
}

void
e100_clear_pools(struct e100_private *bdp)
{
	bdp->last_tcb = NULL;
	e100_free_rfd_pool(bdp);
	e100_free_tcb_pool(bdp);
}

/*****************************************************************************/
/*****************************************************************************/
/*       Run Time Functions                                                  */
/*****************************************************************************/

/**
 * e100_watchdog
 * @dev: adapter's net_device struct
 *
 * This routine runs every 2 seconds and updates our statistics and link
 * state, and refreshes the txthld value.
 */
void
e100_watchdog(struct net_device *dev)
{
	struct e100_private *bdp = dev->priv;

#ifdef E100_CU_DEBUG
	if (e100_cu_unknown_state(bdp)) {
		printk(KERN_ERR "e100: %s: CU unknown state in e100_watchdog\n",
		       dev->name);
	}
#endif
	if (!netif_running(dev)) {
		return;
	}

	/* check if link state has changed */
	if (e100_phy_check(bdp)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_ERR
			       "e100: %s NIC Link is Up %d Mbps %s duplex\n",
			       bdp->device->name, bdp->cur_line_speed,
			       (bdp->cur_dplx_mode == HALF_DUPLEX) ?
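
/*
 * --- Illustrative sketch (not part of e100_main.c) -------------------------
 * A hypothetical bring-up sequence showing how the routines listed above fit
 * together.  The function name e100_bring_up_sketch, its return codes and its
 * simplified error handling are assumptions for illustration only; the first
 * fragment above additionally runs the selftest and reads the MAC address
 * from the EEPROM between software init and hardware init, and error-path
 * cleanup (e100_dealloc_space/e100_clear_pools) is omitted here for brevity.
 */
static int
e100_bring_up_sketch(struct e100_private *bdp)
{
	/* DMA-able area for selftest results and statistics counters */
	if (e100_alloc_space(bdp) != 0)
		return -ENOMEM;

	/* software structs: defaults, spinlocks, config block */
	if (!e100_sw_init(bdp))
		return -ENODEV;

	/* reset the controller, load microcode, configure, set IA address */
	if (!e100_hw_init(bdp))
		return -ENODEV;

	/* transmit descriptor ring: allocate, then link into a circle */
	if (!e100_alloc_tcb_pool(bdp))
		return -ENOMEM;
	e100_setup_tcb_pool((tcb_t *) bdp->tcb_pool.data,
			    bdp->params.TxDescriptors, bdp);

	/* receive buffer pool */
	if (!e100_alloc_rfd_pool(bdp))
		return -ENOMEM;

	return 0;
}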