📄 e100_main.c
字号:
} else if (bdp->flags & IS_BACHELOR) { txcommand |= CB_TRANSMIT | CB_CID_DEFAULT; } else { txcommand |= CB_TRANSMIT; } for (ele_no = 0, next_phys = bdp->tcb_phys, pcurr_tcb = head; ele_no < qlen; ele_no++, pcurr_tcb++) { /* set the phys addr for this TCB, next_phys has not incr. yet */ pcurr_tcb->tcb_phys = next_phys; next_phys += sizeof (tcb_t); /* set the link to next tcb */ if (ele_no == (qlen - 1)) pcurr_tcb->tcb_hdr.cb_lnk_ptr = cpu_to_le32(bdp->tcb_phys); else pcurr_tcb->tcb_hdr.cb_lnk_ptr = cpu_to_le32(next_phys); pcurr_tcb->tcb_hdr.cb_status = 0; pcurr_tcb->tcb_hdr.cb_cmd = cpu_to_le16(txcommand); pcurr_tcb->tcb_cnt = 0; pcurr_tcb->tcb_thrshld = bdp->tx_thld; if (ele_no < 2) { pcurr_tcb->tcb_hdr.cb_status = cpu_to_le16(CB_STATUS_COMPLETE); } pcurr_tcb->tcb_tbd_num = 1; if (bdp->flags & IS_BACHELOR) { pcurr_tcb->tcb_tbd_ptr = __constant_cpu_to_le32(0xFFFFFFFF); } else { pcurr_tcb->tcb_tbd_ptr = cpu_to_le32(pcurr_tcb->tcb_phys + 0x10); } if (bdp->flags & IS_BACHELOR) { pcurr_tcb->tcb_tbd_expand_ptr = cpu_to_le32(pcurr_tcb->tcb_phys + 0x20); } else { pcurr_tcb->tcb_tbd_expand_ptr = cpu_to_le32(pcurr_tcb->tcb_phys + 0x10); } pcurr_tcb->tcb_tbd_dflt_ptr = pcurr_tcb->tcb_tbd_ptr; if (bdp->flags & USE_IPCB) { pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[1]); pcurr_tcb->tcbu.ipcb.ip_activation_high = IPCB_IP_ACTIVATION_DEFAULT; pcurr_tcb->tcbu.ipcb.vlan = 0; } else { pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[0]); } pcurr_tcb->tcb_skb = NULL; } wmb();}/***************************************************************************//***************************************************************************//* Memory Management Routines *//***************************************************************************//** * e100_alloc_space - allocate private driver data * @bdp: atapter's private data struct * * This routine allocates memory for the driver. Memory allocated is for the * selftest and statistics structures. 
 *
 * Returns:
 * 0: if the operation was successful
 * %-ENOMEM: if memory allocation failed
 */
unsigned char __devinit
e100_alloc_space(struct e100_private *bdp)
{
	unsigned long off;

	/* allocate all the dma-able structures in one call:
	 * selftest results, adapter stats, and non-tx cb commands */
	if (!(bdp->dma_able =
	      pci_alloc_consistent(bdp->pdev, sizeof (bd_dma_able_t),
				   &(bdp->dma_able_phys)))) {
		goto err;
	}

	/* now assign the various pointers into the struct we've just allocated;
	 * each sub-structure gets both a CPU pointer and the matching bus
	 * address (same offset into the one consistent allocation) */
	off = offsetof(bd_dma_able_t, selftest);
	bdp->selftest = (self_test_t *) (bdp->dma_able + off);
	bdp->selftest_phys = bdp->dma_able_phys + off;

	off = offsetof(bd_dma_able_t, stats_counters);
	bdp->stats_counters = (max_counters_t *) (bdp->dma_able + off);
	bdp->stat_cnt_phys = bdp->dma_able_phys + off;

	/* NOTE(review): the return type is unsigned char, so the -ENOMEM
	 * below is truncated to a small positive value; callers presumably
	 * only test for non-zero — confirm before relying on the exact
	 * error code */
	return 0;

err:
	printk(KERN_ERR "e100: Failed to allocate memory\n");
	return -ENOMEM;
}

/**
 * e100_alloc_tcb_pool - allocate TCB circular list
 * @bdp: adapter's private data struct
 *
 * This routine allocates memory for the circular list of transmit descriptors.
 *
 * Returns:
 * 0: if allocation has failed.
 * 1: Otherwise.
*/inte100_alloc_tcb_pool(struct e100_private *bdp){ int stcb = sizeof (tcb_t) * bdp->params.TxDescriptors; /* allocate space for the TCBs */ if (!(bdp->tcb_pool.data = pci_alloc_consistent(bdp->pdev, stcb, &bdp->tcb_phys))) return 0; memset(bdp->tcb_pool.data, 0x00, stcb); return 1;}voide100_free_tcb_pool(struct e100_private *bdp){ pci_free_consistent(bdp->pdev, sizeof (tcb_t) * bdp->params.TxDescriptors, bdp->tcb_pool.data, bdp->tcb_phys); bdp->tcb_phys = 0;}static voide100_dealloc_space(struct e100_private *bdp){ if (bdp->dma_able) { pci_free_consistent(bdp->pdev, sizeof (bd_dma_able_t), bdp->dma_able, bdp->dma_able_phys); } bdp->selftest_phys = 0; bdp->stat_cnt_phys = 0; bdp->dma_able_phys = 0; bdp->dma_able = 0;}static voide100_free_rfd_pool(struct e100_private *bdp){ struct rx_list_elem *rx_struct; while (!list_empty(&(bdp->active_rx_list))) { rx_struct = list_entry(bdp->active_rx_list.next, struct rx_list_elem, list_elem); list_del(&(rx_struct->list_elem)); pci_unmap_single(bdp->pdev, rx_struct->dma_addr, sizeof (rfd_t), PCI_DMA_TODEVICE); dev_kfree_skb(rx_struct->skb); kfree(rx_struct); } while (!list_empty(&(bdp->rx_struct_pool))) { rx_struct = list_entry(bdp->rx_struct_pool.next, struct rx_list_elem, list_elem); list_del(&(rx_struct->list_elem)); kfree(rx_struct); }}/** * e100_alloc_rfd_pool - allocate RFDs * @bdp: atapter's private data struct * * Allocates initial pool of skb which holds both rfd and data, * and return a pointer to the head of the list */static inte100_alloc_rfd_pool(struct e100_private *bdp){ struct rx_list_elem *rx_struct; int i; INIT_LIST_HEAD(&(bdp->active_rx_list)); INIT_LIST_HEAD(&(bdp->rx_struct_pool)); bdp->skb_req = bdp->params.RxDescriptors; for (i = 0; i < bdp->skb_req; i++) { rx_struct = kmalloc(sizeof (struct rx_list_elem), GFP_ATOMIC); list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool)); } e100_alloc_skbs(bdp); return !list_empty(&(bdp->active_rx_list));}voide100_clear_pools(struct e100_private *bdp){ bdp->last_tcb = 
	NULL;
	e100_free_rfd_pool(bdp);
	e100_free_tcb_pool(bdp);
}

/*****************************************************************************/
/*****************************************************************************/
/*                          Run Time Functions                               */
/*****************************************************************************/

/**
 * e100_watchdog
 * @dev: adapter's net_device struct
 *
 * This routine runs every 2 seconds (self-rearming timer) and updates our
 * statistics and link state, and refreshes the txthld value.
 */
void
e100_watchdog(struct net_device *dev)
{
	struct e100_private *bdp = dev->priv;

	/* hold the isolate lock so the driver is not isolated from under us */
	read_lock(&(bdp->isolate_lock));
	if (bdp->driver_isolated) {
		goto exit;
	}
	if (!netif_running(dev)) {
		goto exit;
	}

	e100_get_mdix_status(bdp);

	/* check if link state has changed */
	if (e100_phy_check(bdp)) {
		if (netif_carrier_ok(dev)) {
			/* NOTE(review): KERN_ERR severity for an
			 * informational link-up message looks too high —
			 * confirm before changing (log scrapers may depend
			 * on it) */
			printk(KERN_ERR
			       "e100: %s NIC Link is Up %d Mbps %s duplex\n",
			       bdp->device->name, bdp->cur_line_speed,
			       (bdp->cur_dplx_mode == HALF_DUPLEX)
			       ? "Half" : "Full");

			e100_config_fc(bdp);
			e100_config(bdp);

			bdp->cable_status = "Cable OK";
		} else {
			printk(KERN_ERR "e100: %s NIC Link is Down\n",
			       bdp->device->name);

			if (bdp->rev_id < D102_REV_ID)
				bdp->cable_status = "Not supported";
			else {
				/* Initiate hwi, ie, cable diagnostic */
				bdp->saved_open_circut = 0xffff;
				bdp->saved_short_circut = 0xffff;
				bdp->saved_distance = 0xffff;
				bdp->saved_i = 0;
				bdp->saved_same = 0;
				bdp->hwi_started = 1;

				/* Disable MDI/MDI-X auto switching */
				e100_mdi_write(bdp, MII_NCONFIG,
					       bdp->phy_addr,
					       MDI_MDIX_RESET_ALL_MASK);

				/* Set to 100 Full as required by hwi test */
				e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr,
					       BMCR_SPEED100 | BMCR_FULLDPLX);

				/* Enable and execute HWI test */
				e100_mdi_write(bdp, HWI_CONTROL_REG,
					       bdp->phy_addr,
					       (HWI_TEST_ENABLE |
						HWI_TEST_EXECUTE));

				/* Launch hwi timer in 1 msec.
				 * NOTE(review): HZ / 1000 evaluates to 0
				 * when HZ < 1000 (e.g. HZ=100), so the timer
				 * then fires on the next tick rather than in
				 * 1 ms — confirm this is acceptable for the
				 * HWI test */
				mod_timer(&(bdp->hwi_timer), jiffies + (HZ / 1000) );
			}
		}
	}

	// toggle the tx queue according to link status
	// this also resolves a race condition between tx & non-cu cmd flows
	if (netif_carrier_ok(dev)) {
		if (netif_running(dev))
			netif_wake_queue(dev);
	} else {
		netif_stop_queue(dev);
	}

	rmb();	/* read memory barrier before inspecting the DMA'd stats */

	if (e100_update_stats(bdp)) {
		/* Check if a change in the IFS parameter is needed,
		   and configure the device accordingly */
		if (bdp->params.b_params & PRM_IFS)
			e100_manage_adaptive_ifs(bdp);

		/* Now adjust our dynamic tx threshold value */
		e100_refresh_txthld(bdp);

		/* Now if we are on a 557 and we haven't received any frames
		 * then we should issue a multicast command to reset the RU */
		if (bdp->rev_id < D101A4_REV_ID) {
			if (!(bdp->stats_counters->basic_stats.rcv_gd_frames)) {
				e100_set_multi(dev);
			}
		}

		/* Update the statistics needed by the upper interface */
		/* This should be the last statistic related command
		 * as it's async. now */
		e100_dump_stats_cntrs(bdp);
	}

	wmb();	/* write memory barrier before re-arming the timer */

	/* relaunch watchdog timer in 2 sec */
	mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));

	/* if the RX ring ran dry, raise a software interrupt so the ISR
	 * replenishes skb buffers (see SCB_STATUS_ACK_SWI in e100intr) */
	if (list_empty(&bdp->active_rx_list))
		e100_trigger_SWI(bdp);

exit:
	read_unlock(&(bdp->isolate_lock));
}

/**
 * e100_manage_adaptive_ifs
 * @bdp: adapter's private data struct
 *
 * This routine manages the adaptive Inter-Frame Spacing algorithm
 * using a state machine.
*/voide100_manage_adaptive_ifs(struct e100_private *bdp){ static u16 state_table[9][4] = { // rows are states {2, 0, 0, 0}, // state0 // column0: next state if increasing {2, 0, 5, 30}, // state1 // column1: next state if decreasing {5, 1, 5, 30}, // state2 // column2: IFS value for 100 mbit {5, 3, 0, 0}, // state3 // column3: IFS value for 10 mbit {5, 3, 10, 60}, // state4 {8, 4, 10, 60}, // state5 {8, 6, 0, 0}, // state6 {8, 6, 20, 60}, // state7 {8, 7, 20, 60} // state8 }; u32 transmits = le32_to_cpu(bdp->stats_counters->basic_stats.xmt_gd_frames); u32 collisions = le32_to_cpu(bdp->stats_counters->basic_stats.xmt_ttl_coll); u32 state = bdp->ifs_state; u32 old_value = bdp->ifs_value; int next_col; u32 min_transmits; if (bdp->cur_dplx_mode == FULL_DUPLEX) { bdp->ifs_state = 0; bdp->ifs_value = 0; } else { /* Half Duplex */ /* Set speed specific parameters */ if (bdp->cur_line_speed == 100) { next_col = 2; min_transmits = MIN_NUMBER_OF_TRANSMITS_100; } else { /* 10 Mbps */ next_col = 3; min_transmits = MIN_NUMBER_OF_TRANSMITS_10; } if ((transmits / 32 < collisions) && (transmits > min_transmits)) { state = state_table[state][0]; /* increment */ } else if (transmits < min_transmits) { state = state_table[state][1]; /* decrement */ } bdp->ifs_value = state_table[state][next_col]; bdp->ifs_state = state; } /* If the IFS value has changed, configure the device */ if (bdp->ifs_value != old_value) { e100_config_ifs(bdp); e100_config(bdp); }}/** * e100intr - interrupt handler * @irq: the IRQ number * @dev_inst: the net_device struct * @regs: registers (unused) * * This routine is the ISR for the e100 board. It services * the RX & TX queues & starts the RU if it has stopped due * to no resources. 
 */
void
e100intr(int irq, void *dev_inst, struct pt_regs *regs)
{
	struct net_device *dev;
	struct e100_private *bdp;
	u16 intr_status;

	dev = dev_inst;
	bdp = dev->priv;

	intr_status = readw(&bdp->scb->scb_status);
	/* 0 means the interrupt is not ours; 0xffff presumably means the
	 * device is no longer responding (e.g. removed) — either way, bail */
	if (!intr_status || (intr_status == 0xffff)) {
		return;
	}
	/* disable intr before we ack & after identifying the intr as ours */
	e100_dis_intr(bdp);

	writew(intr_status, &bdp->scb->scb_status);	/* ack intrs */
	/* read back after the ack — presumably to flush the posted PCI
	 * write; confirm against the SCB programming model */
	readw(&bdp->scb->scb_status);

	/* the device is closed, don't continue or else bad things may happen. */
	if (!netif_running(dev)) {
		e100_set_intr_mask(bdp);
		return;
	}

	/* prevent the driver from being isolated while we service the irq */
	read_lock(&(bdp->isolate_lock));

	if (bdp->driver_isolated) {
		goto exit;
	}

	/* SWI intr (triggered by watchdog) is signal to allocate new skb buffers */
	if (intr_status & SCB_STATUS_ACK_SWI) {
		e100_alloc_skbs(bdp);
	}

	/* do recv work if any: frame received, RU out of resources, or the
	 * SWI above just replenished buffers */
	if (intr_status & (SCB_STATUS_ACK_FR |
			   SCB_STATUS_ACK_RNR | SCB_STATUS_ACK_SWI))
		bdp->drv_stats.rx_intr_pkts += e100_rx_srv(bdp);

	/* clean up after tx'ed packets */
	if (intr_status & (SCB_STATUS_ACK_CNA | SCB_STATUS_ACK_CX)) {
		bdp->tx_count = 0;	/* restart tx interrupt batch count */
		e100_tx_srv(bdp);
	}

exit:
	/* re-enable interrupts on every path that took the lock */
	e100_set_intr_mask(bdp);
	read_unlock(&(bdp->isolate_lock));
}

/**
 * e100_tx_skb_free - free TX skbs resources
 * @bdp: adapter's private data struct
 * @tcb: associated tcb of the freed skb
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -