📄 e100_main.c
/* (tail of e100_watchdog(), starting inside the HWI cable-diagnostics setup) */
				bdp->saved_short_circut = 0xffff;
				bdp->saved_distance = 0xffff;
				bdp->saved_i = 0;
				bdp->saved_same = 0;
				bdp->hwi_started = 1;

				/* Disable MDI/MDI-X auto switching */
				e100_mdi_write(bdp, MII_NCONFIG, bdp->phy_addr,
					       MDI_MDIX_RESET_ALL_MASK);

				/* Set to 100 Full as required by hwi test */
				e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr,
					       BMCR_SPEED100 | BMCR_FULLDPLX);

				/* Enable and execute HWI test */
				e100_mdi_write(bdp, HWI_CONTROL_REG,
					       bdp->phy_addr,
					       (HWI_TEST_ENABLE |
						HWI_TEST_EXECUTE));

				/* Launch hwi timer in 1 msec */
				mod_timer(&(bdp->hwi_timer),
					  jiffies + (HZ / 1000));
			}
		}
	}

	// toggle the tx queue according to link status
	// this also resolves a race condition between tx & non-cu cmd flows
#ifdef IANS
	if (netif_carrier_ok(dev)) {
		e100_tx_notify_start(bdp);
	} else {
		e100_tx_notify_stop(bdp);
	}
#else
	if (netif_carrier_ok(dev)) {
		if (netif_running(dev))
			netif_wake_queue(dev);
	} else {
		netif_stop_queue(dev);
	}
#endif

	rmb();

	if (e100_update_stats(bdp)) {
		/* Check if a change in the IFS parameter is needed,
		 * and configure the device accordingly */
		if (bdp->params.b_params & PRM_IFS)
			e100_manage_adaptive_ifs(bdp);

		/* Now adjust our dynamic tx threshold value */
		e100_refresh_txthld(bdp);

		/* Now if we are on a 557 and we haven't received any frames
		 * then we should issue a multicast command to reset the RU */
		if (bdp->rev_id < D101A4_REV_ID) {
			if (!(bdp->stats_counters->basic_stats.rcv_gd_frames)) {
				e100_set_multi(dev);
			}
		}

		/* Update the statistics needed by the upper interface.
		 * This should be the last statistics-related command,
		 * as it's async. now */
		e100_dump_stats_cntrs(bdp);
	}

#ifdef IANS
	/* Now do the ANS stuff */
	if ((ANS_PRIVATE_DATA_FIELD(bdp)->iANS_status ==
	     IANS_COMMUNICATION_UP)
	    && (ANS_PRIVATE_DATA_FIELD(bdp)->reporting_mode ==
		IANS_STATUS_REPORTING_ON)) {
		bd_ans_os_Watchdog(dev, bdp);
	}
#endif

	wmb();

	/* relaunch watchdog timer in 2 sec */
	mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));

	if (list_empty(&bdp->active_rx_list))
		e100_trigger_SWI(bdp);

exit:
	read_unlock(&(bdp->isolate_lock));
}
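/*
 * The function above re-arms its own timer each pass (mod_timer with
 * jiffies + 2*HZ). Below is a minimal, self-contained sketch of that
 * self-rearming pattern against the 2.4-era timer API this driver
 * targets; `my_adapter`, `my_watchdog`, and `my_watchdog_start` are
 * hypothetical names, not part of the driver. Kept under #if 0 so it
 * does not affect the build.
 */
#if 0
#include <linux/timer.h>
#include <linux/sched.h>	/* jiffies, HZ */

struct my_adapter {
	struct timer_list watchdog_timer;
};

static void
my_watchdog(unsigned long data)
{
	struct my_adapter *adap = (struct my_adapter *) data;

	/* periodic link/stats work would go here */

	/* re-arm for 2 seconds from now, as e100_watchdog does */
	mod_timer(&adap->watchdog_timer, jiffies + (2 * HZ));
}

static void
my_watchdog_start(struct my_adapter *adap)
{
	init_timer(&adap->watchdog_timer);
	adap->watchdog_timer.function = my_watchdog;
	adap->watchdog_timer.data = (unsigned long) adap;

	/* first expiry 2 seconds out; mod_timer() also activates an
	 * inactive timer, so it doubles as the initial add */
	mod_timer(&adap->watchdog_timer, jiffies + (2 * HZ));
}
#endif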
/**
 * e100_manage_adaptive_ifs
 * @bdp: adapter's private data struct
 *
 * This routine manages the adaptive Inter-Frame Spacing algorithm
 * using a state machine.
 */
void
e100_manage_adaptive_ifs(struct e100_private *bdp)
{
	static u16 state_table[9][4] = {	// rows are states
		{2, 0, 0, 0},	// state0 // column0: next state if increasing
		{2, 0, 5, 30},	// state1 // column1: next state if decreasing
		{5, 1, 5, 30},	// state2 // column2: IFS value for 100 mbit
		{5, 3, 0, 0},	// state3 // column3: IFS value for 10 mbit
		{5, 3, 10, 60},	// state4
		{8, 4, 10, 60},	// state5
		{8, 6, 0, 0},	// state6
		{8, 6, 20, 60},	// state7
		{8, 7, 20, 60}	// state8
	};

	u32 transmits =
		le32_to_cpu(bdp->stats_counters->basic_stats.xmt_gd_frames);
	u32 collisions =
		le32_to_cpu(bdp->stats_counters->basic_stats.xmt_ttl_coll);
	u32 state = bdp->ifs_state;
	u32 old_value = bdp->ifs_value;
	int next_col;
	u32 min_transmits;

	if (bdp->cur_dplx_mode == FULL_DUPLEX) {
		bdp->ifs_state = 0;
		bdp->ifs_value = 0;
	} else {		/* Half Duplex */
		/* Set speed specific parameters */
		if (bdp->cur_line_speed == 100) {
			next_col = 2;
			min_transmits = MIN_NUMBER_OF_TRANSMITS_100;
		} else {	/* 10 Mbps */
			next_col = 3;
			min_transmits = MIN_NUMBER_OF_TRANSMITS_10;
		}

		if ((transmits / 32 < collisions)
		    && (transmits > min_transmits)) {
			state = state_table[state][0];	/* increment */
		} else if (transmits < min_transmits) {
			state = state_table[state][1];	/* decrement */
		}

		bdp->ifs_value = state_table[state][next_col];
		bdp->ifs_state = state;
	}

	/* If the IFS value has changed, configure the device */
	if (bdp->ifs_value != old_value) {
		e100_config_ifs(bdp);
		e100_config(bdp);
	}
}
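/*
 * To see how the state table above drives the IFS value, here is a
 * small, self-contained user-space simulation of the same table walk.
 * The (transmits, collisions) samples and the MIN_TX threshold are
 * invented illustration values, not driver constants. Kept under #if 0.
 */
#if 0
#include <stdio.h>

#define MIN_TX 100	/* hypothetical stand-in for MIN_NUMBER_OF_TRANSMITS_100 */

static const unsigned short state_table[9][4] = {
	{2, 0, 0, 0},  {2, 0, 5, 30},  {5, 1, 5, 30},
	{5, 3, 0, 0},  {5, 3, 10, 60}, {8, 4, 10, 60},
	{8, 6, 0, 0},  {8, 6, 20, 60}, {8, 7, 20, 60}
};

int
main(void)
{
	/* made-up (transmits, collisions) samples, one per watchdog tick */
	struct { unsigned tx, coll; } samples[] = {
		{4000, 200}, {4000, 300}, {4000, 500}, {50, 1}, {50, 0}
	};
	unsigned state = 0;
	int next_col = 2;	/* the 100 Mbit IFS column */
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned tx = samples[i].tx, coll = samples[i].coll;

		/* same transition test as e100_manage_adaptive_ifs */
		if ((tx / 32 < coll) && (tx > MIN_TX))
			state = state_table[state][0];	/* more spacing */
		else if (tx < MIN_TX)
			state = state_table[state][1];	/* less spacing */

		printf("tick %zu: state=%u ifs=%u\n",
		       i, state, state_table[state][next_col]);
	}
	return 0;
}
#endif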
#ifdef E100_RX_CONGESTION_CONTROL
void
e100_polling_tasklet(unsigned long ptr)
{
	struct e100_private *bdp = (struct e100_private *) ptr;
	unsigned int rx_congestion = 0;
	u32 skb_cnt;

	/* the device is closed, don't continue or else bad things may
	 * happen. */
#ifdef IANS
	if (!(bdp->flags & DF_OPENED)) {
		return;
	}
#else
	if (!netif_running(bdp->device)) {
		return;
	}
#endif

	read_lock(&(bdp->isolate_lock));
	if (bdp->driver_isolated) {
		tasklet_schedule(&(bdp->polling_tasklet));
		goto exit;
	}

	e100_alloc_skbs(bdp);

	skb_cnt = e100_rx_srv(bdp, bdp->params.PollingMaxWork, &rx_congestion);
	bdp->drv_stats.rx_tasklet_pkts += skb_cnt;

	if (rx_congestion || skb_cnt) {
		tasklet_schedule(&(bdp->polling_tasklet));
	} else {
		bdp->intr_mask &= ~SCB_INT_MASK;
		bdp->drv_stats.poll_intr_switch++;
	}

	bdp->tx_count = 0;	/* restart tx interrupt batch count */

	e100_tx_srv(bdp);
	e100_set_intr_mask(bdp);

exit:
	read_unlock(&(bdp->isolate_lock));
}
#endif

/**
 * e100intr - interrupt handler
 * @irq: the IRQ number
 * @dev_inst: the net_device struct
 * @regs: registers (unused)
 *
 * This routine is the ISR for the e100 board. It services
 * the RX & TX queues & starts the RU if it has stopped due
 * to no resources.
 */
void
e100intr(int irq, void *dev_inst, struct pt_regs *regs)
{
	struct net_device *dev;
	struct e100_private *bdp;
	u16 intr_status;

	dev = dev_inst;
	bdp = dev->priv;

	intr_status = readw(&bdp->scb->scb_status);
	if (!intr_status || (intr_status == 0xffff)) {
		return;
	}

	/* disable intr before we ack & after identifying the intr as ours */
	e100_dis_intr(bdp);

	writew(intr_status, &bdp->scb->scb_status);	/* ack intrs */
	readw(&bdp->scb->scb_status);	/* read back to flush the ack */

	/* the device is closed, don't continue or else bad things may
	 * happen. */
#ifdef IANS
	if (!(bdp->flags & DF_OPENED)) {
		e100_set_intr_mask(bdp);
		return;
	}
#else
	if (!netif_running(dev)) {
		e100_set_intr_mask(bdp);
		return;
	}
#endif

	read_lock(&(bdp->isolate_lock));
	if (bdp->driver_isolated) {
		goto exit;
	}

	/* SWI intr (triggered by watchdog) is the signal to allocate new
	 * skb buffers */
	if (intr_status & SCB_STATUS_ACK_SWI) {
		e100_alloc_skbs(bdp);
	}

	/* do recv work if any */
	if (intr_status &
	    (SCB_STATUS_ACK_FR | SCB_STATUS_ACK_RNR | SCB_STATUS_ACK_SWI)) {
		int rx_congestion;

		bdp->drv_stats.rx_intr_pkts +=
			e100_rx_srv(bdp, 0, &rx_congestion);
#ifdef E100_RX_CONGESTION_CONTROL
		if ((bdp->params.b_params & PRM_RX_CONG) && rx_congestion) {
			bdp->intr_mask |= SCB_INT_MASK;
			tasklet_schedule(&(bdp->polling_tasklet));
			bdp->drv_stats.poll_intr_switch++;
		}
#endif
	}

	/* clean up after tx'ed packets */
	if (intr_status & (SCB_STATUS_ACK_CNA | SCB_STATUS_ACK_CX)) {
		bdp->tx_count = 0;	/* restart tx interrupt batch count */
		e100_tx_srv(bdp);
	}

exit:
	e100_set_intr_mask(bdp);
	read_unlock(&(bdp->isolate_lock));
}

/**
 * e100_tx_skb_free - free TX skb resources
 * @bdp: adapter's private data struct
 * @tcb: associated tcb of the freed skb
 *
 * This routine frees the resources of TX skbs.
 */
static inline void
e100_tx_skb_free(struct e100_private *bdp, tcb_t *tcb)
{
	if (tcb->tcb_skb) {
#ifdef MAX_SKB_FRAGS
		int i;
		tbd_t *tbd_arr = tcb->tbd_ptr;
		int frags = skb_shinfo(tcb->tcb_skb)->nr_frags;

		for (i = 0; i <= frags; i++, tbd_arr++) {
			pci_unmap_single(bdp->pdev,
					 le32_to_cpu(tbd_arr->tbd_buf_addr),
					 le16_to_cpu(tbd_arr->tbd_buf_cnt),
					 PCI_DMA_TODEVICE);
		}
#else
		pci_unmap_single(bdp->pdev,
				 le32_to_cpu((tcb->tbd_ptr)->tbd_buf_addr),
				 tcb->tcb_skb->len, PCI_DMA_TODEVICE);
#endif
		dev_kfree_skb_irq(tcb->tcb_skb);
		tcb->tcb_skb = NULL;
	}
}

/**
 * e100_tx_srv - service TX queues
 * @bdp: adapter's private data struct
 *
 * This routine services the TX queues. It reclaims the TCBs, TBDs & other
 * resources used during the transmit of this buffer. It is called from the
 * ISR. We don't need a tx_lock since we always access buffers which were
 * already prepared.
 */
void
e100_tx_srv(struct e100_private *bdp)
{
	tcb_t *tcb;
	int i;

	/* go over at most TxDescriptors buffers */
	for (i = 0; i < bdp->params.TxDescriptors; i++) {
		tcb = bdp->tcb_pool.data;
		tcb += bdp->tcb_pool.head;

		rmb();

		/* if the buffer at 'head' is not complete, break */
		if (!(tcb->tcb_hdr.cb_status &
		      __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
			break;

		/* service next buffer, clear the out of resource condition */
		e100_tx_skb_free(bdp, tcb);

#ifdef IANS
		e100_tx_notify_start(bdp);
#else
		if (netif_running(bdp->device))
			netif_wake_queue(bdp->device);
#endif

		/* if we've caught up with 'tail', break */
		if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail) {
			break;
		}

		bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
	}
}
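/*
 * e100_tx_srv() above reclaims completed TCBs by advancing 'head'
 * toward 'tail', stopping at the first descriptor the hardware has not
 * marked complete. Below is a simplified, standalone user-space model
 * of that reclaim loop; the ring size, names, and completion flag are
 * all invented, and the driver's extra cap (at most TxDescriptors per
 * call, head never passing tail's predecessor) is omitted. Kept under
 * #if 0.
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8
#define NEXT(i) (((i) + 1) % RING_SIZE)

struct desc {
	bool complete;	/* set by the "hardware" when tx is done */
	bool in_use;	/* owned by the driver until reclaimed */
};

/* reclaim completed descriptors; returns how many were freed */
static int
reclaim(struct desc ring[], unsigned *head, unsigned tail)
{
	int freed = 0;

	while (*head != tail && ring[*head].complete) {
		ring[*head].in_use = false;	/* e100_tx_skb_free() analogue */
		ring[*head].complete = false;
		*head = NEXT(*head);
		freed++;
	}
	return freed;
}

int
main(void)
{
	struct desc ring[RING_SIZE] = {0};
	unsigned head = 0, tail = 5;	/* 5 packets posted */
	unsigned i;

	for (i = 0; i < 5; i++)
		ring[i].in_use = true;
	for (i = 0; i < 3; i++)		/* "hardware" finished 3 of them */
		ring[i].complete = true;

	printf("freed %d, head now %u\n", reclaim(ring, &head, tail), head);
	return 0;
}
#endif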
/**
 * e100_rx_srv - service RX queue
 * @bdp: adapter's private data struct
 * @max_number_of_rfds: max number of RFDs to process
 * @rx_congestion: flag pointer, to inform the calling function of congestion
 *
 * This routine processes the RX interrupt & services the RX queues.
 * For each successful RFD, it allocates a new msg block, links that
 * into the RFD list, and sends the old msg upstream.
 * The new RFD is then put at the end of the free list of RFDs.
 * It returns the number of serviced RFDs.
 */
u32
e100_rx_srv(struct e100_private *bdp, u32 max_number_of_rfds,
	    int *rx_congestion)
{
	rfd_t *rfd;		/* new rfd, received rfd */
	int i;
	u16 rfd_status;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned int data_sz;
	struct rx_list_elem *rx_struct;
	u32 rfd_cnt = 0;

	if (rx_congestion) {
		*rx_congestion = 0;
	}

	dev = bdp->device;

	/* current design of rx is as follows:
	 * 1. socket buffer (skb) used to pass network packet to upper layer
	 * 2. all HW host memory structures (like RFDs, RBDs and data buffers)
	 *    are placed in a skb's data room
	 * 3. when rx process is complete, we change skb internal pointers to
	 *    exclude from the data area all unrelated things (RFD, RBD) and
	 *    to leave just the received packet itself
	 * 4. for each skb passed to upper layer, a new one is allocated
	 *    instead
	 * 5. if no skbs are left, in 2 secs another attempt to allocate skbs
	 *    will be made (watchdog triggers SWI intr and isr should
	 *    allocate new skbs)
	 */
	for (i = 0; i < bdp->params.RxDescriptors; i++) {
#ifdef E100_RX_CONGESTION_CONTROL
		if (max_number_of_rfds && (rfd_cnt >= max_number_of_rfds)) {
			break;
		}
#endif
		if (list_empty(&(bdp->active_rx_list))) {
			break;
		}

		rx_struct = list_entry(bdp->active_rx_list.next,
				       struct rx_list_elem, list_elem);
		skb = rx_struct->skb;

		rfd = RFD_POINTER(skb, bdp);	/* locate RFD within skb */

		// sync only the RFD header
		pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
				    bdp->rfd_size, PCI_DMA_FROMDEVICE);

		rfd_status = le16_to_cpu(rfd->rfd_header.cb_status);	/* get RFD's status */
		if (!(rfd_status & RFD_STATUS_COMPLETE))	/* does not contain data yet - exit */
			break;

		/* to allow manipulation with the current skb we need to
		 * unlink it */
		list_del(&(rx_struct->list_elem));

		/* do not free & unmap a badly received packet.
		 * move it to the end of the skb list for reuse */
		if (!(rfd_status & RFD_STATUS_OK)) {
			e100_add_skb_to_end(bdp, rx_struct);
			continue;
		}

		data_sz = min_t(u16,
				(le16_to_cpu(rfd->rfd_act_cnt) & 0x3fff),
				(sizeof (rfd_t) - bdp->rfd_size));

		/* now sync all the data */
		pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
				    (data_sz + bdp->rfd_size),
				    PCI_DMA_FROMDEVICE);

		pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
				 sizeof (rfd_t), PCI_DMA_FROMDEVICE);

		list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));

		/* end of dma access to rfd */
		bdp->skb_req++;		/* incr number of requested skbs */
		e100_alloc_skbs(bdp);	/* and get them */

		/* set packet size, excluding checksum (2 last bytes) if it
		 * is present */
		if ((bdp->flags & DF_CSUM_OFFLOAD)
		    && (bdp->rev_id < D102_REV_ID))
			skb_put(skb, (int) data_sz - 2);
		else
			skb_put(skb, (int) data_sz);

#ifdef IANS
		/* Before we give it to the stack, let ANS process it */
		if (ANS_PRIVATE_DATA_FIELD(bdp)->iANS_status ==
		    IANS_COMMUNICATION_UP) {
			if (bd_ans_os_Receive(bdp, RFD_POINTER(skb, bdp), skb)
			    == BD_ANS_FAILURE) {
				dev_kfree_skb_irq(skb);
				continue;
			}
		} else {
			/* set the protocol */
			skb->protocol = eth_type_trans(skb, dev);
		}
#else
		/* set the protocol */
		skb->protocol = eth_type_trans(skb, dev);
#endif
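/*
 * The design comment in e100_rx_srv() above describes one buffer that
 * holds both the RFD and the packet; the driver then adjusts skb
 * pointers so the stack sees only the packet bytes. Below is a
 * standalone sketch of that pointer arithmetic, with invented sizes
 * (RFD_SIZE and the actual-count value) used purely for illustration.
 * Kept under #if 0.
 */
#if 0
#include <stdio.h>

#define RFD_SIZE 16U		/* hypothetical RFD header size */

int
main(void)
{
	unsigned char buf[1536];	/* stands in for the skb data room */
	unsigned short rfd_act_cnt = 0xC03C;	/* invented: flag bits + count */

	/* the driver masks the actual count to 14 bits, as in
	 * le16_to_cpu(rfd->rfd_act_cnt) & 0x3fff */
	unsigned int data_sz = rfd_act_cnt & 0x3fff;	/* -> 60 bytes */

	/* layout after DMA: [ RFD header | packet bytes ... ] */
	unsigned char *packet = buf + RFD_SIZE;

	/* skb_put(skb, data_sz) then makes exactly these bytes visible
	 * to the stack */
	printf("packet at offset %u, length %u\n", RFD_SIZE, data_sz);
	(void) packet;
	return 0;
}
#endif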