/* acenic.c */
printk(KERN_INFO "%s: Disabling flow control " "negotiation\n", dev->name); if (option & 0x200) tmp |= LNK_RX_FLOW_CTL_Y; if ((option & 0x400) && (ap->version >= 2)) { printk(KERN_INFO "%s: Enabling TX flow control\n", dev->name); tmp |= LNK_TX_FLOW_CTL_Y; } } ap->link = tmp; writel(tmp, ®s->TuneLink); if (ap->version >= 2) writel(tmp, ®s->TuneFastLink); if (ACE_IS_TIGON_I(ap)) writel(tigonFwStartAddr, ®s->Pc); if (ap->version == 2) writel(tigon2FwStartAddr, ®s->Pc); writel(0, ®s->Mb0Lo); /* * Set tx_csm before we start receiving interrupts, otherwise * the interrupt handler might think it is supposed to process * tx ints before we are up and running, which may cause a null * pointer access in the int handler. */ ap->tx_full = 0; ap->cur_rx = 0; ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0; wmb(); ace_set_txprd(regs, ap, 0); writel(0, ®s->RxRetCsm); /* * Start the NIC CPU */ writel(readl(®s->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), ®s->CpuCtrl); /* * Wait for the firmware to spin up - max 3 seconds. */ myjif = jiffies + 3 * HZ; while (time_before(jiffies, myjif) && !ap->fw_running); if (!ap->fw_running) { printk(KERN_ERR "%s: Firmware NOT running!\n", dev->name); ace_dump_trace(ap); writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl); /* aman@sgi.com - account for badly behaving firmware/NIC: * - have observed that the NIC may continue to generate * interrupts for some reason; attempt to stop it - halt * second CPU for Tigon II cards, and also clear Mb0 * - if we're a module, we'll fail to load if this was * the only GbE card in the system => if the kernel does * see an interrupt from the NIC, code to handle it is * gone and OOps! - so free_irq also */ if (ap->version >= 2) writel(readl(®s->CpuBCtrl) | CPU_HALT, ®s->CpuBCtrl); writel(0, ®s->Mb0Lo); ecode = -EBUSY; goto init_error; } /* * We load the ring here as there seem to be no way to tell the * firmware to wipe the ring without re-initializing it. */ if (!test_and_set_bit(0, &ap->std_refill_busy)) ace_load_std_rx_ring(ap, RX_RING_SIZE); else printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n", dev->name); if (ap->version >= 2) { if (!test_and_set_bit(0, &ap->mini_refill_busy)) ace_load_mini_rx_ring(ap, RX_MINI_SIZE); else printk(KERN_ERR "%s: Someone is busy refilling " "the RX mini ring\n", dev->name); } return 0; init_error: ace_init_cleanup(dev); return ecode;}static void ace_set_rxtx_parms(struct net_device *dev, int jumbo){ struct ace_private *ap; struct ace_regs *regs; int board_idx; ap = dev->priv; regs = ap->regs; board_idx = ap->board_idx; if (board_idx >= 0) { if (!jumbo) { if (!tx_coal_tick[board_idx]) writel(DEF_TX_COAL, ®s->TuneTxCoalTicks); if (!max_tx_desc[board_idx]) writel(DEF_TX_MAX_DESC, ®s->TuneMaxTxDesc); if (!rx_coal_tick[board_idx]) writel(DEF_RX_COAL, ®s->TuneRxCoalTicks); if (!max_rx_desc[board_idx]) writel(DEF_RX_MAX_DESC, ®s->TuneMaxRxDesc); if (!tx_ratio[board_idx]) writel(DEF_TX_RATIO, ®s->TxBufRat); } else { if (!tx_coal_tick[board_idx]) writel(DEF_JUMBO_TX_COAL, ®s->TuneTxCoalTicks); if (!max_tx_desc[board_idx]) writel(DEF_JUMBO_TX_MAX_DESC, ®s->TuneMaxTxDesc); if (!rx_coal_tick[board_idx]) writel(DEF_JUMBO_RX_COAL, ®s->TuneRxCoalTicks); if (!max_rx_desc[board_idx]) writel(DEF_JUMBO_RX_MAX_DESC, ®s->TuneMaxRxDesc); if (!tx_ratio[board_idx]) writel(DEF_JUMBO_TX_RATIO, ®s->TxBufRat); } }}/* * Monitor the card to detect hangs. 
/*
 * Monitor the card to detect hangs.
 */
static void ace_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct ace_private *ap = dev->priv;
	struct ace_regs *regs = ap->regs;

	/*
	 * We haven't received a stats update event for more than 2.5
	 * seconds and there is data in the transmit queue, thus we
	 * assume the card is stuck.
	 */
	if (*ap->tx_csm != ap->tx_ret_csm) {
		printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
		       dev->name, (unsigned int)readl(&regs->HostCtrl));
	}

	ap->timer.expires = jiffies + (5 * HZ / 2);
	add_timer(&ap->timer);
}


static void ace_tasklet(unsigned long dev)
{
	struct ace_private *ap = ((struct net_device *)dev)->priv;
	int cur_size;

	cur_size = atomic_read(&ap->cur_rx_bufs);
	if ((cur_size < RX_LOW_STD_THRES) &&
	    !test_and_set_bit(0, &ap->std_refill_busy)) {
#if DEBUG
		printk("refilling buffers (current %i)\n", cur_size);
#endif
		ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
	}

	if (ap->version >= 2) {
		cur_size = atomic_read(&ap->cur_mini_bufs);
		if ((cur_size < RX_LOW_MINI_THRES) &&
		    !test_and_set_bit(0, &ap->mini_refill_busy)) {
#if DEBUG
			printk("refilling mini buffers (current %i)\n",
			       cur_size);
#endif
			ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
		}
	}

	cur_size = atomic_read(&ap->cur_jumbo_bufs);
	if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
#if DEBUG
		printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
		ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
	}
	ap->tasklet_pending = 0;
}


/*
 * Copy the contents of the NIC's trace buffer to kernel memory.
 */
static void ace_dump_trace(struct ace_private *ap)
{
#if 0
	if (!ap->trace_buf)
		if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
			return;
#endif
}
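
/*
 * Note on the refill scheme used by ace_tasklet() above: a ring is only
 * topped up when its buffer count drops below the corresponding
 * RX_LOW_*_THRES watermark, and the test_and_set_bit() on the
 * *_refill_busy flags ensures that only one context refills a given ring
 * at a time; the flag is cleared again at the end of the matching
 * ace_load_*_rx_ring() routine below.
 */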
/*
 * Load the standard rx ring.
 *
 * Loading rings is safe without holding the spin lock since this is
 * done only before the device is enabled, thus no interrupts are
 * generated and the rings are not touched by the interrupt
 * handler/tasklet handler.
 */
static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs *regs;
	short i, idx;

	regs = ap->regs;

	idx = ap->rx_std_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/*
		 * Make sure IP header starts on a fresh cache line.
		 */
		skb_reserve(skb, 2 + 16);
		mapping = pci_map_single(ap->pdev, skb->data,
					 ACE_STD_BUFSIZE - (2 + 16),
					 PCI_DMA_FROMDEVICE);
		ap->skb->rx_std_skbuff[idx].skb = skb;
		ap->skb->rx_std_skbuff[idx].mapping = mapping;

		rd = &ap->rx_std_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_STD_MTU + ETH_HLEN + 4;
		rd->idx = idx;
		idx = (idx + 1) % RX_STD_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_rx_bufs);
	ap->rx_std_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_std_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxStdPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->std_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "standard receive buffers\n");
	goto out;
}


static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs *regs;
	short i, idx;

	regs = ap->regs;

	idx = ap->rx_mini_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/*
		 * Make sure the IP header ends up on a fresh cache line
		 */
		skb_reserve(skb, 2 + 16);
		mapping = pci_map_single(ap->pdev, skb->data,
					 ACE_MINI_BUFSIZE - (2 + 16),
					 PCI_DMA_FROMDEVICE);
		ap->skb->rx_mini_skbuff[idx].skb = skb;
		ap->skb->rx_mini_skbuff[idx].mapping = mapping;

		rd = &ap->rx_mini_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_MINI_SIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_MINI_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_mini_bufs);

	ap->rx_mini_skbprd = idx;

	writel(idx, &regs->RxMiniPrd);
	wmb();

 out:
	clear_bit(0, &ap->mini_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "mini receive buffers\n");
	goto out;
}


/*
 * Load the jumbo rx ring; this may happen at any time if the MTU
 * is changed to a value > 1500.
 */
static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs *regs;
	short i, idx;

	regs = ap->regs;

	idx = ap->rx_jumbo_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/*
		 * Make sure the IP header ends up on a fresh cache line
		 */
		skb_reserve(skb, 2 + 16);
		mapping = pci_map_single(ap->pdev, skb->data,
					 ACE_JUMBO_BUFSIZE - (2 + 16),
					 PCI_DMA_FROMDEVICE);
		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
		ap->skb->rx_jumbo_skbuff[idx].mapping = mapping;

		rd = &ap->rx_jumbo_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
		rd->idx = idx;
		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_jumbo_bufs);
	ap->rx_jumbo_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_jumbo_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxJumboPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->jumbo_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "jumbo receive buffers\n");
	goto out;
}
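
/*
 * Note: the ring loaders above hand the new producer index to Tigon I
 * boards via an explicit command (C_SET_RX_PRD_IDX / C_SET_RX_JUMBO_PRD_IDX)
 * issued through ace_issue_cmd(), while later revisions take a direct write
 * to the RxStdPrd/RxMiniPrd/RxJumboPrd registers followed by a write memory
 * barrier; the mini ring is only used when ap->version >= 2, hence its
 * loader has no Tigon I path.
 */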
/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
	struct ace_private *ap;

	ap = dev->priv;

	while (evtcsm != evtprd) {
		switch (ap->evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			printk(KERN_INFO "%s: Firmware up and running\n",
			       dev->name);
			ap->fw_running = 1;
			wmb();
			break;
		case E_STATS_UPDATED:
			break;
		case E_LNK_STATE:
		{
			u16 code = ap->evt_ring[evtcsm].code;
			switch (code) {
			case E_C_LINK_UP:
				printk(KERN_WARNING "%s: Optical link UP\n",
				       dev->name);
				break;
			case E_C_LINK_DOWN:
				printk(KERN_WARNING "%s: Optical link DOWN\n",
				       dev->name);
				break;
			case E_C_LINK_10_100:
				printk(KERN_WARNING "%s: 10/100BaseT link "
				       "UP\n", dev->name);
				break;
			default:
				printk(KERN_ERR "%s: Unknown optical link "
				       "state %02x\n", dev->name, code);
			}
			break;
		}
		case E_ERROR:
			switch (ap->evt_ring[evtcsm].code) {
			case E_C_ERR_INVAL_CMD:
				printk(KERN_ERR "%s: invalid command error\n",
				       dev->name);
				break;
			case E_C_ERR_UNIMP_CMD:
				printk(KERN_ERR "%s: unimplemented command "
				       "error\n", dev->name);
				break;
			case E_C_ERR_BAD_CFG:
				printk(KERN_ERR "%s: bad config error\n",
				       dev->name);
				break;
			default:
				printk(KERN_ERR "%s: unknown error %02x\n",
				       dev->name, ap->evt_ring[evtcsm].code);
			}
			break;
		case E_RESET_JUMBO_RNG:
		{
			int i;
			for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
				if (ap->skb->rx_jumbo_skbuff[i].skb) {
					ap->rx_jumbo_ring[i].size = 0;
					set_aceaddr(&ap->rx_jumbo_ring[i].addr,
						    0);
					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				}
			}

			if (ACE_IS_TIGON_I(ap)) {
				struct cmd cmd;
				cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
				cmd.code = 0;
				cmd.idx = 0;