acenic.c
}

	/*
	 * Default link parameters
	 */
	tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
		LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
	if (ap->version >= 2)
		tmp |= LNK_TX_FLOW_CTL_Y;

	/*
	 * Override link default parameters
	 */
	if ((board_idx >= 0) && link_state[board_idx]) {
		int option = link_state[board_idx];

		tmp = LNK_ENABLE;

		if (option & 0x01) {
			printk(KERN_INFO "%s: Setting half duplex link\n",
			       ap->name);
			tmp &= ~LNK_FULL_DUPLEX;
		}
		if (option & 0x02)
			tmp &= ~LNK_NEGOTIATE;
		if (option & 0x10)
			tmp |= LNK_10MB;
		if (option & 0x20)
			tmp |= LNK_100MB;
		if (option & 0x40)
			tmp |= LNK_1000MB;
		if ((option & 0x70) == 0) {
			printk(KERN_WARNING "%s: No media speed specified, "
			       "forcing auto negotiation\n", ap->name);
			tmp |= LNK_NEGOTIATE | LNK_1000MB |
				LNK_100MB | LNK_10MB;
		}
		if ((option & 0x100) == 0)
			tmp |= LNK_NEG_FCTL;
		else
			printk(KERN_INFO "%s: Disabling flow control "
			       "negotiation\n", ap->name);
		if (option & 0x200)
			tmp |= LNK_RX_FLOW_CTL_Y;
		if ((option & 0x400) && (ap->version >= 2)) {
			printk(KERN_INFO "%s: Enabling TX flow control\n",
			       ap->name);
			tmp |= LNK_TX_FLOW_CTL_Y;
		}
	}

	ap->link = tmp;
	writel(tmp, &regs->TuneLink);
	if (ap->version >= 2)
		writel(tmp, &regs->TuneFastLink);

	if (ACE_IS_TIGON_I(ap))
		writel(tigonFwStartAddr, &regs->Pc);
	if (ap->version == 2)
		writel(tigon2FwStartAddr, &regs->Pc);

	writel(0, &regs->Mb0Lo);

	/*
	 * Set tx_csm before we start receiving interrupts, otherwise
	 * the interrupt handler might think it is supposed to process
	 * tx ints before we are up and running, which may cause a null
	 * pointer access in the int handler.
	 */
	ap->cur_rx = 0;
	ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;

	wmb();
	ace_set_txprd(regs, ap, 0);
	writel(0, &regs->RxRetCsm);

	/*
	 * Zero the stats before starting the interface
	 */
	memset(&ap->stats, 0, sizeof(ap->stats));

	/*
	 * Enable DMA engine now.
	 * If we do this sooner, Mckinley box pukes.
	 * I assume it's because Tigon II DMA engine wants to check
	 * *something* even before the CPU is started.
	 */
	writel(1, &regs->AssistState);	/* enable DMA */

	/*
	 * Start the NIC CPU
	 */
	writel(readl(&regs->CpuCtrl) & ~(CPU_HALT | CPU_TRACE),
	       &regs->CpuCtrl);
	readl(&regs->CpuCtrl);

	/*
	 * Wait for the firmware to spin up - max 3 seconds.
	 */
	myjif = jiffies + 3 * HZ;
	while (time_before(jiffies, myjif) && !ap->fw_running)
		cpu_relax();

	if (!ap->fw_running) {
		printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);

		ace_dump_trace(ap);
		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
		readl(&regs->CpuCtrl);

		/* aman@sgi.com - account for badly behaving firmware/NIC:
		 * - have observed that the NIC may continue to generate
		 *   interrupts for some reason; attempt to stop it - halt
		 *   second CPU for Tigon II cards, and also clear Mb0
		 * - if we're a module, we'll fail to load if this was
		 *   the only GbE card in the system => if the kernel does
		 *   see an interrupt from the NIC, code to handle it is
		 *   gone and OOps! - so free_irq also
		 */
		if (ap->version >= 2)
			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
			       &regs->CpuBCtrl);
		writel(0, &regs->Mb0Lo);
		readl(&regs->Mb0Lo);

		ecode = -EBUSY;
		goto init_error;
	}

	/*
	 * We load the ring here as there seems to be no way to tell the
	 * firmware to wipe the ring without re-initializing it.
	 */
	if (!test_and_set_bit(0, &ap->std_refill_busy))
		ace_load_std_rx_ring(ap, RX_RING_SIZE);
	else
		printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
		       ap->name);

	if (ap->version >= 2) {
		if (!test_and_set_bit(0, &ap->mini_refill_busy))
			ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
		else
			printk(KERN_ERR "%s: Someone is busy refilling "
			       "the RX mini ring\n", ap->name);
	}
	return 0;

 init_error:
	ace_init_cleanup(dev);
	return ecode;
}
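
/*
 * The link= module parameter consumed above is a bitmask.  A minimal
 * sketch of how those bits decode, using a hypothetical helper that is
 * not part of the driver:
 */
#if 0	/* illustrative sketch only */
static void ace_print_link_opts(int option)	/* hypothetical */
{
	if (option & 0x01)
		printk(KERN_INFO "force half duplex\n");
	if (option & 0x02)
		printk(KERN_INFO "disable autonegotiation\n");
	if (option & 0x10)
		printk(KERN_INFO "allow 10Mbit\n");
	if (option & 0x20)
		printk(KERN_INFO "allow 100Mbit\n");
	if (option & 0x40)
		printk(KERN_INFO "allow 1000Mbit\n");
	if (option & 0x100)
		printk(KERN_INFO "disable flow control negotiation\n");
	if (option & 0x200)
		printk(KERN_INFO "enable RX flow control\n");
	if (option & 0x400)
		printk(KERN_INFO "enable TX flow control (Tigon II only)\n");
}
#endif
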
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	int board_idx = ap->board_idx;

	if (board_idx >= 0) {
		if (!jumbo) {
			if (!tx_coal_tick[board_idx])
				writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_TX_RATIO, &regs->TxBufRat);
		} else {
			if (!tx_coal_tick[board_idx])
				writel(DEF_JUMBO_TX_COAL,
				       &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_JUMBO_TX_MAX_DESC,
				       &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_JUMBO_RX_COAL,
				       &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_JUMBO_RX_MAX_DESC,
				       &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
		}
	}
}


static void ace_watchdog(struct net_device *data)
{
	struct net_device *dev = data;
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	/*
	 * We haven't received a stats update event for more than 2.5
	 * seconds and there is data in the transmit queue, thus we
	 * assume the card is stuck.
	 */
	if (*ap->tx_csm != ap->tx_ret_csm) {
		printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
		       dev->name, (unsigned int)readl(&regs->HostCtrl));
		/* This can happen due to ieee flow control. */
	} else {
		printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
		       dev->name);
#if 0
		netif_wake_queue(dev);
#endif
	}
}


static void ace_tasklet(unsigned long dev)
{
	struct ace_private *ap = netdev_priv((struct net_device *)dev);
	int cur_size;

	cur_size = atomic_read(&ap->cur_rx_bufs);
	if ((cur_size < RX_LOW_STD_THRES) &&
	    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
		printk("refilling buffers (current %i)\n", cur_size);
#endif
		ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
	}

	if (ap->version >= 2) {
		cur_size = atomic_read(&ap->cur_mini_bufs);
		if ((cur_size < RX_LOW_MINI_THRES) &&
		    !test_and_set_bit(0, &ap->mini_refill_busy)) {
#ifdef DEBUG
			printk("refilling mini buffers (current %i)\n",
			       cur_size);
#endif
			ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
		}
	}

	cur_size = atomic_read(&ap->cur_jumbo_bufs);
	if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
#ifdef DEBUG
		printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
		ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
	}
	ap->tasklet_pending = 0;
}


/*
 * Copy the contents of the NIC's trace buffer to kernel memory.
 */
static void ace_dump_trace(struct ace_private *ap)
{
#if 0
	if (!ap->trace_buf)
		if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
			return;
#endif
}
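
/*
 * The refill paths in ace_tasklet() above all rely on the same
 * lock-free idiom: test_and_set_bit() doubles as a trylock, so only
 * the context that observes the bit as previously clear performs the
 * refill; everyone else skips it.  A minimal sketch of the idiom
 * (this helper is hypothetical, not part of the driver):
 */
#if 0	/* illustrative sketch only */
static void ace_try_refill_std(struct ace_private *ap)	/* hypothetical */
{
	int cur_size = atomic_read(&ap->cur_rx_bufs);

	if ((cur_size < RX_LOW_STD_THRES) &&
	    !test_and_set_bit(0, &ap->std_refill_busy))
		/*
		 * We won the "trylock"; ace_load_std_rx_ring() clears
		 * the bit again on its way out.
		 */
		ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
}
#endif
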
/*
 * Load the standard rx ring.
 *
 * Loading rings is safe without holding the spin lock since this is
 * done only before the device is enabled, thus no interrupts are
 * generated and it is not called by the interrupt handler/tasklet
 * handler.
 */
static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	prefetchw(&ap->cur_rx_bufs);

	idx = ap->rx_std_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_STD_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_std_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_std_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_STD_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_STD_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_rx_bufs);
	ap->rx_std_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_std_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxStdPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->std_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "standard receive buffers\n");
	goto out;
}


static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	prefetchw(&ap->cur_mini_bufs);

	idx = ap->rx_mini_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_MINI_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_mini_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_mini_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_MINI_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_MINI_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_mini_bufs);

	ap->rx_mini_skbprd = idx;

	writel(idx, &regs->RxMiniPrd);
	wmb();

 out:
	clear_bit(0, &ap->mini_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "mini receive buffers\n");
	goto out;
}
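
/*
 * Both ring loaders above repeat the same allocate-and-map step per
 * buffer.  A minimal sketch of that step, factored into a hypothetical
 * helper (not part of the driver) that takes the buffer size as a
 * parameter:
 */
#if 0	/* illustrative sketch only */
static dma_addr_t ace_map_rx_buf(struct ace_private *ap,
				 struct sk_buff **skbp, int bufsize)
{
	struct sk_buff *skb = alloc_skb(bufsize + NET_IP_ALIGN, GFP_ATOMIC);

	if (!skb)
		return 0;
	/* reserve NET_IP_ALIGN bytes so the IP header lands aligned */
	skb_reserve(skb, NET_IP_ALIGN);
	*skbp = skb;
	/* map the buffer for device-to-host (RX) DMA */
	return pci_map_page(ap->pdev, virt_to_page(skb->data),
			    offset_in_page(skb->data), bufsize,
			    PCI_DMA_FROMDEVICE);
}
#endif
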
/*
 * Load the jumbo rx ring, this may happen at any time if the MTU
 * is changed to a value > 1500.
 */
static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	idx = ap->rx_jumbo_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_JUMBO_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_jumbo_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_JUMBO_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_jumbo_bufs);
	ap->rx_jumbo_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_jumbo_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxJumboPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->jumbo_refill_busy);
	return;

 error_out:
	if (net_ratelimit())
		printk(KERN_INFO "Out of memory when allocating "
		       "jumbo receive buffers\n");
	goto out;
}


/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
	struct ace_private *ap;