📄 acenic.c
			ace_issue_cmd(ap->regs, &cmd);
		} else {
			writel(0, &((ap->regs)->RxJumboPrd));
			wmb();
		}

		ap->jumbo = 0;
		ap->rx_jumbo_skbprd = 0;
		printk(KERN_INFO "%s: Jumbo ring flushed\n",
		       dev->name);
		if (!ap->tx_full)
			netif_wake_queue(dev);
		clear_bit(0, &ap->jumbo_refill_busy);
		break;
	}
	default:
		printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
		       dev->name, ap->evt_ring[evtcsm].evt);
	}

	evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}

	return evtcsm;
}


static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
	struct ace_private *ap = dev->priv;
	u32 idx;
	int mini_count = 0, std_count = 0;

	idx = rxretcsm;

	while (idx != rxretprd) {
		struct ring_info *rip;
		struct sk_buff *skb;
		struct rx_desc *rxdesc, *retdesc;
		u32 skbidx;
		int desc_type, mapsize;
		u16 csum;

		retdesc = &ap->rx_return_ring[idx];
		skbidx = retdesc->idx;
		desc_type = retdesc->flags & (BD_FLG_JUMBO | BD_FLG_MINI);

		switch(desc_type) {
			/*
			 * Normal frames do not have any flags set
			 *
			 * Mini and normal frames arrive frequently,
			 * so use a local counter to avoid doing
			 * atomic operations for each packet arriving.
			 */
		case 0:
			rip = &ap->skb->rx_std_skbuff[skbidx];
			mapsize = ACE_STD_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_std_ring[skbidx];
			std_count++;
			break;
		case BD_FLG_JUMBO:
			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
			mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_jumbo_ring[skbidx];
			atomic_dec(&ap->cur_jumbo_bufs);
			break;
		case BD_FLG_MINI:
			rip = &ap->skb->rx_mini_skbuff[skbidx];
			mapsize = ACE_MINI_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_mini_ring[skbidx];
			mini_count++;
			break;
		default:
			printk(KERN_INFO "%s: unknown frame type (0x%02x) "
			       "returned by NIC\n", dev->name,
			       retdesc->flags);
			goto error;
		}

		skb = rip->skb;
		rip->skb = NULL;
		pci_unmap_single(ap->pdev, rip->mapping, mapsize,
				 PCI_DMA_FROMDEVICE);
		skb_put(skb, retdesc->size);
#if 0
		/* unnecessary */
		rxdesc->size = 0;
#endif

		/*
		 * Fly baby, fly!
		 */
		csum = retdesc->tcp_udp_csum;

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);

		/*
		 * If the checksum is correct and this is not a
		 * fragment, tell the stack that the data is correct.
		 */
		if (!(csum ^ 0xffff) &&
		    (!(((struct iphdr *)skb->data)->frag_off &
		       __constant_htons(IP_MF|IP_OFFSET))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		netif_rx(skb);		/* send it up */

		ap->stats.rx_packets++;
		ap->stats.rx_bytes += retdesc->size;

		idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
	}

	atomic_sub(std_count, &ap->cur_rx_bufs);
	if (!ACE_IS_TIGON_I(ap))
		atomic_sub(mini_count, &ap->cur_mini_bufs);

 out:
	/*
	 * According to the documentation RxRetCsm is obsolete with
	 * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
	 */
	if (ACE_IS_TIGON_I(ap)) {
		struct ace_regs *regs = ap->regs;
		writel(idx, &regs->RxRetCsm);
	}
	ap->cur_rx = idx;

	return;

 error:
	idx = rxretprd;
	goto out;
}


static void ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct net_device *dev = (struct net_device *)dev_id;
	u32 idx;
	u32 txcsm, rxretcsm, rxretprd;
	u32 evtcsm, evtprd;

	ap = dev->priv;
	regs = ap->regs;

	/*
	 * In case of PCI shared interrupts or spurious interrupts,
	 * we want to make sure it is actually our interrupt before
	 * spending any time in here.
	 */
	if (!(readl(&regs->HostCtrl) & IN_INT))
		return;

	/*
	 * Tell the card not to generate interrupts while we are in here.
	 */
	writel(1, &regs->Mb0Lo);

	/*
	 * There is no conflict between transmit handling in
	 * start_xmit and receive processing, thus there is no reason
	 * to take a spin lock for RX handling. Wait until we start
	 * working on the other stuff - hey we don't need a spin lock
	 * anymore.
	 */
	rxretprd = *ap->rx_ret_prd;
	rxretcsm = ap->cur_rx;

	if (rxretprd != rxretcsm)
		ace_rx_int(dev, rxretprd, rxretcsm);

	txcsm = *ap->tx_csm;
	idx = ap->tx_ret_csm;

	if (txcsm != idx) {
		do {
			struct sk_buff *skb;

			skb = ap->skb->tx_skbuff[idx].skb;
			/*
			 * Race condition between the code cleaning
			 * the tx queue in the interrupt handler and the
			 * interface close,
			 *
			 * This is a kludge that really should be fixed
			 * by preventing the driver from generating a tx
			 * interrupt when the packet has already been
			 * removed from the tx queue.
			 *
			 * Nailed by Don Dugger and Chip Salzenberg of
			 * VA Linux.
			 */
			if (skb) {
				dma_addr_t mapping;

				mapping = ap->skb->tx_skbuff[idx].mapping;

				ap->stats.tx_packets++;
				ap->stats.tx_bytes += skb->len;
				pci_unmap_single(ap->pdev, mapping,
						 skb->len,
						 PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);

				ap->skb->tx_skbuff[idx].skb = NULL;
			}

			/*
			 * Question here is whether one should not skip
			 * these writes - I have never seen any errors
			 * caused by the NIC actually trying to access
			 * these incorrectly.
			 */
#ifdef ACE_64BIT_PTR
			ap->tx_ring[idx].addr.addrhi = 0;
#endif
			ap->tx_ring[idx].addr.addrlo = 0;
			ap->tx_ring[idx].flagsize = 0;

			idx = (idx + 1) % TX_RING_ENTRIES;
		} while (idx != txcsm);

		/*
		 * Once we actually get to this point the tx ring has
		 * already been trimmed thus it cannot be full!
		 * Ie. skip the comparison of the tx producer vs. the
		 * consumer.
		 */
		if (netif_queue_stopped(dev) && xchg(&ap->tx_full, 0)) {
			/*
			 * This does not need to be atomic (and expensive),
			 * I've seen cases where it would fail otherwise ;-(
			 */
			netif_wake_queue(dev);
			ace_mark_net_bh();

			/*
			 * TX ring is no longer full, aka the
			 * transmitter is working fine - kill timer.
			 */
			del_timer(&ap->timer);
		}

		ap->tx_ret_csm = txcsm;
		wmb();
	}

	evtcsm = readl(&regs->EvtCsm);
	evtprd = *ap->evt_prd;

	if (evtcsm != evtprd) {
		evtcsm = ace_handle_event(dev, evtcsm, evtprd);
		writel(evtcsm, &regs->EvtCsm);
	}

	/*
	 * This has to go last in the interrupt handler and run with
	 * the spin lock released ... what lock?
	 */
	if (netif_running(dev)) {
		int cur_size;
		int run_tasklet = 0;

		cur_size = atomic_read(&ap->cur_rx_bufs);
		if (cur_size < RX_LOW_STD_THRES) {
			if ((cur_size < RX_PANIC_STD_THRES) &&
			    !test_and_set_bit(0, &ap->std_refill_busy)) {
#if DEBUG
				printk("low on std buffers %i\n", cur_size);
#endif
				ace_load_std_rx_ring(ap,
						     RX_RING_SIZE - cur_size);
			} else
				run_tasklet = 1;
		}

		if (!ACE_IS_TIGON_I(ap)) {
			cur_size = atomic_read(&ap->cur_mini_bufs);
			if (cur_size < RX_LOW_MINI_THRES) {
				if ((cur_size < RX_PANIC_MINI_THRES) &&
				    !test_and_set_bit(0,
						      &ap->mini_refill_busy)) {
#if DEBUG
					printk("low on mini buffers %i\n",
					       cur_size);
#endif
					ace_load_mini_rx_ring(ap,
							      RX_MINI_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}

		if (ap->jumbo) {
			cur_size = atomic_read(&ap->cur_jumbo_bufs);
			if (cur_size < RX_LOW_JUMBO_THRES) {
				if ((cur_size < RX_PANIC_JUMBO_THRES) &&
				    !test_and_set_bit(0,
						      &ap->jumbo_refill_busy)) {
#if DEBUG
					printk("low on jumbo buffers %i\n",
					       cur_size);
#endif
					ace_load_jumbo_rx_ring(ap,
							       RX_JUMBO_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}

		if (run_tasklet && !ap->tasklet_pending) {
			ap->tasklet_pending = 1;
			tasklet_schedule(&ap->ace_tasklet);
		}
	}

	/*
	 * Allow the card to generate interrupts again
	 */
	writel(0, &regs->Mb0Lo);
}


static int ace_open(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct cmd cmd;

	ap = dev->priv;
	regs = ap->regs;

	if (!(ap->fw_running)) {
		printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
		return -EBUSY;
	}

	writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);

	/*
	 * Zero the stats when restarting the interface...
	 */
	memset(&ap->stats, 0, sizeof(ap->stats));

	cmd.evt = C_CLEAR_STATS;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_UP;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	if (ap->jumbo &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy))
		ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);

	if (dev->flags & IFF_PROMISC) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);

		ap->promisc = 1;
	} else
		ap->promisc = 0;
	ap->mcast_all = 0;

#if 0
	cmd.evt = C_LNK_NEGOTIATION;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);
#endif

	netif_start_queue(dev);

	ACE_MOD_INC_USE_COUNT;

	/*
	 * Setup the timer
	 */
	init_timer(&ap->timer);
	ap->timer.data = (unsigned long)dev;
	ap->timer.function = ace_timer;

	/*
	 * Setup the bottom half rx ring refill handler
	 */
	tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
	return 0;
}


static int ace_close(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct cmd cmd;
	unsigned long flags;
	short i;

	ace_if_down(dev);
	netif_stop_queue(dev);

	ap = dev->priv;
	regs = ap->regs;

	del_timer(&ap->timer);

	if (ap->promisc) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 0;
	}

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_DOWN;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	tasklet_kill(&ap->ace_tasklet);

	/*
	 * Make sure one CPU is not processing packets while
	 * buffers are being released by another.
	 */
	save_flags(flags);
	cli();

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = ap->skb->tx_skbuff[i].skb;
		mapping = ap->skb->tx_skbuff[i].mapping;
		if (skb) {
			memset(&ap->tx_ring[i].addr, 0,
			       sizeof(struct tx_desc));
			pci_unmap_single(ap->pdev, mapping, skb->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			ap->skb->tx_skbuff[i].skb = NULL;
		}
	}

	if (ap->jumbo) {
		cmd.evt = C_RESET_JUMBO_RNG;
		cmd.code = 0;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}

	restore_flags(flags);

	ACE_MOD_DEC_USE_COUNT;
	return 0;
}


static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	struct ace_regs *regs = ap->regs;
	unsigned long addr;
	u32 idx, flagsize;

	/*
	 * This only happens with pre-softnet, ie. 2.2.x kernels.
	 */
	if (early_stop_netif_stop_queue(dev))
		return 1;

	idx = ap->tx_prd;

	if ((idx + 1) % TX_RING_ENTRIES == ap->tx_ret_csm) {
		ap->tx_full = 1;
#if DEBUG
		printk("%s: trying to transmit while the