/* ns83820.c — National Semiconductor DP83820 gigabit ethernet driver (excerpt) */
/* --- tail of rx_refill(); the function header and allocation loop are
 * above this excerpt.  Aligns each freshly allocated skb and hands it
 * to the hardware rx ring. --- */
	/* round the buffer start up to a 16-byte boundary */
	res = 0x10 - res;
	res &= 0xf;
	skb_reserve(skb, res);
	skb->dev = &dev->net_dev;
	/* GFP_ATOMIC callers appear to already hold rx_info.lock (it is
	 * dropped below when gfp == GFP_ATOMIC); only the sleeping path
	 * takes it here.  TODO confirm against the unseen function head. */
	if (gfp != GFP_ATOMIC)
		spin_lock_irqsave(&dev->rx_info.lock, flags);
	res = ns83820_add_rx_skb(dev, skb);
	if (gfp != GFP_ATOMIC)
		spin_unlock_irqrestore(&dev->rx_info.lock, flags);
	if (res) {
		/* could not queue this skb — note it and stop refilling */
		i = 1;
		break;
	}
	}
	if (gfp == GFP_ATOMIC)
		spin_unlock_irqrestore(&dev->rx_info.lock, flags);
	return i ? 0 : -ENOMEM;
}

/* REFILL: process-context rx ring refill (may sleep), then restart the
 * receiver if it is still up. */
static inline void queue_refill(void *_dev)
{
	struct ns83820 *dev = _dev;

	rx_refill(dev, GFP_KERNEL);
	if (dev->rx_info.up)
		kick_rx(dev);
}

/* Reset rx descriptor 'i' to an empty state owned by the hardware. */
static inline void clear_rx_desc(struct ns83820 *dev, unsigned i)
{
	build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0,
		      CMDSTS_OWN, 0);
}

/* phy_intr: react to a link/PHY status change.  Re-reads the link
 * configuration, programs duplex settings into TXCFG/RXCFG and starts
 * or stops the tx queue on link up/down transitions. */
static void phy_intr(struct ns83820 *dev)
{
	static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
	u32 cfg, new_cfg;
	u32 tbisr, tanar, tanlpar;
	int speed, fullduplex, newlinkstate;

	/* NOTE(review): SPDSTS_POLARITY presumably compensates for inverted
	 * speed-status bits on some boards — confirm against the datasheet */
	cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;

	if (dev->CFG_cache & CFG_TBI_EN) {
		/* we have an optical transceiver */
		tbisr = readl(dev->base + TBISR);
		tanar = readl(dev->base + TANAR);
		tanlpar = readl(dev->base + TANLPAR);
		dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n",
			tbisr, tanar, tanlpar);

		if ( (fullduplex = (tanlpar & TANAR_FULL_DUP)
		      && (tanar & TANAR_FULL_DUP)) ) {
			/* both of us are full duplex */
			writel(readl(dev->base + TXCFG)
			       | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
			       dev->base + RXCFG);
			/* Light up full duplex LED */
			writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
			       dev->base + GPIOR);
		} else if(((tanlpar & TANAR_HALF_DUP)
			   && (tanar & TANAR_HALF_DUP))
			  || ((tanlpar & TANAR_FULL_DUP)
			      && (tanar & TANAR_HALF_DUP))
			  || ((tanlpar & TANAR_HALF_DUP)
			      && (tanar & TANAR_FULL_DUP))) {
			/* one or both of us are half duplex */
			writel((readl(dev->base + TXCFG)
				& ~(TXCFG_CSI | TXCFG_HBI)) | TXCFG_ATP,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD,
			       dev->base + RXCFG);
			/* Turn off full duplex LED */
			writel(readl(dev->base +
GPIOR) & ~GPIOR_GP1_OUT,
			       dev->base + GPIOR);
		}
		speed = 4; /* 1000F */
	} else {
		/* we have a copper transceiver */
		new_cfg = dev->CFG_cache
			& ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS);

		if (cfg & CFG_SPDSTS1)
			new_cfg |= CFG_MODE_1000 | CFG_SB;
		else
			/* NOTE(review): precedence — this is
			 * (~CFG_MODE_1000) | CFG_SB, not
			 * ~(CFG_MODE_1000 | CFG_SB).  Harmless here because
			 * both bits were cleared in new_cfg above, but the
			 * intent looks like the latter — confirm. */
			new_cfg &= ~CFG_MODE_1000 | CFG_SB;

		/* reprogram CFG only when gigabit mode changed and the
		 * link is up */
		if ((cfg & CFG_LNKSTS) &&
		    ((new_cfg ^ dev->CFG_cache) & CFG_MODE_1000)) {
			writel(new_cfg, dev->base + CFG);
			dev->CFG_cache = new_cfg;
		}

		/* mirror the current speed-status bits into the cache */
		dev->CFG_cache &= ~CFG_SPDSTS;
		dev->CFG_cache |= cfg & CFG_SPDSTS;

		speed = ((cfg / CFG_SPDSTS0) & 3);
		fullduplex = (cfg & CFG_DUPSTS);
	}

	newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN;

	if (newlinkstate & LINK_UP
	    && dev->linkstate != newlinkstate) {
		netif_start_queue(&dev->net_dev);
		netif_wake_queue(&dev->net_dev);
		printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n",
			dev->net_dev.name,
			speeds[speed],
			fullduplex ? "full" : "half");
	} else if (newlinkstate & LINK_DOWN
		   && dev->linkstate != newlinkstate) {
		netif_stop_queue(&dev->net_dev);
		printk(KERN_INFO "%s: link now down.\n", dev->net_dev.name);
	}

	dev->linkstate = newlinkstate;
}

/* ns83820_setup_rx: initialise the rx descriptor ring, fill it with
 * buffers and, on success, enable the receiver and its interrupts.
 * Returns 0 on success or the rx_refill() failure code. */
static int ns83820_setup_rx(struct ns83820 *dev)
{
	unsigned i;
	int ret;

	dprintk("ns83820_setup_rx(%p)\n", dev);

	dev->rx_info.idle = 1;
	dev->rx_info.next_rx = 0;
	dev->rx_info.next_empty = 0;

	/* hand every descriptor to the hardware, empty */
	for (i=0; i<NR_RX_DESC; i++)
		clear_rx_desc(dev, i);

	/* point the chip at the ring; high half of the address is zero */
	writel(0, dev->base + RXDP_HI);
	writel(dev->rx_info.phy_descs, dev->base + RXDP);

	ret = rx_refill(dev, GFP_KERNEL);
	if (!ret) {
		dprintk("starting receiver\n");
		/* prevent the interrupt handler from stomping on us */
		spin_lock_irq(&dev->rx_info.lock);

		writel(0x0001, dev->base + CCSR);
		/* NOTE(review): RFCR writes presumably program the receive
		 * filter — confirm the magic constants against the DP83820
		 * datasheet */
		writel(0, dev->base + RFCR);
		writel(0x7fc00000, dev->base + RFCR);
		writel(0xffc00000, dev->base + RFCR);

		dev->rx_info.up = 1;

		phy_intr(dev);

		/* Okay, let it rip */
		spin_lock(&dev->misc_lock);
		dev->IMR_cache |= ISR_PHY;
		dev->IMR_cache |= ISR_RXRCMP;
		//dev->IMR_cache |= ISR_RXERR;
		//dev->IMR_cache |= ISR_RXOK;
		dev->IMR_cache |= ISR_RXORN;
		dev->IMR_cache |= ISR_RXSOVR;
		dev->IMR_cache |= ISR_RXDESC;
dev->IMR_cache |= ISR_RXIDLE;
		dev->IMR_cache |= ISR_TXDESC;
		//dev->IMR_cache |= ISR_TXIDLE;
		/* commit the accumulated mask and enable interrupts */
		writel(dev->IMR_cache, dev->base + IMR);
		writel(1, dev->base + IER);
		spin_unlock(&dev->misc_lock);

		kick_rx(dev);

		spin_unlock_irq(&dev->rx_info.lock);
	}
	return ret;
}

/* ns83820_cleanup_rx: stop the receiver, mask its interrupts and free
 * every buffer still queued on the rx ring. */
static void ns83820_cleanup_rx(struct ns83820 *dev)
{
	unsigned i;
	long flags;

	dprintk("ns83820_cleanup_rx(%p)\n", dev);

	/* disable receive interrupts */
	spin_lock_irqsave(&dev->misc_lock, flags);
	dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR
			    | ISR_RXEARLY | ISR_RXIDLE);
	writel(dev->IMR_cache, dev->base + IMR);
	spin_unlock_irqrestore(&dev->misc_lock, flags);

	/* synchronize with the interrupt handler and kill it */
	dev->rx_info.up = 0;
	synchronize_irq();

	/* touch the pci bus... */
	readl(dev->base + IMR);

	/* assumes the transmitter is already disabled and reset */
	writel(0, dev->base + RXDP_HI);
	writel(0, dev->base + RXDP);

	/* reclaim every skb still attached to the ring */
	for (i=0; i<NR_RX_DESC; i++) {
		struct sk_buff *skb = dev->rx_info.skbs[i];
		dev->rx_info.skbs[i] = NULL;
		clear_rx_desc(dev, i);
		if (skb)
			kfree_skb(skb);
	}
}

/* rx_irq: walk the rx ring handing completed packets to the network
 * stack.  Runs under rx_info.lock with interrupts disabled. */
static void FASTCALL(rx_irq(struct ns83820 *dev));
static void rx_irq(struct ns83820 *dev)
{
	struct rx_info *info = &dev->rx_info;
	unsigned next_rx;
	u32 cmdsts, *desc;
	long flags;
	int nr = 0;

	dprintk("rx_irq(%p)\n", dev);
	dprintk("rxdp: %08x, descs: %08lx next_rx[%d]: %p next_empty[%d]: %p\n",
		readl(dev->base + RXDP),
		(dev->rx_info.phy_descs),
		dev->rx_info.next_rx,
		(dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)),
		dev->rx_info.next_empty,
		(dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty))
		);

	spin_lock_irqsave(&info->lock, flags);
	if (!info->up)
		goto out;

	dprintk("walking descs\n");
	next_rx = info->next_rx;
	desc = info->descs + (DESC_SIZE * next_rx);
	/* walk while the OWN bit is set (descriptor presumably completed
	 * by the chip — confirm), guarding against a cmdsts that is the
	 * bare CMDSTS_OWN value */
	while ((CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[CMDSTS]))) &&
	       (cmdsts != CMDSTS_OWN)) {
		struct sk_buff *skb;
		u32 extsts = le32_to_cpu(desc[EXTSTS]);
		dmaaddr_high_t bufptr = le32_to_cpu(desc[BUFPTR]);

		dprintk("cmdsts: %08x\n", cmdsts);
		dprintk("link: %08x\n",
cpu_to_le32(desc[LINK]));
		dprintk("extsts: %08x\n", extsts);

		/* detach the skb from the ring and recycle the slot */
		skb = info->skbs[next_rx];
		info->skbs[next_rx] = NULL;
		info->next_rx = (next_rx + 1) % NR_RX_DESC;

		barrier();
		clear_rx_desc(dev, next_rx);

		pci_unmap_single(dev->pci_dev, bufptr,
				 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		if (CMDSTS_OK & cmdsts) {
#if 0 //ndef __i386__
			struct sk_buff *tmp;
#endif
			/* low 16 bits of cmdsts hold the packet length */
			int len = cmdsts & 0xffff;
			if (!skb)
				BUG();
			skb_put(skb, len);
#if 0 //ndef __i386__
			/* I hate the network stack sometimes */
			tmp = __dev_alloc_skb(RX_BUF_SIZE+16, GFP_ATOMIC);
			if (!tmp)
				goto done;
			tmp->dev = &dev->net_dev;
			skb_reserve(tmp, 2);
			memcpy(skb_put(tmp, len), skb->data, len);
			kfree_skb(skb);
			skb = tmp;
#endif
			if (cmdsts & CMDSTS_DEST_MULTI)
				dev->stats.multicast ++;

			dev->stats.rx_packets ++;
			dev->stats.rx_bytes += len;
			/* NOTE(review): the extsts magic masks presumably test
			 * the ip/tcp/udp checksum status bits — confirm
			 * against the DP83820 EXTSTS bit definitions */
			if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}
			skb->protocol = eth_type_trans(skb, &dev->net_dev);
			if (NET_RX_DROP == netif_rx(skb))
				dev->stats.rx_dropped ++;
#if 0 //ndef __i386__
			done:;
#endif
		} else {
			/* error frame — drop it */
			kfree_skb(skb);
		}

		nr++;
		next_rx = info->next_rx;
		desc = info->descs + (DESC_SIZE * next_rx);
	}
	info->next_rx = next_rx;

out:
	if (0 && !nr) {
		Dprintk("dazed: cmdsts_f: %08x\n", cmdsts);
	}
	spin_unlock_irqrestore(&info->lock, flags);
}

/* Packet Transmit code
 */

/* kick_tx: (re)start the transmit engine. */
static inline void kick_tx(struct ns83820 *dev)
{
	dprintk("kick_tx(%p): tx_idle=%ld, tx_idx=%d free_idx=%d\n",
		dev, dev->tx_idle, dev->tx_idx, dev->tx_free_idx);
	writel(CR_TXE, dev->base + CR);
}

/* do_tx_done: reap completed tx descriptors, free their skbs and update
 * the statistics.  No spinlock needed on the transmit irq path as the
 * interrupt handler is serialized. */
static void do_tx_done(struct ns83820 *dev)
{
	u32 cmdsts, tx_done_idx, *desc;

	dprintk("do_tx_done(%p)\n", dev);
	tx_done_idx = dev->tx_done_idx;
	desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);

	dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
		tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[CMDSTS]));
	/* walk until we catch up with the free index or hit a descriptor
	 * the hardware still owns */
	while ((tx_done_idx != dev->tx_free_idx) &&
	       !(CMDSTS_OWN & (cmdsts =
le32_to_cpu(desc[CMDSTS]))) ) {
		struct sk_buff *skb;

		if (cmdsts & CMDSTS_ERR)
			dev->stats.tx_errors ++;
		if (cmdsts & CMDSTS_OK)
			dev->stats.tx_packets ++;
		if (cmdsts & CMDSTS_OK)
			/* low 16 bits of cmdsts carry the buffer length */
			dev->stats.tx_bytes += cmdsts & 0xffff;

		dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
			tx_done_idx, dev->tx_free_idx, cmdsts);
		skb = dev->tx_skbs[tx_done_idx];
		dev->tx_skbs[tx_done_idx] = NULL;
		dprintk("done(%p)\n", skb);

		if (skb) {
			pci_unmap_single(dev->pci_dev,
					le32_to_cpu(desc[BUFPTR]),
					skb->len,
					PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
		}

		tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
		dev->tx_done_idx = tx_done_idx;
		/* mark the slot idle before moving to the next one */
		desc[CMDSTS] = cpu_to_le32(0);
		barrier();
		desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
	}

	/* Allow network stack to resume queueing packets after we've
	 * finished transmitting at least 1/4 of the packets in the queue.
	 */
	if (netif_queue_stopped(&dev->net_dev) && start_tx_okay(dev)) {
		dprintk("start_queue(%p)\n", dev);
		netif_start_queue(&dev->net_dev);
		netif_wake_queue(&dev->net_dev);
	}
}

/* ns83820_cleanup_tx: free every pending tx skb and reset the tx ring.
 * Assumes the transmitter has already been stopped — TODO confirm
 * against the callers (outside this excerpt). */
static void ns83820_cleanup_tx(struct ns83820 *dev)
{
	unsigned i;

	for (i=0; i<NR_TX_DESC; i++) {
		struct sk_buff *skb = dev->tx_skbs[i];
		dev->tx_skbs[i] = NULL;
		if (skb)
			dev_kfree_skb(skb);
	}

	memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4);
	set_bit(0, &dev->tx_idle);
}

/* transmit routine.  This code relies on the network layer serializing
 * its calls in, but will run happily in parallel with the interrupt
 * handler.  This code currently has provisions for fragmenting tx
 * buffers while trying to track down a bug in either the zero copy code
 * or the tx fifo (hence the MAX_FRAG_LEN).
*/
#define MAX_FRAG_LEN	8192	/* disabled for now */

static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *_dev)
{
	struct ns83820 *dev = (struct ns83820 *)_dev;
	u32 free_idx, cmdsts, extsts;
	int nr_free, nr_frags;
	unsigned tx_done_idx;
	dmaaddr_high_t buf;
	unsigned len;
	skb_frag_t *frag;
	int stopped = 0;
	int do_intr = 0;
	volatile u32 *first_desc;

	dprintk("ns83820_hard_start_xmit\n");

	nr_frags = skb_shinfo(skb)->nr_frags;
again:
	/* NOTE(review): CFG_LNKSTS set in CFG_cache is treated as "don't
	 * transmit" here, yet phy_intr reads the same bit from CFG as
	 * link-up — confirm the polarity of the cached copy */
	if (__builtin_expect(dev->CFG_cache & CFG_LNKSTS, 0)) {
		netif_stop_queue(&dev->net_dev);
		if (__builtin_expect(dev->CFG_cache & CFG_LNKSTS, 0))
			return 1;
		netif_start_queue(&dev->net_dev);
	}

	free_idx = dev->tx_free_idx;
	tx_done_idx = dev->tx_done_idx;
	/* free descriptor count, keeping a safety margin of slots */
	nr_free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC;
	nr_free -= 1;
	if ((nr_free <= nr_frags) || (nr_free <= 8192 / MAX_FRAG_LEN)) {
		dprintk("stop_queue - not enough(%p)\n", dev);
		netif_stop_queue(&dev->net_dev);

		/* Check again: we may have raced with a tx done irq */
		if (dev->tx_done_idx != tx_done_idx) {
			dprintk("restart queue(%p)\n", dev);
			netif_start_queue(&dev->net_dev);
			goto again;
		}
		return 1;
	}

	/* request a completion interrupt roughly every NR_TX_DESC/2
	 * packets */
	if (free_idx == dev->tx_intr_idx) {
		do_intr = 1;
		dev->tx_intr_idx = (dev->tx_intr_idx + NR_TX_DESC/2) % NR_TX_DESC;
	}

	nr_free -= nr_frags;
	if (nr_free < 1) {
		/* this packet consumes the last usable slots */
		dprintk("stop_queue - last entry(%p)\n", dev);
		netif_stop_queue(&dev->net_dev);
		stopped = 1;
	}

	frag = skb_shinfo(skb)->frags;
	if (!nr_frags)
		frag = 0;
	/* set extsts checksum flags for hw-checksummed packets */
	extsts = 0;
	if (skb->ip_summed == CHECKSUM_HW) {
		extsts |= EXTSTS_IPPKT;
		if (IPPROTO_TCP == skb->nh.iph->protocol)
			extsts |= EXTSTS_TCPPKT;
		else if (IPPROTO_UDP == skb->nh.iph->protocol)
			extsts |= EXTSTS_UDPPKT;
	}

	/* length of the linear (non-paged) part of the skb */
	len = skb->len;
	if (nr_frags)
		len -= skb->data_len;
	buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	first_desc = dev->tx_descs + (free_idx * DESC_SIZE);

	/* build one descriptor per fragment (and per MAX_FRAG_LEN chunk
	 * when the disabled splitting code is enabled) */
	for (;;) {
		volatile u32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
		u32 residue = 0;
#if 0
		if (len > MAX_FRAG_LEN) {
			residue = len;
			/* align the start address of the next fragment */
			len
= MAX_FRAG_LEN;
			residue -= len;
		}
#endif

		dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
			(unsigned long long)buf);
		free_idx = (free_idx + 1) % NR_TX_DESC;
		/* chain this descriptor to the next ring slot */
		desc[LINK] = cpu_to_le32(dev->tx_phy_descs
					 + (free_idx * DESC_SIZE * 4));
		desc[BUFPTR] = cpu_to_le32(buf);
		desc[EXTSTS] = cpu_to_le32(extsts);
		/* MORE marks all but the final fragment; INTR only on the
		 * descriptor chosen to raise a completion interrupt */
		cmdsts = ((nr_frags|residue) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
		/* OWN is withheld from the first descriptor until the whole
		 * chain is built — it is set at the bottom of the function */
		cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN;
		cmdsts |= len;
		desc[CMDSTS] = cpu_to_le32(cmdsts);

		if (residue) {
			/* continue with the remainder of this fragment */
			buf += len;
			len = residue;
			continue;
		}

		if (!nr_frags)
			break;

		/* map the next paged fragment */
		buf = pci_map_single_high(dev->pci_dev, frag->page,
					  frag->page_offset,
					  frag->size, PCI_DMA_TODEVICE);
		dprintk("frag: buf=%08Lx page=%08lx\n",
			(long long)buf, (long)(frag->page - mem_map));
		len = frag->size;
		frag++;
		nr_frags--;
	}
	dprintk("done pkt\n");

	dev->tx_skbs[free_idx] = skb;
	/* hand the complete chain to the hardware in one store */
	first_desc[CMDSTS] |= cpu_to_le32(CMDSTS_OWN);
	dev->tx_free_idx = free_idx;
	kick_tx(dev);

	/* Check again: we may have raced with a tx done irq */
	if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
		netif_start_queue(&dev->net_dev);

	return 0;
}

/* ns83820_update_stats: read the chip's statistics registers and fold
 * them into dev->stats.  Caller holds misc_lock (see ns83820_get_stats).
 * NOTE(review): register meanings and the apparent read-to-clear
 * behaviour of the commented-out counters are assumed from the offsets —
 * confirm against the DP83820 datasheet. */
static void ns83820_update_stats(struct ns83820 *dev)
{
	u8 *base = dev->base;

	dev->stats.rx_errors += readl(base + 0x60) & 0xffff;
	dev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff;
	dev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff;
	dev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff;
	/*dev->stats.rx_symbol_errors +=*/ readl(base + 0x70);
	dev->stats.rx_length_errors += readl(base + 0x74) & 0xffff;
	dev->stats.rx_length_errors += readl(base + 0x78) & 0xffff;
	/*dev->stats.rx_badopcode_errors += */ readl(base + 0x7c);
	/*dev->stats.rx_pause_count += */ readl(base + 0x80);
	/*dev->stats.tx_pause_count += */ readl(base + 0x84);
	dev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff;
}

/* ns83820_get_stats: refresh the hardware counters and return the
 * accumulated statistics. */
static struct net_device_stats *ns83820_get_stats(struct net_device *_dev)
{
	struct ns83820 *dev = (void *)_dev;

	/* somewhat overkill */
	spin_lock_irq(&dev->misc_lock);
	ns83820_update_stats(dev);
spin_unlock_irq(&dev->misc_lock);

	return &dev->stats;
}

/* ns83820_ethtool_ioctl: ethtool sub-ioctl dispatcher (body continues
 * beyond this excerpt). */
static int ns83820_ethtool_ioctl (struct ns83820 *dev, void *useraddr)
/* (end of excerpt — webpage keyboard-shortcut help removed; it was viewer
 * chrome from the code-sharing site, not part of the driver source) */