starfire.c
    dev->trans_start = jiffies;
    return 0;
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct netdev_private *np;
    long ioaddr;
    int boguscnt = max_interrupt_work;
    int consumer;
    int tx_status;

#ifndef final_version
    /* Can never occur. */
    if (dev == NULL) {
        printk(KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown device.\n", irq);
        return;
    }
#endif

    ioaddr = dev->base_addr;
    np = dev->priv;

    do {
        u32 intr_status = readl(ioaddr + IntrClear);

        if (debug > 4)
            printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
                   dev->name, intr_status);

        if (intr_status == 0)
            break;

        if (intr_status & IntrRxDone)
            netdev_rx(dev);

        /* Scavenge the skbuff list based on the Tx-done queue.
           There are redundant checks here that may be cleaned up after
           the driver has proven to be reliable. */
        consumer = readl(ioaddr + TxConsumerIdx);
        if (debug > 4)
            printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
                   dev->name, consumer);
#if 0
        if (np->tx_done >= 250 || np->tx_done == 0)
            printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, %d is %8.8x.\n",
                   dev->name, np->tx_done,
                   le32_to_cpu(np->tx_done_q[np->tx_done].status),
                   (np->tx_done + 1) & (DONE_Q_SIZE - 1),
                   le32_to_cpu(np->tx_done_q[(np->tx_done + 1) & (DONE_Q_SIZE - 1)].status));
#endif
        while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
            if (debug > 4)
                printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
                       dev->name, np->tx_done, tx_status);
            if ((tx_status & 0xe0000000) == 0xa0000000) {
                np->stats.tx_packets++;
            } else if ((tx_status & 0xe0000000) == 0x80000000) {
                struct sk_buff *skb;
#ifdef ZEROCOPY
                int i;
#endif /* ZEROCOPY */
                u16 entry = tx_status;          /* Implicit truncate */
                entry /= sizeof(struct starfire_tx_desc);

                skb = np->tx_info[entry].skb;
                np->tx_info[entry].skb = NULL;
                pci_unmap_single(np->pci_dev,
                                 np->tx_info[entry].first_mapping,
                                 skb_first_frag_len(skb),
                                 PCI_DMA_TODEVICE);
                np->tx_info[entry].first_mapping = 0;
#ifdef ZEROCOPY
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    pci_unmap_single(np->pci_dev,
                                     np->tx_info[entry].frag_mapping[i],
                                     skb_shinfo(skb)->frags[i].size,
                                     PCI_DMA_TODEVICE);
                    np->tx_info[entry].frag_mapping[i] = 0;
                }
#endif /* ZEROCOPY */
                /* Scavenge the descriptor. */
                dev_kfree_skb_irq(skb);
                np->dirty_tx++;
            }
            np->tx_done_q[np->tx_done].status = 0;
            np->tx_done = (np->tx_done + 1) & (DONE_Q_SIZE - 1);
        }
        writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);

        if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
            /* The ring is no longer full, wake the queue. */
            np->tx_full = 0;
            netif_wake_queue(dev);
        }

        /* Stats overflow */
        if (intr_status & IntrStatsMax) {
            get_stats(dev);
        }

        /* Media change interrupt. */
        if (intr_status & IntrLinkChange)
            netdev_media_change(dev);

        /* Abnormal error summary/uncommon events handlers. */
        if (intr_status & IntrAbnormalSummary)
            netdev_error(dev, intr_status);

        if (--boguscnt < 0) {
            printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
                   dev->name, intr_status);
            break;
        }
    } while (1);

    if (debug > 4)
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, (int)readl(ioaddr + IntrStatus));

#ifndef final_version
    /* Code that should never be run!  Remove after testing.. */
    {
        static int stopit = 10;
        if (!netif_running(dev) && --stopit < 0) {
            printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
                   dev->name);
            free_irq(irq, dev);
        }
    }
#endif
}


/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
    u32 desc_status;

    if (np->rx_done_q == 0) {
        printk(KERN_ERR "%s: rx_done_q is NULL! rx_done is %d. %p.\n",
               dev->name, np->rx_done, np->tx_done_q);
        return 0;
    }

    /* If EOP is set on the next entry, it's a new packet.  Send it up. */
    while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
        struct sk_buff *skb;
        u16 pkt_len;
        int entry;

        if (debug > 4)
            printk(KERN_DEBUG "  netdev_rx() status of %d was %8.8x.\n",
                   np->rx_done, desc_status);
        if (--boguscnt < 0)
            break;
        if (!(desc_status & RxOK)) {
            /* There was an error. */
            if (debug > 2)
                printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
                       desc_status);
            np->stats.rx_errors++;
            if (desc_status & RxFIFOErr)
                np->stats.rx_fifo_errors++;
            goto next_rx;
        }

        pkt_len = desc_status;                  /* Implicitly truncate */
        entry = (desc_status >> 16) & 0x7ff;

#ifndef final_version
        if (debug > 4)
            printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, bogus_cnt %d.\n",
                   pkt_len, boguscnt);
#endif
        /* Check if the packet is long enough to accept without copying
           to a minimally-sized skbuff. */
        if (pkt_len < rx_copybreak
            && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
            skb->dev = dev;
            skb_reserve(skb, 2);                /* 16 byte align the IP header */
            pci_dma_sync_single(np->pci_dev, np->rx_info[entry].mapping,
                                pkt_len, PCI_DMA_FROMDEVICE);
#if HAS_IP_COPYSUM
            /* Call copy + cksum if available. */
            eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
            skb_put(skb, pkt_len);
#else
            memcpy(skb_put(skb, pkt_len), np->rx_info[entry].skb->tail, pkt_len);
#endif
        } else {
            pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping,
                             np->rx_buf_sz, PCI_DMA_FROMDEVICE);
            skb = np->rx_info[entry].skb;
            skb_put(skb, pkt_len);
            np->rx_info[entry].skb = NULL;
            np->rx_info[entry].mapping = 0;
        }

#ifndef final_version
        /* Remove after testing. */
        /* You will want this info for the initial debug. */
        if (debug > 5)
            printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
                   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
                   "%d.%d.%d.%d.\n",
                   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
                   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
                   skb->data[8], skb->data[9], skb->data[10], skb->data[11],
                   skb->data[12], skb->data[13], skb->data[14], skb->data[15],
                   skb->data[16], skb->data[17]);
#endif
        skb->protocol = eth_type_trans(skb, dev);
#if defined(full_rx_status) || defined(csum_rx_status)
        if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x01000000) {
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        /*
         * This feature doesn't seem to be working, at least
         * with the two firmware versions I have.  If the GFP sees
         * a fragment, it either ignores it completely, or reports
         * "bad checksum" on it.
         *
         * Maybe I missed something -- corrections are welcome.
         * Until then, the printk stays. :-) -Ion
         */
        else if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x00400000) {
            skb->ip_summed = CHECKSUM_HW;
            skb->csum = le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0xffff;
            printk(KERN_DEBUG "%s: checksum_hw, status2 = %x\n",
                   dev->name, np->rx_done_q[np->rx_done].status2);
        }
#endif
        netif_rx(skb);
        dev->last_rx = jiffies;
        np->stats.rx_packets++;

    next_rx:
        np->cur_rx++;
        np->rx_done_q[np->rx_done].status = 0;
        np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE - 1);
    }
    writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);

    /* Refill the Rx ring buffers. */
    for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
        struct sk_buff *skb;
        int entry = np->dirty_rx % RX_RING_SIZE;
        if (np->rx_info[entry].skb == NULL) {
            skb = dev_alloc_skb(np->rx_buf_sz);
            np->rx_info[entry].skb = skb;
            if (skb == NULL)
                break;                          /* Better luck next round. */
            np->rx_info[entry].mapping =
                pci_map_single(np->pci_dev, skb->tail,
                               np->rx_buf_sz, PCI_DMA_FROMDEVICE);
            skb->dev = dev;                     /* Mark as being used by this device. */
            np->rx_ring[entry].rxaddr =
                cpu_to_le32(np->rx_info[entry].mapping | RxDescValid);
        }
        if (entry == RX_RING_SIZE - 1)
            np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing);
        /* We could defer this until later... */
        writew(entry, dev->base_addr + RxDescQIdx);
    }

    if (debug > 5)
        printk(KERN_DEBUG "  exiting netdev_rx() status of %d was %8.8x.\n",
               np->rx_done, desc_status);

    /* Restart Rx engine if stopped. */
    return 0;
}


static void netdev_media_change(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    u16 reg0, reg1, reg4, reg5;
    u32 new_tx_mode;

    /* Reset the latched status bits first. */
    mdio_read(dev, np->phys[0], MII_BMCR);
    mdio_read(dev, np->phys[0], MII_BMSR);

    reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
    reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

    if (reg1 & BMSR_LSTATUS) {
        /* The link is up. */
        if (reg0 & BMCR_ANENABLE) {
            /* Autonegotiation is enabled. */
            reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
            reg5 = mdio_read(dev, np->phys[0], MII_LPA);
            if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
                np->speed100 = 1;
                np->full_duplex = 1;
            } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
                np->speed100 = 1;
                np->full_duplex = 0;
            } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
                np->speed100 = 0;
                np->full_duplex = 1;
            } else {
                np->speed100 = 0;
                np->full_duplex = 0;
            }
        } else {
            /* Autonegotiation is disabled. */
            if (reg0 & BMCR_SPEED100)
                np->speed100 = 1;
            else
                np->speed100 = 0;
            if (reg0 & BMCR_FULLDPLX)
                np->full_duplex = 1;
            else
                np->full_duplex = 0;
        }
        printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
               dev->name,
               np->speed100 ? "100" : "10",
               np->full_duplex ? "full" : "half");

        new_tx_mode = np->tx_mode & ~0x2;       /* duplex setting */
        if (np->full_duplex)
            new_tx_mode |= 2;
        if (np->tx_mode != new_tx_mode) {
            np->tx_mode = new_tx_mode;
            writel(np->tx_mode | 0x8000, ioaddr + TxMode);
            udelay(1000);
            writel(np->tx_mode, ioaddr + TxMode);
        }
    } else {
        printk(KERN_DEBUG "%s: Link is down\n", dev->name);
    }
}


static void netdev_error(struct net_device *dev, int intr_status)
{
    struct netdev_private *np = dev->priv;

    /* Came close to underrunning the Tx FIFO, increase the threshold. */
    if (intr_status & IntrTxDataLow) {
        writel(++np->tx_threshold, dev->base_addr + TxThreshold);
        printk(KERN_NOTICE "%s: Increasing Tx FIFO threshold to %d bytes\n",
               dev->name, np->tx_threshold * 16);
    }
    if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange
                         | IntrStatsMax | IntrTxDataLow | IntrPCIPad)) && debug)
        printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
               dev->name, intr_status);
    /* Hmmmmm, it's not clear how to recover from DMA faults. */
    if (intr_status & IntrDMAErr)
        np->stats.tx_fifo_errors++;
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct netdev_private *np = dev->priv;

    /* This adapter architecture needs no SMP locks. */
    np->stats.tx_bytes = readl(ioaddr + 0x57010);
    np->stats.rx_bytes = readl(ioaddr + 0x57044);
    np->stats.tx_packets = readl(ioaddr + 0x57000);
    np->stats.tx_aborted_errors =
        readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
    np->stats.tx_window_errors = readl(ioaddr + 0x57018);
    np->stats.collisions = readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

    /* The chip only needs to report frames that were silently dropped. */
    np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
    writew(0, ioaddr + RxDMAStatus);
    np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
    np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
    np->stats.rx_length_errors = readl(ioaddr + 0x57058);
    np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

    return &np->stats;
}


/* The little-endian AUTODIN II ethernet CRC calculations.
   A big-endian version is also available.
   This is slow but compact code.  Do not use this routine for bulk data,
   use a table-based routine instead.
   This is common code and should be moved to net/core/crc.c.
   Chips may use the upper or lower CRC bits, and may reverse and/or
   invert them.  Select the endian-ness that results in minimal
   calculations. */
static unsigned const ethernet_polynomial_le = 0xedb88320U;
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
    unsigned int crc = 0xffffffff;              /* Initial value. */
    while (--length >= 0) {
        unsigned char current_octet = *data++;
        int bit;
        for (bit = 8; --bit >= 0; current_octet >>= 1) {
            if ((crc ^ current_octet) & 1) {
                crc >>= 1;
                crc ^= ethernet_polynomial_le;
            } else
                crc >>= 1;
        }
    }
    return crc;
}