starfire.c
/* The busy-wait will occur before a read. */
        return;
}

static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        int i, retval;

        /* Do we ever need to reset the chip??? */

        COMPAT_MOD_INC_USE_COUNT;

        retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
        if (retval) {
                COMPAT_MOD_DEC_USE_COUNT;
                return retval;
        }

        /* Disable the Rx and Tx, and reset the chip. */
        writel(0, ioaddr + GenCtrl);
        writel(1, ioaddr + PCIDeviceConfig);

        if (debug > 1)
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                       dev->name, dev->irq);

        /* Allocate the various queues, failing gracefully. */
        if (np->tx_done_q == 0)
                np->tx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE,
                                                     &np->tx_done_q_dma);
        if (np->rx_done_q == 0)
                np->rx_done_q = pci_alloc_consistent(np->pci_dev,
                                                     sizeof(struct rx_done_desc) * DONE_Q_SIZE,
                                                     &np->rx_done_q_dma);
        if (np->tx_ring == 0)
                np->tx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE,
                                                   &np->tx_ring_dma);
        if (np->rx_ring == 0)
                np->rx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE,
                                                   &np->rx_ring_dma);
        if (np->tx_done_q == 0 || np->rx_done_q == 0 ||
            np->rx_ring == 0 || np->tx_ring == 0) {
                /* Free whatever subset of the queues was allocated. */
                if (np->tx_done_q)
                        pci_free_consistent(np->pci_dev, PAGE_SIZE,
                                            np->tx_done_q, np->tx_done_q_dma);
                if (np->rx_done_q)
                        pci_free_consistent(np->pci_dev,
                                            sizeof(struct rx_done_desc) * DONE_Q_SIZE,
                                            np->rx_done_q, np->rx_done_q_dma);
                if (np->tx_ring)
                        pci_free_consistent(np->pci_dev, PAGE_SIZE,
                                            np->tx_ring, np->tx_ring_dma);
                if (np->rx_ring)
                        pci_free_consistent(np->pci_dev, PAGE_SIZE,
                                            np->rx_ring, np->rx_ring_dma);
                COMPAT_MOD_DEC_USE_COUNT;
                return -ENOMEM;
        }

        init_ring(dev);

        /* Set the size of the Rx buffers. */
        writel((np->rx_buf_sz << RxBufferLenShift) |
               (0 << RxMinDescrThreshShift) |
               RxPrefetchMode | RxVariableQ | RxDescSpace4,
               ioaddr + RxDescQCtrl);

#ifdef ZEROCOPY
        /* Set Tx descriptor to type 0 and spacing to 64 bytes. */
        writel((2 << TxHiPriFIFOThreshShift) |
               (0 << TxPadLenShift) |
               (4 << TxDMABurstSizeShift) |
               TxDescSpace64 | TxDescType0,
               ioaddr + TxDescCtrl);
#else /* not ZEROCOPY */
        /* Set Tx descriptor to type 1 and padding to 0 bytes. */
        writel((2 << TxHiPriFIFOThreshShift) |
               (0 << TxPadLenShift) |
               (4 << TxDMABurstSizeShift) |
               TxDescSpaceUnlim | TxDescType1,
               ioaddr + TxDescCtrl);
#endif /* not ZEROCOPY */

#if defined(ADDR_64BITS) && defined(__alpha__)
        /* XXX We really need 64-bit PCI DMA interfaces too... -DaveM */
        writel(np->rx_ring_dma >> 32, ioaddr + RxDescQHiAddr);
        writel(np->tx_ring_dma >> 32, ioaddr + TxRingHiAddr);
#else
        writel(0, ioaddr + RxDescQHiAddr);
        writel(0, ioaddr + TxRingHiAddr);
        writel(0, ioaddr + CompletionHiAddr);
#endif
        writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
        writel(np->tx_ring_dma, ioaddr + TxRingPtr);

        writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
#ifdef full_rx_status
        writel(np->rx_done_q_dma | RxComplType3 | (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);
#else /* not full_rx_status */
#ifdef csum_rx_status
        writel(np->rx_done_q_dma | RxComplType2 | (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);
#else /* not csum_rx_status */
        writel(np->rx_done_q_dma | RxComplType0 | (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);
#endif /* not csum_rx_status */
#endif /* not full_rx_status */

        if (debug > 1)
                printk(KERN_DEBUG "%s: Filling in the station address.\n",
                       dev->name);

        /* Fill both the unused Tx SA register and the Rx perfect filter. */
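        /*
         * Note on the layout below: the 6-byte station address goes into
         * the Tx SA register in reverse byte order, and the Rx perfect
         * filter is 16 identical 16-byte entries, each holding the MAC
         * address as three big-endian 16-bit halves at 4-byte strides.
         */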
        for (i = 0; i < 6; i++)
                writeb(dev->dev_addr[i], ioaddr + StationAddr + 5 - i);
        for (i = 0; i < 16; i++) {
                u16 *eaddrs = (u16 *)dev->dev_addr;
                long setup_frm = ioaddr + PerfFilterTable + i * 16;
                writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
                writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
                writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
        }

        /* Initialize other registers. */
        /* Configure the PCI bus bursts and FIFO thresholds. */
        np->tx_mode = 0x0C04;           /* modified when link is up. */
        writel(0x8000 | np->tx_mode, ioaddr + TxMode);
        udelay(1000);
        writel(np->tx_mode, ioaddr + TxMode);

        np->tx_threshold = 4;
        writel(np->tx_threshold, ioaddr + TxThreshold);

        interrupt_mitigation &= 0x1f;
        np->intr_mitigation = interrupt_mitigation;
        writel(np->intr_mitigation, ioaddr + IntrTimerCtrl);

        netif_start_if(dev);
        netif_start_queue(dev);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n",
                       dev->name);
        set_rx_mode(dev);

        np->advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
        check_duplex(dev);

        /* Enable GPIO interrupts on link change. */
        writel(0x0f00ff00, ioaddr + GPIOCtrl);

        /* Set the interrupt mask and enable PCI interrupts. */
        writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
               IntrTxDone | IntrStatsMax | IntrLinkChange |
               IntrNormalSummary | IntrAbnormalSummary |
               IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
               ioaddr + IntrEnable);
        writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
               ioaddr + PCIDeviceConfig);

#ifdef HAS_FIRMWARE
        /* Load Rx/Tx firmware into the frame processors. */
        for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
                writel(cpu_to_le32(firmware_rx[i]),
                       ioaddr + RxGfpMem + i * 4);
        for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
                writel(cpu_to_le32(firmware_tx[i]),
                       ioaddr + TxGfpMem + i * 4);
        /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
        writel(0x003F, ioaddr + GenCtrl);
#else /* not HAS_FIRMWARE */
        /* Enable the Rx and Tx units only. */
        writel(0x000F, ioaddr + GenCtrl);
#endif /* not HAS_FIRMWARE */

        if (debug > 2)
                printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

        return 0;
}

static void check_duplex(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        u16 reg0;

        mdio_write(dev, np->phys[0], MII_ADVERTISE, np->advertising);
        mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
        udelay(500);
        while (mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
                ;
        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

        if (np->autoneg) {
                reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
        } else {
                reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
                if (np->speed100)
                        reg0 |= BMCR_SPEED100;
                if (np->full_duplex)
                        reg0 |= BMCR_FULLDPLX;
                printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
                       dev->name,
                       np->speed100 ? "100" : "10",
                       np->full_duplex ? "full" : "half");
        }
        mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}

static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;

        printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, resetting...\n",
               dev->name, (int)readl(ioaddr + IntrStatus));

#ifndef __alpha__
        {
                int i;

                printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
                for (i = 0; i < RX_RING_SIZE; i++)
                        printk(" %8.8x",
                               (unsigned int)le32_to_cpu(np->rx_ring[i].rxaddr));
                printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring);
                for (i = 0; i < TX_RING_SIZE; i++)
                        printk(" %4.4x", le32_to_cpu(np->tx_ring[i].status));
                printk("\n");
        }
#endif

        /* Perhaps we should reinitialize the hardware here. */

        /* Stop and restart the chip's Tx processes. */

        /* Trigger an immediate transmit demand. */
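        /*
         * Recovery here is deliberately minimal: refreshing trans_start
         * keeps the transmit watchdog from firing again immediately, and
         * netif_wake_queue() lets the stack resume queueing packets. The
         * chip itself is not reset (see the comments above).
         */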
        dev->trans_start = jiffies;
        np->stats.tx_errors++;
        netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        int i;

        np->tx_full = 0;
        np->cur_rx = np->cur_tx = 0;
        np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;

        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
                np->rx_info[i].skb = skb;
                if (skb == NULL)
                        break;
                np->rx_info[i].mapping =
                        pci_map_single(np->pci_dev, skb->tail,
                                       np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                skb->dev = dev;         /* Mark as being used by this device. */
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr =
                        cpu_to_le32(np->rx_info[i].mapping | RxDescValid);
        }
        writew(i - 1, dev->base_addr + RxDescQIdx);
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Clear the remainder of the Rx buffer ring. */
        for ( ; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].rxaddr = 0;
                np->rx_info[i].skb = NULL;
                np->rx_info[i].mapping = 0;
        }

        /* Mark the last entry as wrapping the ring. */
        np->rx_ring[i - 1].rxaddr |= cpu_to_le32(RxDescEndRing);

        /* Clear the completion rings. */
        for (i = 0; i < DONE_Q_SIZE; i++) {
                np->rx_done_q[i].status = 0;
                np->tx_done_q[i].status = 0;
        }

        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_info[i].skb = NULL;
                np->tx_info[i].first_mapping = 0;
#ifdef ZEROCOPY
                {
                        int j;
                        for (j = 0; j < MAX_STARFIRE_FRAGS; j++)
                                np->tx_info[i].frag_mapping[j] = 0;
                }
#endif /* ZEROCOPY */
                np->tx_ring[i].status = 0;
        }
        return;
}

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        unsigned int entry;
#ifdef ZEROCOPY
        int i;
#endif

        kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);

        /* Caution: the write order is important here; set the field with
           the "ownership" bits last. */

        /* Calculate the next Tx descriptor entry. */
        entry = np->cur_tx % TX_RING_SIZE;

#if defined(ZEROCOPY) && defined(HAS_FIRMWARE) && defined(HAS_BROKEN_FIRMWARE)
        {
                int has_bad_length = 0;

                if (skb_first_frag_len(skb) == 1)
                        has_bad_length = 1;
                else {
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                                if (skb_shinfo(skb)->frags[i].size == 1) {
                                        has_bad_length = 1;
                                        break;
                                }
                }
                if (has_bad_length)
                        skb_checksum_help(skb);
        }
#endif /* ZEROCOPY && HAS_FIRMWARE && HAS_BROKEN_FIRMWARE */

        np->tx_info[entry].skb = skb;
        np->tx_info[entry].first_mapping =
                pci_map_single(np->pci_dev, skb->data,
                               skb_first_frag_len(skb), PCI_DMA_TODEVICE);

        np->tx_ring[entry].first_addr =
                cpu_to_le32(np->tx_info[entry].first_mapping);
#ifdef ZEROCOPY
        np->tx_ring[entry].first_len = cpu_to_le32(skb_first_frag_len(skb));
        np->tx_ring[entry].total_len = cpu_to_le32(skb->len);
        /* Add "| TxDescIntr" to generate Tx-done interrupts. */
        np->tx_ring[entry].status = cpu_to_le32(TxDescID | TxCRCEn);
        np->tx_ring[entry].nbufs = cpu_to_le32(skb_shinfo(skb)->nr_frags + 1);
#else /* not ZEROCOPY */
        /* Add "| TxDescIntr" to generate Tx-done interrupts. */
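        /*
         * Type 1 descriptors pack the frame length into the low bits of
         * the status word alongside the descriptor ID and CRC-enable
         * flags; the bare "1 << 16" below presumably encodes a buffer
         * count of one, mirroring the separate nbufs field that the
         * ZEROCOPY (type 0) path fills in above.
         */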
        np->tx_ring[entry].status =
                cpu_to_le32(skb->len | TxDescID | TxCRCEn | 1 << 16);
#endif /* not ZEROCOPY */

        if (entry >= TX_RING_SIZE - 1)          /* Wrap ring */
                np->tx_ring[entry].status |=
                        cpu_to_le32(TxRingWrap | TxDescIntr);

#ifdef ZEROCOPY
        if (skb->ip_summed == CHECKSUM_HW)
                np->tx_ring[entry].status |= cpu_to_le32(TxCalTCP);
#endif /* ZEROCOPY */

        if (debug > 5) {
#ifdef ZEROCOPY
                printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x nbufs %d len %4.4x/%4.4x.\n",
                       dev->name, np->cur_tx, entry,
                       le32_to_cpu(np->tx_ring[entry].status),
                       le32_to_cpu(np->tx_ring[entry].nbufs),
                       le32_to_cpu(np->tx_ring[entry].first_len),
                       le32_to_cpu(np->tx_ring[entry].total_len));
#else /* not ZEROCOPY */
                printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x.\n",
                       dev->name, np->cur_tx, entry,
                       le32_to_cpu(np->tx_ring[entry].status));
#endif /* not ZEROCOPY */
        }

#ifdef ZEROCOPY
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i];

                /* We already have the proper value in entry. */
                np->tx_info[entry].frag_mapping[i] =
                        pci_map_single(np->pci_dev,
                                       page_address(this_frag->page) +
                                       this_frag->page_offset,
                                       this_frag->size, PCI_DMA_TODEVICE);
                np->tx_ring[entry].frag[i].addr =
                        cpu_to_le32(np->tx_info[entry].frag_mapping[i]);
                np->tx_ring[entry].frag[i].len = cpu_to_le32(this_frag->size);
                if (debug > 5) {
                        printk(KERN_DEBUG "%s: Tx #%d frag %d len %4.4x.\n",
                               dev->name, np->cur_tx, i,
                               le32_to_cpu(np->tx_ring[entry].frag[i].len));
                }
        }
#endif /* ZEROCOPY */

        np->cur_tx++;

        if (entry >= TX_RING_SIZE - 1)          /* Wrap ring */
                entry = -1;
        entry++;

        /* Non-x86: explicitly flush descriptor cache lines here. */

        /* Ensure everything is written back above before the transmit is
           initiated. - Jes */
        wmb();

        /* Update the producer index. */
        writel(entry * (sizeof(struct starfire_tx_desc) / 8),
               dev->base_addr + TxProducerIdx);

        if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
                np->tx_full = 1;
                netif_stop_queue(dev);
        }
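        /*
         * TxProducerIdx appears to be counted in 8-byte units, hence the
         * sizeof(descriptor) / 8 scaling above.  The queue is stopped one
         * slot early (TX_RING_SIZE - 1) so that cur_tx can never catch up
         * with dirty_tx and make a full ring indistinguishable from an
         * empty one.
         */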