via-rhine.c
		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pdev,
					 np->rx_skbuff_dma[i],
					 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = 0;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct netdev_private *np = dev->priv;
	dma_addr_t next;
	int i;

	np->dirty_tx = np->cur_tx = 0;
	next = np->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].tx_status = 0;
		np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
		next += sizeof(struct tx_desc);
		np->tx_ring[i].next_desc = cpu_to_le32(next);
		np->tx_buf[i] = &np->tx_bufs[i * PKT_BUF_SZ];
	}
	/* Close the ring: the last descriptor points back to the first. */
	np->tx_ring[i-1].next_desc = cpu_to_le32(np->tx_ring_dma);
}

static void free_tbufs(struct net_device* dev)
{
	struct netdev_private *np = dev->priv;
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].tx_status = 0;
		np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
		np->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (np->tx_skbuff[i]) {
			if (np->tx_skbuff_dma[i]) {
				pci_unmap_single(np->pdev,
						 np->tx_skbuff_dma[i],
						 np->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = 0;
		np->tx_buf[i] = 0;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	writew(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure the FIFO thresholds. */
	writeb(0x20, ioaddr + TxConfig);	/* Initial threshold 32 bytes */
	np->tx_thresh = 0x20;
	np->rx_thresh = 0x60;		/* Written in via_rhine_set_rx_mode(). */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	writel(np->rx_ring_dma, ioaddr + RxRingPtr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	via_rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writew(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
	       IntrRxDropped | IntrTxDone | IntrTxAbort | IntrTxUnderrun |
	       IntrPCIErr | IntrStatsMax | IntrLinkChange | IntrMIIChange,
	       ioaddr + IntrEnable);

	np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
	if (np->mii_if.duplex_lock)
		np->chip_cmd |= CmdFDuplex;
	writew(np->chip_cmd, ioaddr + ChipCmd);

	via_rhine_check_duplex(dev);

	/* The LED outputs of various MII xcvrs should be configured. */
	/* For NS or Mison phys, turn on bit 1 in register 0x17 */
	/* For ESI phys, turn on bit 7 in register 0x17. */
	/* Note: the ternary must be parenthesized. '|' binds tighter than
	   '?:', so without the parentheses the OR'ed register value would
	   become the condition of the conditional expression. */
	mdio_write(dev, np->phys[0], 0x17,
		   mdio_read(dev, np->phys[0], 0x17) |
		   ((np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001));
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	long ioaddr = dev->base_addr;
	int boguscnt = 1024;

	/* Wait for a previous command to complete. */
	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
		;
	writeb(0x00, ioaddr + MIICmd);
	writeb(phy_id, ioaddr + MIIPhyAddr);
	writeb(regnum, ioaddr + MIIRegAddr);
	writeb(0x40, ioaddr + MIICmd);		/* Trigger read */
	boguscnt = 1024;
	while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
		;
	return readw(ioaddr + MIIData);
}
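/* Illustrative sketch, not part of the original driver: how a caller
 * might use mdio_read() to sample the current link state. The BMSR link
 * bit is latched low by the PHY, so it is read twice to get the live
 * value. The helper name via_rhine_link_up() is hypothetical. */
#if 0
static int via_rhine_link_up(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;

	mdio_read(dev, np->phys[0], MII_BMSR);	/* discard latched value */
	return (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) != 0;
}
#endif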
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int boguscnt = 1024;

	if (phy_id == np->phys[0]) {
		switch (regnum) {
		case 0:				/* Is user forcing speed/duplex? */
			if (value & 0x9000)	/* Autonegotiation. */
				np->mii_if.duplex_lock = 0;
			else
				np->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
			break;
		case 4:
			np->mii_if.advertising = value;
			break;
		}
	}

	/* Wait for a previous command to complete. */
	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
		;
	writeb(0x00, ioaddr + MIICmd);
	writeb(phy_id, ioaddr + MIIPhyAddr);
	writeb(regnum, ioaddr + MIIRegAddr);
	writew(value, ioaddr + MIIData);
	writeb(0x20, ioaddr + MIICmd);		/* Trigger write. */
}

static int via_rhine_open(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* Reset the chip. */
	writew(CmdReset, ioaddr + ChipCmd);

	i = request_irq(np->pdev->irq, &via_rhine_interrupt, SA_SHIRQ,
			dev->name, dev);
	if (i)
		return i;

	if (debug > 1)
		printk(KERN_DEBUG "%s: via_rhine_open() irq %d.\n",
		       dev->name, np->pdev->irq);

	i = alloc_ring(dev);
	if (i)
		return i;
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	wait_for_reset(dev, dev->name);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, readw(ioaddr + ChipCmd),
		       mdio_read(dev, np->phys[0], MII_BMSR));

	netif_start_queue(dev);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 2;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &via_rhine_timer;	/* timer handler */
	add_timer(&np->timer);

	return 0;
}

static void via_rhine_check_duplex(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	if (np->mii_if.duplex_lock || mii_lpa == 0xffff)
		return;
	/* 0x0100 = 100BASE-TX full duplex; 0x01C0 covers 100-FD/100-HD/10-FD,
	   so 0x0040 alone means the best common mode is 10BASE-T full duplex. */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (debug)
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
			       " partner capability of %4.4x.\n", dev->name,
			       duplex ? "full" : "half", np->phys[0], mii_lpa);
		if (duplex)
			np->chip_cmd |= CmdFDuplex;
		else
			np->chip_cmd &= ~CmdFDuplex;
		writew(np->chip_cmd, ioaddr + ChipCmd);
	}
}

static void via_rhine_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;
	int mii_status;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
		       dev->name, readw(ioaddr + IntrStatus));
	}

	spin_lock_irq(&np->lock);

	via_rhine_check_duplex(dev);

	/* make IFF_RUNNING follow the MII status bit "Link established" */
	mii_status = mdio_read(dev, np->phys[0], MII_BMSR);
	if ((mii_status & MIILink) != (np->mii_status & MIILink)) {
		if (mii_status & MIILink)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
	}
	np->mii_status = mii_status;

	spin_unlock_irq(&np->lock);

	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
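/* Illustrative sketch, not part of the original driver: the duplex
 * resolution in via_rhine_check_duplex() distilled into a standalone
 * helper. The name mii_resolve_full_duplex() is hypothetical. */
#if 0
static int mii_resolve_full_duplex(int advertising, int lpa)
{
	int negotiated = advertising & lpa;

	/* Prefer 100BASE-TX-FD; otherwise full duplex only when 10BASE-T-FD
	   is the sole survivor of the 100-FD/100-HD/10-FD group. */
	return (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
}
#endif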
static void via_rhine_tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, readw(ioaddr + IntrStatus),
	       mdio_read(dev, np->phys[0], MII_BMSR));

	dev->if_port = 0;

	/* protect against concurrent rx interrupts */
	disable_irq(np->pdev->irq);

	spin_lock(&np->lock);

	/* Reset the chip. */
	writew(CmdReset, ioaddr + ChipCmd);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	wait_for_reset(dev, dev->name);
	init_registers(dev);

	spin_unlock(&np->lock);
	enable_irq(np->pdev->irq);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;

	if ((np->drv_flags & ReqTxAlign) &&
	    (((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
	     skb->ip_summed == CHECKSUM_HW)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			np->tx_skbuff[entry] = NULL;
			np->stats.tx_dropped++;
			return 0;
		}
		skb_copy_and_csum_dev(skb, np->tx_buf[entry]);
		np->tx_skbuff_dma[entry] = 0;
		np->tx_ring[entry].addr = cpu_to_le32(np->tx_bufs_dma +
				(np->tx_buf[entry] - np->tx_bufs));
	} else {
		np->tx_skbuff_dma[entry] =
			pci_map_single(np->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		np->tx_ring[entry].addr = cpu_to_le32(np->tx_skbuff_dma[entry]);
	}

	np->tx_ring[entry].desc_length =
		cpu_to_le32(0x00E08000 |
			    (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&np->lock);
	wmb();
	np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	np->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);

	if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	long ioaddr;
	u32 intr_status;
	int boguscnt = max_interrupt_work;

	ioaddr = dev->base_addr;

	while ((intr_status = readw(ioaddr + IntrStatus))) {
		/* Acknowledge all of the current interrupt sources ASAP. */
		writew(intr_status & 0xffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
			via_rhine_rx(dev);

		if (intr_status & (IntrTxDone | IntrTxAbort | IntrTxUnderrun |
				   IntrTxAborted))
			via_rhine_tx(dev);

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange | IntrMIIChange |
				   IntrStatsMax | IntrTxAbort | IntrTxUnderrun))
			via_rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, readw(ioaddr + IntrStatus));
}
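/* Note on descriptor ownership (added commentary, not original source):
 * via_rhine_start_tx() above fills in a descriptor, issues wmb(), and
 * only then sets DescOwn, so the NIC never sees a half-written
 * descriptor. The NIC clears DescOwn when it has sent the frame;
 * via_rhine_tx() below scavenges completed entries in order and stops
 * at the first descriptor the NIC still owns. */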
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void via_rhine_tx(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int txstatus = 0, entry = np->dirty_tx % TX_RING_SIZE;

	spin_lock(&np->lock);

	/* find and cleanup dirty tx descriptors */
	while (np->dirty_tx != np->cur_tx) {
		txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
		if (txstatus & DescOwn)
			break;
		if (debug > 6)
			printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
				       dev->name, txstatus);
			np->stats.tx_errors++;
			if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) np->stats.tx_window_errors++;
			if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
			if (txstatus & 0x0002) np->stats.tx_fifo_errors++;
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			np->stats.collisions += (txstatus >> 3) & 15;
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (np->tx_skbuff_dma[entry]) {
			pci_unmap_single(np->pdev,
					 np->tx_skbuff_dma[entry],
					 np->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
		entry = (++np->dirty_tx) % TX_RING_SIZE;
	}
	if ((np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&np->lock);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static void via_rhine_rx(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;