📄 pcnet32.c
字号:
if (pcnet32_debug > 1) printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", dev->name, dev->irq, (u32) (lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)), (u32) (lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)), (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block))); /* set/reset autoselect bit */ val = lp->a.read_bcr (ioaddr, 2) & ~2; if (lp->options & PORT_ASEL) val |= 2; lp->a.write_bcr (ioaddr, 2, val); /* handle full duplex setting */ if (lp->full_duplex) { val = lp->a.read_bcr (ioaddr, 9) & ~3; if (lp->options & PORT_FD) { val |= 1; if (lp->options == (PORT_FD | PORT_AUI)) val |= 2; } lp->a.write_bcr (ioaddr, 9, val); } /* set/reset GPSI bit in test register */ val = lp->a.read_csr (ioaddr, 124) & ~0x10; if ((lp->options & PORT_PORTSEL) == PORT_GPSI) val |= 0x10; lp->a.write_csr (ioaddr, 124, val); if (lp->mii & !(lp->options & PORT_ASEL)) { val = lp->a.read_bcr (ioaddr, 32) & ~0x38; /* disable Auto Negotiation, set 10Mpbs, HD */ if (lp->options & PORT_FD) val |= 0x10; if (lp->options & PORT_100) val |= 0x08; lp->a.write_bcr (ioaddr, 32, val); }#ifdef DO_DXSUFLO if (lp->dxsuflo) { /* Disable transmit stop on underflow */ val = lp->a.read_csr (ioaddr, 3); val |= 0x40; lp->a.write_csr (ioaddr, 3, val); }#endif if (lp->ltint) { /* Enable TxDone-intr inhibitor */ val = lp->a.read_csr (ioaddr, 5); val |= (1<<14); lp->a.write_csr (ioaddr, 5, val); } lp->init_block.mode = le16_to_cpu((lp->options & PORT_PORTSEL) << 7); lp->init_block.filter[0] = 0x00000000; lp->init_block.filter[1] = 0x00000000; if (pcnet32_init_ring(dev)) return -ENOMEM; /* Re-initialize the PCNET32, and start it when done. */ lp->a.write_csr (ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) &0xffff); lp->a.write_csr (ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) >> 16); lp->a.write_csr (ioaddr, 4, 0x0915); lp->a.write_csr (ioaddr, 0, 0x0001); netif_start_queue(dev); i = 0; while (i++ < 100) if (lp->a.read_csr (ioaddr, 0) & 0x0100) break; /* * We used to clear the InitDone bit, 0x0100, here but Mark Stockton * reports that doing so triggers a bug in the '974. */ lp->a.write_csr (ioaddr, 0, 0x0042); if (pcnet32_debug > 2) printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", dev->name, i, (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)), lp->a.read_csr (ioaddr, 0)); MOD_INC_USE_COUNT; return 0; /* Always succeed */}/* * The LANCE has been halted for one reason or another (busmaster memory * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, * etc.). Modern LANCE variants always reload their ring-buffer * configuration when restarted, so we must reinitialize our ring * context before restarting. As part of this reinitialization, * find all packets still on the Tx ring and pretend that they had been * sent (in effect, drop the packets on the floor) - the higher-level * protocols will time out and retransmit. It'd be better to shuffle * these skbs to a temp list and then actually re-Tx them after * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com */static void pcnet32_purge_tx_ring(struct net_device *dev){ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv; int i; for (i = 0; i < TX_RING_SIZE; i++) { if (lp->tx_skbuff[i]) { pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); dev_kfree_skb(lp->tx_skbuff[i]); lp->tx_skbuff[i] = NULL; lp->tx_dma_addr[i] = 0; } }}/* Initialize the PCNET32 Rx and Tx rings. */static intpcnet32_init_ring(struct net_device *dev){ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv; int i; lp->tx_full = 0; lp->cur_rx = lp->cur_tx = 0; lp->dirty_rx = lp->dirty_tx = 0; for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; if (rx_skbuff == NULL) { if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) { /* there is not much, we can do at this point */ printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n",dev->name); return -1; } skb_reserve (rx_skbuff, 2); } lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail, rx_skbuff->len, PCI_DMA_FROMDEVICE); lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]); lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ); lp->rx_ring[i].status = le16_to_cpu(0x8000); } /* The Tx buffer address is filled in as needed, but we do need to clear the upper ownership bit. */ for (i = 0; i < TX_RING_SIZE; i++) { lp->tx_ring[i].base = 0; lp->tx_ring[i].status = 0; lp->tx_dma_addr[i] = 0; } lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS); for (i = 0; i < 6; i++) lp->init_block.phys_addr[i] = dev->dev_addr[i]; lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)); lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)); return 0;}static voidpcnet32_restart(struct net_device *dev, unsigned int csr0_bits){ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv; unsigned long ioaddr = dev->base_addr; int i; pcnet32_purge_tx_ring(dev); if (pcnet32_init_ring(dev)) return; /* ReInit Ring */ lp->a.write_csr (ioaddr, 0, 1); i = 0; while (i++ < 100) if (lp->a.read_csr (ioaddr, 0) & 0x0100) break; lp->a.write_csr (ioaddr, 0, csr0_bits);}static voidpcnet32_tx_timeout (struct net_device *dev){ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv; unsigned int ioaddr = dev->base_addr; /* Transmitter timeout, serious problems. */ printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n", dev->name, lp->a.read_csr (ioaddr, 0)); lp->a.write_csr (ioaddr, 0, 0x0004); lp->stats.tx_errors++; if (pcnet32_debug > 2) { int i; printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", lp->cur_rx); for (i = 0 ; i < RX_RING_SIZE; i++) printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", lp->rx_ring[i].base, -lp->rx_ring[i].buf_length, lp->rx_ring[i].msg_length, (unsigned)lp->rx_ring[i].status); for (i = 0 ; i < TX_RING_SIZE; i++) printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", lp->tx_ring[i].base, -lp->tx_ring[i].length, lp->tx_ring[i].misc, (unsigned)lp->tx_ring[i].status); printk("\n"); } pcnet32_restart(dev, 0x0042); dev->trans_start = jiffies; netif_start_queue(dev);}static intpcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev){ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv; unsigned int ioaddr = dev->base_addr; u16 status; int entry; unsigned long flags; if (pcnet32_debug > 3) { printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", dev->name, lp->a.read_csr (ioaddr, 0)); } spin_lock_irqsave(&lp->lock, flags); /* Default status -- will not enable Successful-TxDone * interrupt when that option is available to us. */ status = 0x8300; if ((lp->ltint) && ((lp->cur_tx - lp->dirty_tx == TX_RING_SIZE/2) || (lp->cur_tx - lp->dirty_tx >= TX_RING_SIZE-2))) { /* Enable Successful-TxDone interrupt if we have * 1/2 of, or nearly all of, our ring buffer Tx'd * but not yet cleaned up. Thus, most of the time, * we will not enable Successful-TxDone interrupts. */ status = 0x9300; } /* Fill in a Tx ring entry */ /* Mask to ring buffer boundary. */ entry = lp->cur_tx & TX_RING_MOD_MASK; /* Caution: the write order is important here, set the base address with the "ownership" bits last. */ lp->tx_ring[entry].length = le16_to_cpu(-skb->len); lp->tx_ring[entry].misc = 0x00000000; lp->tx_skbuff[entry] = skb; lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]); lp->tx_ring[entry].status = le16_to_cpu(status); lp->cur_tx++; lp->stats.tx_bytes += skb->len; /* Trigger an immediate send poll. */ lp->a.write_csr (ioaddr, 0, 0x0048); dev->trans_start = jiffies; if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0) netif_start_queue(dev); else { lp->tx_full = 1; netif_stop_queue(dev); } spin_unlock_irqrestore(&lp->lock, flags); return 0;}/* The PCNET32 interrupt handler. */static voidpcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs){ struct net_device *dev = (struct net_device *)dev_id; struct pcnet32_private *lp; unsigned long ioaddr; u16 csr0,rap; int boguscnt = max_interrupt_work; int must_restart; if (dev == NULL) { printk (KERN_DEBUG "pcnet32_interrupt(): irq %d for unknown device.\n", irq); return; } ioaddr = dev->base_addr; lp = (struct pcnet32_private *)dev->priv; spin_lock(&lp->lock); rap = lp->a.read_rap(ioaddr); while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8600 && --boguscnt >= 0) { /* Acknowledge all of the current interrupt sources ASAP. */ lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f); must_restart = 0; if (pcnet32_debug > 5) printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", dev->name, csr0, lp->a.read_csr (ioaddr, 0)); if (csr0 & 0x0400) /* Rx interrupt */ pcnet32_rx(dev); if (csr0 & 0x0200) { /* Tx-done interrupt */ unsigned int dirty_tx = lp->dirty_tx; while (dirty_tx < lp->cur_tx) { int entry = dirty_tx & TX_RING_MOD_MASK; int status = (short)le16_to_cpu(lp->tx_ring[entry].status); if (status < 0) break; /* It still hasn't been Txed */ lp->tx_ring[entry].base = 0; if (status & 0x4000) { /* There was an major error, log it. */ int err_status = le32_to_cpu(lp->tx_ring[entry].misc); lp->stats.tx_errors++; if (err_status & 0x04000000) lp->stats.tx_aborted_errors++; if (err_status & 0x08000000) lp->stats.tx_carrier_errors++; if (err_status & 0x10000000) lp->stats.tx_window_errors++;#ifndef DO_DXSUFLO if (err_status & 0x40000000) { lp->stats.tx_fifo_errors++; /* Ackk! On FIFO errors the Tx unit is turned off! */ /* Remove this verbosity later! */ printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", dev->name, csr0); must_restart = 1; }#else if (err_status & 0x40000000) { lp->stats.tx_fifo_errors++; if (! lp->dxsuflo) { /* If controller doesn't recover ... */ /* Ackk! On FIFO errors the Tx unit is turned off! */ /* Remove this verbosity later! */ printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", dev->name, csr0); must_restart = 1; } }#endif } else { if (status & 0x1800) lp->stats.collisions++; lp->stats.tx_packets++; } /* We must free the original skb */ if (lp->tx_skbuff[entry]) { pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry], lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(lp->tx_skbuff[entry]); lp->tx_skbuff[entry] = 0; lp->tx_dma_addr[entry] = 0; } dirty_tx++; }#ifndef final_version if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d, full=%d.\n", dirty_tx, lp->cur_tx, lp->tx_full); dirty_tx += TX_RING_SIZE; }#endif if (lp->tx_full && netif_queue_stopped(dev) && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) { /* The ring is no longer full, clear tbusy. */ lp->tx_full = 0; netif_wake_queue (dev); } lp->dirty_tx = dirty_tx; }
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -