pcnet32.c
    } else {
        if (lp->options & PCNET32_PORT_ASEL) {
            /* enable auto negotiate, setup, disable fd */
            val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
            val |= 0x20;
            lp->a.write_bcr(ioaddr, 32, val);
        }
    }

#ifdef DO_DXSUFLO
    if (lp->dxsuflo) { /* Disable transmit stop on underflow */
        val = lp->a.read_csr(ioaddr, 3);
        val |= 0x40;
        lp->a.write_csr(ioaddr, 3, val);
    }
#endif

    if (lp->ltint) { /* Enable TxDone-intr inhibitor */
        val = lp->a.read_csr(ioaddr, 5);
        val |= (1 << 14);
        lp->a.write_csr(ioaddr, 5, val);
    }

    lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    if (pcnet32_init_ring(dev))
        return -ENOMEM;

    /* Re-initialize the PCNET32, and start it when done. */
    lp->a.write_csr(ioaddr, 1, (lp->dma_addr +
                    offsetof(struct pcnet32_private, init_block)) & 0xffff);
    lp->a.write_csr(ioaddr, 2, (lp->dma_addr +
                    offsetof(struct pcnet32_private, init_block)) >> 16);

    lp->a.write_csr(ioaddr, 4, 0x0915);
    lp->a.write_csr(ioaddr, 0, 0x0001);

    netif_start_queue(dev);

    i = 0;
    while (i++ < 100)
        if (lp->a.read_csr(ioaddr, 0) & 0x0100)
            break;
    /*
     * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
     * reports that doing so triggers a bug in the '974.
     */
    lp->a.write_csr(ioaddr, 0, 0x0042);

    if (pcnet32_debug > 2)
        printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
               dev->name, i,
               (u32)(lp->dma_addr + offsetof(struct pcnet32_private, init_block)),
               lp->a.read_csr(ioaddr, 0));

    MOD_INC_USE_COUNT;

    return 0;   /* Always succeed */
}

/*
 * The LANCE has been halted for one reason or another (busmaster memory
 * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
 * etc.). Modern LANCE variants always reload their ring-buffer
 * configuration when restarted, so we must reinitialize our ring context
 * before restarting. As part of this reinitialization, find all packets
 * still on the Tx ring and pretend that they had been sent (in effect,
 * drop the packets on the floor) - the higher-level protocols will time
 * out and retransmit. It'd be better to shuffle these skbs to a temp
 * list and then actually re-Tx them after restarting the chip, but I'm
 * too lazy to do so right now. dplatt@3do.com
 */
static void
pcnet32_purge_tx_ring(struct net_device *dev)
{
    struct pcnet32_private *lp = dev->priv;
    int i;

    for (i = 0; i < TX_RING_SIZE; i++) {
        if (lp->tx_skbuff[i]) {
            pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
                             lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
            dev_kfree_skb(lp->tx_skbuff[i]);
            lp->tx_skbuff[i] = NULL;
            lp->tx_dma_addr[i] = 0;
        }
    }
}
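/*
 * For reference (these mnemonics are not defined in this file; the decode
 * below is an editorial assumption based on the classic LANCE/PCnet CSR0
 * bit layout): the magic CSR0 values used in pcnet32_open() above and in
 * the routines that follow break down as
 *
 *     0x0001  INIT  - load the init block and initialize the chip
 *     0x0002  STRT  - start the Rx/Tx machines
 *     0x0004  STOP  - stop the controller
 *     0x0008  TDMD  - transmit demand (poll the Tx ring immediately)
 *     0x0040  IENA  - interrupt enable
 *     0x0100  IDON  - initialization done
 *     0x0200  TINT  - transmit interrupt
 *     0x0400  RINT  - receive interrupt
 *     0x8000  ERR   - error summary
 *
 * so 0x0042 is IENA|STRT, 0x0048 is IENA|TDMD, and the polling loop in
 * pcnet32_open() simply waits for IDON to come up after writing INIT.
 */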
/* Initialize the PCNET32 Rx and Tx rings. */
static int
pcnet32_init_ring(struct net_device *dev)
{
    struct pcnet32_private *lp = dev->priv;
    int i;

    lp->tx_full = 0;
    lp->cur_rx = lp->cur_tx = 0;
    lp->dirty_rx = lp->dirty_tx = 0;

    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *rx_skbuff = lp->rx_skbuff[i];

        if (rx_skbuff == NULL) {
            if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ))) {
                /* there is not much we can do at this point */
                printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
                       dev->name);
                return -1;
            }
            skb_reserve(rx_skbuff, 2);
        }
        lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail,
                                            rx_skbuff->len, PCI_DMA_FROMDEVICE);
        lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
        lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ);
        lp->rx_ring[i].status = le16_to_cpu(0x8000);
    }

    /* The Tx buffer address is filled in as needed, but we do need to clear
       the upper ownership bit. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        lp->tx_ring[i].base = 0;
        lp->tx_ring[i].status = 0;
        lp->tx_dma_addr[i] = 0;
    }

    lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
                             offsetof(struct pcnet32_private, rx_ring));
    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
                             offsetof(struct pcnet32_private, tx_ring));
    return 0;
}

static void
pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
{
    struct pcnet32_private *lp = dev->priv;
    unsigned long ioaddr = dev->base_addr;
    int i;

    pcnet32_purge_tx_ring(dev);
    if (pcnet32_init_ring(dev))
        return;

    /* ReInit Ring */
    lp->a.write_csr(ioaddr, 0, 1);
    i = 0;
    while (i++ < 100)
        if (lp->a.read_csr(ioaddr, 0) & 0x0100)
            break;

    lp->a.write_csr(ioaddr, 0, csr0_bits);
}

static void
pcnet32_tx_timeout(struct net_device *dev)
{
    struct pcnet32_private *lp = dev->priv;
    unsigned int ioaddr = dev->base_addr;

    /* Transmitter timeout, serious problems. */
    printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n",
           dev->name, lp->a.read_csr(ioaddr, 0));
    lp->a.write_csr(ioaddr, 0, 0x0004);
    lp->stats.tx_errors++;
    if (pcnet32_debug > 2) {
        int i;

        printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
               lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
               lp->cur_rx);
        for (i = 0; i < RX_RING_SIZE; i++)
            printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
                   lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
                   lp->rx_ring[i].msg_length, (unsigned)lp->rx_ring[i].status);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
                   lp->tx_ring[i].base, -lp->tx_ring[i].length,
                   lp->tx_ring[i].misc, (unsigned)lp->tx_ring[i].status);
        printk("\n");
    }
    pcnet32_restart(dev, 0x0042);

    dev->trans_start = jiffies;
    netif_start_queue(dev);
}
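/*
 * Illustrative sketch of the Tx ring bookkeeping used below (assumed
 * helpers for explanation only; they are not part of the driver and are
 * never called by it): cur_tx and dirty_tx are free-running unsigned
 * counters, TX_RING_SIZE is a power of two, and TX_RING_MOD_MASK is
 * TX_RING_SIZE - 1, so only the masked low bits ever index the ring.
 */
static inline unsigned int pcnet32_tx_in_flight(const struct pcnet32_private *lp)
{
    /* Descriptors handed to the chip but not yet reclaimed; the unsigned
     * subtraction stays correct across counter wrap-around. */
    return lp->cur_tx - lp->dirty_tx;
}

static inline int pcnet32_tx_next_entry(const struct pcnet32_private *lp)
{
    /* Ring slot the next transmit will use, masked to the ring boundary. */
    return lp->cur_tx & TX_RING_MOD_MASK;
}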
static int
pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct pcnet32_private *lp = dev->priv;
    unsigned int ioaddr = dev->base_addr;
    u16 status;
    int entry;
    unsigned long flags;

    if (pcnet32_debug > 3) {
        printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
               dev->name, lp->a.read_csr(ioaddr, 0));
    }

    spin_lock_irqsave(&lp->lock, flags);

    /* Default status -- will not enable Successful-TxDone
     * interrupt when that option is available to us.
     */
    status = 0x8300;
    if ((lp->ltint) &&
        ((lp->cur_tx - lp->dirty_tx == TX_RING_SIZE/2) ||
         (lp->cur_tx - lp->dirty_tx >= TX_RING_SIZE-2))) {
        /* Enable Successful-TxDone interrupt if we have
         * 1/2 of, or nearly all of, our ring buffer Tx'd
         * but not yet cleaned up.  Thus, most of the time,
         * we will not enable Successful-TxDone interrupts.
         */
        status = 0x9300;
    }

    /* Fill in a Tx ring entry */

    /* Mask to ring buffer boundary. */
    entry = lp->cur_tx & TX_RING_MOD_MASK;

    /* Caution: the write order is important here, set the base address
       with the "ownership" bits last. */

    lp->tx_ring[entry].length = le16_to_cpu(-skb->len);

    lp->tx_ring[entry].misc = 0x00000000;

    lp->tx_skbuff[entry] = skb;
    lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len,
                                            PCI_DMA_TODEVICE);
    lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]);
    lp->tx_ring[entry].status = le16_to_cpu(status);

    lp->cur_tx++;
    lp->stats.tx_bytes += skb->len;

    /* Trigger an immediate send poll. */
    lp->a.write_csr(ioaddr, 0, 0x0048);

    dev->trans_start = jiffies;

    if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
        netif_start_queue(dev);
    else {
        lp->tx_full = 1;
        netif_stop_queue(dev);
    }
    spin_unlock_irqrestore(&lp->lock, flags);
    return 0;
}
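/*
 * A note on the acknowledge pattern in the handler below (an editorial
 * reading, not original driver text): the interrupt/status bits in CSR0
 * are cleared by writing them back as ones, so the handler acknowledges
 * by writing back the value it just read, with the low control bits
 * (INIT, STRT, STOP, TDMD and IENA -- mask 0x004f) forced off so the
 * acknowledge cannot restart, stop or poll the chip as a side effect:
 *
 *     csr0 = lp->a.read_csr(ioaddr, 0);
 *     lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
 *
 * Interrupts are enabled again on exit by the final 0x7940 write, which
 * sets IENA while clearing the remaining miscellaneous status bits.
 */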
/* The PCNET32 interrupt handler. */
static void
pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = dev_id;
    struct pcnet32_private *lp;
    unsigned long ioaddr;
    u16 csr0, rap;
    int boguscnt = max_interrupt_work;
    int must_restart;

    if (dev == NULL) {
        printk(KERN_DEBUG "pcnet32_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }

    ioaddr = dev->base_addr;
    lp = dev->priv;

    spin_lock(&lp->lock);

    rap = lp->a.read_rap(ioaddr);
    while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8600 && --boguscnt >= 0) {
        /* Acknowledge all of the current interrupt sources ASAP. */
        lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);

        must_restart = 0;

        if (pcnet32_debug > 5)
            printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
                   dev->name, csr0, lp->a.read_csr(ioaddr, 0));

        if (csr0 & 0x0400)              /* Rx interrupt */
            pcnet32_rx(dev);

        if (csr0 & 0x0200) {            /* Tx-done interrupt */
            unsigned int dirty_tx = lp->dirty_tx;

            while (dirty_tx < lp->cur_tx) {
                int entry = dirty_tx & TX_RING_MOD_MASK;
                int status = (short)le16_to_cpu(lp->tx_ring[entry].status);

                if (status < 0)
                    break;              /* It still hasn't been Txed */

                lp->tx_ring[entry].base = 0;

                if (status & 0x4000) {
                    /* There was a major error, log it. */
                    int err_status = le32_to_cpu(lp->tx_ring[entry].misc);

                    lp->stats.tx_errors++;
                    if (err_status & 0x04000000) lp->stats.tx_aborted_errors++;
                    if (err_status & 0x08000000) lp->stats.tx_carrier_errors++;
                    if (err_status & 0x10000000) lp->stats.tx_window_errors++;
#ifndef DO_DXSUFLO
                    if (err_status & 0x40000000) {
                        lp->stats.tx_fifo_errors++;
                        /* Ackk!  On FIFO errors the Tx unit is turned off! */
                        /* Remove this verbosity later! */
                        printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
                               dev->name, csr0);
                        must_restart = 1;
                    }
#else
                    if (err_status & 0x40000000) {
                        lp->stats.tx_fifo_errors++;
                        if (!lp->dxsuflo) {  /* If controller doesn't recover ... */
                            /* Ackk!  On FIFO errors the Tx unit is turned off! */
                            /* Remove this verbosity later! */
                            printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
                                   dev->name, csr0);
                            must_restart = 1;
                        }
                    }
#endif
                } else {
                    if (status & 0x1800)
                        lp->stats.collisions++;
                    lp->stats.tx_packets++;
                }

                /* We must free the original skb */
                if (lp->tx_skbuff[entry]) {
                    pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry],
                                     lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
                    dev_kfree_skb_irq(lp->tx_skbuff[entry]);
                    lp->tx_skbuff[entry] = NULL;
                    lp->tx_dma_addr[entry] = 0;
                }
                dirty_tx++;
            }

#ifndef final_version
            if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
                printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dirty_tx, lp->cur_tx, lp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (lp->tx_full &&
                netif_queue_stopped(dev) &&
                dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
                /* The ring is no longer full, clear tbusy. */
                lp->tx_full = 0;
                netif_wake_queue(dev);
            }
            lp->dirty_tx = dirty_tx;
        }

        /* Log misc errors. */
        if (csr0 & 0x4000) lp->stats.tx_errors++;       /* Tx babble. */
        if (csr0 & 0x1000) {
            /*
             * This happens when our receive ring is full.  It shouldn't
             * be a problem, as we will see normal rx interrupts for the
             * frames in the receive ring.  But some PCI chipsets (I can
             * reproduce this on SP3G with the Intel saturn chipset)
             * occasionally have problems and fill up the receive ring
             * with error descriptors.  In this situation we don't get a
             * rx interrupt, but a missed frame interrupt sooner or later.
             * So we try to clean up our receive ring here.
             */
            pcnet32_rx(dev);
            lp->stats.rx_errors++;      /* Missed a Rx frame. */
        }

        if (csr0 & 0x0800) {
            printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n",
                   dev->name, csr0);
            /* unlike for the lance, there is no restart needed */
        }

        if (must_restart) {
            /* stop the chip to clear the error condition, then restart */
            lp->a.write_csr(ioaddr, 0, 0x0004);
            pcnet32_restart(dev, 0x0002);
        }
    }

    /* Clear any other interrupt, and set interrupt enable. */
    lp->a.write_csr(ioaddr, 0, 0x7940);
    lp->a.write_rap(ioaddr, rap);

    if (pcnet32_debug > 4)
        printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
               dev->name, lp->a.read_csr(ioaddr, 0));

    spin_unlock(&lp->lock);
}

static int
pcnet32_rx(struct net_device *dev)
{
    struct pcnet32_private *lp = dev->priv;
    int entry = lp->cur_rx & RX_RING_MOD_MASK;

    /* If we own the next entry, it's a new packet. Send it up. */
    while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
        int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;

        if (status != 0x03) {           /* There was an error. */
            /*
             * There is a tricky error noted by John Murphy,
             * <murf@perftech.com> to Russ Nelson: Even with full-sized
             * buffers it's possible for a jabber packet to use two
             * buffers, with only the last correctly noting the error.
             */
            if (status & 0x01)          /* Only count a general error at the */
                lp->stats.rx_errors++;  /* end of a packet. */
            if (status & 0x20) lp->stats.rx_frame_errors++;
            if (status & 0x10) lp->stats.rx_over_errors++;
            if (status & 0x08) lp->stats.rx_crc_errors++;
            if (status & 0x04) lp->stats.rx_fifo_errors++;
            lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
        } else {
            /* Malloc up new buffer, compatible with net-2e. */
            short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff) - 4;
            struct sk_buff *skb;

            if (pkt_len < 60) {
                printk(KERN_ERR "%s: Runt packet!\n", dev->name);
                lp->stats.rx_errors++;
            } else {
                int rx_in_place = 0;

                if (pkt_len > rx_copybreak) {
                    struct sk_buff *newskb;

                    if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
                        skb_reserve(newskb, 2);