📄 3c575_cb.c
	if (vp->cur_tx - vp->dirty_tx > 1  &&
		(jiffies - dev->trans_start) > TX_TIMEOUT) {
		/* Check for blocked interrupts. */
		if (inw(ioaddr + EL3_STATUS) & IntLatch) {
			/* We have a blocked IRQ line.  This should never happen, but
			   we recover as best we can. */
			if ( ! vp->polling) {
				if (jiffies - vp->last_reset > 10*HZ) {
					printk(KERN_ERR "%s: IRQ %d is physically blocked! "
						   "Failing back to low-rate polling.\n",
						   dev->name, dev->irq);
					vp->last_reset = jiffies;
				}
				vp->polling = 1;
			}
			vortex_interrupt(dev->irq, dev, 0);
			next_tick = jiffies + 2;
		} else {
			vortex_tx_timeout(dev);
			vp->last_reset = jiffies;
		}
	}

	disable_irq(dev->irq);
	old_window = inw(ioaddr + EL3_CMD) >> 13;
	EL3WINDOW(4);
	media_status = inw(ioaddr + Wn4_Media);
	switch (dev->if_port) {
	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
		if (media_status & Media_LnkBeat) {
			ok = 1;
			if (vortex_debug > 1)
				printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
					   dev->name, media_tbl[dev->if_port].name, media_status);
		} else if (vortex_debug > 1)
			printk(KERN_DEBUG "%s: Media %s has no link beat, %x.\n",
				   dev->name, media_tbl[dev->if_port].name, media_status);
		break;
	case XCVR_MII: case XCVR_NWAY:
		spin_lock_irqsave(&vp->lock, flags);
		mii_status = mdio_read(ioaddr, vp->phys[0], 1);
		ok = 1;
		if (debug > 1)
			printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
				   dev->name, mii_status);
		if (mii_status & 0x0004) {
			int mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
			if (! vp->medialock  &&  mii_reg5 != 0xffff) {
				int duplex;
				mii_reg5 &= vp->advertising;
				duplex = (mii_reg5 & 0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
				if (vp->full_duplex != duplex) {
					vp->full_duplex = duplex;
					printk(KERN_INFO "%s: Setting %s-duplex based on MII "
						   "#%d link partner capability of %4.4x.\n",
						   dev->name, vp->full_duplex ? "full" : "half",
						   vp->phys[0], mii_reg5);
					/* Set the full-duplex bit. */
					EL3WINDOW(3);
					outb((vp->full_duplex ? 0x20 : 0) |
						 (dev->mtu > 1500 ? 0x40 : 0),
						 ioaddr + Wn3_MAC_Ctrl);
				}
				next_tick = 60*HZ;
			}
		}
		spin_unlock_irqrestore(&vp->lock, flags);
		break;
	default:					/* Other media types handled by Tx timeouts. */
		if (vortex_debug > 1)
			printk(KERN_DEBUG "%s: Media %s has no indication, %x.\n",
				   dev->name, media_tbl[dev->if_port].name, media_status);
		ok = 1;
	}

	if ( ! ok) {
		union wn3_config config;

		do {
			dev->if_port = media_tbl[dev->if_port].next;
		} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));

		if (dev->if_port == XCVR_Default) { /* Go back to default. */
			dev->if_port = vp->default_media;
			if (vortex_debug > 1)
				printk(KERN_DEBUG "%s: Media selection failing, using default "
					   "%s port.\n",
					   dev->name, media_tbl[dev->if_port].name);
		} else {
			if (vortex_debug > 1)
				printk(KERN_DEBUG "%s: Media selection failed, now trying "
					   "%s port.\n",
					   dev->name, media_tbl[dev->if_port].name);
			next_tick = media_tbl[dev->if_port].wait;
		}
		outw((media_status & ~(Media_10TP|Media_SQE)) |
			 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);

		EL3WINDOW(3);
		config.i = inl(ioaddr + Wn3_Config);
		config.u.xcvr = dev->if_port;
		outl(config.i, ioaddr + Wn3_Config);

		outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
			 ioaddr + EL3_CMD);
	}
	EL3WINDOW(old_window);
	enable_irq(dev->irq);

	if (vortex_debug > 2)
		printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
			   dev->name, media_tbl[dev->if_port].name);

	vp->timer.expires = jiffies + next_tick;
	add_timer(&vp->timer);
	if (vp->deferred_irqs)
		outw(FakeIntr, ioaddr + EL3_CMD);
	return;
}
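/*
 * Editor's sketch (not part of the original file): the routine above is the
 * tail of the driver's media-check timer, which re-arms itself with the
 * add_timer() call just before the return.  For context, a 2.4-era driver
 * typically arms such a timer from its open() routine roughly as below.
 * The callback name vortex_timer and the initial wait value are assumptions
 * here, not taken from this excerpt.
 */
#if 0	/* illustrative only */
	init_timer(&vp->timer);
	vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
	vp->timer.data = (unsigned long)dev;		/* passed to the callback */
	vp->timer.function = &vortex_timer;			/* assumed callback name */
	add_timer(&vp->timer);
#endif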
static void vortex_tx_timeout(struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
		   dev->name, inb(ioaddr + TxStatus), inw(ioaddr + EL3_STATUS));
	EL3WINDOW(4);
	printk(KERN_ERR " diagnostics: net %04x media %04x dma %8.8x.\n",
		   inw(ioaddr + Wn4_NetDiag), inw(ioaddr + Wn4_Media),
		   inl(ioaddr + PktStatus));

	/* Slight code bloat to be user friendly. */
	if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
		printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
			   " network cable problem?\n", dev->name);
	if (inw(ioaddr + EL3_STATUS) & IntLatch) {
		printk(KERN_ERR "%s: Interrupt posted but not delivered --"
			   " IRQ blocked by another device?\n", dev->name);
	}

#if ! defined(final_version)
	if (vp->full_bus_master_tx) {
		int i;
		printk(KERN_DEBUG " Flags; bus-master %d, full %d; dirty %d "
			   "current %d.\n", vp->full_bus_master_tx, vp->tx_full,
			   vp->dirty_tx % TX_RING_SIZE, vp->cur_tx % TX_RING_SIZE);
		printk(KERN_DEBUG " Transmit list %8.8x vs. %p.\n",
			   inl(ioaddr + DownListPtr),
			   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG " %d: @%p length %8.8x status %8.8x\n", i,
				   &vp->tx_ring[i],
				   le32_to_cpu(vp->tx_ring[i].length),
				   le32_to_cpu(vp->tx_ring[i].status));
		}
	}
#endif
	issue_and_wait(dev, TxReset);

	vp->stats.tx_errors++;
	if (vp->full_bus_master_tx) {
		if (vortex_debug > 0)
			printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n",
				   dev->name);
		if (vp->drv_flags & HAS_FIFO_BUG)		/* Disable tx reclaim */
			outw(SetTxReclaim | 0xff, ioaddr + EL3_CMD);
		if (vp->cur_tx - vp->dirty_tx > 0  &&  inl(ioaddr + DownListPtr) == 0)
			outl(virt_to_bus(&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]),
				 ioaddr + DownListPtr);
		if (vp->tx_full && (vp->cur_tx - vp->dirty_tx <= TX_QUEUE_LEN - 1)) {
			vp->tx_full = 0;
			netif_start_queue(dev);
		}
		if ((vp->drv_flags & IS_BOOMERANG) || !down_poll_rate) {
			/* Room for a packet, to avoid long DownStall delays. */
			outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
			outw(DownUnstall, ioaddr + EL3_CMD);
		} else
			outb(down_poll_rate, ioaddr + DownPollRate);
	} else {
		netif_start_queue(dev);
		vp->stats.tx_dropped++;
	}

	/* Issue Tx Enable */
	outw(TxEnable, ioaddr + EL3_CMD);
	vp->restart_tx = 1;
	dev->trans_start = jiffies;

	/* Switch to register set 7 for normal use. */
	EL3WINDOW(7);
}

/*
 * Handle uncommon interrupt sources.  This is a separate routine to minimize
 * the cache impact.
 */
static void
vortex_error(struct net_device *dev, int status)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int do_tx_reset = 0;

	if (status & TxComplete) {			/* Really "TxError" for us. */
		unsigned char tx_status = inb(ioaddr + TxStatus);
		/* Presumably a tx-timeout. We must merely re-enable. */
		if (vortex_debug > 2
			|| (tx_status != 0x88 && vortex_debug > 0))
			printk(KERN_INFO "%s: Transmit error, Tx status register %2.2x.\n",
				   dev->name, tx_status);
		if (tx_status & 0x14)  vp->stats.tx_fifo_errors++;
		if (tx_status & 0x38)  vp->stats.tx_aborted_errors++;
		outb(0, ioaddr + TxStatus);
		if ((tx_status & 0x08) && (vp->drv_flags & IS_TORNADO))
			issue_and_wait(dev, TxReset | 0x0108);
		if (tx_status & 0x30)
			do_tx_reset = 1;
		else {					/* Merely re-enable the transmitter. */
			outw(TxEnable, ioaddr + EL3_CMD);
			vp->restart_tx = 1;
		}
	}
	if (status & RxEarly) {				/* Rx early is unused. */
		vortex_rx(dev);
		outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
	}
	if (status & StatsFull) {			/* Empty statistics. */
		static int DoneDidThat = 0;
		if (vortex_debug > 4)
			printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
		update_stats(ioaddr, dev);
		/* HACK: Disable statistics as an interrupt source. */
		/* This occurs when we have the wrong media type! */
		if (DoneDidThat == 0  &&  inw(ioaddr + EL3_STATUS) & StatsFull) {
			printk(KERN_WARNING "%s: Updating statistics failed, disabling "
				   "stats as an interrupt source.\n", dev->name);
			EL3WINDOW(5);
			outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull),
				 ioaddr + EL3_CMD);
			EL3WINDOW(7);
			DoneDidThat++;
		}
	}
	if (status & IntReq) {				/* Restore all interrupt sources. */
		outw(vp->status_enable, ioaddr + EL3_CMD);
		outw(vp->intr_enable, ioaddr + EL3_CMD);
	}
	if (status & HostError) {
		u16 fifo_diag;
		EL3WINDOW(4);
		fifo_diag = inw(ioaddr + Wn4_FIFODiag);
		printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
			   dev->name, fifo_diag);
		/* Adapter failure requires Tx/Rx reset and reinit. */
		if (vp->full_bus_master_tx) {
			if (vortex_debug)
				printk(KERN_ERR "%s: PCI bus error, bus status %8.8x.\n",
					   dev->name, inl(ioaddr + PktStatus));
			vortex_down(dev);
			issue_and_wait(dev, TotalReset | 0xff);
			vortex_up(dev);
		} else if (fifo_diag & 0x0400)
			do_tx_reset = 1;
		if (fifo_diag & 0x3000) {
			issue_and_wait(dev, RxReset);
			/* Set the Rx filter to the current state. */
			set_rx_mode(dev);
			outw(RxEnable, ioaddr + EL3_CMD);	/* Re-enable the receiver. */
			outw(AckIntr | HostError, ioaddr + EL3_CMD);
		}
	}
	if (do_tx_reset) {
		issue_and_wait(dev, DownStall);
		issue_and_wait(dev, TxReset);
		outw(TxEnable, ioaddr + EL3_CMD);
		if ((vp->drv_flags & IS_BOOMERANG) || !down_poll_rate) {
			/* Room for a packet, to avoid long DownStall delays. */
			outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
			outw(DownUnstall, ioaddr + EL3_CMD);
		} else
			outb(down_poll_rate, ioaddr + DownPollRate);
		vp->restart_tx = 1;
	}
}
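/*
 * Editor's sketch (not part of the original file): issue_and_wait() is used
 * throughout the reset paths above, but its body lies outside this excerpt.
 * The usual pattern in the 3c59x/3c575 driver family is "write the command
 * to EL3_CMD, then poll EL3_STATUS until the command-in-progress bit drops".
 * The CmdInProgress bit name and the loop bounds below are assumptions.
 */
#if 0	/* illustrative only */
static void issue_and_wait(struct net_device *dev, int cmd)
{
	long ioaddr = dev->base_addr;
	int i;

	outw(cmd, ioaddr + EL3_CMD);
	for (i = 0; i < 2000; i++)			/* fast poll first */
		if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
			return;
	for (i = 0; i < 100000; i++)		/* then a slower, longer wait */
		if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
			return;
	printk(KERN_ERR "%s: command 0x%04x did not complete.\n",
		   dev->name, cmd);
}
#endif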
static int
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;

	tx_timeout_check(dev, vortex_tx_timeout);

	/* Put out the doubleword header... */
	outl(skb->len, ioaddr + TX_FIFO);
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		outl(virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
		outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
		vp->tx_skb = skb;
		outw(StartDMADown, ioaddr + EL3_CMD);
		/* queue will be restarted at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
		dev_free_skb(skb);
		if (inw(ioaddr + TxFree) > 1536) {
			netif_start_queue(dev);
		} else
			/* Interrupt us when the FIFO has room for max-sized packet. */
			outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
	}

	dev->trans_start = jiffies;

	/* Clear the Tx status stack. */
	{
		int tx_status;
		int i = 32;

		while (--i > 0  &&  (tx_status = inb(ioaddr + TxStatus)) > 0) {
			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred. */
				if (vortex_debug > 2)
					printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
						   dev->name, tx_status);
				if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
				if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
				if (tx_status & 0x30) {
					issue_and_wait(dev, TxReset);
				}
				outw(TxEnable, ioaddr + EL3_CMD);
				vp->restart_tx = 1;
			}
			outb(0x00, ioaddr + TxStatus);	/* Pop the status stack. */
		}
	}
	return 0;
}
static int
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int entry;
	struct boom_tx_desc *prev_entry;
	unsigned long flags;
	int i;

	tx_timeout_check(dev, vortex_tx_timeout);

	/* Calculate the next Tx descriptor entry. */
	entry = vp->cur_tx % TX_RING_SIZE;
	prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
	if (vortex_debug > 3)
		printk(KERN_DEBUG "%s: Queuing Tx packet, index %d.\n",
			   dev->name, vp->cur_tx);
	/* Impossible error. */
	if (vp->tx_full) {
		if (vortex_debug > 0)
			printk(KERN_WARNING "%s: Tx Ring full, refusing to send buffer.\n",
				   dev->name);
		return 1;
	}
	vp->tx_skbuff[entry] = skb;
	vp->tx_ring[entry].next = 0;
	vp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);

	spin_lock_irqsave(&vp->lock, flags);
	if ((vp->drv_flags & IS_BOOMERANG) || !down_poll_rate) {
		/* Wait for the stall to complete. */
		issue_and_wait(dev, DownStall);
		vp->tx_desc_tail->next = virt_to_le32desc(&vp->tx_ring[entry]);
		vp->tx_desc_tail = &vp->tx_ring[entry];
		if (inl(ioaddr + DownListPtr) == 0) {
			outl(virt_to_bus(&vp->tx_ring[entry]), ioaddr + DownListPtr);
			queued_packet++;
		}
		outw(DownUnstall, ioaddr + EL3_CMD);
	} else {
		vp->tx_desc_tail->next = virt_to_le32desc(&vp->tx_ring[entry]);
		vp->tx_desc_tail = &vp->tx_ring[entry];
		if (vp->restart_tx) {
			if (vp->drv_flags & HAS_FIFO_BUG)	/* Disable tx reclaim */