/* 3c59x.c */
			   dev->name, media_tbl[dev->if_port].name);
			next_tick = media_tbl[dev->if_port].wait;
		}
		outw((media_status & ~(Media_10TP|Media_SQE)) |
			 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
		EL3WINDOW(3);
		config = inl(ioaddr + Wn3_Config);
		config = BFINS(config, dev->if_port, 20, 4);
		outl(config, ioaddr + Wn3_Config);
		outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
			 ioaddr + EL3_CMD);
		if (vortex_debug > 1)
			printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
		/* AKPM: FIXME: Should reset Rx & Tx here.  P60 of 3c90xc.pdf */
	}
	EL3WINDOW(old_window);
	enable_irq(dev->irq);

	if (vortex_debug > 2)
		printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
			   dev->name, media_tbl[dev->if_port].name);

	mod_timer(&vp->timer, RUN_AT(next_tick));
	if (vp->deferred)
		outw(FakeIntr, ioaddr + EL3_CMD);
	return;
}

static void vortex_tx_timeout(struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
		   dev->name, inb(ioaddr + TxStatus), inw(ioaddr + EL3_STATUS));

	/* Slight code bloat to be user friendly. */
	if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
		printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
			   " network cable problem?\n", dev->name);
	if (inw(ioaddr + EL3_STATUS) & IntLatch) {
		printk(KERN_ERR "%s: Interrupt posted but not delivered --"
			   " IRQ blocked by another device?\n", dev->name);
		/* Bad idea here.. but we might as well handle a few events. */
		{
			/*
			 * AKPM: block interrupts because vortex_interrupt
			 * does a bare spin_lock()
			 */
			unsigned long flags;
			local_irq_save(flags);
			if (vp->full_bus_master_tx)
				boomerang_interrupt(dev->irq, dev, 0);
			else
				vortex_interrupt(dev->irq, dev, 0);
			local_irq_restore(flags);
		}
	}

	if (vortex_debug > 0)
		dump_tx_ring(dev);

	wait_for_completion(dev, TxReset);

	vp->stats.tx_errors++;
	if (vp->full_bus_master_tx) {
		if (vortex_debug > 0)
			printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n",
				   dev->name);
		if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0)
			outl(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) *
				 sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
		if (vp->tx_full && (vp->cur_tx - vp->dirty_tx <= TX_RING_SIZE - 1)) {
			vp->tx_full = 0;
			netif_wake_queue(dev);
		}
		if (vp->tx_full)
			netif_stop_queue(dev);
		if (vp->drv_flags & IS_BOOMERANG)
			outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
		outw(DownUnstall, ioaddr + EL3_CMD);
	} else {
		vp->stats.tx_dropped++;
		netif_wake_queue(dev);
	}

	/* Issue Tx Enable */
	outw(TxEnable, ioaddr + EL3_CMD);
	dev->trans_start = jiffies;

	/* Switch to register set 7 for normal use. */
	EL3WINDOW(7);
}

/*
 * Handle uncommon interrupt sources.  This is a separate routine to minimize
 * the cache impact.
 */
static void
vortex_error(struct net_device *dev, int status)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int do_tx_reset = 0, reset_mask = 0;
	unsigned char tx_status = 0;

	if (vortex_debug > 2) {
		printk(KERN_DEBUG "%s: vortex_error(), status=0x%x\n",
			   dev->name, status);
	}

	if (status & TxComplete) {		/* Really "TxError" for us. */
		tx_status = inb(ioaddr + TxStatus);
		/* Presumably a tx-timeout. We must merely re-enable. */
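		/*
		 * A hedged note on the masks tested below, inferred from this
		 * driver's own branch comments rather than the databook: 0x30
		 * covers txJabber|txUnderrun and 0x08 is maxCollisions, so
		 * 0x14 and 0x38 appear to be the unions used to bump
		 * tx_fifo_errors and tx_aborted_errors with one test apiece.
		 */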
		if (vortex_debug > 2 || (tx_status != 0x88 && vortex_debug > 0)) {
			printk(KERN_DEBUG "%s: Transmit error, Tx status register %2.2x.\n",
				   dev->name, tx_status);
			dump_tx_ring(dev);
		}
		if (tx_status & 0x14)
			vp->stats.tx_fifo_errors++;
		if (tx_status & 0x38)
			vp->stats.tx_aborted_errors++;
		outb(0, ioaddr + TxStatus);
		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
			do_tx_reset = 1;
		} else if ((tx_status & 0x08) &&
			   (vp->drv_flags & MAX_COLLISION_RESET)) {	/* maxCollisions */
			do_tx_reset = 1;
			reset_mask = 0x0108;	/* Reset interface logic, but not download logic */
		} else {				/* Merely re-enable the transmitter. */
			outw(TxEnable, ioaddr + EL3_CMD);
		}
	}

	if (status & RxEarly) {				/* Rx early is unused. */
		vortex_rx(dev);
		outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
	}

	if (status & StatsFull) {			/* Empty statistics. */
		static int DoneDidThat;
		if (vortex_debug > 4)
			printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
		update_stats(ioaddr, dev);
		/* HACK: Disable statistics as an interrupt source. */
		/* This occurs when we have the wrong media type! */
		if (DoneDidThat == 0 && inw(ioaddr + EL3_STATUS) & StatsFull) {
			printk(KERN_WARNING "%s: Updating statistics failed, disabling "
				   "stats as an interrupt source.\n", dev->name);
			EL3WINDOW(5);
			outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
			vp->intr_enable &= ~StatsFull;
			EL3WINDOW(7);
			DoneDidThat++;
		}
	}

	if (status & IntReq) {		/* Restore all interrupt sources. */
		outw(vp->status_enable, ioaddr + EL3_CMD);
		outw(vp->intr_enable, ioaddr + EL3_CMD);
	}

	if (status & HostError) {
		u16 fifo_diag;
		EL3WINDOW(4);
		fifo_diag = inw(ioaddr + Wn4_FIFODiag);
		printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
			   dev->name, fifo_diag);
		/* Adapter failure requires Tx/Rx reset and reinit. */
		if (vp->full_bus_master_tx) {
			/* In this case, blow the card away */
			vortex_down(dev);
			wait_for_completion(dev, TotalReset | 0xff);
			vortex_up(dev);	/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
		} else if (fifo_diag & 0x0400)
			do_tx_reset = 1;
		if (fifo_diag & 0x3000) {
			wait_for_completion(dev, RxReset);
			/* Set the Rx filter to the current state. */
			set_rx_mode(dev);
			outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
			outw(AckIntr | HostError, ioaddr + EL3_CMD);
		}
	}

	if (do_tx_reset) {
		wait_for_completion(dev, TxReset|reset_mask);
		outw(TxEnable, ioaddr + EL3_CMD);
		if (!vp->full_bus_master_tx)
			netif_wake_queue(dev);
	}
}

static int
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;

	/* Put out the doubleword header... */
	outl(skb->len, ioaddr + TX_FIFO);
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		int len = (skb->len + 3) & ~3;
		outl(vp->tx_skb_dma = pci_map_single(vp->pdev, skb->data, len,
						     PCI_DMA_TODEVICE),
		     ioaddr + Wn7_MasterAddr);
		outw(len, ioaddr + Wn7_MasterLen);
		vp->tx_skb = skb;
		outw(StartDMADown, ioaddr + EL3_CMD);
		/* netif_wake_queue() will be called at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
		dev_kfree_skb(skb);
		if (inw(ioaddr + TxFree) > 1536) {
			netif_start_queue(dev);	/* AKPM: redundant? */
		} else {
			/* Interrupt us when the FIFO has room for max-sized packet. */
			netif_stop_queue(dev);
			outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
		}
	}

	dev->trans_start = jiffies;

	/* Clear the Tx status stack. */
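	/*
	 * Hedged aside: per the "Pop the status stack" comment in the loop
	 * below, each write to TxStatus discards one queued per-packet status
	 * byte.  The cap of 32 iterations looks like the driver's own safety
	 * bound so a confused chip cannot pin us in this loop; it is not
	 * presented here as a documented hardware stack depth.
	 */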
	{
		int tx_status;
		int i = 32;
		while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
			if (tx_status & 0x3C) {	/* A Tx-disabling error occurred. */
				if (vortex_debug > 2)
					printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
						   dev->name, tx_status);
				if (tx_status & 0x04)
					vp->stats.tx_fifo_errors++;
				if (tx_status & 0x38)
					vp->stats.tx_aborted_errors++;
				if (tx_status & 0x30) {
					wait_for_completion(dev, TxReset);
				}
				outw(TxEnable, ioaddr + EL3_CMD);
			}
			outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
		}
	}
	return 0;
}

static int
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	/* Calculate the next Tx descriptor entry. */
	int entry = vp->cur_tx % TX_RING_SIZE;
	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
	unsigned long flags;

	if (vortex_debug > 6) {
		printk(KERN_DEBUG "boomerang_start_xmit()\n");
		if (vortex_debug > 3)
			printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
				   dev->name, vp->cur_tx);
	}

	if (vp->tx_full) {
		if (vortex_debug > 0)
			printk(KERN_WARNING "%s: Tx Ring full, refusing to send buffer.\n",
				   dev->name);
		return 1;
	}

	vp->tx_skbuff[entry] = skb;
	vp->tx_ring[entry].next = 0;
	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data,
							     skb->len, PCI_DMA_TODEVICE));
	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);

	spin_lock_irqsave(&vp->lock, flags);
	/* Wait for the stall to complete. */
	wait_for_completion(dev, DownStall);
	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
	if (inl(ioaddr + DownListPtr) == 0) {
		outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc),
		     ioaddr + DownListPtr);
		queued_packet++;
	}

	vp->cur_tx++;
	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
		vp->tx_full = 1;
		netif_stop_queue(dev);
	} else {			/* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
		/* netif_start_queue (dev); */	/* AKPM: redundant? */
	}
	outw(DownUnstall, ioaddr + EL3_CMD);
	spin_unlock_irqrestore(&vp->lock, flags);
	dev->trans_start = jiffies;
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */

/*
 * This is the ISR for the vortex series chips.
 * full_bus_master_tx == 0 && full_bus_master_rx == 0
 */
static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr;
	int status;
	int work_done = max_interrupt_work;

	ioaddr = dev->base_addr;
	spin_lock(&vp->lock);

	status = inw(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		printk("vortex_interrupt(). status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;	/* No interrupt: shared IRQs cause this */

	if (status & IntReq) {
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (status == 0xffff)		/* AKPM: h/w no longer present (hotplug)? */
		goto handler_exit;

	if (vortex_debug > 4)
		printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, inb(ioaddr + Timer));

	do {
		if (vortex_debug > 5)
			printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
				   dev->name, status);
		if (status & RxComplete)
			vortex_rx(dev);

		if (status & TxAvailable) {
			if (vortex_debug > 5)
				printk(KERN_DEBUG " TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
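			/*
			 * Hedged cross-reference: this wake-up pairs with the
			 * SetTxThreshold request issued in vortex_start_xmit()
			 * when TxFree dropped to 1536 bytes or less -- the chip
			 * raises TxAvailable once that much FIFO room exists,
			 * so acking it and waking the queue here restarts the
			 * PIO transmit path.
			 */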
			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue(dev);
		}

		if (status & DMADone) {
			if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
				outw(0x1000, ioaddr + Wn7_MasterStatus);	/* Ack the event. */
				pci_unmap_single(vp->pdev, vp->tx_skb_dma,
						 (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
				if (inw(ioaddr + TxFree) > 1536) {
					/*
					 * AKPM: FIXME: I don't think we need this.  If the queue was stopped due to
					 * insufficient FIFO room, the TxAvailable test will succeed and call
					 * netif_wake_queue()
					 */
					netif_wake_queue(dev);
				} else { /* Interrupt when FIFO has room for max-sized packet. */
					outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
					netif_stop_queue(dev);	/* AKPM: This is new */
				}
			}
		}
		/* Check