/* 3c59x.c */
		clear_bit(0, (void*)&dev->tbusy);
	}
	dev->trans_start = jiffies;
	vp->stats.tx_bytes += skb->len;
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct device *dev = dev_id;
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr;
	int latency, status;
	int work_done = max_interrupt_work;

#if defined(__i386__)
	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
		printk(KERN_ERR "%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		dev->interrupt = 0;	/* Avoid halting machine. */
		return;
	}
#else
	if (dev->interrupt) {
		printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
		return;
	}
	dev->interrupt = 1;
#endif

	ioaddr = dev->base_addr;
	latency = inb(ioaddr + Timer);
	status = inw(ioaddr + EL3_STATUS);

	if (vortex_debug > 4)
		printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, latency);
	do {
		if (vortex_debug > 5)
			printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
				   dev->name, status);
		if (status & RxComplete)
			vortex_rx(dev);
		if (status & UpComplete) {
			outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
			boomerang_rx(dev);
		}

		if (status & TxAvailable) {
			if (vortex_debug > 5)
				printk(KERN_DEBUG "  TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			clear_bit(0, (void*)&dev->tbusy);
			mark_bh(NET_BH);
		}

		if (status & DownComplete) {
			unsigned int dirty_tx = vp->dirty_tx;

			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
				if (inl(ioaddr + DownListPtr) == virt_to_bus(&vp->tx_ring[entry]))
					break;	/* It still hasn't been processed. */
				if (vp->tx_skbuff[entry]) {
					DEV_FREE_SKB(vp->tx_skbuff[entry]);
					vp->tx_skbuff[entry] = 0;
				}
				/* vp->stats.tx_packets++;  Counted below. */
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
			if (vp->tx_full && (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
				vp->tx_full = 0;
				clear_bit(0, (void*)&dev->tbusy);
				mark_bh(NET_BH);
			}
		}

		if (status & DMADone) {
			if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
				outw(0x1000, ioaddr + Wn7_MasterStatus);	/* Ack the event. */
				DEV_FREE_SKB(vp->tx_skb);	/* Release the transferred buffer. */
				if (inw(ioaddr + TxFree) > 1536) {
					clear_bit(0, (void*)&dev->tbusy);
					mark_bh(NET_BH);
				} else	/* Interrupt when FIFO has room for max-sized packet. */
					outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
			if (status == 0xffff)
				break;
			vortex_error(dev, status);
		}

		if (--work_done < 0) {
			if ((status & (0x7fe - (UpComplete | DownComplete))) == 0) {
				/* Just ack these and return. */
				outw(AckIntr | UpComplete | DownComplete, ioaddr + EL3_CMD);
			} else {
				printk(KERN_WARNING "%s: Too much work in interrupt, status "
					   "%4.4x.  Temporarily disabling functions (%4.4x).\n",
					   dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
				/* Disable all pending interrupts. */
				outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
				outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
				/* The timer will reenable interrupts. */
				break;
			}
		}

		/* Acknowledge the IRQ. */
		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
		if (vp->cb_fn_base)		/* The PCMCIA people are idiots. */
			writel(0x8000, vp->cb_fn_base + 4);

	} while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));

	if (vortex_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);

#if defined(__i386__)
	clear_bit(0, (void*)&dev->interrupt);
#else
	dev->interrupt = 0;
#endif
	return;
}
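/* vortex_rx() below is the Rx path for Vortex-generation boards, which
   have no Rx descriptor ring: each completed packet is read out through
   the RX_FIFO register window (by single-shot bus-master DMA when
   Wn7_MasterStatus shows the engine idle, otherwise by insl() programmed
   I/O) and then popped with an RxDiscard command.  Boomerang-generation
   boards instead take the descriptor-ring path in boomerang_rx(),
   dispatched from the UpComplete case of the interrupt handler above. */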
static int vortex_rx(struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;
	short rx_status;

	if (vortex_debug > 5)
		printk(KERN_DEBUG "  In vortex_rx(), status %4.4x, rx_status %4.4x.\n",
			   inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
	while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			unsigned char rx_error = inb(ioaddr + RxErrors);
			if (vortex_debug > 2)
				printk(KERN_DEBUG "  Rx error: status %2.2x.\n", rx_error);
			vp->stats.rx_errors++;
			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K! */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len + 5);
			if (vortex_debug > 4)
				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);
			if (skb != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
					!(inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
					outl(virt_to_bus(skb_put(skb, pkt_len)),
						 ioaddr + Wn7_MasterAddr);
					outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					outw(StartDMAUp, ioaddr + EL3_CMD);
					while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
				} else {
					insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
						 (pkt_len + 3) >> 2);
				}
				outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
				vp->stats.rx_packets++;
				vp->stats.rx_bytes += skb->len;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (vortex_debug)
				printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
					   "size %d.\n", dev->name, pkt_len);
		}
		outw(RxDiscard, ioaddr + EL3_CMD);
		vp->stats.rx_dropped++;
		/* Wait a limited time to skip this packet. */
		for (i = 200; i >= 0; i--)
			if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
				break;
	}
	return 0;
}
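/* boomerang_rx() below services the bus-master Rx descriptor ring.  The
   cur_rx and dirty_rx indices increase without ever wrapping; the ring
   slot in use is always the index modulo RX_RING_SIZE, and the distance
   (dirty_rx + RX_RING_SIZE - cur_rx) bounds the work done per call so a
   stalled refill loop cannot spin forever.  A sketch of the invariant,
   assuming for illustration only that RX_RING_SIZE == 32 (see the driver
   header for the real value):

	   cur_rx  = 35;  dirty_rx = 33;
	   entry   = cur_rx % 32;          -> slot 3 receives next
	   refill:   33 % 32, 34 % 32;     -> slots 1 and 2 need fresh skbuffs

   Packets shorter than rx_copybreak are copied into a freshly allocated
   skbuff so the ring buffer can be reused; longer packets hand the ring
   skbuff itself up the stack and the refill loop allocates a replacement. */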
static int boomerang_rx(struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	int entry = vp->cur_rx % RX_RING_SIZE;
	long ioaddr = dev->base_addr;
	int rx_status;
	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;

	if (vortex_debug > 5)
		printk(KERN_DEBUG "  In boomerang_rx(), status %4.4x, rx_status "
			   "%4.4x.\n", inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete) {
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) { /* Error, update stats. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				printk(KERN_DEBUG "  Rx error: status %2.2x.\n", rx_error);
			vp->stats.rx_errors++;
			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K! */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			vp->stats.rx_bytes += pkt_len;
			if (vortex_debug > 4)
				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				memcpy(skb_put(skb, pkt_len),
					   bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)),
					   pkt_len);
				rx_copy++;
			} else {
				void *temp;
				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = NULL;
				temp = skb_put(skb, pkt_len);
				/* Remove this checking code for final release. */
				if (bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)) != temp)
					printk(KERN_ERR "%s: Warning -- the skbuff addresses do not match"
						   " in boomerang_rx: %p vs. %p.\n", dev->name,
						   bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)),
						   temp);
				rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			{	/* Use hardware checksum info. */
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
					(csum_bits == (IPChksumValid | TCPChksumValid)
					 || csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->last_rx = jiffies;
			vp->stats.rx_packets++;
		}
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	/* Refill the Rx ring buffers. */
	for (; vp->dirty_rx < vp->cur_rx; vp->dirty_rx++) {
		struct sk_buff *skb;
		entry = vp->dirty_rx % RX_RING_SIZE;
		if (vp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;			/* Bad news! */
			skb->dev = dev;			/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			vp->rx_ring[entry].addr = cpu_to_le32(virt_to_bus(skb->tail));
			vp->rx_skbuff[entry] = skb;
		}
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
		outw(UpUnstall, ioaddr + EL3_CMD);
	}
	return 0;
}
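/* vortex_close() below tears down in roughly the reverse order of open:
   mark the interface down (dev->start = 0, dev->tbusy = 1), kill the
   media timer, freeze the on-chip statistics (folded into vp->stats by
   update_stats()), disable the receiver and transmitter, drop thinnet
   power if on 10base2, release the IRQ, and only then unhook and free
   any bus-master ring buffers, so the NIC can no longer DMA into memory
   that has already been returned to the allocator. */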
static int vortex_close(struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	dev->start = 0;
	dev->tbusy = 1;

	if (vortex_debug > 1) {
		printk(KERN_DEBUG "%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
			   dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
		printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
			   " tx_queued %d Rx pre-checksummed %d.\n",
			   dev->name, rx_nocopy, rx_copy, queued_packet, rx_csumhits);
	}

	del_timer(&vp->timer);

	/* Turn off statistics ASAP.  We update vp->stats below. */
	outw(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	outw(RxDisable, ioaddr + EL3_CMD);
	outw(TxDisable, ioaddr + EL3_CMD);

	if (dev->if_port == XCVR_10base2)
		/* Turn off thinnet power.  Green! */
		outw(StopCoax, ioaddr + EL3_CMD);

	free_irq(dev->irq, dev);

	outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

	update_stats(ioaddr, dev);
	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
		outl(0, ioaddr + UpListPtr);
		for (i = 0; i < RX_RING_SIZE; i++)
			if (vp->rx_skbuff[i]) {
#if LINUX_VERSION_CODE < 0x20100
				vp->rx_skbuff[i]->free = 1;
#endif
				DEV_FREE_SKB(vp->rx_skbuff[i]);
				vp->rx_skbuff[i] = 0;
			}
	}
	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
		outl(0, ioaddr + DownListPtr);
		for (i = 0; i < TX_RING_SIZE; i++)
			if (vp->tx_skbuff[i]) {
				DEV_FREE_SKB(vp->tx_skbuff[i]);
				vp->tx_skbuff[i] = 0;
			}
	}

	MOD_DEC_USE_COUNT;

	return 0;
}

static struct net_device_stats *vortex_get_stats(struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	unsigned long flags;

	if (dev->start) {
		save_flags(flags);
		cli();
		update_stats(dev->base_addr, dev);
		restore_flags(flags);
	}
	return &vp->stats;
}

/* Update statistics.
   Unlike with the EL3 we need not worry about interrupts changing
   the window setting from underneath us, but we must still guard