eepro100.c
#endif
	int i;

	/* Print a few items for debugging. */
	if (speedo_debug > 0) {
		int i;
		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n", dev->name,
			   sp->cur_tx, sp->dirty_tx);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", dev->name,
				   i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
				   i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
				   i, sp->tx_ring[i].status);
	}
	printk(KERN_DEBUG "%s: Printing Rx ring"
		   " (next to receive into %u, dirty index %u).\n",
		   dev->name, sp->cur_rx, sp->dirty_rx);
	for (i = 0; i < RX_RING_SIZE; i++)
		printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
			   sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
			   i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
			   i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
			   i, (sp->rx_ringp[i] != NULL) ?
					(unsigned)sp->rx_ringp[i]->status : 0);

#if 0
	for (i = 0; i < 16; i++) {
		/* FIXME: what does it mean? --SAW */
		if (i == 6)
			i = 21;
		printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
			   dev->name, phy_num, i, mdio_read(ioaddr, phy_num, i));
	}
#endif
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	struct RxFD *rxf, *last_rxf = NULL;
	int i;

	sp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
		sp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;			/* OK.  Just initially short of Rx bufs. */
		skb->dev = dev;			/* Mark as being used by this device. */
		rxf = (struct RxFD *)skb->tail;
		sp->rx_ringp[i] = rxf;
		skb_reserve(skb, sizeof(struct RxFD));
		if (last_rxf)
			last_rxf->link = virt_to_le32desc(rxf);
		last_rxf = rxf;
		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
		rxf->link = 0;				/* None yet. */
		/* This field unused by i82557. */
		rxf->rx_buf_addr = 0xffffffff;
		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
	}
	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	/* Mark the last entry as end-of-list. */
	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
	sp->last_rxf = last_rxf;
}

static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int entry;

	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
		entry = sp->dirty_tx % TX_RING_SIZE;
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_errors++;
			dev_free_skb(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		sp->dirty_tx++;
	}
	while (sp->mc_setup_head != NULL) {
		struct speedo_mc_block *t;
		if (speedo_debug > 1)
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	sp->tx_full = 0;
	netif_wake_queue(dev);
}
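/*
 * [Editor's note] speedo_purge_tx() and the other ring walkers above rely
 * on free-running indices: cur_tx and dirty_tx increment forever and are
 * reduced to a ring slot only with "% TX_RING_SIZE", while the signed
 * difference (int)(cur_tx - dirty_tx) counts outstanding descriptors even
 * after the 32-bit counters wrap.  A minimal sketch of the idiom;
 * tx_pending() is a hypothetical helper, not part of this driver:
 */
#if 0	/* illustration only, not driver code */
static int tx_pending(unsigned int cur_tx, unsigned int dirty_tx)
{
	/* Unsigned subtraction is well defined modulo 2^32, so this stays
	   correct across wrap-around as long as fewer than INT_MAX
	   descriptors are ever outstanding (here at most TX_RING_SIZE). */
	return (int)(cur_tx - dirty_tx);
}
#endif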
static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;

	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f;
		int advertising = mdio_read(ioaddr, phy_addr, 4);
		int mii_bmcr = mdio_read(ioaddr, phy_addr, 0);
		mdio_write(ioaddr, phy_addr, 0, 0x0400);
		mdio_write(ioaddr, phy_addr, 1, 0x0000);
		mdio_write(ioaddr, phy_addr, 4, 0x0000);
		mdio_write(ioaddr, phy_addr, 0, 0x8000);
#ifdef honor_default_port
		mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
#else
		mdio_read(ioaddr, phy_addr, 0);
		mdio_write(ioaddr, phy_addr, 0, mii_bmcr);
		mdio_write(ioaddr, phy_addr, 4, advertising);
#endif
	}
}

static void speedo_tx_timeout(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int status = inw(ioaddr + SCBStatus);
	unsigned long flags;

	printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
		   " %4.4x at %d/%d command %8.8x.\n",
		   dev->name, status, inw(ioaddr + SCBCmd),
		   sp->dirty_tx, sp->cur_tx,
		   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);

	/* Trigger a stats dump to give time before the reset. */
	speedo_get_stats(dev);

	speedo_show_state(dev);
#if 0
	if ((status & 0x00C0) != 0x0080
		&& (status & 0x003C) == 0x0010) {
		/* Only the command unit has stopped. */
		printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
			   dev->name);
		outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
			 ioaddr + SCBPointer);
		outw(CUStart, ioaddr + SCBCmd);
		reset_mii(dev);
	} else {
#else
	{
#endif
		start_bh_atomic();	/* Ensure that the timer routine doesn't run! */
		del_timer(&sp->timer);
		end_bh_atomic();

		/* Reset the Tx and Rx units. */
		outl(PortReset, ioaddr + SCBPort);
		/* We may get spurious interrupts here.  But I don't think that they
		   may do much harm.  1999/12/09 SAW */
		udelay(10);
		/* Disable interrupts. */
		outw(SCBMaskAll, ioaddr + SCBCmd);
		synchronize_irq();
		speedo_tx_buffer_gc(dev);
		/* Free as much as possible.
		   It helps to recover from a hang caused by running out of memory.
		   It also simplifies speedo_resume() in case the Tx ring is full or
		   close to full. */
		speedo_purge_tx(dev);
		speedo_refill_rx_buffers(dev, 1);
		spin_lock_irqsave(&sp->lock, flags);
		speedo_resume(dev);
		sp->rx_mode = -1;
		dev->trans_start = jiffies;
		spin_unlock_irqrestore(&sp->lock, flags);
		set_rx_mode(dev);	/* it takes the spinlock itself --SAW */
		/* Reset the MII transceiver.  Do it before starting the timer to
		   serialize the mdio_xxx operations.  Yes, it's paranoia :-)
		   2000/05/09 SAW */
		reset_mii(dev);
		sp->timer.expires = RUN_AT(2*HZ);
		add_timer(&sp->timer);
	}
	return;
}
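/*
 * [Editor's note] reset_mii() above programs standard IEEE 802.3 MII
 * registers: register 0 is the control register (BMCR; bit 15/0x8000 is
 * the self-clearing reset bit, bit 10/0x0400 is isolate) and register 4
 * is the autonegotiation advertisement register (ANAR).  A common generic
 * follow-up, which this driver does not do, is to poll until the PHY
 * clears the reset bit.  A minimal sketch reusing this file's
 * mdio_read(); wait_phy_reset() is a hypothetical helper:
 */
#if 0	/* illustration only, not driver code */
static int wait_phy_reset(long ioaddr, int phy_addr)
{
	int i;
	for (i = 0; i < 1000; i++) {
		if ((mdio_read(ioaddr, phy_addr, 0) & 0x8000) == 0)
			return 0;	/* reset finished, bit self-cleared */
		udelay(100);
	}
	return -1;			/* PHY never came out of reset */
}
#endif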
static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int entry;

#if ! defined(HAS_NETIF_QUEUE)
	if (test_bit(0, (void*)&dev->tbusy) != 0) {
		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < TX_TIMEOUT - 2)
			return 1;
		if (tickssofar < TX_TIMEOUT) {
			/* Reap sent packets from the full Tx queue. */
			unsigned long flags;
			/* Take a spinlock to make wait_for_cmd_done and sending the
			   command atomic.  --SAW */
			spin_lock_irqsave(&sp->lock, flags);
			wait_for_cmd_done(ioaddr + SCBCmd);
			outw(SCBTriggerIntr, ioaddr + SCBCmd);
			spin_unlock_irqrestore(&sp->lock, flags);
			return 1;
		}
		speedo_tx_timeout(dev);
		return 1;
	}
#endif

	{	/* Prevent interrupts from changing the Tx ring from underneath us. */
		unsigned long flags;

		spin_lock_irqsave(&sp->lock, flags);

		/* Check if there is enough space. */
		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
			printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
			netif_stop_queue(dev);
			sp->tx_full = 1;
			spin_unlock_irqrestore(&sp->lock, flags);
			return 1;
		}

		/* Calculate the Tx descriptor entry. */
		entry = sp->cur_tx++ % TX_RING_SIZE;

		sp->tx_skbuff[entry] = skb;
		sp->tx_ring[entry].status =
			cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
		if (!(entry & ((TX_RING_SIZE>>2)-1)))
			sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
		sp->tx_ring[entry].link =
			virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
		sp->tx_ring[entry].tx_desc_addr =
			virt_to_le32desc(&sp->tx_ring[entry].tx_buf_addr0);
		/* The data region is always in one buffer descriptor. */
		sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
		sp->tx_ring[entry].tx_buf_addr0 = virt_to_le32desc(skb->data);
		sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);

		/* Trigger the command unit resume. */
		wait_for_cmd_done(ioaddr + SCBCmd);
		clear_suspend(sp->last_cmd);
		/* We want the time window between clearing the suspend flag on the
		   previous command and resuming the CU to be as small as possible.
		   Interrupts in between are very undesired.  --SAW */
		outb(CUResume, ioaddr + SCBCmd);
		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

		/* Leave room for set_rx_mode().  If there is no more space than what
		   is reserved for the multicast filter, mark the ring as full. */
		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
			netif_stop_queue(dev);
			sp->tx_full = 1;
		}

		spin_unlock_irqrestore(&sp->lock, flags);
	}

	dev->trans_start = jiffies;

	return 0;
}

static void speedo_tx_buffer_gc(struct net_device *dev)
{
	unsigned int dirty_tx;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;

	dirty_tx = sp->dirty_tx;
	while ((int)(sp->cur_tx - dirty_tx) > 0) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(sp->tx_ring[entry].status);

		if (speedo_debug > 5)
			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
				   entry, status);
		if ((status & StatusComplete) == 0)
			break;			/* It still hasn't been processed. */
		if (status & TxUnderrun)
			if (sp->tx_threshold < 0x01e08000) {
				if (speedo_debug > 2)
					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
						   dev->name);
				sp->tx_threshold += 0x00040000;
			}
		/* Free the original skb. */
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_packets++;	/* Count only user packets. */
			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
			dev_free_skb(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		dirty_tx++;
	}

	if (speedo_debug && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
			   " full=%d.\n",
			   dirty_tx, sp->cur_tx, sp->tx_full);
		dirty_tx += TX_RING_SIZE;
	}

	while (sp->mc_setup_head != NULL
		   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
		struct speedo_mc_block *t;
		if (speedo_debug > 1)
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	if (sp->mc_setup_head == NULL)
		sp->mc_setup_tail = NULL;

	sp->dirty_tx = dirty_tx;
}
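/*
 * [Editor's note] Note how speedo_start_xmit() above mitigates Tx
 * interrupts: CmdIntr is set only when (entry & ((TX_RING_SIZE>>2)-1))
 * == 0, i.e. on every (TX_RING_SIZE/4)-th descriptor; completions in
 * between are reaped in batches by speedo_tx_buffer_gc().  Assuming a
 * power-of-two ring of 32 entries, the mask selects slots 0, 8, 16
 * and 24:
 */
#if 0	/* illustration only, not driver code */
	/* TX_RING_SIZE = 32  ->  TX_RING_SIZE>>2 = 8  ->  mask = 7.
	   Entries 0, 8, 16, 24 request an interrupt; the rest do not. */
	int wants_intr = !(entry & ((TX_RING_SIZE >> 2) - 1));
#endif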
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct speedo_private *sp;
	long ioaddr, boguscnt = max_interrupt_work;
	unsigned short status;

#ifndef final_version
	if (dev == NULL) {
		printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n",
			   irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	sp = (struct speedo_private *)dev->priv;

#ifndef final_version
	/* A lock to prevent simultaneous entry on SMP machines. */
	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		sp->in_interrupt = 0;	/* Avoid halting machine. */
		return;
	}
	dev->interrupt = 1;
#endif

	do {
		status = inw(ioaddr + SCBStatus);
		/* Acknowledge all of the current interrupt sources ASAP. */
		/* Will change from 0xfc00 to 0xff00 when we start handling
		   FCP and ER interrupts --Dragan */
		outw(status & 0xfc00, ioaddr + SCBStatus);

		if (speedo_debug > 3)
			printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
				   dev->name, status);

		if ((status & 0xfc00) == 0)
			break;

		/* Always check if all rx buffers are allocated.  --SAW */
		speedo_refill_rx_buffers(dev, 0);

		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
									/* Need to gather the postponed packet. */
			speedo_rx(dev);

		if (status & 0x1000) {
			spin_lock(&sp->lock);
			if ((status & 0x003c) == 0x0028) {	/* No more Rx buffers. */
				struct RxFD *rxf;
				printk(KERN_WARNING "%s: card reports no RX buffers.\n",
					   dev->name);
				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
				if (rxf == NULL) {
					if (speedo_debug > 2)
						printk(KERN_DEBUG
							   "%s: NULL cur_rx in speedo_interrupt().\n",
							   dev->name);
					sp->rx_ring_state |= RrNoMem|RrNoResources;
				} else if (rxf == sp->last_rxf) {
					if (speedo_debug > 2)
						printk(KERN_DEBUG
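/*
 * [Editor's note] The listing breaks off inside speedo_interrupt() above.
 * In the loop, the high bits of SCBStatus are the interrupt causes and
 * are acknowledged by writing the same bits back; 0xfc00 covers the six
 * causes this driver handles, 0x5000 tests "frame received" together
 * with "receiver not ready", and 0x1000 tests "receiver not ready"
 * alone.  A sketch of the i82557-family bit layout; the enum names are
 * conventional, not taken from this file:
 */
#if 0	/* illustration only, not driver code */
enum scb_status_bits {
	IntrCmdDone   = 0x8000,	/* CX:  command unit executed a command */
	IntrRxDone    = 0x4000,	/* FR:  frame received */
	IntrCmdIdle   = 0x2000,	/* CNA: command unit left the active state */
	IntrRxSuspend = 0x1000,	/* RNR: receiver not ready, out of buffers */
	IntrMIIDone   = 0x0800,	/* MDI: MDIO read/write cycle completed */
	IntrDrvrIntr  = 0x0400,	/* SWI: software-generated interrupt */
};
#endif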