/* yellowfin.c: Packet Engines "Yellowfin" G-NIC Ethernet driver (excerpt). */
    YF_OUTW(0x81ff, ioaddr + IntrEnb);         /* See enum intr_status_bits */
    YF_OUTW(0x0000, ioaddr + EventStatus);     /* Clear non-interrupting events */
    YF_OUTL(0x80008000, ioaddr + RxCtrl);      /* Start Rx and Tx channels. */
    YF_OUTL(0x80008000, ioaddr + TxCtrl);

    if (yellowfin_debug > 2) {
        printk(KERN_DEBUG "%s: Done yellowfin_open().\n", dev->name);
    }

    /* Set the timer to check for link beat. */
    init_timer(&yp->timer);
    yp->timer.expires = jiffies + 3*HZ;
    yp->timer.data = (unsigned long)dev;
    yp->timer.function = &yellowfin_timer;     /* timer handler */
    add_timer(&yp->timer);

    return 0;
}

static void yellowfin_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int next_tick = 60*HZ;

    if (yellowfin_debug > 3) {
        printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
               dev->name, YF_INW(ioaddr + IntrStatus));
    }

    if (yp->mii_cnt) {
        int mii_reg1 = mdio_read(ioaddr, yp->phys[0], 1);
        int mii_reg5 = mdio_read(ioaddr, yp->phys[0], 5);
        int negotiated = mii_reg5 & yp->advertising;

        if (yellowfin_debug > 1)
            printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
                   "link partner capability %4.4x.\n",
                   dev->name, yp->phys[0], mii_reg1, mii_reg5);

        /* Adopt full duplex if the user has not locked the duplex setting
           and the link partner negotiated 100baseTx-FD or 10baseT-FD. */
        if (!yp->duplex_lock &&
            ((negotiated & 0x0300) == 0x0100 ||
             (negotiated & 0x00C0) == 0x0040)) {
            yp->full_duplex = 1;
        }
        YF_OUTW(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

        /* Link up (MII status bit 0x0004): poll slowly. Link down: poll fast. */
        if (mii_reg1 & 0x0004)
            next_tick = 60*HZ;
        else
            next_tick = 3*HZ;
    }

    yp->timer.expires = jiffies + next_tick;
    add_timer(&yp->timer);
}

static void yellowfin_tx_timeout(struct net_device *dev)
{
    struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
    long ioaddr = dev->base_addr;

    /* TxStatus/RxStatus are 32-bit reads, so print all eight hex digits. */
    printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
           "status %8.8x, Rx status %8.8x, resetting...\n",
           dev->name, yp->cur_tx, yp->dirty_tx,
           YF_INL(ioaddr + TxStatus), YF_INL(ioaddr + RxStatus));

    /* Note: these should be KERN_DEBUG. */
    if (yellowfin_debug) {
        int i;
        printk(KERN_WARNING "  Rx ring %p: ", yp->rx_ring);
        for (i = 0; i < RX_RING_SIZE; i++)
            printk(" %8.8x", yp->rx_ring[i].result_status);
        printk("\n" KERN_WARNING "  Tx ring %p: ", yp->tx_ring);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk(" %4.4x /%8.8x",
                   yp->tx_status[i].tx_errs, yp->tx_ring[i].result_status);
        printk("\n");
    }

    /* If the hardware is found to hang regularly, we will update the code
       to reinitialize the chip here. */
    dev->if_port = 0;

    /* Wake the potentially-idle transmit channel. */
    YF_OUTL(0x10001000, dev->base_addr + TxCtrl);
    if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
        netif_wake_queue(dev);      /* Typical path */

    dev->trans_start = jiffies;
    yp->stats.tx_errors++;
    return;
}
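/* Added commentary, inferred from the code below rather than from chip
   documentation: each ring entry is a DBDMA-style command descriptor.
   branch_addr chains entry i to entry i+1, the last entry branches back to
   entry 0, and a CMD_STOP command marks the point where the DMA engine must
   halt.  Extending the ring is therefore a matter of writing a fresh
   CMD_STOP into the following slot and only then overwriting the old
   CMD_STOP with a real command, so the engine never races past valid
   descriptors. */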
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void yellowfin_init_ring(struct net_device *dev)
{
    struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
    int i;

    yp->tx_full = 0;
    yp->cur_rx = yp->cur_tx = 0;
    yp->dirty_tx = 0;

    yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
    yp->rx_head_desc = &yp->rx_ring[0];

    for (i = 0; i < RX_RING_SIZE; i++) {
        yp->rx_ring[i].dbdma_cmd =
            cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
        yp->rx_ring[i].branch_addr = virt_to_le32desc(&yp->rx_ring[i+1]);
    }
    /* Mark the last entry as wrapping the ring. */
    yp->rx_ring[i-1].branch_addr = virt_to_le32desc(&yp->rx_ring[0]);

    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
        yp->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb->dev = dev;             /* Mark as being used by this device. */
        skb_reserve(skb, 2);        /* 16 byte align the IP header. */
        yp->rx_ring[i].addr = virt_to_le32desc(skb->tail);
    }
    yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
    yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

#define NO_TXSTATS
#ifdef NO_TXSTATS
    /* In this mode the Tx ring needs only a single descriptor. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        yp->tx_skbuff[i] = 0;
        yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
    }
    /* Wrap ring */
    yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
    yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
#else
    /* Tx ring needs a pair of descriptors, the second for the status. */
    for (i = 0; i < TX_RING_SIZE*2; i++) {
        yp->tx_skbuff[i/2] = 0;
        /* Branch on Tx error. */
        yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
        i++;
        if (yp->flags & FullTxStatus) {
            yp->tx_ring[i].dbdma_cmd =
                cpu_to_le32(CMD_TXSTATUS | sizeof(yp->tx_status[i]));
            yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
            yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2]);
        } else {
            /* Symbios chips write only tx_errs word. */
            yp->tx_ring[i].dbdma_cmd =
                cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
            yp->tx_ring[i].request_cnt = 2;
            yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2].tx_errs);
        }
        yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
    }
    /* Wrap ring */
    yp->tx_ring[--i].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
    yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
#endif
    yp->tx_tail_desc = &yp->tx_status[0];
    return;
}
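/* Added commentary on the transmit path below, inferred from the code:
   yellowfin_start_xmit() first writes a fresh CMD_STOP into the *next*
   descriptor, and only then arms the current one with CMD_TX_PKT, so the
   "ownership" bits are set last and the engine always sees a terminated
   chain.  The gx_fix workaround pads any frame whose final byte would land
   in the last 8 bytes of a 32-byte cache line: e.g. if
   (virt_to_bus(skb->data) + skb->len) % 32 == 26, then 26 > 24 and the
   length grows by 32 - 26 + 1 = 7 bytes.  As the inline note says, this is
   only safe for paddable protocols such as IP. */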
static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
    unsigned entry;

    netif_stop_queue(dev);

    /* Caution: the write order is important here, set the base address
       with the "ownership" bits last. */

    /* Calculate the next Tx descriptor entry. */
    entry = yp->cur_tx % TX_RING_SIZE;

    yp->tx_skbuff[entry] = skb;

    if (gx_fix) {       /* Note: only works for paddable protocols e.g. IP. */
        int cacheline_end = (virt_to_bus(skb->data) + skb->len) % 32;
        /* Fix GX chipset errata. */
        if (cacheline_end > 24 || cacheline_end == 0)
            skb->len += 32 - cacheline_end + 1;
    }

#ifdef NO_TXSTATS
    yp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
    yp->tx_ring[entry].result_status = 0;
    if (entry >= TX_RING_SIZE-1) {
        /* New stop command. */
        yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
            cpu_to_le32(CMD_TX_PKT | BRANCH_ALWAYS | skb->len);
    } else {
        yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[entry].dbdma_cmd =
            cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);
    }
    yp->cur_tx++;
#else
    yp->tx_ring[entry<<1].request_cnt = skb->len;
    yp->tx_ring[entry<<1].addr = virt_to_le32desc(skb->data);
    /* The input_last (status-write) command is constant, but we must
       rewrite the subsequent 'stop' command. */

    yp->cur_tx++;
    {
        unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
        yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
    }
    /* Final step -- overwrite the old 'stop' command. */
    yp->tx_ring[entry<<1].dbdma_cmd =
        cpu_to_le32(((entry % 6) == 0 ?
                     CMD_TX_PKT | INTR_ALWAYS | BRANCH_IFTRUE :
                     CMD_TX_PKT | BRANCH_IFTRUE) | skb->len);
#endif

    /* Non-x86 Todo: explicitly flush cache lines here. */

    /* Wake the potentially-idle transmit channel. */
    YF_OUTL(0x10001000, dev->base_addr + TxCtrl);

    if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
        netif_start_queue(dev);     /* Typical path */
    else
        yp->tx_full = 1;
    dev->trans_start = jiffies;

    if (yellowfin_debug > 4) {
        printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
               dev->name, yp->cur_tx, entry);
    }
    return 0;
}
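/* Added commentary, inferred from the code: the handler below reads the
   IntrClear register, which evidently both returns and acknowledges pending
   events, and loops until no events remain or max_interrupt_work iterations
   have been spent.  Rx is serviced first, then completed Tx descriptors are
   reclaimed between yp->dirty_tx and yp->cur_tx. */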
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct yellowfin_private *yp;
    long ioaddr, boguscnt = max_interrupt_work;

#ifndef final_version           /* Can never occur. */
    if (dev == NULL) {
        printk(KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }
#endif

    ioaddr = dev->base_addr;
    yp = (struct yellowfin_private *)dev->priv;

    spin_lock(&yp->lock);

    do {
        u16 intr_status = YF_INW(ioaddr + IntrClear);

        if (yellowfin_debug > 4)
            printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
                   dev->name, intr_status);

        if (intr_status == 0)
            break;

        if (intr_status & (IntrRxDone | IntrEarlyRx)) {
            yellowfin_rx(dev);
            YF_OUTL(0x10001000, ioaddr + RxCtrl);   /* Wake Rx engine. */
        }

#ifdef NO_TXSTATS
        for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
            int entry = yp->dirty_tx % TX_RING_SIZE;
            if (yp->tx_ring[entry].result_status == 0)
                break;
            yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
            yp->stats.tx_packets++;
            /* Free the original skb. */
            dev_kfree_skb_irq(yp->tx_skbuff[entry]);
            yp->tx_skbuff[entry] = 0;
        }
        if (yp->tx_full && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
            /* The ring is no longer full, clear tbusy. */
            yp->tx_full = 0;
        }
        if (yp->tx_full)
            netif_stop_queue(dev);
        else
            netif_wake_queue(dev);
#else
        if ((intr_status & IntrTxDone) || yp->tx_tail_desc->tx_errs) {
            unsigned dirty_tx = yp->dirty_tx;

            for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                /* Todo: optimize this. */
                int entry = dirty_tx % TX_RING_SIZE;
                u16 tx_errs = yp->tx_status[entry].tx_errs;

#ifndef final_version
                if (yellowfin_debug > 5)
                    printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
                           "%4.4x %4.4x %4.4x %4.4x.\n",
                           dev->name, entry,
                           yp->tx_status[entry].tx_cnt,
                           yp->tx_status[entry].tx_errs,
                           yp->tx_status[entry].total_tx_cnt,
                           yp->tx_status[entry].paused);
#endif
                if (tx_errs == 0)
                    break;          /* It still hasn't been Txed */
                if (tx_errs & 0xF810) {
                    /* There was a major error, log it. */
#ifndef final_version
                    if (yellowfin_debug > 1)
                        printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
                               dev->name, tx_errs);
#endif
                    yp->stats.tx_errors++;
                    if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
                    if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
                    if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
                    if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
                    if (tx_errs & 0x1000) yp->stats.collisions16++;
#endif
                } else {
#ifndef final_version
                    if (yellowfin_debug > 4)
                        printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
                               dev->name, tx_errs);
#endif
#ifdef ETHER_STATS
                    if (tx_errs & 0x0400) yp->stats.tx_deferred++;
#endif
                    yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
                    yp->stats.collisions += tx_errs & 15;
                    yp->stats.tx_packets++;
                }
                /* Free the original skb. */
                dev_kfree_skb_irq(yp->tx_skbuff[entry]);
                yp->tx_skbuff[entry] = 0;
                /* Mark status as empty. */
                yp->tx_status[entry].tx_errs = 0;
            }

#ifndef final_version
            if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (yp->tx_full && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
                /* The ring is no longer full, clear tbusy. */
                yp->tx_full = 0;
            }
            if (yp->tx_full)
                netif_stop_queue(dev);
            else
                netif_wake_queue(dev);

            yp->dirty_tx = dirty_tx;
            yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
        }
#endif

        /* Log errors and other uncommon events. */
        if (intr_status & 0x2ee)    /* Abnormal error summary. */
            yellowfin_error(dev, intr_status);

        if (--boguscnt < 0) {
            printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
                   dev->name, intr_status);
            break;
        }
    } while (1);

    if (yellowfin_debug > 3)
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, YF_INW(ioaddr + IntrStatus));

    /* Code that should never be run!  Perhaps remove after testing.. */
    {
        static int stopit = 10;
        if ((!(netif_running(dev))) && --stopit < 0) {
            printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
                   dev->name);
            free_irq(irq, dev);
        }
    }

    spin_unlock(&yp->lock);
}
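/* Added commentary, inferred from the code: each received frame appears to
   carry a status trailer in the final bytes of its buffer -- a 16-bit frame
   status on gigabit parts, two status bytes on the Symbios variants -- so
   yellowfin_rx() classifies errors from the buffer tail rather than from
   the descriptor alone.  data_size is recovered as the difference between
   the requested byte count (low half of dbdma_cmd) and the residual count
   (low half of result_status). */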
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int yellowfin_rx(struct net_device *dev)
{
    struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
    int entry = yp->cur_rx % RX_RING_SIZE;
    int boguscnt = 20;

    if (yellowfin_debug > 4) {
        printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
               entry, yp->rx_ring[entry].result_status);
        printk(KERN_DEBUG "   #%d desc. %8.8x %8.8x %8.8x.\n",
               entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
               yp->rx_ring[entry].result_status);
    }

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while (yp->rx_head_desc->result_status) {
        struct yellowfin_desc *desc = yp->rx_head_desc;
        u16 desc_status = le32_to_cpu(desc->result_status) >> 16;
        int data_size = (le32_to_cpu(desc->dbdma_cmd) -
                         le32_to_cpu(desc->result_status)) & 0xffff;
        u8 *buf_addr = le32desc_to_virt(desc->addr);
        s16 frame_status = get_unaligned((s16*)&(buf_addr[data_size - 2]));

        if (yellowfin_debug > 4)
            printk(KERN_DEBUG "  yellowfin_rx() status was %4.4x.\n",
                   frame_status);
        if (--boguscnt < 0)
            break;
        if (!(desc_status & RX_EOP)) {
            printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
                   " status %4.4x!\n", dev->name, desc_status);
            yp->stats.rx_length_errors++;
        } else if ((yp->flags & IsGigabit) && (frame_status & 0x0038)) {
            /* There was an error. */
            if (yellowfin_debug > 3)
                printk(KERN_DEBUG "  yellowfin_rx() Rx error was %4.4x.\n",
                       frame_status);
            yp->stats.rx_errors++;
            if (frame_status & 0x0060) yp->stats.rx_length_errors++;
            if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
            if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
            if (frame_status < 0) yp->stats.rx_dropped++;
        } else if (!(yp->flags & IsGigabit) &&
                   ((buf_addr[data_size-1] & 0x85) ||
                    (buf_addr[data_size-2] & 0xC0))) {
            u8 status1 = buf_addr[data_size-2];
            u8 status2 = buf_addr[data_size-1];
            yp->stats.rx_errors++;
            if (status1 & 0xC0) yp->stats.rx_length_errors++;
            if (status2 & 0x03) yp->stats.rx_frame_errors++;
            if (status2 & 0x04) yp->stats.rx_crc_errors++;
            if (status2 & 0x80) yp->stats.rx_dropped++;
#ifdef YF_PROTOTYPE
        /* Support for prototype hardware errata. */
        } else if ((yp->flags & HasMACAddrBug) &&
                   memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
                          dev->dev_addr, 6) != 0 &&
                   memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
                          "\377\377\377\377\377\377", 6) != 0) {
            if (bogus_rx++ == 0)
                printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
                       "%2.2x:%2.2x.\n",
                       dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
                       buf_addr[3], buf_addr[4], buf_addr[5]);
#endif
        } else {
            struct sk_buff *skb;