📄 yellowfin.c
	/* Make certain the descriptor lists are aligned. */
	yp = (void *)(((long)kmalloc(sizeof(*yp), GFP_KERNEL) + 31) & ~31);
	memset(yp, 0, sizeof(*yp));
	dev->priv = yp;

	yp->next_module = root_yellowfin_dev;
	root_yellowfin_dev = dev;

	yp->chip_id = chip_id;

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			yp->full_duplex = 1;
		yp->default_port = option & 15;
		if (yp->default_port)
			yp->medialock = 1;
	}
	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		yp->full_duplex = 1;

	if (yp->full_duplex)
		yp->duplex_lock = 1;

	/* The Yellowfin-specific entries in the device structure. */
	dev->open = &yellowfin_open;
	dev->hard_start_xmit = &yellowfin_start_xmit;
	dev->stop = &yellowfin_close;
	dev->get_stats = &yellowfin_get_stats;
	dev->set_multicast_list = &set_rx_mode;
#ifdef HAVE_PRIVATE_IOCTL
	dev->do_ioctl = &mii_ioctl;
#endif
	if (mtu)
		dev->mtu = mtu;

	if (chip_tbl[yp->chip_id].flags & HasMII) {
		int phy, phy_idx = 0;
		for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
			int mii_status = mdio_read(ioaddr, phy, 1);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				yp->phys[phy_idx++] = phy;
				yp->advertising = mdio_read(ioaddr, phy, 4);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, phy, mii_status, yp->advertising);
			}
		}
		yp->mii_cnt = phy_idx;
	}

	return dev;
}

static int read_eeprom(long ioaddr, int location)
{
	int bogus_cnt = 1000;

	outb(location, ioaddr + EEAddr);
	outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
	while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
		;
	return inb(ioaddr + EERead);
}

/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */

static int mdio_read(long ioaddr, int phy_id, int location)
{
	int i;

	outw((phy_id<<8) + location, ioaddr + MII_Addr);
	outw(1, ioaddr + MII_Cmd);
	for (i = 10000; i >= 0; i--)
		if ((inw(ioaddr + MII_Status) & 1) == 0)
			break;
	return inw(ioaddr + MII_Rd_Data);
}

static void mdio_write(long ioaddr, int phy_id, int location, int value)
{
	int i;

	outw((phy_id<<8) + location, ioaddr + MII_Addr);
	outw(value, ioaddr + MII_Wr_Data);

	/* Wait for the command to finish. */
	for (i = 10000; i >= 0; i--)
		if ((inw(ioaddr + MII_Status) & 1) == 0)
			break;
	return;
}
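
/*
 * Illustrative sketch only -- not part of the original driver.  It shows
 * how the mdio_read() helper above is typically used: MII register 1 is
 * the standard status register (BMSR), and bit 0x0004 is the link-status
 * bit that yellowfin_timer() below also tests.  The hypothetical
 * 'phy_addr' argument would be one of the addresses stored in yp->phys[].
 */
#if 0
static int yellowfin_mii_link_ok(long ioaddr, int phy_addr)
{
	return (mdio_read(ioaddr, phy_addr, 1) & 0x0004) != 0;
}
#endif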

static int yellowfin_open(struct device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* Reset the chip. */
	outl(0x80000000, ioaddr + DMACtrl);

	if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev))
		return -EAGAIN;

	if (yellowfin_debug > 1)
		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
			   dev->name, dev->irq);

	MOD_INC_USE_COUNT;

	yellowfin_init_ring(dev);

	outl(virt_to_bus(yp->rx_ring), ioaddr + RxPtr);
	outl(virt_to_bus(yp->tx_ring), ioaddr + TxPtr);

	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + StnAddr + i);

	/* Set up various condition 'select' registers.
	   There are no options here. */
	outl(0x00800080, ioaddr + TxIntrSel);	/* Interrupt on Tx abort */
	outl(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
	outl(0x00400040, ioaddr + TxWaitSel);	/* Wait on Tx status */
	outl(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
	outl(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
	outl(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */

	/* Initialize other registers: with so many, this will eventually be
	   converted to an offset/value list. */
	outl(dma_ctrl, ioaddr + DMACtrl);
	outw(fifo_cfg, ioaddr + FIFOcfg);
	/* Enable automatic generation of flow control frames, period 0xffff. */
	outl(0x0030FFFF, ioaddr + FlowCtrl);

	if (dev->if_port == 0)
		dev->if_port = yp->default_port;

	dev->tbusy = 0;
	dev->interrupt = 0;
	yp->in_interrupt = 0;

	/* Setting the Rx mode will start the Rx process. */
	if (yp->chip_id == 0) {
		/* We are always in full-duplex mode with gigabit! */
		yp->full_duplex = 1;
		outw(0x01CF, ioaddr + Cnfg);
	} else {
		outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
		outw(0x1018, ioaddr + FrameGap1);
		outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
	}
	set_rx_mode(dev);

	dev->start = 1;

	/* Enable interrupts by setting the interrupt mask. */
	outw(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
	outw(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
	outl(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
	outl(0x80008000, ioaddr + TxCtrl);

	if (yellowfin_debug > 2) {
		printk(KERN_DEBUG "%s: Done yellowfin_open().\n", dev->name);
	}

	/* Set the timer to check for link beat. */
	init_timer(&yp->timer);
	yp->timer.expires = RUN_AT((24*HZ)/10);	/* 2.4 sec. */
	yp->timer.data = (unsigned long)dev;
	yp->timer.function = &yellowfin_timer;	/* timer handler */
	add_timer(&yp->timer);

	return 0;
}

static void yellowfin_timer(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 0;

	if (yellowfin_debug > 3) {
		printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
			   dev->name, inw(ioaddr + IntrStatus));
	}
	if (yp->mii_cnt) {
		int mii_reg1 = mdio_read(ioaddr, yp->phys[0], 1);
		int mii_reg5 = mdio_read(ioaddr, yp->phys[0], 5);
		int negotiated = mii_reg5 & yp->advertising;
		if (yellowfin_debug > 1)
			printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
				   "link partner capability %4.4x.\n",
				   dev->name, yp->phys[0], mii_reg1, mii_reg5);
		if ( ! yp->duplex_lock &&
			((negotiated & 0x0300) == 0x0100
			 || (negotiated & 0x00C0) == 0x0040)) {
			yp->full_duplex = 1;
		}
		outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

		if (mii_reg1 & 0x0004)
			next_tick = 60*HZ;
		else
			next_tick = 3*HZ;
	}

	if (next_tick) {
		yp->timer.expires = RUN_AT(next_tick);
		add_timer(&yp->timer);
	}
}

static void yellowfin_tx_timeout(struct device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Yellowfin transmit timed out, status %8.8x, resetting...\n",
		   dev->name, inl(ioaddr));

#ifndef __alpha__
	{
		int i;
		printk(KERN_DEBUG "  Rx ring %8.8x: ", (int)yp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)yp->rx_ring[i].status);
		printk("\n"KERN_DEBUG"  Tx ring %8.8x: ", (int)yp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x /%4.4x",
				   yp->tx_status[i].tx_errs, yp->tx_ring[i].status);
		printk("\n");
	}
#endif

	/* Perhaps we should reinitialize the hardware here. */
	dev->if_port = 0;
	/* Stop and restart the chip's Tx processes. */

	/* Trigger an immediate transmit demand. */

	dev->trans_start = jiffies;
	yp->stats.tx_errors++;
	return;
}
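
/* Descriptor layout: each Rx descriptor's branch_addr points at the next
   descriptor, and the last entry branches back to rx_ring[0] with
   BRANCH_ALWAYS, so the chip walks the ring as a circular chain.  When Tx
   status reporting is enabled (NO_TXSTATS not defined, the default), each
   logical Tx slot uses a pair of descriptors: one that transmits the packet
   and one that writes the completion status into yp->tx_status[]. */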

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void yellowfin_init_ring(struct device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	int i;

	yp->tx_full = 0;
	yp->cur_rx = yp->cur_tx = 0;
	yp->dirty_rx = yp->dirty_tx = 0;

	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	yp->rx_head_desc = &yp->rx_ring[0];

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		yp->rx_ring[i].request_cnt = yp->rx_buf_sz;
		yp->rx_ring[i].cmd = CMD_RX_BUF | INTR_ALWAYS;

		skb = dev_alloc_skb(yp->rx_buf_sz);
		yp->rx_skbuff[i] = skb;
		if (skb) {
			skb->dev = dev;			/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* 16 byte align the IP header. */
			yp->rx_ring[i].addr = virt_to_bus(skb->tail);
		} else if (yp->dirty_rx == 0)
			yp->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);

		yp->rx_ring[i].branch_addr = virt_to_bus(&yp->rx_ring[i+1]);
	}
	/* Mark the last entry as wrapping the ring. */
	yp->rx_ring[i-1].cmd = CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
	yp->rx_ring[i-1].branch_addr = virt_to_bus(&yp->rx_ring[0]);

/*#define NO_TXSTATS*/
#ifdef NO_TXSTATS
	/* In this mode the Tx ring needs only a single descriptor. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		yp->tx_skbuff[i] = 0;
		yp->tx_ring[i].cmd = CMD_STOP;
		yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
	}
	yp->tx_ring[--i].cmd = CMD_STOP | BRANCH_ALWAYS;	/* Wrap ring */
	yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
#else
	/* Tx ring needs a pair of descriptors, the second for the status. */
	for (i = 0; i < TX_RING_SIZE*2; i++) {
		yp->tx_skbuff[i/2] = 0;
		yp->tx_ring[i].cmd = CMD_STOP;		/* Branch on Tx error. */
		yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
		i++;
		if (chip_tbl[yp->chip_id].flags & FullTxStatus) {
			yp->tx_ring[i].cmd = CMD_TXSTATUS;
			yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
			yp->tx_ring[i].addr = virt_to_bus(&yp->tx_status[i/2]);
		} else {		/* Symbios chips write only tx_errs word. */
			yp->tx_ring[i].cmd = CMD_TXSTATUS | INTR_ALWAYS;
			yp->tx_ring[i].request_cnt = 2;
			yp->tx_ring[i].addr = virt_to_bus(&yp->tx_status[i/2].tx_errs);
		}
		yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
	}
	/* Wrap ring */
	yp->tx_ring[--i].cmd = CMD_TXSTATUS | BRANCH_ALWAYS | INTR_ALWAYS;
	yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
#endif
	yp->tx_tail_desc = &yp->tx_status[0];
	return;
}
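
/* Queueing discipline: yellowfin_start_xmit() keeps a CMD_STOP descriptor
   just past the last queued packet.  A new packet is appended by first
   writing CMD_STOP into the following slot and only then overwriting the
   current slot's stop command with CMD_TX_PKT, so the Tx DMA engine never
   runs beyond fully initialized descriptors. */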

static int yellowfin_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	unsigned entry;

	/* Block a timer-based transmit from overlapping.  This could better be
	   done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
	if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
		if (jiffies - dev->trans_start < TX_TIMEOUT)
			return 1;
		yellowfin_tx_timeout(dev);
		return 1;
	}

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = yp->cur_tx % TX_RING_SIZE;

	yp->tx_skbuff[entry] = skb;

#ifdef NO_TXSTATS
	yp->tx_ring[entry].request_cnt = skb->len;
	yp->tx_ring[entry].addr = virt_to_bus(skb->data);
	yp->tx_ring[entry].status = 0;
	if (entry >= TX_RING_SIZE-1) {
		yp->tx_ring[0].cmd = CMD_STOP;			/* New stop command. */
		yp->tx_ring[TX_RING_SIZE-1].cmd = CMD_TX_PKT | BRANCH_ALWAYS;
	} else {
		yp->tx_ring[entry+1].cmd = CMD_STOP;	/* New stop command. */
		yp->tx_ring[entry].cmd = CMD_TX_PKT | BRANCH_IFTRUE;
	}
	yp->cur_tx++;
#else
	yp->tx_ring[entry<<1].request_cnt = skb->len;
	yp->tx_ring[entry<<1].addr = virt_to_bus(skb->data);
	/* The input_last (status-write) command is constant, but we must
	   rewrite the subsequent 'stop' command. */

	yp->cur_tx++;
	{
		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
		yp->tx_ring[next_entry<<1].cmd = CMD_STOP;
	}
	/* Final step -- overwrite the old 'stop' command. */
	yp->tx_ring[entry<<1].cmd =
		(entry % 6) == 0 ?
		CMD_TX_PKT | INTR_ALWAYS | BRANCH_IFTRUE :
		CMD_TX_PKT | BRANCH_IFTRUE;
#endif

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	outl(0x10001000, dev->base_addr + TxCtrl);

	if (yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 1)
		clear_bit(0, (void*)&dev->tbusy);		/* Typical path */
	else
		yp->tx_full = 1;
	dev->trans_start = jiffies;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
			   dev->name, yp->cur_tx, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct device *dev = (struct device *)dev_instance;
	struct yellowfin_private *yp;
	long ioaddr, boguscnt = max_interrupt_work;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk(KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	yp = (struct yellowfin_private *)dev->priv;
	if (test_and_set_bit(0, (void*)&yp->in_interrupt)) {
		dev->interrupt = 1;
		printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
		return;
	}

	do {
		u16 intr_status = inw(ioaddr + IntrClear);

		if (yellowfin_debug > 4)
			printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
			yellowfin_rx(dev);
			outl(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
		}

#ifdef NO_TXSTATS
		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
			int entry = yp->dirty_tx % TX_RING_SIZE;
			if (yp->tx_ring[entry].status == 0)
				break;
			/* Free the original skb. */
			DEV_FREE_SKB(yp->tx_skbuff[entry]);
			yp->tx_skbuff[entry] = 0;
			yp->stats.tx_packets++;
		}
		if (yp->tx_full && dev->tbusy
			&& yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 4) {
			/* The ring is no longer full, clear tbusy. */
			yp->tx_full = 0;
			clear_bit(0, (void*)&dev->tbusy);
			mark_bh(NET_BH);
		}
#else
		if (intr_status & IntrTxDone
			|| yp->tx_tail_desc->tx_errs) {
			unsigned dirty_tx = yp->dirty_tx;

			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				/* Todo: optimize this. */
				int entry = dirty_tx % TX_RING_SIZE;
				u16 tx_errs = yp->tx_status[entry].tx_errs;

#ifndef final_version
				if (yellowfin_debug > 5)
					printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
						   "%4.4x %4.4x %4.4x %4.4x.\n",
						   dev->name, entry,
						   yp->tx_status[entry].tx_cnt,
						   yp->tx_status[entry].tx_errs,
						   yp->tx_status[entry].total_tx_cnt,
						   yp->tx_status[entry].paused);