sundance_main.c
    /* Fix the DFE-580TX packet drop issue. */
    if (np->pci_rev_id >= 0x14)
        writeb(0x01, ioaddr + DebugCtrl1);

    netif_start_queue(dev);

    /* 04/19/2005 Jesse: fix to complete the initialization step. */
    spin_lock(&np->lock);
    reset_tx(dev);
    spin_unlock(&np->lock);

    writew(0x200, ioaddr + DMACtrl);
    writew(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

    if (netif_msg_ifup(np))
        printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
               "MAC Control %x, %4.4x %4.4x.\n",
               dev->name, readl(ioaddr + RxStatus), readb(ioaddr + TxStatus),
               readl(ioaddr + MACCtrl0), readw(ioaddr + MACCtrl1),
               readw(ioaddr + MACCtrl0));

    /* Set the timer to check for link beat. */
    init_timer(&np->timer);
    np->timer.expires = jiffies + 3*HZ;
    np->timer.data = (unsigned long)dev;
    np->timer.function = &netdev_timer;    /* timer handler */
    add_timer(&np->timer);

    /* Enable interrupts by setting the interrupt mask. */
    writew(DEFAULT_INTR, ioaddr + IntrEnable);

    return 0;
}

static void check_speed(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    int ctl_reg, status_reg, advertise_reg, lp_advertise_reg;
    int speed = 10, duplex = 0;

    ctl_reg = mdio_read(dev, np->phys[0], MII_BMCR);
    status_reg = mdio_read(dev, np->phys[0], MII_BMSR);
    advertise_reg = mdio_read(dev, np->phys[0], MII_ADVERTISE);
    lp_advertise_reg = mdio_read(dev, np->phys[0], MII_LPA);

    /* If there is no link, just return. */
    if (!(status_reg & BMSR_LSTATUS)) {
        printk(KERN_INFO "%s: Link down, %x.\n", dev->name, status_reg);
        /* Clear the cached link information. */
        np->mii_if.full_duplex = 2;
        np->speed = 0;
        return;
    }

    /* Check the auto-negotiation result. */
    if (status_reg & BMSR_ANEGCOMPLETE) {
        if (advertise_reg & lp_advertise_reg & (LPA_100FULL | LPA_10FULL))
            duplex = 1;
        if (advertise_reg & lp_advertise_reg & (LPA_100FULL | LPA_100HALF))
            speed = 100;
    } else {
        if (ctl_reg & BMCR_FULLDPLX)
            duplex = 1;
        if (ctl_reg & BMCR_SPEED100)
            speed = 100;
    }

    if (np->mii_if.full_duplex != duplex) {
        if (duplex)
            writew(readw(ioaddr + MACCtrl0) | 0x20, ioaddr + MACCtrl0);
        else
            writew(readw(ioaddr + MACCtrl0) & 0xffdf, ioaddr + MACCtrl0);
    }
    if ((np->mii_if.full_duplex != duplex) || (np->speed != speed))
        printk(KERN_INFO "%s: Link changed: %dMbps, %s duplex.\n",
               dev->name, speed, duplex ? "full" : "half");

    np->mii_if.full_duplex = duplex;
    np->speed = speed;
}

static void check_duplex(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
    int negotiated = mii_lpa & np->mii_if.advertising;
    int duplex;

    /* Forced media. */
    if (!np->an_enable || mii_lpa == 0xffff) {
        if (np->mii_if.full_duplex)
            writew(readw(ioaddr + MACCtrl0) | EnbFullDuplex,
                   ioaddr + MACCtrl0);
        return;
    }

    /* Autonegotiation. */
    duplex = (negotiated & 0x01e1);
    if (np->mii_if.full_duplex != duplex) {
        np->mii_if.full_duplex = duplex;
        if (netif_msg_link(np))
            printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
                   "negotiated capability %4.4x.\n",
                   dev->name, duplex ? "full" : "half",
                   np->phys[0], negotiated);
        if (duplex)
            writew(readw(ioaddr + MACCtrl0) | 0x20, ioaddr + MACCtrl0);
        else
            writew(readw(ioaddr + MACCtrl0) & 0xffdf, ioaddr + MACCtrl0);
    }
}

static void netdev_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    int next_tick = 10*HZ;

    if (netif_msg_timer(np)) {
        printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
               "Tx %x Rx %x.\n",
               dev->name, readw(ioaddr + IntrEnable),
               readb(ioaddr + TxStatus), readl(ioaddr + RxStatus));
    }
    np->timer.expires = jiffies + next_tick;
    add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    unsigned long flag;

    netif_stop_queue(dev);
    tasklet_disable(&np->tx_tasklet);
    writew(0, ioaddr + IntrEnable);
    printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
           "TxFrameId %2.2x, resetting...\n",
           dev->name, readb(ioaddr + TxStatus), readb(ioaddr + TxFrameId));

    {
        int i;
        for (i = 0; i < TX_RING_SIZE; i++) {
            printk(KERN_DEBUG "%02x %08x %08x %08x(%02x) %08x %08x\n", i,
                   (unsigned int)np->tx_ring_dma + i*sizeof(*np->tx_ring),
                   le32_to_cpu(np->tx_ring[i].next_desc),
                   le32_to_cpu(np->tx_ring[i].status),
                   (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
                   le32_to_cpu(np->tx_ring[i].frag[0].addr),
                   le32_to_cpu(np->tx_ring[i].frag[0].length));
        }
        printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
               readl(dev->base_addr + TxListPtr), netif_queue_stopped(dev));
        printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
               np->cur_tx, np->cur_tx % TX_RING_SIZE,
               np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
        printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
        printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
    }
    spin_lock_irqsave(&np->lock, flag);

    /* Stop and restart the chip's Tx processes. */
    reset_tx(dev);
    spin_unlock_irqrestore(&np->lock, flag);

    dev->if_port = 0;
    dev->trans_start = jiffies;
    np->stats.tx_errors++;
    if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
        netif_wake_queue(dev);
    }
    writew(DEFAULT_INTR, ioaddr + IntrEnable);
    tasklet_enable(&np->tx_tasklet);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    int i;

    np->cur_rx = np->cur_tx = 0;
    np->dirty_rx = np->dirty_tx = 0;
    np->cur_task = 0;

    np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

    /* Initialize all Rx descriptors. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
            ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
        np->rx_ring[i].status = 0;
        np->rx_ring[i].frag[0].length = 0;
        np->rx_skbuff[i] = 0;
    }

    /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
        np->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb->dev = dev;    /* Mark as being used by this device. */
        if (rx_copybreak < DEFAULT_COPYBREAK)
            skb_reserve(skb, 2);    /* 16 byte align the IP header. */
        np->rx_ring[i].frag[0].addr = cpu_to_le32(
            pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
                PCI_DMA_FROMDEVICE));
        np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
    }
    np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_skbuff[i] = 0;
        np->tx_ring[i].status = 0;
    }
    return;
}

static void tx_poll(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct netdev_private *np = dev->priv;
    unsigned head = np->cur_task % TX_RING_SIZE;
    struct netdev_desc *txdesc =
        &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

    /* Chain the next pointers. */
    for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
        int entry = np->cur_task % TX_RING_SIZE;
        txdesc = &np->tx_ring[entry];
        if (np->last_tx) {
            np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
                entry*sizeof(struct netdev_desc));
        }
        np->last_tx = txdesc;
    }
    /* Indicate the latest descriptor of the tx ring. */
    txdesc->status |= cpu_to_le32(DescIntrOnTx);

    if (readl(dev->base_addr + TxListPtr) == 0)
        writel(np->tx_ring_dma + head * sizeof(struct netdev_desc),
               dev->base_addr + TxListPtr);
    return;
}

static int
start_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    struct netdev_desc *txdesc;
    unsigned entry;

    /* Calculate the next Tx descriptor entry. */
    entry = np->cur_tx % TX_RING_SIZE;
    np->tx_skbuff[entry] = skb;
    txdesc = &np->tx_ring[entry];

    txdesc->next_desc = 0;
    txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
    txdesc->frag[0].addr = cpu_to_le32(pci_map_single(np->pci_dev,
        skb->data, skb->len, PCI_DMA_TODEVICE));
    txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);

    /* Increment cur_tx before tasklet_schedule(). */
    np->cur_tx++;
    mb();

    /* Schedule a tx_poll() task. */
    tasklet_schedule(&np->tx_tasklet);

    /* On some architectures: explicitly flush cache lines here. */
    if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
        !netif_queue_stopped(dev)) {
        /* do nothing */
    } else {
        netif_stop_queue(dev);
    }
    dev->trans_start = jiffies;
    if (netif_msg_tx_queued(np)) {
        printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
               dev->name, np->cur_tx, entry);
    }
    return 0;
}

/* Reset the hardware Tx and free all Tx buffers. */
static int
reset_tx(struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    struct sk_buff *skb;
    int i;
    int irq = in_interrupt();

    tasklet_kill(&np->tx_tasklet);

    /* Reset the Tx logic; TxListPtr is cleared as well. */
    writew(TxDisable, ioaddr + MACCtrl1);
    writew(TxReset | DMAReset | FIFOReset | NetworkReset,
           ioaddr + ASICCtrl + 2);
    for (i = 50; i > 0; i--) {
        if ((readw(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
            break;
        mdelay(1);
    }

    /* Free all Tx skbuffs. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_ring[i].next_desc = 0;    /* 2005/11/4 */
        skb = np->tx_skbuff[i];
        if (skb) {
            pci_unmap_single(np->pci_dev,
                np->tx_ring[i].frag[0].addr,
                skb->len, PCI_DMA_TODEVICE);
            if (irq)
                dev_kfree_skb_irq(skb);
            else
                dev_kfree_skb(skb);
            np->tx_skbuff[i] = 0;
            np->stats.tx_dropped++;
        }
    }
    np->cur_tx = np->dirty_tx = 0;
    np->cur_task = 0;
    np->last_tx = 0;

    writeb(127, ioaddr + TxDMAPollPeriod);

    writew(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
    return 0;
}

/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx work. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
#else
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
#endif
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct netdev_private *np;
    long ioaddr;
    int hw_frame_id;
    int tx_cnt;
    int tx_status;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
    irqreturn_t intr_handled = IRQ_HANDLED;    /* IRQ_NONE */
#endif

    ioaddr = dev->base_addr;
    np = dev->priv;

    do {
        int intr_status = readw(ioaddr + IntrStatus);
        writew(intr_status, ioaddr + IntrStatus);

        if (netif_msg_intr(np))
            printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
                   dev->name, intr_status);

        if (!(intr_status & DEFAULT_INTR))
            break;

        if (intr_status & (IntrRxDMADone)) {
            writew(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
                   ioaddr + IntrEnable);
            if (np->budget < 0)
                np->budget = RX_BUDGET;
            tasklet_schedule(&np->rx_tasklet);
        }
        if (intr_status & (IntrTxDone | IntrDrvRqst)) {
            tx_status = readw(ioaddr + TxStatus);
            for (tx_cnt = 32; tx_status & 0x80; --tx_cnt) {
                if (netif_msg_tx_done(np))
                    printk("%s: Transmit status is %2.2x.\n",
                           dev->name, tx_status);
                if (tx_status & 0x1e) {
                    np->stats.tx_errors++;
                    if (tx_status & 0x10)
                        np->stats.tx_fifo_errors++;
                    if (tx_status & 0x08)
                        np->stats.collisions++;
                    if (tx_status & 0x02)
                        np->stats.tx_window_errors++;
                    /* This reset has not been verified! */
                    if (tx_status & 0x10) {    /* Reset the Tx. */
                        np->stats.tx_fifo_errors++;
                        spin_lock(&np->lock);
                        reset_tx(dev);
                        spin_unlock(&np->lock);
                    }
                    if (tx_status & 0x1e) {
                        /* Restarting Tx can fail on MaxCollisions,
                           so retry a few times. */
                        int i = 10;
                        do {
                            writew(readw(ioaddr + MACCtrl1) | TxEnable,
                                   ioaddr + MACCtrl1);
                            if (readw(ioaddr + MACCtrl1) & TxEnabled)
                                break;
                            mdelay(1);
                        } while (--i);
                    }
                }
                /* Yup, this is a documentation bug.  It cost me *hours*. */
                writew(0, ioaddr + TxStatus);

                tx_status = readw(ioaddr + TxStatus);
                if (tx_cnt < 0)
                    break;
            }
            hw_frame_id = (tx_status >> 8) & 0xff;
        } else {
            hw_frame_id = readb(ioaddr + TxFrameId);
        }

        if (np->pci_rev_id >= 0x14) {
            spin_lock(&np->lock);
            for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                struct sk_buff *skb;
                int sw_frame_id;

                sw_frame_id = (le32_to_cpu(
                    np->tx_ring[entry].status) >> 2) & 0xff;
                if (sw_frame_id == hw_frame_id &&
                    !(le32_to_cpu(np->tx_ring[entry].status) & 0x00010000))
                    break;
                if (sw_frame_id == (hw_frame_id + 1) % TX_RING_SIZE)
                    break;
                skb = np->tx_skbuff[entry];
                /* Free the original skb. */
                pci_unmap_single(np->pci_dev,
                    np->tx_ring[entry].frag[0].addr,
                    skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(np->tx_skbuff[entry]);
                np->tx_skbuff[entry] = 0;
                np->tx_ring[entry].frag[0].addr = 0;
                np->tx_ring[entry].frag[0].length = 0;
            }
            spin_unlock(&np->lock);
        } else {
            spin_lock(&np->lock);
            for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                struct sk_buff *skb;

                if (!(le32_to_cpu(np->tx_ring[entry].status) & 0x00010000))
                    break;
                skb = np->tx_skbuff[entry];
                /* Free the original skb. */
                pci_unmap_single(np->pci_dev,
                    np->tx_ring[entry].frag[0].addr,
                    skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(np->tx_skbuff[entry]);
                np->tx_skbuff[entry] = 0;
                np->tx_ring[entry].frag[0].addr = 0;
                np->tx_ring[entry].frag[0].length = 0;
            }
            spin_unlock(&np->lock);
        }

        if (netif_queue_stopped(dev) &&
            np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
            /* The ring is no longer full, clear the busy flag. */
            netif_wake_queue(dev);
        }
        /* Abnormal error summary / uncommon event handlers. */
        if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
            netdev_error(dev, intr_status);
    } while (0);

    if (netif_msg_intr(np))
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, readw(ioaddr + IntrStatus));
    writel(5000, ioaddr + DownCounter);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
    return intr_handled;
#endif
}

static void rx_poll(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct netdev_private *np = dev->priv;
    int entry = np->cur_rx % RX_RING_SIZE;
    int boguscnt = np->budget;
    long ioaddr = dev->base_addr;
    int received = 0;

    /* If EOP is set on the next entry, it's a new packet.  Send it up. */
    while (1) {
        struct netdev_desc *desc = &(np->rx_ring[entry]);
        u32 frame_status = le32_to_cpu(desc->status);
        int pkt_len;

        if (--boguscnt < 0) {
            goto not_done;
        }
        if (!(frame_status & DescOwn))
            break;
        pkt_len = frame_status & 0x1fff;    /* Chip omits the CRC. */
        if (netif_msg_rx_status(np))
            printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
                   frame_status);