tc35815.c
                                    lp->fd_buf_dma);
                lp->fd_buf = NULL;
                return -ENOMEM;
            }
#else
            lp->rx_skbs[i].skb =
                alloc_rxbuf_skb(dev, lp->pci_dev, &lp->rx_skbs[i].skb_dma);
            if (!lp->rx_skbs[i].skb) {
                while (--i >= 0) {
                    free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
                                   lp->rx_skbs[i].skb_dma);
                    lp->rx_skbs[i].skb = NULL;
                }
                pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
                                    lp->fd_buf, lp->fd_buf_dma);
                lp->fd_buf = NULL;
                return -ENOMEM;
            }
#endif
        }
        printk(KERN_DEBUG "%s: FD buf %p DataBuf", dev->name, lp->fd_buf);
#ifdef TC35815_USE_PACKEDBUFFER
        printk(" DataBuf");
        for (i = 0; i < RX_BUF_NUM; i++)
            printk(" %p", lp->data_buf[i]);
#endif
        printk("\n");
    } else {
        for (i = 0; i < FD_PAGE_NUM; i++) {
            clear_page((void *)((unsigned long)lp->fd_buf + i * PAGE_SIZE));
        }
    }
    fd_addr = (unsigned long)lp->fd_buf;

    /* Free Descriptors (for Receive) */
    lp->rfd_base = (struct RxFD *)fd_addr;
    fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
    for (i = 0; i < RX_FD_NUM; i++) {
        lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
    }
    lp->rfd_cur = lp->rfd_base;
    lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);

    /* Transmit Descriptors */
    lp->tfd_base = (struct TxFD *)fd_addr;
    fd_addr += sizeof(struct TxFD) * TX_FD_NUM;
    for (i = 0; i < TX_FD_NUM; i++) {
        lp->tfd_base[i].fd.FDNext =
            cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1]));
        lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
        lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
    }
    lp->tfd_base[TX_FD_NUM-1].fd.FDNext =
        cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0]));
    lp->tfd_start = 0;
    lp->tfd_end = 0;

    /* Buffer List (for Receive) */
    lp->fbl_ptr = (struct FrFD *)fd_addr;
    lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
    lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
#ifndef TC35815_USE_PACKEDBUFFER
    /*
     * Move all allocated skbs to the head of the rx_skbs[] array.
     * fbl_count might not be RX_BUF_NUM if alloc_rxbuf_skb() in
     * tc35815_rx() had failed.
     */
    lp->fbl_count = 0;
    for (i = 0; i < RX_BUF_NUM; i++) {
        if (lp->rx_skbs[i].skb) {
            if (i != lp->fbl_count) {
                lp->rx_skbs[lp->fbl_count].skb = lp->rx_skbs[i].skb;
                lp->rx_skbs[lp->fbl_count].skb_dma = lp->rx_skbs[i].skb_dma;
            }
            lp->fbl_count++;
        }
    }
#endif
    for (i = 0; i < RX_BUF_NUM; i++) {
#ifdef TC35815_USE_PACKEDBUFFER
        lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]);
#else
        if (i >= lp->fbl_count) {
            lp->fbl_ptr->bd[i].BuffData = 0;
            lp->fbl_ptr->bd[i].BDCtl = 0;
            continue;
        }
        lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->rx_skbs[i].skb_dma);
#endif
        /* BDID is index of FrFD.bd[] */
        lp->fbl_ptr->bd[i].BDCtl =
            cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) | RX_BUF_SIZE);
    }
#ifdef TC35815_USE_PACKEDBUFFER
    lp->fbl_curid = 0;
#endif

    printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
           dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
    return 0;
}
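/*
 * Summary of the descriptor area built by tc35815_init_queues() above,
 * all carved out of the lp->fd_buf consistent-DMA region:
 *
 *   RxFD[RX_FD_NUM]  - receive frame descriptors, initially chip-owned
 *                      (FD_CownsFD); rfd_cur/rfd_limit track the window.
 *   TxFD[TX_FD_NUM]  - transmit frame descriptors linked via FDNext into
 *                      a ring (the last entry points back to entry 0).
 *   FrFD             - the free-buffer list holding RX_BUF_NUM buffer
 *                      descriptors for the receiver.
 *
 * FDSystem == 0xffffffff marks a TxFD with no skb attached; the clear/free
 * routines below rely on that sentinel.
 */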
static void
tc35815_clear_queues(struct net_device *dev)
{
    struct tc35815_local *lp = dev->priv;
    int i;

    for (i = 0; i < TX_FD_NUM; i++) {
        u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
        struct sk_buff *skb =
            fdsystem != 0xffffffff ? lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
        if (lp->tx_skbs[i].skb != skb) {
            printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
            panic_queues(dev);
        }
#else
        BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
        if (skb) {
            pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma,
                             skb->len, PCI_DMA_TODEVICE);
            lp->tx_skbs[i].skb = NULL;
            lp->tx_skbs[i].skb_dma = 0;
            dev_kfree_skb_any(skb);
        }
        lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
    }

    tc35815_init_queues(dev);
}

static void
tc35815_free_queues(struct net_device *dev)
{
    struct tc35815_local *lp = dev->priv;
    int i;

    if (lp->tfd_base) {
        for (i = 0; i < TX_FD_NUM; i++) {
            u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
            struct sk_buff *skb =
                fdsystem != 0xffffffff ? lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
            if (lp->tx_skbs[i].skb != skb) {
                printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
                panic_queues(dev);
            }
#else
            BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
            if (skb) {
                /* unmap before freeing: skb->len is still needed here */
                pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma,
                                 skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb(skb);
                lp->tx_skbs[i].skb = NULL;
                lp->tx_skbs[i].skb_dma = 0;
            }
            lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
        }
    }

    lp->rfd_base = NULL;
    lp->rfd_limit = NULL;
    lp->rfd_cur = NULL;
    lp->fbl_ptr = NULL;

    for (i = 0; i < RX_BUF_NUM; i++) {
#ifdef TC35815_USE_PACKEDBUFFER
        if (lp->data_buf[i]) {
            free_rxbuf_page(lp->pci_dev,
                            lp->data_buf[i], lp->data_buf_dma[i]);
            lp->data_buf[i] = NULL;
        }
#else
        if (lp->rx_skbs[i].skb) {
            free_rxbuf_skb(lp->pci_dev,
                           lp->rx_skbs[i].skb, lp->rx_skbs[i].skb_dma);
            lp->rx_skbs[i].skb = NULL;
        }
#endif
    }
    if (lp->fd_buf) {
        pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
                            lp->fd_buf, lp->fd_buf_dma);
        lp->fd_buf = NULL;
    }
}

static void
dump_txfd(struct TxFD *fd)
{
    printk("TxFD(%p): %08x %08x %08x %08x\n", fd,
           le32_to_cpu(fd->fd.FDNext),
           le32_to_cpu(fd->fd.FDSystem),
           le32_to_cpu(fd->fd.FDStat),
           le32_to_cpu(fd->fd.FDCtl));
    printk("BD: ");
    printk(" %08x %08x",
           le32_to_cpu(fd->bd.BuffData),
           le32_to_cpu(fd->bd.BDCtl));
    printk("\n");
}

static int
dump_rxfd(struct RxFD *fd)
{
    int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK)
                      >> FD_BDCnt_SHIFT;

    if (bd_count > 8)
        bd_count = 8;
    printk("RxFD(%p): %08x %08x %08x %08x\n", fd,
           le32_to_cpu(fd->fd.FDNext),
           le32_to_cpu(fd->fd.FDSystem),
           le32_to_cpu(fd->fd.FDStat),
           le32_to_cpu(fd->fd.FDCtl));
    if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
        return 0;
    printk("BD: ");
    for (i = 0; i < bd_count; i++)
        printk(" %08x %08x",
               le32_to_cpu(fd->bd[i].BuffData),
               le32_to_cpu(fd->bd[i].BDCtl));
    printk("\n");
    return bd_count;
}

#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER)
static void
dump_frfd(struct FrFD *fd)
{
    int i;

    printk("FrFD(%p): %08x %08x %08x %08x\n", fd,
           le32_to_cpu(fd->fd.FDNext),
           le32_to_cpu(fd->fd.FDSystem),
           le32_to_cpu(fd->fd.FDStat),
           le32_to_cpu(fd->fd.FDCtl));
    printk("BD: ");
    for (i = 0; i < RX_BUF_NUM; i++)
        printk(" %08x %08x",
               le32_to_cpu(fd->bd[i].BuffData),
               le32_to_cpu(fd->bd[i].BDCtl));
    printk("\n");
}
#endif

#ifdef DEBUG
static void
panic_queues(struct net_device *dev)
{
    struct tc35815_local *lp = dev->priv;
    int i;

    printk("TxFD base %p, start %u, end %u\n",
           lp->tfd_base, lp->tfd_start, lp->tfd_end);
    printk("RxFD base %p limit %p cur %p\n",
           lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
    printk("FrFD %p\n", lp->fbl_ptr);
    for (i = 0; i < TX_FD_NUM; i++)
        dump_txfd(&lp->tfd_base[i]);
    for (i = 0; i < RX_FD_NUM; i++) {
        int bd_count = dump_rxfd(&lp->rfd_base[i]);
        i += (bd_count + 1) / 2;   /* skip BDs */
    }
    dump_frfd(lp->fbl_ptr);
    panic("%s: Illegal queue state.", dev->name);
}
#endif
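/*
 * Debug helper: print_eth() below dumps the 14-byte Ethernet header of a
 * frame: the six bytes at offset 6, then "=>", the six bytes at offset 0,
 * then the two type/length bytes at offset 12 (i.e. source => destination
 * for a standard header).
 */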
printk("print_eth(%p)\n", add); for (i = 0; i < 6; i++) printk(" %2.2X", (unsigned char) add[i + 6]); printk(" =>"); for (i = 0; i < 6; i++) printk(" %2.2X", (unsigned char) add[i]); printk(" : %2.2X%2.2X\n", (unsigned char) add[12], (unsigned char) add[13]);}static int tc35815_tx_full(struct net_device *dev){ struct tc35815_local *lp = dev->priv; return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end);}static void tc35815_restart(struct net_device *dev){ struct tc35815_local *lp = dev->priv; int pid = lp->phy_addr; int do_phy_reset = 1; del_timer(&lp->timer); /* Kill if running */ if (lp->mii_id[0] == 0x0016 && (lp->mii_id[1] & 0xfc00) == 0xf800) { /* Resetting PHY cause problem on some chip... (SEEQ 80221) */ do_phy_reset = 0; } if (do_phy_reset) { int timeout; tc_mdio_write(dev, pid, MII_BMCR, BMCR_RESET); timeout = 100; while (--timeout) { if (!(tc_mdio_read(dev, pid, MII_BMCR) & BMCR_RESET)) break; udelay(1); } if (!timeout) printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name); } tc35815_chip_reset(dev); tc35815_clear_queues(dev); tc35815_chip_init(dev); /* Reconfigure CAM again since tc35815_chip_init() initialize it. */ tc35815_set_multicast_list(dev);}static void tc35815_tx_timeout(struct net_device *dev){ struct tc35815_local *lp = dev->priv; struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; printk(KERN_WARNING "%s: transmit timed out, status %#x\n", dev->name, tc_readl(&tr->Tx_Stat)); /* Try to restart the adaptor. */ spin_lock_irq(&lp->lock); tc35815_restart(dev); spin_unlock_irq(&lp->lock); lp->stats.tx_errors++; /* If we have space available to accept new transmit * requests, wake up the queueing layer. This would * be the case if the chipset_init() call above just * flushes out the tx queue and empties it. * * If instead, the tx queue is retained then the * netif_wake_queue() call should be placed in the * TX completion interrupt handler of the driver instead * of here. */ if (!tc35815_tx_full(dev)) netif_wake_queue(dev);}/* * Open/initialize the board. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. */static inttc35815_open(struct net_device *dev){ struct tc35815_local *lp = dev->priv; /* * This is used if the interrupt line can turned off (shared). * See 3c503.c for an example of selecting the IRQ at config-time. */ if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, dev->name, dev)) { return -EAGAIN; } del_timer(&lp->timer); /* Kill if running */ tc35815_chip_reset(dev); if (tc35815_init_queues(dev) != 0) { free_irq(dev->irq, dev); return -EAGAIN; }#ifdef TC35815_NAPI napi_enable(&lp->napi);#endif /* Reset the hardware here. Don't forget to set the station address. */ spin_lock_irq(&lp->lock); tc35815_chip_init(dev); spin_unlock_irq(&lp->lock); /* We are now ready to accept transmit requeusts from * the queueing layer of the networking. */ netif_start_queue(dev); return 0;}/* This will only be invoked if your driver is _not_ in XOFF state. * What this means is that you need not check it, and that this * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. 
/* This will only be invoked if your driver is _not_ in XOFF state.
 * What this means is that you need not check it, and that this
 * invariant will hold if you make sure that the netif_*_queue()
 * calls are done at the proper times.
 */
static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct tc35815_local *lp = dev->priv;
    struct TxFD *txfd;
    unsigned long flags;

    /* If some error occurs while trying to transmit this
     * packet, you should return '1' from this function.
     * In such a case you _may not_ do anything to the
     * SKB, it is still owned by the network queueing
     * layer when an error is returned.  This means you
     * may not modify any SKB fields, you may not free
     * the SKB, etc.
     */

    /* This is the most common case for modern hardware.
     * The spinlock protects this code from the TX complete
     * hardware interrupt handler.  Queue flow control is
     * thus managed under this lock as well.
     */