tc35815.c
	spin_lock_irqsave(&lp->lock, flags);

	/* failsafe... (handle txdone now if half of FDs are used) */
	if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
	    TX_FD_NUM / 2)
		tc35815_txdone(dev);

	if (netif_msg_pktdata(lp))
		print_eth(skb->data);
#ifdef DEBUG
	if (lp->tx_skbs[lp->tfd_start].skb) {
		printk("%s: tx_skbs conflict.\n", dev->name);
		panic_queues(dev);
	}
#else
	BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
#endif
	lp->tx_skbs[lp->tfd_start].skb = skb;
	lp->tx_skbs[lp->tfd_start].skb_dma =
		pci_map_single(lp->pci_dev, skb->data, skb->len,
			       PCI_DMA_TODEVICE);

	/* add to ring */
	txfd = &lp->tfd_base[lp->tfd_start];
	txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
	txfd->bd.BDCtl = cpu_to_le32(skb->len);
	txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);
	txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT));

	if (lp->tfd_start == lp->tfd_end) {
		struct tc35815_regs __iomem *tr =
			(struct tc35815_regs __iomem *)dev->base_addr;
		/* Start DMA Transmitter. */
		txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
#ifdef GATHER_TXINT
		txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
#endif
		if (netif_msg_tx_queued(lp)) {
			printk("%s: starting TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
		tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
	} else {
		txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
		if (netif_msg_tx_queued(lp)) {
			printk("%s: queueing TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
	}
	lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
	dev->trans_start = jiffies;

	/* If we just used up the very last entry in the
	 * TX ring on this device, tell the queueing
	 * layer to send no more.
	 */
	if (tc35815_tx_full(dev)) {
		if (netif_msg_tx_queued(lp))
			printk(KERN_WARNING "%s: TxFD Exhausted.\n",
			       dev->name);
		netif_stop_queue(dev);
	}

	/* When the TX completion hw interrupt arrives, this
	 * is when the transmit statistics are updated.
	 */

	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}

#define FATAL_ERROR_INT \
	(Int_IntPCI | Int_DmParErr | Int_IntNRAbt)
static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
{
	static int count;
	printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
	       dev->name, status);
	if (status & Int_IntPCI)
		printk(" IntPCI");
	if (status & Int_DmParErr)
		printk(" DmParErr");
	if (status & Int_IntNRAbt)
		printk(" IntNRAbt");
	printk("\n");
	if (count++ > 100)
		panic("%s: Too many fatal errors.", dev->name);
	printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
	/* Try to restart the adaptor. */
	tc35815_restart(dev);
}

#ifdef TC35815_NAPI
static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
#else
static int tc35815_do_interrupt(struct net_device *dev, u32 status)
#endif
{
	struct tc35815_local *lp = dev->priv;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int ret = -1;

	/* Fatal errors... */
	if (status & FATAL_ERROR_INT) {
		tc35815_fatal_error_interrupt(dev, status);
		return 0;
	}
	/* recoverable errors */
	if (status & Int_IntFDAEx) {
		/* disable FDAEx int. (until we make rooms...) */
		tc_writel(tc_readl(&tr->Int_En) & ~Int_FDAExEn, &tr->Int_En);
		printk(KERN_WARNING
		       "%s: Free Descriptor Area Exhausted (%#x).\n",
		       dev->name, status);
		lp->stats.rx_dropped++;
		ret = 0;
	}
	if (status & Int_IntBLEx) {
		/* disable BLEx int. (until we make rooms...) */
		tc_writel(tc_readl(&tr->Int_En) & ~Int_BLExEn, &tr->Int_En);
		printk(KERN_WARNING "%s: Buffer List Exhausted (%#x).\n",
		       dev->name, status);
		lp->stats.rx_dropped++;
		ret = 0;
	}
	if (status & Int_IntExBD) {
		printk(KERN_WARNING
		       "%s: Excessive Buffer Descriptors (%#x).\n",
		       dev->name, status);
		lp->stats.rx_length_errors++;
		ret = 0;
	}

	/* normal notification */
	if (status & Int_IntMacRx) {
		/* Got a packet(s). */
#ifdef TC35815_NAPI
		ret = tc35815_rx(dev, limit);
#else
		tc35815_rx(dev);
		ret = 0;
#endif
		lp->lstats.rx_ints++;
	}
	if (status & Int_IntMacTx) {
		/* Transmit complete. */
		lp->lstats.tx_ints++;
		tc35815_txdone(dev);
		netif_wake_queue(dev);
		ret = 0;
	}
	return ret;
}

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
#ifdef TC35815_NAPI
	u32 dmactl = tc_readl(&tr->DMA_Ctl);

	if (!(dmactl & DMA_IntMask)) {
		/* disable interrupts */
		tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
		if (netif_rx_schedule_prep(dev, &lp->napi))
			__netif_rx_schedule(dev, &lp->napi);
		else {
			printk(KERN_ERR "%s: interrupt taken in poll\n",
			       dev->name);
			BUG();
		}
		(void)tc_readl(&tr->Int_Src);	/* flush */
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
#else
	int handled;
	u32 status;

	spin_lock(&lp->lock);
	status = tc_readl(&tr->Int_Src);
	tc_writel(status, &tr->Int_Src);	/* write to clear */
	handled = tc35815_do_interrupt(dev, status);
	(void)tc_readl(&tr->Int_Src);	/* flush */
	spin_unlock(&lp->lock);
	return IRQ_RETVAL(handled >= 0);
#endif /* TC35815_NAPI */
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tc35815_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	tc35815_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/* We have a good packet(s), get it/them out of the buffers. */
#ifdef TC35815_NAPI
static int
tc35815_rx(struct net_device *dev, int limit)
#else
static void
tc35815_rx(struct net_device *dev)
#endif
{
	struct tc35815_local *lp = dev->priv;
	unsigned int fdctl;
	int i;
	int buf_free_count = 0;
	int fd_free_count = 0;
#ifdef TC35815_NAPI
	int received = 0;
#endif

	while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
		int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
		int pkt_len = fdctl & FD_FDLength_MASK;
		int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
#ifdef DEBUG
		struct RxFD *next_rfd;
#endif
#if (RX_CTL_CMD & Rx_StripCRC) == 0
		pkt_len -= 4;
#endif

		if (netif_msg_rx_status(lp))
			dump_rxfd(lp->rfd_cur);
		if (status & Rx_Good) {
			struct sk_buff *skb;
			unsigned char *data;
			int cur_bd;
#ifdef TC35815_USE_PACKEDBUFFER
			int offset;
#endif

#ifdef TC35815_NAPI
			if (--limit < 0)
				break;
#endif
#ifdef TC35815_USE_PACKEDBUFFER
			BUG_ON(bd_count > 2);
			skb = dev_alloc_skb(pkt_len + 2); /* +2: for reserve */
			if (skb == NULL) {
				printk(KERN_NOTICE
				       "%s: Memory squeeze, dropping packet.\n",
				       dev->name);
				lp->stats.rx_dropped++;
				break;
			}
			skb_reserve(skb, 2);	/* 16 bit alignment */

			data = skb_put(skb, pkt_len);

			/* copy from receive buffer */
			cur_bd = 0;
			offset = 0;
			while (offset < pkt_len && cur_bd < bd_count) {
				int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
					BD_BuffLength_MASK;
				dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData);
				void *rxbuf = rxbuf_bus_to_virt(lp, dma);
				if (offset + len > pkt_len)
					len = pkt_len - offset;
#ifdef TC35815_DMA_SYNC_ONDEMAND
				pci_dma_sync_single_for_cpu(lp->pci_dev,
							    dma, len,
							    PCI_DMA_FROMDEVICE);
#endif
				memcpy(data + offset, rxbuf, len);
#ifdef TC35815_DMA_SYNC_ONDEMAND
				pci_dma_sync_single_for_device(lp->pci_dev,
							       dma, len,
							       PCI_DMA_FROMDEVICE);
#endif
				offset += len;
				cur_bd++;
			}
#else /* TC35815_USE_PACKEDBUFFER */
			BUG_ON(bd_count > 1);
			cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
				  & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
#ifdef DEBUG
			if (cur_bd >= RX_BUF_NUM) {
				printk("%s: invalid BDID.\n", dev->name);
				panic_queues(dev);
			}
			BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
			       (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3));
			if (!lp->rx_skbs[cur_bd].skb) {
				printk("%s: NULL skb.\n", dev->name);
				panic_queues(dev);
			}
#else
			BUG_ON(cur_bd >= RX_BUF_NUM);
#endif
			skb = lp->rx_skbs[cur_bd].skb;
			prefetch(skb->data);
			lp->rx_skbs[cur_bd].skb = NULL;
			lp->fbl_count--;
			pci_unmap_single(lp->pci_dev,
					 lp->rx_skbs[cur_bd].skb_dma,
					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			if (!HAVE_DMA_RXALIGN(lp))
				memmove(skb->data, skb->data - 2, pkt_len);
			data = skb_put(skb, pkt_len);
#endif /* TC35815_USE_PACKEDBUFFER */
			if (netif_msg_pktdata(lp))
				print_eth(data);
			skb->protocol = eth_type_trans(skb, dev);
#ifdef TC35815_NAPI
			netif_receive_skb(skb);
			received++;
#else
			netif_rx(skb);
#endif
			dev->last_rx = jiffies;
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pkt_len;
		} else {
			lp->stats.rx_errors++;
			printk(KERN_DEBUG "%s: Rx error (status %x)\n",
			       dev->name, status & Rx_Stat_Mask);
			/* WORKAROUND: LongErr and CRCErr mean Overflow. */
			if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
				status &= ~(Rx_LongErr|Rx_CRCErr);
				status |= Rx_Over;
			}
			if (status & Rx_LongErr)
				lp->stats.rx_length_errors++;
			if (status & Rx_Over)
				lp->stats.rx_fifo_errors++;
			if (status & Rx_CRCErr)
				lp->stats.rx_crc_errors++;
			if (status & Rx_Align)
				lp->stats.rx_frame_errors++;
		}

		if (bd_count > 0) {
			/* put Free Buffer back to controller */
			int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
			unsigned char id =
				(bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
#ifdef DEBUG
			if (id >= RX_BUF_NUM) {
				printk("%s: invalid BDID.\n", dev->name);
				panic_queues(dev);
			}
#else
			BUG_ON(id >= RX_BUF_NUM);
#endif
			/* free old buffers */
#ifdef TC35815_USE_PACKEDBUFFER
			while (lp->fbl_curid != id)
#else
			while (lp->fbl_count < RX_BUF_NUM)
#endif
			{
#ifdef TC35815_USE_PACKEDBUFFER
				unsigned char curid = lp->fbl_curid;
#else
				unsigned char curid =
					(id + 1 + lp->fbl_count) % RX_BUF_NUM;
#endif
				struct BDesc *bd = &lp->fbl_ptr->bd[curid];
#ifdef DEBUG
				bdctl = le32_to_cpu(bd->BDCtl);
				if (bdctl & BD_CownsBD) {
					printk("%s: Freeing invalid BD.\n",
					       dev->name);
					panic_queues(dev);
				}
#endif
				/* pass BD to controller */
#ifndef TC35815_USE_PACKEDBUFFER
				if (!lp->rx_skbs[curid].skb) {
					lp->rx_skbs[curid].skb =
						alloc_rxbuf_skb(dev,
								lp->pci_dev,
								&lp->rx_skbs[curid].skb_dma);
					if (!lp->rx_skbs[curid].skb)
						break; /* try on next reception */
					bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
				}
#endif /* TC35815_USE_PACKEDBUFFER */
				/* Note: BDLength was modified by chip. */
				bd->BDCtl = cpu_to_le32(BD_CownsBD |
							(curid << BD_RxBDID_SHIFT) |
							RX_BUF_SIZE);
#ifdef TC35815_USE_PACKEDBUFFER
				lp->fbl_curid = (curid + 1) % RX_BUF_NUM;
				if (netif_msg_rx_status(lp)) {
					printk("%s: Entering new FBD %d\n",
					       dev->name, lp->fbl_curid);
					dump_frfd(lp->fbl_ptr);
				}
#else
				lp->fbl_count++;
#endif
				buf_free_count++;
			}
		}

		/* put RxFD back to controller */
#ifdef DEBUG
		next_rfd = fd_bus_to_virt(lp,
					  le32_to_cpu(lp->rfd_cur->fd.FDNext));
		if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
			printk("%s: RxFD FDNext invalid.\n", dev->name);
			panic_queues(dev);
		}
#endif
		for (i = 0; i < (bd_count + 1) / 2 + 1; i++) {
			/* pass FD to controller */
#ifdef DEBUG
			lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead);
#else
			lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL);
#endif
			lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
			lp->rfd_cur++;
			fd_free_count++;
		}
		if (lp->rfd_cur > lp->rfd_limit)
			lp->rfd_cur = lp->rfd_base;
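
/*
 * For reference: a minimal, standalone sketch of the TX frame-descriptor
 * ring arithmetic used in the transmit path above. The helper name
 * tx_ring_in_use() and the example indices are hypothetical, not part of
 * the driver; this only illustrates the (start + N - end) % N occupancy
 * count and the "reclaim when over half full" failsafe, assuming
 * tfd_start is the producer index and tfd_end the consumer index.
 */
#include <stdio.h>

#define TX_FD_NUM 64	/* assumed ring size for this sketch */

/* Descriptors currently queued to the chip and not yet reclaimed. */
static unsigned int tx_ring_in_use(unsigned int start, unsigned int end)
{
	/* Adding TX_FD_NUM before the modulo keeps the count
	 * non-negative when the producer index has wrapped. */
	return (start + TX_FD_NUM - end) % TX_FD_NUM;
}

int main(void)
{
	unsigned int tfd_start = 40, tfd_end = 5;	/* example indices */
	unsigned int used = tx_ring_in_use(tfd_start, tfd_end);

	printf("in use: %u of %u\n", used, TX_FD_NUM);
	if (used > TX_FD_NUM / 2)
		printf("failsafe: the driver would run txdone here\n");
	return 0;
}

/*
 * Note that start == end is ambiguous under this scheme (empty or full),
 * which is why the driver never lets the ring fill completely: the
 * tc35815_tx_full() check above stops the queue one step early.
 */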