/* dscc4.c */
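/*
 * Siemens DSCC4 (PEB 20534) synchronous serial/HDLC driver. This
 * portion covers the RX buffer refill and completion paths, the SCC
 * command acknowledgement helpers, PCI probe/teardown and the default
 * register setup. The listing opens mid-function, in the tail of
 * try_get_rx_skb().
 */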
	skb = dev_alloc_skb(len);
	dpriv->rx_skbuff[dirty] = skb;
	if (skb) {
		skb->protocol = hdlc_type_trans(skb, dev);
		rx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
					     len, PCI_DMA_FROMDEVICE);
	} else {
		rx_fd->data = 0;	/* was "(u32) NULL": no buffer mapped */
		ret = -1;
	}
	return ret;
}

/*
 * IRQ/thread/whatever safe
 */
static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
			      struct net_device *dev, char *msg)
{
	s8 i = 0;

	do {
		if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
			printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
			       msg, i);
			goto done;
		}
		schedule_timeout_uninterruptible(10);
		rmb();
	} while (++i > 0);	/* s8 wraps negative after 127 tries */
	printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
	return (i >= 0) ? i : -EAGAIN;
}

static int dscc4_do_action(struct net_device *dev, char *msg)
{
	void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
	s16 i = 0;

	writel(Action, ioaddr + GCMDR);
	ioaddr += GSTAR;
	do {
		u32 state = readl(ioaddr);

		if (state & ArAck) {
			printk(KERN_DEBUG "%s: %s ack\n", dev->name, msg);
			writel(ArAck, ioaddr);
			goto done;
		} else if (state & Arf) {
			printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
			writel(Arf, ioaddr);
			i = -1;
			goto done;
		}
		rmb();
	} while (++i > 0);	/* s16 bounds the polling loop */
	printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
	return i;
}

static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
	int cur = dpriv->iqtx_current % IRQ_RING_SIZE;
	s8 i = 0;

	do {
		if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
		    (dpriv->iqtx[cur] & Xpr))
			break;
		smp_rmb();
		schedule_timeout_uninterruptible(10);
	} while (++i > 0);

	return (i >= 0) ? i : -EAGAIN;
}

#if 0 /* dscc4_{rx/tx}_reset are both unreliable - more tweak needed */
static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv,
			   struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
	/* Cf errata DS5 p.6 */
	writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
	readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
	writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
	writel(Action, dpriv->base_addr + GCMDR);
	spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
}
#endif

#if 0
static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv,
			   struct net_device *dev)
{
	u16 i = 0;

	/* Cf errata DS5 p.7 */
	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
	scc_writel(0x00050000, dpriv, dev, CCR2);
	/*
	 * Must be longer than the time required to fill the fifo.
	 */
	while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
		udelay(1);
		wmb();
	}

	writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
	if (dscc4_do_action(dev, "Rdt") < 0)
		printk(KERN_ERR "%s: Tx reset failed\n", dev->name);
}
#endif
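/*
 * RX completion path: the controller appends a status byte to each
 * received frame; dscc4_rx_skb() below classifies it via the Frame*
 * flags before handing good frames to the HDLC layer and refilling
 * the ring through try_get_rx_skb().
 */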
/* TODO: (ab)use this function to refill a completely depleted RX ring. */
static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
				struct net_device *dev)
{
	struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current % RX_RING_SIZE;
	struct net_device_stats *stats = hdlc_stats(dev);
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct sk_buff *skb;
	int pkt_len;

	skb = dpriv->rx_skbuff[dpriv->rx_current++ % RX_RING_SIZE];
	if (!skb) {
		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__);
		goto refill;
	}
	/* The last byte of the buffer holds the status flags; strip it. */
	pkt_len = TO_SIZE(rx_fd->state2);
	pci_unmap_single(pdev, rx_fd->data, RX_MAX(HDLC_MAX_MRU),
			 PCI_DMA_FROMDEVICE);
	if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
		stats->rx_packets++;
		stats->rx_bytes += pkt_len;
		skb_put(skb, pkt_len);
		if (netif_running(dev))
			skb->protocol = hdlc_type_trans(skb, dev);
		skb->dev->last_rx = jiffies;
		netif_rx(skb);
	} else {
		/*
		 * Error classification. The original tested
		 * "!(status | ~mask)", which can never be true; assuming
		 * FrameVfr means "valid frame" and FrameRab "aborted",
		 * the "&"-based tests below are the intended checks.
		 */
		if (skb->data[pkt_len] & FrameRdo)
			stats->rx_fifo_errors++;
		else if (!(skb->data[pkt_len] & FrameCrc))
			stats->rx_crc_errors++;
		else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) !=
			 FrameVfr)
			stats->rx_length_errors++;
		else
			stats->rx_errors++;
		dev_kfree_skb_irq(skb);
	}
refill:
	while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
		if (try_get_rx_skb(dpriv, dev) < 0)
			break;
		dpriv->rx_dirty++;
	}
	dscc4_rx_update(dpriv, dev);
	rx_fd->state2 = 0x00000000;
	rx_fd->end = 0xbabeface;
}

static void dscc4_free1(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	for (i = 0; i < dev_per_card; i++)
		unregister_hdlc_device(dscc4_to_dev(root + i));

	pci_set_drvdata(pdev, NULL);

	for (i = 0; i < dev_per_card; i++)
		free_netdev(root[i].dev);
	kfree(root);
	kfree(ppriv);
}

static int __devinit dscc4_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct dscc4_pci_priv *priv;
	struct dscc4_dev_priv *dpriv;
	void __iomem *ioaddr;
	int i, rc;

	printk(KERN_DEBUG "%s", version);

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_region(pdev, 0, "registers");
	if (rc < 0) {
		printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
		       DRV_NAME);
		goto err_disable_0;
	}
	rc = pci_request_region(pdev, 1, "LBI interface");
	if (rc < 0) {
		printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
		       DRV_NAME);
		goto err_free_mmio_region_1;
	}

	ioaddr = ioremap(pci_resource_start(pdev, 0),
			 pci_resource_len(pdev, 0));
	if (!ioaddr) {
		printk(KERN_ERR "%s: cannot remap MMIO region %lx @ %lx\n",
		       DRV_NAME, pci_resource_len(pdev, 0),
		       pci_resource_start(pdev, 0));
		rc = -EIO;
		goto err_free_mmio_regions_2;
	}
	printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d\n",
	       pci_resource_start(pdev, 0),
	       pci_resource_start(pdev, 1), pdev->irq);

	/* Cf errata DS5 p.2 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
	pci_set_master(pdev);

	rc = dscc4_found1(pdev, ioaddr);
	if (rc < 0)
		goto err_iounmap_3;

	priv = pci_get_drvdata(pdev);

	rc = request_irq(pdev->irq, dscc4_irq, SA_SHIRQ, DRV_NAME, priv->root);
	if (rc < 0) {
		printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
		goto err_release_4;
	}

	/* power up/little endian/dma core controlled via lrda/ltda */
	writel(0x00000001, ioaddr + GMODE);
	/* Shared interrupt queue */
	{
		u32 bits;

		bits = (IRQ_RING_SIZE >> 5) - 1;
		bits |= bits << 4;
		bits |= bits << 8;
		bits |= bits << 16;
		writel(bits, ioaddr + IQLENR0);
	}
	/* Global interrupt queue */
	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);

	/*
	 * Set the error code before the allocations below: the original
	 * assigned rc = -ENOMEM only after the iqcfg test, so an iqcfg
	 * allocation failure would have returned 0 (success).
	 */
	rc = -ENOMEM;

	priv->iqcfg = (u32 *) pci_alloc_consistent(pdev,
		IRQ_RING_SIZE*sizeof(u32), &priv->iqcfg_dma);
	if (!priv->iqcfg)
		goto err_free_irq_5;
	writel(priv->iqcfg_dma, ioaddr + IQCFG);
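	/*
	 * Queue lengths programmed above are encoded as (entries/32 - 1);
	 * the nibble replication in IQLENR0 appears to set one such field
	 * per SCC TX/RX interrupt queue, while IQLENR1 carries the config
	 * queue length at bit 20.
	 */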
	/*
	 * SCC 0-3 private rx/tx irq structures
	 * IQRX/TXi needs to be set soon. Learned it the hard way...
	 */
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqtx = (u32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
		if (!dpriv->iqtx)
			goto err_free_iqtx_6;
		writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
	}
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqrx = (u32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
		if (!dpriv->iqrx)
			goto err_free_iqrx_7;
		writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
	}

	/* Cf application hint. Beware of hard-lock condition on threshold. */
	writel(0x42104000, ioaddr + FIFOCR1);
	//writel(0x9ce69800, ioaddr + FIFOCR2);
	writel(0xdef6d800, ioaddr + FIFOCR2);
	//writel(0x11111111, ioaddr + FIFOCR4);
	writel(0x18181818, ioaddr + FIFOCR4);
	// FIXME: should depend on the chipset revision
	writel(0x0000000e, ioaddr + FIFOCR3);

	writel(0xff200001, ioaddr + GCMDR);

	rc = 0;
out:
	return rc;

err_free_iqrx_7:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
	}
	i = dev_per_card;
err_free_iqtx_6:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
			    priv->iqcfg_dma);
err_free_irq_5:
	free_irq(pdev->irq, priv->root);
err_release_4:
	dscc4_free1(pdev);
err_iounmap_3:
	iounmap(ioaddr);
err_free_mmio_regions_2:
	pci_release_region(pdev, 1);
err_free_mmio_region_1:
	pci_release_region(pdev, 0);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}
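/*
 * Note that the numbered labels above unwind strictly in reverse order
 * of acquisition, so a failure at any step releases exactly what was
 * set up before it.
 */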
/*
 * Let's hope the default values are decent enough to protect my
 * feet from the user's gun - Ueimor
 */
static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	/* No interrupts, SCC core disabled. Let's relax */
	scc_writel(0x00000000, dpriv, dev, CCR0);

	scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);

	/*
	 * No address recognition/crc-CCITT/cts enabled
	 * Shared flags transmission disabled - cf errata DS5 p.11
	 * Carrier detect disabled - cf errata p.14
	 * FIXME: carrier detection/polarity may be handled more gracefully.
	 */
	scc_writel(0x02408000, dpriv, dev, CCR1);

	/* crc not forwarded - Cf errata DS5 p.11 */
	scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
	// crc forwarded
	//scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2);
}

static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
{
	int ret = 0;

	if ((hz < 0) || (hz > DSCC4_HZ_MAX))
		ret = -EOPNOTSUPP;
	else
		dpriv->pci_priv->xtal_hz = hz;

	return ret;
}

static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i, ret = -ENOMEM;

	root = kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL);
	if (!root) {
		printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
		goto err_out;
	}
	memset(root, 0, dev_per_card*sizeof(*root));

	for (i = 0; i < dev_per_card; i++) {
		root[i].dev = alloc_hdlcdev(root + i);
		if (!root[i].dev)
			goto err_free_dev;
	}

	ppriv = kmalloc(sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		printk(KERN_ERR "%s: can't allocate private data\n",
		       DRV_NAME);
		goto err_free_dev;
	}
	memset(ppriv, 0, sizeof(struct dscc4_pci_priv));

	ppriv->root = root;
	spin_lock_init(&ppriv->lock);

	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;
		struct net_device *d = dscc4_to_dev(dpriv);
		hdlc_device *hdlc = dev_to_hdlc(d);

		d->base_addr = (unsigned long)ioaddr;
		d->init = NULL;
		d->irq = pdev->irq;
		d->open = dscc4_open;
		d->stop = dscc4_close;
		d->set_multicast_list = NULL;
		d->do_ioctl = dscc4_ioctl;
		d->tx_timeout = dscc4_tx_timeout;
		d->watchdog_timeo = TX_TIMEOUT;
		SET_MODULE_OWNER(d);
		SET_NETDEV_DEV(d, &pdev->dev);

		dpriv->dev_id = i;
		dpriv->pci_priv = ppriv;
		dpriv->base_addr = ioaddr;
		spin_lock_init(&dpriv->lock);

		hdlc->xmit = dscc4_start_xmit;
		hdlc->attach = dscc4_hdlc_attach;

		dscc4_init_registers(dpriv, d);
		dpriv->parity = PARITY_CRC16_PR0_CCITT;
		dpriv->encoding = ENCODING_NRZ;

		ret = dscc4_init_ring(d);
		if (ret < 0)
			goto err_unregister;

		ret = register_hdlc_device(d);
		if (ret < 0) {
			printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
			dscc4_release_ring(dpriv);
			goto err_unregister;
		}
	}

	ret = dscc4_set_quartz(root, quartz);
	if (ret < 0)
		goto err_unregister;

	pci_set_drvdata(pdev, ppriv);
	return ret;

err_unregister:
	while (i-- > 0) {
		dscc4_release_ring(root + i);
		unregister_hdlc_device(dscc4_to_dev(root + i));
	}
	kfree(ppriv);
	i = dev_per_card;
err_free_dev:
	while (i-- > 0)
		free_netdev(root[i].dev);
	kfree(root);
err_out:
	return ret;
}

/* FIXME: get rid of the unneeded code */
static void dscc4_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
//	struct dscc4_pci_priv *ppriv;

	goto done;
done:
	dpriv->timer.expires = jiffies + TX_TIMEOUT;
	add_timer(&dpriv->timer);
}

static void dscc4_tx_timeout(struct net_device *dev)
{
	/* FIXME: something is missing there */
}

static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
{
	sync_serial_settings *settings = &dpriv->settings;

	if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
		struct net_device *dev = dscc4_to_dev(dpriv);

		printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
		return -1;
	}
	return 0;
}
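/*
 * dscc4_pci_reset() below begins by saving the first 64 bytes of PCI
 * config space (16 dwords) into dscc4_pci_config_store, presumably so
 * it can be restored once #RST has been pulsed through the GPIO port.
 */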
#ifdef CONFIG_DSCC4_PCI_RST
/*
 * Some DSCC4-based cards wire the GPIO port and the PCI #RST pin
 * together, which provides a safe way to reset the asic without
 * rebooting the whole machine.
 *
 * This code doesn't need to be efficient. Keep It Simple
 */
static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
{
	int i;

	down(&dscc4_sem);
	for (i = 0; i < 16; i++)
		pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);

	/* Maximal LBI clock divider (who cares ?) and whole GPIO range. */
	writel(0x001c0000, ioaddr + GMODE);
	/* Configure GPIO port as output */
	writel(0x0000ffff, ioaddr + GPDIR);
	/* Disable interruption */
	writel(0x0000ffff, ioaddr + GPIM);

	writel(0x0000ffff, ioaddr + GPDATA);
	writel(0x00000000, ioaddr + GPDATA);

	/* Flush posted writes */
	readl(ioaddr + GSTAR);
	schedule_timeout_uninterruptible(10);