dscc4.c
		goto err;

	if ((ret = hdlc_open(hdlc)))
		goto err;

	MOD_INC_USE_COUNT;

	ppriv = dpriv->pci_priv;

	if ((ret = dscc4_init_ring(dev)))
		goto err_out;

	/* IDT+IDR during XPR */
	dpriv->flags = NeedIDR | NeedIDT;

	/*
	 * The following is a bit paranoid...
	 *
	 * NB: the datasheet "...CEC will stay active if the SCC is in
	 * power-down mode or..." and CCR2.RAC = 1 are two different
	 * situations.
	 */
	if (scc_readl_star(dpriv, dev) & SccBusy) {
		printk(KERN_ERR "%s busy. Try later\n", dev->name);
		ret = -EAGAIN;
		goto err_free_ring;
	} else
		printk(KERN_INFO "%s: available. Good\n", dev->name);

	/* Posted write is flushed in the wait_ack loop */
	scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);

	if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
		goto err_free_ring;

	/*
	 * I would expect XPR near CE completion (before ? after ?).
	 * At worst, this code won't see a late XPR and people
	 * will have to re-issue an ifconfig (this is harmless).
	 * WARNING, a really missing XPR usually means a hardware
	 * reset is needed. Suggestions anyone ?
	 */
	if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
		printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR");
		goto err_free_ring;
	}

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Open");

	netif_start_queue(dev);

	init_timer(&dpriv->timer);
	dpriv->timer.expires = jiffies + 10*HZ;
	dpriv->timer.data = (unsigned long)dev;
	dpriv->timer.function = &dscc4_timer;
	add_timer(&dpriv->timer);
	netif_carrier_on(dev);

	return 0;

err_free_ring:
	dscc4_release_ring(dpriv);
err_out:
	hdlc_close(hdlc);
	MOD_DEC_USE_COUNT;
err:
	return ret;
}

#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/* FIXME: it's gonna be easy (TM), for sure */
}
#endif /* DSCC4_POLLING */

static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
	struct TxFD *tx_fd;
	int next;

	next = dpriv->tx_current%TX_RING_SIZE;
	dpriv->tx_skbuff[next] = skb;
	tx_fd = dpriv->tx_fd + next;
	tx_fd->state = FrameEnd | TO_STATE(skb->len);
	tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
				     PCI_DMA_TODEVICE);
	tx_fd->complete = 0x00000000;
	tx_fd->jiffies = jiffies;
	mb();

#ifdef DSCC4_POLLING
	spin_lock(&dpriv->lock);
	while (dscc4_tx_poll(dpriv, dev));
	spin_unlock(&dpriv->lock);
#endif
	dev->trans_start = jiffies;

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Xmit");

	/* To be cleaned/optimized. Later, ok ? */
	if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
		netif_stop_queue(dev);

	if (dscc4_tx_quiescent(dpriv, dev))
		dscc4_do_tx(dpriv, dev);

	return 0;
}
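/*
 * A worked pass through the queue-full test above, assuming for
 * illustration TX_RING_SIZE == 32 (the define itself is outside this
 * excerpt): with tx_dirty == 5 and tx_current bumped to 37, the
 * difference is exactly 32, so (37 - 5) % TX_RING_SIZE == 0 and the
 * queue is stopped - the producer index has lapped the consumer by a
 * full ring. dscc4_tx_irq() wakes the queue again once completed
 * descriptors are reclaimed.
 */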
static int dscc4_close(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	hdlc_device *hdlc = dev_to_hdlc(dev);
	unsigned long flags;

	del_timer_sync(&dpriv->timer);
	netif_stop_queue(dev);

	spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
	dscc4_rx_reset(dpriv, dev);
	spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);

	dscc4_tx_reset(dpriv, dev);

	hdlc_close(hdlc);
	dscc4_release_ring(dpriv);

	MOD_DEC_USE_COUNT;

	return 0;
}

static inline int dscc4_check_clock_ability(int port)
{
	int ret = 0;

#ifdef CONFIG_DSCC4_CLOCK_ON_TWO_PORTS_ONLY
	if (port >= 2)
		ret = -1;
#endif
	return ret;
}

static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	int ret = -1;
	u32 brr;

	*state &= ~Ccr0ClockMask;
	if (*bps) { /* Clock generated - required for DCE */
		u32 n = 0, m = 0, divider;
		int xtal;

		xtal = dpriv->pci_priv->xtal_hz;
		if (!xtal)
			goto done;
		if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
			goto done;

		divider = xtal / *bps;
		if (divider > BRR_DIVIDER_MAX) {
			divider >>= 4;
			*state |= 0x00000036; /* Clock mode 6b (BRG/16) */
		} else
			*state |= 0x00000037; /* Clock mode 7b (BRG) */

		if (divider >> 22) {
			n = 63;
			m = 15;
		} else if (divider) {
			/* Extraction of the 6 highest weighted bits */
			m = 0;
			while (0xffffffc0 & divider) {
				m++;
				divider >>= 1;
			}
			n = divider;
		}
		brr = (m << 8) | n;
		divider = n << m;
		if (!(*state & 0x00000001)) /* Clock mode 6b */
			divider <<= 4;
		*bps = xtal / divider;
	} else {
		/*
		 * External clock - DTE
		 * "state" already reflects Clock mode 0a.
		 * Nothing more to be done
		 */
		brr = 0;
	}
	scc_writel(brr, dpriv, dev, BRR);
	ret = 0;
done:
	return ret;
}
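/*
 * A worked run of the BRR arithmetic in dscc4_set_clock() above, with
 * an illustrative 14.7456 MHz crystal (xtal_hz is board-dependent and
 * not part of this excerpt) and a requested *bps of 9600:
 *
 *   divider = 14745600 / 9600 = 1536, below BRR_DIVIDER_MAX,
 *   so clock mode 7b (plain BRG) is selected;
 *   1536 needs 11 bits, so it is shifted right until it fits in 6:
 *   m = 5, n = 1536 >> 5 = 48, brr = (5 << 8) | 48 = 0x0530;
 *   effective divider = n << m = 48 << 5 = 1536,
 *   *bps = 14745600 / 1536 = 9600 (exact - no rate adjustment).
 *
 * In clock mode 6b the BRG output is further divided by 16, which is
 * why the reported divider gets the extra '<<= 4' in that mode.
 */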
static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	sync_serial_settings *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	const size_t size = sizeof(dpriv->settings);
	int ret = 0;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return -EOPNOTSUPP;

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(line, &dpriv->settings, size))
			return -EFAULT;
		break;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&dpriv->settings, line, size))
			return -EFAULT;
		ret = dscc4_set_iface(dpriv, dev);
		break;

	default:
		ret = hdlc_ioctl(dev, ifr, cmd);
		break;
	}

	return ret;
}

static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
{
	int ret = 0;

	if ((hz < 0) || (hz > DSCC4_HZ_MAX))
		ret = -EOPNOTSUPP;
	else
		dpriv->pci_priv->xtal_hz = hz;

	return ret;
}

static int dscc4_match(struct thingie *p, int value)
{
	int i;

	for (i = 0; p[i].define != -1; i++) {
		if (value == p[i].define)
			break;
	}
	if (p[i].define == -1)
		return -1;
	else
		return i;
}

static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	int ret = -EOPNOTSUPP;
	u32 bps, state;

	bps = settings->clock_rate;
	state = scc_readl(dpriv, CCR0);
	if (dscc4_set_clock(dev, &bps, &state) < 0)
		goto done;
	if (bps) { /* DCE */
		printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
		if (settings->clock_rate != bps) {
			/* Report the old rate before overwriting it */
			printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
			       dev->name, dpriv->settings.clock_rate, bps);
			settings->clock_rate = bps;
		}
	} else { /* DTE */
		state = 0x80001000;
		printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
	}
	scc_writel(state, dpriv, dev, CCR0);
	ret = 0;
done:
	return ret;
}

static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	struct thingie encoding[] = {
		{ ENCODING_NRZ,		0x00000000 },
		{ ENCODING_NRZI,	0x00200000 },
		{ ENCODING_FM_MARK,	0x00400000 },
		{ ENCODING_FM_SPACE,	0x00500000 },
		{ ENCODING_MANCHESTER,	0x00600000 },
		{ -1, 0}
	};
	int i, ret = 0;

	i = dscc4_match(encoding, dpriv->encoding);
	if (i >= 0)
		scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
	else
		ret = -EOPNOTSUPP;

	return ret;
}

static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	u32 state;

	state = scc_readl(dpriv, CCR1);
	if (settings->loopback) {
		printk(KERN_DEBUG "%s: loopback\n", dev->name);
		state |= 0x00000100;
	} else {
		printk(KERN_DEBUG "%s: normal\n", dev->name);
		state &= ~0x00000100;
	}
	scc_writel(state, dpriv, dev, CCR1);

	return 0;
}

static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	struct thingie crc[] = {
		{ PARITY_CRC16_PR0_CCITT,	0x00000010 },
		{ PARITY_CRC16_PR1_CCITT,	0x00000000 },
		{ PARITY_CRC32_PR0_CCITT,	0x00000011 },
		{ PARITY_CRC32_PR1_CCITT,	0x00000001 },
		{ -1, 0} /* terminator: dscc4_match() scans until .define == -1 */
	};
	int i, ret = 0;

	i = dscc4_match(crc, dpriv->parity);
	if (i >= 0)
		scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
	else
		ret = -EOPNOTSUPP;

	return ret;
}

static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	struct {
		int (*action)(struct dscc4_dev_priv *, struct net_device *);
	} *p, do_setting[] = {
		{ dscc4_encoding_setting },
		{ dscc4_clock_setting },
		{ dscc4_loopback_setting },
		{ dscc4_crc_setting },
		{ NULL }
	};
	int ret = 0;

	for (p = do_setting; p->action; p++) {
		if ((ret = p->action(dpriv, dev)) < 0)
			break;
	}

	return ret;
}
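/*
 * Sketch of the configuration flow above, for illustration: a
 * SIOCWANDEV/IF_IFACE_SYNC_SERIAL request with clock_rate == 0 reaches
 * dscc4_set_iface(), which applies encoding, clock, loopback and CRC
 * setup in that order and stops at the first failure. The zero rate
 * sends dscc4_set_clock() down its DTE branch: BRR is cleared and
 * dscc4_clock_setting() rewrites CCR0 as 0x80001000 (external RxClk,
 * clock mode 0a).
 */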
static void dscc4_irq(int irq, void *token, struct pt_regs *ptregs)
{
	struct dscc4_dev_priv *root = token;
	struct dscc4_pci_priv *priv;
	struct net_device *dev;
	u32 ioaddr, state;
	unsigned long flags;
	int i;

	priv = root->pci_priv;
	dev = hdlc_to_dev(&root->hdlc);

	spin_lock_irqsave(&priv->lock, flags);

	ioaddr = dev->base_addr;

	state = readl(ioaddr + GSTAR);
	if (!state)
		goto out;
	writel(state, ioaddr + GSTAR);

	if (state & Arf) {
		printk(KERN_ERR "%s: failure (Arf). Harass the maintainer\n",
		       dev->name);
		goto out;
	}
	state &= ~ArAck;
	if (state & Cfg) {
		if (debug > 0)
			printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & Arf)
			printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
		if (!(state &= ~Cfg))
			goto out;
	}
	if (state & RxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_rx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~RxEvt;
	}
	if (state & TxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_tx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~TxEvt;
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static inline void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
				struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
	u32 state;
	int cur, loop = 0;

try:
	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	state = dpriv->iqtx[cur];
	if (!state) {
		if ((debug > 1) && (loop > 1))
			printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name,
			       loop);
		if (loop && netif_queue_stopped(dev))
			if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
				netif_wake_queue(dev);

		if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
		    !dscc4_tx_done(dpriv))
			dscc4_do_tx(dpriv, dev);
		return;
	}
	loop++;
	dpriv->iqtx[cur] = 0;
	dpriv->iqtx_current++;

	if (state_check(state, dpriv, dev, "Tx") < 0)
		return;

	if (state & SccEvt) {
		if (state & Alls) {
			struct net_device_stats *stats = &dpriv->hdlc.stats;
			struct sk_buff *skb;
			struct TxFD *tx_fd;

			if (debug > 2)
				dscc4_tx_print(dev, dpriv, "Alls");
			/*
			 * DataComplete can't be trusted for Tx completion.
			 * Cf errata DS5 p.8
			 */
			cur = dpriv->tx_dirty%TX_RING_SIZE;
			tx_fd = dpriv->tx_fd + cur;
			skb = dpriv->tx_skbuff[cur];
			if (skb) {