📄 vlsi_ir.c
字号:
/* NOTE(review): this fragment is the tail of a function (presumably the
 * net_device stop/close handler) whose signature lies above this chunk;
 * `ndev`, `idev` and `pdev` are declared there.  It shuts the device
 * down: stop the tx queue, detach IrLAP, quiesce the controller, then
 * release IRQ, ring buffers, DMA area and PCI resources.
 */
	u8 cmd;
	unsigned iobase;

	iobase = ndev->base_addr;
	netif_stop_queue(ndev);

	/* detach from the IrDA stack first so no new traffic arrives */
	if (idev->irlap)
		irlap_close(idev->irlap);
	idev->irlap = NULL;

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);	/* w/c pending + disable further IRQ */
	wmb();
	outw(0, iobase+VLSI_PIO_IRENABLE);
	outw(0, iobase+VLSI_PIO_IRCFG);			/* disable everything */
	wmb();
	/* NOTE(review): IREN is pulsed on and off around the cleared IRCFG -
	 * presumably needed for the chip to latch the new config; confirm
	 * against the VLSI 82C147 datasheet.
	 */
	outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
	mb();						/* ... from now on */
	outw(0, iobase+VLSI_PIO_IRENABLE);
	wmb();

	vlsi_clear_regs(ndev->base_addr);

	vlsi_stop_clock(pdev);
	vlsi_unset_clock(pdev);

	free_irq(ndev->irq,ndev);

	vlsi_free_ringbuf(&idev->rx_ring);
	vlsi_free_ringbuf(&idev->tx_ring);

	/* release the shared descriptor-ring DMA area, if it was allocated */
	if (idev->busaddr)
		pci_free_consistent(idev->pdev,RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
	idev->virtaddr = NULL;
	idev->busaddr = 0;

	/* turn off PCI busmastering for the now-quiet device */
	pci_read_config_byte(pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_byte(pdev, PCI_COMMAND, cmd);

	pci_release_regions(pdev);

	printk(KERN_INFO "%s: device %s stopped\n", __FUNCTION__, ndev->name);

	return 0;
}

/* return the driver-private statistics block for this interface */
static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;

	return &idev->stats;
}

/* hard_start_xmit handler: copy (or SIR-wrap) one skb into the next tx
 * descriptor, honor a pending speed change and the IrDA minimum turnaround
 * time, then activate the entry and - carefully, see the race-window
 * comment below - make sure the controller is actually transmitting.
 * Always consumes the skb and returns 0.
 */
static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	struct vlsi_ring *r;
	unsigned long flags;
	unsigned iobase;
	u8 status;
	u16 config;
	int mtt;
	int len, speed;
	struct timeval now, ready;

	status = 0;

	speed = irda_get_next_speed(skb);

	if (speed != -1 && speed != idev->baud) {
		idev->new_baud = speed;
		if (!skb->len) {
			/* pure speed-change request, no payload: switch now */
			dev_kfree_skb(skb);
			vlsi_set_baud(ndev);
			return 0;
		}
		status = TX_STAT_CLRENTX;  /* stop tx-ring after this frame */
	}

	if (skb->len == 0) {
		printk(KERN_ERR "%s: blocking 0-size packet???\n",
			__FUNCTION__);
		dev_kfree_skb(skb);
		return 0;
	}

	r = &idev->tx_ring;

	/* the queue is stopped when the ring fills, so an active head entry
	 * here means the ring accounting is broken - fail loudly */
	if (rd_is_active(r, r->head))
		BUG();

	if (idev->mode == IFF_SIR) {
		/* SIR framing (incl. CRC) is done in software */
		status |= TX_STAT_DISCRC;
		len = async_wrap_skb(skb, r->buf[r->head].data, XFER_BUF_SIZE);
	}
	else {				/* hw deals with MIR/FIR mode */
		len = skb->len;
		memcpy(r->buf[r->head].data, skb->data, len);
	}

	rd_set_count(r, r->head, len);
	rd_set_addr_status(r, r->head, virt_to_bus(r->buf[r->head].data), status);	/* new entry not yet activated! */

#if 0
	printk(KERN_DEBUG "%s: dump entry %d: %u %02x %08x\n",
		__FUNCTION__, r->head,
		idev->ring_hw[r->head].rd_count,
		(unsigned)idev->ring_hw[r->head].rd_status,
		idev->ring_hw[r->head].rd_addr & 0xffffffff);
	vlsi_reg_debug(iobase,__FUNCTION__);
#endif

	/* let mtt delay pass before we need to acquire the spinlock! */

	if ((mtt = irda_get_mtt(skb)) > 0) {
		/* busy-wait until last_rx + mtt microseconds have elapsed */
		ready.tv_usec = idev->last_rx.tv_usec + mtt;
		ready.tv_sec = idev->last_rx.tv_sec;
		if (ready.tv_usec >= 1000000) {
			ready.tv_usec -= 1000000;
			ready.tv_sec++;		/* IrLAP 1.1: mtt always < 1 sec */
		}
		for(;;) {
			do_gettimeofday(&now);
			if (now.tv_sec > ready.tv_sec
			    || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
				break;
			udelay(100);
		}
	}

/*
 * race window ahead, due to concurrent controller processing!
 *
 * We need to disable IR output in order to switch to TX mode.
 * Better not do this blindly anytime we want to transmit something
 * because TX may already run. However the controller may stop TX
 * at any time when fetching an inactive descriptor or one with
 * CLR_ENTX set. So we switch on TX only, if TX was not running
 * _after_ the new descriptor was activated on the ring. This ensures
 * we will either find TX already stopped or we can be sure, there
 * will be a TX-complete interrupt even if the chip stopped doing
 * TX just after we found it still running. The ISR will then find
 * the non-empty ring and restart TX processing. The enclosing
 * spinlock is required to get serialization with the ISR right.
 */

	iobase = ndev->base_addr;

	spin_lock_irqsave(&idev->lock,flags);

	rd_activate(r, r->head);
	ring_put(r);

	if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
		/* TX not running: set ENTX with IR output disabled, re-enable,
		 * then prompt the controller to fetch the new descriptor */
		outw(0, iobase+VLSI_PIO_IRENABLE);
		config = inw(iobase+VLSI_PIO_IRCFG);
		rmb();
		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
		wmb();
		outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
		mb();
		outw(0, iobase+VLSI_PIO_PROMPT);
		wmb();
	}

	if (r->head == r->tail) {
		/* ring full after this entry - stop the queue until the
		 * ISR completes some descriptors and wakes it again */
		netif_stop_queue(ndev);
		printk(KERN_DEBUG "%s: tx ring full - queue stopped: %d/%d\n",
			__FUNCTION__, r->head, r->tail);
#if 0
		printk(KERN_INFO "%s: dump stalled entry %d: %u %02x %08x\n",
			__FUNCTION__, r->tail,
			r->hw[r->tail].rd_count,
			(unsigned)r->hw[r->tail].rd_status,
			r->hw[r->tail].rd_addr & 0xffffffff);
#endif
		vlsi_reg_debug(iobase,__FUNCTION__);
	}

	spin_unlock_irqrestore(&idev->lock, flags);

	dev_kfree_skb(skb);

	return 0;
}

/* handle the IrDA-specific SIOC* ioctls (speed change request,
 * media-busy notification, "are we receiving" query).
 */
static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	unsigned long flags;
	u16 fifocnt;
	int ret = 0;

	spin_lock_irqsave(&idev->lock,flags);
	switch (cmd) {
		case SIOCSBANDWIDTH:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			/* only record the request; the speed switch itself
			 * happens elsewhere (cf. vlsi_hard_start_xmit) */
			idev->new_baud = irq->ifr_baudrate;
			break;
		case SIOCSMEDIABUSY:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			irda_device_set_media_busy(ndev, TRUE);
			break;
		case SIOCGRECEIVING:
			/* the best we can do: check whether there are any bytes in rx fifo.
			 * The trustable window (in case some data arrives just afterwards)
			 * may be as short as 1usec or so at 4Mbps - no way for future-telling.
			 */
			fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
			irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
			break;
		default:
			printk(KERN_ERR "%s: notsupp - cmd=%04x\n",
				__FUNCTION__, cmd);
			ret = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&idev->lock,flags);

	return ret;
}

/* net_device init hook (run by register_netdev): configure PCI
 * busmastering and the legacy-UART decoder, announce our qos
 * capabilities to the IrDA stack and install the device methods.
 * Returns 0 on success, -1 if the DMA mask cannot be satisfied.
 */
int vlsi_irda_init(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	struct pci_dev *pdev = idev->pdev;
	u8 byte;

	SET_MODULE_OWNER(ndev);

	ndev->irq = pdev->irq;
	ndev->base_addr = pci_resource_start(pdev,0);

	/* PCI busmastering - see include file for details! */

	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)) {
		printk(KERN_ERR "%s: aborting due to PCI BM-DMA address limitations\n",
			__FUNCTION__);
		return -1;
	}
	pci_set_master(pdev);
	/* NOTE(review): dma_mask is narrowed after pci_set_master - the
	 * hardware's master-page scheme apparently needs a different mask
	 * for busmaster transfers; see the header referenced above. */
	pdev->dma_mask = DMA_MASK_MSTRPAGE;
	pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);

	/* we don't use the legacy UART, disable its address decoding */

	pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
	byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
	pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);

	irda_init_max_qos_capabilies(&idev->qos);

	/* the VLSI82C147 does not support 576000! */

	idev->qos.baud_rate.bits = IR_2400 | IR_9600
		| IR_19200 | IR_38400 | IR_57600 | IR_115200
		| IR_1152000 | (IR_4000000 << 8);

	idev->qos.min_turn_time.bits = qos_mtt_bits;

	irda_qos_bits_to_value(&idev->qos);

	irda_device_setup(ndev);

	/* currently no public media definitions for IrDA */

	ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
	ndev->if_port = IF_PORT_UNKNOWN;

	ndev->open            = vlsi_open;
	ndev->stop            = vlsi_close;
	ndev->get_stats       = vlsi_get_stats;
	ndev->hard_start_xmit = vlsi_hard_start_xmit;
	ndev->do_ioctl        = vlsi_ioctl;

	return 0;
}

/**************************************************************/

/* PCI probe: enable the device, validate I/O BAR 0, allocate the
 * net_device together with its private area in one chunk, and
 * register the netdevice.  Returns 0 on success, -ENODEV otherwise.
 */
static int __devinit
vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *ndev;
	vlsi_irda_dev_t *idev;
	int alloc_size;

	if (pci_enable_device(pdev))
		goto out;

	printk(KERN_INFO "%s: IrDA PCI controller %s detected\n",
		drivername, pdev->name);

	if ( !pci_resource_start(pdev,0)
	     || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
		printk(KERN_ERR "%s: bar 0 invalid", __FUNCTION__);
		goto out_disable;
	}

	/* single allocation: private area lives directly behind net_device */
	alloc_size = sizeof(*ndev) + sizeof(*idev);

	ndev = (struct net_device *) kmalloc (alloc_size, GFP_KERNEL);
	if (ndev==NULL) {
		printk(KERN_ERR "%s: Unable to allocate device memory.\n",
			__FUNCTION__);
		goto out_disable;
	}
	memset(ndev, 0, alloc_size);

	idev = (vlsi_irda_dev_t *) (ndev + 1);
	ndev->priv = (void *) idev;

	spin_lock_init(&idev->lock);
	idev->pdev = pdev;
	ndev->init = vlsi_irda_init;	/* rest of setup runs in register_netdev */
	strcpy(ndev->name,"irda%d");
	if (register_netdev(ndev)) {
		printk(KERN_ERR "%s: register_netdev failed\n",
			__FUNCTION__);
		goto out_freedev;
	}
	printk(KERN_INFO "%s: registered device %s\n", drivername, ndev->name);
	pci_set_drvdata(pdev, ndev);

	return 0;

out_freedev:
	kfree(ndev);
out_disable:
	pci_disable_device(pdev);
out:
	pci_set_drvdata(pdev, NULL);
	return -ENODEV;
}

/* PCI remove: unregister the netdevice (which frees it asynchronously)
 * and disable the PCI device.
 */
static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	if (ndev) {
		printk(KERN_INFO "%s: unregister device %s\n",
			drivername, ndev->name);

		unregister_netdev(ndev);
		/* do not free - async completed by unregister_netdev()
		 * ndev->destructor called (if present) when going to free
		 */
	}
	else
		printk(KERN_CRIT "%s: lost netdevice?\n", drivername);

	pci_set_drvdata(pdev, NULL);

	pci_disable_device(pdev);
	printk(KERN_INFO "%s: %s disabled\n", drivername, pdev->name);
}

/* power management not implemented - stubs that just log the call */
static int vlsi_irda_suspend(struct pci_dev *pdev, u32 state)
{
	printk(KERN_ERR "%s - %s\n", __FUNCTION__, pdev->name);
	return 0;
}

static int vlsi_irda_resume(struct pci_dev *pdev)
{
	printk(KERN_ERR "%s - %s\n", __FUNCTION__, pdev->name);
	return 0;
}

/*********************************************************/

static struct pci_driver vlsi_irda_driver = {
	name:		drivername,
	id_table:	vlsi_irda_table,
	probe:		vlsi_irda_probe,
	remove:		__devexit_p(vlsi_irda_remove),
	suspend:	vlsi_irda_suspend,
	resume:		vlsi_irda_resume,
};

/* validate the module parameters (clksrc, ringsize[], sirpulse) and
 * register the PCI driver.
 */
static int __init vlsi_mod_init(void)
{
	int i;

	if (clksrc < 0 || clksrc > 3) {
		printk(KERN_ERR "%s: invalid clksrc=%d\n", drivername, clksrc);
		return -1;
	}

	/* ringsize[0] = tx ring, ringsize[1] = rx ring; must be a
	 * power of two between 4 and 64, else fall back to 8 */
	for (i = 0; i < 2; i++) {
		switch(ringsize[i]) {
			case 4:
			case 8:
			case 16:
			case 32:
			case 64:
				break;
			default:
				printk(KERN_WARNING "%s: invalid %s ringsize %d",
					drivername, (i)?"rx":"tx", ringsize[i]);
				printk(", using default=8\n");
				ringsize[i] = 8;
				break;
		}
	}

	sirpulse = !!sirpulse;	/* normalize to 0/1 */

	return pci_module_init(&vlsi_irda_driver);
}

static void __exit vlsi_mod_exit(void)
{
	pci_unregister_driver(&vlsi_irda_driver);
}

module_init(vlsi_mod_init);
module_exit(vlsi_mod_exit);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -