smsc-ircc2.c
        /* Setup DMA controller (must be done after enabling chip DMA) */
        irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, DMA_TX_MODE);

        /* Enable interrupt */
        register_bank(iobase, 0);
        outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
        outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);

        /* Enable transmit */
        outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase + IRCC_LCR_B);
}

/*
 * Function smsc_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. This function will only be called
 *    by the interrupt handler
 *
 */
static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
{
        int iobase = self->io.fir_base;

        IRDA_DEBUG(3, "%s\n", __FUNCTION__);
#if 0
        /* Disable Tx */
        register_bank(iobase, 0);
        outb(0x00, iobase + IRCC_LCR_B);
#endif
        register_bank(iobase, 1);
        outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, iobase + IRCC_SCE_CFGB);

        /* Check for underrun! */
        register_bank(iobase, 0);
        if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
                self->stats.tx_errors++;
                self->stats.tx_fifo_errors++;

                /* Reset error condition */
                register_bank(iobase, 0);
                outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
                outb(0x00, iobase + IRCC_MASTER);
        } else {
                self->stats.tx_packets++;
                self->stats.tx_bytes += self->tx_buff.len;
        }

        /* Check if it's time to change the speed */
        if (self->new_speed) {
                smsc_ircc_change_speed(self, self->new_speed);
                self->new_speed = 0;
        }

        netif_wake_queue(self->netdev);
}

/*
 * Function smsc_ircc_dma_receive(self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self)
{
        int iobase = self->io.fir_base;
#if 0
        /* Turn off chip DMA */
        register_bank(iobase, 1);
        outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, iobase + IRCC_SCE_CFGB);
#endif

        /* Disable Tx */
        register_bank(iobase, 0);
        outb(0x00, iobase + IRCC_LCR_B);

        /* Turn off chip DMA */
        register_bank(iobase, 1);
        outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, iobase + IRCC_SCE_CFGB);

        self->io.direction = IO_RECV;
        self->rx_buff.data = self->rx_buff.head;

        /* Set max Rx frame size */
        register_bank(iobase, 4);
        outb((2050 >> 8) & 0x0f, iobase + IRCC_RX_SIZE_HI);
        outb(2050 & 0xff, iobase + IRCC_RX_SIZE_LO);

        /* Setup DMA controller */
        irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, DMA_RX_MODE);

        /* Enable burst mode chip Rx DMA */
        register_bank(iobase, 1);
        outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE | IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);

        /* Enable interrupt */
        register_bank(iobase, 0);
        outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
        outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);

        /* Enable receiver */
        register_bank(iobase, 0);
        outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE, iobase + IRCC_LCR_B);

        return 0;
}

/*
 * Function smsc_ircc_dma_receive_complete(self)
 *
 *    Finished with receiving frames
 *
 */
static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
{
        struct sk_buff *skb;
        int len, msgcnt, lsr;
        int iobase = self->io.fir_base;

        register_bank(iobase, 0);

        IRDA_DEBUG(3, "%s\n", __FUNCTION__);
#if 0
        /* Disable Rx */
        register_bank(iobase, 0);
        outb(0x00, iobase + IRCC_LCR_B);
#endif
        register_bank(iobase, 0);
        outb(inb(iobase + IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase + IRCC_LSAR);
        lsr = inb(iobase + IRCC_LSR);
        msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;

        IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__, get_dma_residue(self->io.dma));

        len = self->rx_buff.truesize - get_dma_residue(self->io.dma);

        /* Look for errors */
        if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
                self->stats.rx_errors++;
                if (lsr & IRCC_LSR_FRAME_ERROR)
                        self->stats.rx_frame_errors++;
                if (lsr & IRCC_LSR_CRC_ERROR)
                        self->stats.rx_crc_errors++;
                if (lsr & IRCC_LSR_SIZE_ERROR)
                        self->stats.rx_length_errors++;
                if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
                        self->stats.rx_length_errors++;
                return;
        }

        /* Remove CRC */
        len -= self->io.speed < 4000000 ? 2 : 4;

        if (len < 2 || len > 2050) {
                IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len);
                return;
        }
        IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len);

        skb = dev_alloc_skb(len + 1);
        if (!skb) {
                IRDA_WARNING("%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
                return;
        }
        /* Make sure IP header gets aligned */
        skb_reserve(skb, 1);

        memcpy(skb_put(skb, len), self->rx_buff.data, len);
        self->stats.rx_packets++;
        self->stats.rx_bytes += len;

        skb->dev = self->netdev;
        skb->mac.raw = skb->data;
        skb->protocol = htons(ETH_P_IRDA);
        netif_rx(skb);
}

/*
 * Function smsc_ircc_sir_receive (self)
 *
 *    Receive one frame from the infrared port
 *
 */
static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
{
        int boguscount = 0;
        int iobase;

        IRDA_ASSERT(self != NULL, return;);

        iobase = self->io.sir_base;

        /*
         * Receive all characters in Rx FIFO, unwrap and unstuff them.
         * async_unwrap_char will deliver all found frames
         */
        do {
                async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, inb(iobase + UART_RX));

                /* Make sure we don't stay here too long */
                if (boguscount++ > 32) {
                        IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__);
                        break;
                }
        } while (inb(iobase + UART_LSR) & UART_LSR_DR);
}

/*
 * Function smsc_ircc_interrupt (irq, dev_id, regs)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct smsc_ircc_cb *self;
        int iobase, iir, lcra, lsr;
        irqreturn_t ret = IRQ_NONE;

        if (dev == NULL) {
                printk(KERN_WARNING "%s: irq %d for unknown device.\n", driver_name, irq);
                goto irq_ret;
        }

        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return IRQ_NONE;);

        /* Serialise the interrupt handler in various CPUs, stop Tx path */
        spin_lock(&self->lock);

        /* Check if we should use the SIR interrupt handler */
        if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
                ret = smsc_ircc_interrupt_sir(dev);
                goto irq_ret_unlock;
        }

        iobase = self->io.fir_base;

        register_bank(iobase, 0);
        iir = inb(iobase + IRCC_IIR);
        if (iir == 0)
                goto irq_ret_unlock;
        ret = IRQ_HANDLED;

        /* Disable interrupts */
        outb(0, iobase + IRCC_IER);
        lcra = inb(iobase + IRCC_LCR_A);
        lsr = inb(iobase + IRCC_LSR);

        IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir);

        if (iir & IRCC_IIR_EOM) {
                if (self->io.direction == IO_RECV)
                        smsc_ircc_dma_receive_complete(self);
                else
                        smsc_ircc_dma_xmit_complete(self);

                smsc_ircc_dma_receive(self);
        }

        if (iir & IRCC_IIR_ACTIVE_FRAME) {
                /*printk(KERN_WARNING "%s(): Active Frame\n", __FUNCTION__);*/
        }

        /* Enable interrupts again */
        register_bank(iobase, 0);
        outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);

 irq_ret_unlock:
        spin_unlock(&self->lock);
 irq_ret:
        return ret;
}

/*
 * Function irport_interrupt_sir (irq, dev_id, regs)
 *
 *    Interrupt handler for SIR modes
 */
static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
{
        struct smsc_ircc_cb *self = netdev_priv(dev);
        int boguscount = 0;
        int iobase;
        int iir, lsr;

        /* Already locked coming here in smsc_ircc_interrupt() */
        /*spin_lock(&self->lock);*/

        iobase = self->io.sir_base;

        iir = inb(iobase + UART_IIR) & UART_IIR_ID;
        if (iir == 0)
                return IRQ_NONE;
        while (iir) {
                /* Clear interrupt */
                lsr = inb(iobase + UART_LSR);

                IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __FUNCTION__, iir, lsr, iobase);

                switch (iir) {
                case UART_IIR_RLSI:
                        IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__);
                        break;
                case UART_IIR_RDI:
                        /* Receive interrupt */
                        smsc_ircc_sir_receive(self);
                        break;
                case UART_IIR_THRI:
                        if (lsr & UART_LSR_THRE)
                                /* Transmitter ready for data */
                                smsc_ircc_sir_write_wakeup(self);
                        break;
                default:
                        IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __FUNCTION__, iir);
                        break;
                }

                /* Make sure we don't stay here too long */
                if (boguscount++ > 100)
                        break;

                iir = inb(iobase + UART_IIR) & UART_IIR_ID;
        }
        /*spin_unlock(&self->lock);*/
        return IRQ_HANDLED;
}

#if 0 /* unused */
/*
 * Function ircc_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int ircc_is_receiving(struct smsc_ircc_cb *self)
{
        int status = FALSE;
        /* int iobase; */

        IRDA_DEBUG(1, "%s\n", __FUNCTION__);

        IRDA_ASSERT(self != NULL, return FALSE;);

        IRDA_DEBUG(0, "%s: dma count = %d\n", __FUNCTION__, get_dma_residue(self->io.dma));

        status = (self->rx_buff.state != OUTSIDE_FRAME);

        return status;
}
#endif /* unused */

/*
 * Function smsc_ircc_net_open (dev)
 *
 *    Start the device
 *
 */
static int smsc_ircc_net_open(struct net_device *dev)
{
        struct smsc_ircc_cb *self;
        char hwname[16];
        unsigned long flags;

        IRDA_DEBUG(1, "%s\n", __FUNCTION__);

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return 0;);

        if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, (void *) dev)) {
                IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n", __FUNCTION__, self->io.irq);
                return -EAGAIN;
        }

        spin_lock_irqsave(&self->lock, flags);
        /*smsc_ircc_sir_start(self);*/
        self->io.speed = 0;
        smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
        spin_unlock_irqrestore(&self->lock, flags);

        /* Give self a hardware name */
        /* It would be cool to offer the chip revision here - Jean II */
        sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base);

        /*
         * Open new IrLAP layer instance, now that everything should be
         * initialized properly
         */
        self->irlap = irlap_open(dev, &self->qos, hwname);

        /*
         * Always allocate the DMA channel after the IRQ,
         * and clean up on failure.
         */
        if (request_dma(self->io.dma, dev->name)) {
                smsc_ircc_net_close(dev);

                IRDA_WARNING("%s(), unable to allocate DMA=%d\n", __FUNCTION__, self->io.dma);
                return -EAGAIN;
        }

        netif_start_queue(dev);

        return 0;
}

/*
 * Function smsc_ircc_net_close (dev)
 *
 *    Stop the device
 *
 */
static int smsc_ircc_net_close(struct net_device *dev)
{
        struct smsc_ircc_cb *self;

        IRDA_DEBUG(1, "%s\n", __FUNCTION__);

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return 0;);

        /* Stop device */
        netif_stop_queue(dev);

        /* Stop and remove instance of IrLAP */
        if (self->irlap)
                irlap_close(self->irlap);
        self->irlap = NULL;

        free_irq(self->io.irq, dev);
        disable_dma(self->io.dma);
        free_dma(self->io.dma);

        return 0;
}

static int smsc_ircc_suspend(struct device *dev, pm_message_t state, u32 level)
{
        struct smsc_ircc_cb *self = dev_get_drvdata(dev);

        IRDA_MESSAGE("%s, Suspending\n", driver_name);

        if (level == SUSPEND_DISABLE && !self->io.suspended) {
                smsc_ircc_net_close(self->netdev);
                self->io.suspended = 1;
        }

        return 0;
}

static int smsc_ircc_resume(struct device *dev, u32 level)
{
        struct smsc_ircc_cb *self = dev_get_drvdata(dev);

        if (level == RESUME_ENABLE && self->io.suspended) {
                smsc_ircc_net_open(self->netdev);
                self->io.suspended = 0;

                IRDA_MESSAGE("%s, Waking up\n", driver_name);
        }

        return 0;
}

/*
 * Function smsc_ircc_close (self)
 *
 *    Close driver instance
 *
 */
static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
{
        int iobase;
        unsigned long flags;

        IRDA_DEBUG(1, "%s\n", __FUNCTION__);

        IRDA_ASSERT(self != NULL, return -1;);

        platform_device_unregister(self->pldev);

        /* Remove netdevice */
        unregister_netdev(self->netdev);

        /* Make sure the irq handler is not executing */
        spin_lock_irqsave(&self->lock, flags);

        /* Stop interrupts */
        iobase = self->io.fir_base;
        register_bank(iobase, 0);
        outb(0, iobase + IRCC_IER);
        outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
        outb(0x00, iobase + IRCC_MASTER);
#if 0
        /* Reset to SIR mode */
        register_bank(iobase, 1);
        outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase + IRCC_SCE_CFGA);
        outb(IRCC_CFGB_IR, iobase + IRCC_SCE_CFGB);
#endif
        spin_unlock_irqrestore(&self->lock, flags);

        /* Release the PORTS that this driver is using */
        IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, self->io.fir_base);

        release_region(self->io.fir_base, self->io.fir_ext);

        IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, self->io.sir_base);

        release_region(self->io.sir_base, self->io.sir_ext);

        if (self->tx_buff.head)
                dma_free_coherent(NULL, self->tx_buff.truesize, self->tx_buff.head, self->tx_buff_dma);

        if (self->rx_buff.head)
                dma_free_coherent(NULL, self->rx_buff.truesize, self->rx_buff.head, self->rx_buff_dma);

        free_netdev(self->netdev);

        return 0;
}

static void __exit smsc_ircc_cleanup(void)
{
        int i;

        IRDA_DEBUG(1, "%s\n", __FUNCTION__);

        for (i = 0; i < 2; i++) {
                if (dev_self[i])
                        smsc_ircc_close(dev_self[i]);
        }

        driver_unregister(&smsc_ircc_driver);
}

/*
 * Start SIR operations
 *
 * This function *must* be called with spinlock held, because it may
 * be called from the irq handler (via smsc_ircc_change_speed()). - Jean II
 */
void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
{
        struct net_device *dev;
        int fir_base, sir_base;

        IRDA_DEBUG(3, "%s\n", __FUNCTION__);

        IRDA_ASSERT(self != NULL, return;);
        dev = self->netdev;
        IRDA_ASSERT(dev != NULL, return;);
        dev->hard_start_xmit = &smsc_ircc_hard_xmit_sir;

        fir_base = self->io.fir_base;
        sir_base = self->io.sir_base;

        /* Reset everything */
        outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);

        #if SMSC_IRCC2_C_SIR_STOP
        /*smsc_ircc_sir_stop(self);*/
        #endif