
📄 smsc-ircc2.c

📁 All of the program code from the book 《linux驱动程序设计从入门到精通》 (Linux Driver Design from Beginner to Mastery), including the drivers and their corresponding application programs
💻 C
📖 Page 1 of 5
	IRDA_ASSERT(self != NULL, return;);

	fir_base = self->io.fir_base;
	register_bank(fir_base, 0);
	/*outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);*/
	outb(inb(fir_base + IRCC_LCR_B) & IRCC_LCR_B_SIP_ENABLE, fir_base + IRCC_LCR_B);
}

/*
 * Function smsc_ircc_change_speed(self, baud)
 *
 *    Change the speed of the device
 *
 * This function *must* be called with spinlock held, because it may
 * be called from the irq handler. - Jean II
 */
static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
{
	struct net_device *dev;
	int last_speed_was_sir;

	IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed);

	IRDA_ASSERT(self != NULL, return;);
	dev = self->netdev;

	last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED;

	#if 0
	/* Temp Hack */
	speed= 1152000;
	self->io.speed = speed;
	last_speed_was_sir = 0;
	smsc_ircc_fir_start(self);
	#endif

	if (self->io.speed == 0)
		smsc_ircc_sir_start(self);

	#if 0
	if (!last_speed_was_sir) speed = self->io.speed;
	#endif

	if (self->io.speed != speed)
		smsc_ircc_set_transceiver_for_speed(self, speed);

	self->io.speed = speed;

	if (speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
		if (!last_speed_was_sir) {
			smsc_ircc_fir_stop(self);
			smsc_ircc_sir_start(self);
		}
		smsc_ircc_set_sir_speed(self, speed);
	} else {
		if (last_speed_was_sir) {
			#if SMSC_IRCC2_C_SIR_STOP
			smsc_ircc_sir_stop(self);
			#endif
			smsc_ircc_fir_start(self);
		}
		smsc_ircc_set_fir_speed(self, speed);

		#if 0
		self->tx_buff.len = 10;
		self->tx_buff.data = self->tx_buff.head;

		smsc_ircc_dma_xmit(self, 4000);
		#endif
		/* Be ready for incoming frames */
		smsc_ircc_dma_receive(self);
	}

	netif_wake_queue(dev);
}

/*
 * Function smsc_ircc_set_sir_speed (self, speed)
 *
 *    Set speed of IrDA port to specified baudrate
 *
 */
void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
{
	int iobase;
	int fcr;    /* FIFO control reg */
	int lcr;    /* Line control reg */
	int divisor;

	IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __FUNCTION__, speed);

	IRDA_ASSERT(self != NULL, return;);
	iobase = self->io.sir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Turn off interrupts */
	outb(0, iobase + UART_IER);

	divisor = SMSC_IRCC2_MAX_SIR_SPEED / speed;

	fcr = UART_FCR_ENABLE_FIFO;

	/*
	 * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and
	 * almost 1,7 ms at 19200 bps. At speeds above that we can just forget
	 * about this timeout since it will always be fast enough.
	 */
	fcr |= self->io.speed < 38400 ?
		UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14;

	/* IrDA ports use 8N1 */
	lcr = UART_LCR_WLEN8;

	outb(UART_LCR_DLAB | lcr, iobase + UART_LCR); /* Set DLAB */
	outb(divisor & 0xff,      iobase + UART_DLL); /* Set speed */
	outb(divisor >> 8,	  iobase + UART_DLM);
	outb(lcr,		  iobase + UART_LCR); /* Set 8N1 */
	outb(fcr,		  iobase + UART_FCR); /* Enable FIFO's */

	/* Turn on interrupts */
	outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);

	IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed);
}

/*
 * Function smsc_ircc_hard_xmit_fir (skb, dev)
 *
 *    Transmit the frame!
 *
 */
static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
{
	struct smsc_ircc_cb *self;
	unsigned long flags;
	s32 speed;
	int mtt;

	IRDA_ASSERT(dev != NULL, return 0;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	netif_stop_queue(dev);

	/* Make sure test of self->io.speed & speed change are atomic */
	spin_lock_irqsave(&self->lock, flags);

	/* Check if we need to change the speed after this frame */
	speed = irda_get_next_speed(skb);
	if (speed != self->io.speed && speed != -1) {
		/* Check for empty frame */
		if (!skb->len) {
			/* Note : you should make sure that speed changes
			 * are not going to corrupt any outgoing frame.
			 * Look at nsc-ircc for the gory details - Jean II */
			smsc_ircc_change_speed(self, speed);
			spin_unlock_irqrestore(&self->lock, flags);
			dev_kfree_skb(skb);
			return 0;
		}

		self->new_speed = speed;
	}

	memcpy(self->tx_buff.head, skb->data, skb->len);

	self->tx_buff.len = skb->len;
	self->tx_buff.data = self->tx_buff.head;

	mtt = irda_get_mtt(skb);
	if (mtt) {
		int bofs;

		/*
		 * Compute how many BOFs (STA or PA's) we need to waste the
		 * min turn time given the speed of the link.
		 */
		bofs = mtt * (self->io.speed / 1000) / 8000;
		if (bofs > 4095)
			bofs = 4095;

		smsc_ircc_dma_xmit(self, bofs);
	} else {
		/* Transmit frame */
		smsc_ircc_dma_xmit(self, 0);
	}

	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);

	return 0;
}

/*
 * Function smsc_ircc_dma_xmit (self, bofs)
 *
 *    Transmit data using DMA
 *
 */
static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
{
	int iobase = self->io.fir_base;
	u8 ctrl;

	IRDA_DEBUG(3, "%s\n", __FUNCTION__);
#if 1
	/* Disable Rx */
	register_bank(iobase, 0);
	outb(0x00, iobase + IRCC_LCR_B);
#endif
	register_bank(iobase, 1);
	outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
	     iobase + IRCC_SCE_CFGB);

	self->io.direction = IO_XMIT;

	/* Set BOF additional count for generating the min turn time */
	register_bank(iobase, 4);
	outb(bofs & 0xff, iobase + IRCC_BOF_COUNT_LO);
	ctrl = inb(iobase + IRCC_CONTROL) & 0xf0;
	outb(ctrl | ((bofs >> 8) & 0x0f), iobase + IRCC_BOF_COUNT_HI);

	/* Set max Tx frame size */
	outb(self->tx_buff.len >> 8, iobase + IRCC_TX_SIZE_HI);
	outb(self->tx_buff.len & 0xff, iobase + IRCC_TX_SIZE_LO);

	/*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/

	/* Enable burst mode chip Tx DMA */
	register_bank(iobase, 1);
	outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
	     IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);

	/* Setup DMA controller (must be done after enabling chip DMA) */
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	/* Enable interrupt */
	register_bank(iobase, 0);
	outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
	outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);

	/* Enable transmit */
	outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase + IRCC_LCR_B);
}

/*
 * Function smsc_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. This function will only be called
 *    by the interrupt handler
 *
 */
static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s\n", __FUNCTION__);
#if 0
	/* Disable Tx */
	register_bank(iobase, 0);
	outb(0x00, iobase + IRCC_LCR_B);
#endif
	register_bank(iobase, 1);
	outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
	     iobase + IRCC_SCE_CFGB);

	/* Check for underrun! */
	register_bank(iobase, 0);
	if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
		self->stats.tx_errors++;
		self->stats.tx_fifo_errors++;

		/* Reset error condition */
		register_bank(iobase, 0);
		outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
		outb(0x00, iobase + IRCC_MASTER);
	} else {
		self->stats.tx_packets++;
		self->stats.tx_bytes += self->tx_buff.len;
	}

	/* Check if it's time to change the speed */
	if (self->new_speed) {
		smsc_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	netif_wake_queue(self->netdev);
}

/*
 * Function smsc_ircc_dma_receive(self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self)
{
	int iobase = self->io.fir_base;
#if 0
	/* Turn off chip DMA */
	register_bank(iobase, 1);
	outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
	     iobase + IRCC_SCE_CFGB);
#endif

	/* Disable Tx */
	register_bank(iobase, 0);
	outb(0x00, iobase + IRCC_LCR_B);

	/* Turn off chip DMA */
	register_bank(iobase, 1);
	outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
	     iobase + IRCC_SCE_CFGB);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

	/* Set max Rx frame size */
	register_bank(iobase, 4);
	outb((2050 >> 8) & 0x0f, iobase + IRCC_RX_SIZE_HI);
	outb(2050 & 0xff, iobase + IRCC_RX_SIZE_LO);

	/* Setup DMA controller */
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_RX_MODE);

	/* Enable burst mode chip Rx DMA */
	register_bank(iobase, 1);
	outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
	     IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);

	/* Enable interrupt */
	register_bank(iobase, 0);
	outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
	outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);

	/* Enable receiver */
	register_bank(iobase, 0);
	outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE,
	     iobase + IRCC_LCR_B);

	return 0;
}

/*
 * Function smsc_ircc_dma_receive_complete(self)
 *
 *    Finished with receiving frames
 *
 */
static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
{
	struct sk_buff *skb;
	int len, msgcnt, lsr;
	int iobase = self->io.fir_base;

	register_bank(iobase, 0);

	IRDA_DEBUG(3, "%s\n", __FUNCTION__);
#if 0
	/* Disable Rx */
	register_bank(iobase, 0);
	outb(0x00, iobase + IRCC_LCR_B);
#endif
	register_bank(iobase, 0);
	outb(inb(iobase + IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase + IRCC_LSAR);
	lsr= inb(iobase + IRCC_LSR);
	msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;

	IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__,
		   get_dma_residue(self->io.dma));

	len = self->rx_buff.truesize - get_dma_residue(self->io.dma);

	/* Look for errors */
	if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
		self->stats.rx_errors++;
		if (lsr & IRCC_LSR_FRAME_ERROR)
			self->stats.rx_frame_errors++;
		if (lsr & IRCC_LSR_CRC_ERROR)
			self->stats.rx_crc_errors++;
		if (lsr & IRCC_LSR_SIZE_ERROR)
			self->stats.rx_length_errors++;
		if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
			self->stats.rx_length_errors++;
		return;
	}

	/* Remove CRC */
	len -= self->io.speed < 4000000 ? 2 : 4;

	if (len < 2 || len > 2050) {
		IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len);
		return;
	}
	IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len);

	skb = dev_alloc_skb(len + 1);
	if (!skb) {
		IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
			     __FUNCTION__);
		return;
	}

	/* Make sure IP header gets aligned */
	skb_reserve(skb, 1);

	memcpy(skb_put(skb, len), self->rx_buff.data, len);
	self->stats.rx_packets++;
	self->stats.rx_bytes += len;

	skb->dev = self->netdev;
	skb->mac.raw  = skb->data;
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
}

/*
 * Function smsc_ircc_sir_receive (self)
 *
 *    Receive one frame from the infrared port
 *
 */
static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
{
	int boguscount = 0;
	int iobase;

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.sir_base;

	/*
	 * Receive all characters in Rx FIFO, unwrap and unstuff them.
	 * async_unwrap_char will deliver all found frames
	 */
	do {
		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
				  inb(iobase + UART_RX));

		/* Make sure we don't stay here too long */
		if (boguscount++ > 32) {
			IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__);
			break;
		}
	} while (inb(iobase + UART_LSR) & UART_LSR_DR);
}

/*
 * Function smsc_ircc_interrupt (irq, dev_id, regs)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct smsc_ircc_cb *self;
	int iobase, iir, lcra, lsr;
	irqreturn_t ret = IRQ_NONE;

	if (dev == NULL) {
		printk(KERN_WARNING "%s: irq %d for unknown device.\n",
		       driver_name, irq);
		goto irq_ret;
	}

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return IRQ_NONE;);

	/* Serialise the interrupt handler in various CPUs, stop Tx path */
	spin_lock(&self->lock);

	/* Check if we should use the SIR interrupt handler */
	if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
		ret = smsc_ircc_interrupt_sir(dev);
		goto irq_ret_unlock;
	}

	iobase = self->io.fir_base;

	register_bank(iobase, 0);
	iir = inb(iobase + IRCC_IIR);
	if (iir == 0)
		goto irq_ret_unlock;
	ret = IRQ_HANDLED;

	/* Disable interrupts */
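The listing breaks off inside smsc_ircc_interrupt() and continues on the following pages. One detail worth calling out from smsc_ircc_hard_xmit_fir() above is how the driver turns the IrDA minimum turnaround time (mtt, in microseconds) into a count of extra BOF bytes to send ahead of the frame: bofs = mtt * (speed / 1000) / 8000, clamped to 4095 because the chip's BOF counter is only 12 bits wide (IRCC_BOF_COUNT_LO plus the low nibble of IRCC_BOF_COUNT_HI). The sketch below restates that calculation as a standalone helper; the name mtt_to_bofs is ours for illustration and is not part of the driver.

/* Illustrative sketch (not from the driver): derive the extra-BOF count
 * that smsc_ircc_hard_xmit_fir() programs into the BOF count registers.
 * mtt is in microseconds, speed in bits per second; one BOF byte occupies
 * 8 bit-times on the wire, and the hardware counter is 12 bits wide. */
static unsigned int mtt_to_bofs(unsigned int mtt, unsigned int speed)
{
	unsigned int bofs = mtt * (speed / 1000) / 8000;

	return bofs > 4095 ? 4095 : bofs;
}

For example, at 4 Mb/s with a 1000 µs turnaround time this gives 1000 * 4000 / 8000 = 500 extra BOFs before the payload.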
