
📄 sir_dev.c

📁 kernel
💻 C
📖 Page 1 of 2
/*********************************************************************
 *
 *	sir_dev.c:	irda sir network device
 *
 *	Copyright (c) 2002 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp_lock.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"

/***************************************************************************/

void sirdev_enable_rx(struct sir_dev *dev)
{
	if (unlikely(atomic_read(&dev->enable_rx)))
		return;

	/* flush rx-buffer - should also help in case of problems with echo cancelation */
	dev->rx_buff.data = dev->rx_buff.head;
	dev->rx_buff.len = 0;
	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;

	atomic_set(&dev->enable_rx, 1);
}

static int sirdev_is_receiving(struct sir_dev *dev)
{
	if (!atomic_read(&dev->enable_rx))
		return 0;

	return (dev->rx_buff.state != OUTSIDE_FRAME);
}

int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
	int err;

	IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);

	err = sirdev_schedule_dongle_open(dev, type);
	if (unlikely(err))
		return err;
	down(&dev->fsm.sem);		/* block until config change completed */
	err = dev->fsm.result;
	up(&dev->fsm.sem);
	return err;
}

/* used by dongle drivers for dongle programming */

int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
	unsigned long flags;
	int ret;

	if (unlikely(len > dev->tx_buff.truesize))
		return -ENOSPC;

	spin_lock_irqsave(&dev->tx_lock, flags);	/* serialize with other tx operations */
	while (dev->tx_buff.len > 0) {			/* wait until tx idle */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(10));
		spin_lock_irqsave(&dev->tx_lock, flags);
	}

	dev->tx_buff.data = dev->tx_buff.head;
	memcpy(dev->tx_buff.data, buf, len);
	dev->tx_buff.len = len;

	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
	if (ret > 0) {
		IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);

		dev->tx_buff.data += ret;
		dev->tx_buff.len -= ret;
		dev->raw_tx = 1;
		ret = len;		/* all data is going to be sent */
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
	return ret;
}

/* seems some dongle drivers may need this */

int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
	int count;

	if (atomic_read(&dev->enable_rx))
		return -EIO;		/* fail if we expect irda-frames */

	count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;

	if (count > 0) {
		memcpy(buf, dev->rx_buff.data, count);
		dev->rx_buff.data += count;
		dev->rx_buff.len -= count;
	}

	/* remaining stuff gets flushed when re-enabling normal rx */

	return count;
}

int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
	int ret = -ENXIO;

	if (dev->drv->set_dtr_rts != 0)
		ret = dev->drv->set_dtr_rts(dev, dtr, rts);
	return ret;
}

/**********************************************************************/
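The raw helpers above (sirdev_raw_write, sirdev_raw_read, sirdev_set_dtr_rts) are intended for dongle drivers that need to program a dongle while normal IrDA rx is disabled. A minimal, hypothetical sketch of how such a driver might use them follows; it is not part of sir_dev.c, and the my_dongle_reset name, the command bytes and the expected reply length are invented for illustration.

/* hypothetical example only - not part of this file; command/reply bytes are made up */
static int my_dongle_reset(struct sir_dev *dev)
{
	static const char cmd[] = { 0x02, 0x01 };	/* invented programming command */
	char resp;
	int ret;

	/* many dongles latch their mode from the DTR/RTS lines */
	ret = sirdev_set_dtr_rts(dev, FALSE, TRUE);
	if (ret < 0)
		return ret;

	/* push the command bytes through the tx path; returns len on success */
	ret = sirdev_raw_write(dev, cmd, sizeof(cmd));
	if (ret != sizeof(cmd))
		return (ret < 0) ? ret : -EIO;

	/* rx must still be disabled (raw mode), otherwise this returns -EIO;
	 * a real driver would give the dongle time to answer before reading
	 */
	ret = sirdev_raw_read(dev, &resp, 1);
	if (ret != 1)
		return (ret < 0) ? ret : -EIO;

	return 0;
}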
/* called from client driver - likely with bh-context - to indicate
 * it made some progress with transmission. Hence we send the next
 * chunk, if any, or complete the skb otherwise
 */

void sirdev_write_complete(struct sir_dev *dev)
{
	unsigned long flags;
	struct sk_buff *skb;
	int actual = 0;
	int err;

	spin_lock_irqsave(&dev->tx_lock, flags);

	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
		   __FUNCTION__, dev->tx_buff.len);

	if (likely(dev->tx_buff.len > 0))  {
		/* Write data left in transmit buffer */
		actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

		if (likely(actual>0)) {
			dev->tx_buff.data += actual;
			dev->tx_buff.len  -= actual;
		}
		else if (unlikely(actual<0)) {
			/* could be dropped later when we have tx_timeout to recover */
			ERROR("%s: drv->do_write failed (%d)\n", __FUNCTION__, actual);
			if ((skb=dev->tx_skb) != NULL) {
				dev->tx_skb = NULL;
				dev_kfree_skb_any(skb);
				dev->stats.tx_errors++;
				dev->stats.tx_dropped++;
			}
			dev->tx_buff.len = 0;
		}
		if (dev->tx_buff.len > 0)
			goto done;	/* more data to send later */
	}

	if (unlikely(dev->raw_tx != 0)) {
		/* in raw mode we are just done now after the buffer was sent
		 * completely. Since this was requested by some dongle driver
		 * running under the control of the irda-thread we must take
		 * care here not to re-enable the queue. The queue will be
		 * restarted when the irda-thread has completed the request.
		 */

		IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
		dev->raw_tx = 0;
		goto done;	/* no post-frame handling in raw mode */
	}

	/* we have finished now sending this skb.
	 * update statistics and free the skb.
	 * finally we check and trigger a pending speed change, if any.
	 * if not we switch to rx mode and wake the queue for further
	 * packets.
	 * note the scheduled speed request blocks until the lower
	 * client driver and the corresponding hardware has really
	 * finished sending all data (xmit fifo drained f.e.)
	 * before the speed change gets finally done and the queue
	 * re-activated.
	 */

	IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);

	if ((skb=dev->tx_skb) != NULL) {
		dev->tx_skb = NULL;
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	if (unlikely(dev->new_speed > 0)) {
		IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
		err = sirdev_schedule_speed(dev, dev->new_speed);
		if (unlikely(err)) {
			/* should never happen
			 * forget the speed change and hope the stack recovers
			 */
			ERROR("%s - schedule speed change failed: %d\n", __FUNCTION__, err);
			netif_wake_queue(dev->netdev);
		}
		/* else: success
		 *	speed change in progress now
		 *	on completion dev->new_speed gets cleared,
		 *	rx-reenabled and the queue restarted
		 */
	}
	else {
		sirdev_enable_rx(dev);
		netif_wake_queue(dev->netdev);
	}

done:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
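sirdev_write_complete() above relies on the lower-level driver's do_write callback accepting only as many bytes as its hardware can take right now and on the driver calling back when transmission has progressed. A rough sketch of that contract is given below; it is not part of sir_dev.c, the my_* helper names are invented, and the exact do_write prototype should be checked against sir-dev.h.

/* hypothetical hardware helpers, assumed to be provided by the lower-level driver */
extern size_t my_uart_tx_room(void);
extern void my_uart_tx_fill(const unsigned char *ptr, size_t n);

/* sketch of the driver's do_write: take what fits, report how much was accepted */
static int my_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len)
{
	size_t room = my_uart_tx_room();		/* free space in the tx fifo */
	size_t n = (len < room) ? len : room;

	my_uart_tx_fill(ptr, n);			/* queue n bytes to the hardware */
	return n;					/* partial writes are fine */
}

/* called from the driver's tx-done interrupt or bottom half */
static void my_tx_done(struct sir_dev *dev)
{
	sirdev_write_complete(dev);	/* sends the next chunk or completes the skb */
}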
/* called from client driver - likely with bh-context - to give us
 * some more received bytes. We put them into the rx-buffer,
 * normally unwrapping and building LAP-skb's (unless rx disabled)
 */

int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
	if (!dev || !dev->netdev) {
		WARNING("%s(), not ready yet!\n", __FUNCTION__);
		return -1;
	}

	if (!dev->irlap) {
		WARNING("%s - too early: %p / %zd!\n",
			__FUNCTION__, cp, count);
		return -1;
	}

	if (cp==NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->stats.rx_dropped++;
		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not enabled: save the raw bytes and never
			 * trigger any netif_rx. The received bytes are flushed
			 * later when we re-enable rx but might be read meanwhile
			 * by the dongle driver.
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* What should we do when the buffer is full? */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}

/**********************************************************************/

/* callbacks from network layer */

static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;

	return (dev) ? &dev->stats : NULL;
}

static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	unsigned long flags;
	int actual = 0;
	int err;
	s32 speed;

	ASSERT(dev != NULL, return 0;);

	netif_stop_queue(ndev);

	IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);

	speed = irda_get_next_speed(skb);
	if ((speed != dev->speed) && (speed != -1)) {
		if (!skb->len) {
			err = sirdev_schedule_speed(dev, speed);
			if (unlikely(err == -EWOULDBLOCK)) {
				/* Failed to initiate the speed change, likely the fsm
				 * is still busy (pretty unlikely, but...)
				 * We refuse to accept the skb and return with the queue
				 * stopped so the network layer will retry after the
				 * fsm completes and wakes the queue.
				 */
				 return 1;
			}
			else if (unlikely(err)) {
				/* other fatal error - forget the speed change and
				 * hope the stack will recover somehow
				 */
				 netif_start_queue(ndev);
			}
			/* else: success
			 *	speed change in progress now
			 *	on completion the queue gets restarted
			 */

			dev_kfree_skb_any(skb);
			return 0;
		} else
			dev->new_speed = speed;
	}

	/* Init tx buffer*/
	dev->tx_buff.data = dev->tx_buff.head;
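On the receive side, a lower-level driver simply hands every byte it gets from the hardware to sirdev_receive() above, usually from its rx interrupt handler or bottom half; the unwrapping into LAP skbs (or raw buffering while rx is disabled) happens there. A hypothetical sketch follows; it is not part of sir_dev.c and the my_* helpers are invented.

/* hypothetical hardware helpers, assumed to be provided by the lower-level driver */
extern int my_uart_rx_ready(void);
extern unsigned char my_uart_rx_byte(void);

static void my_rx_interrupt(struct sir_dev *dev)
{
	unsigned char c;

	while (my_uart_rx_ready()) {
		c = my_uart_rx_byte();
		/* unwraps into LAP skbs, or buffers the raw byte if rx is disabled */
		sirdev_receive(dev, &c, 1);
	}
}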
