irttp.c (Linux kernel source code, C)

 *
 *    Duplicate TSAP, can be used by servers to confirm a connection on a
 *    new TSAP so it can keep listening on the old one.
 */
struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
{
        struct tsap_cb *new;
        unsigned long flags;

        IRDA_DEBUG(1, "%s()\n", __FUNCTION__);

        /* Protect our access to the old tsap instance */
        spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);

        /* Find the old instance */
        if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
                IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __FUNCTION__);
                spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
                return NULL;
        }

        /* Allocate a new instance */
        new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
        if (!new) {
                IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__);
                spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
                return NULL;
        }

        /* Dup */
        memcpy(new, orig, sizeof(struct tsap_cb));

        /* We don't need the old instance any more */
        spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);

        /* Try to dup the LSAP (may fail if we were too slow) */
        new->lsap = irlmp_dup(orig->lsap, new);
        if (!new->lsap) {
                IRDA_DEBUG(0, "%s(), dup failed!\n", __FUNCTION__);
                kfree(new);
                return NULL;
        }

        /* Not everything should be copied */
        new->notify.instance = instance;

        /* Initialize internal objects */
        irttp_init_tsap(new);

        /* This is locked */
        hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);

        return new;
}
EXPORT_SYMBOL(irttp_dup);

/*
 * Function irttp_disconnect_request (self)
 *
 *    Close this connection please! If priority is high, the queued data
 *    segments, if any, will be deallocated first
 *
 */
int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
                             int priority)
{
        int ret;

        IRDA_ASSERT(self != NULL, return -1;);
        IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

        /* Already disconnected? */
        if (!self->connected) {
                IRDA_DEBUG(4, "%s(), already disconnected!\n", __FUNCTION__);
                if (userdata)
                        dev_kfree_skb(userdata);
                return -1;
        }

        /* Disconnect already pending ?
         * We need to use an atomic operation to prevent reentry. This
         * function may be called from various contexts, like user, timer
         * for following a disconnect_indication() (i.e. net_bh).
         * Jean II */
        if(test_and_set_bit(0, &self->disconnect_pend)) {
                IRDA_DEBUG(0, "%s(), disconnect already pending\n",
                           __FUNCTION__);
                if (userdata)
                        dev_kfree_skb(userdata);

                /* Try to make some progress */
                irttp_run_tx_queue(self);
                return -1;
        }

        /*
         *  Check if there are still data segments in the transmit queue
         */
        if (!skb_queue_empty(&self->tx_queue)) {
                if (priority == P_HIGH) {
                        /*
                         *  No need to send the queued data, if we are
                         *  disconnecting right now since the data will
                         *  not have any usable connection to be sent on
                         */
                        IRDA_DEBUG(1, "%s(): High priority!!()\n", __FUNCTION__);
                        irttp_flush_queues(self);
                } else if (priority == P_NORMAL) {
                        /*
                         *  Must delay disconnect until after all data segments
                         *  have been sent and the tx_queue is empty
                         */
                        /* We'll reuse this one later for the disconnect */
                        self->disconnect_skb = userdata;  /* May be NULL */

                        irttp_run_tx_queue(self);

                        irttp_start_todo_timer(self, HZ/10);
                        return -1;
                }
        }
        /* Note : we don't need to check if self->rx_queue is full and the
         * state of self->rx_sdu_busy because the disconnect response will
         * be sent at the LMP level (so even if the peer has its Tx queue
         * full of data). - Jean II */

        IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __FUNCTION__);
        self->connected = FALSE;

        if (!userdata) {
                struct sk_buff *tx_skb;

                tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
                if (!tx_skb)
                        return -ENOMEM;

                /*
                 *  Reserve space for MUX and LAP header
                 */
                skb_reserve(tx_skb, LMP_MAX_HEADER);

                userdata = tx_skb;
        }
        ret = irlmp_disconnect_request(self->lsap, userdata);

        /* The disconnect is no longer pending */
        clear_bit(0, &self->disconnect_pend);   /* FALSE */

        return ret;
}
EXPORT_SYMBOL(irttp_disconnect_request);

/*
 * Function irttp_disconnect_indication (self, reason)
 *
 *    Disconnect indication, TSAP disconnected by peer?
 *
 */
void irttp_disconnect_indication(void *instance, void *sap, LM_REASON reason,
                                 struct sk_buff *skb)
{
        struct tsap_cb *self;

        IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

        self = (struct tsap_cb *) instance;

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

        /* Prevent the higher layer from sending more data */
        self->connected = FALSE;

        /* Check if client has already tried to close the TSAP */
        if (self->close_pend) {
                /* In this case, the higher layer is probably gone. Don't
                 * bother it and clean up the remains - Jean II */
                if (skb)
                        dev_kfree_skb(skb);
                irttp_close_tsap(self);
                return;
        }

        /* If we are here, we assume that the higher layer is still
         * waiting for the disconnect notification and able to process it,
         * even if it tried to disconnect. Otherwise, it would have already
         * attempted to close the tsap and self->close_pend would be TRUE.
         * Jean II */

        /* No need to notify the client if it has already tried to disconnect */
        if(self->notify.disconnect_indication)
                self->notify.disconnect_indication(self->notify.instance, self,
                                                   reason, skb);
        else
                if (skb)
                        dev_kfree_skb(skb);
}

/*
 * Function irttp_do_data_indication (self, skb)
 *
 *    Try to deliver the reassembled skb to the layer above, and requeue it
 *    if that should fail for some reason. We mark the rx sdu as busy to
 *    apply back pressure if necessary.
 */
static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
{
        int err;

        /* Check if client has already closed the TSAP and gone away */
        if (self->close_pend) {
                dev_kfree_skb(skb);
                return;
        }

        err = self->notify.data_indication(self->notify.instance, self, skb);

        /* Usually the layer above will notify that its input queue is
         * starting to get filled by using the flow request, but this may
         * be difficult, so it can instead just refuse to eat it and just
         * give an error back
         */
        if (err) {
                IRDA_DEBUG(0, "%s() requeueing skb!\n", __FUNCTION__);

                /* Make sure we take a break */
                self->rx_sdu_busy = TRUE;

                /* Need to push the header in again */
                skb_push(skb, TTP_HEADER);
                skb->data[0] = 0x00; /* Make sure MORE bit is cleared */

                /* Put skb back on queue */
                skb_queue_head(&self->rx_queue, skb);
        }
}

/*
 * Function irttp_run_rx_queue (self)
 *
 *     Check if we have any frames to be transmitted, or if we have any
 *     available credit to give away.
 */
void irttp_run_rx_queue(struct tsap_cb *self)
{
        struct sk_buff *skb;
        int more = 0;

        IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__,
                   self->send_credit, self->avail_credit, self->remote_credit);

        /* Get exclusive access to the rx queue, otherwise don't touch it */
        if (irda_lock(&self->rx_queue_lock) == FALSE)
                return;

        /*
         *  Reassemble all frames in receive queue and deliver them
         */
        while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
                /* This bit will tell us if it's the last fragment or not */
                more = skb->data[0] & 0x80;

                /* Remove TTP header */
                skb_pull(skb, TTP_HEADER);

                /* Add the length of the remaining data */
                self->rx_sdu_size += skb->len;

                /*
                 * If SAR is disabled, or user has requested no reassembly
                 * of received fragments then we just deliver them
                 * immediately. This can be requested by clients that
                 * implement byte streams without any message boundaries
                 */
                if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
                        irttp_do_data_indication(self, skb);
                        self->rx_sdu_size = 0;

                        continue;
                }

                /* Check if this is a fragment, and not the last fragment */
                if (more) {
                        /*
                         *  Queue the fragment if we still are within the
                         *  limits of the maximum size of the rx_sdu
                         */
                        if (self->rx_sdu_size <= self->rx_max_sdu_size) {
                                IRDA_DEBUG(4, "%s(), queueing frag\n",
                                           __FUNCTION__);
                                skb_queue_tail(&self->rx_fragments, skb);
                        } else {
                                /* Free the part of the SDU that is too big */
                                dev_kfree_skb(skb);
                        }
                        continue;
                }
                /*
                 *  This is the last fragment, so time to reassemble!
                 */
                if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
                    (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
                {
                        /*
                         * A little optimizing. Only queue the fragment if
                         * there are other fragments. Since if this is the
                         * last and only fragment, there is no need to
                         * reassemble :-)
                         */
                        if (!skb_queue_empty(&self->rx_fragments)) {
                                skb_queue_tail(&self->rx_fragments,
                                               skb);

                                skb = irttp_reassemble_skb(self);
                        }

                        /* Now we can deliver the reassembled skb */
                        irttp_do_data_indication(self, skb);
                } else {
                        IRDA_DEBUG(1, "%s(), Truncated frame\n", __FUNCTION__);

                        /* Free the part of the SDU that is too big */
                        dev_kfree_skb(skb);

                        /* Deliver only the valid but truncated part of SDU */
                        skb = irttp_reassemble_skb(self);

                        irttp_do_data_indication(self, skb);
                }
                self->rx_sdu_size = 0;
        }

        /*
         * It's not trivial to keep track of how many credits are available
         * by incrementing at each packet, because delivery may fail
         * (irttp_do_data_indication() may requeue the frame) and because
         * we need to take care of fragmentation.
         * We want the other side to send up to initial_credit packets.
         * We have some frames in our queues, and we have already allowed it
         * to send remote_credit.
         * No need to spinlock, write is atomic and self correcting...
         * Jean II
         */
        self->avail_credit = (self->initial_credit -
                              (self->remote_credit +
                               skb_queue_len(&self->rx_queue) +
                               skb_queue_len(&self->rx_fragments)));

        /* Do we have too many credits to send to the peer ? */
        if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
            (self->avail_credit > 0)) {
                /* Send explicit credit frame */
                irttp_give_credit(self);

                /* Note : do *NOT* check if tx_queue is non-empty, that
                 * will produce deadlocks. I repeat : send a credit frame
                 * even if we have something to send in our Tx queue.
                 * If we have credits, it means that our Tx queue is blocked.
                 *
                 * Let's suppose the peer can't keep up with our Tx. He will
                 * flow control us by not sending us any credits, and we
                 * will stop Tx and start accumulating credits here.
                 * Up to the point where the peer will stop its Tx queue,
                 * for lack of credits.
                 * Let's assume the peer application is single threaded.
                 * It will block on Tx and never consume any Rx buffer.
                 * Deadlock. Guaranteed. - Jean II
                 */
        }

        /* Reset lock */
        self->rx_queue_lock = 0;
}

#ifdef CONFIG_PROC_FS
struct irttp_iter_state {
        int id;
};

static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct irttp_iter_state *iter = seq->private;
        struct tsap_cb *self;

        /* Protect our access to the tsap list */
        spin_lock_irq(&irttp->tsaps->hb_spinlock);
        iter->id = 0;

        for (self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
             self != NULL;
             self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps)) {
                if (iter->id == *pos)
                        break;
                ++iter->id;
        }

        return self;
}

static void *irttp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct irttp_iter_state *iter = seq->private;

        ++*pos;
        ++iter->id;
        return (void *) hashbin_get_next(irttp->tsaps);
}

static void irttp_seq_stop(struct seq_file *seq, void *v)
{
        spin_unlock_irq(&irttp->tsaps->hb_spinlock);
}

static int irttp_seq_show(struct seq_file *seq, void *v)
{
        const struct irttp_iter_state *iter = seq->private;
        const struct tsap_cb *self = v;

        seq_printf(seq, "TSAP %d, ", iter->id);
        seq_printf(seq, "stsap_sel: %02x, ",
                   self->stsap_sel);
        seq_printf(seq, "dtsap_sel: %02x\n",
                   self->dtsap_sel);
        seq_printf(seq, "  connected: %s, ",
                   self->connected? "TRUE":"FALSE");
        seq_printf(seq, "avail credit: %d, ",
                   self->avail_credit);
        seq_printf(seq, "remote credit: %d, ",
                   self->remote_credit);
        seq_printf(seq, "send credit: %d\n",
                   self->send_credit);
        seq_printf(seq, "  tx packets: %ld, ",
                   self->stats.tx_packets);
        seq_printf(seq, "rx packets: %ld, ",
                   self->stats.rx_packets);
        seq_printf(seq, "tx_queue len: %d ",
                   skb_queue_len(&self->tx_queue));
        seq_printf(seq, "rx_queue len: %d\n",
                   skb_queue_len(&self->rx_queue));
        seq_printf(seq, "  tx_sdu_busy: %s, ",
                   self->tx_sdu_busy? "TRUE":"FALSE");
        seq_printf(seq, "rx_sdu_busy: %s\n",
                   self->rx_sdu_busy? "TRUE":"FALSE");
        seq_printf(seq, "  max_seg_size: %d, ",
                   self->max_seg_size);
        seq_printf(seq, "tx_max_sdu_size: %d, ",
                   self->tx_max_sdu_size);
        seq_printf(seq, "rx_max_sdu_size: %d\n",
                   self->rx_max_sdu_size);

        seq_printf(seq, "  Used by (%s)\n\n",
                   self->notify.name);
        return 0;
}

static const struct seq_operations irttp_seq_ops = {
        .start  = irttp_seq_start,
        .next   = irttp_seq_next,
        .stop   = irttp_seq_stop,
        .show   = irttp_seq_show,
};

static int irttp_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &irttp_seq_ops,
                        sizeof(struct irttp_iter_state));
}

const struct file_operations irttp_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = irttp_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

#endif /* PROC_FS */
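
A usage note on irttp_dup(): its comment says servers use it to confirm a connection on a new TSAP while the original TSAP keeps listening. The sketch below shows how that pattern might look in a client of this API. It is a minimal, hypothetical sketch, not part of irttp.c: the handler name, the demo_conn structure and the SAR choice are assumptions made for illustration; irttp_dup() comes from the code above, while irttp_connect_response() and the connect_indication callback shape follow the IrTTP API as declared elsewhere in the IrDA stack and should be checked against include/net/irda/irttp.h.

/*
 * Hypothetical sketch (not part of irttp.c): a server accepting an
 * incoming connection on a duplicate TSAP so that the original TSAP
 * keeps listening. Everything prefixed with demo_ is an assumption.
 */
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <net/irda/irda.h>
#include <net/irda/irttp.h>

struct demo_conn {
        struct tsap_cb *tsap;           /* per-connection TSAP */
};

static void demo_connect_indication(void *instance, void *sap,
                                    struct qos_info *qos, __u32 max_sdu_size,
                                    __u8 max_header_size, struct sk_buff *skb)
{
        struct tsap_cb *listen_tsap = instance;        /* the listening TSAP */
        struct demo_conn *conn;

        conn = kzalloc(sizeof(*conn), GFP_ATOMIC);
        if (!conn)
                goto out;

        /* Duplicate the listening TSAP. The copy inherits its settings,
         * but future callbacks on it carry 'conn' as instance, and the
         * original TSAP stays registered and keeps listening. */
        conn->tsap = irttp_dup(listen_tsap, conn);
        if (!conn->tsap) {
                kfree(conn);
                goto out;
        }

        /* Accept the connection on the duplicate (no SAR, no user data). */
        irttp_connect_response(conn->tsap, TTP_SAR_DISABLE, NULL);
out:
        /* The connection setup data is not used in this sketch. */
        if (skb)
                dev_kfree_skb(skb);
}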
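
The credit bookkeeping at the end of irttp_run_rx_queue() is easy to sanity-check with concrete numbers. The stand-alone user-space program below is a sketch, not kernel code and not part of irttp.c: it mirrors the avail_credit formula and the MORE-bit test applied to skb->data[0] above. The assumption that the one-byte TTP data header carries the MORE flag in bit 7 and the granted credits in bits 0 to 6 is mine (only the 0x80 test appears in this excerpt) and should be checked against the rest of irttp.c.

/*
 * Stand-alone illustration (user space, hypothetical) of two details
 * from irttp_run_rx_queue(): the MORE-bit test on the TTP data header
 * and the avail_credit arithmetic. Header layout is an assumption.
 */
#include <stdio.h>

#define DEMO_TTP_MORE_BIT 0x80

static int demo_avail_credit(int initial_credit, int remote_credit,
                             int rx_queue_len, int rx_fragments_len)
{
        /* Same arithmetic as irttp_run_rx_queue(): how many credits we may
         * still grant so the peer never has more than initial_credit
         * packets in flight or queued on our side. */
        return initial_credit -
               (remote_credit + rx_queue_len + rx_fragments_len);
}

int main(void)
{
        unsigned char ttp_header = 0x83;        /* example: MORE set, 3 credits */

        printf("MORE bit: %d, credits in header: %d\n",
               (ttp_header & DEMO_TTP_MORE_BIT) ? 1 : 0,
               ttp_header & 0x7f);

        /* Example: initial_credit 14, peer still holds 2 credits,
         * 3 frames queued for delivery, 1 fragment awaiting reassembly,
         * so we may grant up to 14 - (2 + 3 + 1) = 8 more credits. */
        printf("avail_credit = %d\n", demo_avail_credit(14, 2, 3, 1));
        return 0;
}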
