ppp_synctty.c

来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 791 行 · 第 1/2 页

C
791
字号
/* * PPP synchronous tty channel driver for Linux. * * This is a ppp channel driver that can be used with tty device drivers * that are frame oriented, such as synchronous HDLC devices. * * Complete PPP frames without encoding/decoding are exchanged between * the channel driver and the device driver. *  * The async map IOCTL codes are implemented to keep the user mode * applications happy if they call them. Synchronous PPP does not use * the async maps. * * Copyright 1999 Paul Mackerras. * * Also touched by the grubby hands of Paul Fulghum paulkf@microgate.com * *  This program is free software; you can redistribute it and/or *  modify it under the terms of the GNU General Public License *  as published by the Free Software Foundation; either version *  2 of the License, or (at your option) any later version. * * This driver provides the encapsulation and framing for sending * and receiving PPP frames over sync serial lines.  It relies on * the generic PPP layer to give it frames to send and to process * received frames.  It implements the PPP line discipline. * * Part of the code in this driver was inspired by the old async-only * PPP driver, written by Michael Callahan and Al Longyear, and * subsequently hacked by Paul Mackerras. * * ==FILEVERSION 20040616== */#include <linux/module.h>#include <linux/kernel.h>#include <linux/skbuff.h>#include <linux/tty.h>#include <linux/netdevice.h>#include <linux/poll.h>#include <linux/ppp_defs.h>#include <linux/if_ppp.h>#include <linux/ppp_channel.h>#include <linux/spinlock.h>#include <linux/init.h>#include <asm/uaccess.h>#include <asm/semaphore.h>#define PPP_VERSION	"2.4.2"/* Structure for storing local state. 
 */
struct syncppp {
	struct tty_struct *tty;		/* underlying tty device */
	unsigned int	flags;
	unsigned int	rbits;		/* receive status bits, see SC_RCV_BITS */
	int		mru;		/* max receive unit, initialised to PPP_MRU */
	spinlock_t	xmit_lock;	/* guards transmit-side state */
	spinlock_t	recv_lock;	/* guards receive-side state */
	unsigned long	xmit_flags;	/* XMIT_WAKEUP / XMIT_FULL bit numbers */
	u32		xaccm[8];	/* tx async char map - kept only so the
					   async-map ioctls succeed; sync PPP
					   does not actually use it */
	u32		raccm;		/* rx async char map - same */
	unsigned int	bytes_sent;
	unsigned int	bytes_rcvd;

	struct sk_buff	*tpkt;		/* pending transmit frame (freed on close) */
	unsigned long	last_xmit;

	struct sk_buff_head rqueue;	/* queue of received frames */

	struct tasklet_struct tsk;	/* runs ppp_sync_process in softirq context */

	atomic_t	refcnt;		/* users of this struct (see sp_get/sp_put) */
	struct semaphore dead_sem;	/* upped when refcnt drops to zero */
	struct ppp_channel chan;	/* interface to generic ppp layer */
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0
#define XMIT_FULL	1

/* Bits in rbits */
#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

#define PPPSYNC_MAX_RQLEN	32	/* arbitrary */

/*
 * Prototypes.
 */
static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
			  unsigned long arg);
static void ppp_sync_process(unsigned long arg);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
			   char *flags, int count);

/* Callbacks handed to the generic PPP layer via ppp_register_channel() */
static struct ppp_channel_ops sync_ops = {
	ppp_sync_send,
	ppp_sync_ioctl
};

/*
 * Utility procedures to print a buffer in hex/ascii
 */

/*
 * Write two hex digits per input byte into @out, advancing three
 * columns per byte; the skipped column keeps whatever the caller put
 * there (ppp_print_buffer pre-fills the line with spaces).
 */
static void
ppp_print_hex (register __u8 * out, const __u8 * in, int count)
{
	register __u8 next_ch;
	static char hex[] = "0123456789ABCDEF";

	while (count-- > 0) {
		next_ch = *in++;
		*out++ = hex[(next_ch >> 4) & 0x0F];
		*out++ = hex[next_ch & 0x0F];
		++out;
	}
}

/*
 * Write a printable-ASCII rendering of @in into @out, using '.' for
 * non-printable bytes, and NUL-terminate the result.
 */
static void
ppp_print_char (register __u8 * out, const __u8 * in, int count)
{
	register __u8 next_ch;

	while (count-- > 0) {
		next_ch = *in++;

		if (next_ch < 0x20 || next_ch > 0x7e)
			*out++ = '.';
		else {
			*out++ = next_ch;
			if (next_ch == '%')   /* printk/syslogd has a bug !!
					       * so '%' is doubled here */
				*out++ = '%';
		}
	}
	*out = '\0';
}

/*
 * Dump @count bytes of @buf to the kernel log, 8 bytes per line,
 * as hex columns followed by an ASCII rendering.  @name, if non-NULL,
 * is printed as a heading first.
 */
static void
ppp_print_buffer (const char *name, const __u8 *buf, int count)
{
	__u8 line[44];	/* 8*3 hex columns + up to 16 chars ('%' doubling) + NUL */

	if (name != NULL)
		printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);

	while (count > 8) {
		memset (line, 32, 44);	/* pre-fill with spaces; see ppp_print_hex */
		ppp_print_hex (line, buf, 8);
		ppp_print_char (&line[8 * 3], buf, 8);
		printk(KERN_DEBUG "%s\n", line);
		count -= 8;
		buf += 8;
	}

	if (count > 0) {
		memset (line, 32, 44);
		ppp_print_hex (line, buf, count);
		ppp_print_char (&line[8 * 3], buf, count);
		printk(KERN_DEBUG "%s\n", line);
	}
}

/*
 * Routines implementing the synchronous PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_synctty_receive while another
 * calls ppp_synctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_synctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: Fixed in tty_io nowdays.
 */
static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;

/*
 * Look up the syncppp struct for @tty under the disc_data lock and
 * take a reference on it.  Returns NULL if the line discipline has
 * already been detached.  Pair every successful call with sp_put().
 */
static struct syncppp *sp_get(struct tty_struct *tty)
{
	struct syncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		atomic_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);
	return ap;
}

/*
 * Drop a reference taken by sp_get().  When the last reference goes,
 * wake the closer sleeping in ppp_sync_close() on dead_sem.
 */
static void sp_put(struct syncppp *ap)
{
	if (atomic_dec_and_test(&ap->refcnt))
		up(&ap->dead_sem);
}

/*
 * Called when a tty is put into sync-PPP line discipline.
 */
static int
ppp_sync_open(struct tty_struct *tty)
{
	struct syncppp *ap;
	int err;

	ap = kmalloc(sizeof(*ap), GFP_KERNEL);
	err = -ENOMEM;
	if (ap == 0)
		goto out;

	/* initialize the syncppp structure */
	memset(ap, 0, sizeof(*ap));
	ap->tty = tty;
	ap->mru = PPP_MRU;
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[0] = ~0U;		/* escape 0x00 - 0x1f (unused by sync PPP) */
	ap->xaccm[3] = 0x60000000U;	/* escape 0x7d, 0x7e (unused by sync PPP) */
	ap->raccm = ~0U;

	skb_queue_head_init(&ap->rqueue);
	tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);

	/* refcnt starts at 1 (the tty's reference); dead_sem starts locked
	   so the closer can sleep on it until the last user drops out */
	atomic_set(&ap->refcnt, 1);
	init_MUTEX_LOCKED(&ap->dead_sem);

	ap->chan.private = ap;
	ap->chan.ops = &sync_ops;
	ap->chan.mtu = PPP_MRU;
	ap->chan.hdrlen = 2;	/* for A/C bytes */
	err = ppp_register_channel(&ap->chan);
	if (err)
		goto out_free;

	/* publish only after the channel is registered, so sp_get()
	   never hands out a half-initialised struct */
	tty->disc_data = ap;

	return 0;

 out_free:
	kfree(ap);
 out:
	return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_synctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the syncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_sync_close(struct tty_struct *tty)
{
	struct syncppp *ap;

	/* detach under the write lock so no new sp_get() can find us */
	write_lock_irq(&disc_data_lock);
	ap = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);
	if (ap == 0)
		return;

	/*
	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
	 * by the time it returns.
	 */
	if (!atomic_dec_and_test(&ap->refcnt))
		down(&ap->dead_sem);	/* sleep until the last sp_put() */
	tasklet_kill(&ap->tsk);

	ppp_unregister_channel(&ap->chan);
	skb_queue_purge(&ap->rqueue);
	if (ap->tpkt != 0)
		kfree_skb(ap->tpkt);
	kfree(ap);
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_sync_read(struct tty_struct *tty, struct file *file,
	       unsigned char __user *buf, size_t count)
{
	return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_sync_write(struct tty_struct *tty, struct file *file,
		const unsigned char __user *buf, size_t count)
{
	return -EAGAIN;
}

/*
 * ioctl handler for the line discipline.  Handles the PPP channel
 * queries, passes termios-style requests through to n_tty_ioctl(),
 * and reports 0 bytes readable (see ppp_sync_read above).
 */
static int
ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
		  unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = sp_get(tty);
	int __user *p = (int __user *)arg;
	int err, val;

	if (ap == 0)
		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
		err = -ENXIO;
		/* NOTE(review): ap was already NULL-checked above, so this
		   re-check (and the one under PPPIOCGUNIT) is dead code */
		if (ap == 0)
			break;
		err = -EFAULT;
		if (put_user(ppp_channel_index(&ap->chan), p))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		err = -ENXIO;
		if (ap == 0)
			break;
		err = -EFAULT;
		if (put_user(ppp_unit_number(&ap->chan), p))
			break;
		err = 0;
		break;

	case TCGETS:
	case TCGETA:
		err = n_tty_ioctl(tty, file, cmd, arg);
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_sync_flush_output(ap);
		err = n_tty_ioctl(tty, file, cmd, arg);
		break;

	case FIONREAD:
		/* nothing is ever readable on the tty itself */
		val = 0;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	default:
		err = -ENOIOCTLCMD;
	}

	sp_put(ap);
	return err;
}

/* No kernel lock - fine */
static unsigned int
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
	/* never readable/writable here - data flows via /dev/ppp */
	return 0;
}

static int
ppp_sync_room(struct tty_struct *tty)
{
	/* effectively unlimited: frames are consumed by ppp_sync_receive */
	return 65535;
}

/*
 * This can now be called from hard interrupt level as well
 * as soft interrupt level or mainline.
 */
static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
		  char *cflags, int count)
{
	struct syncppp *ap = sp_get(tty);
	unsigned long flags;

	if (ap == 0)
		return;
	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_sync_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?