
📄 ldl.c

📁 SS7 (Signaling System No. 7) signalling code; open source.
💻 C
📖 Page 1 of 5
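The listing below is part of a STREAMS DLPI provider; the DL_OK_ACK / DL_ERROR_ACK helpers near the end build the acknowledgements that a user-level client reads back over the stream. For context, here is a minimal, hypothetical client sketch — it is not part of ldl.c, the device path "/dev/ldl" and PPA 0 are made-up values, and it assumes a STREAMS environment (e.g. LiS) with the standard <sys/dlpi.h> primitives.

/* Hypothetical DLPI client sketch: attach to a PPA and check the ack.
 * Assumes a STREAMS environment; "/dev/ldl" and PPA 0 are illustrative only. */
#include <fcntl.h>
#include <stdio.h>
#include <stropts.h>
#include <sys/dlpi.h>

int main(void)
{
	int fd = open("/dev/ldl", O_RDWR);		/* assumed device node */
	dl_attach_req_t req = { DL_ATTACH_REQ, 0 };	/* dl_primitive, dl_ppa */
	long buf[256];
	struct strbuf ctl = { 0 };
	int flags = 0;

	if (fd < 0)
		return 1;

	ctl.len = DL_ATTACH_REQ_SIZE;
	ctl.buf = (char *)&req;
	if (putmsg(fd, &ctl, NULL, 0) < 0)	/* send the request downstream */
		return 1;

	ctl.maxlen = sizeof(buf);
	ctl.buf = (char *)buf;
	if (getmsg(fd, &ctl, NULL, &flags) < 0)	/* read the M_PCPROTO ack */
		return 1;

	/* The provider answers with DL_OK_ACK (see make_dl_ok_ack in the
	 * listing below) or DL_ERROR_ACK carrying DLPI and UNIX error codes. */
	if (((union DL_primitives *)buf)->dl_primitive == DL_OK_ACK)
		printf("attach acknowledged\n");
	else
		printf("attach refused\n");
	return 0;
}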
 */
STATIC void ndev_free(struct ndev *ndev)
{
	ASSERT(ndev != NULL);
	ASSERT(ndev->magic == NDEV_MAGIC);
	ASSERT(ndev->dev == NULL);
	if (atomic_read(&ndev->wr_cur) > 0 || ndev->endpoints != NULL)
		return; /* Have to wait to free */
	if (ndev->tx_congest_timer) {
		/* Drop old timer */
		untimeout(ndev->tx_congest_timer);
		ndev->tx_congest_timer = 0;
	}
	ndev->magic = 0;
	FREE(ndev);
	--ndev_n_alloc;
}

/*
 *  ndev_release  - detach from netdevice
 *
 *  Not reentrant.
 */
STATIC void ndev_release(struct dl *dl)
{
	struct ndev *ndev = dl->ndev;
	struct dl **dlp;

	ASSERT(ndev != NULL);
	ASSERT(ndev->magic == NDEV_MAGIC);
	for (dlp = &ndev->endpoints; *dlp; dlp = &(*dlp)->next_ndev) {
		ASSERT((*dlp)->magic == DL_MAGIC);
		ASSERT((*dlp)->ndev == ndev);
		if (*dlp == dl)
			break;
	}
	ASSERT(*dlp == dl);
	*dlp = dl->next_ndev;
	dl->ndev = NULL;
	if (ndev->endpoints == NULL) {
		/* No more attached endpoints: Try to free. */
		if (ndev->dev != NULL) {
			/*
			 * We may get unloaded while the driver below
			 * has buffers from us still queued.  Bad news
			 * since we have a destructor function pointer
			 * pointing back to us.  So we have to flush
			 * the driver's queues before we go away.  Of
			 * course this also flushes buffers for other
			 * clients of the driver, but that's just the
			 * way it has to be.  DMG 8/25/00
			 */
			START_BH_ATOMIC(ndev->dev);
			qdisc_reset(ndev->dev->qdisc);
			END_BH_ATOMIC(ndev->dev);
			ndev->dev = NULL;
		}
		ndev_free(ndev);
	}
}

/*
 *  ndev_down  - handle netdevice shutdown
 *
 *  The hard flag is set if the netdevice is removed.
 *  Not reentrant.
 */
STATIC void ndev_down(struct ndev *ndev, int hard)
{
	struct dl *dl;

	ASSERT(ndev->magic == NDEV_MAGIC);
	if (hard)
		ndev->dev = NULL;
	for (dl = ndev->endpoints; dl; dl = dl->next_ndev) {
		ASSERT(dl->magic == DL_MAGIC);
		if (hard || (dl->flags & LDLFLAG_SET_ADDR) == 0) {
			ndev_release(dl);
			hangup_set(dl);
		}
	}
}

/*
 *  ndev_wr_wakeup_endp  - wakeup endpoints
 *
 *  Not reentrant.
 */
STATIC void ndev_wr_wakeup_endp(struct ndev *ndev)
{
	struct dl *dl;

	ASSERT(ndev->magic == NDEV_MAGIC);
	for (dl = ndev->endpoints; dl; dl = dl->next_ndev) {
		ASSERT(dl->magic == DL_MAGIC);
		ASSERT(dl->ndev == ndev);
		qenable(WR(dl->rq));
	}
}

/*
 *  ndev_wr_wakeup  - End device write "sleep".
 *
 *  Not reentrant.
 */
STATIC void ndev_wr_wakeup(struct ndev *ndev)
{
	ASSERT(ndev->magic == NDEV_MAGIC);
	ASSERT(ndev->sleeping);
	if (ndev->tx_congest_timer) {
		untimeout(ndev->tx_congest_timer); /* Drop old timer */
		ndev->tx_congest_timer = 0;
	}
	ndev->sleeping = 0;
	ndev_wr_wakeup_endp(ndev);
}

#ifndef KERNEL_2_1		/* old 2.0 kernel, not supported anymore */
STATIC void tx_congestion_timeout(caddr_t dp)
{
	int psw;
	struct ndev *ndev = (struct ndev *)dp;

	SPLSTR(psw);
	ASSERT(ndev->magic == NDEV_MAGIC);
	if (ndev->tx_congest_timer != 0) {
		ndev->tx_congest_timer = 0;
		ndev_wr_wakeup_endp(ndev);
	}
	SPLX(psw);
}

/*
 *  ndev_wr_sleep  - Start device write "sleep".
 *
 *  Not reentrant.
 */
STATIC void ndev_wr_sleep(struct ndev *ndev)
{
	ASSERT(!ndev->sleeping);
	if (!ndev->tx_congest_timer)
		ndev->tx_congest_timer = timeout(tx_congestion_timeout,
						 (caddr_t)ndev,
						 CONGESTION_BACKOFF_TICKS);
	if (ndev->tx_congest_timer)
		ndev->sleeping = 1;
	else
		printk("ldl: ndev_wr_sleep() failed\n");
}
#endif

/*
 *  This is the skb destructor callback.
 *  Need to adjust amount of outstanding data on the netdevice write queue,
 *  and possibly free our ndev structure if no longer in use.
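 *
 *  The callback runs when the lower driver finally frees the skb, so wr_cur
 *  tracks bytes still queued below; ndev_free() above refuses to free the
 *  ndev while that count is non-zero, and the destructor retries the free
 *  once the last buffer of a detached device is released.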
 */
STATIC void ndev_skb_destruct(struct sk_buff *skb)
{
	struct ndev *ndev;

	ASSERT(skb != NULL);
#ifdef KERNEL_2_1
	if (skb_cloned(skb))
		return;
#else
	if (skb->data_skb != skb)
		return;
#endif
	ndev = (struct ndev *)skb->sk;
	skb->sk = NULL;
	ASSERT(ndev != NULL);
	ASSERT(ndev->magic == NDEV_MAGIC);
	ASSERT(atomic_read(&ndev->wr_cur) >= skb->truesize);
	if (ndev->dev != NULL) {
		if (atomic_read(&ndev->wr_cur) <= ndev->wr_min) {
			if (ndev->sleeping)
				ndev_wr_wakeup(ndev);
			atomic_sub(skb->truesize, &ndev->wr_cur);
		}
	} else {
		ASSERT(ndev->endpoints == NULL);
		atomic_sub(skb->truesize, &ndev->wr_cur);
		if (atomic_read(&ndev->wr_cur) == 0)
			ndev_free(ndev);
	}
}

#if defined(KERNEL_2_1)
STATIC int ndev_xmit(struct ndev *ndev, struct sk_buff *skb)
{
	ASSERT(skb != NULL);
	ASSERT(ndev != NULL);
	ASSERT(ndev->magic == NDEV_MAGIC);
	ASSERT(ndev->dev != NULL);
	skb->mac.raw = skb->data;
	skb->dev = ndev->dev;
	atomic_add(skb->truesize, &ndev->wr_cur);
	(struct ndev *)skb->sk = ndev;
	skb->destructor = ndev_skb_destruct;
	dev_queue_xmit(skb);
	return DONE;
}
#elif defined(too_complicated_KERNEL_2_1)
STATIC int ndev_xmit(struct ndev *ndev, struct sk_buff *skb)
{
	int psw;
	struct ldldev *dev;

	ASSERT(skb != NULL);
	ASSERT(ndev != NULL);
	ASSERT(ndev->magic == NDEV_MAGIC);
	ASSERT(ndev->dev != NULL);
	skb->mac.raw = skb->data;
	skb->dev = dev = ndev->dev;
	atomic_add(skb->truesize, &ndev->wr_cur);
	(struct ndev *)skb->sk = ndev;
	skb->destructor = ndev_skb_destruct;
	if (atomic_read(&ndev->wr_cur) <= ndev->wr_max) {
		struct Qdisc *q;

		START_BH_ATOMIC(dev);
		q = dev->qdisc;
		ASSERT(q != NULL);
		if (q->enqueue) {
			int ret;

			ret = q->enqueue(skb, q);
			qdisc_wakeup(dev);
			END_BH_ATOMIC(dev);
			if (ret == 1)
				return DONE;
		} else {
			END_BH_ATOMIC(dev);
			if (dev_queue_xmit(skb) >= 0)
				return DONE;
		}
		SPLSTR(psw);
		if (!ndev->sleeping)
			ndev_wr_sleep(ndev);
		SPLX(psw);
		return RETRY;
	} else {
		SPLSTR(psw);
		if (!ndev->sleeping)
			ndev_wr_sleep(ndev);
		SPLX(psw);
		kfree_skb(skb);
		return RETRY;
	}
}
#else
STATIC int ndev_xmit(struct ndev *ndev, struct sk_buff *skb, int pri)
{
	atomic_add(skb->truesize, &ndev->wr_cur);
	(struct ndev *)skb->sk = ndev;
	skb->dev = ndev->dev;
	skb->arp = 1;
	skb->destructor = ndev_skb_destruct;
	dev_queue_xmit(skb, ndev->dev, pri);
	return DONE;
}
#endif

/****************************************************************************/
/*                                                                          */
/*  Netdevice notifier handling.                                            */
/*                                                                          */
/****************************************************************************/

/*
 *  device_notification  - device notification callback.
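 *
 *  Registered through register_netdevice_notifier() below; on NETDEV_DOWN
 *  (and, on 2.1+ kernels, NETDEV_UNREGISTER) it detaches the affected
 *  endpoints via ndev_down() and runs any pending hangups.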
 */
STATIC int
device_notification(struct notifier_block *notifier,
		    unsigned long event, void *ptr)
{
	struct ldldev *dev = ptr;
	struct ndev *d;
	struct dl *dl;
	int psw;

	SPLSTR(psw);
#ifdef KERNEL_2_1
	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
#else
	if (event == NETDEV_DOWN) {
#endif
		if ((d = ndev_find(dev)) != NULL) {
			ndev_down(d, event != NETDEV_DOWN);
			if (n_hangup) {
				SPLX(psw);
				lis_spin_lock(&first_open_lock);
				for (dl = first_open; dl; dl = dl->next_open)
					if ((dl->flags & LDLFLAG_HANGUP) != 0)
						hangup_do(dl);
				lis_spin_unlock(&first_open_lock);
				return NOTIFY_DONE;
			}
		}
	}
	SPLX(psw);
	return NOTIFY_DONE;
}

STATIC struct notifier_block dl_notifier = {
	device_notification,
	NULL,
	0
};

STATIC INLINE int notifier_register(void)
{
	return register_netdevice_notifier(&dl_notifier);
}

STATIC INLINE int notifier_unregister(void)
{
	return unregister_netdevice_notifier(&dl_notifier);
}

/****************************************************************************/
/*                                                                          */
/*  bufcall() handling.                                                     */
/*                                                                          */
/****************************************************************************/

STATIC void dl_bufcallback(long idx)
{
	struct dl *dl = &dl_dl[idx];

	ASSERT(dl->rq != NULL);
	ASSERT(dl->bufwait);
	dl->bufwait = 0;
	qenable(WR(dl->rq));
}

STATIC int dl_bufcall(struct dl *dl, mblk_t *mp, int size)
{
	ASSERT(!dl->bufwait);
	if ((dl->bufwait = bufcall(size, BPRI_HI, dl_bufcallback, dl - dl_dl)) == 0) {
		printk("ldl: bufcall failed\n");
		freemsg(mp);
		return DONE;
	}
	return RETRY;
}

/****************************************************************************/
/*                                                                          */
/*  A few utility routines.                                                 */
/*                                                                          */
/****************************************************************************/

/*
 *  Translate from DLPI priority to Linux netdevice priority
 */
STATIC INLINE int pri_dlpi2netdevice(dl_ulong pri)
{
	ASSERT(pri >= 0);
	ASSERT(pri <= 100);
	return (pri < 33) ? LDLPRI_HI :
			    (pri < 66) ? LDLPRI_MED : LDLPRI_LO;
}

/*
 *  Check if a msgblk can be reused for a reply.
 *
 *  If the data block of the first message block is large
 *  enough, any remaining message blocks in the message are
 *  freed, read and write pointers are reset, and 1 is returned.
 *  Otherwise, the message is untouched and 0 is returned.
 */
STATIC INLINE int reuse_msg(msgb_t *mp, dl_ushort size)
{
	msgb_t *bp;

	ASSERT(mp != NULL);
	ASSERT(mp->b_datap != NULL);
	if (mp->b_datap->db_lim - mp->b_datap->db_base < size)
		return 0;
	if (mp->b_datap->db_ref != 1)
		return 0;
	if ((bp = unlinkb(mp)) != NULL)
		freemsg(bp);
	mp->b_wptr = mp->b_rptr = mp->b_datap->db_base;
	return 1;
}

/*
 *  Convert mp to DL_OK_ACK.
 */
STATIC INLINE void make_dl_ok_ack(msgb_t *mp, dl_ulong primitive)
{
	dl_ok_ack_t *ackp;

	/* ASSERT(reuse_msg(mp, DL_OK_ACK_SIZE)); */
	mp->b_datap->db_type = M_PCPROTO;
	ackp = (dl_ok_ack_t *)mp->b_datap->db_base;
	ackp->dl_primitive = DL_OK_ACK;
	ackp->dl_correct_primitive = primitive;
	mp->b_wptr += DL_OK_ACK_SIZE;
}

/*
 *  Convert mp to DL_ERROR_ACK.
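 *
 *  Same reuse convention as make_dl_ok_ack(): the caller must have checked
 *  reuse_msg() (or supplied a fresh block) so the dl_error_ack_t with its
 *  DLPI and UNIX error codes fits at db_base.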
 */
STATIC INLINE void make_dl_error_ack(msgb_t *mp, dl_ulong primitive,
				     dl_ulong err, dl_ulong uerr)
{
	dl_error_ack_t *ackp;

	/* ASSERT(reuse_msg(mp, DL_ERROR_ACK_SIZE)); */
	mp->b_datap->db_type = M_PCPROTO;
	ackp = (dl_error_ack_t *)mp->b_datap->db_base;
	ackp->dl_primitive = DL_ERROR_ACK;
	ackp->dl_error_primitive = primitive;
	ackp->dl_errno = err;
	ackp->dl_unix_errno = uerr;
	mp->b_wptr += DL_ERROR_ACK_SIZE;
}

/*
 *  Create DL_OK_ACK reply
 *
 *  On success, DONE is returned and *mp points to the ack.
 *  On failure, RETRY is returned and *mp is untouched.
 *  (But if bufcall fails *mp is freed and set to NULL
 *   and a bogus DONE is returned to avoid hanging.)
 */
STATIC INLINE int do_ok_ack(struct dl *dl, msgb_t **mp, dl_ulong primitive)
{
	if (!reuse_msg(*mp, DL_OK_ACK_SIZE)) {
		mblk_t *bp;

		if ((bp = allocb(DL_OK_ACK_SIZE, BPRI_HI)) == NULL) {
			if (dl_bufcall(dl, *mp, DL_OK_ACK_SIZE) == RETRY)
				return RETRY;
			else {
				*mp = NULL;
				return DONE;
			}
		}
		freemsg(*mp);
		*mp = bp;
	}
	make_dl_ok_ack(*mp, primitive);
	return DONE;
}

/*
 *  Send DL_ERROR_ACK reply
 *
 *  On success, DONE is returned and mp has been reused or freed.
 *  On failure, RETRY is returned and mp is untouched.
