⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sch_cbq.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 4 页
字号:
/* Tail of cbq_dump() — the body of that function begins before this chunk;
 * only its netlink-failure unwind path is visible here. */
rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/*
 * Dump qdisc-level extended statistics (CBQ's avgidle) into a gnet_dump.
 * Returns whatever gnet_stats_copy_app() returns (0 on success, <0 on error).
 */
static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	/* Snapshot the live avgidle into the exported xstats structure. */
	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

/*
 * Dump one class's configuration as a nested TCA_OPTIONS rtattr.
 * Fills in tcm (parent/handle/info) and appends attributes via
 * cbq_dump_attr(). Returns skb->len on success, -1 on overflow
 * (the partially written attributes are trimmed off).
 */
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class*)arg;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct rtattr *rta;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	tcm->tcm_info = cl->q->handle;

	/* Open a nested TCA_OPTIONS attribute; length patched below. */
	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (cbq_dump_attr(skb, cl) < 0)
		goto rtattr_failure;
	rta->rta_len = skb_tail_pointer(skb) - b;
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/*
 * Dump one class's statistics (basic, rate estimator, queue, and CBQ
 * app-specific xstats). Returns 0/positive on success, -1 on failure.
 */
static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class*)arg;

	cl->qstats.qlen = cl->q->q.qlen;
	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;
	/* Export undertime relative to the scheduler's notion of "now". */
	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

/*
 * Graft a new child qdisc onto class 'arg', returning the old one in
 * *old. A NULL 'new' is replaced by a default pfifo. Returns 0 on
 * success, -ENOBUFS if the default qdisc cannot be allocated, -ENOENT
 * if the class does not exist.
 */
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	if (cl) {
		if (new == NULL) {
			if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
						     cl->classid)) == NULL)
				return -ENOBUFS;
		} else {
#ifdef CONFIG_NET_CLS_ACT
			/* Reclassify-on-police needs the reshape hook on the child. */
			if (cl->police == TC_POLICE_RECLASSIFY)
				new->reshape_fail = cbq_reshape_fail;
#endif
		}
		sch_tree_lock(sch);
		/* Swap under the tree lock, then drain the displaced qdisc. */
		*old = xchg(&cl->q, new);
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
		sch_tree_unlock(sch);

		return 0;
	}
	return -ENOENT;
}

/* Return the child qdisc attached to class 'arg' (NULL if no class). */
static struct Qdisc *
cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	return cl ? cl->q : NULL;
}

/* Called when a child qdisc's queue length dropped; deactivate the class
 * if it became empty. */
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

/*
 * Look up a class by classid and take a reference on it.
 * Returns the class cast to unsigned long, or 0 if not found.
 */
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

/*
 * Free a class and everything it owns: filter chain, child qdisc,
 * rate table, and rate estimator. The root (q->link) is embedded in
 * the qdisc private area and is therefore not kfree'd.
 */
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	BUG_TRAP(!cl->filters);	/* all filters must be unbound by now */

	tcf_destroy_chain(cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

/* Tear down the whole qdisc: drop all filters first, then all classes. */
static void
cbq_destroy(struct Qdisc* sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leafs which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < 16; h++) {
		for (cl = q->classes[h]; cl; cl = cl->next) {
			tcf_destroy_chain(cl->filter_list);
			cl->filter_list = NULL;
		}
	}
	/* Second pass: free every class in every hash bucket. 'next' is
	 * cached because cbq_destroy_class() frees the current node. */
	for (h = 0; h < 16; h++) {
		struct cbq_class *next;

		for (cl = q->classes[h]; cl; cl = next) {
			next = cl->next;
			cbq_destroy_class(sch, cl);
		}
	}
}

/*
 * Drop a reference taken by cbq_get(); destroy the class when the last
 * reference goes away. Clears q->rx_class under the device queue lock
 * so the enqueue path cannot see a stale pointer.
 */
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(&sch->dev->queue_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(&sch->dev->queue_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

/*
 * Create a new class or change an existing one (*arg != 0 means change).
 * Validates every nested TCA_CBQ_* attribute's payload size up front,
 * then applies the parameters under sch_tree_lock. On successful
 * creation, the new class is returned through *arg.
 * Returns 0 on success or a negative errno.
 */
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class*)*arg;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_CBQ_MAX];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt==NULL || rtattr_parse_nested(tb, TCA_CBQ_MAX, opt))
		return -EINVAL;

	/* Reject attributes whose payload is shorter than the struct we
	 * are about to read from it. */
	if (tb[TCA_CBQ_OVL_STRATEGY-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_OVL_STRATEGY-1]) < sizeof(struct tc_cbq_ovl))
		return -EINVAL;

	if (tb[TCA_CBQ_FOPT-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_FOPT-1]) < sizeof(struct tc_cbq_fopt))
		return -EINVAL;

	if (tb[TCA_CBQ_RATE-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_RATE-1]) < sizeof(struct tc_ratespec))
			return -EINVAL;

	if (tb[TCA_CBQ_LSSOPT-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_LSSOPT-1]) < sizeof(struct tc_cbq_lssopt))
			return -EINVAL;

	if (tb[TCA_CBQ_WRROPT-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
			return -EINVAL;

#ifdef CONFIG_NET_CLS_ACT
	if (tb[TCA_CBQ_POLICE-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
			return -EINVAL;
#endif

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent && cl->tparent->classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE-1]) {
			rtab = qdisc_get_rtab(RTA_DATA(tb[TCA_CBQ_RATE-1]), tb[TCA_CBQ_RTAB-1]);
			if (rtab == NULL)
				return -EINVAL;
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			/* Swap in the new rate table, release the old one. */
			rtab = xchg(&cl->R_tab, rtab);
			qdisc_put_rtab(rtab);
		}

		if (tb[TCA_CBQ_LSSOPT-1])
			cbq_set_lss(cl, RTA_DATA(tb[TCA_CBQ_LSSOPT-1]));

		if (tb[TCA_CBQ_WRROPT-1]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, RTA_DATA(tb[TCA_CBQ_WRROPT-1]));
		}

		if (tb[TCA_CBQ_OVL_STRATEGY-1])
			cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));

#ifdef CONFIG_NET_CLS_ACT
		if (tb[TCA_CBQ_POLICE-1])
			cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
#endif

		if (tb[TCA_CBQ_FOPT-1])
			cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		return 0;
	}

	/* Creation path: a new class needs a real parent and the three
	 * mandatory attribute groups. */
	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT-1] == NULL || tb[TCA_CBQ_RATE-1] == NULL ||
	    tb[TCA_CBQ_LSSOPT-1] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(RTA_DATA(tb[TCA_CBQ_RATE-1]), tb[TCA_CBQ_RTAB-1]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		/* The major number must match this qdisc and the minor
		 * must be free. */
		if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
			goto failure;
	} else {
		/* No classid given: auto-allocate a free minor >= 0x8000. */
		int i;
		classid = TC_H_MAKE(sch->handle,0x8000);

		for (i=0; i<0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;
	cl->R_tab = rtab;
	rtab = NULL;	/* ownership moved to the class */
	cl->refcnt = 1;
	if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
		cl->q = &noop_qdisc;	/* fall back to a black hole */
	cl->classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, RTA_DATA(tb[TCA_CBQ_LSSOPT-1]));
	cbq_set_wrr(cl, RTA_DATA(tb[TCA_CBQ_WRROPT-1]));
	/* Inherit unset LSS parameters from the root class. */
	if (cl->ewma_log==0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle==0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt==0)
		cl->avpkt = q->link.avpkt;
	cl->overlimit = cbq_ovl_classic;
	if (tb[TCA_CBQ_OVL_STRATEGY-1])
		cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
#ifdef CONFIG_NET_CLS_ACT
	if (tb[TCA_CBQ_POLICE-1])
		cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
#endif
	if (tb[TCA_CBQ_FOPT-1])
		cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
	sch_tree_unlock(sch);

	if (tca[TCA_RATE-1])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock, tca[TCA_RATE-1]);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}

/*
 * Delete a class. Refused (-EBUSY) while filters are bound, children
 * exist, or it is the root class. Purges its queue, detaches it from
 * every scheduler data structure, then drops the caller's reference.
 */
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class*)arg;
	unsigned int qlen;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	qdisc_reset(cl->q);
	qdisc_tree_decrease_qlen(cl->q, qlen);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	/* Drop any cached transmit/borrow/rx references to this class. */
	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	if (--cl->refcnt == 0)
		cbq_destroy_class(sch, cl);

	return 0;
}

/* Return the filter-chain anchor for class 'arg' (root class if 0). */
static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

/*
 * Bind a filter to the class identified by classid. Refused when the
 * would-be parent is not above the target in the class hierarchy.
 * Bumps the class's filter count; returns the class or 0.
 */
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class*)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

/* Undo cbq_bind_filter(): drop the class's filter count. */
static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	cl->filters--;
}

/*
 * Iterate over all classes in all 16 hash buckets, honoring the
 * walker's skip/count/stop protocol.
 */
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	unsigned h;

	if (arg->stop)
		return;

	for (h = 0; h < 16; h++) {
		struct cbq_class *cl;

		for (cl = q->classes[h]; cl; cl = cl->next) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

/* Class operations table hooked into cbq_qdisc_ops below. */
static struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

/* Top-level qdisc operations; registered under the id "cbq". */
static struct Qdisc_ops cbq_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.requeue	=	cbq_requeue,
	.drop		=	cbq_drop,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

/* Module entry point: register the CBQ qdisc with the tc framework. */
static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}

/* Module exit point: unregister the CBQ qdisc. */
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -