sch_htb.c
	RTA_PUT(skb, TCA_STATS, sizeof(sch->stats), &sch->stats);
	HTB_QUNLOCK(sch);
	return skb->len;

rtattr_failure:
	HTB_QUNLOCK(sch);
	skb_trim(skb, skb->tail - skb->data);
	return -1;
}

/* Dump one class: fill tcm, the HTB parameters and the statistics into skb. */
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
	struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
	struct htb_sched *q = qdisc_priv(sch);
#endif
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_htb_opt opt;

	HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);

	HTB_QLOCK(sch);
	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (!cl->level && cl->un.leaf.q) {
		tcm->tcm_info = cl->un.leaf.q->handle;
		cl->stats.qlen = cl->un.leaf.q->q.qlen;
	}

	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->un.leaf.quantum;
	opt.prio = cl->un.leaf.prio;
	opt.level = cl->level;
	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

#ifdef HTB_RATECM
	cl->stats.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
	cl->stats.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
#endif

	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;
	RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
	RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
	HTB_QUNLOCK(sch);
	return skb->len;

rtattr_failure:
	HTB_QUNLOCK(sch);
	skb_trim(skb, b - skb->data);
	return -1;
}

/* Replace the leaf qdisc of a class; the previous qdisc is returned in *old. */
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
	struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL && (new = qdisc_create_dflt(sch->dev,
					&pfifo_qdisc_ops)) == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
			if (cl->prio_activity)
				htb_deactivate(qdisc_priv(sch), cl);

			/* TODO: is this correct? Why doesn't CBQ do it? */
			sch->q.qlen -= (*old)->q.qlen;
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

/* Take a reference on the class identified by classid (the class "get" op). */
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
#ifdef HTB_DEBUG
	struct htb_sched *q = qdisc_priv(sch);
#endif
	struct htb_class *cl = htb_find(classid, sch);
	HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}

static void htb_destroy_filters(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}

/* Free one class and, recursively, all of its children. */
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);
	HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		sch->q.qlen -= cl->un.leaf.q->q.qlen;
		qdisc_destroy(cl->un.leaf.q);
	}
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

#ifdef CONFIG_NET_ESTIMATOR
	qdisc_kill_estimator(&cl->stats);
#endif
	htb_destroy_filters(&cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
				struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	list_del(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	HTB_DBG(0,1,"htb_destroy q=%p\n",q);

	del_timer_sync(&q->timer);
#ifdef HTB_RATECM
	del_timer_sync(&q->rttim);
#endif
	/* This line used to be after the htb_destroy_class call below
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to
	   call unbind_filter on it (without an Oops). */
	htb_destroy_filters(&q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
				struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);

	// TODO: why not allow deleting a whole subtree? references? does the
	// tc subsystem guarantee us that in htb_destroy it holds no class
	// refs, so that we can remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	sch_tree_lock(sch);

	/* delete from hash and active; the remainder is done in destroy_class */
	list_del_init(&cl->hlist);
	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
#ifdef HTB_DEBUG
	struct htb_sched *q = qdisc_priv(sch);
#endif
	struct htb_class *cl = (struct htb_class *)arg;
	HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

/* Create a new class or change the parameters of an existing one. */
static int htb_change_class(struct Qdisc *sch, u32 classid,
		u32 parentid, struct rtattr **tca, unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from the opt attr */
	if (!opt || rtattr_parse(tb, TCA_HTB_RTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
			tb[TCA_HTB_PARMS-1] == NULL ||
			RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
	HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n",
			cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) { /* new class */
		struct Qdisc *new_q;
		/* check for a valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		memset(cl, 0, sizeof(*cl));
		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_LIST_HEAD(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
#ifdef HTB_DEBUG
		cl->magic = HTB_CMAGIC;
#endif

		/* create the leaf qdisc early because it uses kmalloc(GFP_KERNEL),
		   which can't be used inside sch_tree_lock
		   -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn the parent into an inner node */
			sch->q.qlen -= parent->un.leaf.q->q.qlen;
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from the event list because of the level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq /*+0*/);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					: TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* the leaf (us) needs an elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set the class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60000000; /* 1min */
		PSCHED_GET_TIME(cl->t_c);
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and the parent's family */
		list_add_tail(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
#ifdef HTB_DEBUG
		{
			int i;
			for (i = 0; i < TC_HTB_NUMPRIO; i++)
				cl->node[i].rb_color = -1;
			cl->pq_node.rb_color = -1;
		}
#endif
	} else
		sch_tree_lock(sch);

	/* there used to be a nasty bug here: we have to check that the node
	   is really a leaf before changing cl->un.leaf! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
	HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
	u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);
	HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
	/* if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get the
	   class for other reasons, so we have to allow it.
	   ----
	   19.6.2002
	   As Werner explained, it is ok - binding a filter is just another way
	   to "lock" the class - unlike "get", this lock can be broken by the
	   class during destroy, IIUC. */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}

/* Walk all classes in the hash, honouring the walker's skip/count/stop protocol. */
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct list_head *p;
		list_for_each(p, q->hash + i) {
			struct htb_class *cl = list_entry(p, struct htb_class, hlist);
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
};

static struct Qdisc_ops htb_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.requeue	=	htb_requeue,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.change		=	NULL /* htb_change */,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");
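The class-walk contract implemented by htb_walk() above (the walker's fn, skip, count and stop fields) is easiest to see from the caller's side. The following is a minimal, hypothetical caller-side sketch, not part of sch_htb.c: it counts the classes of an HTB qdisc through the generic ->walk() class operation. It assumes 2.6-era headers, and the names count_walker, count_one_class and count_htb_classes are invented for illustration.

/*
 * Hypothetical sketch (not part of sch_htb.c): counting classes via the
 * ->walk() class operation that htb_walk() implements.
 */
#include <net/pkt_sched.h>

struct count_walker {
	struct qdisc_walker w;	/* must be first: the callback casts back from &cw.w */
	int classes;		/* private accumulator */
};

/* Called once per class; a negative return value would stop the walk. */
static int count_one_class(struct Qdisc *sch, unsigned long cl,
			   struct qdisc_walker *w)
{
	struct count_walker *cw = (struct count_walker *)w;

	cw->classes++;
	return 0;
}

static int count_htb_classes(struct Qdisc *sch)
{
	struct count_walker cw = { .w = { .fn = count_one_class } };

	/* skip/count/stop start at zero, so every class is visited */
	sch->ops->cl_ops->walk(sch, &cw.w);
	return cw.classes;
}

In-tree users of ->walk() follow the same embed-and-cast pattern; for example, the class dump path in net/sched/sch_api.c wraps struct qdisc_walker in its own argument structure and recovers it inside the callback.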