📄 sch_htb.c
/* ...tail of htb_dump(); the listing starts mid-function... */
    RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
    RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
    rta->rta_len = skb_tail_pointer(skb) - b;
    spin_unlock_bh(&sch->dev->queue_lock);
    return skb->len;

rtattr_failure:
    spin_unlock_bh(&sch->dev->queue_lock);
    /* trim back to the start of our attributes, as htb_dump_class does */
    nlmsg_trim(skb, b);
    return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
    struct htb_class *cl = (struct htb_class *)arg;
    unsigned char *b = skb_tail_pointer(skb);
    struct rtattr *rta;
    struct tc_htb_opt opt;

    spin_lock_bh(&sch->dev->queue_lock);
    tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
    tcm->tcm_handle = cl->classid;
    if (!cl->level && cl->un.leaf.q)
        tcm->tcm_info = cl->un.leaf.q->handle;

    rta = (struct rtattr *)b;
    RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

    memset(&opt, 0, sizeof(opt));

    opt.rate = cl->rate->rate;
    opt.buffer = cl->buffer;
    opt.ceil = cl->ceil->rate;
    opt.cbuffer = cl->cbuffer;
    opt.quantum = cl->un.leaf.quantum;
    opt.prio = cl->un.leaf.prio;
    opt.level = cl->level;
    RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
    rta->rta_len = skb_tail_pointer(skb) - b;
    spin_unlock_bh(&sch->dev->queue_lock);
    return skb->len;

rtattr_failure:
    spin_unlock_bh(&sch->dev->queue_lock);
    nlmsg_trim(skb, b);
    return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
    struct htb_class *cl = (struct htb_class *)arg;

    if (!cl->level && cl->un.leaf.q)
        cl->qstats.qlen = cl->un.leaf.q->q.qlen;
    cl->xstats.tokens = cl->tokens;
    cl->xstats.ctokens = cl->ctokens;

    if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
        gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
        gnet_stats_copy_queue(d, &cl->qstats) < 0)
        return -1;

    return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
{
    struct htb_class *cl = (struct htb_class *)arg;

    if (cl && !cl->level) {
        if (new == NULL &&
            (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                     cl->classid)) == NULL)
            return -ENOBUFS;
        sch_tree_lock(sch);
        if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
            qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
            qdisc_reset(*old);
        }
        sch_tree_unlock(sch);
        return 0;
    }
    return -ENOENT;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
    struct htb_class *cl = (struct htb_class *)arg;
    return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
    struct htb_class *cl = (struct htb_class *)arg;

    if (cl->un.leaf.q->q.qlen == 0)
        htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
    struct htb_class *cl = htb_find(classid, sch);
    if (cl)
        cl->refcnt++;
    return (unsigned long)cl;
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
    if (!cl->parent)
        /* the root class */
        return 0;

    if (!(cl->parent->children.next == &cl->sibling &&
          cl->parent->children.prev == &cl->sibling))
        /* not the last child */
        return 0;

    return 1;
}
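/*
 * Editor's note on the function below: when htb_delete() removes the last
 * child of an inner class, the parent is converted back into a leaf. It
 * gets a fresh (or noop) qdisc, restores the quantum/prio values backed up
 * in htb_change_class(), and restarts with full token buckets in
 * HTB_CAN_SEND mode.
 */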
static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
{
    struct htb_class *parent = cl->parent;

    BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);

    parent->level = 0;
    memset(&parent->un.inner, 0, sizeof(parent->un.inner));
    INIT_LIST_HEAD(&parent->un.leaf.drop_list);
    parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
    parent->un.leaf.quantum = parent->quantum;
    parent->un.leaf.prio = parent->prio;
    parent->tokens = parent->buffer;
    parent->ctokens = parent->cbuffer;
    parent->t_c = psched_get_time();
    parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
    struct htb_sched *q = qdisc_priv(sch);

    if (!cl->level) {
        BUG_TRAP(cl->un.leaf.q);
        qdisc_destroy(cl->un.leaf.q);
    }
    gen_kill_estimator(&cl->bstats, &cl->rate_est);
    qdisc_put_rtab(cl->rate);
    qdisc_put_rtab(cl->ceil);

    tcf_destroy_chain(cl->filter_list);

    while (!list_empty(&cl->children))
        htb_destroy_class(sch, list_entry(cl->children.next,
                                          struct htb_class, sibling));

    /* note: this delete may happen twice (see htb_delete) */
    hlist_del_init(&cl->hlist);
    list_del(&cl->sibling);

    if (cl->prio_activity)
        htb_deactivate(q, cl);

    if (cl->cmode != HTB_CAN_SEND)
        htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

    kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
    struct htb_sched *q = qdisc_priv(sch);

    qdisc_watchdog_cancel(&q->watchdog);
    /* This line used to be after the htb_destroy_class call below,
       and surprisingly it worked in 2.4. But it must precede it
       because filters need their target class alive to be able to
       call unbind_filter on it (without an Oops). */
    tcf_destroy_chain(q->filter_list);

    while (!list_empty(&q->root))
        htb_destroy_class(sch, list_entry(q->root.next,
                                          struct htb_class, sibling));
    __skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
    struct htb_sched *q = qdisc_priv(sch);
    struct htb_class *cl = (struct htb_class *)arg;
    unsigned int qlen;
    struct Qdisc *new_q = NULL;
    int last_child = 0;

    // TODO: why not allow deleting a subtree? references? does the
    // tc subsystem guarantee us that in htb_destroy it holds no class
    // refs, so that we can remove children safely there?
    if (!list_empty(&cl->children) || cl->filter_cnt)
        return -EBUSY;

    if (!cl->level && htb_parent_last_child(cl)) {
        new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                  cl->parent->classid);
        last_child = 1;
    }

    sch_tree_lock(sch);

    if (!cl->level) {
        qlen = cl->un.leaf.q->q.qlen;
        qdisc_reset(cl->un.leaf.q);
        qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
    }

    /* delete from hash and active; remainder in destroy_class */
    hlist_del_init(&cl->hlist);

    if (cl->prio_activity)
        htb_deactivate(q, cl);

    if (last_child)
        htb_parent_to_leaf(cl, new_q);

    if (--cl->refcnt == 0)
        htb_destroy_class(sch, cl);

    sch_tree_unlock(sch);
    return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
    struct htb_class *cl = (struct htb_class *)arg;

    if (--cl->refcnt == 0)
        htb_destroy_class(sch, cl);
}
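/*
 * Editor's note on the function below: htb_change_class() serves double
 * duty. With *arg == 0 it creates a new class (possibly converting its
 * parent from a leaf into an inner node); otherwise it only updates the
 * rate/ceil tables and leaf parameters of an existing class.
 */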
static int htb_change_class(struct Qdisc *sch, u32 classid,
                            u32 parentid, struct rtattr **tca,
                            unsigned long *arg)
{
    int err = -EINVAL;
    struct htb_sched *q = qdisc_priv(sch);
    struct htb_class *cl = (struct htb_class *)*arg, *parent;
    struct rtattr *opt = tca[TCA_OPTIONS - 1];
    struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
    struct rtattr *tb[TCA_HTB_RTAB];
    struct tc_htb_opt *hopt;

    /* extract all subattrs from opt attr */
    if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
        tb[TCA_HTB_PARMS - 1] == NULL ||
        RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
        goto failure;

    parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

    hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

    rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
    ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
    if (!rtab || !ctab)
        goto failure;

    if (!cl) {        /* new class */
        struct Qdisc *new_q;
        int prio;
        struct {
            struct rtattr rta;
            struct gnet_estimator opt;
        } est = {
            .rta = {
                .rta_len = RTA_LENGTH(sizeof(est.opt)),
                .rta_type = TCA_RATE,
            },
            .opt = {
                /* 4s interval, 16s averaging constant */
                .interval = 2,
                .ewma_log = 2,
            },
        };

        /* check for valid classid */
        if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
            htb_find(classid, sch))
            goto failure;

        /* check maximal depth */
        if (parent && parent->parent && parent->parent->level < 2) {
            printk(KERN_ERR "htb: tree is too deep\n");
            goto failure;
        }
        err = -ENOBUFS;
        if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
            goto failure;

        gen_new_estimator(&cl->bstats, &cl->rate_est,
                          &sch->dev->queue_lock,
                          tca[TCA_RATE - 1] ? : &est.rta);
        cl->refcnt = 1;
        INIT_LIST_HEAD(&cl->sibling);
        INIT_HLIST_NODE(&cl->hlist);
        INIT_LIST_HEAD(&cl->children);
        INIT_LIST_HEAD(&cl->un.leaf.drop_list);
        RB_CLEAR_NODE(&cl->pq_node);

        for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
            RB_CLEAR_NODE(&cl->node[prio]);

        /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
           so it can't be used inside of sch_tree_lock
           -- thanks to Karlis Peisenieks */
        new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
        sch_tree_lock(sch);
        if (parent && !parent->level) {
            unsigned int qlen = parent->un.leaf.q->q.qlen;

            /* turn parent into inner node */
            qdisc_reset(parent->un.leaf.q);
            qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
            qdisc_destroy(parent->un.leaf.q);
            if (parent->prio_activity)
                htb_deactivate(q, parent);

            /* remove from evt list because of level change */
            if (parent->cmode != HTB_CAN_SEND) {
                htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
                parent->cmode = HTB_CAN_SEND;
            }
            parent->level = (parent->parent ? parent->parent->level
                                            : TC_HTB_MAXDEPTH) - 1;
            memset(&parent->un.inner, 0, sizeof(parent->un.inner));
        }
        /* leaf (we) needs elementary qdisc */
        cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

        cl->classid = classid;
        cl->parent = parent;

        /* set class to be in HTB_CAN_SEND state */
        cl->tokens = hopt->buffer;
        cl->ctokens = hopt->cbuffer;
        cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;    /* 1min */
        cl->t_c = psched_get_time();
        cl->cmode = HTB_CAN_SEND;

        /* attach to the hash list and parent's family */
        hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
        list_add_tail(&cl->sibling,
                      parent ? &parent->children : &q->root);
    } else {
        if (tca[TCA_RATE - 1])
            gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                  &sch->dev->queue_lock,
                                  tca[TCA_RATE - 1]);
        sch_tree_lock(sch);
    }

    /* there used to be a nasty bug here: we have to check that the node
       is really a leaf before changing cl->un.leaf! */
    if (!cl->level) {
        cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
        if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
            printk(KERN_WARNING
                   "HTB: quantum of class %X is small. Consider r2q change.\n",
                   cl->classid);
            cl->un.leaf.quantum = 1000;
        }
        if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
            printk(KERN_WARNING
                   "HTB: quantum of class %X is big. Consider r2q change.\n",
                   cl->classid);
            cl->un.leaf.quantum = 200000;
        }
        if (hopt->quantum)
            cl->un.leaf.quantum = hopt->quantum;
        if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
            cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;

        /* backup for htb_parent_to_leaf */
        cl->quantum = cl->un.leaf.quantum;
        cl->prio = cl->un.leaf.prio;
    }

    cl->buffer = hopt->buffer;
    cl->cbuffer = hopt->cbuffer;
    if (cl->rate)
        qdisc_put_rtab(cl->rate);
    cl->rate = rtab;
    if (cl->ceil)
        qdisc_put_rtab(cl->ceil);
    cl->ceil = ctab;
    sch_tree_unlock(sch);

    *arg = (unsigned long)cl;
    return 0;

failure:
    if (rtab)
        qdisc_put_rtab(rtab);
    if (ctab)
        qdisc_put_rtab(ctab);
    return err;
}
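/*
 * Editor's note on the functions below: these are the filter hooks. tc
 * filters can be attached either to the qdisc itself or to an individual
 * class; bind/unbind only maintain reference counts, and as the historical
 * comment in htb_bind_filter() explains, binding is a weaker "lock" than
 * get/put and may be broken when the class is destroyed.
 */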
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
    struct htb_sched *q = qdisc_priv(sch);
    struct htb_class *cl = (struct htb_class *)arg;
    struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

    return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
                                     u32 classid)
{
    struct htb_sched *q = qdisc_priv(sch);
    struct htb_class *cl = htb_find(classid, sch);

    /*if (cl && !cl->level) return 0;
       The line above used to be there to prevent attaching filters to
       leaves. But at least tc_index filter uses this just to get class
       for other reasons so that we have to allow for it.
       ----
       19.6.2002
       As Werner explained it is ok - bind filter is just another way to
       "lock" the class - unlike "get" this lock can be broken by class
       during destroy IIUC.
     */
    if (cl)
        cl->filter_cnt++;
    else
        q->filter_cnt++;
    return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
    struct htb_sched *q = qdisc_priv(sch);
    struct htb_class *cl = (struct htb_class *)arg;

    if (cl)
        cl->filter_cnt--;
    else
        q->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
    struct htb_sched *q = qdisc_priv(sch);
    int i;

    if (arg->stop)
        return;

    for (i = 0; i < HTB_HSIZE; i++) {
        struct hlist_node *p;
        struct htb_class *cl;

        hlist_for_each_entry(cl, p, q->hash + i, hlist) {
            if (arg->count < arg->skip) {
                arg->count++;
                continue;
            }
            if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                arg->stop = 1;
                return;
            }
            arg->count++;
        }
    }
}

static struct Qdisc_class_ops htb_class_ops = {
    .graft = htb_graft,
    .leaf = htb_leaf,
    .qlen_notify = htb_qlen_notify,
    .get = htb_get,
    .put = htb_put,
    .change = htb_change_class,
    .delete = htb_delete,
    .walk = htb_walk,
    .tcf_chain = htb_find_tcf,
    .bind_tcf = htb_bind_filter,
    .unbind_tcf = htb_unbind_filter,
    .dump = htb_dump_class,
    .dump_stats = htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops = {
    .next = NULL,
    .cl_ops = &htb_class_ops,
    .id = "htb",
    .priv_size = sizeof(struct htb_sched),
    .enqueue = htb_enqueue,
    .dequeue = htb_dequeue,
    .requeue = htb_requeue,
    .drop = htb_drop,
    .init = htb_init,
    .reset = htb_reset,
    .destroy = htb_destroy,
    .change = NULL /* htb_change */,
    .dump = htb_dump,
    .owner = THIS_MODULE,
};

static int __init htb_module_init(void)
{
    return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
    unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");
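For reference, the quantum clamping that htb_change_class() applies when the user leaves quantum at 0 can be reproduced in a standalone sketch. The helper below is hypothetical (it is not part of sch_htb.c); it mirrors the rate/r2q rule and the [1000, 200000] byte clamp that triggers the "quantum ... is small/big" warnings above.

#include <stdio.h>

/* Hypothetical helper mirroring the r2q logic in htb_change_class(). */
static unsigned int htb_leaf_quantum(unsigned int rate_Bps, unsigned int r2q,
                                     unsigned int user_quantum)
{
    unsigned int quantum = rate_Bps / r2q;   /* default: rate / r2q bytes */

    if (!user_quantum && quantum < 1000)     /* "quantum ... is small" */
        quantum = 1000;
    if (!user_quantum && quantum > 200000)   /* "quantum ... is big" */
        quantum = 200000;
    if (user_quantum)                        /* explicit quantum wins */
        quantum = user_quantum;
    return quantum;
}

int main(void)
{
    /* 1 Mbit/s = 125000 B/s with tc's default r2q of 10 -> 12500 bytes */
    printf("%u\n", htb_leaf_quantum(125000, 10, 0));
    /* 64 kbit/s = 8000 B/s -> 800, clamped up to 1000 bytes */
    printf("%u\n", htb_leaf_quantum(8000, 10, 0));
    return 0;
}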