📄 sch_hfsc.c
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	/* NOTE: the listing starts mid-way through hfsc_classify(); this
	 * opening (signature, locals, *qerr default) is reconstructed so
	 * the fragment parses. */
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		return -ENOENT;
	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					cl->classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = xchg(&cl->qdisc, new);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl != NULL && cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	RTA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

 rtattr_failure:
	return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto rtattr_failure;

	return skb->len;

 rtattr_failure:
	return -1;
}
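/*
 * Aside (not part of the original file): the (m1, d, m2) triple that
 * hfsc_dump_sc() above serializes is HFSC's two-piece linear service
 * curve: slope m1 for the first d time units, slope m2 afterwards.
 * The kernel keeps these values in a scaled internal form (sm1/dx/sm2)
 * and converts back with sm2m()/dx2d() only for dumping.  A minimal
 * stand-alone sketch of evaluating such a curve (the helper name and
 * plain u64 arithmetic are illustrative only):
 */
static u64 sc_eval_sketch(u64 m1, u64 d, u64 m2, u64 t)
{
	if (t <= d)
		return m1 * t;			/* first segment */
	return m1 * d + m2 * (t - d);		/* past the inflection */
}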
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *)b;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (hfsc_dump_curves(skb, cl) < 0)
		goto rtattr_failure;
	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

 rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	unsigned int i;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	q->defcls = qopt->defcls;
	for (i = 0; i < HFSC_HSIZE; i++)
		INIT_LIST_HEAD(&q->clhash[i]);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	skb_queue_head_init(&q->requeue);

	q->root.refcnt  = 1;
	q->root.classid = sch->handle;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}
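/*
 * Aside (not part of the original file): hfsc_walk() above drives the
 * generic qdisc_walker protocol -- the caller sets .skip/.count and each
 * class is handed to .fn() until that returns < 0.  A hypothetical
 * callback that counts leaf classes, embedding the walker in a larger
 * struct as is idiomatic:
 */
struct hfsc_leaf_counter {		/* hypothetical, illustration only */
	struct qdisc_walker w;
	unsigned int leaves;
};

static int hfsc_count_leaf_sketch(struct Qdisc *sch, unsigned long arg,
				  struct qdisc_walker *w)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct hfsc_leaf_counter *c =
		container_of(w, struct hfsc_leaf_counter, w);

	if (cl->level == 0)
		c->leaves++;
	return 0;	/* keep walking; < 0 stops the walk */
}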
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist)
			hfsc_reset_class(cl);
	}
	__skb_queue_purge(&q->requeue);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl, *next;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
			hfsc_destroy_class(sch, cl);
	}
	__skb_queue_purge(&q->requeue);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	return skb->len;

 rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	unsigned int len;
	int err;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = skb->len;
	err = cl->qdisc->enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		cl->qstats.drops++;
		sch->qstats.drops++;
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, len);

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
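/*
 * Aside (not part of the original file): hfsc_dequeue() below serves the
 * minimum-deadline class among the eligible classes while real-time
 * service is due, and otherwise picks the minimum virtual time in the
 * hierarchy (link sharing).  Eligible times and deadlines come from
 * inverting a class's cumulative service curve: "by what time must y
 * bytes have been sent?".  A stand-alone sketch of that inversion for a
 * two-piece curve (illustrative name and plain division; the full
 * sch_hfsc.c does this with rtsc_y2x() and the kernel's 64-bit helpers):
 */
static u64 sc_time_for_work_sketch(u64 m1, u64 d, u64 m2, u64 y)
{
	if (y <= m1 * d)			/* met by the first slope */
		return m1 ? y / m1 : 0;
	return d + (m2 ? (y - m1 * d) / m2 : 0);
}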
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;
	if ((skb = __skb_dequeue(&q->requeue)))
		goto out;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = cl->qdisc->dequeue(cl->qdisc);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("HFSC: Non-work-conserving qdisc ?\n");
		return NULL;
	}

	update_vf(cl, skb->len, cur_time);
	if (realtime)
		cl->cl_cumul += skb->len;

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

 out:
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}

static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	__skb_queue_head(&q->requeue, skb);
	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.requeue	= hfsc_requeue,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);
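/*
 * Usage note (not part of the original file): once the module above is
 * loaded, the qdisc is driven from userspace with tc, along these lines
 * (device name and rates are examples only):
 *
 *   tc qdisc add dev eth0 root handle 1: hfsc default 10
 *   tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *	sc rate 500kbit ul rate 1mbit
 *
 * "default 10" is what lands in q->defcls: packets matched by no filter
 * fall back to class 1:10, as in the tail of hfsc_classify().
 */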