📄 sch_cbq.c
			if (delay <= 0)
				delay = 1;
			q->wd_timer.expires = jiffies + delay;
			add_timer(&q->wd_timer);
			sch->flags |= TCQ_F_THROTTLED;
		}
	}
	return NULL;
}

/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		if ((cl = this->children) != NULL) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level+1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned h;

	if (q->quanta[prio] == 0)
		return;

	for (h=0; h<16; h++) {
		for (cl = q->classes[h]; cl; cl = cl->next) {
			/* BUGGGG... Beware! This expression suffers from
			   arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 || cl->quantum > 32*cl->qdisc->dev->mtu) {
				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n",
				       cl->classid, cl->quantum);
				cl->quantum = cl->qdisc->dev->mtu/2 + 1;
			}
		}
	}
}

static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
	struct cbq_class *split = cl->split;
	unsigned h;
	int i;

	if (split == NULL)
		return;

	for (i=0; i<=TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
			split->defaults[i] = NULL;
	}

	for (i=0; i<=TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h=0; h<16; h++) {
			struct cbq_class *c;

			for (c = q->classes[h]; c; c = c->next) {
				if (c->split == split && c->level < level &&
				    c->defmap&(1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		if ((split = cl->split) == NULL)
			return;
		splitid = split->classid;
	}

	if (split == NULL || split->classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def&mask;
	} else
		cl->defmap = (cl->defmap&~mask)|(def&mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;

	for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) {
		if (cl == this) {
			*clp = cl->next;
			cl->next = NULL;
			break;
		}
	}

	if (this->tparent) {
		clp=&this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		BUG_TRAP(this->sibling == this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
	unsigned h = cbq_hash(this->classid);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	this->next = q->classes[h];
	q->classes[h] = this;

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static int cbq_drop(struct Qdisc* sch)
{
	struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
	struct cbq_class *cl, *cl_head;
	int prio;

	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
		if ((cl_head = q->active[prio]) == NULL)
			continue;

		cl = cl_head;
		do {
			if (cl->q->ops->drop && cl->q->ops->drop(cl->q))
				return 1;
		} while ((cl = cl->next_alive) != cl_head);
	}
	return 0;
}

static void
cbq_reset(struct Qdisc* sch)
{
	struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
	struct cbq_class *cl;
	int prio;
	unsigned h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	del_timer(&q->wd_timer);
	del_timer(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	PSCHED_GET_TIME(q->now);
	q->now_rt = q->now;

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < 16; h++) {
		for (cl = q->classes[h]; cl; cl = cl->next) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			PSCHED_SET_PASTPERFECT(cl->undertime);
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}

static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change&TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change&TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change&TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change&TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change&TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority-1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO-1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
	switch (ovl->strategy) {
	case TC_CBQ_OVL_CLASSIC:
		cl->overlimit = cbq_ovl_classic;
		break;
	case TC_CBQ_OVL_DELAY:
		cl->overlimit = cbq_ovl_delay;
		break;
	case TC_CBQ_OVL_LOWPRIO:
		if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
		    ovl->priority2-1 <= cl->priority)
			return -EINVAL;
		cl->priority2 = ovl->priority2-1;
		cl->overlimit = cbq_ovl_lowprio;
		break;
	case TC_CBQ_OVL_DROP:
		cl->overlimit = cbq_ovl_drop;
		break;
	case TC_CBQ_OVL_RCLASSIC:
		cl->overlimit = cbq_ovl_rclassic;
		break;
	default:
		return -EINVAL;
	}
	cl->penalty = (ovl->penalty*HZ)/1000;
	return 0;
}

#ifdef CONFIG_NET_CLS_POLICE
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
	cl->police = p->police;

	if (cl->q->handle) {
		if (p->police == TC_POLICE_RECLASSIFY)
			cl->q->reshape_fail = cbq_reshape_fail;
		else
			cl->q->reshape_fail = NULL;
	}
	return 0;
}
#endif

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
	struct rtattr *tb[TCA_CBQ_MAX];
	struct tc_ratespec *r;

	if (rtattr_parse(tb, TCA_CBQ_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0 ||
	    tb[TCA_CBQ_RTAB-1] == NULL || tb[TCA_CBQ_RATE-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_CBQ_RATE-1]) < sizeof(struct tc_ratespec))
		return -EINVAL;

	if (tb[TCA_CBQ_LSSOPT-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_LSSOPT-1]) < sizeof(struct tc_cbq_lssopt))
		return -EINVAL;

	r = RTA_DATA(tb[TCA_CBQ_RATE-1]);

	MOD_INC_USE_COUNT;
	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB-1])) == NULL) {
		MOD_DEC_USE_COUNT;
		return -EINVAL;
	}

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.classid = sch->handle;
	q->link.qdisc = sch;
	if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO-1;
	q->link.priority2 = TC_CBQ_MAXPRIO-1;
	q->link.cpriority = TC_CBQ_MAXPRIO-1;
	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
	q->link.overlimit = cbq_ovl_classic;
	q->link.allot = psched_mtu(sch->dev);
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;
	q->link.stats.lock = &sch->dev->queue_lock;

	init_timer(&q->wd_timer);
	q->wd_timer.data = (unsigned long)sch;
	q->wd_timer.function = cbq_watchdog;
	init_timer(&q->delay_timer);
	q->delay_timer.data = (unsigned long)sch;
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	PSCHED_GET_TIME(q->now);
	q->now_rt = q->now;

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT-1])
		cbq_set_lss(&q->link, RTA_DATA(tb[TCA_CBQ_LSSOPT-1]));

	cbq_addprio(q, &q->link);
	return 0;
}

#ifdef CONFIG_RTNETLINK

static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;

	RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	RTA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_wrropt opt;

	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority+1;
	opt.cpriority = cl->cpriority+1;
	opt.weight = cl->weight;
	RTA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_ovl opt;

	opt.strategy = cl->ovl_strategy;
	opt.priority2 = cl->priority2+1;
	opt.penalty = (cl->penalty*1000)/HZ;
	RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		RTA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
	}
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

#ifdef CONFIG_NET_CLS_POLICE
static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_police opt;

	if (cl->police) {
		opt.police = cl->police;
		RTA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
	}
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
#endif

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_POLICE
	    cbq_dump_police(skb, cl) < 0 ||
#endif
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

int cbq_copy_xstats(struct sk_buff *skb, struct tc_cbq_xstats *st)
{
	RTA_PUT(skb, TCA_XSTATS, sizeof(*st), st);
	return 0;

rtattr_failure:
	return -1;
}
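A note on the "BUGGGG" warning in cbq_normalize_quanta: the quantum is computed as (weight * allot * nclasses) / quanta, and the three-way product can exceed the range of the 32-bit intermediate before the division happens. The following is a minimal, self-contained user-space sketch, not part of sch_cbq.c; it uses uint32_t where the kernel fields are long, and the numeric values are invented purely for illustration. It shows how the 32-bit product wraps and how a 64-bit intermediate would avoid it.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* Hypothetical class parameters, chosen so the product exceeds 2^32. */
	uint32_t weight = 100000, allot = 3000, nclasses = 20, quanta = 150000;

	/* 32-bit intermediate: weight*allot*nclasses = 6e9 wraps modulo 2^32
	 * before the division, giving a bogus quantum. */
	uint32_t bad  = (weight * allot * nclasses) / quanta;

	/* 64-bit intermediate: the product is computed exactly, then divided. */
	uint32_t good = (uint32_t)(((uint64_t)weight * allot * nclasses) / quanta);

	printf("32-bit intermediate: %" PRIu32 ", 64-bit intermediate: %" PRIu32 "\n",
	       bad, good);
	return 0;
}

With these values the 32-bit version yields 11366 instead of the correct 40000, which is the kind of bad quantum the subsequent range check in cbq_normalize_quanta catches and repairs.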