📄 sch_htb.c
字号:
/* Tail of the per-ancestor charging loop (the loop/function opening is not
 * visible in this chunk -- presumably htb_charge_class, walking cl up via
 * cl->parent; confirm against the full file).  For each ancestor: recompute
 * its mode after the charge, move it between the wait queue and active state
 * if the mode changed, and update rate/byte counters. */
old_mode = cl->cmode;
diff = 0;
htb_change_class_mode(q,cl,&diff);
if (old_mode != cl->cmode) {
	/* mode changed: drop the stale wait-queue entry (if it was waiting)
	   and re-insert with the new wait time (if it still cannot send) */
	if (old_mode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
	if (cl->cmode != HTB_CAN_SEND)
		htb_add_to_wait_tree (q,cl,diff,1);
}
#ifdef HTB_RATECM
/* update rate counters */
cl->sum_bytes += bytes;
cl->sum_packets++;
#endif
/* update byte stats except for leaves which are already updated */
if (cl->level) {
	cl->stats.bytes += bytes;
	cl->stats.packets++;
}
cl = cl->parent;
}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans the event queue for pending events and applies them. Returns jiffies
 * to next pending event (0 for no event in pq).
 * Note: Applied are events whose cl->pq_key <= q->jiffies.
 */
static long htb_do_events(struct htb_sched *q,int level)
{
	int i;
	HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
			level,q->wait_pq[level].rb_node,q->row_mask[level]);
	/* bounded loop: bail out after 500 events to avoid hogging the CPU */
	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = q->wait_pq[level].rb_node;
		if (!p) return 0;
		/* leftmost node = smallest pq_key = earliest event */
		while (p->rb_left) p = p->rb_left;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (time_after(cl->pq_key, q->jiffies)) {
			/* earliest event is still in the future */
			HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
			return cl->pq_key - q->jiffies;
		}
		htb_safe_rb_erase(p,q->wait_pq+level);
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
		if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
			if (net_ratelimit())
				printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
				       cl->classid, diff,
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
				       q->now.tv_sec * 1000000ULL + q->now.tv_usec,
				       cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
#else
				       (unsigned long long) q->now,
				       (unsigned long long) cl->t_c,
#endif
				       q->jiffies);
			diff = 1000;
		}
#endif
		htb_change_class_mode(q,cl,&diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree (q,cl,diff,2);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	/* too many events processed in one go; retry in HZ/10 */
	return HZ/10;
}

/*
Returns class->node+prio from the id-tree where the class's id is >= id.
   NULL if no such node exists. */
static struct rb_node *htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
{
	/* r remembers the best (smallest classid >= id) candidate so far */
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
		if (id == cl->classid) return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n; n = n->rb_left;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where current feed pointers points to.
 */
static struct htb_class *htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
{
	int i;
	/* explicit stack of (tree root, feed pointer, last-id hint) per level;
	   walks down the inner-class feed trees until a leaf is reached */
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH],*sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	/* 65535 is a safety bound against a malformed tree */
	for (i = 0; i < 65535; i++) {
		HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);

		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
		}
		*sp->pid = 0; /* ptr is valid now so that remove this hint as it can
				 become out of date quickly */
		if (!*sp->pptr) { /* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left) *sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
				htb_next_rb_node (sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
			HTB_CHCL(cl);
			if (!cl->level) return cl;
			/* inner class: descend into its feed tree for this prio */
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr+prio;
			sp->pid = cl->un.inner.last_ptr_id+prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q,int prio,int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl,*start;
	/* look initial class up in the row
*/
	start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
			q->ptr[level]+prio,q->last_ptr_id[level]+prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl) return NULL;
		HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
				prio,level,cl->classid,cl->un.leaf.deficit[level]);

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q,cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
					prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
			if (cl == start) /* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
			break;
		/* non-empty leaf qdisc refused to give a packet: warn once */
		if (!cl->warned) {
			printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		/* advance the DRR pointer and try the next leaf */
		htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
		cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
				q->last_ptr_id[level]+prio);
	} while (cl != start);

	if (likely(skb != NULL)) {
		/* deficit round robin: when the class's deficit goes negative,
		   refill by quantum and move the feed pointer to the next class */
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
				level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate (q,cl);
		htb_charge_class (q,cl,level,skb->len);
	}
	return skb;
}

/* Arm the watchdog timer to fire after 'delay' jiffies (clamped to [1,5*HZ])
   and mark the qdisc throttled. */
static void htb_delay_by(struct Qdisc *sch,long delay)
{
	struct htb_sched *q = qdisc_priv(sch);
	if (delay <= 0) delay = 1;
	if (unlikely(delay > 5*HZ)) {
		if (net_ratelimit())
			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
		delay = 5*HZ;
	}
	/* why don't use jiffies here ? because expires can be in past */
	mod_timer(&q->timer, q->jiffies + delay);
	sch->flags |= TCQ_F_THROTTLED;
	sch->stats.overlimits++;
	HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	long min_delay;
#ifdef HTB_DEBUG
	int evs_used = 0;
#endif

	q->jiffies = jiffies;
	HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
			sch->q.qlen);

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen) goto fin;
	PSCHED_GET_TIME(q->now);

	min_delay = LONG_MAX;
	q->nwc_hit = 0;
	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		long delay;
		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
			delay = htb_do_events(q,level);
			q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
#ifdef HTB_DEBUG
			evs_used++;
#endif
		} else
			delay = q->near_ev_cache[level] - q->jiffies;

		if (delay && min_delay > delay)
			min_delay = delay;
		/* scan active (row_mask) priorities, highest prio first */
		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz (m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q,prio,level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
#ifdef HTB_DEBUG
	if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
		if (min_delay == LONG_MAX) {
			printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
					evs_used,q->jiffies,jiffies);
			htb_debug_dump(q);
		} else
			printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
					"too small rate\n",min_delay);
	}
#endif
	/* nothing dequeued; arm watchdog for the nearest pending event */
	htb_delay_by (sch,min_delay > 5*HZ ?
5*HZ : min_delay);
fin:
	HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
	return skb;
}

/* try to drop from each class (by prio) until one succeed */
static unsigned int htb_drop(struct Qdisc* sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	/* lowest priority (largest prio number) is sacrificed first */
	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each (p,q->drops+prio) {
			struct htb_class *cl = list_entry(p, struct htb_class, un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
				(len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate (q,cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;
	HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);

	for (i = 0; i < HTB_HSIZE; i++) {
		struct list_head *p;
		list_for_each (p,q->hash+i) {
			struct htb_class *cl = list_entry(p,struct htb_class,hlist);
			if (cl->level)
				memset(&cl->un.inner,0,sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
#ifdef HTB_DEBUG
			cl->pq_node.rb_color = -1;
			memset(cl->node,255,sizeof(cl->node));
#endif
		}
	}
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->timer);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	/* wipe all per-level scheduling state */
	memset(q->row,0,sizeof(q->row));
	memset(q->row_mask,0,sizeof(q->row_mask));
	memset(q->wait_pq,0,sizeof(q->wait_pq));
	memset(q->ptr,0,sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops+i);
}

/* Parse the TCA_HTB_INIT netlink attribute and initialize the scheduler
   private data (hash lists, drop lists, timers, direct queue). */
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_HTB_INIT];
	struct tc_htb_glob *gopt;
	int i;
#ifdef HTB_DEBUG
	printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
			  HTB_VER >> 16,HTB_VER & 0xffff);
#endif
	if (!opt || rtattr_parse(tb, TCA_HTB_INIT, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
tb[TCA_HTB_INIT-1] == NULL ||
			RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
	/* major version of userspace tc must match the kernel part */
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
				HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
		return -EINVAL;
	}
	q->debug = gopt->debug;
	HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);

	INIT_LIST_HEAD(&q->root);
	for (i = 0; i < HTB_HSIZE; i++)
		INIT_LIST_HEAD(q->hash+i);
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops+i);

	init_timer(&q->timer);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = sch->dev->tx_queue_len;
	if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
		q->direct_qlen = 2;
	q->timer.function = htb_timer;
	q->timer.data = (unsigned long)sch;
#ifdef HTB_RATECM
	init_timer(&q->rttim);
	q->rttim.function = htb_rate_timer;
	q->rttim.data = (unsigned long)sch;
	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);
#endif
	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}

/* Dump the global scheduler options back to userspace as TCA_HTB_INIT.
   NOTE(review): this function continues past the end of this chunk. */
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_htb_glob gopt;
	HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
	/* stats */
	HTB_QLOCK(sch);
	gopt.direct_pkts = q->direct_pkts;
#ifdef HTB_DEBUG
	if (HTB_DBG_COND(0,2))
		htb_debug_dump(q);
#endif
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = q->debug;
	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	rta->rta_len = skb->tail - b;
	sch->stats.qlen = sch->q.qlen;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -