
sch_htb.c

Linux kernel source code
C
Page 1 of 3
	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets +=
			skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}
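/*
 * Illustrative helper, not part of the original file: the packet count
 * that htb_enqueue() above and htb_charge_class() below add to bstats.
 * A GSO super-packet is counted as its gso_segs segments, any other
 * packet as one.  The helper name is hypothetical; the open-coded
 * ternary in those functions is what the kernel actually uses.
 */
static inline unsigned int htb_skb_pkt_count_sketch(struct sk_buff *skb)
{
	return skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}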
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock than event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = skb->len;
	long toks, diff;
	enum htb_cmode old_mode;

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
	if (toks > cl->B) toks = cl->B; \
	toks -= L2T(cl, cl->R, bytes); \
	if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
	cl->T = toks

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets += skb_is_gso(skb) ?
					skb_shinfo(skb)->gso_segs : 1;
		}
		cl = cl->parent;
	}
}
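/*
 * Illustrative sketch, not part of the original file: what one
 * HTB_ACCNT() expansion in htb_charge_class() above does, written out
 * as a plain function with hypothetical names.  "diff" is the bounded
 * time since the class was last charged (t_c) and "cost" stands for
 * L2T(cl, rate_or_ceil, bytes), the token price of the packet.
 */
static inline long htb_bucket_charge_sketch(long tokens, long buffer,
					    long mbuffer, long diff, long cost)
{
	long toks = diff + tokens;	/* refill by the elapsed time */

	if (toks > buffer)		/* clamp to the configured burst */
		toks = buffer;
	toks -= cost;			/* pay for the dequeued packet */
	if (toks <= -mbuffer)		/* bound how far into debt we go */
		toks = 1 - mbuffer;
	return toks;			/* new cl->tokens or cl->ctokens */
}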
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq).
 * Note: Applied are events that have cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
	int i;

	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	return q->now + PSCHED_TICKS_PER_SEC / 10;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
   if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
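/*
 * Illustrative sketch, not part of the original file: the deficit
 * round robin step that htb_dequeue_tree() above applies after a
 * successful dequeue, isolated with hypothetical names.  A leaf keeps
 * the turn until its per-level deficit drops below zero; it is then
 * topped up by its quantum and the feed pointer moves on.
 */
static inline int htb_drr_step_sketch(int *deficit, int quantum, int pkt_len)
{
	*deficit -= pkt_len;
	if (*deficit < 0) {
		*deficit += quantum;
		return 1;	/* advance to the next leaf (htb_next_rb_node) */
	}
	return 0;		/* the same leaf keeps the turn */
}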
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	psched_time_t next_event;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = psched_get_time();

	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
	q->nwc_hit = 0;
	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		psched_time_t event;

		if (q->now >= q->near_ev_cache[level]) {
			event = htb_do_events(q, level);
			if (!event)
				event = q->now + PSCHED_TICKS_PER_SEC;
			q->near_ev_cache[level] = event;
		} else
			event = q->near_ev_cache[level];

		if (event && next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	sch->qstats.overlimits++;
	qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
	return skb;
}
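/*
 * Illustrative sketch, not part of the original file: how the loop in
 * htb_dequeue() above visits the priorities marked active in
 * q->row_mask[level].  Bit "prio" is set when some class is active at
 * that priority, so inverting the mask and taking ffz() (find first
 * zero bit) yields the lowest set bit, i.e. the highest priority left
 * to try.  The helper name is hypothetical.
 */
static inline void htb_foreach_active_prio_sketch(int row_mask)
{
	int m = ~row_mask;

	while (m != (int)(-1)) {	/* stop once every bit is set */
		int prio = ffz(m);	/* lowest prio not yet visited */
		m |= 1 << prio;		/* mark it visited */
		/* htb_dequeue() tries htb_dequeue_tree(q, prio, level) here */
	}
}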
/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_HTB_INIT];
	struct tc_htb_glob *gopt;
	int i;

	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
	    tb[TCA_HTB_INIT - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&q->root);
	for (i = 0; i < HTB_HSIZE; i++)
		INIT_HLIST_HEAD(q->hash + i);
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = sch->dev->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	struct tc_htb_glob gopt;

	spin_lock_bh(&sch->dev->queue_lock);

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;
	rta = (struct rtattr *)b;
