
sch_hfsc.c

Linux kernel source code (C), page 1 of 3
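This page shows part of sch_hfsc.c from the Linux kernel's net/sched directory: the HFSC (Hierarchical Fair Service Curve) packet scheduler. The excerpt opens partway through rtsc_min(), which merges a runtime service curve with a new service curve by taking their minimum, then covers the eligible- and deadline-time updates (init_ed(), update_ed()), the virtual-time bookkeeping for link sharing (init_vf(), update_vf()), and class management (hfsc_change_class(), hfsc_delete_class(), hfsc_classify()). The listing is cut off inside hfsc_classify(); the rest of the file continues on the following pages.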
 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
				       cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
				 cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->dequeue(sch);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
		return 0;
	}
	len = skb->len;
	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
		if (net_ratelimit())
			printk("qdisc_peek_len: failed to requeue\n");
		qdisc_tree_decrease_qlen(sch, 1);
		return 0;
	}
	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline unsigned int
hfsc_hash(u32 h)
{
	h ^= h >> 8;
	h ^= h >> 4;

	return h & (HFSC_HSIZE - 1);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
		if (cl->classid == classid)
			return cl;
	}
	return NULL;
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct rtattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_HFSC_MAX];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
		return -EINVAL;

	if (tb[TCA_HFSC_RSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
			return -EINVAL;
		rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
			return -EINVAL;
		fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
			return -EINVAL;
		usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent && cl->cl_parent->classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->refcnt    = 1;
	cl->classid   = classid;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	if (tca[TCA_RATE-1])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock, tca[TCA_RATE-1]);
	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	list_del(&cl->hlist);

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_BYPASS;
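A note on the curve arithmetic at the top of the listing. Every HFSC service curve is two-piece linear: slope sm1 for the first dx units, then slope sm2, with dy the y-offset of the knee. seg_x2y(), defined earlier in the file, evaluates one linear segment (distance times slope, in SM_SHIFT fixed point). When rtsc_min() finds that the two curves cross, it inverts that relation: from seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y) it follows that dx = (y1 - y) / (sm1 - sm2), which is exactly the shift-and-do_div() sequence in the code. The user-space sketch below reworks that math with doubles for readability; the struct and helper names are illustrative stand-ins, not the kernel's types.

#include <stdio.h>

/* Simplified two-piece linear service curve: slope m1 for the first
 * dx units past the origin (x, y), slope m2 afterwards, with
 * dy = m1 * dx the y-offset of the knee.  A readability stand-in for
 * the kernel's fixed-point runtime_sc/internal_sc structures. */
struct curve {
	double x, y;   /* origin */
	double dx, dy; /* knee offset from the origin */
	double m1, m2; /* slopes before and after the knee */
};

/* Value of the curve at absolute coordinate x, cf. rtsc_x2y(). */
static double curve_y(const struct curve *c, double x)
{
	double d = x - c->x;

	if (d <= c->dx)
		return c->y + d * c->m1;           /* first segment */
	return c->y + c->dy + (d - c->dx) * c->m2; /* second segment */
}

int main(void)
{
	/* A concave curve (m1 > m2): burst at 10 units/s for 5 s,
	 * then a sustained 2 units/s. */
	struct curve isc = { .x = 0, .y = 0, .dx = 5, .dy = 50,
			     .m1 = 10, .m2 = 2 };

	/* Suppose the old runtime curve sits at y1 = 20 where the new
	 * curve is anchored at y = 0: the two curves must cross. */
	double y1 = 20.0, y = 0.0;

	/* Reverse of seg_x2y(), as in the rtsc_min() comment:
	 *   m1*dx == m2*dx + (y1 - y)  so  dx = (y1 - y) / (m1 - m2) */
	double dx = (y1 - y) / (isc.m1 - isc.m2);

	printf("curve at x=3: %.1f, at x=8: %.1f\n",
	       curve_y(&isc, 3), curve_y(&isc, 8)); /* 30.0, 56.0 */
	printf("crossing offset dx = %.2f\n", dx);  /* 2.50 */
	return 0;
}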

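Similarly, the eligible-curve rule in init_ed() and hfsc_change_rsc() can be stated compactly: the eligible curve starts as a copy of the deadline curve, and when the real-time curve is convex (sm1 <= sm2) its first segment is dropped by zeroing dx and dy, which leaves a straight line of slope m2 through the same origin. A minimal sketch of that rule, reusing the simplified curve struct from the previous example (names are illustrative, not the kernel's):

#include <stdio.h>

/* Same simplified two-piece curve as in the previous sketch. */
struct curve {
	double x, y, dx, dy, m1, m2;
};

/* Derive the eligible curve from the deadline curve, following
 * init_ed(): a concave curve (m1 > m2) keeps the full deadline
 * curve; a convex one (m1 <= m2) collapses to a line of slope m2. */
static struct curve eligible_from_deadline(struct curve deadline)
{
	struct curve e = deadline; /* cl->cl_eligible = cl->cl_deadline; */

	if (deadline.m1 <= deadline.m2) {
		e.dx = 0; /* no first segment: only slope m2 remains */
		e.dy = 0;
	}
	return e;
}

int main(void)
{
	struct curve convex = { .x = 0, .y = 0, .dx = 4, .dy = 8,
				.m1 = 2, .m2 = 10 };
	struct curve e = eligible_from_deadline(convex);

	printf("eligible: dx=%.0f dy=%.0f (linear, slope m2=%.0f)\n",
	       e.dx, e.dy, e.m2);
	return 0;
}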