
sch_csz.c

Linux kernel source code. It comes from a compressed archive of the sources accompanying the book 《Linux内核》 (The Linux Kernel).
Language: C
	}
	return NULL;
}

#ifdef CSZ_PLUS_TBF
/* The optional TBF (token bucket) shaping extension; this code is only
   built when CSZ_PLUS_TBF is defined. */

static void csz_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;

	qdisc_wakeup(sch->dev);
}

static __inline__ void
csz_move_queue(struct csz_flow *this, long delta)
{
	/* Unlink from the finish list, shift the flow's start/finish
	   numbers by delta and re-insert in finish order. */
	this->fprev->fnext = this->fnext;
	this->fnext->fprev = this->fprev;
	this->start += delta;
	this->finish += delta;
	csz_insert_finish(this);
}

static __inline__ int csz_enough_tokens(struct csz_sched_data *q,
					struct csz_flow *this,
					struct sk_buff *skb)
{
	long toks;
	long shift;
	psched_time_t now;

	PSCHED_GET_TIME(now);

	/* Tokens accumulated since the last update, minus the cost of
	   this packet. */
	toks = PSCHED_TDIFF(now, this->t_tbf) + this->tokens - L2R(this,skb->len);

	shift = 0;
	if (this->throttled) {
		/* Remember the a posteriori delay */

		unsigned long R = csz_update(q);
		shift = R - this->R_tbf;
		this->R_tbf = R;
	}

	if (toks >= 0) {
		/* Now we have enough tokens to proceed */

		this->tokens = toks <= this->depth ? toks : this->depth;
		this->t_tbf = now;

		if (!this->throttled)
			return 1;

		/* The flow was throttled. Update its start/finish numbers
		   with the delay calculated a posteriori.
		 */

		this->throttled = 0;
		if (shift > 0)
			csz_move_queue(this, shift);
		return 1;
	}

	if (!this->throttled) {
		/* The flow has just been throttled; remember the
		   current round number to calculate the a posteriori
		   delay.
		 */
		this->throttled = 1;
		this->R_tbf = csz_update(q);
	}

	/* Move the whole queue to the time when it will be allowed
	   to send.  We would have to translate time to a round number,
	   but that is impossible, so we make the most conservative
	   estimate, i.e. we suppose that only this flow is active and,
	   hence, R = t.  Really, toks <= R <= toks/r_a.

	   This a priori shift in R will be adjusted later to reflect
	   the real delay.  We cannot avoid it because:
	   - a throttled flow remains active from the viewpoint of CSZ,
	     so it would acquire the highest priority if the start
	     numbers were not adjusted;
	   - eventually the finish number would drop below the round
	     number and the flow would be declared inactive.
	 */

	toks = -toks;

	/* Remember that we should start the watchdog */
	if (toks < q->wd_expires)
		q->wd_expires = toks;

	toks >>= q->R_log;
	shift += toks;
	if (shift > 0) {
		this->R_tbf += toks;
		csz_move_queue(this, shift);
	}
	csz_insert_start(&q->s, this);
	return 0;
}
#endif

static struct sk_buff *
csz_dequeue(struct Qdisc* sch)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	struct sk_buff *skb;
	struct csz_flow *this;

#ifdef CSZ_PLUS_TBF
	q->wd_expires = 0;
#endif
	this = (struct csz_flow*)q->s.snext;

	while (this != (struct csz_flow*)&q->s) {

		/* First of all: unlink from the start list */
		this->sprev->snext = this->snext;
		this->snext->sprev = this->sprev;

		if (this != &q->flow[0]) {	/* Guaranteed flow */
			skb = __skb_dequeue(&this->q);
			if (skb) {
#ifdef CSZ_PLUS_TBF
				if (this->depth) {
					if (!csz_enough_tokens(q, this, skb))
						continue;
				}
#endif
				if (this->q.qlen) {
					struct sk_buff *nskb = skb_peek(&this->q);
					this->start += L2R(this,nskb->len);
					csz_insert_start(&q->s, this);
				}
				sch->q.qlen--;
				return skb;
			}
		} else {	/* Predicted or best effort flow */
			skb = skb_dequeue_best(q);
			if (skb) {
				unsigned peeked = this->peeked;
				this->peeked = 0;

				if (--this->q.qlen) {
					struct sk_buff *nskb;
					unsigned dequeued = L2R(this,skb->len);

					/* We did not dequeue the same skb
					   that was peeked earlier; adjust
					   the start number.
					 */
					if (peeked != dequeued && peeked)
						this->start += dequeued - peeked;

					nskb = skb_peek_best(q);
					peeked = L2R(this,nskb->len);
					this->start += peeked;
					this->peeked = peeked;
					csz_insert_start(&q->s, this);
				}
				sch->q.qlen--;
				return skb;
			}
		}
	}
#ifdef CSZ_PLUS_TBF
	/* We are about to return no skb.
	   Schedule the watchdog timer if we stalled because of shaping.
	 */
	if (q->wd_expires) {
		unsigned long delay = PSCHED_US2JIFFIE(q->wd_expires);
		del_timer(&q->wd_timer);
		if (delay == 0)
			delay = 1;
		q->wd_timer.expires = jiffies + delay;
		add_timer(&q->wd_timer);
		sch->stats.overlimits++;
	}
#endif
	return NULL;
}

static void
csz_reset(struct Qdisc* sch)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	int    i;

	for (i=0; i<4; i++)
		skb_queue_purge(&q->other[i]);

	for (i=0; i<CSZ_GUARANTEED; i++) {
		struct csz_flow *this = q->flow + i;
		skb_queue_purge(&this->q);
		this->snext = this->sprev =
		this->fnext = this->fprev = (struct csz_head*)this;
		this->start = this->finish = 0;
	}
	q->s.snext = q->s.sprev = &q->s;
	q->f.fnext = q->f.fprev = &q->f;
	q->R_c = 0;
#ifdef CSZ_PLUS_TBF
	PSCHED_GET_TIME(q->t_tbf);
	q->tokens = q->depth;
	del_timer(&q->wd_timer);
#endif
	sch->q.qlen = 0;
}

static void
csz_destroy(struct Qdisc* sch)
{
	MOD_DEC_USE_COUNT;
}

static int csz_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	struct rtattr *tb[TCA_CSZ_PTAB];
	struct tc_csz_qopt *qopt;
	int    i;

	rtattr_parse(tb, TCA_CSZ_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt));
	if (tb[TCA_CSZ_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_CSZ_PARMS-1]) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(tb[TCA_CSZ_PARMS-1]);

	q->R_log = qopt->R_log;
	q->delta_log = qopt->delta_log;
	for (i=0; i<=TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= CSZ_FLOWS)
			return -EINVAL;
		q->prio2band[i] = qopt->priomap[i];
	}

	for (i=0; i<4; i++)
		skb_queue_head_init(&q->other[i]);

	for (i=0; i<CSZ_GUARANTEED; i++) {
		struct csz_flow *this = q->flow + i;
		skb_queue_head_init(&this->q);
		this->snext = this->sprev =
		this->fnext = this->fprev = (struct csz_head*)this;
		this->start = this->finish = 0;
	}
	q->s.snext = q->s.sprev = &q->s;
	q->f.fnext = q->f.fprev = &q->f;
	q->R_c = 0;
#ifdef CSZ_PLUS_TBF
	init_timer(&q->wd_timer);
	q->wd_timer.data = (unsigned long)sch;
	q->wd_timer.function = csz_watchdog;
#endif
	MOD_INC_USE_COUNT;
	return 0;
}

#ifdef CONFIG_RTNETLINK
static int csz_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	unsigned char	 *b = skb->tail;
	struct rtattr *rta;
	struct tc_csz_qopt opt;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	opt.flows = CSZ_FLOWS;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_CSZ_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
#endif

static int csz_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		     struct Qdisc **old)
{
	return -EINVAL;
}

static struct Qdisc * csz_leaf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static unsigned long csz_get(struct Qdisc *sch, u32 classid)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	unsigned long band = TC_H_MIN(classid) - 1;

	if (band >= CSZ_FLOWS)
		return 0;

	if (band < CSZ_GUARANTEED && q->flow[band].L_tab == NULL)
		return 0;

	return band+1;
}

static unsigned long csz_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return csz_get(sch, classid);
}

static void csz_put(struct Qdisc *sch, unsigned long cl)
{
	return;
}

static int csz_change(struct Qdisc *sch, u32 handle, u32 parent, struct rtattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_CSZ_PTAB];
	struct tc_csz_copt *copt;

	rtattr_parse(tb, TCA_CSZ_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt));
	if (tb[TCA_CSZ_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_CSZ_PARMS-1]) < sizeof(*copt))
		return -EINVAL;

	copt = RTA_DATA(tb[TCA_CSZ_PARMS-1]);

	if (tb[TCA_CSZ_RTAB-1] &&
	    RTA_PAYLOAD(tb[TCA_CSZ_RTAB-1]) < 1024)
		return -EINVAL;

	if (cl) {
		struct csz_flow *a;
		cl--;
		if (cl >= CSZ_FLOWS)
			return -ENOENT;
		if (cl >= CSZ_GUARANTEED || q->flow[cl].L_tab == NULL)
			return -EINVAL;

		a = &q->flow[cl];

		spin_lock_bh(&sch->dev->queue_lock);
#if 0
		a->rate_log = copt->rate_log;
#endif
#ifdef CSZ_PLUS_TBF
		a->limit = copt->limit;
		a->rate = copt->rate;
		a->buffer = copt->buffer;
		a->mtu = copt->mtu;
#endif

		if (tb[TCA_CSZ_RTAB-1])
			memcpy(a->L_tab, RTA_DATA(tb[TCA_CSZ_RTAB-1]), 1024);

		spin_unlock_bh(&sch->dev->queue_lock);
		return 0;
	}
	/* NI */
	return 0;
}

static int csz_delete(struct Qdisc *sch, unsigned long cl)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	struct csz_flow *a;

	cl--;

	if (cl >= CSZ_FLOWS)
		return -ENOENT;
	if (cl >= CSZ_GUARANTEED || q->flow[cl].L_tab == NULL)
		return -EINVAL;

	a = &q->flow[cl];

	spin_lock_bh(&sch->dev->queue_lock);
	a->fprev->fnext = a->fnext;
	a->fnext->fprev = a->fprev;
	a->sprev->snext = a->snext;
	a->snext->sprev = a->sprev;
	a->start = a->finish = 0;
	kfree(xchg(&q->flow[cl].L_tab, NULL));
	spin_unlock_bh(&sch->dev->queue_lock);

	return 0;
}

#ifdef CONFIG_RTNETLINK
static int csz_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	unsigned char	 *b = skb->tail;
	struct rtattr *rta;
	struct tc_csz_copt opt;

	tcm->tcm_handle = sch->handle|cl;

	cl--;

	if (cl > CSZ_FLOWS)
		goto rtattr_failure;

	if (cl < CSZ_GUARANTEED) {
		struct csz_flow *f = &q->flow[cl];

		if (f->L_tab == NULL)
			goto rtattr_failure;

		rta = (struct rtattr*)b;
		RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
		opt.limit = f->limit;
		opt.rate = f->rate;
		opt.slice = f->slice;
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
#ifdef CSZ_PLUS_TBF
		opt.buffer = f->buffer;
		opt.mtu = f->mtu;
#else
		opt.buffer = 0;
		opt.mtu = 0;
#endif
		RTA_PUT(skb, TCA_CSZ_PARMS, sizeof(opt), &opt);
		rta->rta_len = skb->tail - b;
	}

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
#endif

static void csz_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	int prio = 0;

	if (arg->stop)
		return;

	for (prio = 0; prio < CSZ_FLOWS; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (prio < CSZ_GUARANTEED && q->flow[prio].L_tab == NULL) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto ** csz_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;

	if (cl)
		return NULL;
	return &q->filter_list;
}

struct Qdisc_class_ops csz_class_ops =
{
	csz_graft,
	csz_leaf,

	csz_get,
	csz_put,
	csz_change,
	csz_delete,
	csz_walk,

	csz_find_tcf,
	csz_bind,
	csz_put,

#ifdef CONFIG_RTNETLINK
	csz_dump_class,
#endif
};

struct Qdisc_ops csz_qdisc_ops =
{
	NULL,
	&csz_class_ops,
	"csz",
	sizeof(struct csz_sched_data),

	csz_enqueue,
	csz_dequeue,
	NULL,
	NULL,

	csz_init,
	csz_reset,
	csz_destroy,
	NULL /* csz_change */,

#ifdef CONFIG_RTNETLINK
	csz_dump,
#endif
};

#ifdef MODULE
int init_module(void)
{
	return register_qdisc(&csz_qdisc_ops);
}

void cleanup_module(void)
{
	unregister_qdisc(&csz_qdisc_ops);
}
#endif
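
The start and finish lists above are circular doubly-linked lists whose empty state is a head pointing at itself (`q->s.snext = q->s.sprev = &q->s;`), which is why the unlink code in csz_dequeue() and csz_delete() needs no NULL checks. Below is a minimal, self-contained sketch of that pattern; it is not part of the kernel source, and the names (node, list_unlink, list_insert_sorted) are invented for illustration:

#include <stdio.h>

/* Self-referential circular list node: an empty list is a head whose
 * next/prev both point back at the head, like the csz_head setup in
 * csz_init()/csz_reset(). */
struct node {
	struct node *next, *prev;
	long start;			/* ordering key, like csz_flow.start */
};

static void list_init(struct node *head)
{
	head->next = head->prev = head;	/* empty: points at itself */
}

/* Unlink without NULL checks, exactly as csz_dequeue() unlinks a flow. */
static void list_unlink(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;		/* leave the node self-linked */
}

/* Insert keeping ascending order of ->start, mirroring how the flows
 * stay sorted by start number on the start list. */
static void list_insert_sorted(struct node *head, struct node *n)
{
	struct node *p = head->next;

	while (p != head && p->start <= n->start)
		p = p->next;
	n->prev = p->prev;
	n->next = p;
	p->prev->next = n;
	p->prev = n;
}

int main(void)
{
	struct node head, a = { .start = 30 }, b = { .start = 10 }, c = { .start = 20 };
	struct node *p;

	list_init(&head);
	list_insert_sorted(&head, &a);
	list_insert_sorted(&head, &b);
	list_insert_sorted(&head, &c);
	list_unlink(&c);

	for (p = head.next; p != &head; p = p->next)
		printf("%ld\n", p->start);	/* prints 10 then 30 */
	return 0;
}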
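
The arithmetic in csz_enough_tokens() is ordinary token-bucket accounting: credit the flow with the time elapsed since the last update, debit the rate-scaled packet cost, clamp the balance at the bucket depth, and treat a negative balance as the time to wait (which csz_dequeue() uses to arm the watchdog). The following standalone sketch shows just that accounting under simplifying assumptions (abstract ticks instead of psched time, no L2R rate table, no round-number bookkeeping); struct bucket and enough_tokens are invented names, not kernel API:

#include <stdio.h>

/* Hypothetical per-flow shaping state, mirroring the fields used by
 * csz_enough_tokens(): token balance, bucket depth, last update time.
 * Times and tokens are in the same abstract ticks. */
struct bucket {
	long tokens;	/* current balance */
	long depth;	/* maximum balance (burst size) */
	long t_last;	/* time of the last update */
};

/* Returns 1 if `cost` tokens are available at time `now` (and spends
 * them); otherwise returns 0 and stores the shortfall in *delay, the
 * way the qdisc stashes the wait in q->wd_expires. */
static int enough_tokens(struct bucket *b, long now, long cost, long *delay)
{
	long toks = (now - b->t_last) + b->tokens - cost;

	if (toks >= 0) {
		/* clamp at depth, as in: toks <= this->depth ? toks : this->depth */
		b->tokens = toks <= b->depth ? toks : b->depth;
		b->t_last = now;
		return 1;
	}
	*delay = -toks;	/* ticks until this packet would conform */
	return 0;
}

int main(void)
{
	struct bucket b = { .tokens = 5, .depth = 10, .t_last = 0 };
	long delay;

	printf("%d\n", enough_tokens(&b, 2, 4, &delay));	/* 1: 2+5-4 >= 0 */
	printf("%d\n", enough_tokens(&b, 2, 9, &delay));	/* 0: 6 tokens short */
	printf("%ld\n", delay);					/* 6 */
	return 0;
}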
