⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 tc_linux.c

📁 radius协议源码 — The Radius Stack will connect to a Radius Server. This stack implementation is built upo
💻 C
📖 第 1 页 / 共 2 页
字号:
/* * tc_linux.c	Almost system independent part of interface ISI RSVP *		to traffic control, admission control, policing functions etc. *		 *		NOTE. ISI RSVPD has no generic functions for this *		purpose. It is pretty strange, because admission control *		and understanding IS service parameters are mainly *		not system specific. Moreover, its correct implementation *		requires surgery in ISI body, interface proposed by *		RSVP functional specs is incomplete. * *		This program is free software; you can redistribute it and/or *		modify it under the terms of the GNU General Public License *		as published by the Free Software Foundation; either version *		2 of the License, or (at your option) any later version. * * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> */#include <stddef.h>#include <sys/types.h>#include <linux/types.h>#include "rsvp_daemon.h"#include "rsvp_api.h"#include "rapi_lib.h"		/* Define flowspec formats */#include "rsvp_specs.h"		/* Flowspec descriptor format */#include "rsvp_TCif.h"		/* Adaptation interface */extern double pow(double x, double y);extern double sqrt(double x);extern double math_log(double x);#include "tc_linux.h"#define In_Obj(x, y) ((Object_header *)(x) <= Next_Object((Object_header *)(y)))static __u32 t2us=1;static __u32 us2t=1;static double tick_in_usec = 1;struct if_link ifl_vec[MAX_INTERFACES];staticunsigned intif_indextomtu(unsigned int n){	int fd,ret = 0;	struct ifreq ifr;	fd = socket(AF_INET,SOCK_DGRAM,PF_UNSPEC);	if (FAILED(fd))		return(ret);	if_indextoname(n,ifr.ifr_name);	if (!FAILED(ioctl(fd,SIOCGIFMTU,(caddr_t) &ifr)))		ret = ifr.ifr_metric;	close(fd);	return(ret);}long tc_core_usec2tick(long usec){	return usec*tick_in_usec;}long tc_core_tick2usec(long tick){	return tick/tick_in_usec;}unsigned tc_calc_xmittime(unsigned rate, unsigned size){	return tc_core_usec2tick(1000000*((double)size/rate));}/*   rtab[pkt_len>>cell_log] = pkt_xmit_time */int tc_calc_rtable(unsigned bps, __u32 *rtab, int cell_log, unsigned mtu,		   
unsigned mpu){	int i;	if (mtu == 0)		mtu = 2047;	if (cell_log < 0) {		cell_log = 0;		while ((mtu>>cell_log) > 255)			cell_log++;	}	for (i=0; i<256; i++) {		unsigned sz = (i<<cell_log);		if (sz < mpu)			sz = mpu;		rtab[i] = tc_core_usec2tick(1000000*((double)sz/bps));	}	return cell_log;}int tc_core_init(){	FILE *fp = fopen("/proc/net/psched", "r");	if (fp == NULL)		return -1;	if (fscanf(fp, "%08x%08x", &t2us, &us2t) != 2) {		fclose(fp);		return -1;	}	fclose(fp);	tick_in_usec = (double)t2us/us2t;	return 0;}int tc_setup_estimator(unsigned A, unsigned time_const, struct tc_estimator *est){	for (est->interval=0; est->interval<=5; est->interval++) {		if (A <= (1<<est->interval)*(1000000/4))			break;	}	if (est->interval > 5)		return -1;	est->interval -= 2;	for (est->ewma_log=1; est->ewma_log<32; est->ewma_log++) {		double w = 1.0 - 1.0/(1<<est->ewma_log);		if (A/(-math_log(w)) > time_const)			break;	}	est->ewma_log--;	if (est->ewma_log==0 || est->ewma_log >= 31)		return -1;	return 0;}voidTC_init(int Oif){	static int done;	int i;	int no_tc;	if (done)		return;	done = 1;	if (tc_core_init())		return;	no_tc = tc_collect_qdisc();	/* Initialize traffic control on interfaces */	for (i = 0; i<if_num; i++) {		rsvp_qdisc_t *qi;		if (IsNumAPI(i))			continue;		strncpy(ifl_vec[i].ifl_name,if_vec[i].if_name,IFNAMSIZ);		ifl_vec[i].ifl_index = if_vec[i].if_index;		ifl_vec[i].ifl_sflags = 0;		ifl_vec[i].ifl_path_bw = 0;		ifl_vec[i].ifl_min_latency = 0;		ifl_vec[i].ifl_path_mtu = if_indextomtu(if_vec[i].if_index);		qi = ifl_vec[i].ifl_qdisc;		if (ifl_vec[i].ifl_sflags&IFL_FLAG_TC) {			if (ifl_vec[i].ifl_sflags&IFL_FLAG_TCUP)				if_vec[i].if_up = 1;			continue;		}		ifl_vec[i].ifl_sflags |= IFL_FLAG_TCDOWN;		if_vec[i].if_up = 0;		if (qi == NULL)			continue;		/* Try to initialize scheduler on this interface */		if (no_tc || qi->qops->start(qi)) {			log(LOG_INFO, 0, "Qdisc \"%s\" on %s is not operational.\n", qi->qops->kind, if_vec[i].if_name);			/* Oops... Something is wrong... 
*/			if (qi->qops->free)				qi->qops->free(qi);			ifl_vec[i].ifl_qdisc = NULL;			free(qi);			continue;		}		if_vec[i].if_up = 1;		ifl_vec[i].ifl_sflags &= ~IFL_FLAG_TCDOWN;		ifl_vec[i].ifl_sflags |= IFL_FLAG_TCUP;	}}static int tc_get_mem(rsvp_flow_t *f, ADSPEC *adspec){	IS_main_hdr_t	*mhp;	IS_serv_hdr_t	*shp, *lastshp;	int mem = 0;	mhp = (IS_main_hdr_t *) Obj_data(adspec);	shp = (IS_serv_hdr_t *)(mhp+1);	lastshp  = (IS_serv_hdr_t *) Next_Main_Hdr(mhp);	while (shp < lastshp) {		if (shp->issh_service == GUARANTEED_SERV) {			__u32 Csum = 0;			__u32 Dsum = 0;			IS_parm_hdr_t *php;			IS_parm_hdr_t *lastphp;			php = (IS_parm_hdr_t *)(shp+1);			lastphp = (IS_parm_hdr_t *)Next_Serv_Hdr(shp);			while (php < lastphp) {				switch (php->isph_parm_num) {				case GUAR_ADSPARM_Csum:					Csum = (*(u_int32_t *)(php+1));					break;				case GUAR_ADSPARM_Dsum:					Dsum = (*(u_int32_t *)(php+1));					break;				}				php = Next_Parm_Hdr(php);			}						return 2*f->policer.burst + Csum +				+ ((double)Dsum * f->policer.rate.rate)/1000000;		}		shp = Next_Serv_Hdr(shp);	}	return mem;}static intis_admission_strategy(rsvp_flow_t *f, FLOWSPEC *fspec, SENDER_TSPEC *s_tspec,		      ADSPEC *adspec, int flags){	IS_specbody_t		*is_specp;	IS_serv_hdr_t		*sp;	double			rfactor;	is_specp = &fspec->flow_body;	sp = (IS_serv_hdr_t *) &is_specp->spec_u;	switch(sp->issh_service) {	case CONTROLLED_LOAD_SERV:		f->policer.burst = is_specp->spec_u.CL_spec.CLspec_b;		f->policer.mtu = is_specp->spec_u.CL_spec.CLspec_M;		if (f->policer.mtu == 0)			return -1;		f->policer.rate.rate = is_specp->spec_u.CL_spec.CLspec_r;		f->policer.rate.mpu = is_specp->spec_u.CL_spec.CLspec_m;		if (f->policer.rate.mpu > f->policer.mtu)			f->policer.rate.mpu = f->policer.mtu;		if (f->policer.rate.mpu == 0) {			/* Workaround for one more Cisco IOS bug. 
*/			f->policer.rate.mpu = 576;			if (576 > f->policer.mtu)				f->policer.rate.mpu = f->policer.mtu;		}		f->policer.peakrate.mpu = f->policer.rate.mpu;		if (is_specp->spec_u.CL_spec.CLspec_p < 0xFFFFFFFFU)			f->policer.peakrate.rate = is_specp->spec_u.CL_spec.CLspec_p;		if (f->policer.peakrate.rate <= f->policer.rate.rate)			f->policer.peakrate.rate = 0;		if (flags&(TCF_M_POLICE|TCF_B_POLICE)) {			/* How to set buffering for CL service?			   We have no delay information.			   Set it to a random value?			 */			f->policer.limit = tc_get_mem(f, adspec);			if (f->policer.limit == 0)				f->policer.limit = 2*f->policer.burst + f->qi->C					+ f->qi->D * f->policer.rate.rate;			f->ai.mem = f->policer.limit;			f->ai.admtype = ADM_CL_DET;			f->policer.action = TC_POLICE_UNSPEC;		} else {			f->policer.action = TC_POLICE_RECLASSIFY;			f->ai.mem = f->policer.burst;			f->ai.admtype = ADM_CL_STAT;		}		f->ai.rate = is_specp->spec_u.CL_spec.CLspec_r;		break;	case GUARANTEED_SERV:		f->policer.burst = is_specp->spec_u.G_spec.Gspec_b;		f->policer.mtu = is_specp->spec_u.G_spec.Gspec_M;		if (f->policer.mtu == 0)			return -1;		f->policer.rate.rate = is_specp->spec_u.G_spec.Gspec_r;		f->policer.rate.mpu = is_specp->spec_u.G_spec.Gspec_m;		if (f->policer.rate.mpu > f->policer.mtu)			f->policer.rate.mpu = f->policer.mtu;		if (f->policer.rate.mpu == 0) {			/* Workaround for one more Cisco IOS bug. 
*/			f->policer.rate.mpu = 576;			if (576 > f->policer.mtu)				f->policer.rate.mpu = f->policer.mtu;		}		f->policer.peakrate.mpu = f->policer.rate.mpu;		if (is_specp->spec_u.G_spec.Gspec_p < UINT_MAX)			f->policer.peakrate.rate = is_specp->spec_u.G_spec.Gspec_p;		if (f->policer.peakrate.rate <= f->policer.rate.rate)			f->policer.peakrate.rate = 0;		if (flags&(TCF_M_POLICE|TCF_B_POLICE)) {			f->policer.action = TC_POLICE_UNSPEC;			f->policer.limit = tc_get_mem(f, adspec);			f->ai.mem = f->policer.limit;		} else if (flags&TCF_E_POLICE) {			f->policer.action = TC_POLICE_RECLASSIFY;			f->ai.mem = f->policer.burst;		} else {			f->policer.action = TC_POLICE_OK;			f->ai.mem = tc_get_mem(f, adspec);		}		f->ai.admtype = ADM_G_DET;		f->ai.rate = is_specp->spec_u.G_spec.Gspec_R;		break;	default:		return -1;	}	if (f->ai.mem == 0) {		f->ai.admtype = 0;		return -1;	}	/* Translate parameters.	   It might be made much more cleverly, unfortunately	   when I understood it was too late to fix.	   Namely, scheduling must be made by llhead adjusted	   parameters, but policing by pure IP payload.	 */	f->policer.mtu += f->qi->llhead;	f->policer.rate.mpu += f->qi->llhead;	f->policer.peakrate.mpu += f->qi->llhead;	/* If MTU is invalid, than reject request, if peak rate was requested	   (it is really important QoS parameter in this case), otherwise	   reset it to mtu.	 
*/	if (f->policer.mtu > f->qi->lmtu) {		if (f->policer.peakrate.rate) {			f->ai.admtype = 0;			return -1;		}		f->policer.mtu = f->qi->lmtu;		if (f->policer.rate.mpu > f->policer.mtu)			f->policer.rate.mpu = f->policer.mtu;		if (f->policer.peakrate.mpu > f->policer.mtu)			f->policer.peakrate.mpu = f->policer.mtu;	}	rfactor = (double)f->policer.rate.mpu/(f->policer.rate.mpu-f->qi->llhead);	f->policer.rate.rate *= rfactor;	if (f->policer.peakrate.rate)		f->policer.peakrate.rate *= rfactor;	f->ai.rate *= rfactor;	f->policer.limit *= rfactor;	f->policer.burst *= rfactor;	f->ai.mem *= rfactor;	f->ai.peakrate = f->policer.rate.rate +		(double)f->policer.burst / f->qi->ai.time_const;	if (f->policer.peakrate.rate) {		int prate = f->policer.peakrate.rate +			(double)f->policer.mtu / f->qi->ai.time_const;		if (prate < f->ai.peakrate)			f->ai.peakrate = prate;	}	return 0;}static floattc_cl_agg_rate(rsvp_qdisc_t *qi, struct admission_info *a){	long now = time(NULL);	float interval = now - a->last_m;	if (interval < a->interval)		return a->mu;	a->last_m = now;	return a->mu + (qi->qops->agg_cl_rate(qi) - a->mu)*pow(a->ewma_const, interval/a->interval);}static voidis_cl_stat_unadmit(rsvp_qdisc_t *qi, struct admission_info *ai, flow_acinfo_t *fa){	if (!fa->commited)		return;	ai->cl_sum -= fa->rate;	ai->clp2_sum -= fa->peakrate*fa->peakrate;	ai->mem_cur -= fa->mem;	ai->cl_mem -= fa->mem;	ai->cl_num--;	if (ai->g_sum + ai->cl_sum < ai->bw_max)		ai->cl_rate = ai->cl_sum;	fa->commited = 0;}static voidis_g_det_unadmit(rsvp_qdisc_t *qi, struct admission_info *ai, flow_acinfo_t *fa){	if (!fa->commited)		return;	ai->g_sum -= fa->rate;	ai->mem_cur -= fa->mem;	ai->g_num--;	if (ai->g_sum + ai->cl_sum < ai->bw_max)		ai->cl_rate = ai->cl_sum;	else		ai->cl_rate = ai->bw_max - ai->g_sum;	fa->commited = 0;}static intis_cl_stat_admit(rsvp_qdisc_t *qi, struct admission_info *ai, flow_acinfo_t *fa){	float			r;	struct admission_info	a = *ai;	/* Alas, I forgot to implement aggregate memory	   usage 
statistics in kernel 8(	   So do static admission check for now.	 */	if (a.mem_cur + fa->mem > a.mem_max)		return -1;	a.mem_cur += fa->mem;	a.cl_mem += fa->mem;	if (a.cl_num >= a.cl_max)		return -1;	a.cl_num++;	if (a.g_sum + a.cl_sum + fa->rate < a.bw_max) {		a.cl_sum += fa->rate;		a.clp2_sum += fa->peakrate*fa->peakrate;		a.cl_rate = a.cl_sum;		a.mu += fa->peakrate;		goto finish;	}	a.mu = tc_cl_agg_rate(qi, &a);	r = a.mu + sqrt(math_log(1.0/a.epsilon)*a.clp2_sum/2);	if (r + fa->peakrate + a.g_sum < a.bw_max) {		a.cl_sum += fa->rate;		/* Add flow rate to average */		a.mu += fa->peakrate;		a.clp2_sum += fa->peakrate*fa->peakrate;		a.cl_rate = a.bw_max - a.g_sum;		goto finish;	}	return -1;finish:	*ai = a;	fa->commited = 1;	return 0;}int is_g_det_admit(rsvp_qdisc_t *qi, struct admission_info *ai, flow_acinfo_t *fa){	float			r;	struct admission_info	a = *ai;	if (a.g_num >= a.g_max)		return -1;	a.g_num++;	if (a.mem_cur + fa->mem > a.mem_max)		return -1;	a.mem_cur += fa->mem;	if (a.g_sum + a.cl_sum + fa->rate < a.bw_max) {		a.g_sum += fa->rate;		a.cl_rate = a.cl_sum;		goto finish;	}	a.mu = tc_cl_agg_rate(qi, &a);	r = a.mu + sqrt(math_log(1.0/a.epsilon)*a.clp2_sum/2);	if (r + fa->rate + a.g_sum < a.bw_max) {

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -