
📄 svm.cpp

📁 Error-correcting output code SVM algorithm
💻 CPP
📖 Page 1 of 5
	// initialize p uniformly, then accumulate q+/q- for each binary problem
	for(int i=0; i<k; i++)
		p[i] = 1.0/k;
	for(int t=0; t<m; t++)
		qp[t] = qn[t] = 0.0;
	for(int t=0; t<m; t++)
		for(int j=0; j<k; j++)
			if(I[t][j]>0) qp[t] += p[j];
			else if(I[t][j]<0) qn[t] += p[j];

	// Algorithm 2
	double *delta = Malloc(double, k);
	for(iter=0; iter<max_iter; iter++)
	{
		for(int i=0; i<k; i++)
		{
			double delta_d = 0.0, delta_n = 0.0;
			for(int j=0; j<m; j++)
			{
				if(I[j][i]>0)
				{
					delta_n += rp[j]/qp[j];
					delta_d += 1.0/(qp[j]+qn[j]);
				}
				else if(I[j][i]<0)
				{
					delta_n += (1.0-rp[j])/qn[j];
					delta_d += 1.0/(qp[j]+qn[j]);
				}
			}
			delta[i] = (delta_n+mu/p[i])/(delta_d+mu*k);

			// multiplicative update of p[i], then renormalize p, q+ and q-
			double di = delta[i]*p[i] - p[i];
			p[i] = delta[i]*p[i];

			double sump = 1 + di;
			for(int t=0; t<k; t++)
				p[t] = p[t]/sump;

			for(int t=0; t<m; t++)
			{
				qp[t] += (I[t][i] > 0) ? di : 0;
				qn[t] += (I[t][i] < 0) ? di : 0;
				qp[t] = qp[t] / sump;
				qn[t] = qn[t] / sump;
			}
		}

		// stop once every multiplicative factor is within eps of 1
		double max_error = 0.0;
		for(int i=0; i<k; i++)
			if(fabs(delta[i]-1.0)>max_error)
				max_error = fabs(delta[i]-1.0);
		if(max_error < eps) break;
	}

	if (iter>=max_iter)
		info("Exceeds max_iter in GBT_multiclass_prob\navg iter: %d\n",iter*k);
	else
		info("avg iter: %d\n",iter*k);

	free(qp);
	free(qn);
	free(delta);
}

void GBT_multiclass_probability(int multiclass_type, int m, int k, double *rp, double *p, int **I, int method)
{
	switch(method){
		case 1:
			gbtone(multiclass_type, m, k, rp, p, I);
			break;
		case 2:
			gbtall(multiclass_type, m, k, rp, p, I);
			break;
		case 3:
			grad(multiclass_type, m, k, rp, p, I);
			break;
		default:
			info("Unknown probability estimation method\n");
	}
}

// Cross-validation decision values for probability estimates
void svm_binary_svc_probability(
	const svm_problem *prob, const svm_parameter *param,
	double Cp, double Cn, double& probA, double& probB)
{
	int i;
	int nr_fold = 5;
	int *perm = Malloc(int,prob->l);
	double *dec_values = Malloc(double,prob->l);

	// random shuffle
	for(i=0;i<prob->l;i++) perm[i]=i;
	for(i=0;i<prob->l;i++)
	{
		int j = i+rand()%(prob->l-i);
		swap(perm[i],perm[j]);
	}
	for(i=0;i<nr_fold;i++)
	{
		int begin = i*prob->l/nr_fold;
		int end = (i+1)*prob->l/nr_fold;
		int j,k;
		struct svm_problem subprob;

		subprob.l = prob->l-(end-begin);
		subprob.x = Malloc(struct svm_node*,subprob.l);
		subprob.y = Malloc(double,subprob.l);

		k=0;
		for(j=0;j<begin;j++)
		{
			subprob.x[k] = prob->x[perm[j]];
			subprob.y[k] = prob->y[perm[j]];
			++k;
		}
		for(j=end;j<prob->l;j++)
		{
			subprob.x[k] = prob->x[perm[j]];
			subprob.y[k] = prob->y[perm[j]];
			++k;
		}
		int p_count=0,n_count=0;
		for(j=0;j<k;j++)
			if(subprob.y[j]>0)
				p_count++;
			else
				n_count++;

		// degenerate folds: fall back to constant decision values
		if(p_count==0 && n_count==0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = 0;
		else if(p_count > 0 && n_count == 0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = 1;
		else if(p_count == 0 && n_count > 0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = -1;
		else
		{
			svm_parameter subparam = *param;
			subparam.probability = 0;
			// For the Generalized BT model, we currently do not consider weighted C
			subparam.C = param->C;
			subparam.nr_weight = 0;

			/*subparam.C=1.0;
			subparam.nr_weight=2;
			subparam.weight_label = Malloc(int,2);
			subparam.weight = Malloc(double,2);
			subparam.weight_label[0]=+1;
			subparam.weight_label[1]=-1;
			subparam.weight[0]=Cp;
			subparam.weight[1]=Cn;*/
			subprob.nr_binary = error_correcting_code(subparam.multiclass_type, svm_find_nr_class(&subprob), subprob.I);
			struct svm_model *submodel = svm_train(&subprob,&subparam);
			for(j=begin;j<end;j++)
			{
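				// score each held-out point with the model trained on the other folds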
				svm_predict_values(submodel,prob->x[perm[j]],&(dec_values[perm[j]]));
				// ensure the +1/-1 label order; this is why the generic CV subroutine is not used here
				dec_values[perm[j]] *= submodel->label[0];
			}
			svm_destroy_model(submodel);
			svm_destroy_param(&subparam);
			free(subprob.x);
			free(subprob.y);
		}
	}

	sigmoid_train(prob->l,dec_values,prob->y,probA,probB);
	free(dec_values);
	free(perm);
}

// Return the parameter of a Laplace distribution
double svm_svr_probability(
	const svm_problem *prob, const svm_parameter *param)
{
	int i;
	int nr_fold = 5;
	double *ymv = Malloc(double,prob->l);
	double mae = 0;

	svm_parameter newparam = *param;
	newparam.probability = 0;
	svm_cross_validation(prob,&newparam,nr_fold,ymv);
	for(i=0;i<prob->l;i++)
	{
		ymv[i] = prob->y[i]-ymv[i];
		mae += fabs(ymv[i]);
	}
	mae /= prob->l;

	// drop residuals beyond 5 standard deviations, then recompute the mean absolute error
	double std = sqrt(2*mae*mae);
	int count = 0;
	mae = 0;
	for(i=0;i<prob->l;i++)
		if (fabs(ymv[i]) > 5*std)
			count++;
		else
			mae += fabs(ymv[i]);
	mae /= (prob->l-count);
	info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma), sigma= %g\n",mae);
	free(ymv);
	return mae;
}

// error-correcting code functions

// nr_binary for small numbers of classes (indexed by nr_class-3)
static int dense[6] = {3,3,10,10,20,20};
static int sparse[3] = {3,10,30};

// check whether I is valid
// return 1 if invalid, 0 if valid
int check_I(int **In, int nr_binary, int nr_class)
{
	int c,i,j,k,s,z;

	// check for identical/complementary rows
	for(i=0; i<nr_binary; i++)
		for(j=i+1; j<nr_binary; j++)
		{
			s = c = 0;
			for(k=0;k<nr_class;k++)
			{
				if(In[i][k] == In[j][k]) s++;
				if(In[i][k]+In[j][k] == 0) c++;
			}
			if(s==nr_class || c==nr_class)
				return 1;
		}

	// check for a zero column
	for(i=0; i<nr_class; i++)
	{
		z = 0;
		for(j=0; j<nr_binary; j++)
			if(In[j][i] == 0) z++;
		if(z == nr_binary)
			return 1;
	}

	return 0;
}

// minimum pairwise column separation of the code matrix; a larger
// minimum lets the code correct more binary-classifier mistakes
int rho_score(int **In, int nr_binary, int nr_class)
{
	int i,j,k,rho,min = nr_binary;

	for(i=0; i<nr_class; i++)
		for(j=i+1; j<nr_class; j++)
		{
			rho = 0;
			for(k=0; k<nr_binary; k++)
				rho += In[k][i] * In[k][j];
			if(nr_binary-rho < min)
				min = nr_binary-rho;
		}
	return min;
}

int error_correcting_code(int multiclass_type, int nr_class, int**& I)
{
	int nr_binary = 0;

	if(multiclass_type == ONE_ONE || nr_class <= 2)
	{
		// one row per class pair: +1 for the first class, -1 for the second
		nr_binary = nr_class*(nr_class-1)/2;
		I = Malloc(int *, nr_binary);
		int r,i,j=0,k=j+1;
		for (i=0; i<nr_binary; i++)
		{
			I[i] = Malloc(int, nr_class);
			for(r=0; r<nr_class; r++)
				I[i][r] = 0;
			I[i][j] = +1; I[i][k] = -1;
			if (k==nr_class-1)
			{
				j++; k=j+1;
			}
			else
				k++;
		}
	}
	else if(multiclass_type == ONE_ALL)
	{
		// one row per class: that class against all the others
		int i,j;
		nr_binary = nr_class;
		I = Malloc(int *, nr_binary);
		for(i=0; i<nr_binary; i++)
		{
			I[i] = Malloc(int, nr_class);
			for(j=0; j<nr_class; j++)
				I[i][j] = -1;
			I[i][i] = 1;
		}
	}
	else if(multiclass_type == DENSE)
	{
		int **In = NULL;
		int i,j,n,p,rho,max_rho=0;

		nr_binary = (int)round(10 * log(nr_class)/log(2.0));
		if(nr_class < 9)
			nr_binary = dense[nr_class-3];

		In = Malloc(int *, nr_binary);
		I = Malloc(int *, nr_binary);
		for(i=0; i<nr_binary; i++)
		{
			In[i] = Malloc(int, nr_class);
			I[i] = Malloc(int, nr_class);
		}

		// Generate 100 random code matrices and keep the best one
		for(n = 0; n < 100; n++)
		{
			for(i=0; i<nr_binary; i++)
				for(j=0; j<nr_class; j++)
					In[i][j] = -1;
			// set nr_class/2 random positions of each row to +1
			for(i=0; i<nr_binary; i++)
			{
				for(j=0; j<nr_class/2; j++)
				{
					p = rand() % nr_class;
					while(In[i][p] == 1)
						p = rand() % nr_class;
					In[i][p] = 1;
				}
			}

			if(check_I(In, nr_binary, nr_class))
			{
				n--;
				continue;
			}
			rho = rho_score(In, nr_binary, nr_class);
			if(rho > max_rho)
			{
				max_rho = rho;
				for(i=0; i<nr_binary; i++)
					memcpy(I[i], In[i], nr_class*sizeof(int));
			}
		}

		for(i=0; i<nr_binary; i++)
			free(In[i]);
		free(In);
	}
	else  // SPARSE
	{
		int **In = NULL;
		int i,j,n,f1,f2,rho,max_rho=0;
		float p;

		nr_binary = (int)round(15 * log(nr_class)/log(2.0));
		if(nr_class < 6)
			nr_binary = sparse[nr_class-3];

		In = Malloc(int *, nr_binary);
		I = Malloc(int *, nr_binary);
		for(i=0; i<nr_binary; i++)
		{
			In[i] = Malloc(int, nr_class);
			I[i] = Malloc(int, nr_class);
		}

		// Generate 100 random code matrices and keep the best one
		for(n=0; n<100; n++)
		{
			for(i=0; i<nr_binary; i++)
				// redraw each row until it contains at least one +1 and one -1
				do{
					f1=f2=0;
					for(j=0; j<nr_class; j++)
					{
						p = (rand()%1000)/1000.0;
						if(p > 0.75) f1=In[i][j]=1;
						else if(p < 0.25) f2=In[i][j]=-1;
						else In[i][j]=0;
					}
				}while(!(f1*f2));

			if(check_I(In, nr_binary, nr_class))
			{
				n--;
				continue;
			}
			rho = rho_score(In, nr_binary, nr_class);
			if(rho > max_rho)
			{
				max_rho = rho;
				for(i=0; i<nr_binary; i++)
					memcpy(I[i], In[i], nr_class*sizeof(int));
			}
		}
		for(i=0; i<nr_binary; i++)
			free(In[i]);
		free(In);
	}
	return nr_binary;
}

//
// Interface functions
//
svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
{
	svm_model *model = Malloc(svm_model,1);
	model->param = *param;
	model->free_sv = 0;	// XXX

	if(param->svm_type == ONE_CLASS ||
	   param->svm_type == EPSILON_SVR ||
	   param->svm_type == NU_SVR)
	{
		// regression or one-class-svm
		model->nr_class = 2;
		model->label = NULL;
		model->nSV = NULL;
		model->probA = NULL; model->probB = NULL;
		model->sv_coef = Malloc(double *,1);

		if(param->probability &&
		   (param->svm_type == EPSILON_SVR ||
		    param->svm_type == NU_SVR))
		{
			model->probA = Malloc(double,1);
			model->probA[0] = svm_svr_probability(prob,param);
		}

		decision_function f = svm_train_one(prob,param,0,0);
		model->rho = Malloc(double,1);
		model->rho[0] = f.rho;

		int nSV = 0;
		int i;
		for(i=0;i<prob->l;i++)
			if(fabs(f.alpha[i]) > 0) ++nSV;
		model->l = nSV;
		model->SV = Malloc(svm_node *,nSV);
		model->sv_coef[0] = Malloc(double,nSV);
		int j = 0;
		for(i=0;i<prob->l;i++)
			if(fabs(f.alpha[i]) > 0)
			{
				model->SV[j] = prob->x[i];
				model->sv_coef[0][j] = f.alpha[i];
				++j;
			}

		free(f.alpha);
	}
	else
	{
		// classification
		// find out the number of classes
		int l = prob->l;
		int nr_binary = prob->nr_binary;
		int **I = prob->I;
		int max_nr_class = 16;
		int nr_class = 0;
		int *label = Malloc(int,max_nr_class);
		int *count = Malloc(int,max_nr_class);
		int *index = Malloc(int,l);

		int i;
		for(i=0;i<l;i++)
		{
			int this_label = (int)prob->y[i];
			int j;
			for(j=0;j<nr_class;j++)
				if(this_label == label[j])
				{
					++count[j];
					break;
				}
			index[i] = j;
			if(j == nr_class)
			{
				if(nr_class == max_nr_class)
				{
					max_nr_class *= 2;
					label = (int *)realloc(label,max_nr_class*sizeof(int));
					count = (int *)realloc(count,max_nr_class*sizeof(int));
				}
				label[nr_class] = this_label;
				count[nr_class] = 1;
				++nr_class;
			}
		}

		// group training data of the same class
		int *start = Malloc(int,nr_class);
		start[0] = 0;
		for(i=1;i<nr_class;i++)
			start[i] = start[i-1]+count[i-1];

		svm_node **x = Malloc(svm_node *,l);
		for(i=0;i<l;i++)
		{
			x[start[index[i]]] = prob->x[i];
			++start[index[i]];
		}
		start[0] = 0;
		for(i=1;i<nr_class;i++)
			start[i] = start[i-1]+count[i-1];

		// calculate weighted C
		double *weighted_C = Malloc(double, nr_class);
		for(i=0;i<nr_class;i++)
			weighted_C[i] = param->C;
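		// scale C for the classes named by the user: each (weight_label, weight)
		// pair multiplies that class's penalty, making errors on the weighted
		// class proportionally more expensive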
		for(i=0;i<param->nr_weight;i++)
		{
			int j;
			for(j=0;j<nr_class;j++)
				if(param->weight_label[i] == label[j])
					break;
			if(j == nr_class)
				fprintf(stderr,"warning: class label %d specified in weight is not found\n", param->weight_label[i]);
			else
				weighted_C[j] *= param->weight[i];
		}

		// train one binary model per row of the code matrix (nr_binary in total)
		bool *nonzero = Malloc(bool,l);
		for(i=0;i<l;i++)
			nonzero[i] = false;
		decision_function *f = Malloc(decision_function,nr_binary);

		double *probA=NULL,*probB=NULL;
		if (param->probability)
		{
			probA = Malloc(double,nr_binary);
			probB = Malloc(double,nr_binary);
		}

		int p;
		for(i=0;i<nr_binary;i++)
		{
			// collect the classes used by row i and relabel them as +1/-1
			svm_problem sub_prob;
			sub_prob.l = 0;
			for (int j=0; j<nr_class; j++)
				if (I[i][j]!=0)
					sub_prob.l += count[j];
			sub_prob.x = Malloc(svm_node *,sub_prob.l);
			sub_prob.y = Malloc(double,sub_prob.l);

			p=0;
			for(int j=0;j<nr_class;j++)
				if (I[i][j]!=0)
					for (int k=0;k<count[j];k++)
					{
						sub_prob.x[p] = x[start[j]+k];
						sub_prob.y[p] = I[i][j];
						p++;
					}

			if(param->probability)
				svm_binary_svc_probability(&sub_prob,param,param->C,param->C,probA[i],probB[i]);

			f[i] = svm_train_one(&sub_prob,param,param->C,param->C);
			free(sub_prob.x);
			free(sub_prob.y);
		}

		// build output
		model->nr_class = nr_class;
		model->nr_binary = nr_binary;
		model->I = Malloc(int *, nr_binary);
		for(int i=0; i<nr_binary; i++)
		{
			model->I[i] = Malloc(int, nr_class);
			memcpy(model->I[i], I[i], nr_class*sizeof(int));
		}

		model->label = Malloc(int,nr_class);
		for(i=0;i<nr_class;i++)
			model->label[i] = label[i];

		model->rho = Malloc(double,nr_binary);
		model->nSV_binary = Malloc(int,nr_binary);
		for(i=0;i<nr_binary;i++)
		{
			model->rho[i] = f[i].rho;
			model->nSV_binary[i] = f[i].nSV;
		}

		if(param->probability)
		{
			model->probA = Malloc(double,nr_binary);
			model->probB = Malloc(double,nr_binary);
			for(i=0;i<nr_binary;i++)
			{
				model->probA[i] = probA[i];
				model->probB[i] = probB[i];
			}
		}
		else
		{
			model->probA = NULL;
			model->probB = NULL;
		}

		// mark training points that are support vectors in any binary model
		for(i=0;i<nr_binary;i++)
		{
			p=0;
			for(int j=0; j<nr_class; j++)
				if (I[i][j]!=0)
					for (int k=0;k<count[j];k++)
					{
						if (fabs(f[i].alpha[p]) > 0)
							nonzero[start[j]+k] = true;
						p++;
					}
		}

		int total_sv = 0;
		model->nSV = Malloc(int,nr_class);
		for(i=0;i<nr_class;i++)
		{
			int nSV = 0;
			for(int j=0;j<count[i];j++)
				if(nonzero[start[i]+j])
				{
					++nSV;
					++total_sv;
				}
			model->nSV[i] = nSV;
		}

		info("Total nSV = %d\n",total_sv);

		model->l = total_sv;
		model->SV = Malloc(svm_node *,total_sv);
		int *xtosv = Malloc(int,l);
		p=0;
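The rows of the matrix I built by error_correcting_code() double as a decoding table at prediction time: each trained binary SVM votes +1 or -1, and the predicted class is the one whose column best matches those votes. The listing above ends mid-function (the rest of svm_train and the prediction routines are on later pages), so here is a minimal, self-contained sketch of one common decoding rule, hard Hamming-style decoding. The function name decode_ecoc and the exact rule are illustrative assumptions; the actual svm_predict in this file may combine decision values differently (e.g. through the GBT probability model above).

#include <cstdio>

// Count disagreements between the binary classifiers' sign predictions
// and column c of the code matrix; zeros in the matrix mean "class not
// used by this binary problem" and are skipped, matching check_I above.
int decode_ecoc(const int *const *I, const int *pred_sign,
                int nr_binary, int nr_class)
{
	int best_class = 0, best_err = nr_binary + 1;
	for (int c = 0; c < nr_class; c++) {
		int err = 0;
		for (int b = 0; b < nr_binary; b++)
			if (I[b][c] != 0 && I[b][c] != pred_sign[b])
				err++;
		if (err < best_err) { best_err = err; best_class = c; }
	}
	return best_class;
}

int main()
{
	// 3-class one-vs-one code, as produced by error_correcting_code()
	// for ONE_ONE: rows encode (0 vs 1), (0 vs 2), (1 vs 2)
	int r0[] = {+1, -1,  0};
	int r1[] = {+1,  0, -1};
	int r2[] = { 0, +1, -1};
	const int *I[] = {r0, r1, r2};

	int pred_sign[] = {-1, -1, +1};  // each binary SVM's sign(decision value)
	printf("predicted class index: %d\n", decode_ecoc(I, pred_sign, 3, 3));  // prints 1
	return 0;
}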
