⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 svmprecomputed.cpp

📁 一个包含丰富内容的流形学习算法工具包
💻 CPP
📖 第 1 页 / 共 4 页
字号:
				// NOTE(review): this chunk begins mid-way through Solver::Solve();
				// the function signature and the main SMO iteration loop are above
				// the visible region. We are inside the two-variable update: alpha[i]
				// and alpha[j] have just been moved along the working-set direction,
				// and here the pair is clipped back into the feasible box while
				// keeping their (signed) sum constant.
				}
			}
			else
			{
				if(alpha[i] < 0)
				{
					alpha[i] = 0;
					alpha[j] = sum;
				}
			}
		}

		// update G: only columns i and j of Q changed weight, so the gradient
		// moves by Q_i*delta_i + Q_j*delta_j over the active set.
		double delta_alpha_i = alpha[i] - old_alpha_i;
		double delta_alpha_j = alpha[j] - old_alpha_j;

		for(int k=0;k<active_size;k++)
		{
			G[k] += Q_i[k]*delta_alpha_i + Q_j[k]*delta_alpha_j;
		}

		// update alpha_status and G_bar.
		// G_bar caches the gradient contribution of variables at the upper bound,
		// so it only needs adjusting when i or j crosses that boundary; the full
		// kernel row (length l, not just active_size) is fetched in that case.
		{
			bool ui = is_upper_bound(i);
			bool uj = is_upper_bound(j);
			update_alpha_status(i);
			update_alpha_status(j);
			int k;
			if(ui != is_upper_bound(i))
			{
				Q_i = Q.get_Q(i,l);
				if(ui)
					for(k=0;k<l;k++)
						G_bar[k] -= C_i * Q_i[k];
				else
					for(k=0;k<l;k++)
						G_bar[k] += C_i * Q_i[k];
			}
			if(uj != is_upper_bound(j))
			{
				Q_j = Q.get_Q(j,l);
				if(uj)
					for(k=0;k<l;k++)
						G_bar[k] -= C_j * Q_j[k];
				else
					for(k=0;k<l;k++)
						G_bar[k] += C_j * Q_j[k];
			}
		}
	}

	// calculate rho (the decision-function bias term)
	si->rho = calculate_rho();

	// calculate objective value: 0.5 * sum_i alpha_i * (G_i + b_i)
	{
		double v = 0;
		int i;
		for(i=0;i<l;i++)
			v += alpha[i] * (G[i] + b[i]);

		si->obj = v/2;
	}

	// put back the solution: undo the active-set permutation when copying the
	// internal alpha[] into the caller's alpha_[] array.
	{
		for(int i=0;i<l;i++)
			alpha_[active_set[i]] = alpha[i];
	}

	// juggle everything back
	/*{
		for(int i=0;i<l;i++)
			while(active_set[i] != i)
				swap_index(i,active_set[i]);
				// or Q.swap_index(i,active_set[i]);
	}*/

	si->upper_bound_p = Cp;
	si->upper_bound_n = Cn;

	info("\noptimization finished, #iter = %d\n",iter);

	// release all per-solve working buffers allocated by Solve()
	delete[] b;
	delete[] y;
	delete[] alpha;
	delete[] alpha_status;
	delete[] active_set;
	delete[] G;
	delete[] G_bar;
}

// Select the maximal-violating pair for the standard C-SVM solver.
// return 1 if already optimal, return 0 otherwise
int Solver::select_working_set(int &out_i, int &out_j)
{
	// return i,j which maximize -grad(f)^T d , under constraint
	// if alpha_i == C, d != +1
	// if alpha_i == 0, d != -1

	double Gmax1 = -INF;		// max { -grad(f)_i * d | y_i*d = +1 }
	int Gmax1_idx = -1;

	double Gmax2 = -INF;		// max { -grad(f)_i * d | y_i*d = -1 }
	int Gmax2_idx = -1;

	for(int i=0;i<active_size;i++)
	{
		if(y[i]==+1)	// y = +1
		{
			if(!is_upper_bound(i))	// d = +1
			{
				if(-G[i] > Gmax1)
				{
					Gmax1 = -G[i];
					Gmax1_idx = i;
				}
			}
			if(!is_lower_bound(i))	// d = -1
			{
				if(G[i] > Gmax2)
				{
					Gmax2 = G[i];
					Gmax2_idx = i;
				}
			}
		}
		else		// y = -1
		{
			if(!is_upper_bound(i))	// d = +1
			{
				if(-G[i] > Gmax2)
				{
					Gmax2 = -G[i];
					Gmax2_idx = i;
				}
			}
			if(!is_lower_bound(i))	// d = -1
			{
				if(G[i] > Gmax1)
				{
					Gmax1 = G[i];
					Gmax1_idx = i;
				}
			}
		}
	}

	// Gmax1+Gmax2 is the KKT violation of the best pair; eps is the
	// stopping tolerance supplied to Solve().
	if(Gmax1+Gmax2 < eps)
 		return 1;

	out_i = Gmax1_idx;
	out_j = Gmax2_idx;
	return 0;
}

// Shrink the active set: bound variables whose gradient shows they will stay
// at their bound are swapped past active_size and excluded from later
// iterations. Near convergence the shrinking is undone once (unshrinked flag)
// and every variable is re-checked against the full reconstructed gradient.
void Solver::do_shrinking()
{
	int i,j,k;
	if(select_working_set(i,j)!=0) return;
	double Gm1 = -y[j]*G[j];
	double Gm2 = y[i]*G[i];

	// shrink
	for(k=0;k<active_size;k++)
	{
		if(is_lower_bound(k))
		{
			if(y[k]==+1)
			{
				if(-G[k] >= Gm1) continue;
			}
			else	if(-G[k] >= Gm2) continue;
		}
		else if(is_upper_bound(k))
		{
			if(y[k]==+1)
			{
				if(G[k] >= Gm2) continue;
			}
			else	if(G[k] >= Gm1) continue;
		}
		else continue;

		--active_size;
		swap_index(k,active_size);
		--k;	// look at the newcomer
	}

	// unshrink, check all variables again before final iterations
	if(unshrinked || -(Gm1 + Gm2) > eps*10) return;

	unshrinked = true;
	reconstruct_gradient();

	for(k=l-1;k>=active_size;k--)
	{
		if(is_lower_bound(k))
		{
			if(y[k]==+1)
			{
				if(-G[k] < Gm1) continue;
			}
			else	if(-G[k] < Gm2) continue;
		}
		else if(is_upper_bound(k))
		{
			if(y[k]==+1)
			{
				if(G[k] < Gm2) continue;
			}
			else	if(G[k] < Gm1) continue;
		}
		else continue;

		swap_index(k,active_size);
		active_size++;
		++k;	// look at the newcomer
	}
}

// Compute the bias rho: average y*G over free (unbounded) support vectors,
// or the midpoint of the feasible [lb,ub] interval if no variable is free.
double Solver::calculate_rho()
{
	double r;
	int nr_free = 0;
	double ub = INF, lb = -INF, sum_free = 0;
	for(int i=0;i<active_size;i++)
	{
		double yG = y[i]*G[i];

		if(is_lower_bound(i))
		{
			if(y[i] > 0)
				ub = min(ub,yG);
			else
				lb = max(lb,yG);
		}
		else if(is_upper_bound(i))
		{
			if(y[i] < 0)
				ub = min(ub,yG);
			else
				lb = max(lb,yG);
		}
		else
		{
			++nr_free;
			sum_free += yG;
		}
	}

	if(nr_free>0)
		r = sum_free/nr_free;
	else
		r = (ub+lb)/2;

	return r;
}

//
// Solver for nu-svm classification and regression
//
// additional constraint:
// e^T \alpha = constant
//
// Specialization of Solver for the nu-SVM formulations: the extra equality
// constraint means working-set pairs must share the same label, so selection,
// shrinking and rho are all computed per-class.
class Solver_NU : public Solver
{
public:
	Solver_NU() {}
	// Same interface as Solver::Solve; only keeps a SolutionInfo pointer so
	// calculate_rho() can also report the nu-SVM quantity si->r.
	void Solve(int l, const Kernel& Q, const double *b, const schar *y,
		   double *alpha, double Cp, double Cn, double eps,
		   SolutionInfo* si, int shrinking)
	{
		this->si = si;
		Solver::Solve(l,Q,b,y,alpha,Cp,Cn,eps,si,shrinking);
	}
private:
	SolutionInfo *si;
	int select_working_set(int &i, int &j);
	double calculate_rho();
	void do_shrinking();
};

// Working-set selection for nu-SVM: track the best up/down directions
// separately for the y=+1 and y=-1 classes, then pick the class with the
// larger violation. Returns 1 when optimal within eps, else 0.
int Solver_NU::select_working_set(int &out_i, int &out_j)
{
	// return i,j which maximize -grad(f)^T d , under constraint
	// if alpha_i == C, d != +1
	// if alpha_i == 0, d != -1
	double Gmax1 = -INF;	// max { -grad(f)_i * d | y_i = +1, d = +1 }
	int Gmax1_idx = -1;
	double Gmax2 = -INF;	// max { -grad(f)_i * d | y_i = +1, d = -1 }
	int Gmax2_idx = -1;
	double Gmax3 = -INF;	// max { -grad(f)_i * d | y_i = -1, d = +1 }
	int Gmax3_idx = -1;
	double Gmax4 = -INF;	// max { -grad(f)_i * d | y_i = -1, d = -1 }
	int Gmax4_idx = -1;

	for(int i=0;i<active_size;i++)
	{
		if(y[i]==+1)	// y == +1
		{
			if(!is_upper_bound(i))	// d = +1
			{
				if(-G[i] > Gmax1)
				{
					Gmax1 = -G[i];
					Gmax1_idx = i;
				}
			}
			if(!is_lower_bound(i))	// d = -1
			{
				if(G[i] > Gmax2)
				{
					Gmax2 = G[i];
					Gmax2_idx = i;
				}
			}
		}
		else		// y == -1
		{
			if(!is_upper_bound(i))	// d = +1
			{
				if(-G[i] > Gmax3)
				{
					Gmax3 = -G[i];
					Gmax3_idx = i;
				}
			}
			if(!is_lower_bound(i))	// d = -1
			{
				if(G[i] > Gmax4)
				{
					Gmax4 = G[i];
					Gmax4_idx = i;
				}
			}
		}
	}

	if(max(Gmax1+Gmax2,Gmax3+Gmax4) < eps)
 		return 1;

	// both i and j come from the same class (the more violating one)
	if(Gmax1+Gmax2 > Gmax3+Gmax4)
	{
		out_i = Gmax1_idx;
		out_j = Gmax2_idx;
	}
	else
	{
		out_i = Gmax3_idx;
		out_j = Gmax4_idx;
	}
	return 0;
}

// Per-class shrinking for nu-SVM; same structure as Solver::do_shrinking but
// with separate thresholds Gm1..Gm4 for each (label, direction) combination.
void Solver_NU::do_shrinking()
{
	double Gmax1 = -INF;	// max { -grad(f)_i * d | y_i = +1, d = +1 }
	double Gmax2 = -INF;	// max { -grad(f)_i * d | y_i = +1, d = -1 }
	double Gmax3 = -INF;	// max { -grad(f)_i * d | y_i = -1, d = +1 }
	double Gmax4 = -INF;	// max { -grad(f)_i * d | y_i = -1, d = -1 }

	int k;
	for(k=0;k<active_size;k++)
	{
		if(!is_upper_bound(k))
		{
			if(y[k]==+1)
			{
				if(-G[k] > Gmax1) Gmax1 = -G[k];
			}
			else	if(-G[k] > Gmax3) Gmax3 = -G[k];
		}
		if(!is_lower_bound(k))
		{
			if(y[k]==+1)
			{
				if(G[k] > Gmax2) Gmax2 = G[k];
			}
			else	if(G[k] > Gmax4) Gmax4 = G[k];
		}
	}

	double Gm1 = -Gmax2;
	double Gm2 = -Gmax1;
	double Gm3 = -Gmax4;
	double Gm4 = -Gmax3;

	for(k=0;k<active_size;k++)
	{
		if(is_lower_bound(k))
		{
			if(y[k]==+1)
			{
				if(-G[k] >= Gm1) continue;
			}
			else	if(-G[k] >= Gm3) continue;
		}
		else if(is_upper_bound(k))
		{
			if(y[k]==+1)
			{
				if(G[k] >= Gm2) continue;
			}
			else	if(G[k] >= Gm4) continue;
		}
		else continue;

		--active_size;
		swap_index(k,active_size);
		--k;	// look at the newcomer
	}

	// unshrink, check all variables again before final iterations
	if(unshrinked || max(-(Gm1+Gm2),-(Gm3+Gm4)) > eps*10) return;

	unshrinked = true;
	reconstruct_gradient();

	for(k=l-1;k>=active_size;k--)
	{
		if(is_lower_bound(k))
		{
			if(y[k]==+1)
			{
				if(-G[k] < Gm1) continue;
			}
			else	if(-G[k] < Gm3) continue;
		}
		else if(is_upper_bound(k))
		{
			if(y[k]==+1)
			{
				if(G[k] < Gm2) continue;
			}
			else	if(G[k] < Gm4) continue;
		}
		else continue;

		swap_index(k,active_size);
		active_size++;
		++k;	// look at the newcomer
	}
}

// nu-SVM bias computation: estimate r1 (from the y=+1 class) and r2 (from the
// y=-1 class) — each as the mean gradient over free variables, or the interval
// midpoint if none are free. Reports si->r = (r1+r2)/2 and returns rho = (r1-r2)/2.
double Solver_NU::calculate_rho()
{
	int nr_free1 = 0,nr_free2 = 0;
	double ub1 = INF, ub2 = INF;
	double lb1 = -INF, lb2 = -INF;
	double sum_free1 = 0, sum_free2 = 0;

	for(int i=0;i<active_size;i++)
	{
		if(y[i]==+1)
		{
			if(is_lower_bound(i))
				ub1 = min(ub1,G[i]);
			else if(is_upper_bound(i))
				lb1 = max(lb1,G[i]);
			else
			{
				++nr_free1;
				sum_free1 += G[i];
			}
		}
		else
		{
			if(is_lower_bound(i))
				ub2 = min(ub2,G[i]);
			else if(is_upper_bound(i))
				lb2 = max(lb2,G[i]);
			else
			{
				++nr_free2;
				sum_free2 += G[i];
			}
		}
	}

	double r1,r2;
	if(nr_free1 > 0)
		r1 = sum_free1/nr_free1;
	else
		r1 = (ub1+lb1)/2;

	if(nr_free2 > 0)
		r2 = sum_free2/nr_free2;
	else
		r2 = (ub2+lb2)/2;

	si->r = (r1+r2)/2;
	return (r1-r2)/2;
}

//
// Q matrices for various formulations
//
class SVC_Q:
public Kernel
{
// Q matrix for C-SVC: Q(i,j) = y_i * y_j * K(i,j), with rows served through
// an LRU Cache. This variant additionally supports a precomputed-kernel mode
// (kernel_type == MATRIX) in which kernel values are read directly from the
// training data instead of being evaluated.
public:
	SVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_)
	:Kernel(prob.l, prob.x, param)
	{
		clone(y,y_,prob.l);
		this->kernel_type = param.kernel_type;
		// cache_size is in MB; Cache takes bytes
		cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)));
	}

	// Return column i of Q for indices [0,len). Only the uncached tail
	// [start,len) is recomputed; the Cache hands back the shared row buffer.
	Qfloat *get_Q(int i, int len) const
	{
		Qfloat *data;
		int start;
		if((start = cache->get_data(i,&data,len)) < len)
		{
			if( kernel_type== MATRIX)
			{
				// Precomputed kernel: x[j][0].value appears to hold sample j's
				// serial number, used to index into row x[i] — TODO(review):
				// confirm against this fork's precomputed-data layout.
				for(int j=start;j<len;j++)
					data[j] = (Qfloat)(y[i]*y[j]*(x[i][(int)(x[j][0].value)].value));
			}
			else
			{
				for(int j=start;j<len;j++)
					data[j] = (Qfloat)(y[i]*y[j]*(this->*kernel_function)(i,j));
			}
		}
		return data;
	}

	// Keep cache, base-class data and labels consistent when the solver
	// permutes indices during shrinking.
	void swap_index(int i, int j) const
	{
		cache->swap_index(i,j);
		Kernel::swap_index(i,j);
		swap(y[i],y[j]);
	}

	~SVC_Q()
	{
		delete[] y;
		delete cache;
	}
private:
	schar *y;      // cloned label array (+1/-1), owned by this object
	Cache *cache;  // LRU row cache, owned by this object
	int kernel_type;
};

// Q matrix for one-class SVM: Q(i,j) = K(i,j) (no labels), same caching and
// precomputed-kernel handling as SVC_Q.
class ONE_CLASS_Q: public Kernel
{
public:
	ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param)
	:Kernel(prob.l, prob.x, param)
	{
		this->kernel_type = param.kernel_type;
		// cache_size is in MB; Cache takes bytes
		cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)));
	}

	// Return column i of Q for indices [0,len); recompute only the uncached tail.
	Qfloat *get_Q(int i, int len) const
	{
		Qfloat *data;
		int start;
		if((start = cache->get_data(i,&data,len)) < len)
		{
			if( kernel_type== MATRIX)
			{
				// precomputed kernel lookup — see note in SVC_Q::get_Q
				for(int j=start;j<len;j++)
					data[j] = (Qfloat)((x[i][(int)(x[j][0].value)].value));
			}
			else
			{
				for(int j=start;j<len;j++)
					data[j] = (Qfloat)(this->*kernel_function)(i,j);
			}
		}
		return data;
	}

	void swap_index(int i, int j) const
	{
		cache->swap_index(i,j);
		Kernel::swap_index(i,j);
	}

	~ONE_CLASS_Q()
	{
		delete cache;
	}
private:
	Cache *cache;  // LRU row cache, owned by this object
	int kernel_type;
};

// Q matrix for epsilon/nu-SVR.
// NOTE(review): this definition is truncated here — the constructor body and
// the rest of the class continue beyond the visible region (page 2).
class SVR_Q: public Kernel
{
public:
	SVR_Q(const svm_problem& prob, const svm_parameter& param)
	:Kernel(prob.l, prob.x, param)
	{
		l = prob.l;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -