
algorithmsvm.java,v

Contains several classifier design algorithms commonly used in pattern recognition.
Language: JAVA (RCS ,v archive)
Page 1 of 4
     *
     */

     * method: sequentialMinimalOptimization
     *
     * @param   none
     *
     * @return  boolean value indicating status

     * method: examineExample
     *
     * @param   int i2: (input) example to be evaluated
     *
     * method to examine a specific Lagrange multiplier and check if
     * it violates the KKT condition
     *

     * method: takeStep
     *
     * @param    int i1: (input) first example to be evaluated
     * @param    int i2: (input) second example to be evaluated
     *
     * method that jointly optimizes two Lagrange multipliers and
     * maintains the feasibility of the dual QP
     *

     * method: evaluateObjective
     *
     * @param   int i1: (input) first example
     * @param   int i2: (input) second example
     * @param   double lim: (input) bound at which the function
     *          is to be evaluated
     *
     * method determines the value of the objective function at the bounds
     *

     * method: evaluateOutput
     *
     * @param   const VectorDouble& point: (input) first example index
     *
     * this method computes the output of a specific example

     * method: K
     *
     * @param  VectorDouble& point1: (input) first example
     * @param  VectorDouble& point2: (input) second example
     *
     * @return Kernel evaluation
     *
     * this method evaluates the linear Kernel on the input vectors
     * K(x,y) = (x . y)

     * method: computeSupportVectors
     *
     * @param   none
     * @return  none

     * method: computeDecisionRegions
     *
     * @param   none
     * @return  none

     * method: computeErrors
     *
     * @param  Data d: input data point
     *
     * @return none
     *
@
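The comment block for K above pins the kernel down as a plain dot product, K(x,y) = (x . y), even though the class also defines RBF and polynomial kernel-type constants. As a reference, a minimal standalone sketch of such a kernel over the Vector-of-Double point representation used throughout this file (the method name here is illustrative; the file's own K appears on a later page of the listing):

    // Sketch of a linear kernel K(x, y) = x . y over points stored as
    // java.util.Vector of Double, matching this file's representation.
    // Illustrative only; not the actual K method from algorithmsvm.java.
    static double linearKernel(java.util.Vector point1, java.util.Vector point2)
    {
        double sum = 0.0;
        for (int i = 0; i < point1.size(); i++)
        {
            sum += ((Double) point1.get(i)).doubleValue()
                 * ((Double) point2.get(i)).doubleValue();
        }
        return sum;
    }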
1.2
log
@Commented all the debug statements. And changed the "Step Sequence Complete" message to "Algorithm Complete".
@
text
@ * @(#) IFAlgorithm.java  1.10 02/09/03
 *
 * Copyright ***,  All Rights Reserved.
 *
 * This software is the proprietary information of ********
 * Use is subject to license terms.

    //-----------------------------------------------------------------
    //
    // static data members
    //
    //-----------------------------------------------------------------
    static final int KERNEL_TYPE_LINEAR = 0;
    static final int KERNEL_TYPE_RBF = 1;
    static final int KERNEL_TYPE_POLYNOMIAL = 2;
    static final int KERNEL_TYPE_DEFAULT = KERNEL_TYPE_LINEAR;

    static final Double CLASS_LABEL_1 = new Double(1.0);
    static final Double CLASS_LABEL_2 = new Double(-1.0);

    //-----------------------------------------------------------------
    //
    // instance data members
    //
    //-----------------------------------------------------------------

    // SVM data members
    //
    Vector x_d = new Vector();
    Vector y_d = new Vector();
    Vector alpha_d = new Vector();
    Vector error_cache_d = new Vector();
    int number_d = 0;

    // SVM parameters
    //
    int kernel_type_d = KERNEL_TYPE_RBF;
    //int kernel_type_d = KERNEL_TYPE_POLYNOMIAL;
    double bound_d = 100;    // the upper bound for a, 0 <= a <= bound_d
    double tol_d = 0.01;
    double bias_d = 0.0;
    double eps_d = 0.001;

    // vector of support region points
    //
    Vector support_vectors_d = new Vector();
    Vector decision_regions_d = new Vector();
    int output_canvas_d[][];

    // classification functions
    //
    public boolean initialize()
    {
        // check the data points
        //
        if (output_panel_d == null)
        {
            return false;
        }

        // add the process description for the SVM algorithm
        //
        if (description_d.size() == 0)
        {
            String str = new String("   0. Initialize the original data.");
            description_d.addElement(str);
            str = new String("   1. Displaying the original data.");
            description_d.addElement(str);
            str = new String("   2. Computing the Support Vectors.");
            description_d.addElement(str);
            str = new String("   3. Computing the decision regions.");
            description_d.addElement(str);
        }

        pro_box_d.appendMessage("Support Vector Machine :" + "\n");

        // set the data points for this algorithm
        //
        set1_d = (Vector)data_points_d.dset1.clone();
        set2_d = (Vector)data_points_d.dset2.clone();

        // reset values
        //
        support_vectors_d = new Vector();
        decision_regions_d = new Vector();
        x_d = new Vector();
        y_d = new Vector();
        alpha_d = new Vector();
        error_cache_d = new Vector();
        number_d = 0;
        step_count = 3;
        algo_id = "AlgorithmSVM";

        // set the step index
        //
        step_index_d = 0;

        // append message to process box
        //
        pro_box_d.appendMessage((String)description_d.get(step_index_d));

        // exit gracefully
        //
        return true;
    }
    public void run()
    {
        // Debug
        //System.out.println(algo_id + ": run()");

        if (step_index_d == 1)
        {
            disableControl();
            step1();
            enableControl();
        }

        else if (step_index_d == 2)
        {
            disableControl();
            step2();
            enableControl();
        }

        else if (step_index_d == 3)
        {
            disableControl();
            step3();
            pro_box_d.appendMessage("   Algorithm Complete");
            enableControl();
        }

        return;
    }

    // method: step1
    //
    // arguments: none
    // return   : none
    //
    // step one of the algorithm: display the original data
    //
    boolean step1()
    {
        // Debug
        //System.out.println(algo_id + ": step1()");

        pro_box_d.setProgressMin(0);
        pro_box_d.setProgressMax(1);
        pro_box_d.setProgressCurr(0);
        scaleToFitData();

        // display original data
        //
        output_panel_d.addOutput(set1_d, Classify.PTYPE_INPUT,
                                 data_points_d.color_dset1);
        output_panel_d.addOutput(set2_d, Classify.PTYPE_INPUT,
                                 data_points_d.color_dset2);
        output_panel_d.addOutput(set3_d, Classify.PTYPE_INPUT,
                                 data_points_d.color_dset3);
        output_panel_d.addOutput(set4_d, Classify.PTYPE_INPUT,
                                 data_points_d.color_dset4);

        // step 1 completed
        //
        pro_box_d.setProgressCurr(1);
        output_panel_d.repaint();

        // exit gracefully
        //
        return true;
    }

    // method: step2
    //
    // arguments: none
    // return   : none
    //
    // step two of the algorithm: compute the support vectors
    //
    boolean step2()
    {
        sequentialMinimalOptimization();
        computeSupportVectors();

        // display support vectors
        //
        output_panel_d.addOutput(support_vectors_d, Classify.PTYPE_SUPPORT_VECTOR, Color.black);

        pro_box_d.setProgressCurr(1000);
        output_panel_d.repaint();

        /*
          // append message to process box
          //
          //transformSVM(data_points_d);
          //computeMeans();
          //displayMatrices();
          //displaySupportRegions();

          sequentialMinimalOptimization();
          computeSupportVectors();
        */

        // exit gracefully
        //
        return true;
    }
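step2() above drives the whole fit: sequentialMinimalOptimization() solves for the Lagrange multipliers, and computeSupportVectors() then presumably collects the points with nonzero multipliers for display. For background, the optimizer's target (the standard 1-norm soft-margin dual from the Platt paper cited in the next comment block, stated here for reference rather than taken from this file) is

\[
\max_{\alpha}\; W(\alpha) \;=\; \sum_{i=1}^{N}\alpha_i \;-\; \frac{1}{2}\sum_{i=1}^{N}\sum_{j=1}^{N} y_i\,y_j\,K(x_i,x_j)\,\alpha_i\,\alpha_j
\qquad \text{subject to} \quad 0 \le \alpha_i \le C,\;\; \sum_{i=1}^{N} y_i\,\alpha_i = 0,
\]

where C corresponds to bound_d and the KKT conditions below are enforced to within tol_d.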
    // method: step3
    //
    // arguments: none
    // return   : none
    //
    // step three of the algorithm: compute the decision regions
    //

    // method: sequentialMinimalOptimization
    //
    // arguments: none
    //
    // return: boolean value indicating status
    //
    // References:
    //
    // J. C. Platt, "Fast Training of Support Vector Machines using
    // Sequential Minimal Optimization". In B. Scholkopf, C. J. C.
    // Burges and A. J. Smola, editors, "Advances in Kernel Methods
    // - Support Vector Learning", pp 185-208, MIT Press, 1998.
    //
    boolean sequentialMinimalOptimization()
    {
        // declare local variables
        //
        int numChanged = 0;
        boolean examineAll = true;

        DisplayScale scale = output_panel_d.disp_area_d.getDisplayScale();

        x_d.clear();
        y_d.clear();

        // load the points of the first set, scaled to the display,
        // and label them with CLASS_LABEL_1
        //
        for (int i = 0; i < set1_d.size(); i++)
        {
            Vector vec_point = new Vector();
            MyPoint curr_point = (MyPoint)set1_d.get(i);
            double x_val = curr_point.x;
            double y_val = curr_point.y;
            x_val /= 2.0;
            y_val /= 2.0;
            vec_point.add(new Double(x_val));
            vec_point.add(new Double(y_val));
            x_d.add(vec_point);
            y_d.add(CLASS_LABEL_1);
        }

        // load the points of the second set and label them with
        // CLASS_LABEL_2
        //
        for (int i = 0; i < set2_d.size(); i++)
        {
            Vector vec_point = new Vector();
            MyPoint curr_point = (MyPoint)set2_d.get(i);
            double x_val = curr_point.x;
            double y_val = curr_point.y;
            x_val /= 2.0;
            y_val /= 2.0;
            vec_point.add(new Double(x_val));
            vec_point.add(new Double(y_val));
            x_d.add(vec_point);
            y_d.add(CLASS_LABEL_2);
        }

        number_d = x_d.size();

        // initialize the error cache
        // Note: at start-up all the Lagrange multipliers are zero, hence
        // Ei = evaluateOutput(i) - y_d(i) = -y_d(i)
        //
        error_cache_d.clear();
        for (int i = 0; i < number_d; i++)
        {
            double yi = ((Double)y_d.get(i)).doubleValue();
            error_cache_d.add(new Double(-1 * yi));
        }

        // initialize the Lagrange multipliers
        //
        alpha_d.clear();
        for (int i = 0; i < number_d; i++)
        {
            alpha_d.add(new Double(0.0));
        }

        // initialize the bias (threshold) to 0
        //
        bias_d = 0;

        int curr = 0;
        while ((numChanged > 0) || examineAll)
        {
            numChanged = 0;
            if (examineAll)
            {
                for (int i = 0; i < number_d; i++)
                {
                    numChanged += examineExample(i);
                }

                // debug info
                //
                //System.out.println("numChanged1: " + numChanged);
            }
            else
            {
                for (int i = 0; i < number_d; i++)
                {
                    if ((((Double)alpha_d.get(i)).doubleValue() != 0) && (((Double)alpha_d.get(i)).doubleValue() != bound_d))
                    {
                        numChanged += examineExample(i);
                    }
                }
            }

            if (examineAll)
            {
                examineAll = false;
            }
            else if (numChanged == 0)
            {
                examineAll = true;
            }

            curr++;
            //if ( curr < 900 ){
            pro_box_d.setProgressCurr(curr % 1000);
            //}
        }

        // exit gracefully
        //
        return true;
    }
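examineExample() below falls back on evaluateOutput(x_i2) - y2 whenever the error is not in the cache. evaluateOutput itself sits beyond this page of the listing; its header comment above only says it computes the output of a specific example. As a reference, Platt's output function u(x) = sum_j y_j * alpha_j * K(x_j, x) - b, written against this class's fields, would look roughly like this sketch (illustrative, not the file's actual body):

    // Sketch of the SVM output u(x) = sum_j y_j * alpha_j * K(x_j, x) - b,
    // following Platt (1998). Field names (x_d, y_d, alpha_d, bias_d,
    // number_d) are this class's; the body itself is an assumption.
    double evaluateOutputSketch(Vector point)
    {
        double sum = 0.0;
        for (int j = 0; j < number_d; j++)
        {
            double alpha = ((Double) alpha_d.get(j)).doubleValue();
            if (alpha > 0)
            {
                double yj = ((Double) y_d.get(j)).doubleValue();
                sum += yj * alpha * K((Vector) x_d.get(j), point);
            }
        }
        return sum - bias_d;
    }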
    // method: examineExample
    //
    // arguments:
    //  int i2: (input) example to be evaluated
    //
    // return: integer value indicating status, returns either 0 or 1
    //
    // method to examine a specific Lagrange multiplier and check if
    // it violates the KKT condition
    //
    int examineExample(int i2)
    {
        // declare local variables
        //
        double E1 = 0;
        double E2 = 0;
        double y2 = ((Double)y_d.get(i2)).doubleValue();
        double alpha2 = ((Double)alpha_d.get(i2)).doubleValue();
        Vector x_i2 = (Vector)x_d.get(i2);

        // look up the error in the error cache for the corresponding
        // Lagrange multiplier
        //
        if ((alpha2 > 0) && (alpha2 < bound_d))
        {
            E2 = ((Double)error_cache_d.get(i2)).doubleValue();
        }
        else
        {
            E2 = evaluateOutput(x_i2) - y2;
        }
        //E2 = ((Double)error_cache_d.get(i2)).doubleValue();

        double r2 = E2 * y2;

        // double tol_d <-- default tolerance parameter
        // KKT conditions:
        //  r2 >= tol_d   and alpha2 == 0         (well classified)
        //  r2 == 0       and 0 < alpha2 < C      (support vectors at margins)
        //  r2 <= -tol_d  and alpha2 == C         (support vectors between margins)
        //
        // the KKT conditions are necessary and sufficient conditions for
        // convergence. For example, in the classification case the above
        // criteria must be checked for the 1-norm soft margin optimization.
        //
        // the choice of the first heuristic provides the outer loop of SMO.
        // the outer loop iterates over the entire data set and determines
        // if an example violates the KKT condition. when an example violates
        // the KKT condition it is selected for joint optimization with a
        // second example (which is found using the second heuristic).
        //
        // the first choice concentrates the CPU time on examples
        // that most likely violate KKT (the non-bound subset) and verifies
        // that the KKT conditions are fulfilled within epsilon.
        //
        if (((r2 < -tol_d) && (alpha2 < bound_d)) || ((r2 > tol_d) && (alpha2 > 0)))
        {
            // once the first Lagrange multiplier is chosen, SMO chooses the
            // second Lagrange multiplier to maximize the size of the step
            // taken during joint optimization. SMO approximates the step
            // size via |E1 - E2| because the kernel evaluations are expensive
            //
            int i1 = -1;
            double step_size = 0;
            double max_step_size = 0;

            // find the example that gives the largest step size
            // (non-bound example)
            //
            for (int i = 0; i < number_d; i++)
            {
                double alpha = ((Double)alpha_d.get(i)).doubleValue();
                if ((alpha > 0) && (alpha < bound_d))
                {
                    E1 = ((Double)error_cache_d.get(i)).doubleValue();
                    step_size = Math.abs(E2 - E1);
                    if (step_size > max_step_size)
                    {
                        i1 = i;
                        max_step_size = step_size;
                    }
                }
            }

            // if a good i1 is found then we begin the joint optimization
            //
            if (i1 >= 0)
            {
                if (takeStep(i1, i2) > 0)
                {
                    return 1;
                }
            }

            // under unusual circumstances SMO cannot make progress using the
            // second heuristic, hence it uses a hierarchy of second choices:
            // (A) if the second heuristic does not make progress then SMO
            //     searches the non-bound examples for one that does
            //
            int M = (int) (Math.random() * number_d);
            //System.out.println("M: " + M);
            for (int i = M; i < number_d + M; i++)
            {
                // if a good i1 is found then we begin the joint
                // optimization
                //
                i1 = i % number_d;
                double alpha = ((Double)alpha_d.get(i1)).doubleValue();
                if ((alpha > 0) && (alpha < bound_d))
                {
                    if (takeStep(i1, i2) > 0)
                    {
                        return 1;
                    }
                }
            }

            // (B) if the non-bound examples do not make progress then SMO
            //     searches the entire data set for one that does
            //
            M = (int) (Math.random() * number_d);
            for (int i = M; i < number_d + M; i++)
            {
                // if a good i1 is found then we begin the joint optimization
                //
                i1 = i % number_d;
                if (takeStep(i1, i2) > 0)
                {
                    return 1;
                }
            }
        }

        // no progress was made
        //
        return 0;
    }
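takeStep(i1, i2), called repeatedly above, is documented earlier as the joint optimization of two Lagrange multipliers; its body also lies beyond this page. As a reference, the core of Platt's joint update (the feasible segment [L, H], the curvature eta, and the clipped step) is sketched below using this class's fields. This is a sketch of the standard algorithm under stated assumptions, not the file's code; the real method must also update bias_d and error_cache_d after a successful step:

    // Core of Platt's takeStep: clip the new alpha2 to the segment [L, H]
    // that keeps 0 <= alpha <= bound_d and the equality constraint, using
    // the curvature eta = K11 + K22 - 2*K12. Sketch only; errors are
    // recomputed here instead of read from error_cache_d, and the bias /
    // error-cache updates of the real method are omitted.
    int takeStepSketch(int i1, int i2)
    {
        if (i1 == i2) return 0;
        double alph1 = ((Double) alpha_d.get(i1)).doubleValue();
        double alph2 = ((Double) alpha_d.get(i2)).doubleValue();
        double y1 = ((Double) y_d.get(i1)).doubleValue();
        double y2 = ((Double) y_d.get(i2)).doubleValue();
        double E1 = evaluateOutput((Vector) x_d.get(i1)) - y1;
        double E2 = evaluateOutput((Vector) x_d.get(i2)) - y2;
        double s = y1 * y2;

        // feasible bounds for the new alpha2
        double L, H;
        if (y1 != y2)
        {
            L = Math.max(0, alph2 - alph1);
            H = Math.min(bound_d, bound_d + alph2 - alph1);
        }
        else
        {
            L = Math.max(0, alph1 + alph2 - bound_d);
            H = Math.min(bound_d, alph1 + alph2);
        }
        if (L == H) return 0;

        double k11 = K((Vector) x_d.get(i1), (Vector) x_d.get(i1));
        double k12 = K((Vector) x_d.get(i1), (Vector) x_d.get(i2));
        double k22 = K((Vector) x_d.get(i2), (Vector) x_d.get(i2));
        double eta = k11 + k22 - 2 * k12;
        if (eta <= 0) return 0;   // Platt instead evaluates the objective at L and H

        // unconstrained optimum along the constraint line, then clip to [L, H]
        double a2 = alph2 + y2 * (E1 - E2) / eta;
        if (a2 < L) a2 = L; else if (a2 > H) a2 = H;
        if (Math.abs(a2 - alph2) < eps_d * (a2 + alph2 + eps_d)) return 0;
        double a1 = alph1 + s * (alph2 - a2);

        alpha_d.set(i1, new Double(a1));
        alpha_d.set(i2, new Double(a2));
        return 1;
    }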
