
algorithmsvm.java,v

Contains several classifier design algorithms commonly used in pattern recognition.

Java (RCS ,v file)
Page 1 of 4
head	1.5;
access;
symbols;
locks; strict;
comment	@# @;

1.5
date	2005.06.10.18.30.12;	author rirwin;	state Exp;
branches;
next	1.4;

1.4
date	2005.05.23.20.18.13;	author rirwin;	state Exp;
branches;
next	1.3;

1.3
date	2005.03.09.03.13.31;	author patil;	state Exp;
branches;
next	1.2;

1.2
date	2005.01.19.21.56.12;	author patil;	state Exp;
branches;
next	1.1;

1.1
date	2004.12.28.00.04.32;	author patil;	state Exp;
branches;
next	;

desc
@@

1.5
log
@Establishing RCS version control.
@
text
@/*
 * @@(#) AlgorithmSVM.java  1.10 02/09/03
 * Last edited: Ryan Irwin
 */

// import java packages
//
//import java.awt.*;            // These imports are not needed - Phil T. 6-23-03
//import java.awt.event.*;
//import javax.swing.*;
//import javax.swing.border.*;
//import javax.swing.event.*;
//import java.io.*;
//import java.lang.*;
import java.awt.Color;          // needed: Color is used in step2() and step3()
import java.util.*;

/**
 * Algorithm Support Vector Machines
 */
public class AlgorithmSVM extends Algorithm
{
    //-----------------------------------------------------------------
    //
    // static data members
    //
    //-----------------------------------------------------------------
    static final int KERNEL_TYPE_LINEAR = 0;
    static final int KERNEL_TYPE_RBF = 1;
    static final int KERNEL_TYPE_POLYNOMIAL = 2;
    static final int KERNEL_TYPE_DEFAULT = KERNEL_TYPE_LINEAR;

    static final Double CLASS_LABEL_1 = new Double(1.0);
    static final Double CLASS_LABEL_2 = new Double(-1.0);

    //-----------------------------------------------------------------
    //
    // instance data members
    //
    //-----------------------------------------------------------------

    // SVM data members
    //
    Vector<Vector<Double>> x_d = new Vector<Vector<Double>>();
    Vector<Double> y_d = new Vector<Double>();
    Vector<Double> alpha_d = new Vector<Double>();
    Vector<Double> error_cache_d = new Vector<Double>();
    int number_d = 0;

    // SVM parameters
    //
    int kernel_type_d = KERNEL_TYPE_RBF;
    //int kernel_type_d = KERNEL_TYPE_POLYNOMIAL;
    double bound_d = 100;       // the upper bound for a, 0 <= a <= bound_d
    double tol_d = 0.01;
    double bias_d = 0.0;
    double eps_d = 0.001;

    // vector of support region points
    //
    Vector<MyPoint> support_vectors_d = new Vector<MyPoint>();
    Vector<MyPoint> decision_regions_d = new Vector<MyPoint>();
    int output_canvas_d[][];

    //-------------------------------------------------------------------
    //
    // classification functions
    //
    //-------------------------------------------------------------------

    /**
     * Overrides the initialize() method in the base class.  Initializes
     * member data and prepares for execution of the first step.  This
     * method "resets" the algorithm.
     *
     * @@return  true
     */
    public boolean initialize()
    {
        // check the data points
        //
        if (output_panel_d == null)
        {
            return false;
        }

        // add the process description for the SVM algorithm
        //
        if (description_d.size() == 0)
        {
            String str = new String("   0. Initialize the original data.");
            description_d.addElement(str);

            str = new String("   1. Displaying the original data.");
            description_d.addElement(str);

            str = new String("   2. Computing the Support Vectors.");
            description_d.addElement(str);

            str = new String("   3. Computing the decision regions.");
            description_d.addElement(str);
        }

        // append message to process box
        //
        pro_box_d.appendMessage("Support Vector Machine :" + "\n");

        // set the data points for this algorithm
        //
        //	set1_d = (Vector)data_points_d.dset1.clone();
        //	set2_d = (Vector)data_points_d.dset2.clone();
        //
        set1_d = data_points_d.dset1;
        set2_d = data_points_d.dset2;

        // reset values
        //
        support_vectors_d = new Vector<MyPoint>();
        decision_regions_d = new Vector<MyPoint>();
        x_d = new Vector<Vector<Double>>();
        y_d = new Vector<Double>();
        alpha_d = new Vector<Double>();
        error_cache_d = new Vector<Double>();
        number_d = 0;

        step_count = 3;
        algo_id = "AlgorithmSVM";

        // set the step index
        //
        step_index_d = 0;

        // append message to process box
        //
        pro_box_d.appendMessage((String)description_d.get(step_index_d));

        // exit gracefully
        //
        return true;
    }
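    // A minimal sketch (assumption, not part of the original source): a
    // kernel evaluation consistent with the KERNEL_TYPE_* constants above.
    // The file's actual kernel method is not shown on this page, so the
    // RBF width (1.0) and polynomial degree (2) below are placeholders.
    //
    double kernelSketch(Vector<Double> a, Vector<Double> b)
    {
        // dot product <a, b>, used by the linear and polynomial kernels
        //
        double dot = 0.0;
        for (int i = 0; i < a.size(); i++)
        {
            dot += a.get(i).doubleValue() * b.get(i).doubleValue();
        }

        if (kernel_type_d == KERNEL_TYPE_RBF)
        {
            // K(a, b) = exp(-||a - b||^2 / (2 * sigma^2)), sigma assumed 1.0
            //
            double dist2 = 0.0;
            for (int i = 0; i < a.size(); i++)
            {
                double diff = a.get(i).doubleValue() - b.get(i).doubleValue();
                dist2 += diff * diff;
            }
            return Math.exp(-dist2 / 2.0);
        }
        else if (kernel_type_d == KERNEL_TYPE_POLYNOMIAL)
        {
            // K(a, b) = (<a, b> + 1)^d, degree d assumed 2
            //
            return Math.pow(dot + 1.0, 2.0);
        }

        // KERNEL_TYPE_LINEAR: K(a, b) = <a, b>
        //
        return dot;
    }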
    /**
     * Implementation of the run function from the Runnable interface.
     * Determines what the current step is and calls the appropriate
     * method.
     */
    public void run()
    {
        // Debug
        //System.out.println(algo_id + ": run()");

        if (step_index_d == 1)
        {
            disableControl();
            step1();
            enableControl();
        }
        else if (step_index_d == 2)
        {
            disableControl();
            step2();
            enableControl();
        }
        else if (step_index_d == 3)
        {
            disableControl();
            step3();
            pro_box_d.appendMessage("   Algorithm Complete");
            enableControl();
        }

        return;
    }

    /**
     * step one of the algorithm
     */
    boolean step1()
    {
        // Debug
        //System.out.println(algo_id + ": step1()");

        pro_box_d.setProgressMin(0);
        pro_box_d.setProgressMax(1);
        pro_box_d.setProgressCurr(0);

        scaleToFitData();

        // display the original data
        //
        output_panel_d.addOutput(set1_d, Classify.PTYPE_INPUT,
                                 data_points_d.color_dset1);
        output_panel_d.addOutput(set2_d, Classify.PTYPE_INPUT,
                                 data_points_d.color_dset2);
        output_panel_d.addOutput(set3_d, Classify.PTYPE_INPUT,
                                 data_points_d.color_dset3);
        output_panel_d.addOutput(set4_d, Classify.PTYPE_INPUT,
                                 data_points_d.color_dset4);

        // step 1 completed
        //
        pro_box_d.setProgressCurr(1);
        output_panel_d.repaint();

        // exit gracefully
        //
        return true;
    }
    /**
     * step two of the algorithm.  Runs sequential minimal optimization
     * and computes the support vectors for the given data
     *
     * @@return true
     */
    boolean step2()
    {
        sequentialMinimalOptimization();
        computeSupportVectors();

        // display support vectors
        //
        output_panel_d.addOutput(support_vectors_d,
                                 Classify.PTYPE_SUPPORT_VECTOR, Color.black);

        pro_box_d.setProgressCurr(1000);
        output_panel_d.repaint();

        /*
          // append message to process box
          //
          //transformSVM(data_points_d);
          //computeMeans();
          //displayMatrices();
          //displaySupportRegions();

          sequentialMinimalOptimization();
          computeSupportVectors();

          // display support vectors
          //
          //color.addElement(new Color(255, 200, 0));    // orange
          output_panel_d.addOutput(support_vectors_d,
                                   Classify.PTYPE_SUPPORT_VECTOR,
                                   Color.black);
          //new Color(255, 200, 0));
          output_panel_d.repaint();
        */

        // exit gracefully
        //
        return true;
    }

    /**
     * step three of the algorithm.  Computes and displays the decision
     * regions
     *
     * @@return true
     */
    boolean step3()
    {
        computeDecisionRegions();

        // display decision regions
        //
        output_panel_d.addOutput(decision_regions_d, Classify.PTYPE_INPUT,
                                 new Color(255, 200, 0));  //Color.black);
        output_panel_d.repaint();

        computeErrors();

        /*
          //computeDecisionRegions();

          // display decision regions
          //
          //output_panel_d.addOutput(decision_regions_d, Classify.PTYPE_INPUT,
          //new Color(255, 200, 0));
          //Color.black);
          //output_panel_d.repaint();
        */

        // exit gracefully
        //
        return true;
    }
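    // A minimal sketch (assumption, not part of the original source): the
    // decision value that the decision regions are built from,
    //
    //   f(x) = sum_i alpha_i * y_i * K(x_i, x) - bias_d
    //
    // with a point classified by the sign of f(x).  evaluateOutput() plays
    // this role in the file, but its body is not on this page; the
    // kernelSketch() helper above stands in for the kernel, and the sign
    // convention for bias_d is assumed from Platt's formulation.
    //
    double decisionValueSketch(Vector<Double> x)
    {
        double f = 0.0;
        for (int i = 0; i < number_d; i++)
        {
            double alpha = alpha_d.get(i).doubleValue();
            if (alpha > 0)      // only support vectors contribute
            {
                f += alpha * y_d.get(i).doubleValue()
                    * kernelSketch(x_d.get(i), x);
            }
        }
        return f - bias_d;
    }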
    /**
     * References:
     *
     * J. C. Platt, "Fast Training of Support Vector Machines using
     * Sequential Minimal Optimization".  In B. Scholkopf, C. J. C.
     * Burges and A. J. Smola, editors, "Advances in Kernel Methods
     * - Support Vector Learning", pp 185-208, MIT Press, 1998.
     *
     * @@return true
     */
    boolean sequentialMinimalOptimization()
    {
        // declare local variables
        //
        int numChanged = 0;
        boolean examineAll = true;
        DisplayScale scale = output_panel_d.disp_area_d.getDisplayScale();

        // total number of points
        //
        x_d.clear();
        y_d.clear();

        // add the data points
        //
        for (int i = 0; i < set1_d.size(); i++)
        {
            Vector<Double> vec_point = new Vector<Double>();
            MyPoint curr_point = (MyPoint)set1_d.get(i);
            double x_val = curr_point.x;
            double y_val = curr_point.y;
            x_val /= 2.0;
            y_val /= 2.0;
            vec_point.add(new Double(x_val));
            vec_point.add(new Double(y_val));
            x_d.add(vec_point);
            y_d.add(CLASS_LABEL_1);
        }

        for (int i = 0; i < set2_d.size(); i++)
        {
            Vector<Double> vec_point = new Vector<Double>();
            MyPoint curr_point = (MyPoint)set2_d.get(i);
            double x_val = curr_point.x;
            double y_val = curr_point.y;
            x_val /= 2.0;
            y_val /= 2.0;
            vec_point.add(new Double(x_val));
            vec_point.add(new Double(y_val));
            x_d.add(vec_point);
            y_d.add(CLASS_LABEL_2);
        }

        number_d = x_d.size();

        // initialize the error cache
        // Note: at start-up all the Lagrange multipliers are zero, hence
        // Ei = evaluateOutput(i) - y_d(i) = -y_d(i)
        //
        error_cache_d.clear();
        for (int i = 0; i < number_d; i++)
        {
            double yi = ((Double)y_d.get(i)).doubleValue();
            error_cache_d.add(new Double(-1 * yi));
        }

        // initialize the Lagrange multipliers
        //
        alpha_d.clear();
        for (int i = 0; i < number_d; i++)
        {
            alpha_d.add(new Double(0.0));
        }

        // initialize the bias (threshold) to 0
        //
        bias_d = 0;

        int curr = 0;

        while ((numChanged > 0) || examineAll)
        {
            numChanged = 0;

            if (examineAll)
            {
                for (int i = 0; i < number_d; i++)
                {
                    numChanged += examineExample(i);
                }

                // debug info
                //System.out.println("numChanged1: " + numChanged);
            }
            else
            {
                for (int i = 0; i < number_d; i++)
                {
                    if ((((Double)alpha_d.get(i)).doubleValue() != 0) &&
                        (((Double)alpha_d.get(i)).doubleValue() != bound_d))
                    {
                        numChanged += examineExample(i);
                    }
                }

                //System.out.println("numChanged2: " + numChanged);
            }

            if (examineAll)
            {
                examineAll = false;
            }
            else if (numChanged == 0)
            {
                examineAll = true;
            }

            curr++;
            //if ( curr < 900 ){
            pro_box_d.setProgressCurr(curr % 1000);
            //}
        }

        // exit gracefully
        //
        return true;
    }
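    // A minimal sketch (assumption, factored out for clarity only): the KKT
    // violation test that examineExample() below applies inline.  With
    // r = E * y, a multiplier is worth optimizing when the error pushes it
    // away from a bound it is not already at, within tolerance tol_d.
    //
    boolean violatesKKTSketch(double r, double alpha)
    {
        return ((r < -tol_d) && (alpha < bound_d))
            || ((r > tol_d) && (alpha > 0));
    }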
    /**
     * method to examine a specific Lagrange multiplier and check if
     * it violates the KKT conditions
     *
     * @@param  i2 integer index of the example to be evaluated
     *
     * @@return  integer value indicating status, either 0 or 1
     */
    int examineExample(int i2)
    {
        // declare local variables
        //
        double E1 = 0;
        double E2 = 0;
        double y2 = ((Double)y_d.get(i2)).doubleValue();
        double alpha2 = ((Double)alpha_d.get(i2)).doubleValue();
        Vector x_i2 = (Vector)x_d.get(i2);

        // look up the error in the error cache for the corresponding
        // Lagrange multiplier
        //
        if ((alpha2 > 0) && (alpha2 < bound_d))
        {
            E2 = ((Double)error_cache_d.get(i2)).doubleValue();
        }
        else
        {
            E2 = evaluateOutput(x_i2) - y2;
        }

        //E2 = ((Double)error_cache_d.get(i2)).doubleValue();

        double r2 = E2 * y2;

        // double tol_d <-- default tolerance parameter
        //
        // KKT conditions:
        //   r2 >= tol_d   and alpha2 == 0      (well classified)
        //   r2 == 0       and 0 < alpha2 < C   (support vectors at margins)
        //   r2 <= -tol_d  and alpha2 == C      (support vectors between
        //                                       margins)
        //
        // the KKT conditions are necessary and sufficient conditions for
        // convergence.  For example, in the classification case the above
        // criteria must be checked for the 1-norm soft margin optimization.
        //
        // the choice of the first heuristic provides the outer loop of SMO.
        // the outer loop iterates over the entire data set and determines
        // if an example violates the KKT conditions.  when an example
        // violates the KKT conditions it is selected for joint optimization
        // with a second example (which is found using the second heuristic).
        //
        // the first choice concentrates the CPU time on the examples that
        // most likely violate the KKT conditions (the non-bound subset) and
        // verifies that the KKT conditions are fulfilled within epsilon.
        //
        if (((r2 < -tol_d) && (alpha2 < bound_d)) ||
            ((r2 > tol_d) && (alpha2 > 0)))
        {
            // once the first Lagrange multiplier is chosen, SMO chooses the
            // second Lagrange multiplier to maximize the size of the step
            // taken during joint optimization.  SMO approximates the step
            // size via |E1 - E2| because the kernel evaluations are
            // expensive
            //
            int i1 = -1;
            double step_size = 0;
            double max_step_size = 0;

            // find the example that gives the largest step size
            // (non-bound example)
            //
            for (int i = 0; i < number_d; i++)
            {
                double alpha = ((Double)alpha_d.get(i)).doubleValue();
                if ((alpha > 0) && (alpha < bound_d))
                {
                    E1 = ((Double)error_cache_d.get(i)).doubleValue();
                    step_size = Math.abs(E2 - E1);
                    if (step_size > max_step_size)
                    {
                        i1 = i;
                        max_step_size = step_size;
                    }
                }
            }

            // if a good i1 is found then we begin the joint optimization
            //
            if (i1 >= 0)
            {
                if (takeStep(i1, i2) > 0)
                {
                    return 1;
                }
            }

            // under unusual circumstances SMO cannot make progress using the
            // second heuristic, hence, it uses a hierarchy of second choices:
            // (A) if the second heuristic does not make progress then SMO
            //     searches the non-bound examples for one that does
            //
            int M = (int) (Math.random() * number_d);
            // System.out.println("M: " + M);

            for (int i = M; i < number_d + M; i++)
            {
                // if a good i1 is found then we begin the joint
                // optimization
                //
                i1 = i % number_d;
                double alpha = ((Double)alpha_d.get(i1)).doubleValue();

                if ((alpha > 0) && (alpha < bound_d))
                {
                    if (takeStep(i1, i2) > 0)
                    {
                        return 1;
                    }
                }
            }

            // (B) if the non-bound examples do not make progress then SMO
            //     searches the entire data set for one that does
            //
            M = (int) (Math.random() * number_d);
            for (int i = M; i < number_d + M; i++)
            {
                // if a good i1 is found then we begin the joint optimization
                //
                i1 = i % number_d;
                if (takeStep(i1, i2) > 0)
                {
                    return 1;
                }
            }
        }

        // no progress was made
        //
        return 0;
    }

    /**
     * method that jointly optimizes two Lagrange multipliers and
     * maintains the feasibility of the dual QP
     *
     * @@param    i1 first integer example to be evaluated
     * @@param    i2 second integer example to be evaluated
     *
     * @@return   integer value indicating status
     */
    int takeStep(int i1, int i2)
    {
        // if the two examples being jointly optimized are the same -
        // no progress
        //
        if (i1 == i2)
        {
            return 0;
        }

        // declare and initialize local variables
        //
        double E1 = 0;
        double E2 = 0;
        double alpha1 = ((Double)alpha_d.get(i1)).doubleValue();
        double alpha2 = ((Double)alpha_d.get(i2)).doubleValue();
        double y1 = ((Double)y_d.get(i1)).doubleValue();
        double y2 = ((Double)y_d.get(i2)).doubleValue();
        double s = y1 * y2;

        Vector x_i1 = (Vector)x_d.get(i1);
        Vector x_i2 = (Vector)x_d.get(i2);

        // look up the error in the error cache if the corresponding
