
algorithmrvm.java,v

Contains some of the classifier design algorithms commonly used in pattern recognition.
Language: Java (RCS ",v" file)
Page 1 of 5
	    // System.out.println("A_d: ");	    // A_d.printMatrix();	    // System.out.println("weights_d: ");	    // Matrix.printDoubleVector(weights_d);	    // System.out.println("alpha_thresh_d: " + alpha_thresh_d);	}	// prune any weights that have gone to zero	//	pruneWeights();	// update the number of hyperparameters after pruning	//	dimA_d = A_d.getNumRows();	// reset the size of the vector set	//	num_samples_d = weights_d.size();	gradient_d.setSize(dimA_d);	MathUtil.initDoubleVector(gradient_d, 0.0);	curr_weights_d.setSize(dimA_d);	MathUtil.initDoubleVector(curr_weights_d, 0.0);	hessian_d.initMatrixValue(dimA_d, dimA_d, 0, Matrix.SYMMETRIC);	covar_cholesky_d.initMatrixValue(dimA_d, dimA_d, 0, 					 Matrix.LOWER_TRIANGULAR);	// exit gracefully	//	return true;    }        /**     *      * Prunes off vectors which attain a zero weight during     * training. Auxiliary data structures that are indexed according to the     * working set are also updated.     *     * @@return true     *     *     */    public boolean pruneWeights()    {	// Debug	//	// System.out.println("AlgorithmRVM : pruneWeights()");	// declare local variables	//	int num_weights = curr_weights_d.size();	// the A matrix is diagonal so we can get a temporary version of it	// into a vector - this makes it easier to work with	//	Vector<Double> tmp_A = new Vector<Double>();	A_d.getDiagonalVector(tmp_A);	if (debug_level_d)	{	    // System.out.println("tmp_A before pruning:");	    // Matrix.printDoubleVector(tmp_A);	}		/*	  //A_d.getDiagonal(tmp_A);	  for (int i = 0; i < dimA_d; i++) {	  	  // prune any weight which has been floored. we prune off	  // the corresponding hyperparameter as well as the row of	  // the phi matrix corresponding to this weight	  //	  if (!((MathUtil.doubleValue(curr_weights_d, i) == 0.0) &&	  (A_d.getValue(i,i) > alpha_thresh_d))){	  	  flags[i] = true;	  tmp_A.add(new Double(A_d.getValue(i, i)));	  dimA ++;	  	  }	  else {	  flags[i] = false;	  }	  }	*/		// get the dimensions of the phi matrix	//	int num_rows = phi_d.getNumRows();		boolean flags[] = new boolean[num_rows];	int dimA = 0;		// loop over the weights and check for zero-valued weights.	
    /**
     * Computes the diagonal elements of the inverse from the
     * Cholesky decomposition.
     *
     * @return true
     */
    public boolean computeVarianceCholesky()
    {
        // Debug
        //
        // System.out.println("AlgorithmRVM : computeVarianceCholesky()");

        // find the size of the cholesky decomposition matrix
        //
        long nrows = covar_cholesky_d.getNumRows();
        double sum = 0.0;

        // compute the inverse of the lower triangular cholesky
        // decomposition in place, by forward substitution
        //
        for (int i = 0; i < nrows; i++)
        {
            covar_cholesky_d.setValue(i, i,
                                      (1.0 / covar_cholesky_d.getValue(i, i)));
            for (int j = i + 1; j < nrows; j++)
            {
                sum = 0.0;
                for (int k = i; k < j; k++)
                {
                    sum -= covar_cholesky_d.getValue(j, k)
                         * covar_cholesky_d.getValue(k, i);
                }
                covar_cholesky_d.setValue(j, i,
                                          (sum /
                                           covar_cholesky_d.getValue(j, j)));
            }
        }

        // loop over the columns and put the sum-of-squares values on
        // the diagonal. this simulates a matrix multiply, but we only
        // retain the diagonal values.
        //
        for (int i = 0; i < nrows; i++)
        {
            double val = covar_cholesky_d.getColSumSquare(i);
            covar_cholesky_d.setValue(i, i, val);
        }

        // exit gracefully
        //
        return true;
    }
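    // --------------------------------------------------------------------
    // Illustrative sketch (not part of the original file): if the
    // covariance satisfies C = inv(L * L') with L lower triangular, then
    // C = inv(L)' * inv(L), so the i-th diagonal element of C is the sum
    // of squares of column i of inv(L). That is exactly what the method
    // above computes in place; the plain-array version below shows the
    // same two steps on a double[][] for clarity.
    //
    static double[] varianceDiagonalSketch(double[][] L)
    {
        int n = L.length;
        double[][] X = new double[n][n];

        // invert the lower-triangular matrix by forward substitution:
        // X[i][i] = 1 / L[i][i], and for j > i,
        // X[j][i] = -(sum_{k=i..j-1} L[j][k] * X[k][i]) / L[j][j]
        //
        for (int i = 0; i < n; i++)
        {
            X[i][i] = 1.0 / L[i][i];
            for (int j = i + 1; j < n; j++)
            {
                double sum = 0.0;
                for (int k = i; k < j; k++)
                {
                    sum -= L[j][k] * X[k][i];
                }
                X[j][i] = sum / L[j][j];
            }
        }

        // diagonal of C = inv(L)' * inv(L): the sum of squares down
        // column i of the inverse
        //
        double[] diag = new double[n];
        for (int i = 0; i < n; i++)
        {
            double val = 0.0;
            for (int j = i; j < n; j++)
            {
                val += X[j][i] * X[j][i];
            }
            diag[i] = val;
        }
        return diag;
    }
    // --------------------------------------------------------------------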
    /**
     * Computes the sigma values defined on p. 218 of [1].
     *
     * @return true
     */
    public boolean computeSigma()
    {
        // Debug
        //
        // System.out.println("AlgorithmRVM : computeSigma()");

        // declare local variables
        //
        Matrix weights = new Matrix();
        Matrix sigma = new Matrix();

        weights.initMatrix(curr_weights_d);

        if (debug_level_d)
        {
            // System.out.println("phi_d: ");
            // phi_d.printMatrix();
            // System.out.println("curr_weights_d: ");
            // weights.printMatrix();
        }

        // apply the logistic sigmoid link function to the model output:
        // sigma = 1 / (1 + exp(-(w' * phi)))
        //
        weights.multMatrix(phi_d, sigma);
        sigma.scalarMultMatrix(-1);
        sigma.expMatrix();
        sigma.addMatrixElements(1.0);
        sigma.inverseMatrixElements();

        if (debug_level_d)
        {
            // System.out.println("sigma: ");
            // sigma.printMatrix();
        }

        sigma.toDoubleVector(sigma_d);

        // exit gracefully
        //
        return true;
    }

    /**
     * Computes the log likelihood of the weights given the data.
     * We would expect this value to increase as training proceeds.
     *
     * @return likelihood of the weights given the data
     */
    public double computeLikelihood()
    {
        // Debug
        //
        // System.out.println("AlgorithmRVM : computeLikelihood()");

        // declare local variables
        //
        double result = 0;
        double divisor = phi_d.getNumColumns();

        // init the matrices
        //
        Matrix weights_M = new Matrix();
        Matrix temp_M = new Matrix();
        Matrix result_M = new Matrix();
        weights_M.initMatrix(curr_weights_d);

        // compute the -1/2 * w * A * w' component of the likelihood
        //
        weights_M.multMatrix(A_d, temp_M);
        result_M.copyMatrix(weights_M);
        result_M.transposeMatrix(weights_M);
        temp_M.multMatrix(weights_M, result_M);
        result = result_M.getValue(0, 0);
        result *= 0.5;
        result /= divisor;

        // compute the summation in the log likelihood:
        // L = sum [t * log(sigma) + (1 - t) * log(1 - sigma)]
        //
        long len;
        if (sigma_d.size() > y_d.size())
        {
            len = y_d.size();
        }
        else
        {
            len = sigma_d.size();
        }

        double t;
        double sig;
        for (int i = 0; i < len; i++)
        {
            t = MathUtil.doubleValue(y_d, i);
            sig = MathUtil.doubleValue(sigma_d, i);

            if (t < 0.5)
            {
                result -= Math.log(1 - sig) / divisor;
            }
            else
            {
                result -= Math.log(sig) / divisor;
            }
        }

        // exit gracefully
        //
        return result;
    }
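    // --------------------------------------------------------------------
    // Illustrative sketch (not part of the original file): the data term
    // accumulated above is the Bernoulli log likelihood
    //     sum_i [ t_i * log(sigma_i) + (1 - t_i) * log(1 - sigma_i) ],
    // where t_i is the 0/1 target and sigma_i the predicted probability.
    // A minimal standalone version on plain arrays, with hypothetical
    // names:
    //
    static double bernoulliLogLikelihoodSketch(double[] t, double[] sigma)
    {
        double ll = 0.0;
        int len = Math.min(t.length, sigma.length);
        for (int i = 0; i < len; i++)
        {
            // branch on the target, exactly as computeLikelihood() does,
            // so that only one of the two log terms contributes
            //
            if (t[i] < 0.5)
            {
                ll += Math.log(1.0 - sigma[i]);
            }
            else
            {
                ll += Math.log(sigma[i]);
            }
        }
        return ll;
    }
    // --------------------------------------------------------------------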
    /**
     * Computes the output of the model at a specific example.
     *
     * @return result of the evaluation function
     */
    double evaluateOutput(Vector point_a)
    {
        // Debug
        //
        // System.out.println("AlgorithmRVM : evaluateOutput(Vector point_a)");

        // declare local variables
        //
        double result = bias_d;

        // debug message
        //
        if (debug_level_d)
        {
            // System.out.println("bias_d: " + bias_d);
            // System.out.println("point_a: ");
            // Matrix.printDoubleVector(point_a);
        }

        // evaluate the output at the example
        //
        for (int i = 0; i < weights_d.size(); i++)
        {
            double w_i = ((Double) weights_d.get(i)).doubleValue();
            Vector x_i = (Vector) evalx_d.get(i);
            result += w_i * K(point_a, x_i);

            if (debug_level_d)
            {
                // System.out.println("x_i: " + x_i + " i: " + i
                //                    + " result: " + result);
            }
        }

        // apply the logistic sigmoid link function
        //
        result = 1.0 / (1.0 + Math.exp(-result));

        // debug message
        //
        if (debug_level_d)
        {
            // System.out.println("result: " + result);
        }

        // return the result
        //
        return result;
    }

    /**
     * Evaluates the configured kernel function on the input vectors,
     * e.g. the linear kernel K(x, y) = (x . y).
     *
     * @param point1 first point
     * @param point2 second point
     *
     * @return kernel evaluation
     */
    double K(Vector point1, Vector point2)
    {
        // Debug
        //
        // System.out.println(
        //     "AlgorithmRVM : K(Vector point1, Vector point2)");

        double result = 0;

        if (kernel_type_d == KERNEL_TYPE_LINEAR)
        {
            result = MathUtil.linearKernel(point1, point2);
        }
        if (kernel_type_d == KERNEL_TYPE_RBF)
        {
            result = MathUtil.rbfKernel(point1, point2, 20);
        }
        if (kernel_type_d == KERNEL_TYPE_POLYNOMIAL)
        {
            result = MathUtil.polynomialKernel(point1, point2);
        }

        // return the result
        //
        return result;
    }

    /**
     * Computes the decision regions for the classification
     * algorithm once the corresponding flags have been initialized.
     */
    public void computeDecisionRegions()
    {
        // Debug
        //
        // System.out.println("AlgorithmRVM : computeDecisionRegions()");

        DisplayScale scale = output_panel_d.disp_area_d.getDisplayScale();

        double currentX = scale.xmin;
        double currentY = scale.ymin;

        // set precision
        //
        int outputWidth = output_panel_d.disp_area_d.getXPrecision();
        int outputHeight = output_panel_d.disp_area_d.getYPrecision();

        double incrementY = (scale.ymax - scale.ymin) / outputHeight;
        double incrementX = (scale.xmax - scale.xmin) / outputWidth;

        // declare a 2D array to store the class associations
        //
        output_canvas_d = new int[outputWidth][outputHeight];

        // loop through every point on the pixmap and determine which
        // class each pixel is associated with
        //
        int associated = 0;
        pro_box_d.setProgressMin(0);
        pro_box_d.setProgressMax(outputWidth);
        pro_box_d.setProgressCurr(20);

        for (int i = 0; i < outputWidth; i++)
        {
            currentX += incrementX;
            currentY = scale.ymin;

            // set current status
            //
            pro_box_d.setProgressCurr(i);
            // pro_box_d.appendMessage(".");

            for (int j = 0; j < outputHeight; j++)
            {
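    // --------------------------------------------------------------------
    // Illustrative sketch (not part of the original file; the listing is
    // truncated above at the page boundary): standard forms of the three
    // kernels dispatched by K(), on plain double[] vectors. The exact
    // parameterization used by MathUtil (e.g. the meaning of the constant
    // 20 passed to rbfKernel, and the degree and offset of the polynomial
    // kernel) is an assumption here.
    //
    static double linearKernelSketch(double[] x, double[] y)
    {
        // K(x, y) = x . y
        double dot = 0.0;
        for (int i = 0; i < x.length; i++)
        {
            dot += x[i] * y[i];
        }
        return dot;
    }

    static double rbfKernelSketch(double[] x, double[] y, double gamma)
    {
        // K(x, y) = exp(-gamma * ||x - y||^2)
        double dist2 = 0.0;
        for (int i = 0; i < x.length; i++)
        {
            double d = x[i] - y[i];
            dist2 += d * d;
        }
        return Math.exp(-gamma * dist2);
    }

    static double polynomialKernelSketch(double[] x, double[] y, int degree)
    {
        // K(x, y) = (x . y + 1)^degree
        return Math.pow(linearKernelSketch(x, y) + 1.0, degree);
    }
    // --------------------------------------------------------------------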
