// file: algorithmsvm.java,v
        // use the cached error value when the Lagrange multiplier is NOT at
        // a bound; otherwise we need to evaluate the current SVM decision
        // function based on the current alpha vector
        //
        if ((alpha1 > 0) && (alpha1 < bound_d)) {
            E1 = ((Double)error_cache_d.get(i1)).doubleValue();
        }
        else {
            E1 = evaluateOutput(x_i1) - y1;
        }

        if ((alpha2 > 0) && (alpha2 < bound_d)) {
            E2 = ((Double)error_cache_d.get(i2)).doubleValue();
        }
        else {
            E2 = evaluateOutput(x_i2) - y2;
        }

        // compute the upper and lower constraints, L and H, on
        // multiplier alpha2
        //
        double L = 0.0;
        double H = 0.0;
        double Lobj = 0.0;
        double Hobj = 0.0;

        if (y1 != y2) {
            L = Math.max(0, alpha2 - alpha1);
            H = Math.min(bound_d, alpha2 - alpha1 + bound_d);
        }
        else {
            L = Math.max(0, alpha1 + alpha2 - bound_d);
            H = Math.min(bound_d, alpha1 + alpha2);
        }

        // if the lower and upper constraints are the same - no progress
        //
        if (L == H) {
            return 0;
        }

        // recompute the Lagrange multiplier for example i2
        //
        double k11 = K(x_i1, x_i1);
        double k12 = K(x_i1, x_i2);
        double k22 = K(x_i2, x_i2);
        double eta = 2 * k12 - k11 - k22;
        double a2 = 0;

        if (eta < 0) {
            a2 = alpha2 - y2 * (E1 - E2) / eta;

            // constrain a2 to lie between L and H
            //
            if (a2 < L) {
                a2 = L;
            }
            else if (a2 > H) {
                a2 = H;
            }
        }

        // under unusual circumstances eta will not be negative, in which
        // case the objective function should be evaluated at each end of
        // the line segment using only those terms that depend on alpha2
        //
        else {
            Lobj = evaluateObjective(i1, i2, L);
            Hobj = evaluateObjective(i1, i2, H);

            if (Lobj > (Hobj + eps_d)) {
                a2 = L;
            }
            else if (Lobj < (Hobj - eps_d)) {
                a2 = H;
            }
            else {
                a2 = alpha2;
            }
        }
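        // for reference, in the conventional SMO notation the step above is
        // the clipped Newton update for alpha2: with eta' = k11 + k22 - 2*k12
        // (the negative of eta as computed here), the unconstrained optimum is
        //
        //     a2 = alpha2 + y2 * (E1 - E2) / eta'
        //
        // which is then clipped to the feasible segment [L, H]; dividing by
        // the negated quantity eta above yields exactly the same value
        //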
        // recompute the Lagrange multiplier for example i1
        //
        double bias_new = 0;
        double a1 = alpha1 + s * (alpha2 - a2);

        // the threshold b1 is valid when the new alpha1 is not at the
        // bounds, because it forces the output of the SVM to be y1
        // when the input is x1
        //
        double b1 = E1 + y1 * (a1 - alpha1) * k11 +
            y2 * (a2 - alpha2) * k12 + bias_d;

        if ((a1 > 0) && (a1 < bound_d)) {
            bias_new = b1;
        }

        // the threshold b2 is valid when the new alpha2 is not at the
        // bounds, because it forces the output of the SVM to be y2
        // when the input is x2
        //
        else {
            double b2 = E2 + y1 * (a1 - alpha1) * k12 +
                y2 * (a2 - alpha2) * k22 + bias_d;

            if ((a2 > 0) && (a2 < bound_d)) {
                bias_new = b2;
            }
            else {

                // when both new Lagrange multipliers are at bounds and L is
                // not equal to H, the interval [b1, b2] contains all
                // threshold values that are consistent with the KKT
                // conditions. In this case SMO chooses the threshold to be
                // halfway between b1 and b2
                //
                bias_new = (b1 + b2) / 2;
            }
        }

        // save the bias difference, update the error cache and set
        // the new bias
        //
        double bias_delta = bias_new - bias_d;
        bias_d = bias_new;

        // update the error cache
        //
        double w1 = y1 * (a1 - alpha1);
        double w2 = y2 * (a2 - alpha2);

        // when a joint optimization occurs, the stored errors for all
        // non-bound multipliers (alpha) that are NOT involved in the
        // optimization are updated
        //
        for (int i = 0; i < number_d; i++) {
            double alpha_i = ((Double)alpha_d.get(i)).doubleValue();
            if (alpha_i > 0 && alpha_i < bound_d) {
                double cache_i = ((Double)error_cache_d.get(i)).doubleValue();
                Vector x_i = (Vector)x_d.get(i);
                cache_i += w1 * K(x_i1, x_i) + w2 * K(x_i2, x_i) - bias_delta;
                error_cache_d.set(i, new Double(cache_i));
            }
        }

        error_cache_d.set(i1, new Double(0));
        error_cache_d.set(i2, new Double(0));

        // store a1 and a2 in the alpha array
        //
        alpha_d.set(i1, new Double(a1));
        alpha_d.set(i2, new Double(a2));

        // return the progress made
        //
        return 1;
    }
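    /*
     * NOTE: the outer SMO driver is not part of this excerpt, and the name
     * and signature of the joint-optimization method above are not shown.
     * Assuming it is exposed as "int takeStep(int i1, int i2)", as in
     * Platt's pseudocode, a simplified driver that sweeps candidate pairs
     * (instead of using Platt's two-level example-selection heuristic)
     * would look roughly like this:
     *
     *     boolean changed = true;
     *     while (changed) {
     *         changed = false;
     *         for (int i1 = 0; i1 < number_d; i1++) {
     *             for (int i2 = 0; i2 < number_d; i2++) {
     *                 if ((i1 != i2) && (takeStep(i1, i2) == 1)) {
     *                     changed = true;
     *                 }
     *             }
     *         }
     *     }
     */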
    /**
     * method determines the value of the objective function at the bounds
     *
     * @param i1 index of the first example
     * @param i2 index of the second example
     * @param lim bound at which the objective function is evaluated
     *
     * @return result of the objective evaluation
     */
    double evaluateObjective(int i1, int i2, double lim) {

        // declare local variables
        //
        double y1 = ((Double)y_d.get(i1)).doubleValue();
        double y2 = ((Double)y_d.get(i2)).doubleValue();
        double s = y1 * y2;
        double alpha1 = ((Double)alpha_d.get(i1)).doubleValue();
        double alpha2 = ((Double)alpha_d.get(i2)).doubleValue();
        Vector x_i1 = (Vector)x_d.get(i1);
        Vector x_i2 = (Vector)x_d.get(i2);
        double aa1 = alpha1 + s * (alpha2 - lim);
        double t1 = -y1 * aa1 / 2;
        double t2 = -y2 * lim / 2;

        // accumulate the kernel terms of the objective that depend on
        // alpha1 and alpha2
        //
        double obj = aa1 + lim;
        for (int k = 0; k < number_d; k++) {
            double alpha_k = ((Double)alpha_d.get(k)).doubleValue();
            double y_k = ((Double)y_d.get(k)).doubleValue();
            Vector x_k = (Vector)x_d.get(k);
            if (alpha_k > 0) {
                obj += t1 * y_k * K(x_i1, x_k);
                obj += t2 * y_k * K(x_i2, x_k);
            }
        }

        // return the result
        //
        return obj;
    }

    /**
     * computes the output of the SVM at a specific example
     *
     * @param point_a input point as a Vector of coordinates
     *
     * @return value of the SVM decision function at the point
     */
    double evaluateOutput(Vector point_a) {

        // declare local variables
        //
        double result = -bias_d;

        // evaluate the output at the example
        //
        for (int i = 0; i < number_d; i++) {
            double y_i = ((Double)y_d.get(i)).doubleValue();
            double alpha_i = ((Double)alpha_d.get(i)).doubleValue();
            Vector x_i = (Vector)x_d.get(i);
            result += y_i * alpha_i * K(point_a, x_i);
        }

        // return the result
        //
        return result;
    }

    /**
     * evaluates the selected kernel function on the input vectors,
     * e.g. the linear kernel K(x,y) = (x . y)
     *
     * @param point1 first input point Vector
     * @param point2 second input point Vector
     *
     * @return kernel evaluation as a double
     */
    double K(Vector point1, Vector point2) {

        double result = 0;

        if (kernel_type_d == KERNEL_TYPE_LINEAR) {
            result = MathUtil.linearKernel(point1, point2);
        }
        if (kernel_type_d == KERNEL_TYPE_RBF) {
            result = MathUtil.rbfKernel(point1, point2);
        }
        if (kernel_type_d == KERNEL_TYPE_POLYNOMIAL) {
            result = MathUtil.polynomialKernel(point1, point2);
        }

        // return the result
        //
        return result;
    }
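    // ------------------------------------------------------------------
    // the MathUtil kernel implementations are not part of this excerpt;
    // the static helpers below are only a sketch of what such kernels
    // conventionally compute, and the parameters gamma, coef0 and degree
    // are illustrative assumptions, not values taken from this code base
    // ------------------------------------------------------------------
    static double linearKernelSketch(Vector a, Vector b) {

        // K(x, y) = x . y
        //
        double dot = 0.0;
        for (int i = 0; i < a.size(); i++) {
            dot += ((Double)a.get(i)).doubleValue() *
                ((Double)b.get(i)).doubleValue();
        }
        return dot;
    }

    static double rbfKernelSketch(Vector a, Vector b, double gamma) {

        // K(x, y) = exp(-gamma * ||x - y||^2)
        //
        double dist2 = 0.0;
        for (int i = 0; i < a.size(); i++) {
            double d = ((Double)a.get(i)).doubleValue() -
                ((Double)b.get(i)).doubleValue();
            dist2 += d * d;
        }
        return Math.exp(-gamma * dist2);
    }

    static double polynomialKernelSketch(Vector a, Vector b,
                                         double coef0, int degree) {

        // K(x, y) = (x . y + coef0)^degree
        //
        return Math.pow(linearKernelSketch(a, b) + coef0, degree);
    }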
    /**
     * method computes all the support vectors
     */
    public void computeSupportVectors() {

        // clear support vectors
        //
        support_vectors_d.clear();

        // collect every example whose Lagrange multiplier is non-zero
        //
        for (int i = 0; i < number_d; i++) {
            double alpha = ((Double)alpha_d.get(i)).doubleValue();
            int index = i;
            if (alpha != 0.0) {
                if (index >= set1_d.size()) {
                    index -= set1_d.size();
                    support_vectors_d.add(set2_d.get(index));
                }
                else {
                    support_vectors_d.add(set1_d.get(index));
                }
            }
        }   // end of the loop
    }

    /**
     * method computes the line of discrimination (decision regions) for
     * the classification algorithm when the corresponding flags have been
     * initialized
     */
    public void computeDecisionRegions() {

        // Debug
        //System.out.println(algo_id + ": computeDecisionRegions()");

        DisplayScale scale = output_panel_d.disp_area_d.getDisplayScale();
        double currentX = scale.xmin;
        double currentY = scale.ymin;

        // set precision
        //
        int outputWidth = output_panel_d.disp_area_d.getXPrecision();
        int outputHeight = output_panel_d.disp_area_d.getYPrecision();
        double incrementY = (scale.ymax - scale.ymin) / outputHeight;
        double incrementX = (scale.xmax - scale.xmin) / outputWidth;

        // declare a 2D array to store the class associations
        //
        output_canvas_d = new int[outputWidth][outputHeight];

        // loop through each and every point on the pixmap and
        // determine which class each pixel is associated with
        //
        int associated = 0;

        pro_box_d.setProgressMin(0);
        pro_box_d.setProgressMax(outputWidth);
        pro_box_d.setProgressCurr(20);

        for (int i = 0; i < outputWidth; i++) {

            currentX += incrementX;
            currentY = scale.ymin;

            // set current status
            //
            pro_box_d.setProgressCurr(i);
            //pro_box_d.appendMessage(".");

            for (int j = 0; j < outputHeight; j++) {

                // declare the current pixel point
                //
                currentY += incrementY;
                MyPoint pixel = new MyPoint(currentX, currentY);

                Vector<Double> curr_point = new Vector<Double>();
                double x_val = pixel.x;
                double y_val = pixel.y;
                x_val /= 2.0;
                y_val /= 2.0;
                curr_point.add(new Double(x_val));
                curr_point.add(new Double(y_val));

                double output = evaluateOutput(curr_point);
                //System.out.println(message.toString());

                if (output >= 0) {
                    associated = 0;
                }
                else {
                    associated = 1;
                }

                // put an entry in the output canvas array to indicate
                // which class the current pixel is closest to
                //
                output_canvas_d[i][j] = associated;

                // add a point to the vector of decision region points if
                // the class that the current point is associated with is
                // different from the class that the previous point was
                // associated with, i.e., a transition point
                //
                if (j > 0 && i > 0) {
                    if (associated != output_canvas_d[i][j - 1] ||
                        associated != output_canvas_d[i - 1][j]) {
                        decision_regions_d.add(pixel);
                    }
                }
            }
        }   // end of the loop
    }

    /**
     * computes the classification errors for each class and displays
     * the results
     */
    public void computeErrors() {

        // declare local variables
        //
        String text;
        double error;
        int samples = 0;
        int samples1 = 0;
        int samples2 = 0;
        int samples3 = 0;
        int samples4 = 0;
        int incorrect = 0;
        int incorrect1 = 0;
        int incorrect2 = 0;
        int incorrect3 = 0;
        int incorrect4 = 0;

        DisplayScale scale = output_panel_d.disp_area_d.getDisplayScale();

        // set scales
        //
        int outputWidth = output_panel_d.disp_area_d.getXPrecision();
        int outputHeight = output_panel_d.disp_area_d.getYPrecision();
        double incrementY = (scale.ymax - scale.ymin) / outputHeight;
        double incrementX = (scale.xmax - scale.xmin) / outputWidth;

        // compute the classification error for the first set
        //
        for (int i = 0; i < set1_d.size(); i++) {
            MyPoint point = (MyPoint)set1_d.elementAt(i);
            samples1++;
            if ((point.x > scale.xmin && point.x < scale.xmax) &&
                (point.y > scale.ymin && point.y < scale.ymax)) {
                if (output_canvas_d[(int)((point.x - scale.xmin) / incrementX)]
                                   [(int)((point.y - scale.ymin) / incrementY)] != 0) {
                    incorrect1++;
                }
            }
        }

        if (set1_d.size() > 0) {
            error = ((double)incorrect1 / (double)samples1) * 100.0;
            text = new String(
                " Results for class 0:\n" +
                " Total number of samples: " + samples1 + "\n" +
                " Misclassified samples: " + incorrect1 + "\n" +
                " Classification error: " + MathUtil.setDecimal(error, 2) + "%");
            pro_box_d.appendMessage(text);
        }

        // compute the classification error for the second set
        //
        for (int i = 0; i < set2_d.size(); i++) {
            MyPoint point = (MyPoint)set2_d.elementAt(i);
            samples2++;
            if ((point.x > scale.xmin && point.x < scale.xmax) &&
                (point.y > scale.ymin && point.y < scale.ymax)) {
                if (output_canvas_d[(int)((point.x - scale.xmin) / incrementX)]
                                   [(int)((point.y - scale.ymin) / incrementY)] != 1) {
                    incorrect2++;
                }
            }
        }

        if (set2_d.size() > 0) {
            error = ((double)incorrect2 / (double)samples2) * 100.0;
            text = new String(
                " Results for class 1:\n" +
                " Total number of samples: " + samples2 + "\n" +
                " Misclassified samples: " + incorrect2 + "\n" +
                " Classification error: " + MathUtil.setDecimal(error, 2) + "%");
            pro_box_d.appendMessage(text);
        }

        // compute the overall classification error
        //
        samples = samples1 + samples2 + samples3 + samples4;
        incorrect = incorrect1 + incorrect2 + incorrect3 + incorrect4;
        error = ((double)incorrect / (double)samples) * 100.0;
        text = new String(
            " Overall results:\n" +
            " Total number of samples: " + samples + "\n" +
            " Misclassified samples: " + incorrect + "\n" +
            " Classification error: " + MathUtil.setDecimal(error, 2) + "%");
        pro_box_d.appendMessage(text);
    }
}
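/*
 * Hypothetical usage sketch (not part of the original file): once the SMO
 * training loop has converged, the visualization methods above would
 * typically be called in this order. The class name "AlgorithmSVM" is
 * assumed from the file name, and the construction/training calls are
 * placeholders for illustration only.
 *
 *     AlgorithmSVM svm = ...;        // configured with data sets and kernel
 *     // ... run the SMO training loop until no multiplier changes ...
 *     svm.computeSupportVectors();   // collect examples with alpha != 0
 *     svm.computeDecisionRegions();  // scan the display grid with evaluateOutput()
 *     svm.computeErrors();           // report per-class and overall error
 */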