algorithmsvm.java,v
// method: takeStep
//
// arguments:
//  int i1: (input) first example to be evaluated
//  int i2: (input) second example to be evaluated
//
// return: integer value indicating status
//
// method that jointly optimizes two Lagrange multipliers and
// maintains the feasibility of the dual QP
//
int takeStep(int i1, int i2)
{
    // if the two examples being jointly optimized are the same - no progress
    //
    if (i1 == i2) {
        return 0;
    }

    // declare and initialize local variables
    //
    double E1 = 0;
    double E2 = 0;
    double alpha1 = ((Double)alpha_d.get(i1)).doubleValue();
    double alpha2 = ((Double)alpha_d.get(i2)).doubleValue();
    double y1 = ((Double)y_d.get(i1)).doubleValue();
    double y2 = ((Double)y_d.get(i2)).doubleValue();
    double s = y1 * y2;
    Vector x_i1 = (Vector)x_d.get(i1);
    Vector x_i2 = (Vector)x_d.get(i2);

    // look up the error in the error cache if the corresponding Lagrange
    // multiplier is NOT at a bound; otherwise we need to evaluate the
    // current SVM decision function based on the current alpha vector
    //
    if ((alpha1 > 0) && (alpha1 < bound_d)) {
        E1 = ((Double)error_cache_d.get(i1)).doubleValue();
    } else {
        E1 = evaluateOutput(x_i1) - y1;
    }

    if ((alpha2 > 0) && (alpha2 < bound_d)) {
        E2 = ((Double)error_cache_d.get(i2)).doubleValue();
    } else {
        E2 = evaluateOutput(x_i2) - y2;
    }

    // compute the upper and lower constraints, L and H, on multiplier alpha2
    //
    double L = 0.0;
    double H = 0.0;
    double Lobj = 0.0;
    double Hobj = 0.0;

    if (y1 != y2) {
        L = Math.max(0, alpha2 - alpha1);
        H = Math.min(bound_d, alpha2 - alpha1 + bound_d);
    } else {
        L = Math.max(0, alpha1 + alpha2 - bound_d);
        H = Math.min(bound_d, alpha1 + alpha2);
    }

    // if the lower and upper constraints are the same - no progress
    //
    if (L == H) {
        return 0;
    }

    // recompute the Lagrange multiplier for example i2
    //
    double k11 = K(x_i1, x_i1);
    double k12 = K(x_i1, x_i2);
    double k22 = K(x_i2, x_i2);
    double eta = 2 * k12 - k11 - k22;
    double a2 = 0;

    if (eta < 0) {
        a2 = alpha2 - y2 * (E1 - E2) / eta;

        // constrain a2 to lie between L and H
        //
        if (a2 < L) {
            a2 = L;
        } else if (a2 > H) {
            a2 = H;
        }
    }

    // under unusual circumstances eta will not be negative, in which case
    // the objective function should be evaluated at each end of the line
    // segment using only those terms that depend on alpha2
    //
    else {
        Lobj = evaluateObjective(i1, i2, L);
        Hobj = evaluateObjective(i1, i2, H);

        if (Lobj > (Hobj + eps_d)) {
            a2 = L;
        } else if (Lobj < (Hobj - eps_d)) {
            a2 = H;
        } else {
            a2 = alpha2;
        }
    }

    // recompute the Lagrange multiplier for example i1
    //
    double bias_new = 0;
    double a1 = alpha1 + s * (alpha2 - a2);

    // the threshold b1 is valid when the new alpha1 is not at the bounds,
    // because it forces the output of the SVM to be y1 when the input is x1
    //
    double b1 = E1 + y1 * (a1 - alpha1) * k11
        + y2 * (a2 - alpha2) * k12 + bias_d;

    if ((a1 > 0) && (a1 < bound_d)) {
        bias_new = b1;
    }

    // the threshold b2 is valid when the new alpha2 is not at the bounds,
    // because it forces the output of the SVM to be y2 when the input is x2
    //
    else {
        double b2 = E2 + y1 * (a1 - alpha1) * k12
            + y2 * (a2 - alpha2) * k22 + bias_d;

        if ((a2 > 0) && (a2 < bound_d)) {
            bias_new = b2;
        } else {

            // when both new Lagrange multipliers are at bounds and L is not
            // equal to H, the interval [b1, b2] contains all threshold
            // values that are consistent with the KKT conditions. In this
            // case SMO chooses the threshold to be halfway between b1 and b2
            //
            bias_new = (b1 + b2) / 2;
        }
    }

    // save the bias difference, update the error cache and set the new bias
    //
    double bias_delta = bias_new - bias_d;
    bias_d = bias_new;

    // update the error cache
    //
    double w1 = y1 * (a1 - alpha1);
    double w2 = y2 * (a2 - alpha2);

    // when a joint optimization occurs, the stored errors for all non-bound
    // multipliers (alpha) that are NOT involved in the optimization are
    // updated
    //
    for (int i = 0; i < number_d; i++) {
        double alpha_i = ((Double)alpha_d.get(i)).doubleValue();

        if (alpha_i > 0 && alpha_i < bound_d) {
            double cache_i = ((Double)error_cache_d.get(i)).doubleValue();
            Vector x_i = (Vector)x_d.get(i);
            cache_i += w1 * K(x_i1, x_i) + w2 * K(x_i2, x_i) - bias_delta;
            error_cache_d.set(i, new Double(cache_i));
        }
    }

    error_cache_d.set(i1, new Double(0));
    error_cache_d.set(i2, new Double(0));

    // store a1 and a2 in the alpha array
    //
    alpha_d.set(i1, new Double(a1));
    alpha_d.set(i2, new Double(a2));

    // return the progress made
    //
    return 1;
}
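For reference, the update that takeStep applies to the two selected multipliers can be restated as equations; this is just the code above written compactly, with C standing for bound_d, s = y1*y2, and E_i for the cached error of example i:

\[
\eta = 2K(x_1,x_2) - K(x_1,x_1) - K(x_2,x_2), \qquad
\alpha_2' = \alpha_2 - \frac{y_2\,(E_1 - E_2)}{\eta} \quad (\eta < 0),
\]
\[
L = \begin{cases} \max(0,\ \alpha_2 - \alpha_1), & y_1 \neq y_2 \\ \max(0,\ \alpha_1 + \alpha_2 - C), & y_1 = y_2 \end{cases}
\qquad
H = \begin{cases} \min(C,\ C + \alpha_2 - \alpha_1), & y_1 \neq y_2 \\ \min(C,\ \alpha_1 + \alpha_2), & y_1 = y_2 \end{cases}
\]
\[
\alpha_2^{\mathrm{new}} = \min\bigl(\max(\alpha_2',\ L),\ H\bigr), \qquad
\alpha_1^{\mathrm{new}} = \alpha_1 + s\,(\alpha_2 - \alpha_2^{\mathrm{new}}).
\]

When \(\eta \geq 0\) the code instead evaluates the objective at L and H via evaluateObjective, exactly as in the else branch above.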
// method: evaluateObjective
//
// arguments:
//  int i1: (input) first example
//  int i2: (input) second example
//  double lim: (input) bound at which the function is evaluated
//
// return: result of the evaluation function
//
// method determines the value of the objective function at the bounds
//
double evaluateObjective(int i1, int i2, double lim)
{
    // declare local variables
    //
    double y1 = ((Double)y_d.get(i1)).doubleValue();
    double y2 = ((Double)y_d.get(i2)).doubleValue();
    double s = y1 * y2;
    double alpha1 = ((Double)alpha_d.get(i1)).doubleValue();
    double alpha2 = ((Double)alpha_d.get(i2)).doubleValue();
    Vector x_i1 = (Vector)x_d.get(i1);
    Vector x_i2 = (Vector)x_d.get(i2);

    double aa1 = alpha1 + s * (alpha2 - lim);
    double t1 = -y1 * aa1 / 2;
    double t2 = -y2 * lim / 2;

    // accumulate the terms of the dual objective that depend on alpha1
    // and alpha2
    //
    double obj = aa1 + lim;

    for (int k = 0; k < number_d; k++) {
        double alpha_k = ((Double)alpha_d.get(k)).doubleValue();
        double y_k = ((Double)y_d.get(k)).doubleValue();
        Vector x_k = (Vector)x_d.get(k);

        if (alpha_k > 0) {
            obj += t1 * y_k * K(x_i1, x_k);
            obj += t2 * y_k * K(x_i2, x_k);
        }
    }

    // return the result
    //
    return obj;
}

// method: evaluateOutput
//
// arguments:
//  Vector point_a: (input) example at which the decision function is
//                  evaluated
//
// return: result of the evaluation function
//
// this method computes the output of the SVM decision function for a
// specific example
//
double evaluateOutput(Vector point_a) {

    // declare local variables
    //
    double result = -bias_d;

    // evaluate the output at the example
    //
    for (int i = 0; i < number_d; i++) {
        double y_i = ((Double)y_d.get(i)).doubleValue();
        double alpha_i = ((Double)alpha_d.get(i)).doubleValue();
        Vector x_i = (Vector)x_d.get(i);
        result += y_i * alpha_i * K(point_a, x_i);
    }

    // return the result
    //
    return result;
}
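Written out, the quantity evaluateOutput returns is the SVM decision function used throughout this class, and the values held in error_cache_d (and recomputed in takeStep when a multiplier sits at a bound) are its deviations from the target labels:

\[
f(x) = \sum_{i=1}^{N} \alpha_i\, y_i\, K(x_i, x) \;-\; b, \qquad E_i = f(x_i) - y_i,
\]

with N equal to number_d and b equal to bias_d.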
// method: K
//
// arguments:
//  Vector point1: (input) first example
//  Vector point2: (input) second example
//
// return: kernel evaluation
//
// this method evaluates the selected kernel on the input vectors; for the
// linear kernel, K(x, y) = (x . y)
//
double K(Vector point1, Vector point2)
{
    double result = 0;

    if (kernel_type_d == KERNEL_TYPE_LINEAR) {
        result = MathUtil.linearKernel(point1, point2);
    }

    if (kernel_type_d == KERNEL_TYPE_RBF) {
        result = MathUtil.rbfKernel(point1, point2);
    }

    if (kernel_type_d == KERNEL_TYPE_POLYNOMIAL) {
        result = MathUtil.polynomialKernel(point1, point2);
    }

    // return the result
    //
    return result;
}
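The MathUtil kernel helpers called in K() are not part of this delta. A minimal, self-contained sketch of what they might look like follows; the method names mirror the calls above, but the class name, the RBF width (gamma), and the polynomial degree and offset are assumptions, not values taken from the applet.

// Hypothetical stand-ins for the MathUtil kernel routines; gamma, the
// polynomial degree, and the +1 offset below are assumed values.
import java.util.Vector;

class KernelSketch {

    // linear kernel: K(x, y) = x . y
    static double linearKernel(Vector x, Vector y) {
        double dot = 0.0;
        for (int i = 0; i < x.size(); i++) {
            dot += ((Double)x.get(i)).doubleValue()
                 * ((Double)y.get(i)).doubleValue();
        }
        return dot;
    }

    // RBF kernel: K(x, y) = exp(-gamma * ||x - y||^2)
    static double rbfKernel(Vector x, Vector y) {
        double gamma = 1.0;    // assumed kernel width
        double dist2 = 0.0;
        for (int i = 0; i < x.size(); i++) {
            double d = ((Double)x.get(i)).doubleValue()
                     - ((Double)y.get(i)).doubleValue();
            dist2 += d * d;
        }
        return Math.exp(-gamma * dist2);
    }

    // polynomial kernel: K(x, y) = (x . y + 1)^degree
    static double polynomialKernel(Vector x, Vector y) {
        int degree = 2;        // assumed degree
        return Math.pow(linearKernel(x, y) + 1.0, degree);
    }

    // small usage example on two 2-dimensional points
    public static void main(String[] args) {
        Vector a = new Vector();
        a.add(new Double(1.0));
        a.add(new Double(2.0));
        Vector b = new Vector();
        b.add(new Double(0.5));
        b.add(new Double(-1.0));
        System.out.println("linear: " + linearKernel(a, b));
        System.out.println("rbf:    " + rbfKernel(a, b));
        System.out.println("poly:   " + polynomialKernel(a, b));
    }
}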
// method: computeSupportVectors
//
// arguments: none
// return : none
//
// method computes all the support vectors
//
public void computeSupportVectors()
{
    // clear support vectors
    //
    support_vectors_d.clear();

    // collect every example whose Lagrange multiplier is non-zero
    //
    for (int i = 0; i < number_d; i++)
    {
        double alpha = ((Double)alpha_d.get(i)).doubleValue();
        int index = i;

        if (alpha != 0.0) {
            if (index >= set1_d.size()) {
                index -= set1_d.size();
                support_vectors_d.add(set2_d.get(index));
            } else {
                support_vectors_d.add(set1_d.get(index));
            }
        }
    } // end of the loop
}

// method: computeDecisionRegions
//
// arguments: none
// return : none
//
// method computes the line of discrimination for the classification
// algorithm when the corresponding flags have been initialized
//
public void computeDecisionRegions()
{
    // Debug
    //System.out.println(algo_id + ": computeDecisionRegions()");

    DisplayScale scale = output_panel_d.disp_area_d.getDisplayScale();
    double currentX = scale.xmin;
    double currentY = scale.ymin;

    // set precision
    //
    int outputWidth = output_panel_d.disp_area_d.getXPrecision();
    int outputHeight = output_panel_d.disp_area_d.getYPrecision();
    double incrementY = (scale.ymax - scale.ymin) / outputHeight;
    double incrementX = (scale.xmax - scale.xmin) / outputWidth;

    // declare a 2D array to store the class associations
    //
    output_canvas_d = new int[outputWidth][outputHeight];

    // loop through each and every point on the pixmap and determine which
    // class each pixel is associated with
    //
    int associated = 0;
    pro_box_d.setProgressMin(0);
    pro_box_d.setProgressMax(outputWidth);
    pro_box_d.setProgressCurr(20);

    for (int i = 0; i < outputWidth; i++)
    {
        currentX += incrementX;
        currentY = scale.ymin;

        // set current status
        //
        pro_box_d.setProgressCurr(i);
        //pro_box_d.appendMessage(".");

        for (int j = 0; j < outputHeight; j++) {

            // declare the current pixel point
            //
            currentY += incrementY;
            MyPoint pixel = new MyPoint(currentX, currentY);

            Vector curr_point = new Vector();
            double x_val = pixel.x;
            double y_val = pixel.y;
            x_val /= 2.0;
            y_val /= 2.0;
            curr_point.add(new Double(x_val));
            curr_point.add(new Double(y_val));

            double output = evaluateOutput(curr_point);
            //System.out.println(message.toString());

            if (output >= 0) {
                associated = 0;
            } else {
                associated = 1;
            }

            // put an entry in the output canvas array to indicate which
            // class the current pixel is closest to
            //
            output_canvas_d[i][j] = associated;

            // add a point to the vector of decision region points if the
            // class that the current point is associated with differs from
            // the class that the previous point was associated with,
            // i.e., a transition point
            //
            if (j > 0 && i > 0) {
                if (associated != output_canvas_d[i][j - 1] ||
                    associated != output_canvas_d[i - 1][j]) {
                    decision_regions_d.add(pixel);
                }
            }
        } // end of the inner loop
    } // end of the outer loop
}

// method: computeErrors
//
// arguments: none
// return : none
//
// method computes the classification error for each class and displays
// the results
//
public void computeErrors() {

    // declare local variables
    //
    String text;
    double error;
    int samples = 0;
    int samples1 = 0;
    int samples2 = 0;
    int samples3 = 0;
    int samples4 = 0;
    int incorrect = 0;
    int incorrect1 = 0;
    int incorrect2 = 0;
    int incorrect3 = 0;
    int incorrect4 = 0;

    DisplayScale scale = output_panel_d.disp_area_d.getDisplayScale();

    // set scales
    //
    int outputWidth = output_panel_d.disp_area_d.getXPrecision();
    int outputHeight = output_panel_d.disp_area_d.getYPrecision();
    double incrementY = (scale.ymax - scale.ymin) / outputHeight;
    double incrementX = (scale.xmax - scale.xmin) / outputWidth;

    // loop over the samples of class 0 (the loop header is not included
    // in this delta; it is implied by the use of elementAt(i) below)
    //
    for (int i = 0; i < set1_d.size(); i++) {
        MyPoint point = (MyPoint)set1_d.elementAt(i);
        samples1++;

        if ((point.x > scale.xmin && point.x < scale.xmax) &&
            (point.y > scale.ymin && point.y < scale.ymax)) {
            if (output_canvas_d[(int)((point.x - scale.xmin) / incrementX)]
                               [(int)((point.y - scale.ymin) / incrementY)] != 0) {
                incorrect1++;
            }
        }
    }

    // report the results for class 0 (the condition guarding this block
    // is not included in this delta)
    //
    {
        error = ((double)incorrect1 / (double)samples1) * 100.0;

        text = new String(
            " Results for class 0:\n" +
            " Total number of samples: " + samples1 + "\n" +
            " Misclassified samples: " + incorrect1 + "\n" +
            " Classification error: " +
            MathUtil.setDecimal(error, 2) + "%");
        pro_box_d.appendMessage(text);
    }

    // loop over the samples of class 1 (the loop header is not included
    // in this delta)
    //
    for (int i = 0; i < set2_d.size(); i++) {
        MyPoint point = (MyPoint)set2_d.elementAt(i);
        samples2++;

        if ((point.x > scale.xmin && point.x < scale.xmax) &&
            (point.y > scale.ymin && point.y < scale.ymax))
        {
            if (output_canvas_d[(int)((point.x - scale.xmin) / incrementX)]
                               [(int)((point.y - scale.ymin) / incrementY)] != 1) {
                incorrect2++;
            }
        }
    }

    // report the results for class 1 (the condition guarding this block
    // is not included in this delta)
    //
    {
        error = ((double)incorrect2 / (double)samples2) * 100.0;

        text = new String(
            " Results for class 1:\n" +
            " Total number of samples: " + samples2 + "\n" +
            " Misclassified samples: " + incorrect2 + "\n" +
            " Classification error: " +
            MathUtil.setDecimal(error, 2) + "%");
        pro_box_d.appendMessage(text);
    }
}

// RCS trailer: the entry for revision 1.1 (log message: "Initial revision")
// closes the file. Its delta touches only a few debug / progress lines,
// which in revision 1.1 read:
//
//   System.out.println(algo_id + ": run()");
//   pro_box_d.appendMessage(" Step Sequence Complete");
//   System.out.println(algo_id + ": step1()");
//   System.out.println(algo_id + ": computeDecisionRegions()");
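As a closing illustration, computeErrors and computeDecisionRegions both rely on the same coordinate-to-grid mapping when indexing output_canvas_d. The standalone sketch below reproduces that mapping on its own; the window bounds and canvas resolution are made-up example values, not the applet's defaults.

// Standalone illustration of the coordinate-to-grid mapping used above:
// a point (x, y) inside the display window maps to the canvas cell
// [(x - xmin) / incrementX][(y - ymin) / incrementY].
public class GridMapSketch {
    public static void main(String[] args) {
        double xmin = -1.0, xmax = 1.0;   // assumed display bounds
        double ymin = -1.0, ymax = 1.0;
        int outputWidth = 100;            // assumed canvas resolution
        int outputHeight = 100;

        double incrementX = (xmax - xmin) / outputWidth;
        double incrementY = (ymax - ymin) / outputHeight;

        // a sample point strictly inside the window
        double px = 0.25, py = -0.5;

        int i = (int)((px - xmin) / incrementX);
        int j = (int)((py - ymin) / incrementY);

        System.out.println("point (" + px + ", " + py + ") -> cell ["
                           + i + "][" + j + "]");
    }
}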