📄 OLMAM.java
/*
 * $RCSfile: OLMAM.java,v $
 * $Revision: 1.11 $
 * $Date: 2005/05/05 02:03:48 $
 *
 * NeuralNetworkToolkit
 * Copyright (C) 2004 Universidade de Brasília
 *
 * This file is part of NeuralNetworkToolkit.
 *
 * NeuralNetworkToolkit is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * NeuralNetworkToolkit is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with NeuralNetworkToolkit; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 - USA.
 */
package neuralnetworktoolkit.methods.gradientbased.quasinewton.lm;

import neuralnetworktoolkit.StatisticalResults;
import neuralnetworktoolkit.math.NeuralMath;
import neuralnetworktoolkit.methods.TrainingParameters;
import neuralnetworktoolkit.neuralnetwork.INeuralNetwork;

/**
 * Levenberg-Marquardt training with an adaptive momentum term (OLMAM): each
 * accepted weight update is combined with the previous one through two
 * Lagrange multipliers computed from the Jacobian products.
 *
 * @version $Revision: 1.11 $ - $Date: 2005/05/05 02:03:48 $
 *
 * @author <a href="mailto:hugoiver@yahoo.com.br">Hugo Iver V. Gonçalves</a>
 * @author <a href="mailto:rodbra@pop.com.br">Rodrigo C. M. Coimbra</a>
 */
public class OLMAM extends ModifiedLevenbergMarquardt {

    /* (non-Javadoc)
     * @see neuralnetworktoolkit.methods.ITrainingMethod#train(neuralnetworktoolkit.INeuralNetwork, TrainingParameters)
     */
    public StatisticalResults train(INeuralNetwork neuralNetwork,
            TrainingParameters parameters) {
        LevenbergMarquardtParameters param;
        double[] diagonal;

        param = (LevenbergMarquardtParameters) parameters;
        boolean firstUpdate = false;
        numberOfSynapses = neuralNetwork.numberOfSynapses();
        errorGoal = param.getError();
        lmKind = param.getLmKind();
        maximumNumberOfIterations = param.getMaxIterations();
        previousDeltaW = new double[neuralNetwork.numberOfSynapses()][1];
        inicio = System.currentTimeMillis();

        do {
            if (this.errorDiminished) {
                // Recompute J^T x J, J^T x error and the total error energy only
                // after an accepted (error-reducing) weight update.
                jtXjAndJtXerrorAndError = calculateJacobianTxJacobianAndJacobianTxError(
                        neuralNetwork, param.getInputs(), param.getOutputs(), 50);
                totalErrorEnergy = jtXjAndJtXerrorAndError.getError();
                jacobianTxJacobian = jtXjAndJtXerrorAndError.getJtXj();
                jacobianTxError = jtXjAndJtXerrorAndError.getJtXerror();
            }

            // Hessian approximation: J^T x J plus a lambda-scaled diagonal term.
            if (lmKind == LevenbergMarquardt.COMPLETE_LM) {
                diagonal = NeuralMath.diagonalArray(jacobianTxJacobian);
                diagonal = NeuralMath.constantTimesArray(lambda, diagonal);
                hessian = NeuralMath.matrixSumDiagonal(jacobianTxJacobian, diagonal);
            } else {
                hessian = NeuralMath.matrixSumConstantDiagonal(lambda,
                        jacobianTxJacobian);
            }

            inverseHessian = NeuralMath.inverseMatrix(hessian);
            minusLMDeltaW = NeuralMath.matrixProduct(inverseHessian, jacobianTxError);

            if ((numberOfIterations > 0) && (firstUpdate)) {
                // OLMAM step: combine the Levenberg-Marquardt direction with the
                // previously accepted step through two Lagrange multipliers.
                IGG = NeuralMath.matrixProduct(NeuralMath
                        .calculateTransposed(jacobianTxError), minusLMDeltaW)[0][0];
                IGF = NeuralMath.matrixProduct(NeuralMath
                        .calculateTransposed(jacobianTxError), previousDeltaW)[0][0];
                IFF = NeuralMath.matrixProduct(NeuralMath
                        .calculateTransposed(previousDeltaW), NeuralMath
                        .matrixProduct(hessian, previousDeltaW))[0][0];
                epsilon = Math.sqrt(1 - ((IGF * IGF) / (IGG * IFF)));
                deltaP = Math.sqrt(IGG);
                deltaQt = -epsilon * deltaP * Math.sqrt(IGG);
                lagrange2 = (.5) * Math.sqrt((IFF * IGG - IGF * IGF)
                        / (IGG * deltaP * deltaP - deltaQt * deltaQt));
                lagrange1 = (-2 * lagrange2 * deltaQt + IGF) / IGG;
                deltaW = NeuralMath.matrixSum(NeuralMath.constantTimesMatrix(
                        -(lagrange1 / (2 * lagrange2)), minusLMDeltaW),
                        NeuralMath.constantTimesMatrix(1 / (2 * lagrange2),
                                previousDeltaW));
            } else {
                // Plain Levenberg-Marquardt step until a first update is accepted.
                deltaW = NeuralMath.constantTimesMatrix(-1, minusLMDeltaW);
            }

            neuralNetwork.updateWeights(deltaW);
            instantaneousError = calculateTotalError(neuralNetwork, param
                    .getInputs(), param.getOutputs());

            if (instantaneousError < totalErrorEnergy
                    + .1 * (NeuralMath.matrixProduct(NeuralMath
                            .calculateTransposed(jacobianTxError), deltaW)[0][0])) {
                // The error decreased: accept the step, relax lambda and keep this
                // step as the momentum term for the next iteration.
                lambda = lambda / multiplier;
                previousDeltaW = (double[][]) deltaW.clone();
                firstUpdate = true;
                totalErrorEnergy = instantaneousError;
                this.errorDiminished = true;
            } else {
                // The error increased: undo the step and increase lambda.
                deltaW = NeuralMath.constantTimesMatrix(-1, deltaW);
                neuralNetwork.updateWeights(deltaW);
                lambda = lambda * multiplier;
                this.errorDiminished = false;
            }

            numberOfIterations++;
            // Log training progress every iteration.
            System.out.println("Number of iterations: " + numberOfIterations);
            System.out.println("Current error: " + totalErrorEnergy);
        } while (((totalErrorEnergy / param.getInputs().length) > errorGoal)
                && (numberOfIterations < maximumNumberOfIterations)
                && goAhead);

        fim = System.currentTimeMillis();
        neuralNetwork.setError(totalErrorEnergy / (param.getInputs().length));
        results.setNumberIterations(numberOfIterations);
        numberOfIterations = 0;
        results.setError(totalErrorEnergy / param.getInputs().length);
        results.setTrainingTime((fim - inicio) / 1000);
        return results;
    } //train()
} //OLMAM
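For reference, the sketch below reproduces the arithmetic of the constrained OLMAM step from train() on a tiny two-weight problem. It is a minimal, self-contained illustration only: the Hessian, gradient and previous-step values are hypothetical, and plain arrays with hand-rolled helpers stand in for NeuralMath, so nothing here depends on the toolkit's API.

// OlmamStepSketch.java - standalone illustration; all numeric values below are
// hypothetical and not taken from the toolkit.
public final class OlmamStepSketch {

    // Dot product a^T b for vectors stored as double[n].
    static double dot(double[] a, double[] b) {
        double s = 0;
        for (int i = 0; i < a.length; i++) {
            s += a[i] * b[i];
        }
        return s;
    }

    // Matrix-vector product H * v.
    static double[] mul(double[][] h, double[] v) {
        double[] r = new double[h.length];
        for (int i = 0; i < h.length; i++) {
            r[i] = dot(h[i], v);
        }
        return r;
    }

    public static void main(String[] args) {
        // Hypothetical stand-ins for the quantities train() computes: a 2x2
        // Hessian approximation, the vector J^T x error, and the previously
        // accepted weight change.
        double[][] hessian = { { 2.0, 0.3 }, { 0.3, 1.5 } };
        double[] jacobianTxError = { 0.8, -0.4 };
        double[] previousDeltaW = { -0.1, 0.2 };

        // minusLMDeltaW = H^{-1} * (J^T x error), with the 2x2 inverse written out.
        double det = hessian[0][0] * hessian[1][1] - hessian[0][1] * hessian[1][0];
        double[] minusLMDeltaW = {
                (hessian[1][1] * jacobianTxError[0] - hessian[0][1] * jacobianTxError[1]) / det,
                (-hessian[1][0] * jacobianTxError[0] + hessian[0][0] * jacobianTxError[1]) / det };

        // Scalars named as in OLMAM.train().
        double IGG = dot(jacobianTxError, minusLMDeltaW);
        double IGF = dot(jacobianTxError, previousDeltaW);
        double IFF = dot(previousDeltaW, mul(hessian, previousDeltaW));

        double epsilon = Math.sqrt(1 - (IGF * IGF) / (IGG * IFF));
        double deltaP = Math.sqrt(IGG);
        double deltaQt = -epsilon * deltaP * Math.sqrt(IGG);

        double lagrange2 = 0.5 * Math.sqrt((IFF * IGG - IGF * IGF)
                / (IGG * deltaP * deltaP - deltaQt * deltaQt));
        double lagrange1 = (-2 * lagrange2 * deltaQt + IGF) / IGG;

        // deltaW = -(lagrange1 / (2 * lagrange2)) * minusLMDeltaW
        //          + (1 / (2 * lagrange2)) * previousDeltaW
        double[] deltaW = new double[2];
        for (int i = 0; i < 2; i++) {
            deltaW[i] = -(lagrange1 / (2 * lagrange2)) * minusLMDeltaW[i]
                    + (1 / (2 * lagrange2)) * previousDeltaW[i];
        }
        System.out.println("deltaW = [" + deltaW[0] + ", " + deltaW[1] + "]");
    }
}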