📄 BatchAdaptiveLearningRateMomentumBackPropagation.java
/*
 * $RCSfile: BatchAdaptiveLearningRateMomentumBackPropagation.java,v $
 * $Revision: 1.1 $
 * $Date: 2005/05/08 02:16:28 $
 *
 * NeuralNetworkToolkit
 * Copyright (C) 2004 Universidade de Brasília
 *
 * This file is part of NeuralNetworkToolkit.
 *
 * NeuralNetworkToolkit is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * NeuralNetworkToolkit is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with NeuralNetworkToolkit; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 - USA.
 */
package neuralnetworktoolkit.methods.gradientbased.backpropagation;

import neuralnetworktoolkit.StatisticalResults;
import neuralnetworktoolkit.math.NeuralMath;
import neuralnetworktoolkit.methods.TrainingParameters;
import neuralnetworktoolkit.neuralnetwork.INeuralNetwork;

/**
 * Batch back-propagation with an adaptive learning rate and momentum.
 *
 * @version $Revision: 1.1 $ - $Date: 2005/05/08 02:16:28 $
 *
 * @author <a href="mailto:hugoiver@yahoo.com.br">Hugo Iver V. Gonçalves</a>
 * @author <a href="mailto:rodbra@pop.com.br">Rodrigo C. M. Coimbra</a>
 */
public class BatchAdaptiveLearningRateMomentumBackPropagation extends ModifiedBackPropagation {

    public BatchAdaptiveLearningRateMomentumBackPropagation() {
        super();
        instantaneousError = 1000000000;
    }

    /**
     * Trains a neural network with the batch back-propagation method, using an
     * adaptive learning rate and a momentum term.
     *
     * @param neuralNetwork
     *            Neural network to be trained.
     * @param parameters
     *            Training parameters: learning rate, error goal, maximum number
     *            of epochs, momentum coefficient, learning-rate multiplier,
     *            training inputs and expected outputs.
     *
     * @return Statistical information about network learning.
     */
    public StatisticalResults train(INeuralNetwork neuralNetwork, TrainingParameters parameters) {
        // TODO Erase instrumentation code.
        BackPropagationParameters param = (BackPropagationParameters) parameters;
        numberOfSynapses = neuralNetwork.numberOfSynapses();
        errorGoal = param.getError();
        maximumNumberOfEpochs = param.getMaxEpochs();
        learningRate = param.getLearningRate();
        alpha = param.getAlpha();
        lrMultiplier = param.getLrMultiplier();
        inicio = System.currentTimeMillis();
        do {
            // TODO improve this.
            // Accumulate the batch weight update over all training patterns.
            deltaW = new double[neuralNetwork.numberOfSynapses()][1];
            for (int i = 0; i < param.getInputs().length; i++) {
                neuralNetwork.inputLayerSetup(param.getInputs()[i]);
                neuralNetwork.propagateInput();
                calculateNeuronDeltas(neuralNetwork, param.getOutputs()[i]);
                derivativesVector = calculateDerivativesVector(neuralNetwork);
                instantDeltaW = NeuralMath.constantTimesMatrix(-learningRate, derivativesVector);
                deltaW = NeuralMath.matrixSum(deltaW, instantDeltaW);
            }
            // Add the momentum term only after a previous successful iteration.
            if ((!firstIteration) && (successfullIteration)) {
                deltaW = NeuralMath.matrixSum(deltaW,
                        NeuralMath.constantTimesMatrix(alpha, previousDeltaW));
            } else {
                firstIteration = false;
                //deltaW = neuralMath.constantTimesMatrix(-param.getLearningRate(), derivativesVector);
            }
            neuralNetwork.updateWeights(deltaW);
            instantaneousError = calculateTotalError(neuralNetwork, param.getInputs(), param.getOutputs());
            if (instantaneousError < totalErrorEnergy) {
                // Error decreased: accept the step and increase the learning rate.
                previousDeltaW = (double[][]) deltaW.clone();
                learningRate = learningRate * lrMultiplier;
                totalErrorEnergy = instantaneousError;
                successfullIteration = true;
            } else {
                // Error increased: undo the weight update and decrease the learning rate.
                deltaW = NeuralMath.constantTimesMatrix(-1, deltaW);
                neuralNetwork.updateWeights(deltaW);
                learningRate = learningRate / lrMultiplier;
                //successfullIteration = false;
            }
            numberOfEpochs++;
            // Prints every epoch; the commented expression would print every 1% of maxEpochs.
            if ((numberOfEpochs) % 1 /*(maximumNumberOfEpochs/100)*/ == 0) {
                System.out.println("Number of epochs: " + numberOfEpochs);
                System.out.println("Current error: " + totalErrorEnergy);
            }
        } while (((totalErrorEnergy / param.getInputs().length) > errorGoal)
                && (numberOfEpochs < maximumNumberOfEpochs) && goAhead);
        fim = System.currentTimeMillis();
        neuralNetwork.setError(totalErrorEnergy / param.getInputs().length);
        results.setError(totalErrorEnergy / param.getInputs().length);
        results.setNumberOfEpochs(numberOfEpochs);
        numberOfEpochs = 0;
        results.setTrainingTime((fim - inicio) / 1000);
        return results;
    } // train()

} // BatchAdaptiveLearningRateMomentumBackPropagation
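
How the adaptive rule works: each epoch accumulates the batch gradient step deltaW, optionally adds a momentum term (alpha times the last accepted step), and applies it. If the total error drops, the step is kept, learningRate is multiplied by lrMultiplier and the step is stored for momentum; otherwise the update is reversed and learningRate is divided by lrMultiplier. The sketch below shows one plausible way to call the class on a small data set. It is only an illustration under assumptions: the BackPropagationParameters setters (setInputs, setOutputs, setLearningRate, setAlpha, setLrMultiplier, setError, setMaxEpochs) are inferred as counterparts of the getters used in train() and are not confirmed by this file, and the network is assumed to be constructed elsewhere by toolkit-specific code.

// Hypothetical usage sketch (not part of the toolkit). Setter names are
// assumed counterparts of the getters called in train() above.
import neuralnetworktoolkit.StatisticalResults;
import neuralnetworktoolkit.methods.gradientbased.backpropagation.BackPropagationParameters;
import neuralnetworktoolkit.methods.gradientbased.backpropagation.BatchAdaptiveLearningRateMomentumBackPropagation;
import neuralnetworktoolkit.neuralnetwork.INeuralNetwork;

public class BatchAdaptiveTrainingExample {

    /** Trains an already-constructed network on the XOR problem. */
    public static StatisticalResults trainXor(INeuralNetwork network) {
        // Toy training set: XOR truth table.
        double[][] inputs  = { {0, 0}, {0, 1}, {1, 0}, {1, 1} };
        double[][] outputs = { {0},    {1},    {1},    {0}    };

        BackPropagationParameters params = new BackPropagationParameters();
        params.setInputs(inputs);        // assumed setter for getInputs()
        params.setOutputs(outputs);      // assumed setter for getOutputs()
        params.setLearningRate(0.1);     // initial learning rate
        params.setAlpha(0.9);            // momentum coefficient
        params.setLrMultiplier(1.05);    // factor used to raise/lower the learning rate
        params.setError(0.001);          // error goal per training pattern
        params.setMaxEpochs(10000);      // hard limit on the number of epochs

        BatchAdaptiveLearningRateMomentumBackPropagation trainer =
                new BatchAdaptiveLearningRateMomentumBackPropagation();
        return trainer.train(network, params);
    }
}

A typical choice is an lrMultiplier slightly above 1 (here 1.05), so the learning rate grows slowly while iterations keep succeeding and shrinks quickly under repeated failures; the concrete values above are illustrative, not recommendations from the toolkit.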