📄 net.cpp
#include "Net.h"#ifdef NEURAL_NET_DEBUG#include <iostream>#endifusing namespace NeuralNetwork;//构造函数Net::Net(int layers, int layerSizes[], real initLearningRate = 0.25, real initMomentumFactor = 0.9, real initGain = 1.0) :momentumFactor(initMomentumFactor), learningRate(initLearningRate), gain(initGain){ //申请和初始化各个网络层 numLayers = layers; layer = new NetLayer*[layers]; layer[0] = new NetLayer(layerSizes[0], 0); for (int i=1; i<layers; i++) { layer[i] = new NetLayer(layerSizes[i], layer[i-1]); } inputLayer = layer[0]; outputLayer = layer[layers-1]; //申请资源 int inputSize = inputLayer->getUnits(); input = new real[inputSize]; int outputSize = outputLayer->getUnits(); actualOutput = new real[outputSize]; expectedOutput = new real[outputSize]; //随机初始化权值 randomizeWeights();}//构造函数Net::Net(std::istream& in){ //申请和初始化各个网络层 readRaw(in, numLayers); readRaw(in, momentumFactor); readRaw(in, learningRate); readRaw(in, gain); layer = new NetLayer*[numLayers]; layer[0] = new NetLayer(in, 0); for (int i=1; i<numLayers; i++) { layer[i] = new NetLayer(in, layer[i-1]); } inputLayer = layer[0]; outputLayer = layer[numLayers-1]; //申请资源 int inputSize = inputLayer->getUnits(); input = new real[inputSize]; int outputSize = outputLayer->getUnits(); actualOutput = new real[outputSize]; expectedOutput = new real[outputSize];}void Net::save(std::ostream& out){ writeRaw(out, numLayers); writeRaw(out, momentumFactor); writeRaw(out, learningRate); writeRaw(out, gain); //保存每层 for (int i=0; i<numLayers; i++) { layer[i]->save(out); }}//析构函数Net::~Net(){ for (int i=0; i<numLayers; i++) { delete layer[i]; } delete[] layer; delete[] input; delete[] actualOutput; delete[] expectedOutput;}void Net::randomizeWeights(){ //随机初始化各层权值(除第一层) for (int layerNum=1; layerNum < numLayers; layerNum++) { layer[layerNum]->randomizeWeights(); }}void Net::clearWeights(){ //清0各层权值(除第一层) for (int layerNum=1; layerNum < numLayers; layerNum++) { layer[layerNum]->clearWeights(); }}void Net::saveWeights(){ for (int layerNum=1; layerNum < numLayers; layerNum++) { layer[layerNum]->saveWeights(); }}void Net::restoreWeights(){ for (int layerNum=1; layerNum < numLayers; layerNum++) { layer[layerNum]->restoreWeights(); }}void Net::propagate(){ for (int layerNum=1; layerNum < numLayers; layerNum++) { layer[layerNum]->propagate(gain); }}void Net::backpropagate(){ //反向调用backpropagate(除第一层),使误差从输出层向输入层回馈 for (int layerNum=numLayers-1; layerNum > 0; layerNum--) { layer[layerNum]->backpropagate(gain); }}void Net::computeOutputError(real* target){ //使输出层计算与目标值的误差 error = outputLayer->computeError(gain, target);}void Net::adjustWeights(){ //各层调用adjustWeights(除第一层) for (int layerNum=1; layerNum < numLayers; layerNum++) { layer[layerNum]->adjustWeights(momentumFactor, learningRate); }}void Net::setInputs(real* inputs){ //设置输入层神经元的输出值,作为网络输入 inputLayer->setOutputs(inputs);}void Net::getOutputs(real* outputs){ //取得输出层神经元的输出值,作为网络输出 outputLayer->getOutputs(outputs);}void Net::simpleTrain(real* input, real* expectedOutput){ //检查网络结果 setInputs(input); propagate(); computeOutputError(expectedOutput); //反向传播误差值,然后修正权值,使误差迅速减小 backpropagate(); adjustWeights();}void Net::train(int epochs, ExampleFactory &trainingExamples){ int inputSize = inputLayer->getUnits(); int outputSize = outputLayer->getUnits(); //依次训练每个训练样本 for (int n=0; n < epochs*trainingExamples.numExamples(); n++) { trainingExamples.getExample(inputSize, input, outputSize, expectedOutput); simpleTrain(input, expectedOutput); }}//测试网络性能real Net::test(ExampleFactory &testExamples){ int inputSize = 
inputLayer->getUnits(); int outputSize = outputLayer->getUnits(); real totalError = 0; //使用每个样本运行网络,将误差累加 for (int n=0; n < testExamples.numExamples(); n++) { testExamples.getExample(inputSize, input, outputSize, expectedOutput); run(input, actualOutput); computeOutputError(expectedOutput); totalError += error; }#ifdef NEURAL_NET_DEBUG std::cout << "Error: " << totalError << std::endl;#endif return totalError;}real Net::autotrain(ExampleFactory &trainingExamples, ExampleFactory &testExamples, int epochsBetweenTests, float cutOffError){ //取得当前权值集最初的误差 real minTestError = test(testExamples); real testError = minTestError; while (testError <= cutOffError*minTestError) { //训练样本 train(epochsBetweenTests, trainingExamples); //如果结果理想,保存权值 testError = test(testExamples); if (testError < minTestError) { saveWeights(); minTestError = testError; } } //恢复保存的权值 restoreWeights(); return minTestError;}void Net::run(real* input, real* output){ setInputs(input); propagate(); getOutputs(output);}void Net::doneTraining(){ //调用每层的doneTraining for (int layerNum=0; layerNum < numLayers; layerNum++) { layer[layerNum]->doneTraining(); }}
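For reference, below is a minimal usage sketch (not part of net.cpp) showing how this Net class might be driven by a caller. It assumes the declarations in Net.h match the definitions above, that ExampleFactory declares virtual numExamples() and getExample() members with the signatures used in this file, and that real is the floating-point typedef used throughout. XorExamples and its round-robin behavior are hypothetical; the same factory is reused for training and testing only to keep the sketch short.

// usage_sketch.cpp - hypothetical driver, assumptions noted above
#include "Net.h"
#include <iostream>

using namespace NeuralNetwork;

// Hypothetical example source: serves the four XOR patterns in rotation.
class XorExamples : public ExampleFactory
{
public:
    XorExamples() : next(0) {}

    int numExamples() { return 4; }

    void getExample(int inputSize, real* input, int outputSize, real* expectedOutput)
    {
        static const real in[4][2]  = { {0,0}, {0,1}, {1,0}, {1,1} };
        static const real out[4][1] = { {0},   {1},   {1},   {0}   };
        for (int i = 0; i < inputSize && i < 2; i++)  input[i] = in[next][i];
        for (int i = 0; i < outputSize && i < 1; i++) expectedOutput[i] = out[next][i];
        next = (next + 1) % 4;
    }

private:
    int next;
};

int main()
{
    int layerSizes[] = { 2, 4, 1 };          // input, hidden and output units
    Net net(3, layerSizes, 0.25, 0.9, 1.0);  // learning rate, momentum, gain

    XorExamples examples;
    // autotrain keeps training until the test error rises more than 5% above
    // the best error seen, then restores the best saved weight set.
    net.autotrain(examples, examples, 100, 1.05f);
    net.doneTraining();

    real in[2] = { 1, 0 };
    real out[1];
    net.run(in, out);
    std::cout << "XOR(1,0) ~ " << out[0] << std::endl;
    return 0;
}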