//cmlp.cpp
//Tanks
//Copyright John Manslow
//29/09/2001
////////////////////////////////////////////////////
//Remove this block if not compiling under Windows
#include "stdafx.h"
#ifdef _DEBUG
#define new DEBUG_NEW
#endif
////////////////////////////////////////////////////
#include "CMLP.h"
#include "stdlib.h"
#include "math.h"
#include "stdio.h"
#include "assert.h"
#include "fstream.h"
#include "time.h"
//Where backups will be made during training (guards against crashes, etc.)
#define FileForBackupSaves "TrainingBackup.mlp"
CMLP::CMLP(
const unsigned long ulNewNumberOfInputs,
const unsigned long ulNewNumberOfHiddenNodes,
const unsigned long ulNewNumberOfOutputs
)
{
TRACE("\t\tCreating MLP...");
//Record the structure (number of inputs, hidden neurons, and outputs) of the network
ulNumberOfInputs=ulNewNumberOfInputs;
ulNumberOfHiddenNodes=ulNewNumberOfHiddenNodes;
ulNumberOfOutputs=ulNewNumberOfOutputs;
//Allocate memory to store the network's "current" weights and the best weights found during training
AllocateMemory();
//Set the network's weights to random values and reset all variables used in training.
Reset();
//Set these character pointers to NULL so we know they're not used yet
pTrainingStartTime=NULL;
pTrainingStartDate=NULL;
TRACE("successful.\n");
}
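//A minimal construction sketch (the layer sizes are illustrative, not taken
//from the original Tanks code): a network with two inputs, ten hidden
//neurons, and one output is created with
//
//CMLP Net(2,10,1);
//
//and is immediately ready for training via dTrainingStep (see the example
//training loop after that function).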
void CMLP::AllocateMemory(void)
{
unsigned long i;
//Allocate memory to store the current values of the input to hidden layer weights and
//their best values. Each hidden neuron gets ulNumberOfInputs+1 weights; the extra
//weight is the neuron's bias
ppdwih=new double*[ulNumberOfHiddenNodes];
ppdBestwih=new double*[ulNumberOfHiddenNodes];
assert(ppdwih && ppdBestwih);
for(i=0;i<ulNumberOfHiddenNodes;i++)
{
ppdwih[i]=new double[ulNumberOfInputs+1];
ppdBestwih[i]=new double[ulNumberOfInputs+1];
assert(ppdwih[i] && ppdBestwih[i]);
}
//Do the same for the hidden to output layer weights
ppdwho=new double*[ulNumberOfOutputs];
ppdBestwho=new double*[ulNumberOfOutputs];
assert(ppdwho && ppdBestwho);
for(i=0;i<ulNumberOfOutputs;i++)
{
ppdwho[i]=new double[ulNumberOfHiddenNodes+1];
ppdBestwho[i]=new double[ulNumberOfHiddenNodes+1];
assert(ppdwho[i] && ppdBestwho[i]);
}
}
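//The resulting layout (a hedged reading, consistent with the loop bounds
//above): ppdwih[i][j] holds the weight from input j to hidden neuron i, with
//j==ulNumberOfInputs being that neuron's bias weight; ppdwho[i][j] holds the
//weight from hidden neuron j to output i, with j==ulNumberOfHiddenNodes being
//that output's bias weight.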
void CMLP::DeallocateMemory(void)
{
//Deallocate the storage used for current and best weight values.
unsigned long i;
for(i=0;i<ulNumberOfHiddenNodes;i++)
{
delete []ppdwih[i];
delete []ppdBestwih[i];
}
delete []ppdwih;
delete []ppdBestwih;
for(i=0;i<ulNumberOfOutputs;i++)
{
delete []ppdwho[i];
delete []ppdBestwho[i];
}
delete []ppdwho;
delete []ppdBestwho;
//If we've recorded the time and date of the start of training, delete them too.
if(pTrainingStartTime)
{
delete []pTrainingStartTime;
}
if(pTrainingStartDate)
{
delete []pTrainingStartDate;
}
}
CMLP::~CMLP()
{
TRACE("\t\tDestroying MLP...");
DeallocateMemory();
TRACE("successful.\n");
}
void CMLP::Reset(void)
{
unsigned long i,j;
//Give the network weights random values between -0.5 and +0.5. Since this effectively resets
//training, the best recorded weights (stored in ppdBest...) are set to the same random values.
for(i=0;i<ulNumberOfHiddenNodes;i++)
{
for(j=0;j<ulNumberOfInputs+1;j++)
{
ppdwih[i][j]=1.0*(double(rand())/double(RAND_MAX)-0.5);
ppdBestwih[i][j]=ppdwih[i][j];
}
}
//Do the same for the hidden to output layer weights
for(i=0;i<ulNumberOfOutputs;i++)
{
for(j=0;j<ulNumberOfHiddenNodes+1;j++)
{
ppdwho[i][j]=1.0*(double(rand())/double(RAND_MAX)-0.5);
ppdBestwho[i][j]=ppdwho[i][j];
}
}
//Reset the best recorded error to an impossible value (the error is never negative) to indicate
//that no training has taken place with the current values of the weights.
dBestError=-1.0;
//Reset the step size to a conservative value.
dStepSize=0.001;
}
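//Note that Reset draws its random values from rand(), which this file never
//seeds, so every run starts from identical "random" weights. A hedged
//suggestion, not in the original source: call, for example,
//srand((unsigned)time(NULL)); once at start-up (time.h is already included)
//if a different initialisation is wanted on each run.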
//This is the function that allows the network to learn. It uses a perturbation search
//to find values of the network's weights that allow it to reproduce the set of example
//input-output pairs to the desired accuracy. Each call to this function improves the network
//only very slightly, so it will often have to be called many hundreds of thousands
//of times before the network is good enough. (A sketch of such a training loop
//follows this function.)
double CMLP::dTrainingStep(
const unsigned long ulNumberOfPatternsInTrainingSet,
double ** const ppdTrainingInputs,
double ** const ppdTrainingTargets
)
{
//This function performs one step of a perturbation search: it randomly changes the
//neural network's weights. If the network's performance has improved, the new
//values are kept. If not, the old values are restored.
unsigned long i,j;
double dNewError;
//If dBestError==-1.0, this is the first training step, so we need to do some initialisation
//of the perturbation search.
if(dBestError==-1.0)
{
//Make sure we deallocate memory pointed to by these pointers (if any) before we
//reassign them
if(pTrainingStartTime)
{
delete []pTrainingStartTime;
}
if(pTrainingStartDate)
{
delete []pTrainingStartDate;
}
//Record time and date that training started.
pTrainingStartTime=new char[256];
_strtime(pTrainingStartTime);
pTrainingStartDate=new char[256];
_strdate(pTrainingStartDate);
//Measure the performance of the network with the weights set to their current values.
//Since this is the only performance measurement so far, it must be the best. Store it.
dBestError=dGetPerformance(
ulNumberOfPatternsInTrainingSet,
ppdTrainingInputs,
ppdTrainingTargets
);
}
//Perturb the network's weights by adding a random value between +dStepSize and -dStepSize
//to each one.
for(i=0;i<ulNumberOfHiddenNodes;i++)
{
for(j=0;j<ulNumberOfInputs+1;j++)
{
ppdwih[i][j]+=dStepSize*(double(rand())/double(RAND_MAX)-0.5)*2.0;
}
}
//And for the hidden to output layer weights
for(i=0;i<ulNumberOfOutputs;i++)
{
for(j=0;j<ulNumberOfHiddenNodes+1;j++)
{
ppdwho[i][j]+=dStepSize*(double(rand())/double(RAND_MAX)-0.5)*2.0;
}
}
//Measure the performance of the network with the new weights
dNewError=dGetPerformance(
ulNumberOfPatternsInTrainingSet,
ppdTrainingInputs,
ppdTrainingTargets
);
//If the performance is worse (the new error is larger)
if(dNewError>dBestError)
{
//Reduce the size of the perturbation a bit - we need to be more conservative!
dStepSize*=0.9;
//and set the weights back to their old values
for(i=0;i<ulNumberOfHiddenNodes;i++)
{
for(j=0;j<ulNumberOfInputs+1;j++)
{
ppdwih[i][j]=ppdBestwih[i][j];
}
}
for(i=0;i<ulNumberOfOutputs;i++)
{
for(j=0;j<ulNumberOfHiddenNodes+1;j++)
{
ppdwho[i][j]=ppdBestwho[i][j];
}
}
}
else
{
//Otherwise the new weights performed at least as well as the old ones, so record the
//performance of the network with the new weights,
dBestError=dNewError;
//Increase the step size a little - we're doing well and can afford to be more
//adventurous
dStepSize*=1.2;
//Record the new weights as the best so far discovered
for(i=0;i<ulNumberOfHiddenNodes;i++)
{
for(j=0;j<ulNumberOfInputs+1;j++)
{
ppdBestwih[i][j]=ppdwih[i][j];
}
}
for(i=0;i<ulNumberOfOutputs;i++)
{
for(j=0;j<ulNumberOfHiddenNodes+1;j++)
{
ppdBestwho[i][j]=ppdwho[i][j];
}
}
//Save the network just in case we have a crash (or power failure or something)
Save(FileForBackupSaves);
}
//Tell the calling function what the performance of the network currently is, so it can
//decide whether to continue training. This function always leaves the network with the
//best weights found so far, so there's no need to restore them externally
return dBestError;
}
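//A hedged usage sketch, not part of the original Tanks source: the layer
//sizes, error threshold, and parameter names below are illustrative. It shows
//the intended calling pattern - construct a network, then call dTrainingStep
//repeatedly until the returned error is acceptably small.
static void ExampleTrainingLoop(
const unsigned long ulNumberOfPatterns,
double ** const ppdInputs,
double ** const ppdTargets
)
{
//Two inputs, ten hidden neurons, one output: arbitrary example sizes
CMLP Net(2,10,1);
double dError;
do
{
//Each call perturbs the weights once and returns the best error found so far
dError=Net.dTrainingStep(ulNumberOfPatterns,ppdInputs,ppdTargets);
}
while(dError>0.001);
//dTrainingStep always leaves the best weights in place, so the network can
//now be queried with pdGetOutputs (defined below)
}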
double *CMLP::pdGetOutputs(