📄 executionengine.cs
字号:
using System;
using XOR_ANN.DataStructures;
namespace XOR_ANN.ANN
{
/// <summary>
/// Summary description for ExecutionEngine.
/// </summary>
/// <summary>
/// Trains and runs a single-hidden-layer feed-forward neural network using
/// backpropagation with momentum, over the samples held in an ANN_Data instance.
/// </summary>
public class ExecutionEngine
{
	// Fixed seed: training runs are repeatable. (The original comment claimed the
	// seed came from the system clock — it does not; pass no argument for that.)
	private Random randomNumberGenerator = new Random(3523);
	ANN_Data ann_Data;
	// Mock CONSTANTS - these make the code easier to read
	float LEARNING_RATE;
	float MOMENTUM;
	float OUTPUT_TOLERANCE;    // max absolute per-output error for a sample to count as converged
	int MAXIMUM_EPOCHS;
	int SAMPLE_COUNT;
	int OUTPUT_NODES;
	int INPUT_NODES;
	int HIDDEN_NODES;
	// Previous weight deltas, kept between samples/epochs for the momentum term.
	float[,] lastOutputWeightChange; // [output node, hidden weight index (HIDDEN_NODES = bias)]
	float[,] lastHiddenWeightChange; // [hidden node, input weight index (INPUT_NODES = bias)]
	// Activations and errors from the most recent forward pass.
	float[] lastHiddenLayerOutput;
	float[] lastOutputLayerOutput;
	float[] currentOutputErrors;     // expected - actual, per output node

	/// <summary>
	/// Captures the network data and learning parameters and allocates the
	/// working arrays used by forward and backward passes.
	/// </summary>
	/// <param name="pANN_Data">Network topology, weights and training samples.</param>
	/// <param name="pLEARNING_RATE">Step size applied to each weight correction.</param>
	/// <param name="pMOMENTUM">Fraction of the previous weight change added to the current one.</param>
	/// <param name="pOUTPUT_TOLERANCE">Per-output absolute error tolerance for convergence.</param>
	/// <param name="pMAXIMUM_EPOCHS">Upper bound on training epochs.</param>
	public ExecutionEngine (ref XOR_ANN.DataStructures.ANN_Data pANN_Data,
		float pLEARNING_RATE,
		float pMOMENTUM,
		float pOUTPUT_TOLERANCE,
		int pMAXIMUM_EPOCHS
		)
	{
		this.ann_Data = pANN_Data;
		this.LEARNING_RATE = pLEARNING_RATE;
		this.MOMENTUM = pMOMENTUM;
		this.OUTPUT_TOLERANCE = pOUTPUT_TOLERANCE;
		this.MAXIMUM_EPOCHS = pMAXIMUM_EPOCHS;
		// set the MOCK CONSTANTS
		this.SAMPLE_COUNT = ann_Data.sampleCount;
		this.OUTPUT_NODES = ann_Data.outputNodes;
		this.INPUT_NODES = ann_Data.inputNodes;
		this.HIDDEN_NODES = ann_Data.hiddenNodes;
		// Scope working arrays; elements initialise to zero.
		// BUG FIX: the original sized BOTH momentum arrays [OUTPUT_NODES, INPUT_NODES+1],
		// which indexes out of range in backpropagateError whenever HIDDEN_NODES differs
		// from INPUT_NODES and train() has not re-allocated them yet. Sizes now match
		// the (correct) allocations in train(): one slot per incoming weight plus bias.
		lastOutputWeightChange = new float[OUTPUT_NODES, HIDDEN_NODES + 1];
		lastHiddenWeightChange = new float[HIDDEN_NODES, INPUT_NODES + 1];
		lastHiddenLayerOutput = new float[HIDDEN_NODES]; // Make space to store the node outputs
		lastOutputLayerOutput = new float[OUTPUT_NODES]; // Make space to store the node outputs
		currentOutputErrors = new float[OUTPUT_NODES];
	}

	/// <summary>
	/// Runs a single forward pass over <paramref name="pInput"/> and returns the
	/// output-layer activations. Temporarily substitutes the sample data, always
	/// restoring it afterwards (even on failure).
	/// </summary>
	/// <param name="pInput">Input vector; must supply at least INPUT_NODES values.</param>
	/// <returns>The output node activations (internal buffer, valid until the next pass).</returns>
	/// <exception cref="ArgumentException">Input is null or shorter than INPUT_NODES.</exception>
	public float[] calculate(float[] pInput)
	{
		// Guard: forwardPropagation reads INPUT_NODES columns; a shorter vector
		// previously caused an IndexOutOfRangeException deep inside the pass.
		if (pInput == null || pInput.Length < INPUT_NODES)
			throw new ArgumentException("pInput must supply at least INPUT_NODES values", "pInput");
		// Remember the sampleData
		float[,] originalSampleData = this.ann_Data.sampleInput;
		try
		{
			// Make new sample data holding just this one input vector
			this.ann_Data.sampleInput = new float[1, pInput.Length];
			for (int i = 0; i < pInput.Length; i++)
				this.ann_Data.sampleInput[0, i] = pInput[i];
			// Use the ANN to calculate the result for the given input
			this.forwardPropagation(0);
		}
		finally
		{
			// Restore the sample data even if the forward pass throws
			this.ann_Data.sampleInput = originalSampleData;
		}
		return (this.lastOutputLayerOutput);
	}

	/// <summary>
	/// Trains the network for up to MAXIMUM_EPOCHS epochs, stopping early once
	/// enough samples converge (see oneEpoch).
	/// </summary>
	/// <param name="pEpochCount">Index of the converging epoch on success; MAXIMUM_EPOCHS on failure.</param>
	/// <returns>true if the network converged within the epoch limit.</returns>
	public bool train(out int pEpochCount)
	{
		bool success = false;
		// Reset momentum history for a fresh training run; elements initialise to zero.
		lastOutputWeightChange = new float[OUTPUT_NODES, HIDDEN_NODES + 1];
		lastHiddenWeightChange = new float[HIDDEN_NODES, INPUT_NODES + 1];
		// BUG FIX: the original reported 0 epochs when training failed; report the
		// number actually run instead so callers can distinguish the two cases.
		int epochCount = MAXIMUM_EPOCHS;
		for (int i = 0; i < MAXIMUM_EPOCHS; i++)
		{
			success = oneEpoch();
			if (success)
			{
				epochCount = i;
				break;
			}
		}
		pEpochCount = epochCount;
		return (success);
	}

	/// <summary>
	/// Returns the integers 0..pNoOfSamples-1 in uniformly random order
	/// (Fisher-Yates shuffle).
	/// </summary>
	private int[] generateRandomIndexes(int pNoOfSamples)
	{
		int[] randomIndex = new int[pNoOfSamples];
		// Fill with numbers
		for (int i = 0; i < pNoOfSamples; i++)
			randomIndex[i] = i;
		// BUG FIX: the original drew swap partners from Next(SAMPLE_COUNT) rather than
		// Next(pNoOfSamples) — wrong whenever the two differ — and used the naive
		// "swap each i with a random j" scheme, which is a biased shuffle.
		// Fisher-Yates: swap element i with a random element in [0, i].
		for (int i = pNoOfSamples - 1; i > 0; i--)
		{
			int swapWithIndex = randomNumberGenerator.Next(i + 1);
			int workingValue = randomIndex[i];
			randomIndex[i] = randomIndex[swapWithIndex];
			randomIndex[swapWithIndex] = workingValue;
		}
		return randomIndex;
	}

	/// <summary>
	/// Runs one training epoch: every sample in random order, forward pass,
	/// then backpropagation for any sample outside tolerance.
	/// </summary>
	/// <returns>true once the converged-sample percentage reaches CONVERGE_PERCENTAGE.</returns>
	private bool oneEpoch ()
	{
		int convergedCount = 0; // Used to detect if we have converged
		// Visit the samples in a fresh random order each epoch
		int[] randomIndexer = this.generateRandomIndexes(SAMPLE_COUNT);
		for (int sampleCount = 0; sampleCount < SAMPLE_COUNT; sampleCount++)
		{
			int sample = randomIndexer[sampleCount];
			// Do a forward propagation for this sample
			this.forwardPropagation(sample);
			// Record output errors and check all outputs are within tolerance
			bool allOutputsInTolerance = true;
			for (int outputNode = 0; outputNode < OUTPUT_NODES; outputNode++)
			{
				currentOutputErrors[outputNode] = ann_Data.expectedOutput[sample, outputNode] - lastOutputLayerOutput[outputNode];
				if (Math.Abs(currentOutputErrors[outputNode]) > OUTPUT_TOLERANCE)
					allOutputsInTolerance = false;
			}
			// Only backpropagate for samples that missed the tolerance
			if (allOutputsInTolerance)
				convergedCount++;
			else
				this.backpropagateError(sample);
		}
		// Converged when enough of the samples matched within tolerance
		float percentageConverged = (convergedCount * 100F) / (float)SAMPLE_COUNT;
		return (percentageConverged >= SingletonGlobalParameters.instance().CONVERGE_PERCENTAGE);
	}

	/// <summary>
	/// Forward pass for one sample: fills lastHiddenLayerOutput and
	/// lastOutputLayerOutput with the sigmoid activations of each layer.
	/// </summary>
	/// <param name="pSample">Row index into ann_Data.sampleInput.</param>
	private void forwardPropagation(int pSample)
	{
		// Hidden layer: netj = sum(input_i * weight_i) + bias weight (Equation 1)
		for (int hiddenUnit = 0; hiddenUnit < HIDDEN_NODES; hiddenUnit++)
		{
			float netj = 0; // Netj - the net input to the node
			for (int i = 0; i < INPUT_NODES; i++) // There is a weight for each input + a bias weight
				netj += this.ann_Data.sampleInput[pSample, i] * this.ann_Data.hiddenWeight[hiddenUnit, i];
			// Bias weight is multiplied by a constant input of 1
			netj += this.ann_Data.hiddenWeight[hiddenUnit, INPUT_NODES];
			// Oj = activation(netj) (Equation 2)
			lastHiddenLayerOutput[hiddenUnit] = this.activationFunction_Sigmoid(netj);
		}
		// Output layer: same computation fed by the hidden-layer activations
		for (int outputUnit = 0; outputUnit < this.OUTPUT_NODES; outputUnit++)
		{
			float netj_op = 0;
			for (int i = 0; i < HIDDEN_NODES; i++) // There is a weight for each hidden node + a bias weight
				netj_op += lastHiddenLayerOutput[i] * this.ann_Data.outputWeight[outputUnit, i];
			netj_op += this.ann_Data.outputWeight[outputUnit, HIDDEN_NODES]; // bias
			lastOutputLayerOutput[outputUnit] = this.activationFunction_Sigmoid(netj_op);
		}
	}

	/// <summary>
	/// Backward pass for one sample: corrects output- and hidden-layer weights
	/// using the delta rule with momentum. Relies on currentOutputErrors and the
	/// activations stored by the most recent forwardPropagation call.
	/// </summary>
	/// <param name="pSample">Row index into ann_Data.sampleInput.</param>
	private void backpropagateError(int pSample)
	{
		// Output deltas: deltaj = error * Oj * (1 - Oj), sigmoid derivative (Equation 3)
		float[] outputDeltaj = new float[OUTPUT_NODES];
		for (int outputNode = 0; outputNode < OUTPUT_NODES; outputNode++)
		{
			float pOj = lastOutputLayerOutput[outputNode];
			outputDeltaj[outputNode] = currentOutputErrors[outputNode] * pOj * (1 - pOj);
		}
		// BUG FIX: hidden deltas must be computed from the output weights as they were
		// during the forward pass. The original corrected the output weights first and
		// then fed the already-updated weights into the hidden-layer error terms.
		float[] hiddenDeltaj = new float[HIDDEN_NODES];
		for (int hiddenNode = 0; hiddenNode < this.HIDDEN_NODES; hiddenNode++)
		{
			// Weighted error is the sum of each output delta times the weight
			// connecting this hidden node to that output.
			float weightedError = 0;
			for (int outputNode = 0; outputNode < OUTPUT_NODES; outputNode++)
				weightedError += outputDeltaj[outputNode] * ann_Data.outputWeight[outputNode, hiddenNode];
			hiddenDeltaj[hiddenNode] = lastHiddenLayerOutput[hiddenNode] * (1 - lastHiddenLayerOutput[hiddenNode]) * weightedError;
		}
		float workingDeltaWeight;
		// Correct the output weights: deltaW = rate * delta * input + momentum * lastDeltaW (Equation 4)
		for (int outputNode = 0; outputNode < OUTPUT_NODES; outputNode++)
		{
			for (int weight = 0; weight < HIDDEN_NODES; weight++)
			{
				workingDeltaWeight = LEARNING_RATE * outputDeltaj[outputNode] * lastHiddenLayerOutput[weight];
				workingDeltaWeight += MOMENTUM * lastOutputWeightChange[outputNode, weight];
				ann_Data.outputWeight[outputNode, weight] += workingDeltaWeight;
				lastOutputWeightChange[outputNode, weight] = workingDeltaWeight; // used in next sample/epoch
			}
			// Bias weight: its "input" is the constant 1
			workingDeltaWeight = LEARNING_RATE * outputDeltaj[outputNode];
			workingDeltaWeight += MOMENTUM * lastOutputWeightChange[outputNode, HIDDEN_NODES];
			ann_Data.outputWeight[outputNode, HIDDEN_NODES] += workingDeltaWeight;
			lastOutputWeightChange[outputNode, HIDDEN_NODES] = workingDeltaWeight;
		}
		// Correct the hidden weights the same way, driven by the hidden deltas
		for (int hiddenNode = 0; hiddenNode < this.HIDDEN_NODES; hiddenNode++)
		{
			for (int hiddenWeight = 0; hiddenWeight < INPUT_NODES; hiddenWeight++)
			{
				workingDeltaWeight = LEARNING_RATE * hiddenDeltaj[hiddenNode] * ann_Data.sampleInput[pSample, hiddenWeight];
				workingDeltaWeight += MOMENTUM * lastHiddenWeightChange[hiddenNode, hiddenWeight];
				ann_Data.hiddenWeight[hiddenNode, hiddenWeight] += workingDeltaWeight;
				lastHiddenWeightChange[hiddenNode, hiddenWeight] = workingDeltaWeight;
			}
			// Bias weight: its "input" is the constant 1
			workingDeltaWeight = LEARNING_RATE * hiddenDeltaj[hiddenNode];
			workingDeltaWeight += MOMENTUM * lastHiddenWeightChange[hiddenNode, INPUT_NODES];
			ann_Data.hiddenWeight[hiddenNode, INPUT_NODES] += workingDeltaWeight;
			lastHiddenWeightChange[hiddenNode, INPUT_NODES] = workingDeltaWeight;
		}
	}

	/// <summary>
	/// Logistic sigmoid activation: 1 / (1 + e^-x), mapping any input to (0, 1).
	/// </summary>
	private float activationFunction_Sigmoid(float pInput)
	{
		double result = 1 / (1 + Math.Exp(-pInput));
		return (float)result;
	}
}
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -