// backpropagation.cs
using System;
using XOR_ANN.DataStructures;
using System.Windows.Forms;
namespace XOR_ANN
{
/// <summary>
/// Summary description for BackPropagation.
/// </summary>
/// <summary>
/// Trains a small feed-forward network (2-2-1 in the XOR example) using
/// on-line (per-sample) backpropagation with a logistic sigmoid activation.
/// Weight updates are applied in place to the shared <c>XORData</c> container.
/// </summary>
public class BackPropagation
{
    // Hard cap so training terminates even if the network never converges.
    int maximumEpochs = 100000;
    // Shared training state: sample inputs, expected results and both weight layers.
    XORData workingData;
    int numberOfSamples;
    float LEARNING_RATE = 0.25F;
    // float MOMENTUM = 0.5F;
    // Reserved for a future momentum term (see MOMENTUM above); currently unused.
    float[] lastDeltaHiddenWeightVector;
    float[] lastDeltaOutputWeightVector;

    /// <summary>
    /// Captures the training data/weight container. It is held by reference,
    /// so the caller's weights are updated in place during training.
    /// </summary>
    public BackPropagation(ref XORData workingData)
    {
        this.workingData = workingData;
    }

    /// <summary>
    /// Runs training epochs until every sample's output error falls below the
    /// convergence threshold, or the epoch cap is reached, then reports the
    /// outcome in a message box.
    /// </summary>
    public void Train()
    {
        // NOTE: removed a leftover debug MessageBox that printed sigmoid(1)
        // on every call to Train().
        // Calculate some variables up front.
        this.numberOfSamples = workingData.sampleData.Length;
        int epochCount = 0;
        bool converged = false;
        // FIX: use short-circuit '&&' instead of the non-short-circuit '&'.
        while (epochCount <= this.maximumEpochs && !converged)
        {
            converged = this.DoAnEpoch();
            epochCount++;
        }
        if (converged)
            MessageBox.Show("Converged at Epoch " + epochCount); // FIX: typo "Convered"
        else
            MessageBox.Show("Not converged after " + epochCount + " Epochs.");
    }

    /// <summary>
    /// Performs one training epoch: a forward pass for every sample and, where
    /// the sample has not yet converged, a backpropagation weight update.
    /// </summary>
    /// <returns>True when every sample already meets the error threshold.</returns>
    private bool DoAnEpoch()
    {
        int convergedCount = 0;
        // For each sample input (xyzzy: consider applying these in random order).
        for (int sample = 0; sample < this.numberOfSamples; sample++)
        {
            float[] inputVector = this.workingData.sampleData[sample];
            // Forward pass: hidden layer first, then the single output node.
            float[] hiddenLayerOutput =
                this.calculateOutputFromHiddenLayer(
                    inputVector,
                    this.workingData.hiddenWeights);
            float output = calculateOutputFromOutputLayer(
                hiddenLayerOutput,
                this.workingData.outputWeights[0]); // only one output node in this example
            // Has this sample converged?
            float outputError = Math.Abs(output - this.workingData.expectedResults[sample]);
            if (outputError < 0.1)
            {
                convergedCount++;
            }
            else
            {
                // Did not converge, so backpropagate the error.
                // Output-node error gradient: (target - actual) * f'(net),
                // where f'(net) = output * (1 - output) for the sigmoid.
                float outputErrorGradient =
                    (this.workingData.expectedResults[sample] - output) * output * (1 - output);
                // Update each output weight (bound generalized from the hard-coded 3).
                for (int j = 0; j < hiddenLayerOutput.Length; j++)
                {
                    float deltaWeight = this.LEARNING_RATE * outputErrorGradient * hiddenLayerOutput[j];
                    this.workingData.outputWeights[0][j] += deltaWeight;
                }
                // Backprop to the hidden weights (bound generalized from the hard-coded 2 units).
                for (int i = 0; i < this.workingData.hiddenWeights.Length; i++)
                {
                    // Error reaching hidden unit i, scaled by the weight that
                    // connects it to the output node (the bias weight is at [0]).
                    float hiddenLayerUnitErrorGradient =
                        outputErrorGradient * this.workingData.outputWeights[0][i + 1];
                    float actualError =
                        hiddenLayerOutput[i + 1] * (1 - hiddenLayerOutput[i + 1]) * hiddenLayerUnitErrorGradient;
                    // FIX: the gradient of an input->hidden weight is taken with
                    // respect to the *input* feeding that weight, not the hidden
                    // unit's own output (the original used hiddenLayerOutput[j]).
                    for (int j = 0; j < inputVector.Length; j++)
                    {
                        float deltaWeight = this.LEARNING_RATE * actualError * inputVector[j];
                        this.workingData.hiddenWeights[i][j] += deltaWeight;
                    }
                }
            }
        }
        // Converged only when every sample met the threshold this epoch.
        return (convergedCount == this.numberOfSamples);
    }

    /// <summary>
    /// Forward pass through the hidden layer: dot product of the input vector
    /// with each hidden unit's weight row, passed through the sigmoid.
    /// </summary>
    /// <param name="inputVector">Sample inputs; element [0] is the bias (1).</param>
    /// <param name="weightVector">One weight row per hidden unit.</param>
    /// <returns>Hidden activations, with a leading bias value of 1 at [0].</returns>
    private float[] calculateOutputFromHiddenLayer(float[] inputVector, float[][] weightVector)
    {
        int hlWeights = inputVector.Length; // weights per unit (input already includes the bias)
        int hlUnits = weightVector.Length;  // number of hidden units
        // +1 so position [0] can carry the bias for the next layer
        // (generalized from the hard-coded new float[3]).
        float[] hiddenLayerOutput = new float[hlUnits + 1];
        hiddenLayerOutput[0] = 1; // bias
        for (int i = 0; i < hlUnits; i++)
        {
            // Net input = dot product of input vector and this unit's weights.
            float hiddenLayerNetInput = 0;
            for (int j = 0; j < hlWeights; j++)
            {
                hiddenLayerNetInput += inputVector[j] * weightVector[i][j];
            }
            // [i + 1] because position [0] holds the bias. (Also dropped a stray ';;'.)
            hiddenLayerOutput[i + 1] = this.activationFunction(hiddenLayerNetInput);
        }
        return (hiddenLayerOutput);
    }

    /// <summary>
    /// Forward pass through the single output node: dot product of the hidden
    /// activations with the output weight vector, passed through the sigmoid.
    /// </summary>
    /// <param name="inputVector">Hidden-layer output; element [0] is the bias.</param>
    /// <param name="weightVector">Weights of the output node, bias weight at [0].</param>
    private float calculateOutputFromOutputLayer(float[] inputVector, float[] weightVector)
    {
        float outputLayerNetInput = 0;
        // Net input = dot product of hidden activations and output weights.
        for (int j = 0; j < inputVector.Length; j++)
        {
            outputLayerNetInput += inputVector[j] * weightVector[j];
        }
        // Apply the activation function to get the network output.
        return (this.activationFunction(outputLayerNetInput));
    }

    /// <summary>Standard logistic sigmoid: 1 / (1 + e^-x).</summary>
    private float activationFunction(float value)
    {
        double activationFnResult = 1 / (1 + Math.Exp(-value));
        return ((float)activationFnResult);
    }
}
}