/* bp_back.c -- back-propagation training demo for a small feed-forward
 * neural network (batch gradient descent with momentum). */
#define ERRORLEVEL 0.0001
#define nPatterns 10
#define nInputNodes 10
#define nHiddenNodes 5
#define nOutputNodes 1
#define nIterations 2000000
/*typedefs and prototypes for dynamic storage of arrays
typedef float *PFLOAT;
typedef PFLOAT VECTOR;
typedef PFLOAT *MATRIX;
void VectorAllocate(VECTOR *vector, int nCols);
void AllocateCols(PFLOAT matrix[], int nRows, int nCols);
void MatrixAllocate(MATRIX *pmatrix, int nRows, int nCols);
void MatrixFree(MATRIX matrix, int nRows);
define storage for net layers*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <iostream.h>
/*
MATRIX out0; *input layers *
MATRIX out1; *hidden layer *
MATRIX delta1; *delta at hidden layer *
MATRIX delw1; *change in weights input:hidden *
MATRIX w1; *weights input:hidden *
MATRIX out2; *output layer *
MATRIX delta2; *delta at output layer *
MATRIX delw2; *change in weights hidden:output *
MATRIX w2; *weights hidden:output *
MATRIX target; *target output *
VECTOR PatternID; *identifier for each stored pattern */
void main()
{ //····························
double eta = 0.15,
alpha = 0.075;
double ErrorLevel = double(ERRORLEVEL);/*satisfactory error level */
double error; /*latest sum squared error value*/
int h,i,j; /*index hidden,input,output layer*/
int p, /* index pattern number */
q; /* index iteration number */
/*float out0[nPatterns][nInputNodes];*/
double out1[nPatterns][nHiddenNodes];
double delta1[nPatterns][nHiddenNodes];
double delw1[nHiddenNodes][nInputNodes];
double w1[nHiddenNodes][nInputNodes];
double out2[nPatterns][nOutputNodes];
double delta2[nPatterns][nOutputNodes];
double delw2[nOutputNodes][nHiddenNodes];
double w2[nOutputNodes][nHiddenNodes];
/*float target[nPatterns][nOutputNodes];*/
double out0[10][2] = {{0.0000, 0.0000},
{0.0000, 0.5000},
{0.1667, 0.1667},
{0.1667, 0.8333},
{0.3333, 1.0000},
{0.5000, 0.3333},
{0.6667, 0.3333},
{0.6667, 0.8333},
{0.8333, 0.8333},
{1.0000, 0.1667}};
double target[10][1] = {1.0000,1.0000,1.0000,0.5000,0.1667,0.6667,0.5000,0.0000,0.0000,0.3333};
/* read input:hidden weights */
for (h = 0; h < nHiddenNodes; h++)
{
for (i = 0; i <= nInputNodes; i++)
{
w1[h][i] = (double)((rand()/32767.0)*2-1);
delw1[h][i] = 0.0;
}
}
/* read hidden:out weights */
for (j = 0; j < nOutputNodes; j++)
for (h = 0; h <= nHiddenNodes; h++)
{
w2[j][h] = (double)((rand()/32767.0)*2-1);
delw2[j][h] = 0.0;
}
/*--------------------- begin iteration loop ------------------------*/
for (q = 0; q < nIterations; q++)
{
for (p = 0; p < nPatterns; p++)
{
/*-------------------- hidden layer --------------------------*/
/* Sum input to hidden layer over all input-weight combinations */
for (h = 0; h < nHiddenNodes; h++)
{
double sum = w1[h][nInputNodes]; /* begin with bias */
for (i = 0; i < nInputNodes; i++)
sum += w1[h][i] * out0[p][i];
/* Compute output (use sigmoid) */
out1[p][h] = 1.0 / (1.0 + exp(-sum));
}
/*-------------------- output layer --------------------------*/
for (j = 0; j < nOutputNodes; j++)
{
double sum = w2[j][nHiddenNodes];
for (h = 0; h < nHiddenNodes; h++)
sum += w2[j][h] * out1[p][h];
out2[p][j] = 1.0 / (1.0 + exp(-sum));
}
/*-------------------- delta output --------------------------*/
/* Compute deltas for each output unit for a given pattern */
for (j = 0; j < nOutputNodes; j++)
delta2[p][j] = (target[p][j] - out2[p][j]) * out2[p][j] * (1.0 - out2[p][j]);
/*-------------------- delta hidden --------------------------*/
for (h = 0; h < nHiddenNodes; h++)
{
double sum = 0.0;
for (j = 0; j < nOutputNodes; j++)
sum += delta2[p][j] * w2[j][h];
delta1[p][h] = sum * out1[p][h] * (1.0 - out1[p][h]);
}
} /*end of for p*/
/*-------------- adapt weights hidden:output ---------------------*/
for (j = 0; j < nOutputNodes; j++)
{
double dw; /* delta weight */
double sum = 0.0;
/* grand sum of deltas for each output node for one epoch */
for (p = 0; p < nPatterns; p++)
sum += delta2[p][j];
/* Calculate new bias weight for each output unit */
dw = eta * sum + alpha * delw2[j][nHiddenNodes];/*阈值改变*/
w2[j][nHiddenNodes] += dw;
delw2[j][nHiddenNodes] = dw; /* delta for bias */
/* Calculate new weights */
for (h = 0; h < nHiddenNodes; h++)
{
double sum = 0.0;
for (p = 0; p < nPatterns; p++)
sum += delta2[p][j] * out1[p][h];
dw = eta * sum + alpha * delw2[j][h];
w2[j][h] += dw;
delw2[j][h] = dw;
}
} /*end of for adapt weights hidden:output*/
/*-------------------- adapt weights input:hidden -----------------*/
for (h = 0; h < nHiddenNodes; h++)
{
double dw; /* delta weight */
double sum = 0.0;
for (p = 0; p < nPatterns; p++)
sum += delta1[p][h];
/* Calculate new bias weight for each hidden unit */
dw = eta * sum + alpha * delw1[h][nInputNodes];/*阈值改变*/
w1[h][nInputNodes] += dw;
delw1[h][nInputNodes] = dw;
/* Calculate new weights */
for (i = 0; i < nInputNodes; i++)
{
double sum = 0.0;
for (p = 0; p < nPatterns; p++)
sum += delta1[p][h] * out0[p][i];
dw = eta * sum + alpha * delw1[h][i];
w1[h][i] += dw;
delw1[h][i] = dw;
}
} /*end of for adapt weights input:hidden */
/*-------------------- Sum Squared Error ------------------------*/
for (p = 0, error = 0.0; p < nPatterns; p++)
{
for (j = 0; j < nOutputNodes; j++)
{
double temp = target[p][j] - out2[p][j];
error += temp * temp;
}
}
/* Average error per node over all patterns */
error /= (nPatterns * nOutputNodes);
/* Terminate when error satisfactory */
if (error < ErrorLevel) /*循环结束的条件*/
break;
}
/*-----end of iteration loop -----*/
for (p = 0; p < nPatterns; p++)
{
for(j = 0; j < nOutputNodes; j++)
cout << out2[p][j] <<endl;
}
}//··········································