📄 1.cpp
字号:
/* Error is still decreasing: snapshot the current (best-so-far) weights
   into WeightSave[][] so they can be restored later. */
void SaveWeights(NET* Net)
{
    INT layer, unit, src;
    for (layer = 1; layer < NUM_LAYERS; layer++)
    {
        /* unit runs over this layer's neurons (1-based);
           src runs over the previous layer's outputs, including index 0
           (presumably the bias unit — TODO confirm against network setup). */
        for (unit = 1; unit <= Net->Layer[layer]->Units; unit++)
            for (src = 0; src <= Net->Layer[layer-1]->Units; src++)
                Net->Layer[layer]->WeightSave[unit][src] =
                    Net->Layer[layer]->Weight[unit][src];
    }
}
/* Training has gone past the optimum: copy the previously saved
   best weights from WeightSave[][] back into Weight[][]. */
void RestoreWeights(NET* Net)
{
    INT layer, unit, src;
    for (layer = 1; layer < NUM_LAYERS; layer++)
    {
        for (unit = 1; unit <= Net->Layer[layer]->Units; unit++)
            for (src = 0; src <= Net->Layer[layer-1]->Units; src++)
                Net->Layer[layer]->Weight[unit][src] =
                    Net->Layer[layer]->WeightSave[unit][src];
    }
}
/* Copy the external input vector into the input layer.
   Layer units are 1-based while the caller's array is 0-based. */
void SetInput(NET* Net, REAL* Input)
{
    INT unit;
    for (unit = 1; unit <= Net->InputLayer->Units; unit++)
        Net->InputLayer->Output[unit] = Input[unit - 1];
}
/* Copy the output layer's activations into the caller's array.
   Layer units are 1-based while the caller's array is 0-based. */
void GetOutput(NET* Net, REAL* Output)
{
    INT unit;
    for (unit = 1; unit <= Net->OutputLayer->Units; unit++)
        Output[unit - 1] = Net->OutputLayer->Output[unit];
}
/* Forward-propagate activations from layer Lower to layer Upper.
   Each upper unit computes a weighted sum of the lower layer's outputs
   (index 0 included — presumably the bias unit; confirm in network setup)
   and squashes it with the logistic sigmoid scaled by Net->Gain. */
void PropagateLayer(NET* Net, LAYER* Lower, LAYER* Upper)
{
    INT i, j;
    REAL netInput;
    for (i = 1; i <= Upper->Units; i++)
    {
        netInput = 0;
        for (j = 0; j <= Lower->Units; j++)
            netInput += Upper->Weight[i][j] * Lower->Output[j];
        /* Logistic activation: 1 / (1 + e^(-gain * net)) */
        Upper->Output[i] = 1 / (1 + exp(-Net->Gain * netInput));
    }
}
/* Run one full forward pass: propagate layer by layer,
   from the input layer up to the output layer. */
void PropagateNet(NET* Net)
{
    INT layer;
    for (layer = 0; layer < NUM_LAYERS - 1; layer++)
        PropagateLayer(Net, Net->Layer[layer], Net->Layer[layer + 1]);
}
/*****************************************************************************************
 B A C K P R O P A G A T I N G   E R R O R S  -- backward pass: analyse the error and adjust weights
****************************************************************************************/
/* Compute the deviation between actual and desired output.
   Fills OutputLayer->Error[] with the per-unit delta
   (gain * sigmoid-derivative * error) and accumulates the
   total squared error in Net->Error. */
void ComputeOutputError(NET* Net, REAL* Target)
{
    INT i;
    REAL actual, delta;
    Net->Error = 0;
    for (i = 1; i <= Net->OutputLayer->Units; i++)
    {
        actual = Net->OutputLayer->Output[i];
        delta = Target[i - 1] - actual;
        /* actual * (1 - actual) is the derivative of the logistic sigmoid. */
        Net->OutputLayer->Error[i] = Net->Gain * actual * (1 - actual) * delta;
        Net->Error += 0.5 * sqr(delta);
    }
}
/* Back-propagate the error one layer: each lower-layer unit's error is
   the weighted sum of the upper-layer errors feeding from it, scaled by
   the sigmoid derivative of its own activation. */
void BackpropagateLayer(NET* Net, LAYER* Upper, LAYER* Lower)
{
    INT lo, up;
    REAL act, accum;
    for (lo = 1; lo <= Lower->Units; lo++)
    {
        act = Lower->Output[lo];
        accum = 0;
        for (up = 1; up <= Upper->Units; up++)
            accum += Upper->Weight[up][lo] * Upper->Error[up];
        Lower->Error[lo] = Net->Gain * act * (1 - act) * accum;
    }
}
/* Back-propagate errors through the whole net, from the output layer
   down to (but not including) the input layer. */
void BackpropagateNet(NET* Net)
{
    INT layer;
    for (layer = NUM_LAYERS - 1; layer > 1; layer--)
        BackpropagateLayer(Net, Net->Layer[layer], Net->Layer[layer - 1]);
}
/* Update every weight by gradient descent with momentum:
   w += eta * delta * input  +  alpha * previous_step,
   and remember the new gradient step in dWeight for the next call. */
void AdjustWeights(NET* Net)
{
    INT l, i, j;
    REAL input, delta, prevStep;
    for (l = 1; l < NUM_LAYERS; l++)
    {
        for (i = 1; i <= Net->Layer[l]->Units; i++)
        {
            for (j = 0; j <= Net->Layer[l-1]->Units; j++)
            {
                input    = Net->Layer[l-1]->Output[j];
                delta    = Net->Layer[l]->Error[i];
                prevStep = Net->Layer[l]->dWeight[i][j];
                /* Eta is the learning rate, Alpha the momentum factor. */
                Net->Layer[l]->Weight[i][j] +=
                    Net->Eta * delta * input + Net->Alpha * prevStep;
                Net->Layer[l]->dWeight[i][j] = Net->Eta * delta * input;
            }
        }
    }
}
/* Run the network on one sample: forward pass, read the output, and
   compute the error against Target. When Training is true, also run the
   backward pass and adjust the weights. */
void SimulateNet(NET* Net, REAL* Input, REAL* Output, REAL* Target, BOOL Training)
{
    SetInput(Net, Input);            /* load input layer            */
    PropagateNet(Net);               /* forward pass                */
    GetOutput(Net, Output);          /* read output layer           */
    ComputeOutputError(Net, Target); /* total error vs. target      */
    if (Training)
    {
        BackpropagateNet(Net);       /* backward pass: unit errors  */
        AdjustWeights(Net);          /* gradient + momentum update  */
    }
}
/* Train the network: Epochs * TRAIN_YEARS samples, each drawn at a
   uniformly random year in [TRAIN_LWB, TRAIN_UPB]. The N preceding
   sunspot values are the input, the year's value the target. */
void TrainNet(NET* Net, INT Epochs)
{
    INT sample, year;
    REAL prediction[M];
    for (sample = 0; sample < Epochs * TRAIN_YEARS; sample++)
    {
        year = RandomEqualINT(TRAIN_LWB, TRAIN_UPB);
        SimulateNet(Net, &(Sunspots[year - N]), prediction, &(Sunspots[year]), TRUE);
    }
}
/* Evaluate the network (no weight updates) over the training and test
   year ranges, accumulating the summed error of each into the globals
   TrainError / TestError, and log the normalized errors (NMSE). */
void TestNet(NET* Net)
{
    INT year;
    REAL prediction[M];

    TrainError = 0;
    for (year = TRAIN_LWB; year <= TRAIN_UPB; year++)
    {
        SimulateNet(Net, &(Sunspots[year - N]), prediction, &(Sunspots[year]), FALSE);
        TrainError += Net->Error;
    }

    TestError = 0;
    for (year = TEST_LWB; year <= TEST_UPB; year++)
    {
        SimulateNet(Net, &(Sunspots[year - N]), prediction, &(Sunspots[year]), FALSE);
        TestError += Net->Error;
    }

    fprintf(f, "\nNMSE is %0.3f on Training Set and %0.3f on Test Set",
            TrainError / TrainErrorPredictingMean,
            TestError / TestErrorPredictingMean);
}
/* Prediction report over the evaluation years. For each year, print:
   - the actual sunspot value,
   - the open-loop prediction (fed with real history, Sunspots),
   - the closed-loop prediction (fed with its own past predictions,
     Sunspots_, which is updated in place each iteration). */
void EvaluateNet(NET* Net)
{
    INT year;
    REAL openLoop[M];
    REAL closedLoop[M];

    fprintf(f, "\n\n\n");
    fprintf(f, "Year Sunspots Open-Loop Prediction Closed-Loop Prediction\n");
    fprintf(f, "\n");
    for (year = EVAL_LWB; year <= EVAL_UPB; year++)
    {
        SimulateNet(Net, &(Sunspots [year - N]), openLoop,   &(Sunspots [year]), FALSE);
        SimulateNet(Net, &(Sunspots_[year - N]), closedLoop, &(Sunspots_[year]), FALSE);
        /* Feed the closed-loop prediction back as future "history". */
        Sunspots_[year] = closedLoop[0];
        fprintf(f, "%d %0.3f %0.3f %0.3f\n",
                FIRST_YEAR + year,
                Sunspots[year],
                openLoop[0],
                closedLoop[0]);
    }
}
/******************************************************************************
M A I N
******************************************************************************/
/* Entry point: build and train the network with early stopping.
   Train in bursts of 10 epochs; after each burst, measure the test
   error. Save the weights whenever the test error improves; once it
   climbs 20% above the best seen, stop and restore the saved weights
   (classic early-stopping to avoid overfitting). */
int main(void)  /* was `void main()` — non-standard; hosted C requires int main */
{
    NET  Net;
    BOOL Stop;
    REAL MinTestError;

    InitializeRandoms();         /* seed the random number generator   */
    GenerateNetwork(&Net);       /* build the network structure        */
    RandomWeights(&Net);         /* random initial weights             */
    InitializeApplication(&Net); /* application-specific setup         */

    Stop = FALSE;
    MinTestError = MAX_REAL;
    do
    {
        TrainNet(&Net, 10);      /* train for 10 epochs                */
        TestNet(&Net);
        if (TestError < MinTestError)
        {
            /* New best generalization: remember these weights. */
            fprintf(f, " - saving Weights ...");
            MinTestError = TestError;
            SaveWeights(&Net);
        }
        else if (TestError > 1.2 * MinTestError)
        {
            /* Test error has degraded well past the best point:
               stop training and fall back to the saved weights. */
            fprintf(f, " - stopping Training and restoring Weights ...");
            Stop = TRUE;
            RestoreWeights(&Net);
        }
    } while (NOT Stop);

    TestNet(&Net);               /* final error report                 */
    EvaluateNet(&Net);           /* prediction table                   */
    FinalizeApplication(&Net);
    return 0;
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -