// dynbp.cpp — dynamic BP (back-propagation) neural network implementation
fNext_err=pTeach[j]-fAct;
else //隐含层节点
{
fNext_err=0;
for(int k=0;k<pLayer[i+1].nNote_num;++k)
fNext_err+=pLayer[i+1].pNote[k].fErr*pWeight[i][j][k];
}
pLayer[i].pNote[j].fErr=fAct*(1-fAct)*fNext_err;//计算节点误差
}
}
//------------------------------------------------------------------------------
// Measure the network's output error against the teaching signal,
// in preparation for back-propagation.
// pTeach: raw (un-normalized) target values, one per output node.
// Returns 1/2 * sum of squared differences between the target and the
// de-normalized activation of each output node.
float CNet::caculate_out_err(float *pTeach)
{
	const int nOut = nLay_num - 1;  // index of the output layer
	float fSum_err = 0;
	for (int i = 0; i < pLayer[nOut].nNote_num; ++i)
	{
		// Compare the target with the de-normalized output activation.
		float fDiff = pTeach[i] - anti_normal(pLayer[nOut].pNote[i].fActivation,
		                                      fNorMax, fNorMin);
		fSum_err += sqr(fDiff);
	}
	return 0.5 * fSum_err;  // conventional 1/2 * SSE
}
//------------------------------------------------------------------------------
// Apply the per-node errors computed by back-propagation: update every
// threshold and weight, then clear the errors for the next sample.
// Fix: the original relied on the pre-standard (VC6) rule that leaked the
// loop index `i` out of the first for-statement; under standard C++ that
// does not compile, so each loop now declares its own index.
void CNet::adjust_weight()
{
	// Adjust thresholds of every non-input layer.
	for (int i = nLay_num - 1; i > 0; --i)
		for (int j = 0; j < pLayer[i].nNote_num; ++j)
			pLayer[i].pNote[j].fThreshold += pLayer[i].pNote[j].fErr * fRate;
	// Adjust weights between consecutive layers: dW = rate * activation * error.
	for (int i = 0; i < nLay_num - 1; ++i)
		for (int j = 0; j < pLayer[i].nNote_num; ++j)
			for (int k = 0; k < pLayer[i + 1].nNote_num; ++k)
			{
				float fAct = pLayer[i].pNote[j].fActivation;
				float fErr = pLayer[i + 1].pNote[k].fErr;
				pWeight[i][j][k] += fRate * fAct * fErr;
			}
	// Reset node errors to zero before the next training sample.
	for (int i = nLay_num - 1; i > 0; --i)
		for (int j = 0; j < pLayer[i].nNote_num; ++j)
			pLayer[i].pNote[j].fErr = 0;
}
//------------------------------------------------------------------------------
//保存矩阵权值,即保存训练后的神经元间传输的权值,存在weight.txt中
void CNet::save_weight()
{
ofstream sw("weight.txt");
assert(sw);
for(int i=0;i<nLay_num-1;++i)
for(int j=0;j<pLayer[i].nNote_num;++j)
for(int k=0;k<pLayer[i+1].nNote_num;++k)
sw<<pWeight[i][j][k]<<" ";
sw.close();
}
//------------------------------------------------------------------------------
//得到矩阵权值,即初始化神经元的权值,从weight.txt文件中得到权值
void CNet::get_weight()
{
ifstream gw("weight.txt");
assert(gw);
for(int i=0;i<nLay_num-1;++i)
for(int j=0;j<pLayer[i].nNote_num;++j)
for(int k=0;k<pLayer[i+1].nNote_num;++k)
gw>>pWeight[i][j][k];
gw.close();
// cout<<"已经导出权矩阵"<<endl;
}
//------------------------------------------------------------------------------
//保存好训练过的阈值在threshold.txt中
void CNet::save_threshold()
{
ofstream st("threshold.txt");
assert(st);
for(int i=0;i<nLay_num;++i)
for(int j=0;j<pLayer[i].nNote_num;++j)
st<<pLayer[i].pNote[j].fThreshold<<" ";
st.close();
}
//------------------------------------------------------------------------------
//从threshold.txt取出阈值
void CNet::get_threshold()
{
ifstream gt("threshold.txt");
assert(gt);
for(int i=0;i<nLay_num;++i)
for(int j=0;j<pLayer[i].nNote_num;++j)
gt>>pLayer[i].pNote[j].fThreshold;
gt.close();
// cout<<"已经导出节点阈值"<<endl;
}
//------------------------------------------------------------------------------
//训练函数
float CNet::train(float *NorIn, float *NorOut, float *In, float *Out)
{
float OnceErr;
//前向计算
propagation(NorIn);
//反向传递误差
back_propagation(NorOut);
//调整权值
adjust_weight();
//返回状态
OnceErr=caculate_out_err(Out);
return OnceErr;
}
//------------------------------------------------------------------------------
// Train the network over the whole sample group until the worst per-sample
// error drops below expect_err, or maxcircle epochs have been run.
// Fix: the per-epoch maximum error is now reset at the start of every epoch.
// Previously `Err` kept the maximum over ALL epochs, so the stop condition
// could never observe improvement and the loop always ran maxcircle times
// even after the network had converged.
void CNet::group_train(float expect_err, int maxcircle)
{
	int circle = 0;  // epochs completed
	float max_err;   // worst single-sample error within one epoch

	// Initial epoch to obtain a starting error figure.
	max_err = 0;
	for (int i = 0; i < nGroup_num; ++i)
	{
		float sample_err = train(pNor_in[i], pNor_out[i],
		                         pGroup_in[i], pGroup_out[i]);
		if (max_err < sample_err)
			max_err = sample_err;
	}

	// Keep training full epochs until the worst sample error is acceptable
	// or the iteration budget is exhausted.
	while ((max_err > expect_err) && (circle < maxcircle))
	{
		max_err = 0;  // restart the per-epoch maximum (this was missing)
		for (int i = 0; i < nGroup_num; ++i)
		{
			float sample_err = train(pNor_in[i], pNor_out[i],
			                         pGroup_in[i], pGroup_out[i]);
			if (max_err < sample_err)
				max_err = sample_err;
		}
		circle++;
	}
}
//------------------------------------------------------------------------------
// Run the trained network on the test data in compute.txt and write the
// results to outcome.txt.
// Fix: each for-loop now declares its own index; the original relied on the
// pre-standard (VC6) rule that leaked `i` out of the first for-statement and
// does not compile under standard C++. Also removes the unused local `sum`.
void CNet::run()
{
	get_weight();     // load trained weights from weight.txt
	get_threshold();  // load trained thresholds from threshold.txt
	int nIn = pLayer[0].nNote_num;              // input-layer node count
	int nOut = pLayer[nLay_num - 1].nNote_num;  // output-layer node count
	//------------------------------------------
	int nCmpt_num;  // number of test data groups
	ifstream cmptfile("compute.txt");
	assert(cmptfile);
	ofstream outfile("outcome.txt");
	assert(outfile);
	char hd[20];  // header token / per-group label buffer
	cmptfile >> hd;
	cmptfile >> nCmpt_num;  // read the test-group count
	outfile << hd << " " << nCmpt_num << endl;
	// Allocate raw and normalized input/output buffers for every test group.
	// (The *_out buffers are unused while the teaching-data comparison is
	// disabled, but are kept so it can be re-enabled without changes.)
	float **pCmpt_in, **pCmpt_out, **pCmNor_in, **pCmNor_out;
	pCmpt_in = new float*[nCmpt_num];
	pCmpt_out = new float*[nCmpt_num];
	pCmNor_in = new float*[nCmpt_num];
	pCmNor_out = new float*[nCmpt_num];
	for (int i = 0; i < nCmpt_num; ++i)
	{
		pCmpt_in[i] = new float[nIn];
		pCmpt_out[i] = new float[nOut];
		pCmNor_in[i] = new float[nIn];
		pCmNor_out[i] = new float[nOut];
	}
	//------------------------------------------
	for (int i = 0; i < nCmpt_num; ++i)  // read the test data
	{
		cmptfile >> hd;  // skip the group label
		for (int j = 0; j < nIn; ++j)
		{
			cmptfile >> pCmpt_in[i][j];  // raw input value
			pCmNor_in[i][j] = normal(pCmpt_in[i][j], fNorMax, fNorMin);
		}
	}
	cmptfile.close();
	//------------------------------------------
	for (int i = 0; i < nCmpt_num; ++i)
	{
		propagation(pCmNor_in[i]);  // forward pass only — no training
		outfile << "[第" << i + 1 << "组]" << endl;
		outfile << " 输入数据: ";
		for (int j = 0; j < nIn; ++j)
			outfile << pCmpt_in[i][j] << " ";
		outfile << endl << " 计算结果: ";
		for (int k = 0; k < nOut; ++k)
			outfile << anti_normal(
				pLayer[nLay_num - 1].pNote[k].fActivation,
				fNorMax,
				fNorMin) << " " << endl << " ";
		outfile << endl;
	}
	outfile << "******计算完毕******" << endl;
	outfile.close();
	//-----------------------------------------------------------------
	for (int i = 0; i < nCmpt_num; ++i)  // release the test buffers
	{
		delete [] pCmpt_in[i];
		delete [] pCmpt_out[i];
		delete [] pCmNor_in[i];
		delete [] pCmNor_out[i];
	}
	delete [] pCmpt_in;
	delete [] pCmpt_out;
	delete [] pCmNor_in;
	delete [] pCmNor_out;
}
// end of dynbp.cpp