// bpnn.cpp
sum=sum+Out[L-1][pren]*W[L-1][L][pren][n];
}
Sum[L][n]=sum-TH[L][n];///减去阈值 ->激活值
Out[L][n]=f(Sum[L][n]);//激活函数
}
}
//end 其他层的输出
}
//---------------------------------------------------------------------------
void __fastcall TForm1::backward(int trainID)
{ // Back-propagation pass for one training pattern (trainData[trainID]).
  // Computes the error term d[L][n] for every layer (output layer first,
  // then the hidden layers), derives the threshold/weight increments
  // dTH/dW scaled by the per-layer learning rate, and finally applies
  // the increments to TH and W.
  // NOTE(review): loop indices n, pren, postn, L and the accumulator sum
  // are shared globals/members, not locals — their values after this call
  // may be observed elsewhere; presumably forward(trainID) was run first.
//begin output layer (layer LAYERS-1): delta = (target - actual) * f'(net)
for(n=0;n<NNum[LAYERS-1];n++)
d[LAYERS-1][n]=( trainData[trainID][NNum[0]+n] //desired output stored after the NNum[0] inputs
-Out[LAYERS-1][n] )*df(Sum[LAYERS-1][n]);
for(n=0;n<NNum[LAYERS-1];n++)
{
// threshold increment: learning rate * delta * 1 (the constant bias input)
dTH[LAYERS-1][n]=learnRatio[LAYERS-1]*d[LAYERS-1][n]*1;
for(pren=0;pren<NNum[LAYERS-2];pren++)
{
// weight increment: delta term * activation of the presynaptic unit
dW[LAYERS-2][LAYERS-1][pren][n]=dTH[LAYERS-1][n]*Out[LAYERS-2][pren];
}
}
//end output layer
//begin hidden layers LAYERS-2 down to 1
for(L=LAYERS-2;L>0;L--)
{
for( n=0;n<NNum[L];n++)
{
/// compute d[L][n]: back-propagate the deltas of layer L+1 through the
/// CURRENT weights (updates are deferred to the final loop below)
sum=0.0;
for(postn=0;postn<NNum[L+1];postn++)
sum+=d[L+1][postn]*W[L][L+1][n][postn];
d[L][n]=sum*df(Sum[L][n]);
//
dTH[L][n]=learnRatio[L]*d[L][n];
for(pren=0;pren<NNum[L-1];pren++)
{
dW[L-1][L][pren][n]=dTH[L][n]*Out[L-1][pren];
}
}
}
//end hidden layers LAYERS-2 down to 1
//begin apply the new W and TH, layers LAYERS-1 down to 1
for(L=LAYERS-1;L>0;L--)
{
for( n=0;n<NNum[L];n++)
{
for(pren=0;pren<NNum[L-1];pren++)
{
W[L-1][L][pren][n]+=dW[L-1][L][pren][n];
}
TH[L][n]+=dTH[L][n];
}
}
//end apply the new W and TH, layers LAYERS-1 down to 1
}
//---------------------------------------------------------------------------
double __fastcall TForm1::f(double x)
{
    // Activation function: logistic sigmoid, maps any real to (0,1).
    return 1.0 / (1.0 + exp(-x));
    // Alternative activation kept from the original (scaled/offset sigmoid):
    //return( 1.5/(1+exp(-x))-0.25);
}
//---------------------------------------------------------------------------
double __fastcall TForm1::df(double x)
{ // Derivative of the logistic sigmoid: f'(x) = f(x) * (1 - f(x)).
  // Evaluate f(x) once — the original called f(x) twice, doubling the
  // exp() cost inside the training inner loops that call df() per unit.
    const double fx = f(x);
    return fx * (1.0 - fx);
}
//---------------------------------------------------------------------------
void __fastcall TForm1::error()
{ // Computes the squared-error measure over the whole training set for the
  // current epoch and stores it in err[learnN].
  // NOTE(review): t, n and singleErr are shared globals, not locals.
err[learnN]=0;
for( t=0;t<trainDataNum;t++)
{ //no need to use randList here: the error sum is order-independent
forward(t); //run the forward pass to get the network output for pattern t
singleErr=0;
// 1/2 * sum of squared differences over the output units;
// the targets are stored after the NNum[0] inputs in trainData
for (n=0;n<NNum[LAYERS-1];n++)
{
singleErr+=(Out[LAYERS-1][n]-trainData[t][NNum[0]+n])
*(Out[LAYERS-1][n]-trainData[t][NNum[0]+n])/2.0;
}
err[learnN]+=singleErr;
}
// NOTE(review): normalizes by (trainDataNum + NNum[LAYERS-1]); a mean over
// patterns x outputs would divide by their PRODUCT — confirm this is intended.
// (+0.0 forces floating-point division.)
err[learnN]=err[learnN]/(trainDataNum+0.0+NNum[LAYERS-1]);
//
}
//---------------------------------------------------------------------------
double __fastcall TForm1::myRand()
{ // Uniform random value in [-0.1, 0.1], used for weight initialisation:
  // rand()/RAND_MAX is in [0,1]; /5 scales to [0,0.2]; -0.1 centres it.
  // FIX: use RAND_MAX instead of the hard-coded 0x7FFFU — identical on the
  // Borland RTL (RAND_MAX == 0x7FFF) but correct on any other runtime.
    return ( (rand()+0.0)/RAND_MAX/5-0.1 );
}
//---------------------------------------------------------------------------
void __fastcall TForm1::randListF() //randListF function
{
    // Refill randList with 0..trainDataNum-1, then shuffle it to give a
    // random presentation order for the training patterns.
    // NOTE(review): i is a shared global; it ends at trainDataNum exactly
    // as in the original.
    for(i=0;i<trainDataNum;i++)
        randList[i]=i;
    // FIX: shuffle the array in place. Plain pointers are valid
    // random-access iterators, so the original's temporary vector
    // (copy in, shuffle, copy back element by element) was pure overhead.
    // (random_shuffle is deprecated in modern C++; fine on this toolchain.)
    random_shuffle(randList, randList+trainDataNum);
}
//---------------------------------------------------------------------------
void __fastcall TForm1::showErrorClick(TObject *Sender)
{
    // Plot the recorded training-error history err[0..learnN-2] on Series1.
    DecisionGraph1->Legend->Visible = false;
    Series1->Clear();
    for(i = 0; i < learnN - 1; ++i)
    {
        // +1 offsets the curve above the chart baseline.
        // NOTE(review): presumably intentional — confirm against the axis setup.
        Series1->Add(err[i] + 1, "", clRed);
    }
}
//---------------------------------------------------------------------------
void __fastcall TForm1::testBtnClick(TObject *Sender)
{
    // Run every test pattern through the trained network, threshold the
    // outputs at 0.5, compare with the expected outputs, list per-pattern
    // results in ListBox1 and report the overall recognition rate.
    String str;
    double tmpOut;   // thresholded (0/1) value of one output unit
    int flag;        // 1 while every output unit of the current pattern matches
    int count=0;     // number of correctly recognised patterns
    ListBox1->Clear();
    for(t=0;t<testDataNum;t++)
    {
        flag=1;
        str="";
        // Input layer: copy the test pattern onto the input units.
        for(n=0;n<NNum[0];n++)
            Out[0][n]=testData[t][n];
        // Forward pass through the remaining layers.
        for(L=1;L<LAYERS;L++)                    // which layer
        {
            for(n=0;n<NNum[L];n++)               // which unit in that layer
            {
                sum=0.0;
                for(pren=0;pren<NNum[L-1];pren++) // which unit in previous layer
                {
                    // Fractal-network mode: prune connections whose mask
                    // entry C[..] is 0 (only for layers up to LAYERS-3).
                    if(RadioF->Checked==true)
                    {
                        if( (C[L-1][pren][n]==0)&&( (L-1)<=LAYERS-3 ))
                            W[L-1][L][pren][n]=0;
                    }
                    sum=sum+Out[L-1][pren]*W[L-1][L][pren][n];
                }
                Sum[L][n]=sum-TH[L][n];  // subtract threshold -> net input
                Out[L][n]=f(Sum[L][n]);  // activation function
            }
        }
        // Classification: threshold each output unit and compare with target.
        for(n=0;n<NNum[LAYERS-1];n++)
        {
            tmpOut = (Out[LAYERS-1][n]>=0.5) ? 1 : 0;
            if( tmpOut!=testData[t][NNum[0]+n] )
                flag=0;  // any mismatching unit marks the whole pattern wrong
            str+=FloatToStrF(Out[LAYERS-1][n],ffFixed,7,2)+" ";
        }
        str+="->";
        for(n=0;n<NNum[LAYERS-1];n++)
        {
            str+=FloatToStr(testData[t][NNum[0]+n])+" ";
        }
        if(flag==1) {str+=" R";count++;}
        else str+=" W";
        ListBox1->Items->Append(IntToStr(t+1)
            +": "+str);
    }
    // Recognition rate as an integer percentage.
    // BUGFIX: the original took SubString(3,2) of FloatToStr(count/total),
    // which printed e.g. a rate of 0.9 as "9" (not "90") and 0.05 as "05";
    // compute the percentage numerically instead (truncation matches the
    // original's behaviour for two-digit rates such as 0.85 -> "85").
    if(count==testDataNum) str="100";
    else str=IntToStr( (int)(count*100.0/(testDataNum+0.0)) );
    ListBox1->Items->Append(" ");
    ListBox1->Items->Append(" 正确率:"
        +str
        +"%");
    // Show the rate on the label matching the selected network mode.
    if(RadioF->Checked==true)
    { Label11->Caption=str+"%" ;
    }
    else Label10->Caption=str+"%" ;
    // BUGFIX: the original format string "%s%\n" contained a stray '%'
    // (an undefined conversion) and passed a VCL String object through
    // C varargs — both undefined behaviour. Pass the C string instead.
    fprintf(outputFile, "%s\n", str.c_str());
}
//---------------------------------------------------------------------------
void __fastcall TForm1::TimeStart()
{ // Record the start timestamp (milliseconds) for timing a run.
time1=timeGetTime(); //winmm (Win API) millisecond timer
////////
}
//---------------------------------------------------------------------------
void __fastcall TForm1::TimeEnd(TObject *Sender)
{
    // Stop the stopwatch started by TimeStart() and, when invoked on
    // behalf of the training button, show the elapsed milliseconds.
    time2 = timeGetTime();
    time2_1 = time2 - time1;
    if (Sender != trainBtn)
        return;
    Label3->Caption = IntToStr(time2_1);
}
//---------------------------------------------------------------------------
void __fastcall TForm1::RadioBClick(TObject *Sender)
{
    // Plain BP mode selected: the fractal-network labels do not apply.
    const bool visible = false;
    Label4->Visible = visible;
    Label5->Visible = visible;
}
//---------------------------------------------------------------------------
void __fastcall TForm1::RadioFClick(TObject *Sender)
{
    // Fractal-network mode selected: show its extra labels.
    const bool visible = true;
    Label4->Visible = visible;
    Label5->Visible = visible;
}
//---------------------------------------------------------------------------
void __fastcall TForm1::SpeedButton1Click(TObject *Sender)
{
    // Batch experiment: run the full read / train / plot / test cycle
    // ten times, then close the results file.
    for(int run = 0; run < 10; ++run)
    {
        readBtnClick(Sender);
        trainBtnClick(Sender);
        showErrorClick(Sender);
        testBtnClick(Sender);
    }
    // BUGFIX: guard and null the handle so a second click cannot call
    // fclose() on an already-closed FILE* (undefined behaviour).
    if(outputFile)
    {
        fclose(outputFile);
        outputFile = NULL;
    }
}
//---------------------------------------------------------------------------
// (end of file — trailing code-viewer UI text removed)