⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 bpanndlg.cpp

📁 基于Visual C++6.0的BP神经网络程序
💻 CPP
📖 第 1 页 / 共 3 页
字号:
	double *Hidepartial1=new double[Hidenodes];//hidden-layer error partials; one scratch array per possible hidden layer (1..5)
	double *Hidepartial2=new double[Hidenodes];
	double *Hidepartial3=new double[Hidenodes];
	double *Hidepartial4=new double[Hidenodes];
	double *Hidepartial5=new double[Hidenodes];

	// Free the partial-derivative buffers not needed for the configured number
	// of hidden layers (Hidelayer_flag appears to be the hidden-layer count,
	// 1..5; flag 5 and any other value keep all five arrays alive).
	// NOTE(review): simpler to allocate only the arrays actually needed; the
	// freed pointers are also left dangling (not nulled) after delete[].
	switch(Hidelayer_flag)
	{
	case 1:
		delete [] Hidepartial5;
		delete [] Hidepartial2;
		delete [] Hidepartial3;
		delete [] Hidepartial4;
		break;
	case 2:
		delete [] Hidepartial5;
		delete [] Hidepartial3;
		delete [] Hidepartial4;
		break;
	case 3:
		delete [] Hidepartial5;
		delete [] Hidepartial4;
	    break;
	case 4:
		delete [] Hidepartial5;
	    break;
	default:
	    break;
	}
	double *Errorst=new double[Innodes];//error storage -- NOTE(review): sized by Innodes and never used in this fragment; confirm purpose
	double *m_Vdelfo=new double[Outnodes*Hidenodes];//V (last hidden -> output) weight deltas, current step; original comment said "hidden to input layer", likely a slip
	double *m_Vdel=new double[Outnodes*Hidenodes];//V deltas from the previous step (momentum term)
	double *m_Wdelfo=new double[Innodes*Hidenodes];//W (input -> first hidden) weight deltas, current step
	double *m_Wdel=new double[Innodes*Hidenodes];//W deltas from the previous step (momentum term)
	double *m_Ydelfo=new double[Hidenodes*Hidenodes];//Y (hidden -> hidden) weight deltas; original comment said "input to hidden", likely copy/paste -- NOTE(review): allocated but never used in this fragment
	double *m_Ydel=new double[Hidenodes*Hidenodes];
	//~initialization
	//=============================

	//status display
	m_state.Format("神经网络权值初始化中...请等待!\r\n");
	UpdateData(false);
	//=============================
	// Initialize the weights: each InitWeights overload seeds one additional
	// hidden-to-hidden matrix (Yp1..Yp4) per extra hidden layer.
	switch(Hidelayer_flag)
	{
	case 1:
		InitWeights(Wp,Vp);
		break;
	case 2:
		InitWeights(Wp,Vp,Yp1);
		break;
	case 3:
		InitWeights(Wp,Vp,Yp1,Yp2);
	    break;
	case 4:
		InitWeights(Wp,Vp,Yp1,Yp2,Yp3);
	    break;
	case 5:
		InitWeights(Wp,Vp,Yp1,Yp2,Yp3,Yp4);
		break;
	default:
		{
			// Invalid hidden-layer count: report and bail out.
			// NOTE(review): this early return leaks every new[] buffer above.
			m_state.Format("初始化隐层权值失败!\r\n");
			UpdateData(false);
			return;
		}
	    break;
	}
	//~weight initialization
	//================================
	
	//status display
	m_state.Format("神经网络权值更新中...请等待!\r\n");
	UpdateData(false);

//================================================================
// Training loop: forward pass, error/delta computation, weight update.
	int m_count=0;
	while (m_count<10)//m_Maxcount -- NOTE(review): iteration cap hard-coded to 10; the original comment suggests m_Maxcount was intended here
	{
		
		//================================
		// Forward pass: propagate Input through the configured number of
		// hidden layers (Y1..Y5 hold per-layer activations) to Yout.
		switch(Hidelayer_flag)
		{
		case 1:
			{
				ForCalcul(Input,Wp,Y1,Innodes,Hidenodes);
				ForCalcul(Y1,Vp,Yout,Hidenodes,Outnodes);
			}
			break;
		case 2:
			{
				ForCalcul(Input,Wp,Y1,Innodes,Hidenodes);
				ForCalcul(Y1,Yp1,Y2,Hidenodes,Hidenodes);
				ForCalcul(Y2,Vp,Yout,Hidenodes,Outnodes);
			}
			break;
		case 3:
			{
				ForCalcul(Input,Wp,Y1,Innodes,Hidenodes);
				ForCalcul(Y1,Yp1,Y2,Hidenodes,Hidenodes);
				ForCalcul(Y2,Yp2,Y3,Hidenodes,Hidenodes);
				ForCalcul(Y3,Vp,Yout,Hidenodes,Outnodes);
			}
			break;
		case 4:
			{
				ForCalcul(Input,Wp,Y1,Innodes,Hidenodes);
				ForCalcul(Y1,Yp1,Y2,Hidenodes,Hidenodes);
				ForCalcul(Y2,Yp2,Y3,Hidenodes,Hidenodes);
				ForCalcul(Y3,Yp3,Y4,Hidenodes,Hidenodes);
				ForCalcul(Y4,Vp,Yout,Hidenodes,Outnodes);
			}
			break;
		case 5:
			{
				ForCalcul(Input,Wp,Y1,Innodes,Hidenodes);
				ForCalcul(Y1,Yp1,Y2,Hidenodes,Hidenodes);
				ForCalcul(Y2,Yp2,Y3,Hidenodes,Hidenodes);
				ForCalcul(Y3,Yp3,Y4,Hidenodes,Hidenodes);
				ForCalcul(Y4,Yp4,Y5,Hidenodes,Hidenodes);
				ForCalcul(Y5,Vp,Yout,Hidenodes,Outnodes);
			}
			break;
		default:
			{
				// NOTE(review): this early return leaks every new[] buffer
				// allocated before the loop.
				m_state.Format("前向计算失败,检查值!\r\n");
				UpdateData(false);
				return;
			}
			break;
		}
		//~forward pass
		//=====================================
		
		//=====================================
		// Error and output-layer delta computation
		double ErroTemp = 0.0;
		int ie,je;
		for(ie = 0;ie < Outnodes;ie++)
		{
			ErroTemp+= (Output[ie]-Yout[ie])*(Output[ie]-Yout[ie]); //accumulate squared error
			// NOTE(review): the 0.5 factor is applied INSIDE the loop, so
			// earlier terms get halved repeatedly; the conventional
			// E = 0.5 * sum would scale once after the loop. Erro[ie] thus
			// holds a running re-scaled sum, not a per-node error -- confirm.
			ErroTemp = 0.5*ErroTemp;  
			Erro[ie] = ErroTemp;
		}

		for(je = 0;je < Outnodes;je++)
		{
			Outpartial[je] = (Output[je]-Yout[je])*Yout[je]*(1-Yout[je]); //output-layer delta: (d - y) * y * (1 - y)
		}

		// Back-propagate the deltas from the output toward the first hidden
		// layer; the case number matches the hidden-layer count.
		switch(Hidelayer_flag)
		{
		case 1:
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Outnodes;ie++)
				{
					tmp = tmp+Outpartial[ie]*Vp[je*Outnodes+ie]; //sum of output deltas weighted by V
				}
				Hidepartial1[je] = tmp*Y1[je]*(1-Y1[je]);  //hidden-layer delta
			}
			break;
		case 2:
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Outnodes;ie++)
				{
					tmp = tmp+Outpartial[ie]*Vp[je*Outnodes+ie]; //sum of output deltas weighted by V
				}
				Hidepartial2[je] = tmp*Y2[je]*(1-Y2[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial2[ie]*Yp1[je*Outnodes+ie]; //sum for the hidden-layer delta -- NOTE(review): Yp1 is Hidenodes x Hidenodes but is indexed with stride Outnodes (je*Outnodes+ie); likely should be je*Hidenodes+ie. Wrong element (or out-of-bounds read) whenever Outnodes != Hidenodes. The same pattern recurs in every Yp* access below.
				}
				Hidepartial1[je] = tmp*Y1[je]*(1-Y1[je]);  //hidden-layer delta
			}
			break;
		case 3:
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Outnodes;ie++)
				{
					tmp = tmp+Outpartial[ie]*Vp[je*Outnodes+ie]; //sum of output deltas weighted by V
				}
				Hidepartial3[je] = tmp*Y3[je]*(1-Y3[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial3[ie]*Yp2[je*Outnodes+ie]; //sum for the hidden-layer delta (see stride note above)
				}
				Hidepartial2[je] = tmp*Y2[je]*(1-Y2[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial2[ie]*Yp1[je*Outnodes+ie]; //sum for the hidden-layer delta (see stride note above)
				}
				Hidepartial1[je] = tmp*Y1[je]*(1-Y1[je]);  //hidden-layer delta
			}
			break;
		case 4:
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Outnodes;ie++)
				{
					tmp = tmp+Outpartial[ie]*Vp[je*Outnodes+ie]; //sum of output deltas weighted by V
				}
				Hidepartial4[je] = tmp*Y4[je]*(1-Y4[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial4[ie]*Yp3[je*Outnodes+ie]; //sum for the hidden-layer delta (see stride note above)
				}
				Hidepartial3[je] = tmp*Y3[je]*(1-Y3[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial3[ie]*Yp2[je*Outnodes+ie]; //sum for the hidden-layer delta (see stride note above)
				}
				Hidepartial2[je] = tmp*Y2[je]*(1-Y2[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial2[ie]*Yp1[je*Outnodes+ie]; //sum for the hidden-layer delta (see stride note above)
				}
				Hidepartial1[je] = tmp*Y1[je]*(1-Y1[je]);  //hidden-layer delta
			}
			break;
		default:
			// default serves Hidelayer_flag == 5 (any invalid flag already
			// returned in the forward-pass switch above, so Hidepartial5 is
			// only reached when it is still allocated).
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Outnodes;ie++)
				{
					tmp = tmp+Outpartial[ie]*Vp[je*Outnodes+ie]; //sum of output deltas weighted by V
				}
				Hidepartial5[je] = tmp*Y5[je]*(1-Y5[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial5[ie]*Yp4[je*Outnodes+ie]; //sum for the hidden-layer delta (see stride note above)
				}
				Hidepartial4[je] = tmp*Y4[je]*(1-Y4[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial4[ie]*Yp3[je*Outnodes+ie]; //sum for the hidden-layer delta (see stride note above)
				}
				Hidepartial3[je] = tmp*Y3[je]*(1-Y3[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial3[ie]*Yp2[je*Outnodes+ie]; //sum for the hidden-layer delta (see stride note above)
				}
				Hidepartial2[je] = tmp*Y2[je]*(1-Y2[je]);  //hidden-layer delta
			}
			for(je = 0;je < Hidenodes;je++)
			{
				double tmp = 0.0;
				for(ie = 0;ie < Hidenodes;ie++)
				{
					tmp = tmp+Hidepartial2[ie]*Yp1[je*Outnodes+ie]; //sum for the hidden-layer delta (see stride note above)
				}
				Hidepartial1[je] = tmp*Y1[je]*(1-Y1[je]);  //hidden-layer delta
			}
		    break;
		}
		//~error computation
		//=====================================
		
		//=====================================
		// Weight update
		// NOTE(review): several apparent issues flagged inline below; also the
		// inter-hidden weight matrices Yp1..Yp4 are never updated anywhere in
		// this loop -- only Vp and Wp change. Confirm against the design.

		int ia,ja;
		for(ia = 0;ia < Hidenodes;ia++)       //adjust last-hidden -> output weights (V)
			for(ja = 0;ja < Outnodes;ja++)
			{
				m_Vdelfo[ia*Outnodes+ja] = 0.0;
				m_Vdel[ia*Hidenodes+ja] =0.0; // NOTE(review): stride Hidenodes here vs Outnodes everywhere else; zeroing m_Vdel right before use also makes the inertia term below always 0
				m_Vdelfo[ia*Outnodes+ja] = Learnratio*Outpartial[ja]*Y2[ja]; // NOTE(review): reads Y2 for every layer count and indexes it with [ja]; lr*Outpartial[ja]*lastHidden[ia] is the conventional delta -- confirm
				Vp[ia*Outnodes+ja] = Vp[ia*Outnodes+ja]+m_Vdelfo[ia*Outnodes+ja]+inertia*m_Vdel[ia*Outnodes+ja];
				m_Vdel[ia*Outnodes+ja] = m_Vdelfo[ia*Outnodes+ja];
			}
			
			for(ia = 0;ia < Innodes;ia++)         //adjust input -> first-hidden weights (W)
				for(ja = 0;ja < Hidenodes;ja++)
				{
					m_Wdelfo[ia*Hidenodes+ja] =0.0;
					m_Wdel[ia*Hidenodes+ja] =0.0; // NOTE(review): zeroed immediately before use, so the momentum term is always 0
					m_Wdelfo[ia*Hidenodes+ja] = Learnratio*Hidepartial1[ja]*Y1[ja]; // NOTE(review): conventional form is lr*Hidepartial1[ja]*Input[ia] -- confirm
					Wp[ia*Hidenodes+ja] = Wp[ia*Hidenodes+ja]+m_Wdelfo[ia*Hidenodes+ja]+inertia*m_Wdel[ia*Hidenodes+ja];
					m_Wdel[ia*Hidenodes+ja] = m_Wdelfo[ia*Hidenodes+ja];
				}	

		//~weight update
		//=====================================
		m_count++;
	}
//================================================================

// 		delete [] m_Vdelfo;
// 		delete [] m_Vdel;
// 		delete [] m_Wdelfo;
// 		delete [] m_Wdel;
// 		delete [] m_Ydelfo;
// 		delete [] m_Ydel;
	// NOTE(review): the delete[]s above are commented out, and none of the
	// other new[] buffers (Hidepartial1..5, Errorst) are freed in this
	// fragment, so every allocation leaks unless cleanup exists past this page.

	//=====================================
	// Dump the trained weights to a text file
	ofstream outQuanFile( "权值.txt", ios::out );
	
	outQuanFile<<"Out\n";
	for(int io6=0;io6<Outnodes;io6++)
	{
		for(int jo6=0;jo6<Hidenodes;jo6++)
		{
			outQuanFile<<Vp[io6*Hidenodes+jo6]<<","; // NOTE(review): layout Outnodes x Hidenodes here, but the update loop indexed Vp as [ia*Outnodes+ja]; the two disagree unless Outnodes == Hidenodes -- verify
		}
		outQuanFile<<"\n";
	}//final hidden layer -> output layer weights


	// Cascade: each case writes its top-most Yp matrix and then deliberately
	// falls through to the next, so e.g. flag 5 writes Yp4, Yp3, Yp2, Yp1.
	switch(Hidelayer_flag)
	{
	case 5:
		{
			outQuanFile<<"Hide\n";
			for(int io5=0;io5<Hidenodes;io5++)
			{
				for(int jo5=0;jo5<Hidenodes;jo5++)
				{
					outQuanFile<<Yp4[io5*Hidenodes+jo5]<<",";
				}
				outQuanFile<<"\n";
				
			}//hidden layer 4 -> hidden layer 5 weights
		}
		// intentional fall-through
	case 4:
		{
			outQuanFile<<"Hide\n";
			for(int io4=0;io4<Hidenodes;io4++)
			{
				for(int jo4=0;jo4<Hidenodes;jo4++)
				{
					outQuanFile<<Yp3[io4*Hidenodes+jo4]<<",";
				}
				outQuanFile<<"\n";
			}//hidden layer 3 -> hidden layer 4 weights
		}
		// intentional fall-through
	case 3:
		{
			outQuanFile<<"Hide\n";
			for(int io3=0;io3<Hidenodes;io3++)
			{
				for(int jo3=0;jo3<Hidenodes;jo3++)
				{
					outQuanFile<<Yp2[io3*Hidenodes+jo3]<<",";
				}
				outQuanFile<<"\n";
			}//hidden layer 2 -> hidden layer 3 weights
		}
		// intentional fall-through
	case 2:
		{
			outQuanFile<<"Hide\n";
			for(int io2=0;io2<Hidenodes;io2++)
			{
				for(int jo2=0;jo2<Hidenodes;jo2++)
				{
					outQuanFile<<Yp1[io2*Hidenodes+jo2]<<",";
				}
				outQuanFile<<"\n";
			}//hidden layer 1 -> hidden layer 2 weights
		}
		// intentional fall-through
	default:
		break;
	}
	outQuanFile<<"In\n";
	// Write the input -> first-hidden weights. The visible fragment ends in
	// the middle of this loop (page break); the body is on the next page.
	for(int io1=0;io1<Hidenodes;io1++)
	{
		for(int jo1=0;jo1<Innodes;jo1++)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -