⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 bp.cpp

📁 此为bp神经网络算法
💻 CPP
字号:
#include "stdafx.h"
#include <iostream>
#include <time.h>
#include <stdlib.h>
#include <math.h>

#define   HN       6              // number of hidden-layer neurons; tune and compare runs
#define   InputN   7              // number of input-layer neurons
#define   OutN     2              // number of output-layer neurons
#define   datanum  6              // number of training samples
// Sigmoid activation.  Fully parenthesized so the macro stays correct inside
// larger expressions (e.g. 1/g(x)) and with compound arguments (e.g. g(a-b));
// the original expanded to 1/(1+exp(-x)) with no parentheses, which breaks both.
#define   g(x)     (1.0/(1.0+exp(-(x))))


using namespace std;

void timestart(void);           // record the wall-clock time at program start
void timeend(void);             // record the wall-clock time at program end
double timevalue(void);         // elapsed seconds between timestart() and timeend()

int _tmain(int argc, _TCHAR* argv[])
{
	timestart();
	
	double x_out[InputN];            //输入层的输出值
	double hn_out[HN];               //隐含层的输出值
	double y_out[OutN];              //输出层的输出值
	double y[OutN];                  //输出层的期望输出值
	
	double w[InputN][HN];            //输入层到隐含层的权值
	double v[HN][OutN];              //隐含层到输出层的权值

	double deltaw[InputN][HN];
	double deltav[HN][OutN];
	
	double hn_delta[HN];             //隐含层的delta
	double y_delta[OutN];            //输出层的delta
	
	double error;                    //全局误差
	double errlimit=0.001;            //预设的全局误差极限
	double alpha=0.1, beta=0.1;      //预设的学习速率
	
	int loop=0;                      //训练次数
	int times=5000;                    //预设的训练次数上限
	int i, j, m;
	double max, min;
	double sumtemp;
	double errtemp;
	
	struct                           //训练样本对. 对XOR问题, 共有4对
	{
		double input[InputN];
		double teach[OutN];
	}data[datanum] = {{6.4479, 17.875, 24.925, 26.324, 52.825, 35.417, 52.013, 1,0},
		        {6.5748, 17.99, 28.637, 26.831, 54.77, 36.756, 54.592, 1,0}, 
	            {6.3533, 15.964, 24.787, 24.793, 49.924, 33.025, 49.597, 1,0},
				{6.3806, 14.07, 24.463, 24.929, 49.74, 31.972, 51.98, 0,1},
				{6.7335, 17.653, 26.721, 26.545, 54.826, 36.172, 53.261, 0,1},
                {6.4121, 14.065, 26.306, 24.966, 51.032, 31.998, 51.087, 0,1}}; //注意这里的值需要改变
	
	
	//-----------------------------------------------------------
	//初始化. 将连接权值、偏置系数置为(-1,1)之间的随机数
	cout<<"initializing............"<<endl;
	srand((unsigned)time(NULL));
	for(i=0; i<InputN; i++)        //w的随机输入
		for(j=0; j<HN; j++)
		{
			w[i][j] = ((double)rand()/32767.0)*2-1;
			deltaw[i][j] = 0;
//			cout<<"w["<<i+1<<"]["<<j+1<<"]="<<w[i][j]<<endl;
		}

	for(i=0; i<HN; i++)   //v的随机输入
		for(j=0;j<OutN;j++)
		{		
			v[i][j] = ((double)rand()/32767.0)*2-1;
			deltav[i][j] = 0;
//			cout<<"v["<<i+1<<"]["<<j+1<<"]="<<v[i][j]<<endl;
		}


	//训练.
	cout<<endl<<"training beginning............"<<endl;
	while(loop < times)
	{
		loop++;
		error = 0.0;
			
		for(m=0; m<datanum; m++)                     //对datanum个样本进行增量训练,m为样本个数
		{
			//输入正向传播
			max=0.0;
			min=0.0;
			for(i=0; i<InputN; i++) //设置输入层的输出
			{
				x_out[i] = data[m].input[i];
				if (max < x_out[i])   //比较得出输入的最大最小值
			    	max = x_out[i];
				
				if (min > x_out[i]) 
					min = x_out[i];
//				cout<<"x_out["<<i+1<<"]="<<x_out[i]<<endl;
			}

			for(i=0; i<InputN; i++)   //归一化后的输入值
			{
				x_out[i] = (x_out[i] - min) / (max-min);
//              cout<<"x_out["<<i+1<<"]="<<x_out[i]<<endl;
			}

			for(i=0; i<OutN; i++)              //输出层的期望输出
			{
				y[i] = data[m].teach[i];
//				cout<<"y["<<i+1<<"]="<<y[i]<<endl;
			}			
				
			for(i=0; i<HN; i++)                //计算隐含层的输出
			{
				sumtemp = 0.0;
				for(j=0; j<InputN; j++)
					sumtemp += w[j][i] * x_out[j];
				hn_out[i] = tanh(sumtemp);     //隐含层作用函数为y=tanh(x)
			}
				
			for(i=0; i<OutN; i++)              //计算输出层的输出
			{
				sumtemp = 0.0;
				for(j=0; j<HN; j++) 
					sumtemp += v[j][i] * hn_out[j];
				y_out[i] = g(sumtemp);            //输出层作用函数为sigmod函数
//				y_out[i] = tanh(sumtemp);
			}

				
			//误差反向传播
			for(i=0; i<OutN; i++)
			{
				errtemp = y[i] - y_out[i]; 
				y_delta[i] = errtemp * g(y_out[i]) * (1.0 - g(y_out[i]));
//                y_delta[i] = errtemp * (1/(1+exp(-y_out[i]))) * (1.0 - (1/(1+exp(-y_out[i]))));    //计算输出层的delta
				error += (errtemp * errtemp);  //所有样本的全局误差
			}
				
			for(i=0; i<HN; i++)                //计算隐含层的delta
			{
				errtemp = 0.0;
				for(j=0; j<OutN; j++)
                    errtemp += y_delta[j] * v[i][j];
				hn_delta[i] = errtemp * (1.0 + hn_out[i]) * (1.0 - hn_out[i]);
			}
				
			for(i=0; i<OutN; i++)       //调整输出层的权值和偏置系数
			{
				for(j=0; j<HN; j++)
				{
					deltav[j][i] = alpha * deltav[j][i] + beta * y_delta[i] * hn_out[j];
					v[j][i] += deltav[j][i];
				}
			}
				
				
			for(i=0; i<HN; i++)        //调整隐含层的权值和偏置系数
			{
				for(j=0; j<InputN; j++)
				{
					deltaw[j][i] = alpha * deltaw[j][i] + beta * hn_delta[i] * x_out[j];
					w[j][i] += deltaw[j][i];
				}
			}
		}//end for 样本	
				
		//全局误差判断
		error = error/2;
		cout<<"全局误差Error="<<error<<endl;
		if(error < errlimit)
			break;
			
	}//end while 循环次数
		
    //----------------------------------------------------------------
	//输出训练结果
	cout<<endl<<"training completion............"<<endl;
	cout<<"HN="<<HN<<endl;
	cout<<"Error="<<error<<endl;
	cout<<"Loop="<<loop<<endl;
	for(i=0; i<InputN; i++)
		for(j=0; j<HN; j++)
		    cout<<"w["<<i+1<<"]["<<j+1<<"]="<<w[i][j]<<endl;

	for(i=0; i<HN; i++)
		for(j=0; j<OutN; j++)
			cout<<"v["<<i+1<<"]["<<j+1<<"]="<<v[i][j]<<endl;
	


	//------------------------------------------------------------------
	//利用上面训练得到的权值和偏置系数进行检验
	cout<<endl<<"testing样本............"<<endl;
	for(m=0; m<datanum; m++)                     //对datanum个样本分别检验其实际输出
	{
		for(i=0; i<InputN; i++)                 //设置输入层的输出
		{
			x_out[i] = data[m].input[i];
			cout<<"data["<<m+1<<"].x["<<i+1<<"]="<<data[m].input[i]<<endl;
		}
		for(i=0; i<HN; i++)                //计算隐含层的输出
		{
			sumtemp = 0.0;
			for(j=0; j<InputN; j++)
				sumtemp += w[j][i] * x_out[j];
			hn_out[i] = tanh(sumtemp);     //隐含层作用函数为y=tanh(x)
		}

		for(i=0; i<OutN; i++)     //计算输出层的输出
		{
			sumtemp = 0.0;
			for(j=0; j<HN; j++)
				sumtemp += v[j][i] * hn_out[j];
//			y_out[i] = 1/(1+exp(-sumtemp));
			y_out[i] = g(sumtemp);
			cout<<"data["<<m+1<<"].y["<<i+1<<"]= "<<y_out[i]<<endl;
		}
	}


	//------------------------------------------------------------------
	//利用上面训练得到的权值和偏置系数对新的数据进行检验
	cout<<endl<<"testing输入............"<<endl;
	
	struct                           //检验数据
	{
		double input[InputN];
	}newdata[3] = {{6.5386, 18.26, 27.842, 28.003, 58.226, 37.378, 55.951},
				{6.5226, 17.2, 27.324, 30.425, 61.694, 42.122, 59.334},
				{6.5226, 16.447, 27.226, 27.338, 54.934, 35.612, 54.897}}; //注意这里的值需要改变
	
	for(m=0; m<3; m++)                     //对datanum个样本分别检验其实际输出
	{
		for(i=0; i<InputN; i++)                 //设置输入层的输出
		{
			x_out[i] = data[m].input[i];
			cout<<"data["<<m+1<<"].x["<<i+1<<"]="<<data[m].input[i]<<endl;
		}
		for(i=0; i<HN; i++)                //计算隐含层的输出
		{
			sumtemp = 0.0;
			for(j=0; j<InputN; j++)
				sumtemp += w[j][i] * x_out[j];
			hn_out[i] = tanh(sumtemp);     //隐含层作用函数为y=tanh(x)
		}
		
		for(i=0; i<OutN; i++)     //计算输出层的输出
		{
			sumtemp = 0.0;
			for(j=0; j<HN; j++)
				sumtemp += v[j][i] * hn_out[j];
			//			y_out[i] = 1/(1+exp(-sumtemp));
			y_out[i] = g(sumtemp);
			cout<<"data["<<m+1<<"].y["<<i+1<<"]= "<<y_out[i]<<endl;
		}
	}
		
	timeend();
	cout<<endl<< "the code running time(by Win32 function)="<<timevalue()<<"s"<<endl;
		

	return 0;
}		




//----------------------------------------------------------------
// Program run-time measurement (Windows and Linux versions)
// - after Edward G. Bradford
#ifdef _WIN32
#include <Windows.h>
static LARGE_INTEGER _timestart, _timeend;
static LARGE_INTEGER freq;

// Record the start time; latch the performance-counter frequency once.
void timestart(void)
{
    static int first = 1;

    if(first)
	{
        QueryPerformanceFrequency(&freq);
        first = 0;
    }
    QueryPerformanceCounter(&_timestart);
}
// Record the end time.
void timeend(void)
{
    QueryPerformanceCounter(&_timeend);
}
// Elapsed seconds between timestart() and timeend().
double timevalue(void)
{
    return ((double)_timeend.QuadPart - (double)_timestart.QuadPart)/((double)freq.QuadPart);
}

#else
// BUG FIX: gettimeofday, struct timeval and struct timezone are declared in
// <sys/time.h>; the original included <sys/utsname.h> (uname), which does not
// provide them, so this branch failed to compile on Linux.
#include <sys/time.h>
static struct timeval _timestart, _timeend;
static struct timezone tz;

// Record the start time.
void timestart(void)
{
    gettimeofday(&_timestart, &tz);
}
// Record the end time.
void timeend(void)
{
    gettimeofday(&_timeend, &tz);
}
// Elapsed seconds between timestart() and timeend().
double timevalue(void)
{
    double t1, t2;

    t1 =  (double)_timestart.tv_sec + (double)_timestart.tv_usec/(1000*1000);
    t2 =  (double)_timeend.tv_sec + (double)_timeend.tv_usec/(1000*1000);
    return t2-t1;
}
#endif




		

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -