
📄 bp.cpp

📁 A neural-network program combined with genetic-algorithm optimization (C++ source)
💻 CPP
#include "stdafx.h"
#include "BP.h"

SBPNeuron::SBPNeuron(int NumInputs) : m_iNumInputs(NumInputs+1),m_dActivation(0),m_dError(0)//the extra input is the threshold; its weight is called the bias
{
	for(int i=0; i<NumInputs+1; i++)
	{
		m_vecWeight.push_back(RandomClamped());
		m_vecPrevUpdate.push_back(0);
	}
}
SBPNeuronLayer::SBPNeuronLayer(int NumNeurons, int NumInputsPerNeuron) : m_iNumNeurons(NumNeurons)//number of neurons in this layer and number of inputs per neuron
{
	for(int i=0; i<NumNeurons; i++)
	{
		m_vecNeurons.push_back(SBPNeuron(NumInputsPerNeuron));
	}
}
CBPNeuralNet::CBPNeuralNet(int NumInputs,int NumOutputs,int HiddenNeurons,double LearningRate,bool softmax): m_iNumInputs(NumInputs),
                                            m_iNumOutputs(NumOutputs),
                                            m_iNumHiddenLayers(1),//one hidden layer
                                            m_iNeuronsPerHiddenLayer(HiddenNeurons),
                                            m_dLearningRate(LearningRate),
                                            m_dErrorSum(9999),
                                            m_bTrained(false),
                                            m_iNumEpochs(0),
                                            m_bSoftMax(softmax)
{
	CreateNet();
}
void CBPNeuralNet::PutWeights(std::vector<double> &weights)//load a flat weight vector (e.g. a chromosome evolved by the GA) into the net
{
	int cWeight=0;
	for(int i=0; i<m_iNumHiddenLayers+1; i++)
	{
		for(int j=0; j<m_vecLayers[i].m_iNumNeurons; j++)
		{
			for(int k=0; k<m_vecLayers[i].m_vecNeurons[j].m_iNumInputs; k++)
			{
				m_vecLayers[i].m_vecNeurons[j].m_vecWeight[k]=weights[cWeight++];
			}
		}
	}
	return;
}
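//A genetic algorithm that evolves the weights needs to know how long its
//chromosomes must be. The original file has no such helper; the sketch below
//is an assumption (it presumes a matching declaration is added to BP.h) and
//simply mirrors the traversal order used by PutWeights() above.
int CBPNeuralNet::GetNumberOfWeights() const
{
	int count=0;
	for(int i=0; i<m_iNumHiddenLayers+1; i++)
	{
		for(int j=0; j<m_vecLayers[i].m_iNumNeurons; j++)
		{
			count += m_vecLayers[i].m_vecNeurons[j].m_iNumInputs;//weights including the bias weight
		}
	}
	return count;
}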
void CBPNeuralNet::CreateNet()
{
	if(m_iNumHiddenLayers>0)
	{
		m_vecLayers.push_back(SBPNeuronLayer(m_iNeuronsPerHiddenLayer,m_iNumInputs));//create the first hidden layer
		for(int i=0; i<m_iNumHiddenLayers-1; i++)//create any remaining hidden layers; each neuron takes one input per neuron in the previous layer
		{
			m_vecLayers.push_back(SBPNeuronLayer(m_iNeuronsPerHiddenLayer,m_iNeuronsPerHiddenLayer));
		}
		m_vecLayers.push_back(SBPNeuronLayer(m_iNumOutputs,m_iNeuronsPerHiddenLayer));//create the output layer
	}
	else
	{
		m_vecLayers.push_back(SBPNeuronLayer(m_iNumOutputs,m_iNumInputs));//no hidden layer: create the output layer directly
	}

}
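//Worked example of the resulting topology: CBPNeuralNet(2,1,4,0.5,false)
//builds m_vecLayers[0] with 4 hidden neurons of 3 weights each (2 inputs + bias)
//and m_vecLayers[1] with 1 output neuron of 5 weights (4 hidden activations + bias),
//17 weights in total.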
void CBPNeuralNet::InitNetwork()//reset the weights to small random values and clear the training counters
{
	for(int i=0; i<m_iNumHiddenLayers+1; i++)
	{
		for(int n=0; n<m_vecLayers[i].m_iNumNeurons; n++)
		{
			for(int k=0; k<m_vecLayers[i].m_vecNeurons[n].m_iNumInputs; k++)
			{
				m_vecLayers[i].m_vecNeurons[n].m_vecWeight[k]= RandomClamped();
			}
		}
	}
	m_dErrorSum=9999;
	m_iNumEpochs=0;
	return;
}
vector<double> CBPNeuralNet::Update(vector<double> inputs)//feed the inputs forward through the net and return the outputs
{
	for(int k=0; k<(int)inputs.size(); k++)//add noise to the inputs to help prevent overfitting (note: this perturbs prediction-time inputs as well)
	{
		inputs[k] += RandFloat()*MAX_NOISE_TO_ADD;
	}
	vector<double> outputs;
	int cWeight=0;
	if((int)inputs.size()!=m_iNumInputs)//reject input vectors of the wrong length
		return outputs;
	for(int i=0; i<m_iNumHiddenLayers+1; i++)
	{
		if(i>0)
			inputs=outputs;
		outputs.clear();
		cWeight=0;
		for(int n=0; n<m_vecLayers[i].m_iNumNeurons; n++)
		{
			double netinput=0;
			int NumInputs=m_vecLayers[i].m_vecNeurons[n].m_iNumInputs;
			for(int k=0; k<NumInputs-1; k++)
			{
				netinput += m_vecLayers[i].m_vecNeurons[n].m_vecWeight[k]*inputs[cWeight++];
			}
			netinput += m_vecLayers[i].m_vecNeurons[n].m_vecWeight[NumInputs-1]*BIAS;//the last weight is the bias (threshold) weight

			
			if(m_bSoftMax && (i==m_iNumHiddenLayers) )//softmax output layer: store exp(net input), normalized below
			{
				m_vecLayers[i].m_vecNeurons[n].m_dActivation = exp(netinput);
			}
			else
			{
				m_vecLayers[i].m_vecNeurons[n].m_dActivation = Sigmoid(netinput,ACTIVATION_RESPONSE);//sigmoid activation function
			}
			outputs.push_back(m_vecLayers[i].m_vecNeurons[n].m_dActivation);
			cWeight=0;//reset the input index for the next neuron
		}
	}
	if(m_bSoftMax)//normalize the outputs so they sum to 1: out[o] = exp(net_o)/sum_j exp(net_j)
	{
		double expTot=0;
		for(int o=0; o<(int)outputs.size(); o++)
		{
			expTot += outputs[o];
		}
		for(int o=0; o<(int)outputs.size(); o++)
		{
			outputs[o] = outputs[o]/expTot;
			m_vecLayers[m_iNumHiddenLayers].m_vecNeurons[o].m_dActivation = outputs[o];
		}
	}
	return outputs;
}
double CBPNeuralNet::Sigmoid(double activation, double response)
{
	return 1/(1+exp(-activation/response));
}
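//Note: with a response of r, the derivative of this sigmoid is S*(1-S)/r.
//The training code below uses output*(1-output) as the derivative, which is
//exact only when ACTIVATION_RESPONSE is 1.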
bool CBPNeuralNet::Train(vector<iovector> &vecSetIn, vector<iovector> &vecSetOut)
{
		
	vector<vector<double> > SetIn  = vecSetIn;
	vector<vector<double> > SetOut = vecSetOut;

	//first make sure the training set is valid
	if (SetIn.empty() || (SetIn.size()!=SetOut.size()) || ((int)SetIn[0].size() != m_iNumInputs) || ((int)SetOut[0].size() != m_iNumOutputs))
	{
		MessageBox(NULL, "Invalid training set", "Error", NULL);
		return false;
	}
   //initialize all the weights to small random values
   InitNetwork();
   while( m_dErrorSum > ERROR_THRESHOLD )//keep training epochs until the total error falls below the threshold
   {
	   //curerror=m_dErrorSum;
	   //return false if there are any problems
       if (!NetworkTrainingEpoch(SetIn, SetOut))
       {
           return false;
       }
	   ++m_iNumEpochs;
   }
   CString str;
   str.Format("%d",m_iNumEpochs);
   MessageBox(NULL, str, "BP", NULL);//report how many epochs training took
   m_bTrained = true; 
   return true;
}

bool CBPNeuralNet::NetworkTrainingEpoch(std::vector<iovector> &SetIn, std::vector<iovector> &SetOut)
{
	vector<double>::iterator curWeight;
	vector<SBPNeuron>::iterator curNrnOut,curNrnHid;

	double WeightUpdate=0;//momentum

	m_dErrorSum=0;

	for(int vec=0; vec< (int)SetIn.size(); vec++)//process one training sample per iteration
	{
		vector<double> outputs=Update(SetIn[vec]);//forward pass: compute this sample's outputs
		if(outputs.size()==0)
			return false;
		for(int op=0; op<m_iNumOutputs; op++)//adjust the output-layer weights
		{
			double err;
			if(m_bSoftMax)//with softmax outputs and cross-entropy error the delta simplifies to (target - output)
			{
				err = SetOut[vec][op]- outputs[op];
			}
			else//with sigmoid outputs and SSE the delta includes the sigmoid derivative output*(1-output)
			{
				err = (SetOut[vec][op]- outputs[op])*outputs[op]*(1-outputs[op]);
			}
			//m_dErrorSum += (SetOut[vec][op]- outputs[op])*(SetOut[vec][op]- outputs[op]);//SSE(Sum of the Squared Errors)
			m_vecLayers[1].m_vecNeurons[op].m_dError =err;//m_vecLayers[1] is the output layer
			curWeight=m_vecLayers[1].m_vecNeurons[op].m_vecWeight.begin();//iterator over this neuron's weights
			curNrnHid=m_vecLayers[0].m_vecNeurons.begin();//iterator over the hidden-layer neurons
			int w=0;//index into the previous-update record (momentum)
			while(curWeight!=m_vecLayers[1].m_vecNeurons[op].m_vecWeight.end()-1)//all weights except the bias weight
			{
				WeightUpdate = m_dLearningRate*err*curNrnHid->m_dActivation;//weight change for this step
				//*curWeight += m_dLearningRate*err*curNrnHid->m_dActivation;//plain update without momentum
				*curWeight += WeightUpdate+m_vecLayers[1].m_vecNeurons[op].m_vecPrevUpdate[w]*MOMENTUM;//momentum
				m_vecLayers[1].m_vecNeurons[op].m_vecPrevUpdate[w] = WeightUpdate;//momentum
				curWeight++;//next weight
				curNrnHid++;//next hidden-layer neuron
				w++;
			}
			WeightUpdate=m_dLearningRate*err*BIAS;//momentum
			//*curWeight += m_dLearningRate*err*BIAS;//the last weight is the threshold weight, a.k.a. the bias
			*curWeight += WeightUpdate+m_vecLayers[1].m_vecNeurons[op].m_vecPrevUpdate[w]*MOMENTUM;//momentum
			m_vecLayers[1].m_vecNeurons[op].m_vecPrevUpdate[w]=WeightUpdate;//momentum
		}

		double error=0;
		if(!m_bSoftMax)//Use SSE
		{
			for(int o=0; o<m_iNumOutputs; o++)
			{
				error += (SetOut[vec][o]-outputs[o])*(SetOut[vec][o]-outputs[o]);
			}
		}
		else//Use Cross-Entropy Error
		{
			for(int o=0; o<m_iNumOutputs; o++)
			{
				error += SetOut[vec][o]*log(outputs[o]);		
			}
			error = -error;
		}
		m_dErrorSum += error;//accumulate the error over the whole training set

		curNrnHid = m_vecLayers[0].m_vecNeurons.begin();
		int n=0;
		while(curNrnHid != m_vecLayers[0].m_vecNeurons.end())//adjust the hidden-layer weights
		{
			double err=0;
			curNrnOut = m_vecLayers[1].m_vecNeurons.begin();//iterator over the output-layer neurons
			while(curNrnOut != m_vecLayers[1].m_vecNeurons.end())
			{
				err += curNrnOut->m_dError*curNrnOut->m_vecWeight[n];//back-propagate: sum the output deltas weighted by the connecting weights
				curNrnOut++;
			}
			err *= curNrnHid->m_dActivation*(1-curNrnHid->m_dActivation);//multiply by the sigmoid derivative of this neuron's activation
			int w=0;
			for(w=0; w<m_iNumInputs; w++)
			{
				WeightUpdate = m_dLearningRate*err*SetIn[vec][w];//momentum
				curNrnHid->m_vecWeight[w] += WeightUpdate+curNrnHid->m_vecPrevUpdate[w]*MOMENTUM;//momentum
				curNrnHid->m_vecPrevUpdate[w]=WeightUpdate;
				//curNrnHid->m_vecWeight[w] += m_dLearningRate*err*SetIn[vec][w];//plain input-weight update without momentum
			}
			//curNrnHid->m_vecWeight[m_iNumInputs] +=err*m_dLearningRate*BIAS;//plain bias-weight update without momentum
			WeightUpdate = m_dLearningRate*err*BIAS;
			curNrnHid->m_vecWeight[m_iNumInputs] += WeightUpdate + curNrnHid->m_vecPrevUpdate[w]*MOMENTUM;
			curNrnHid->m_vecPrevUpdate[w] =WeightUpdate;
			curNrnHid++;//next neuron in this layer
			n++;//n indexes this hidden neuron's weight within each output neuron
		}
		}
	}
	return true;
}
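//The update rule implemented above is gradient descent with momentum:
//delta_w(t) = LearningRate * delta * input + MOMENTUM * delta_w(t-1),
//where delta_w(t-1) is the previous step's change stored in m_vecPrevUpdate.
//The momentum term smooths the weight trajectory and speeds convergence
//across flat regions of the error surface.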
void CBPNeuralNet::OutputWeight()//dump all of the weights, in PutWeights() order, to weight.txt
{
	ofstream weight("weight.txt");
	int numofweights=0;
	for(int i=0; i<m_iNumHiddenLayers+1; i++)
	{
		for(int n=0; n<m_vecLayers[i].m_iNumNeurons; n++)
		{
			for(int k=0; k<m_vecLayers[i].m_vecNeurons[n].m_iNumInputs; k++)
			{
				weight<<m_vecLayers[i].m_vecNeurons[n].m_vecWeight[k]<<endl;
				numofweights++;
			}
		}
	}
	weight<<"Number of weights: "<<numofweights<<endl;
}
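
//--- Usage sketch (not part of the original file) ---
//A minimal example of driving the class above: training on XOR with a
//sigmoid output layer, then querying the trained net. It assumes BP.h
//declares the class as used here, defines iovector as vector<double>,
//and provides RandFloat/RandomClamped plus the BIAS, MOMENTUM,
//ACTIVATION_RESPONSE, ERROR_THRESHOLD and MAX_NOISE_TO_ADD constants.
#if 0 //illustration only; enable in a test project
int main()
{
	CBPNeuralNet net(2/*inputs*/, 1/*outputs*/, 4/*hidden neurons*/, 0.5/*learning rate*/, false/*no softmax*/);

	vector<iovector> SetIn(4, iovector(2)), SetOut(4, iovector(1));
	SetIn[0][0]=0; SetIn[0][1]=0; SetOut[0][0]=0;
	SetIn[1][0]=0; SetIn[1][1]=1; SetOut[1][0]=1;
	SetIn[2][0]=1; SetIn[2][1]=0; SetOut[2][0]=1;
	SetIn[3][0]=1; SetIn[3][1]=1; SetOut[3][0]=0;

	if(net.Train(SetIn, SetOut))
	{
		vector<double> result = net.Update(SetIn[1]);//expect a value near 1
		net.OutputWeight();//dump the learned weights to weight.txt
	}
	return 0;
}
#endif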
