
neuralnet.cpp

A small artificial-neural-network game developed in C++
// NeuralNet.cpp: implementation of the CNeuralNet class.
//
//////////////////////////////////////////////////////////////////////

#include "stdafx.h"
#include "EludeObstacle.h"
#include "NeuralNet.h"

#ifdef _DEBUG
#undef THIS_FILE
static char THIS_FILE[]=__FILE__;
#define new DEBUG_NEW
#endif

//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////

extern double globe_weightRange;
extern int globe_NumInput;
extern int globe_NumOutput;
extern int globe_NumHiddenLayers;
extern int globe_iNeuronsPerHiddenLayer;
extern double globe_dActivationResponse;

//returns a random integer in the range [x, y]
inline int	  RandInt(int x,int y) {return rand()%(y-x+1)+x;}

//returns a random float in the range [0, 1)
inline double RandFloat()		   {return (rand())/(RAND_MAX+1.0);}

//returns a random float in the range -1 < n < 1
inline double RandomClamped()	   {return RandFloat() - RandFloat();}



SNeuron::SNeuron(int NumInputs): m_NumInputs(NumInputs+1)
{
	//one extra weight for the bias, hence the +1
	for (int i=0; i<NumInputs+1; ++i)
	{
		//initialise this weight with a random value
		double x = globe_weightRange * RandomClamped();
		m_vecWeight.push_back(x);
	}
}
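
// Illustrative note (not in the original comments): the extra weight added
// above acts as the neuron's bias/threshold.  During CNeuralNet::Update the
// neuron computes
//
//     netinput = w[0]*in[0] + ... + w[n-1]*in[n-1] + w[n]*(-1)
//
// so the threshold is evolved like any other weight rather than being stored
// as a separate parameter.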

SNeuronLayer::SNeuronLayer(int NumNeurons, 
                           int NumInputsPerNeuron):	m_NumNeurons(NumNeurons)
{
	for (int i=0; i<NumNeurons; ++i)

		m_vecNeurons.push_back(SNeuron(NumInputsPerNeuron));
}

CNeuralNet::CNeuralNet() 
{
	CreateNet();
}


//builds the whole neural network; calling it again rebuilds the net from scratch
void CNeuralNet::CreateNet()
{
	m_NumInputs	          =	globe_NumInput;
	m_NumOutputs		      =	globe_NumOutput;
	m_NumHiddenLayers	    =	globe_NumHiddenLayers;
	m_NeuronsPerHiddenLyr =	globe_iNeuronsPerHiddenLayer;

	m_vecLayers.clear();

	//create the layers of the network
	//if there is at least one hidden layer
	if (m_NumHiddenLayers > 0)
	{
		//create the first hidden layer
		m_vecLayers.push_back(SNeuronLayer(m_NeuronsPerHiddenLyr, m_NumInputs));
		
		for (int i=0; i<m_NumHiddenLayers-1; ++i)
		{
			m_vecLayers.push_back(SNeuronLayer(m_NeuronsPerHiddenLyr,
				m_NeuronsPerHiddenLyr));
		}
		//create the output layer
		m_vecLayers.push_back(SNeuronLayer(m_NumOutputs, m_NeuronsPerHiddenLyr));
	}
	else
	{
		//create the output layer
		m_vecLayers.push_back(SNeuronLayer(m_NumOutputs, m_NumInputs));
	}
}
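
//////////////////////////////////////////////////////////////////////
// Illustrative sketch, not part of the original project: how many weights the
// net built by CreateNet holds, counting the +1 bias on every neuron.  With 4
// inputs, 1 hidden layer of 6 neurons and 2 outputs (made-up numbers; the real
// values come from the globe_* globals) this gives (4+1)*6 + (6+1)*2 = 44.
static int CountWeightsSketch(int numIn, int numOut,
                              int numHidden, int neuronsPerHidden)
{
	if (numHidden == 0)
		return (numIn + 1) * numOut;

	int total = (numIn + 1) * neuronsPerHidden;                            //first hidden layer
	total += (numHidden - 1) * (neuronsPerHidden + 1) * neuronsPerHidden;  //remaining hidden layers
	total += (neuronsPerHidden + 1) * numOut;                              //output layer
	return total;
}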


//works out the crossover split points for the genetic algorithm (the last
//weight of each neuron), stores them in a vector, and recounts m_Weights
vector<int> CNeuralNet::CalculateSplitPoints()
{
	vector<int> SplitPoints;
	
	m_Weights = 0;
	
	//for each layer
	for (int i=0; i<m_NumHiddenLayers + 1; ++i)
	{
		//for each neuron
		for (int j=0; j<m_vecLayers[i].m_NumNeurons; ++j)
		{
			//for each weight
			for (int k=0; k<m_vecLayers[i].m_vecNeurons[j].m_NumInputs; ++k)
			{
				m_Weights++;
			}
			
			SplitPoints.push_back(m_Weights - 1);
		}
	}
	
	return SplitPoints;
}
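
//////////////////////////////////////////////////////////////////////
// Illustrative sketch, an assumption about how the genetic-algorithm side of
// the game might use the split points above (the GA code is not in this file):
// each split point marks the last weight of a neuron, so crossing over at one
// of them swaps whole neurons between two parents' weight vectors instead of
// cutting a neuron in half.  The parameter names are hypothetical; both parent
// vectors are assumed to be the same length.
static void CrossoverAtNeuronBoundarySketch(const vector<double> &mumWeights,
                                            const vector<double> &dadWeights,
                                            const vector<int>    &splitPoints,
                                            vector<double>       &baby1,
                                            vector<double>       &baby2)
{
	if (splitPoints.empty())
		return;

	//choose a random neuron boundary as the crossover point
	int cp = splitPoints[RandInt(0, (int)splitPoints.size() - 1)];

	baby1.clear();
	baby2.clear();

	for (int i = 0; i < (int)mumWeights.size(); ++i)
	{
		if (i <= cp)
		{
			baby1.push_back(mumWeights[i]);
			baby2.push_back(dadWeights[i]);
		}
		else
		{
			baby1.push_back(dadWeights[i]);
			baby2.push_back(mumWeights[i]);
		}
	}
}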

double CNeuralNet::Sigmoid(double netinput, double response) const
{
	return ( 1 / ( 1 + exp(-netinput * response)));
}
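
// Illustrative note: with response = 1 this is the standard logistic curve,
// e.g. Sigmoid(0.0, 1.0) == 0.5 and Sigmoid(2.0, 1.0) is roughly 0.88; larger
// response values make the transition around netinput = 0 steeper, smaller
// values flatten it.  The output always lies in the open interval (0, 1).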

//takes a vector of inputs and returns a vector of outputs
vector<double> CNeuralNet::Update(vector<double> inputs)
{
	//holds each layer's outputs, which become the next layer's inputs
	vector<double> temp;
	
	//first check that we have the correct amount of inputs
	if (inputs.size() != m_NumInputs)
	{
		//just return an empty vector if incorrect.
		AfxMessageBox("error");
		return temp;
	}
	
	//work through every layer (the hidden layers plus the output layer), hence HiddenLayers + 1
	for (int i=0; i<m_NumHiddenLayers + 1; ++i)
	{		
		if ( i > 0 )
		{
			inputs = temp;
		}
		
		temp.clear();
		
		
		//for each neuron in this layer sum (input * corresponding weight),
		//then feed the total through the sigmoid function to get the output
		for (int j=0; j<m_vecLayers[i].m_NumNeurons; ++j)
		{
			double netinput = 0;
			
			int	NumInputs = m_vecLayers[i].m_vecNeurons[j].m_NumInputs;
			
			
			//for each weight
			for (int k=0; k<NumInputs - 1; ++k)
			{
				//accumulate input * weight
				netinput += m_vecLayers[i].m_vecNeurons[j].m_vecWeight[k] * 
                    inputs[k];
			}
			
			//add in the bias (its input is held constant at -1)
			netinput += m_vecLayers[i].m_vecNeurons[j].m_vecWeight[NumInputs-1] * 
				-1;
			
			//store this neuron's output in temp; it becomes the input to the next layer

			
			//the first layer passes the raw weighted sum straight through;
			//every later layer runs it through the sigmoid activation
			if(i == 0)
			{
				temp.push_back(netinput);
			}
			else
			{
				temp.push_back(Sigmoid(netinput,globe_dActivationResponse));
			}
			
			
		}
	}

	return temp;
}
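
//////////////////////////////////////////////////////////////////////
// Illustrative usage sketch, not part of the original project: feeding the net
// once and reading its outputs.  Update expects exactly globe_NumInput values;
// anything else pops the "error" message box and returns an empty vector.
static void UpdateUsageSketch()
{
	CNeuralNet net;                               //topology taken from the globe_* values

	vector<double> inputs(globe_NumInput, 0.0);   //e.g. normalised sensor readings
	vector<double> outputs = net.Update(inputs);

	if (!outputs.empty())
	{
		//on success outputs.size() == globe_NumOutput and each entry is the
		//activation of one output neuron
		double first = outputs[0];
		(void)first;                              //only a sketch, value unused
	}
}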



vector<double> CNeuralNet::GetWeights() const
{
	//a vector to hold all the weights
	vector<double> weights;
	
	//for each layer
	for (int i=0; i<m_NumHiddenLayers + 1; ++i)
	{
		//for each neuron (note: it is m_NumInputs, not m_NumNeurons, that carries the extra +1 for the bias weight)
		for (int j=0; j<m_vecLayers[i].m_NumNeurons; ++j)
		{
			//copy each weight into the vector
			for (int k=0; k<m_vecLayers[i].m_vecNeurons[j].m_NumInputs; ++k)
			{
				weights.push_back(m_vecLayers[i].m_vecNeurons[j].m_vecWeight[k]);
			}
		}
	}

	return weights;
}

void CNeuralNet::PutWeights(vector<double> &weights)
{
	
	//index into the flat weight vector; it has to run across every layer, so
	//initialise it once, before the layer loop (resetting it per layer would
	//write the same leading weights into every layer)
	int numWeight = 0;

	//for each layer
	for (int i=0; i<m_NumHiddenLayers + 1; ++i)
	{
		//for each neuron
		for (int j=0; j<m_vecLayers[i].m_NumNeurons; ++j)
		{
			//write the corresponding weight from the vector back into the neuron
			for (int k=0; k<m_vecLayers[i].m_vecNeurons[j].m_NumInputs; ++k)
			{
				m_vecLayers[i].m_vecNeurons[j].m_vecWeight[k] = weights[numWeight++];
			}
		}
	}

}
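
//////////////////////////////////////////////////////////////////////
// Illustrative sketch, an assumption about how the genetic-algorithm code
// might use GetWeights/PutWeights (mutationRate and maxPerturbation are
// hypothetical parameters): pull the flat weight vector out, jitter some of
// the weights, and write the result back into the net.
static void MutateWeightsSketch(CNeuralNet &net,
                                double mutationRate,
                                double maxPerturbation)
{
	vector<double> weights = net.GetWeights();

	for (int i = 0; i < (int)weights.size(); ++i)
	{
		//with probability mutationRate nudge this weight by a random amount
		//in (-maxPerturbation, +maxPerturbation)
		if (RandFloat() < mutationRate)
		{
			weights[i] += RandomClamped() * maxPerturbation;
		}
	}

	net.PutWeights(weights);
}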
