⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 neuralb.cpp

📁 预测神经网络
💻 CPP
📖 第 1 页 / 共 2 页
字号:
	Net_Design.signal_dimensions=bp.signal_dimensions;
	
	Net_Design.activation_function_for_output_layer=bp.activation_function_for_output_layer;
	//输出维数
	Net_Design.nodes_in_output_layer=bp.nodes_in_output_layer;
	//隐层节点数
	Net_Design.hidenode[1]=bp.hidenode[1];
	Net_Design.hidenode[0]=bp.hidenode[0];
	
	//分配存储空间
	Net_Design.construct_and_initialize_backprop_network();
	//输出层偏置矢量
	for(int nodes = 0; nodes < Net_Design.nodes_in_output_layer; nodes++)
	{Net_Design.node_in_output_layer[nodes].bias=bp.node_in_output_layer[nodes].bias;}
	//输出层权重
	for(nodes = 0; nodes < Net_Design.nodes_in_output_layer; nodes++)
	{
		for(int dim = 0; dim < Net_Design.hidenode[1]; dim++)
		{Net_Design.node_in_output_layer[nodes].weight_of_inputs[dim]=bp.node_in_output_layer[nodes].weight_of_inputs[dim];
		}
		
		
	}
	
	
	Net_Design.activation_function_for_hidden_layer=bp.activation_function_for_hidden_layer;
	//第一隐层偏置矢量
	for(nodes = 0; nodes < Net_Design.hidenode[0]; nodes++)
	{
		Net_Design.hidden_layer_number[0].node_in_hidden_layer[nodes].bias=bp.hidden_layer_number[0].node_in_hidden_layer[nodes].bias;
		
	}
	
	
	//第一隐层权重
	for(nodes = 0; nodes < Net_Design.hidenode[0]; nodes++)
	{
		for(int dim = 0; dim <Net_Design.signal_dimensions; dim++)
		{
			Net_Design.hidden_layer_number[0].node_in_hidden_layer[nodes].weight_of_inputs[dim]=bp.hidden_layer_number[0].node_in_hidden_layer[nodes].weight_of_inputs[dim];
		}
		
	}
	
	//第二隐层偏置矢量
	for(nodes = 0; nodes < Net_Design.hidenode[1]; nodes++)
	{Net_Design.hidden_layer_number[1].node_in_hidden_layer[nodes].bias=bp.hidden_layer_number[1].node_in_hidden_layer[nodes].bias;
	
	}
	
	//第二隐层权重
	for(nodes = 0; nodes < Net_Design.hidenode[1]; nodes++)
	{
		for(int dim = 0; dim < Net_Design.hidenode[0]; dim++)
		{
			Net_Design.hidden_layer_number[1].node_in_hidden_layer[nodes].weight_of_inputs[dim]=bp.hidden_layer_number[1].node_in_hidden_layer[nodes].weight_of_inputs[dim];
			
			
		}
	}
	
	Net_Design.realtesting=bp.realtesting;
	strcpy(Net_Design.bpfilename,bp.bpfilename);
	strcpy(Net_Design.bpforlookfilename,bp.bpforlookfilename);
	Net_Design.savebptoSee=bp.savebptoSee;

}

//DEL void NeuralB::copytestdata(const NeuralB * & source)
//DEL {
//DEL 		Test_Data.signal_dimensions=source->Training_Data.signal_dimensions;
//DEL 		Test_Data.nodes_in_output_layer=source->Training_Data.nodes_in_output_layer;
//DEL 		Test_Data.sample_number=source->Training_Data.sample_number;
//DEL 		
//DEL 		//Test_Data.number_of_samples = new sample_data[Test_Data.sample_number];
//DEL 		Test_Data.number_of_samples = new sample_data[Test_Data.sample_number];
//DEL 		
//DEL 		for(int i = 0; i < Test_Data.sample_number; i++)
//DEL 			
//DEL 		{
//DEL 			Test_Data.number_of_samples[i].data_in_sample = new float[Test_Data.signal_dimensions + Test_Data.nodes_in_output_layer];
//DEL            
//DEL 		}
//DEL 		
//DEL 		
//DEL 		int dimensions = Test_Data.signal_dimensions + Test_Data.nodes_in_output_layer;
//DEL 		
//DEL 		
//DEL 		for(i = 0; i < Test_Data.sample_number; i++)
//DEL 		{
//DEL 			for(int j = 0; j < dimensions; j++)
//DEL 			{
//DEL 				Test_Data.number_of_samples[i].data_in_sample[j]=source->Training_Data.number_of_samples[i].data_in_sample[j];
//DEL 				//TRACE("%f \n ",Test_Data.number_of_samples[i].data_in_sample[j]);
//DEL 			}
//DEL 		}
//DEL 		
//DEL }


// Stub: inference on the sample file named by samFilename is not implemented.
// The parameter is currently unused.
// TODO: implement, or remove if obsolete.
void NeuralB::inference(CString samFilename)
{

}


// Run one forward pass of the trained network on the test sample selected
// by Test_Data.testsampleNum, then store both the (normalized) target
// outputs and the de-normalized network outputs into Test_Data.
//
// BUGFIX: removed locals output_error / sum_of_error — they were assigned
// zero and never read anywhere in the function (dead code).
void NeuralB::realtest()
{
	float real_output;
	int sigdim, hid1, hid2, outnode;

	// Feed the test sample's input signal into every node of the first
	// hidden layer (each node receives the full input vector).
	for(sigdim = 0; sigdim < Test_Data.signal_dimensions; sigdim++)
	{
		for(hid1 = 0; hid1 < hidenode[0]; hid1++)
		{
			Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].processing_unit_input[sigdim] =
				Test_Data.number_of_samples[Test_Data.testsampleNum].data_in_sample[sigdim];
			TRACE("%f\n",Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].processing_unit_input[sigdim]);
		}
	}

	// Forward pass: first hidden layer -> second hidden layer.
	for(hid1 = 0; hid1 < hidenode[0]; hid1++)
	{
		Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
		for(hid2 = 0; hid2 < hidenode[1]; hid2++)
		{
			Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].processing_unit_input[hid1] =
				Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].output_signal;
		}
	}

	// Forward pass: second hidden layer -> output layer.
	for(hid2 = 0; hid2 < hidenode[1]; hid2++)
	{
		Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
		for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
		{
			Net_Design.node_in_output_layer[outnode].processing_unit_input[hid2] =
				Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].output_signal;
		}
	}

	// Compute output-layer activations and their error-information terms
	// against the target values, which are stored in the sample row
	// immediately after the input dimensions.
	for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
	{
		Net_Design.node_in_output_layer[outnode].calculate_output_signal(Net_Design.activation_function_for_output_layer);
		Net_Design.node_in_output_layer[outnode].calculate_output_error_information_term(
			Test_Data.number_of_samples[Test_Data.testsampleNum].data_in_sample[Test_Data.signal_dimensions + outnode],
			Net_Design.activation_function_for_output_layer);
	}

	// Record this sample's target outputs (still in normalized form).
	for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
	{
		real_output = Test_Data.number_of_samples[Test_Data.testsampleNum].data_in_sample[outnode + Test_Data.signal_dimensions];
		Test_Data.targetoutput[outnode] = real_output;
	}

	// De-normalize the network outputs with the anti-normalization
	// coefficients (output = signal * Max_min + min) and record them.
	for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
	{
		real_output = (float)(Net_Design.node_in_output_layer[outnode].output_signal *
			Test_Data.pNormalizeInfo->pCoAnti[Test_Data.signal_dimensions + outnode].Max_min +
			Test_Data.pNormalizeInfo->pCoAnti[Test_Data.signal_dimensions + outnode].min);
		Test_Data.netoutput[outnode] = real_output;
		TRACE("%f\n",Net_Design.node_in_output_layer[outnode].output_signal);
	}
}

// Prepare the object for a "real test" run: mark the test mode, load the
// sample file into Test_Data, load sample inputs into the first hidden
// layer, allocate output buffers, and compute anti-normalization data.
// @param samfilename  path of the sample file copied into Test_Data.filename
//                     (plain strcpy — caller must ensure it fits; no length
//                     check is performed here).
void NeuralB::prepareRealTest(const char * samfilename)
{
	realtesting=true;
	Test_Data.realtesting=true;
	strcpy(Test_Data.filename,samfilename);
	Test_Data.acquire_net_info(
		Net_Design.signal_dimensions,
		Net_Design.nodes_in_output_layer);
	Test_Data.specify_signal_sample_size();

	Test_Data.gainCoAnti();
	// Read the data from the sample array into the BP network.
	// NOTE(review): each iteration of the outer `sig` loop writes to the
	// SAME processing_unit_input slots, so after the loop only the LAST
	// sample's inputs remain loaded — confirm this is intended (realtest()
	// reads the sample indexed by Test_Data.testsampleNum instead).
	for(int sig = 0; sig <Test_Data.sample_number; sig++)
	{// the signal_dimensions dimensions of one signal
		
		for(int sigdim = 0; sigdim <Test_Data.signal_dimensions; sigdim++)
		{
			// hidenode[0] hidden-layer nodes for one signal dimension
			for(int hid1 = 0; hid1<hidenode[0]; hid1++)
			{ // the sigdim-th input of one hidden node
				Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].
					processing_unit_input[sigdim] =
					Test_Data.number_of_samples[sig].data_in_sample[sigdim];
			}
		}
	}

	// Output buffers read back by realtest().
	// NOTE(review): allocated with new[] and never freed here — if
	// prepareRealTest is called more than once, the previous buffers leak;
	// confirm ownership/cleanup lives elsewhere.
	Test_Data.netoutput=
		new float[Net_Design.nodes_in_output_layer];
	Test_Data.targetoutput=
		new float[Net_Design.nodes_in_output_layer];
	caculateAntiNormalize();

	
	//preparEnd
}
//为了保存训练样本的归一化特征
// Preserve the normalization characteristics of the training samples:
// allocate a CNormalizeInfo for Net_Design and copy every per-dimension
// anti-normalization coefficient pair (Max_min, min) over from
// Training_Data, covering both input and output dimensions.
void NeuralB::copyNormalizeInfo()
{
	const int totalDim =
		Net_Design.signal_dimensions + Net_Design.nodes_in_output_layer;

	Net_Design.pNormalizeInfo = new CNormalizeInfo(
		Net_Design.signal_dimensions,
		Net_Design.nodes_in_output_layer);

	for(int dim = 0; dim < totalDim; dim++)
	{
		Net_Design.pNormalizeInfo->pCoAnti[dim].Max_min =
			Training_Data.pNormalizeInfo->pCoAnti[dim].Max_min;
		Net_Design.pNormalizeInfo->pCoAnti[dim].min =
			Training_Data.pNormalizeInfo->pCoAnti[dim].min;
	}
}

// Compute anti-normalization (de-normalization) coefficients for the test
// data's output dimensions by blending the training network's coefficients
// with the test set's own.
//
// Layout (established by realtest()/copyNormalizeInfo() in this file):
// pCoAnti[0 .. signal_dimensions-1] hold input-dimension coefficients;
// pCoAnti[signal_dimensions .. signal_dimensions+nodes_in_output_layer-1]
// hold output-dimension coefficients.
//
// BUGFIXES vs. original:
//  - The accumulation statements were of the form X = Y + ... with X and Y
//    at DIFFERENT indices (signal_dimensions+s vs nodes_in_output_layer+s),
//    so nothing actually accumulated unless the two counts happened to be
//    equal. All reads/writes of one running sum now use the same index.
//  - The final loop iterated s over output nodes although pRelativeCoAnti's
//    first index is an input dimension, zeroed its targets INSIDE the
//    accumulation loop (wiping earlier partial sums), and accumulated at
//    offset nodes_in_output_layer instead of signal_dimensions. It now
//    zeroes first, then accumulates over input dimensions.
//  - `s` is declared at function scope: the original reused a for-scoped
//    variable after its loop, which only compiles under legacy VC6 scoping.
void NeuralB::caculateAntiNormalize()
{
	// Small offset used to keep denominators away from exact zero.
	double exact = 1E-10;
	double totalCoAntiweightMax_min = 0;
	double totalCoAntiweightmin = 0;
	int s;

	// Per input dimension, weight each coefficient by how close the
	// training coefficient (a) is to the test coefficient (b):
	// weight = |a / (a - b)|, nudging a by `exact` when a == b.
	for(s = 0; s < Net_Design.signal_dimensions; s++)
	{
		double a = Net_Design.pNormalizeInfo->pCoAnti[s].Max_min;
		double b = Test_Data.pNormalizeInfo->pCoAnti[s].Max_min;
		if (a == b) { a = b + exact; }
		Test_Data.pNormalizeInfo->pCoAntiweight[s].Max_min = fabs(a / (a - b));

		a = Net_Design.pNormalizeInfo->pCoAnti[s].min;
		b = Test_Data.pNormalizeInfo->pCoAnti[s].min;
		if (a == b) { a = b + exact; }
		Test_Data.pNormalizeInfo->pCoAntiweight[s].min = fabs(a / (a - b));
	}

	// Totals used to normalize the weights below.
	for(s = 0; s < Net_Design.signal_dimensions; s++)
	{
		totalCoAntiweightMax_min += Test_Data.pNormalizeInfo->pCoAntiweight[s].Max_min;
		totalCoAntiweightmin += Test_Data.pNormalizeInfo->pCoAntiweight[s].min;
	}

	// Weighted anti-normalization coefficients for the output layer:
	// each output slot gets the weighted average of the input-dimension
	// coefficients.
	for(s = 0; s < Net_Design.nodes_in_output_layer; s++)
	{
		Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + s].Max_min = 0;
		Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + s].min = 0;
	}
	for(s = 0; s < Net_Design.nodes_in_output_layer; s++)
	{
		for(int sd = 0; sd < Net_Design.signal_dimensions; sd++)
		{
			Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + s].Max_min =
				Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + s].Max_min +
				Test_Data.pNormalizeInfo->pCoAnti[sd].Max_min *
				(Test_Data.pNormalizeInfo->pCoAntiweight[sd].Max_min) / totalCoAntiweightMax_min;
			Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + s].min =
				Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + s].min +
				Test_Data.pNormalizeInfo->pCoAnti[sd].min *
				(Test_Data.pNormalizeInfo->pCoAntiweight[sd].min) / totalCoAntiweightmin;
		}
	}

	// Per-(input s, output d) ratio of the training set's output
	// coefficient to its input coefficient, with both operands nudged
	// away from exact zero first.
	for(s = 0; s < Net_Design.signal_dimensions; s++)
	{
		for(int d = 0; d < Net_Design.nodes_in_output_layer; d++)
		{
			if (Net_Design.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].Max_min == 0)
				Net_Design.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].Max_min =
					Net_Design.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].Max_min + exact;
			if (Net_Design.pNormalizeInfo->pCoAnti[s].Max_min == 0)
				Net_Design.pNormalizeInfo->pCoAnti[s].Max_min =
					Net_Design.pNormalizeInfo->pCoAnti[s].Max_min + exact;

			Net_Design.pNormalizeInfo->pRelativeCoAnti[s][d].Max_min =
				Net_Design.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].Max_min /
				Net_Design.pNormalizeInfo->pCoAnti[s].Max_min;

			if (Net_Design.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].min == 0)
				Net_Design.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].min =
					Net_Design.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].min + exact;
			if (Net_Design.pNormalizeInfo->pCoAnti[s].min == 0)
				Net_Design.pNormalizeInfo->pCoAnti[s].min =
					Net_Design.pNormalizeInfo->pCoAnti[s].min + exact;

			Net_Design.pNormalizeInfo->pRelativeCoAnti[s][d].min =
				Net_Design.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].min /
				Net_Design.pNormalizeInfo->pCoAnti[s].min;
		}
	}

	// Final output-layer anti-normalization coefficients:
	//   output_coef[d] = sum over input dims s of
	//                    pRelativeCoAnti[s][d] * test input_coef[s].
	// NOTE(review): this overwrites the weighted average computed above —
	// the original also re-zeroed these slots; confirm both passes are
	// intended.
	for(int d = 0; d < Net_Design.nodes_in_output_layer; d++)
	{
		Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].Max_min = 0;
		Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].min = 0;
	}
	for(s = 0; s < Net_Design.signal_dimensions; s++)
	{
		for(int d = 0; d < Net_Design.nodes_in_output_layer; d++)
		{
			Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].Max_min =
				Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].Max_min +
				Net_Design.pNormalizeInfo->pRelativeCoAnti[s][d].Max_min *
				Test_Data.pNormalizeInfo->pCoAnti[s].Max_min;

			Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].min =
				Test_Data.pNormalizeInfo->pCoAnti[Net_Design.signal_dimensions + d].min +
				Net_Design.pNormalizeInfo->pRelativeCoAnti[s][d].min *
				Test_Data.pNormalizeInfo->pCoAnti[s].min;
		}
	}
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -