// NeuralB.cpp: implementation of the NeuralB class.
//
//////////////////////////////////////////////////////////////////////
#include "stdafx.h"
#include "NeuralB.h"
#include <math.h>
//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////
NeuralB:: ~NeuralB()
{
}
void NeuralB::initialize_training_storage_array()
{
Training_Data.acquire_net_info(Net_Design.signal_dimensions, Net_Design.nodes_in_output_layer);
Training_Data.request_training_data();
}
void NeuralB::establish_test_battery_size(void)
{
Test_Data.acquire_net_info(Net_Design.signal_dimensions, Net_Design.nodes_in_output_layer);
}
NeuralB::NeuralB()
{
	trainHasBegin = false; // training starts only once beginTrainBp() is called
}
// define the establish_backprop_network function
void NeuralB::establish_backprop_network(void)
{
	hidenode[0] = Net_Design.hidenode[0];
	hidenode[1] = Net_Design.hidenode[1];
	Net_Design.construct_and_initialize_backprop_network();
} // end establish_backprop_network function
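// Reading the loops in train_net_with_backpropagation() below, the calls above
// presumably build a fully connected topology of
// signal_dimensions inputs -> hidenode[0] nodes -> hidenode[1] nodes
// -> nodes_in_output_layer outputs. This is an inference, not documented
// Back_Topology behavior.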
// set the activation functions of the nodes of the network
// define train_net_with_backpropagation function
void NeuralB::train_net_with_backpropagation(void)
{
float output_error,real_error_difference;
int sig,sigdim, hid1, hid2, outnode;
sum_of_error = 0;
	// loop over all Training_Data.sample_number training samples,
	// feeding each sample from the sample array into the BP network
for(sig = 0; sig < Training_Data.sample_number; sig++)
	{ // each signal carries signal_dimensions input dimensions
output_error = 0;
for(sigdim = 0; sigdim < Training_Data.signal_dimensions; sigdim++)
{
			// fan this input dimension out to all hidenode[0] first-hidden-layer nodes
			for(hid1 = 0; hid1 < hidenode[0]; hid1++)
			{ // element sigdim of one hidden node's input
Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].processing_unit_input[sigdim] =
Training_Data.number_of_samples[Training_Data.signalpoint[sig].signal_value].data_in_sample[sigdim];
}
}
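		// every first-hidden-layer node now holds the full
		// signal_dimensions-element input vector of training sample sig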
		// the second layer's input dimension equals the first layer's output dimension
		for(hid1 = 0; hid1 < hidenode[0]; hid1++)
		{ // compute the first hidden layer's output signals
			Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
			// the second hidden layer's inputs are the first hidden layer's outputs
for(hid2 = 0; hid2 < hidenode[1]; hid2++)
{
Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].processing_unit_input[hid1] = Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].output_signal;
}
}
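		// calculate_output_signal presumably evaluates
		// out = f(sum_i w_i * x_i + bias) for the layer's activation f
		// (e.g. a logistic sigmoid); see the demo_node_output() sketch
		// after this function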
		// the second hidden layer has hidenode[1] nodes
		for(hid2 = 0; hid2 < hidenode[1]; hid2++)
		{ // compute the second hidden layer's output signals
Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
				// the output layer's input signals are the second hidden layer's output signals
Net_Design.node_in_output_layer[outnode].processing_unit_input[hid2] = Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].output_signal;
}
}
		// compute the output layer's signals
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
Net_Design.node_in_output_layer[outnode].calculate_output_signal(Net_Design.activation_function_for_output_layer);
Net_Design.node_in_output_layer[outnode].calculate_output_error_information_term(Training_Data.number_of_samples[Training_Data.signalpoint[sig].signal_value].data_in_sample[Training_Data.signal_dimensions + outnode], Net_Design.activation_function_for_output_layer);
// calculate the instantaneous sum of squared errors (Haykin, 1994)
real_error_difference =(float) (pow(Net_Design.node_in_output_layer[outnode].error_difference_squared, 0.5)) * (Training_Data.max_output_value[outnode] - Training_Data.min_output_value[outnode]);
output_error += (float)(0.5 * pow(real_error_difference, 2.0));
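			// i.e. output_error accumulates E = 0.5 * sum_k e_k^2 over the output
			// nodes, with each e_k rescaled from the normalized range back to the
			// original output units via (max_output_value - min_output_value)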
// calculate maximum and mean absolute error difference for each node
real_error_difference = Net_Design.node_in_output_layer[outnode].absolute_error_difference * (Training_Data.max_output_value[outnode] - Training_Data.min_output_value[outnode]);
			// accumulate the mean absolute error difference
			meandifference[outnode] += real_error_difference / float(Training_Data.sample_number);
			// track the maximum absolute error difference
if(sig == 0) {maxdifference[outnode] = real_error_difference;}
else
{
if(real_error_difference > maxdifference[outnode])
{
maxdifference[outnode] = real_error_difference;
}
}
}
		// accumulate the average squared error over all signals
		sum_of_error += output_error / float(Training_Data.sample_number);
// backpropagation of error will depend on the number of hidden layers
// { // backpropagate from output node to adjacent hidden layer
		// compute the error terms of the second hidden layer
		for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
		{ // accumulate each hidden node's error_information_term
for(hid2 = 0; hid2 < hidenode[1]; hid2++)
{
Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].error_information_term += Net_Design.node_in_output_layer[outnode].error_information_term * Net_Design.node_in_output_layer[outnode].weight_of_inputs[hid2];
}
}
		// calculate the error information term for each node in hidden layer 2
for(hid2 = 0; hid2 < hidenode[1]; hid2++)
{
Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].calculate_hidden_error_information_term((float)Net_Design.activation_function_for_hidden_layer);
}
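		// assuming calculate_hidden_error_information_term applies the usual
		// delta rule, each node now holds delta_j = f'(net_j) * sum_k delta_k * w_kj,
		// where the sum over downstream deltas was accumulated in the loop above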
// backpropagate error from hidden layer 2 to hidden layer 1
for(hid2 = 0; hid2 < hidenode[1]; hid2++)
{
for(hid1 = 0; hid1 <hidenode[0]; hid1++)
{
Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].error_information_term += Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].error_information_term * Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].weight_of_inputs[hid1];
}
}
for(hid1 = 0; hid1 <hidenode[0]; hid1++)
{
Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].calculate_hidden_error_information_term((float)Net_Design.activation_function_for_hidden_layer);
}
// update the networks output nodes
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
Net_Design.node_in_output_layer[outnode].calculate_weight_and_bias_correction_terms(Training_Data.rate_of_learning);
}
// update the networks hidden nodes (if they exist)
		// update the hidden nodes in both hidden layers
		for(hid1 = 0; hid1 < hidenode[0]; hid1++)
		{
			Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].calculate_weight_and_bias_correction_terms(Training_Data.rate_of_learning);
		}
		for(hid2 = 0; hid2 < hidenode[1]; hid2++)
		{
			Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].calculate_weight_and_bias_correction_terms(Training_Data.rate_of_learning);
		}
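		// each call above presumably applies the standard gradient step
		// dw_ij = rate_of_learning * delta_j * x_i (and likewise for the bias);
		// see the demo_apply_corrections() sketch after this function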
} // end sig loop
// savefile_ptr << epoch + 1 << " ";
// savefile_ptr << sum_of_error << " ";
// for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
// {savefile_ptr << maxdifference[outnode] << " " << meandifference[outnode] << " ";}
// savefile_ptr << endl;
	// track the minimum average squared error across epochs
if(epoch == 0)
{Training_Data.minimum_average_squared_error = sum_of_error;}
if(sum_of_error < Training_Data.minimum_average_squared_error)
{Training_Data.minimum_average_squared_error = sum_of_error;}
	// scramble (shuffle) the order of signal presentation
Training_Data.scramble_data_in_array();
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{ maxdifference[outnode] = 0.0; meandifference[outnode] = 0.0;}
// if(Training_Data.minimum_average_squared_error <= target_minimum_average_squared_error)
// delete arrays holding the training data
//Training_Data.delete_signal_data_array();
// delete [] maxdifference;
// delete [] meandifference;
} // end of backpropagation function
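// ---------------------------------------------------------------------------
// Minimal self-contained sketch of the per-node arithmetic the member calls
// above presumably perform, assuming logistic activations. This is an
// assumption about Back_Topology's internals, not its actual code; every name
// below is hypothetical and exists for illustration only.
static float demo_sigmoid(float net)
{
	return 1.0f / (1.0f + (float)exp(-net)); // logistic activation
}
// forward pass of one node: out = f(sum_i w[i]*x[i] + bias)
static float demo_node_output(const float* w, const float* x, int n, float bias)
{
	float net = bias;
	for(int i = 0; i < n; i++) net += w[i] * x[i];
	return demo_sigmoid(net);
}
// output-layer error information term for a logistic node:
// delta = (target - out) * f'(net) = (target - out) * out * (1 - out)
static float demo_output_delta(float target, float out)
{
	return (target - out) * out * (1.0f - out);
}
// weight and bias correction: dw_i = rate * delta * x_i, dbias = rate * delta
static void demo_apply_corrections(float* w, float& bias, const float* x, int n,
                                   float delta, float rate_of_learning)
{
	for(int i = 0; i < n; i++) w[i] += rate_of_learning * delta * x[i];
	bias += rate_of_learning * delta;
}
// ---------------------------------------------------------------------------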
// define the function that tests the neural network
void NeuralB::test_neural_network()
{
float output_error, sum_of_error, real_output;
int sigdim, hid1, hid2, outnode;
sum_of_error = 0;
output_error = 0;
	// read in the test data
for(sigdim = 0; sigdim < Test_Data.signal_dimensions; sigdim++)
{
for(hid1 = 0; hid1 < hidenode[0]; hid1++)
{Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].processing_unit_input[sigdim] =
Test_Data.number_of_samples[Test_Data.testsampleNum].data_in_sample[sigdim];
TRACE("%f\n",Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].processing_unit_input[sigdim]);
}
}
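	// as in training, every first-hidden-layer node receives the complete
	// input vector of the selected test sample (index testsampleNum)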
for(hid1 = 0; hid1 <hidenode[0]; hid1++)
{
Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
for(hid2 = 0; hid2 <hidenode[1]; hid2++)
{Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].processing_unit_input[hid1] =
Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].output_signal;
//TRACE("%f\n",Net_Design.hidden_layer_number[0].node_in_hidden_layer[hid1].output_signal);
}
}
for(hid2 = 0; hid2 <hidenode[1]; hid2++)
{
Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{Net_Design.node_in_output_layer[outnode].processing_unit_input[hid2] = Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].output_signal;}
}
//TRACE("%f ",Net_Design.hidden_layer_number[1].node_in_hidden_layer[hid2].output_signal);
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
Net_Design.node_in_output_layer[outnode].calculate_output_signal(Net_Design.activation_function_for_output_layer);
//
Net_Design.node_in_output_layer[outnode].calculate_output_error_information_term(Test_Data.number_of_samples[Test_Data.testsampleNum].data_in_sample[Test_Data.signal_dimensions + outnode], Net_Design.activation_function_for_output_layer);
}
	// de-normalize the target output data and store it in targetoutput
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
real_output = Test_Data.min_output_value[outnode] + (Test_Data.number_of_samples[Test_Data.testsampleNum].data_in_sample[outnode + Test_Data.signal_dimensions] * (Test_Data.max_output_value[outnode] - Test_Data.min_output_value[outnode]));
Test_Data.targetoutput[outnode]=real_output;
}
	// de-normalize the network's output data and store it in netoutput
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
real_output =
Test_Data.min_output_value[outnode] +
(Net_Design.node_in_output_layer[outnode].output_signal *
(Test_Data.max_output_value[outnode] -
Test_Data.min_output_value[outnode]));
Test_Data.netoutput[outnode]=real_output;
TRACE("%f\n",Net_Design.node_in_output_layer[outnode].output_signal);
}
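	// both conversions above invert the presumed min-max normalization:
	// real = min + y_norm * (max - min), i.e. the inverse of
	// y_norm = (real - min) / (max - min)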
// send absolute differences between each node and its output to a file
//for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
// {
// real_output = (float)(pow(Net_Design.node_in_output_layer[outnode].error_difference_squared, 0.5)) * (Test_Data.max_output_value[outnode] - Test_Data.min_output_value[outnode]);
// savefile_ptr << real_output << " ";
// real_output = (float)pow(real_output, 2.0);
// output_error += (float)0.5 * real_output;
// }
// sum square of error
///////////////////////////////////////////
} // end test neural network function
void NeuralB::network_training_testing()
{
int menu_choice;
cout << "\n\n\n\n";
cout << "**************** Operations Menu ****************" << "\n\n";
cout << " Please select one of the following options:" <<"\n\n";
cout << " 1. Train Backprop network only " <<"\n\n";
cout << " 2. Test Backprop network only " <<"\n\n";
cout << " 3. Train and Test Backprop network" <<"\n\n";
cout << "*************************************************" << "\n\n";
cout << " Your choice?: "; cin >> menu_choice;
cout << "\n\n";
switch(menu_choice)
{
case 1:
initialize_training_storage_array();
train_net_with_backpropagation();
break;
case 2:
establish_test_battery_size();
		test_neural_network();
break;
case 3:
initialize_training_storage_array();
train_net_with_backpropagation();
establish_test_battery_size();
		test_neural_network();
break;
default:network_training_testing();
}
}
// This concludes the backpropagation section of the program
bool NeuralB::beginTrainBp()
{
	// allocate the per-output-node error statistics arrays
maxdifference = new float[Net_Design.nodes_in_output_layer];
meandifference = new float[Net_Design.nodes_in_output_layer];
trainHasBegin=true;
return true;
}
void NeuralB::CopyBpNet(Back_Topology bp)
{
	// (body truncated in the original listing)
}