// patternhw4doc.cpp
// Neural Network program for pattern class
}
}
/************************************************************************/
/* EnterRegressData(int mode) */
/* Name: EnterRegressData */
/* Parameter: int mode - TRAINMODE or TESTMODE */
/* Return: No */
/* Explain: Copy the regression data into the training or test structure */
/************************************************************************/
void CPatternHW4Doc::EnterRegressData(int mode)
{
register int i;
if(mode == TRAINMODE){ // Training Mode
for(i=0;i<225;i++){
TrnData[i].Data[0] = m_DataR[i][0];
TrnData[i].Data[1] = m_DataR[i][1];
TrnData[i].Response = m_DataR[i][2];
}
}else if(mode == TESTMODE){ // Test Mode
for(i=0;i<225;i++){
TsData[i].Data[0] = m_DataR[i][0];
TsData[i].Data[1] = m_DataR[i][1];
TsData[i].Response = m_DataR[i][2];
}
}
}
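// Usage sketch (assumption: m_DataR holds the 225 regression samples, loaded
// elsewhere in this class, before either call is made):
//   EnterRegressData(TRAINMODE);  // copy m_DataR into TrnData
//   EnterRegressData(TESTMODE);   // copy m_DataR into TsData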
/****************************************************************************/
/* MLPR(int hidLay, double learnRate, int hidNode, double momen, double th) */
/* Name: MLPR */
/* Parameter: int hidLay - The number of hidden layers */
/* double learnRate - Learning rate */
/* int hidNode - The number of nodes in each hidden layer */
/* double momen - Momentum value */
/* double th - Threshold value */
/* Return: No */
/* Explain: Multi-layer Perceptron for Regressor */
/****************************************************************************/
void CPatternHW4Doc::MLPR(int hidLay, double learnRate, int hidNode, double momen, double th)
{
int i; // Loop Constant
int index; // Indexing number
int iteration=0; // The number of iteration
int epoch=0; // The number of Epoch
double sse=1.0, err[1];
double *Feed = new double[2];
/************************************************************************/
/* Initialize all layers in MLP */
/************************************************************************/
InitMLPLayer(&InputLayer,INPUTMODE,2,momen,learnRate,NULL);
for(i=0;i<hidLay;i++){
if(i==0)
InitMLPLayer(&HiddenLayer[i],HIDDENMODE,hidNode,momen,learnRate,&InputLayer);
else
InitMLPLayer(&HiddenLayer[i],HIDDENMODE,hidNode,momen,learnRate,&HiddenLayer[i-1]);
}
InitMLPLayer(&OutputLayer,OUTPUTMODE,1,momen,learnRate,&HiddenLayer[hidLay-1]);
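// At this point the layers form a feed-forward chain
// InputLayer -> HiddenLayer[0] -> ... -> HiddenLayer[hidLay-1] -> OutputLayer,
// each layer keeping a pointer to the layer that feeds it.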
/************************************************************************/
/************************************************************************/
/* Normalize the input data */
/************************************************************************/
NormalizeR(TrnData);
/************************************************************************/
do{
index = iteration%225; // Cycle through the 225 training samples
/************************************************************************/
/* 1. Feed Input Data */
/************************************************************************/
Feed[0]=TrnData[index].Data[0];
Feed[1]=TrnData[index].Data[1];
SetInputvalue(&InputLayer,Feed);
/************************************************************************/
/************************************************************************/
/* 2. Forward Computation with Updating the node value */
/************************************************************************/
for(i=0;i<hidLay;i++){
UpdateY(&HiddenLayer[i]);
}
UpdateY(&OutputLayer);
/************************************************************************/
/************************************************************************/
/* 3. Backward Computation with calculate the error */
/************************************************************************/
for(i=0;i<1;i++) { // Compute Error
err[i]=TrnData[index].Response-OutputLayer.Y[i];
}
Backward(&OutputLayer,err);
for(i=hidLay-1;i>-1;i--)
{
if(i==(hidLay-1)) {
Backward(&HiddenLayer[i],&OutputLayer);
} else {
Backward(&HiddenLayer[i],&HiddenLayer[i+1]);
}
}
/************************************************************************/
/************************************************************************/
/* 4. Weight Vector Update */
/************************************************************************/
for(i=0;i<hidLay;i++){
UpdateW(&HiddenLayer[i]);
}
UpdateW(&OutputLayer);
/************************************************************************/
/************************************************************************/
/* Result of Iterations */
/************************************************************************/
if((iteration)%225==0) // For every multiple of 225 iterations
{
m_SSE[epoch]=0;
for(i=0;i<1;i++){
m_SSE[epoch]+=(err[i]*err[i]/2.0); // Squared error of the current sample
}
sse=m_SSE[epoch];
epoch++; // Increase the epoch number
}
/************************************************************************/
iteration++;
}while(sse>th && epoch<MAXEPOCH); // Until the error becomes smaller than threshold
// Or the epoch number becomes larger than maximum value
delete[] Feed; // Feed was allocated with new[]
}
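// Note: Backward() and UpdateW() are defined elsewhere in this file (not on this page).
// Given the momentum and learning-rate values passed to InitMLPLayer above, they are
// assumed to implement the usual back-propagation delta rule, roughly
//   w_new = w_old + learnRate * delta * y_prev + momen * (w_old - w_older)
// This is an assumption about their behaviour, not their actual implementation.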
/************************************************************************/
/* NormalizeR(RegressData *Data) */
/* Name: NormalizeR */
/* Parameter: RegressData *Data - Data set */
/* Return: No */
/* Explain: Normalize the Regress Data set */
/************************************************************************/
void CPatternHW4Doc::NormalizeR(RegressData *Data)
{
int i;
double data1dif, data2dif;
double data1max, data2max; // Maximum Value
double data1min, data2min; // Minimum Value
data1max = -100 ; data2max = -100 ; // Sentinel extremes (assumes all data lies within [-100, 100])
data1min = 100 ; data2min = 100 ;
for(i=0;i<225;i++) {
/****************************************************************/
/* Find Maximum and Minimum Value */
/****************************************************************/
if(data1max<Data[i].Data[0]) data1max=Data[i].Data[0];
if(data2max<Data[i].Data[1]) data2max=Data[i].Data[1];
if(data1min>Data[i].Data[0]) data1min=Data[i].Data[0];
if(data2min>Data[i].Data[1]) data2min=Data[i].Data[1];
/****************************************************************/
}
/********************************************************************/
/* Find difference Value between maximum and minimum value */
/********************************************************************/
data1dif = data1max - data1min;
data2dif = data2max - data2min;
/********************************************************************/
/********************************************************************/
/* Normalize the input data value */
/********************************************************************/
for(i=0;i<225;i++) {
Data[i].Data[0] = ( Data[i].Data[0] - data1min ) / data1dif;
Data[i].Data[1] = ( Data[i].Data[1] - data2min ) / data2dif;
}
/********************************************************************/
}
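// Worked example of the min-max scaling above: if Data[*].Data[0] ranges over [-3, 3],
// then data1min = -3, data1dif = 6, and a raw value of 0 maps to (0 - (-3)) / 6 = 0.5,
// so every normalized component lies in [0, 1].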
/************************************************************************************/
/* TestR(int hidLay, double learnRate, int hidNode, double momen, double threshold) */
/* Name: TestR */
/* Parameter: int hidLay - The number of hidden layer */
/* double learnRate - Learning rate */
/* int hidNode - The number of nodes in hidden layer */
/* double momen - Momentum value */
/* double threshold - Threshold value */
/* Return: No */
/* Explain: Test for regressor */
/************************************************************************************/
void CPatternHW4Doc::TestR(int hidLay, double learnRate, int hidNode, double momen, double threshold)
{
int i,j;
double *Feed = new double[2];
double err[1];
MLPR(hidLay, learnRate, hidNode, momen, threshold); // Train the non-linear regressor
NormalizeR(TsData); // Input Test Data Normalization
for(i=0;i<225;i++) {
/************************************************/
/* 1. Feed input samples */
/************************************************/
Feed[0]=TsData[i].Data[0]; // First input variable
Feed[1]=TsData[i].Data[1]; // Second input variable
SetInputvalue(&InputLayer,Feed); // Set up the input value
/************************************************/
/************************************************/
/* 2. Forward Computation */
/************************************************/
for(int k=0;k<hidLay;k++) {
UpdateY(&HiddenLayer[k]); // Update the hidden layer
}
Test_ResultR[i]=UpdataY2(&OutputLayer); // Update the output layer
/************************************************/
/************************************************/
/* Calculation of error */
/************************************************/
for(j=0;j<1;j++) {
err[j]=TsData[i].Response-OutputLayer.Y[j];
}
m_SSE[i]=0;
for(j=0;j<1;j++){
m_SSE[i]+=(err[j]*err[j]/2.0);
}
/************************************************/
}
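// Note: m_SSE is reused here as the per-sample squared error of the test set (index = sample),
// whereas MLPR() above fills it with the per-epoch training error (index = epoch).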
FILE* fp;
fp = fopen("result.txt","w"); // Text mode, since fprintf writes formatted text
if(fp != NULL){
for(i=0;i<225;i++){
fprintf(fp,"%f\n",m_SSE[i]);
}
fclose(fp);
}
delete[] Feed;
}
/************************************************************************/
/* UpdataY2(Layer *current_layer) */
/* Name: UpdataY2 */
/* Parameter: Layer *current_layer - current layer */
/* Return: double - weighted sum (v) of the last output node */
/* Explain: Update Y for regressor */
/************************************************************************/
double CPatternHW4Doc::UpdataY2(Layer *current_layer)
{
int i,j;
double v;
if(current_layer->layer_mode == INPUTMODE) // Check the mode of current layer
return -1;
for(i=0;i<current_layer->nodes;i++){ // For all nodes in current layer,
v = 0; // Initialize
for(j=0;j<current_layer->prev_layer->nodes+1;j++){
v+=current_layer->prev_layer->Y[j]*current_layer->W[j][i];
} // Calculate the sum of multiples of Y[prev] and W[crnt]
current_layer->Y[i]=Sigmoid(v); // Unipolar Sigmoidal Function
}
current_layer->Y[current_layer->nodes]=-1; // Augmented bias input is fixed at -1
return v; // Return the weighted sum (v) of the last node, before the sigmoid
}
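// Sigmoid() is defined elsewhere in this file. The "Unipolar Sigmoidal Function" comment
// above suggests the standard form, sketched here as an assumption:
//   double Sigmoid(double v) { return 1.0 / (1.0 + exp(-v)); }   // requires <math.h>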
/****************************************************************************************/
/* TestTrain(int hidLay, double learnRate, int hidNode, double momen, double threshold) */
/* Name: TestTrain */
/* Parameter: int hidLay - The number of hidden layer */
/* double learnRate - Learning rate */
/* int hidNode - The number of nodes in hidden layer */
/* double momen - Momentum value */
/* double threshold - Threshold value */
/* Return: No */
/* Explain: Test with training data set */
/****************************************************************************************/
void CPatternHW4Doc::TestTrain(int hidLay, double learnRate, int hidNode, double momen, double threshold)
{
int i,j;
double *Feed = new double[4];
double err[3];
MLP(hidLay, learnRate, hidNode, momen, threshold); // Make non-linear Classifier
Normalize(TrData); // Normalize the training data
for(i=0;i<75;i++) {
/************************************************/
/* 1. Feed input samples */
/************************************************/
Feed[0]=TrData[i].sepalLength; // Sepal Length
Feed[1]=TrData[i].sepalWidth; // Sepal Width
Feed[2]=TrData[i].petalLength; // Petal Length
Feed[3]=TrData[i].petalWidth; // Petal Width
SetInputvalue(&InputLayer,Feed); // Set up the input value
/************************************************/
/************************************************/
/* 2. Forward Computation */
/************************************************/
for(int k=0;k<hidLay;k++) {
UpdateY(&HiddenLayer[k]); // Update the hidden layer
}
UpdateY(&OutputLayer); // Update the output layer
/************************************************/
/************************************************/
/* 3. Calculate the result of testing */
/************************************************/
for(j=0;j<3;j++) {
Test_Result[i][j]=OutputLayer.Y[j];
}
/************************************************/
/************************************************/
/* Calculation of error */
/************************************************/
for(j=0;j<3;j++) {
err[j]=TrData[i].CLdata[j]-OutputLayer.Y[j];
}
m_SSE[i]=0;
for(j=0;j<3;j++){
m_SSE[i]+=(err[j]*err[j]/2.0);
}
/************************************************/
}
delete[] Feed;
}
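// Note: Test_Result[i][0..2] holds the three raw output-node values for training sample i.
// The class decision (presumably the index of the largest output) is taken elsewhere in
// this file and is not shown on this page.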