
Program: ga_bp_predict.cpp
Description: a BP neural network whose weights are optimized by a genetic algorithm (GA). (This listing begins mid-function; the supporting definitions appear earlier in the original file and are not included here.)
    {
      WorstIndividual=population[pop];    // record the worst individual
    }
  }
}

void Elitist()    // elitist strategy (keep the best individual)
{
  int    i;
  int    best_mem,worst_mem;
  double  best,worst;

  best=population[0].fitness;  
  worst=population[0].fitness;
  best_mem=0;
  worst_mem=0;

  for(i=0;i<POP_SIZE-1;i++)
  {
    if(best<population[i].fitness)
    {
      best=population[i].fitness;    // best-fitness individual among the first N-1
      best_mem=i;
    }
    if(worst>population[i].fitness)
    {
      worst=population[i].fitness;  // track the worst; not actually used here, but needed for survival-of-the-fittest replacement
      worst_mem=i;
    }
  }

  if(best>BestIndividual.fitness)
  {
    population[POP_SIZE-1]=population[best_mem]; // replace the last slot with the best of the first N-1
    BestIndividual=population[best_mem];
  }
  else
  {
    population[POP_SIZE-1]=BestIndividual;    // otherwise restore the stored best into the last slot
  }
}

void Steady_State()    // steady-state strategy (keep the worst individual)
{
  int    i;
  int   worst_mem,best_mem;
  double worst,best;

  best=population[0].fitness;  
  worst=population[0].fitness;
  best_mem=0;
  worst_mem=0;

  for(i=0;i<POP_SIZE-1;i++)
  {
    if(best<population[i].fitness)
    {
      best=population[i].fitness;    // best-fitness individual among the first N-1
      best_mem=i;
    }
    if(worst>population[i].fitness)
    {
      worst=population[i].fitness;  // worst individual, used by the steady-state model
      worst_mem=i;
    }
  }

  if(worst<WorstIndividual.fitness)
  {
    population[POP_SIZE-1]=population[worst_mem]; // replace the last slot with the worst individual
    WorstIndividual=population[worst_mem];
  }
  else
  {
    population[POP_SIZE-1]=WorstIndividual;    // otherwise restore the stored worst into the last slot
  }
}

void Adaptive_Crossover()              // crossover with an adaptive crossover probability
{
  int    pop;
  int    i;
  int    j;
  int    k;
  int    pop1,pop2;  
  double  p;
  double temp;
  double average_fitness,sum;
  double max_fitness;
  double  pc;  // crossover probability

  sum=0;
  max_fitness=population[0].fitness;
  for(i=0;i<POP_SIZE;i++)
  {
    sum=sum+population[i].fitness;
    if(max_fitness<population[i].fitness)
      max_fitness=population[i].fitness;
  }
  average_fitness=sum/(double)POP_SIZE;

  // adaptive-probability crossover
  for(pop=0;pop<POP_SIZE;pop++)
  {
    if(population[pop].fitness>average_fitness)
      pc=P1_XOVER*(max_fitness-population[pop].fitness)/(max_fitness-average_fitness);
    else
      pc=P2_XOVER;
    p=(double)(rand()%1000)/1000.0;
    if(p<pc)
    {
      pop1=rand()%POP_SIZE;    
      pop2=rand()%POP_SIZE;
      temp=GetRandVal(0,1);

      for(j=0;j<HiddenNum;j++)
      {
        for(i=0;i<InNum;i++)
        {
          if(rand()%2==0)
            population[pop].gene1[j][i]=population[pop1].gene1[j][i]*temp+(1-temp)*population[pop2].gene1[j][i]; // arithmetic crossover
          else
            population[pop].gene1[j][i]=population[pop1].gene1[j][i]*(1-temp)+temp*population[pop2].gene1[j][i];
        }
      }
      for(k=0;k<OutNum;k++)
      {
        for(j=0;j<HiddenNum;j++)
        {
          if(rand()%2==0)
            population[pop].gene2[k][j]=population[pop1].gene2[k][j]*temp+(1-temp)*population[pop2].gene2[k][j]; // arithmetic crossover
          else
            population[pop].gene2[k][j]=population[pop1].gene2[k][j]*(1-temp)+temp*population[pop2].gene2[k][j];
        }
      }
    }
  }
}
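
/* Note: the adaptive probability above follows the style of the
   Srinivas-Patnaik adaptive GA: for an above-average individual,
       pc = P1_XOVER * (f_max - f) / (f_max - f_avg),
   so fitter individuals are crossed over less often, while below-average
   individuals always use the fixed rate P2_XOVER. P1_XOVER and P2_XOVER
   are constants defined elsewhere in the original file. */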

void Adaptive_Mutate()                // mutation with an adaptive mutation probability
{
  int    pop;
  int    i;
  int    j;
  int    k;
  double  p;
  double pm;
  double max_fitness,average_fitness;
  double  sum;

  sum=0;
  max_fitness=population[0].fitness;
  for(i=0;i<POP_SIZE;i++)
  {
    sum+=population[i].fitness;
    if(max_fitness<population[i].fitness)
      max_fitness=population[i].fitness;
  }
  average_fitness=sum/(double)POP_SIZE;

  for(pop=0;pop<POP_SIZE;pop++)
  {
    if(population[pop].fitness>average_fitness)
      pm=P1_MUTATE*(max_fitness-population[pop].fitness)/(max_fitness-average_fitness);
    else
      pm=P2_MUTATE;
    p=(double)(rand()%1000)/1000.0;
    if(p<pm)
    {
      for(j=0;j<HiddenNum;j++)
      {
        for(i=0;i<InNum;i++)
        {
          population[pop].gene1[j][i]=GetRandVal(0,1);
        }
      }
      for(k=0;k<OutNum;k++)
      {
        for(j=0;j<HiddenNum;j++)
        {
          population[pop].gene2[k][j]=GetRandVal(0,1);
        }
      }
    }
  }
}
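
/* GetRandVal(0,1), used by both adaptive operators above, is defined
   elsewhere in the original file. A minimal sketch, assuming it returns
   a uniform double in [low, high] (an assumption, not the original
   definition):

   double GetRandVal(double low,double high)
   {
     return low+(high-low)*((double)rand()/(double)RAND_MAX);  // uniform in [low,high]
   }
*/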

void Report()                  // report evolution progress: current best individual and population average
{
  int    i;
  double  average_fitness;
  double best_fitness;
  double  sum;

  sum=0;
  for(i=0;i<POP_SIZE;i++)
  {
    sum+=population[i].fitness;
  }
  average_fitness=sum/(double)POP_SIZE;
  best_fitness=BestIndividual.fitness;
  
  fprintf(ga_fp,"%4d %10.4lf %10.4lf\n",generation,best_fitness,average_fitness);
}

void GetOptimizedWeight()            // copy the GA-optimized weights into the network
{
  int    i,j,k;

  for(j=0;j<HiddenNum;j++)
  {
    for(i=0;i<InNum;i++)
    {
      W[j][i]=BestIndividual.gene1[j][i];      //Weight_W
    }
  }
  for(k=0;k<OutNum;k++)
  {
    for(j=0;j<HiddenNum;j++)
    {
      V[k][j]=BestIndividual.gene2[k][j];      //Weight_V
    }
  }
}

void RecordOptimizedWeight()
{
  int    i,j,k;

  fprintf(ga_fp,"\n\nThe optimized weights W[j][i] are:\n");

  for(j=0;j<HiddenNum;j++)
  {
    for(i=0;i<InNum;i++)
    {
      fprintf(ga_fp,"%8.4lf",W[j][i]);
    }
    fprintf(ga_fp,"\n");
  }

  fprintf(ga_fp,"\n\nThe optimized weights V[k][j] are:\n");
  for(k=0;k<OutNum;k++)
  {
    for(j=0;j<HiddenNum;j++)
    {
      fprintf(ga_fp,"%8.4lf",V[k][j]);
    }
    fprintf(ga_fp,"\n");
  }

}

void Finalize_GA()                    // close the GA log file
{
  fclose(ga_fp);
}

void GA_Optimized_Network()                // optimize the network weights with the GA
{
  generation=0;
  Evaluate_Weight();
  while(generation++<MAX_GEN)
  {
      Select_Roulette();    // roulette-wheel selection
//      Select_Tournament();  // tournament selection
      Crossover();      // crossover
      Mutate();        // mutation
//      Adaptive_Crossover();  // adaptive crossover
//      Adaptive_Mutate();    // adaptive mutation
      Evaluate_Weight();
      KeepBestIndividual();
      Report();
      Elitist();        // elitist strategy
//      Steady_State();      // steady-state strategy
  }
  GetOptimizedWeight();
  RecordOptimizedWeight();    // log the optimized weights
  Finalize_GA();
}

/*********************************************************
        propagation forward
**********************************************************/
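
/* The forward pass relies on a sigmoid() helper defined elsewhere in the
   original file. A minimal sketch, assuming the standard logistic
   activation (an assumption, not the original definition):

   #include <math.h>
   double sigmoid(double x)
   {
     return 1.0/(1.0+exp(-x));  // logistic function, maps R into (0,1)
   }
*/
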
void InputLayerToHiddenLayer()  // propagate from the input layer to the hidden layer
{
  int    i,j;
  double  TempValue;

  for(j=0;j<HiddenNum;j++)
  {
    TempValue=0;
    for(i=0;i<InNum;i++)
    {
      TempValue+=W[j][i]*I[i]; // dot product
      WSave[j][i]=W[j][i];     // save the weight for the momentum term
    }
    U[j]=TempValue+theta[j];  // hidden-layer input
    theta_save[j]=theta[j];   // save the threshold for the momentum term
    H[j]=sigmoid(U[j]);      // hidden-layer output
  }
}

void HiddenLayerToOutputLayer()  // propagate from the hidden layer to the output layer
{
  int    j,k;
  double  TempValue;

  for(k=0;k<OutNum;k++)
  {
    TempValue=0;
    for(j=0;j<HiddenNum;j++)
    {
      TempValue+=V[k][j]*H[j];  // dot product
      VSave[k][j]=V[k][j];      // save the weight for the momentum term
    }
    S[k]=TempValue+gama[k];    // output-layer input
    gama_save[k]=gama[k];      // save the threshold for the momentum term
    O[k]=sigmoid(S[k]);      // output-layer output
  }
}
/*************************************************
    Error Calculation & Output Recovery
*************************************************/
void ErrorCalculate(int m)  // compute the error of the m-th sample
{
  int    k;
  double err;

  err=0;  
  for(k=0;k<OutNum;k++)
  {
    err+=(T[k]-O[k])*(T[k]-O[k])/2.0;
  }
  Error[m]=err;      // halved sum of squared errors for the m-th sample
}

void RecoverOutput()    // de-normalize the output values
{
  int    i;
  for(i=0;i<OutNum;i++)
  {
    predict_out[i]=Max-(Max-Min)*O[i]/0.8;
  }
}
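
/* Note: the formula in RecoverOutput() implies that DataNormalize()
   (defined elsewhere in the original file) maps each series value x to
       O = 0.8 * (Max - x) / (Max - Min),
   i.e. into the range [0, 0.8]; RecoverOutput() inverts that mapping via
       x = Max - (Max - Min) * O / 0.8.
   This is inferred from the code shown here, not confirmed against the
   missing DataNormalize() source. */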

/*************************************************
      Error Back Propagation
*************************************************/

void OutputLayertoHiddenLayer()  // back-propagate the error from the output layer to the hidden layer and adjust weights
{
  int    j,k;
  double delta[OutNum];    // output error
  double delta_bar[OutNum];  // output error scaled by the sigmoid derivative

  for(k=0;k<OutNum;k++)
  {
    delta[k]=T[k]-O[k];
    delta_bar[k]=delta[k]*O[k]*(1-O[k]);
    gama[k]+=alpha*delta_bar[k]+beta*(gama[k]-gama_save[k]);      // adjust the output-layer threshold (with momentum)
    for(j=0;j<HiddenNum;j++)
    {
      V[k][j]+=alpha*delta_bar[k]*H[j]+beta*(V[k][j]-VSave[k][j]);  // adjust the output-layer weights (with momentum)
    }
  }
}

void HiddenLayerToInputLayer()  // adjust the hidden-layer weights and thresholds
{
  int    i,j,k;
  double  delta[OutNum];    // output-layer error terms
  double sigma[HiddenNum];  // hidden-layer error terms

  for(j=0;j<HiddenNum;j++)
  {
    sigma[j]=0;
    for(k=0;k<OutNum;k++)
    {
      delta[k]=(T[k]-O[k])*O[k]*(1-O[k]);
      sigma[j]+=delta[k]*V[k][j]*H[j]*(1-H[j]);
    }
  }
  // adjust the hidden-layer weights and thresholds
  for(j=0;j<HiddenNum;j++)
  {
    for(i=0;i<InNum;i++)
    {
      W[j][i]+=alpha*sigma[j]*I[i]+beta*(W[j][i]-WSave[j][i]);    // adjust the weights (with momentum)
    }
    theta[j]+=alpha*sigma[j]+beta*(theta[j]-theta_save[j]);        // adjust the thresholds (with momentum)
  }
}
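
/* Both adjustment routines implement gradient descent with a momentum
   term:
       w(t+1) = w(t) + alpha * gradient + beta * (w(t) - w(t-1)),
   where alpha is the learning rate and beta the momentum coefficient
   (set to 0.2 and 0.8 in main() below). WSave, VSave, theta_save and
   gama_save hold the previous values recorded during the forward pass. */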

/*************************************************
        main program
*************************************************/ 
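
/* randomize(), called in main() below, is a Borland C helper that seeds
   rand() from the system clock. On other compilers a minimal stand-in
   would be (an assumption, not part of the original source):

   #include <stdlib.h>
   #include <time.h>
   #define randomize() srand((unsigned)time(NULL))
*/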

int main()                                  // main program
{
  int    i;
  int    m;
  int    CurrentStudy;    // current training iteration
  int    MaxLearnTime;    // maximum number of training iterations
  int   predict_step;    // number of one-step-ahead prediction steps
  double  PreError;      // target training error
  double TotalError;

  MaxLearnTime=20000;
  alpha=0.2;
  beta=0.8;
  PreError=1e-3;
  randomize();
  DataNormalize();      // normalize the data series
  NN_Initialize();      // initialize the neural network
  GA_Initialize();      // initialize the genetic algorithm

  if((ga_fp=fopen("ga_fp.dat","w"))==NULL)
  {
    printf("cannot open file\n");
    exit(1);
  }
  if((nn_fp=fopen("nn_fp.data","w"))==NULL)
  {
    printf("cannot open file\n");
    exit(1);
  }

  GA_Optimized_Network();    // GA weight-optimization routine

  /************** train the network to obtain the final weights ****************/
  fprintf(nn_fp,"Predict Step  TotalError\n");
  CurrentStudy=0;
  do
  {
    ++CurrentStudy;
    if(CurrentStudy>MaxLearnTime)
      break;
    TotalError=0;

    for(m=0;m<P;m++)
    {
      GetSample(m);        // fetch the m-th training sample
      InputLayerToHiddenLayer();  // forward pass
      HiddenLayerToOutputLayer();
      ErrorCalculate(m);      // compute the sample error
      OutputLayertoHiddenLayer();  // back-propagate and adjust
      HiddenLayerToInputLayer();
      TotalError+=Error[m];
    }
  printf("%5d    %10.5lf\n",CurrentStudy,TotalError);
  //  fprintf(fp,"%5d %10.5lf\n",CurrentStudy,TotalError);
  }  
  while(TotalError>PreError);

  /******* After training converges, the disabled block below checks the network's generalization ability ******

  TotalError=0;
  for(m=P;m<N;m++)
  {
    GetSample(m);        // fetch the m-th sample
    InputLayerToHiddenLayer();  // forward pass
    HiddenLayerToOutputLayer();
    ErrorCalculate(m);      // compute the sample error
    TotalError+=Error[m];
  }
  if(TotalError>CheckError)
  {
    printf("The Network has to be trained again Until it met the error requies!\n");
    goto train;
  }

  ***************** end of generalization check *****************/

  /********************** save the weights (disabled) **************************/
/*
  fprintf(fp,"\nFinal weight list (input layer to hidden layer):\n");
  for(j=0;j<HiddenNum;j++)
  {
    for(i=0;i<InNum;i++)
    {
      fprintf(fp,"%8.4lf",W[j][i]);
    }
    fprintf(fp,"\n");
  }

  fprintf(fp,"\nFinal weight list (hidden layer to output layer):\n");
  for(k=0;k<OutNum;k++)
  {
    for(j=0;j<HiddenNum;j++)
    {
      fprintf(fp,"%8.4lf",V[k][i]);
    }
    fprintf(fp,"\n");
  }

  fprintf(fp,"\nThreshold list (input layer to hidden layer):\n");
  for(j=0;j<HiddenNum;j++)
  {
    fprintf(fp,"%8.4lf",theta[j]);
  }

  fprintf(fp,"\n\nThreshold list (hidden layer to output layer):\n");
  for(k=0;k<OutNum;k++)
  {
    fprintf(fp,"%8.4lf",gama[k]);
  }
  fclose(fp);
*/

  /************ sliding one-step-ahead prediction with the trained network **************/
  for(predict_step=0;predict_step<2;predict_step++)
  {
    GetSample(P);                  // fetch the prediction input sample
    InputLayerToHiddenLayer();            // forward pass
    HiddenLayerToOutputLayer();
  
    RecoverOutput();
    RenewSeries();
    DataNormalize();

    for(i=0;i<OutNum;i++)
    {
      fprintf(nn_fp,"\nThe predicted value (for year %4d ahead) is: %8.4lf\n",OutNum*predict_step+i+1,predict_out[i]);
    }
    /*
    for(i=0;i<InNum;i++)
    {
      NewSeries[i+P-1]=NewSeries[i+P];
      printf("%d nn=%lf\n",predict_step,NewSeries[i+P-1]);
    }
    NewSeries[N-1]=O[0];  
    */
  }

  fprintf(nn_fp,"\nSimulation completed!\n");
  fclose(nn_fp);
  printf("Simulation completed!\n");
  
} 
