⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xor.cpp

📁 c++解决xor问题(bp网络)
💻 CPP
字号:
//Solve the XOR problem with a BP (back-propagation) network


#include <stdlib.h>
#include <time.h>
#include <iomanip.h>
#include <math.h>
#include <fstream.h>

const double ETA=0.65;            //learning rate
const double ALPHA=0.85;          //momentum factor
const double BETA=1.0;            //shape factor (only reported in the output file; not used in the forward pass)
const int    TIMES=5000;          //maximum number of training epochs before giving up
const int    NEURONS_INPUTLAYER=3, NEURONS_HIDDENLAYER=2;   //input layer includes the constant -1 bias input
const int    TRAININGPATTERNS=4;  //number of training patterns (rows of xor.txt)

double W[NEURONS_INPUTLAYER];     //output-unit weights; W[0] is the bias weight (fed by -1)
double w[NEURONS_HIDDENLAYER][NEURONS_INPUTLAYER];   //hidden-layer weights: w[hidden][input]
double v[NEURONS_HIDDENLAYER];    //hidden-unit outputs (filled in by getoutput)
double input[NEURONS_INPUTLAYER]; //current input pattern (bias plus the two XOR inputs)

// Forward pass: compute the network's output for the pattern currently
// stored in the global `input` vector.
// Side effect: each hidden unit's sigmoid activation is stored in the
// global `v` so the training code can reuse it for the weight updates.
// W[0] is the output unit's bias weight, driven by a constant -1 input.

double getoutput(){
double netHidden;                  // net input of one hidden unit
double netOutput=0.0;              // net input of the output unit
for(int unit=0; unit<NEURONS_HIDDENLAYER; unit++){
  netHidden=0.0;
  for(int src=0; src<NEURONS_INPUTLAYER; src++)
    netHidden+=w[unit][src]*input[src];
  v[unit]=1.0/(1.0+exp(-netHidden));   // sigmoid activation
  netOutput+=W[unit+1]*v[unit];        // W[0] is reserved for the bias
}
netOutput-=W[0];                       // bias contribution: weight * (-1)
return 1.0/(1.0+exp(-netOutput));
}

// Train the back-propagation network on the patterns read from "xor.txt"
// and append the experiment report to "bpxor.txt".
// Each pattern row holds NEURONS_INPUTLAYER inputs (the first is the
// constant -1 bias input) followed by the teacher signal.
// Returns 0 (the original used the non-standard `void main`).
int main(void){
int    i, j, k, tmp, CycTimes=0, Randomsequence[TRAININGPATTERNS];
double MSE, RandomSgn, Out[TRAININGPATTERNS];
double TeacherSignal, Momentum1, Momentum2;
double O, D_tmp;                  // network output and output-unit delta
double Pattern[TRAININGPATTERNS][NEURONS_INPUTLAYER+1];
double d[NEURONS_HIDDENLAYER];    // hidden-unit deltas
double w_prev[NEURONS_HIDDENLAYER][NEURONS_INPUTLAYER];  // previous hidden weights (momentum)
double W_prev[NEURONS_INPUTLAYER];                       // previous output weights (momentum)
int failed;

// Load the training set.
// BUG FIX: the original read an integer through an uninitialized pointer
// here ("int *p; cin>>*p;") — undefined behavior with no purpose; removed.
ifstream fin;
fin.open("xor.txt");
for(i=0; i<TRAININGPATTERNS; i++){
  for(j=0; j<NEURONS_INPUTLAYER+1; j++)
    fin>>Pattern[i][j];
}
fin.close();

// Randomly initialize the weights in roughly +/-[0.005, 0.05],
// with a random sign (rand() even => negative).
srand((unsigned)time(NULL));
for(i=0; i<NEURONS_INPUTLAYER; i++){
  RandomSgn=fmod((double)rand(),2.0);
  if(RandomSgn<0.001)
    W[i]=(-1.0)*((rand()%9000)+1000)/1000000.0*5.0;
  else
    W[i]=((rand()%9000)+1000)/1000000.0*5.0;
}

for(i=0; i<NEURONS_HIDDENLAYER; i++){
  for(j=0; j<NEURONS_INPUTLAYER; j++){
    RandomSgn=fmod((double)rand(),2.0);
    if(RandomSgn<0.001)
      w[i][j]=(-1.0)*((rand()%9000)+1000)/1000000.0*5.0;
    else
      w[i][j]=((rand()%9000)+1000)/1000000.0*5.0;
  }
}

// The momentum terms need last step's weights; start them equal so the
// first momentum contribution is zero.
for(i=0; i<NEURONS_INPUTLAYER; i++)
  W_prev[i]=W[i];

for(i=0; i<NEURONS_HIDDENLAYER; i++){
  for(j=0; j<NEURONS_INPUTLAYER; j++)
    w_prev[i][j]=w[i][j];
}

// Experiment header in the report file.
ofstream fout;
fout.open("bpxor.txt", ios::app|ios::out);

fout<<endl<<endl;

fout<<"用BP网络求解XOR问题"<<endl<<endl;

fout<<"实验条件:"<<endl<<endl;
fout<<"学 习 率:      ETA="<<ETA<<"   "<<endl;
fout<<"动量因子:      ALPHA="<<ALPHA<<"     "<<endl;
fout<<"形状因子:      BETA="<<BETA<<".0     "<<endl<<endl;

srand((unsigned)time(NULL));

for(;;){  // one iteration = one training epoch (replaces the original goto L1)
  MSE=0.0;

  // Shuffle the presentation order of the patterns.
  for(i=0; i<TRAININGPATTERNS; i++)
    Randomsequence[i]=i;

  for(i=0; i<TRAININGPATTERNS; i++){
    j=rand()%TRAININGPATTERNS;
    k=rand()%TRAININGPATTERNS;
    tmp=Randomsequence[j];
    Randomsequence[j]=Randomsequence[k];
    Randomsequence[k]=tmp;
  }

  // Weight update: one backprop step per (shuffled) pattern.
  for(k=0; k<TRAININGPATTERNS; k++){
    for(i=0; i<NEURONS_INPUTLAYER; i++)
      input[i]=Pattern[Randomsequence[k]][i];
    TeacherSignal=Pattern[Randomsequence[k]][NEURONS_INPUTLAYER];
    O=getoutput();

    // Output unit: delta rule with momentum. W[0] is the bias weight
    // fed by a constant -1, hence its learning term is negated.
    D_tmp=O*(1.0-O)*(TeacherSignal-O);
    Momentum1=ALPHA*(W[0]-W_prev[0]);
    W_prev[0]=W[0];
    W[0]+=(-1.0)*ETA*D_tmp+Momentum1;
    for(i=1; i<NEURONS_INPUTLAYER; i++){
      Momentum1=ALPHA*(W[i]-W_prev[i]);
      W_prev[i]=W[i];
      W[i]+=ETA*D_tmp*v[i-1]+Momentum1;
    }

    // Hidden units: back-propagate the output delta (using the already
    // updated W, matching the original update order).
    for(i=0; i<NEURONS_HIDDENLAYER; i++){
      d[i]=v[i]*(1.0-v[i])*W[i+1]*D_tmp;
      for(j=0; j<NEURONS_INPUTLAYER; j++){
        Momentum2=ALPHA*(w[i][j]-w_prev[i][j]);
        w_prev[i][j]=w[i][j];
        w[i][j]+=ETA*d[i]*input[j]+Momentum2;
      }
    }
  }

  CycTimes+=1;

  // Compute the actual outputs and the summed squared error.
  for(k=0; k<TRAININGPATTERNS; k++){
    for(i=0; i<NEURONS_INPUTLAYER; i++)
      input[i]=Pattern[k][i];
    TeacherSignal=Pattern[k][NEURONS_INPUTLAYER];
    Out[k]=getoutput();
    MSE+=(Out[k]-TeacherSignal)*(Out[k]-TeacherSignal)/2.0;
  }

  // Convergence test driven by the teacher signals themselves.
  // BUG FIX: the original hard-coded one of two mutually exclusive target
  // patterns (the other sat commented out), and the active one did not
  // match the teacher column documented for xor.txt, so training could
  // never be declared successful. Deriving the thresholds from
  // Pattern[k][NEURONS_INPUTLAYER] works for either data set.
  failed=0;
  for(k=0; k<TRAININGPATTERNS; k++){
    if(Pattern[k][NEURONS_INPUTLAYER]>0.5){
      if(Out[k]<0.9) failed=1;    // target 1: output must reach 0.9
    }
    else{
      if(Out[k]>=0.1) failed=1;   // target 0: output must drop below 0.1
    }
  }

  if(failed){
    if(CycTimes<TIMES){
      // Trace progress on the console, then run another epoch.
      cout<<setw(8)<<setiosflags(ios::right)<<CycTimes;
      cout<<setw(14)<<setiosflags(ios::right)<<MSE<<endl;
      continue;
    }
    // Epoch budget exhausted: report failure and stop.
    fout<<"失败!"<<endl<<endl;
    fout<<"///////////////////////////";
    fout.close();
    break;
  }

  // Training converged: write the experiment results.
  fout<<"实验结果:"<<endl<<endl;
  if(CycTimes<1000)
    fout<<setw(7)<<setiosflags(ios::right)<<"迭代次数:      Times="<<CycTimes<<endl;
  else
    fout<<setw(6)<<setiosflags(ios::right)<<"迭代次数:      Times="<<CycTimes<<endl;
  fout<<setw(10)<<setiosflags(ios::right)<<"平方误差:      MSE="<<MSE<<endl;

  fout<<"权 向 量:     "<<endl<<endl;
  for(i=0; i<NEURONS_HIDDENLAYER; i++){
    fout<<setw(24)<<setiosflags(ios::right)<<w[i][0];
    for(j=1; j<NEURONS_INPUTLAYER; j++)
      fout<<setw(18)<<setiosflags(ios::right)<<w[i][j];
    fout<<endl;
  }
  fout<<endl;
  fout<<setw(24)<<setiosflags(ios::right)<<W[0];
  for(i=1; i<NEURONS_INPUTLAYER; i++)
    fout<<setw(18)<<setiosflags(ios::right)<<W[i];
  fout<<endl;

  fout<<setw(10)<<setiosflags(ios::right)<<"样本输出:"<<endl;

  // Print teacher signal vs. actual output for every pattern. The
  // original branched on the teacher value but both branches printed
  // exactly the same thing (the alternative was commented out), so the
  // branch is collapsed.
  for(k=0; k<TRAININGPATTERNS; k++){
    fout<<setw(18)<<setiosflags(ios::right)
        <<Pattern[k][NEURONS_INPUTLAYER]<<".0";
    fout<<"     "<<setw(17)<<setiosflags(ios::right)<<Out[k]<<endl;
  }
  fout<<endl;

  fout<<"///////////////////////////////////////////////";
  fout.close();
  break;
}
return 0;
}

/*****************************
Data in the file xor.txt
(bias input, x1, x2, teacher signal per row)

    -1.0    0.0    0.0   1.0
    -1.0    0.0    1.0   0.0
    -1.0    1.0    0.0   0.0
    -1.0    1.0    1.0   1.0
*****************************/


⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -