⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 matlabalgorithm.txt

📁 matlab遗传算法实例,用于在matlab环境下遗传算法的实现.
💻 TXT
📖 第 1 页 / 共 2 页
字号:
                % (fragment: enclosing loops open earlier in the file)
                % Decode the 10-bit binary gene into an integer for each
                % hidden-layer weight slot.
                for j=1:10 
                    param1(k,r)=param1(k,r)+a1(k,r,j)*pow2(j-1); 
                end 
            end 
        end 
        % Decode the 4 output-layer genes (10 bits each) the same way.
        for k=1:4 
            for j=1:10 
                param2(1,k)=param2(1,k)+a2(1,k,j)*pow2(j-1); 
            end 
        end 
            
       % Map decoded integers [0, 2^10-1] linearly onto weight range [-15, 17].
       m=pow2(10)-1; 
       for k=1:3 
            for r=1:5 
                x_param1(k,r)=-15+param1(k,r)/m*32; 
            end 
        end 
        for k=1:4 
            y_param2(1,k)=-15+param2(1,k)/m*32; 
        end 
        
       % Fitness of this individual = 10/SSE of the candidate network.
       fitness_function(1,i)=xw_ga_ann19_fitness( x_param1,y_param2); 
       %fitness_function(1,i)=x1^2+x2^2+x3^2+x4^2+x5^2+x6^2+x7^2+x8^2+x9^2+x10^2; 
       % Track the best individual seen so far.
       if(fitness_function(1,i)>maxsat) 
           maxsat=fitness_function(1,i); 
           optimal_1=x_param1; 
           optimal_2=y_param2; 
           optimal_3=fitness_function(1,i); 
       end 
       % NOTE(review): fitness is 10/SSE (bigger is better), yet the loop stops
       % when fitness<=0.001, i.e. on a very BAD individual -- confirm intent.
       if(fitness_function(1,i)<=0.001) 
           flag=-1; 
           %optimal 
           %generation 
           break; 
       else sum_fit=sum_fit+fitness_function(1,i); 
       end 
       if(flag<0) 
           break; 
       end 
   end 
   % GA operators run only when no early stop was triggered this generation.
   if(flag>0) 
        %the first select 
        % Drop the last individual's fitness from the roulette total.
        % NOTE(review): only Population_Size-1 slots take part in selection
        % below -- confirm the last slot is deliberately reserved.
        sum_fit=sum_fit-fitness_function(1,Population_Size); 
       for i=1:Population_Size-1 
           x=round(randn(1)*1023); 
           sum=round(sum_fit); 
           rr=rem(x,sum); 
           n=1;ba=1; 
           partsum=0; 
           while((partsum<rr)&(n<Population_Size-1)) 
               parsum=partsum+fitness_function(1,n); 
               ba=n; 
               n=n+1; 
           end 
           selected(1,i)=ba; 
       end 
       %reproduce 
       for i=1:Population_Size-1 
           for j=1:String_Length 
               chromosome_change(i,j)=chromosome(selected(1,i),j); 
           end 
           fitness_function(1,i)=fitness_function(1,selected(1,i)); 
       end 
        %select before crossover 
        % Second roulette-wheel pass: pick the mates that crossover will pair.
        for i=1:Population_Size-1 
            x=round(rand(1)*32367); 
            sum=round(sum_fit);   % NOTE(review): shadows the builtin sum()
            rr=rem(x,sum)+1;      % spin in [1, sum] so the wheel advances at least once
            n=1; 
            partsum=0; 
            while((partsum<rr)&(n<=Population_Size-1)) 
                partsum=partsum+fitness_function(1,n); 
                bba=n; 
                n=n+1; 
            end 
            selected(1,i)=bba; 
        end 
       %crossover 
       maxsw=max(fitness_function); 
       for i=1:Population_Size/2-1 
           parent1=selected(1,i); 
           parent2=selected(1,Population_Size-1-i); 
           child1=i; 
           child2=Population_Size-1-i; 
           pc=0.8; 
           randnum=rand(1); 
           if randnum<pc 
            site1=round(rand(1)*String_Length); 
               for j=1:String_Length 
                   if(j<site1) 
                   chromosome(child1,j)=chromosome_change(parent1,j); 
                   chromosome(child2,j)=chromosome_change(parent2,j); 
                   else 
                   chromosome(child1,j)=chromosome_change(parent2,j); 
                   chromosome(child2,j)=chromosome_change(parent1,j); 
                   end 
               end 
           end 
       end 
       %mutation 
       pm=0.1; 
       for i=1:Population_Size-1             
           for j=1:String_Length 
               randnum=rand(1);                 
               if(randnum<pm) 
                   chromosome(i,j)=~chromosome(i,j);%变异取反 
                   %else 
                  % chromosome(i,j)=~chromosome(i,j);%变异取反 
               end 
           end 
       end 
    end 
    % Advance to the next generation (outer while opened earlier in the file).
    generation=generation+1; 
 end 
%************************************************** 
%ANN start 
% Network hyper-parameters
sensorN=3;   % number of hidden neurons
stopR=50000; % number of training epochs
h=1;         % learning rate
%------------------------------% 
% Training inputs: years 1988-2003, min-max normalized into [0.05, 0.95].
trainS=[1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 ]; 
maxp=max(trainS); 
minp=min(trainS); 
% Vectorized normalization (replaces the hard-coded 1:16 loop; numerically
% identical element-wise arithmetic).
trainS=(trainS-minp)/(maxp-minp)*0.9+0.05; 
% Training targets: yearly expenditure figures, normalized the same way.
trainD=[293093 304085 306170 280292 305141 297637 288059 278856 271417 276324 274278 280969 301697 312743 356720 446320]; 
maxt=max(trainD); 
mint=min(trainD); 
trainD=(trainD-mint)/(maxt-mint)*0.9+0.05; 
% Network weights come from the GA result (optimal_1/optimal_2); the removed
% randn() initializations were dead stores, overwritten on the next line.
HLW=optimal_1;            % hidden-layer weights, sensorN x 5 (4 lagged inputs + bias)
HLV=zeros(sensorN,1);     % hidden-layer induced local field
HLOut=zeros(sensorN,1);   % hidden-layer output
HLG=zeros(sensorN,1);     % hidden-layer local gradient
OLW=optimal_2;            % output-layer weights, 1 x (sensorN+1)
OLV=zeros(1,1);           % output-layer induced local field
OLY=zeros(1,1);           % output-layer output
OLG=zeros(1,1);           % output-layer local gradient

% Empirical starting point: 3 hidden neurons.
% Online backpropagation over the series: input is the 4 previous normalized
% values plus a bias; target is the next value (one-step-ahead prediction).
for j=1:stopR % fixed number of epochs; a better stopping rule is a possible improvement
   for i=4:(length(trainD)-1) 
       HLV=HLW*[trainD(i);trainD(i-1);trainD(i-2);trainD(i-3);1]; % hidden induced local field
       HLOut=1./(1+exp(-HLV));                                    % logistic activation
       OLV=OLW*[HLOut;1]; 
       OLY=1./(1+exp(-OLV)); 
       OLG=(trainD(i+1)-OLY)*(OLY.*(1-OLY));                      % output-layer local gradient
       HLG=(HLOut.*(1-HLOut)).*(OLW(:,1:sensorN)'*OLG);           % backpropagated hidden gradient
       OLW=OLW+h*OLG*[HLOut;1]';                                  % weight updates (learning rate h)
       HLW=HLW+h*HLG*[trainD(i),trainD(i-1),trainD(i-2),trainD(i-3),1]; 
   end 
end 
% Evaluate the trained network on the training series and plot predictions
% against actuals.
% NOTE(review): maxt is pinned to 446320 (the 1988-2003 maximum) instead of
% max(trainD), so the later 460000 figure is normalized on the training
% scale (and exceeds 0.95) -- confirm this is intentional.
trainD=[293093 304085 306170 280292 305141 297637 288059 278856 271417 276324 274278 280969 301697 312743 356720 446320 ]; 
maxt=446320; 
mint=min(trainD); 
trainD=(trainD-mint)/(maxt-mint)*0.9+0.05;   % vectorized normalization
% One-step-ahead predictions for every index with 4 past values available.
trainanswer=zeros(1,length(trainD)-3); 
% CONSISTENCY FIX: loop bound was length(trainS); trainD is the array being
% indexed (both happen to be 16 here).
for i=4:(length(trainD)) 
       HLV=HLW*[trainD(i);trainD(i-1);trainD(i-2);trainD(i-3);1]; 
       HLOut=1./(1+exp(-HLV)); 
       OLV=OLW*[HLOut;1]; 
       OLY=1./(1+exp(-OLV)); 
       trainanswer(i-3)=OLY; 
end 
% Actual (normalized) series including the 2004 figure, for comparison.
trainD=[293093 304085 306170 280292 305141 297637 288059 278856 271417 276324 274278 280969 301697 312743 356720 446320 460000 ];   
maxt=446320; 
mint=min(trainD); 
trainD=(trainD-mint)/(maxt-mint)*0.9+0.05; 
% Actual values for years 1992-2004 (removed a dead zeros() preallocation
% that was immediately overwritten).
trainP=trainD(1,5:17); 
k=1992:1:2004; 
plot(k,trainP,'r',k,trainanswer,'b+'); 
xlabel('年份'); 
ylabel('归一化军费额度(+表示)'); 
>>file:xw_ga_ann19_fitness
function m=xw_ga_ann19_fitness( x_param1,y_param2) 
%XW_GA_ANN19_FITNESS  GA fitness of a candidate neural network.
%   m = XW_GA_ANN19_FITNESS(x_param1, y_param2) evaluates a feed-forward
%   network whose hidden-layer weights are x_param1 (3 x 5: four lagged
%   inputs plus a bias per hidden neuron) and whose output-layer weights
%   are y_param2 (1 x 4), running it as a one-step-ahead predictor over
%   the 1988-2003 expenditure series (min-max normalized into
%   [0.05, 0.95]). Returns m = 10/SSE, so smaller prediction error means
%   larger fitness.
%
%   NOTE(review): a perfect fit (SSE == 0) returns Inf -- confirm the
%   caller tolerates that.
%
%   Cleanup vs. original: removed dead code -- the unused trainS
%   normalization, unused stopR/h parameters, randn() weight initializers
%   that were immediately overwritten, and the commented-out training
%   steps. The returned value is computed identically.

% Training targets, min-max normalized into [0.05, 0.95] (vectorized;
% replaces the hard-coded 1:16 loop).
trainD=[293093 304085 306170 280292 305141 297637 288059 278856 271417 276324 274278 280969 301697 312743 356720 446320]; 
maxt=max(trainD); 
mint=min(trainD); 
trainD=(trainD-mint)/(maxt-mint)*0.9+0.05; 

HLW=x_param1;   % hidden-layer weights (GA candidate)
OLW=y_param2;   % output-layer weights (GA candidate)

% Accumulate the squared one-step-ahead prediction error (no training here).
m=0; 
for i=4:(length(trainD)-1) 
    HLV=HLW*[trainD(i);trainD(i-1);trainD(i-2);trainD(i-3);1]; 
    HLOut=1./(1+exp(-HLV)); 
    OLV=OLW*[HLOut;1]; 
    OLY=1./(1+exp(-OLV)); 
    m=m+(trainD(i+1)-OLY)^2; 
end 
m=10/m;   % invert: larger fitness = smaller error

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -