
📄 tpso1.m

📁 Intelligent optimization algorithms: a particle swarm optimization (PSO) program for training neural networks, with variants for zero, one, and two hidden layers. Run DemoTrainPSO.m to try it. Source: Brian Birge, NCSU.
💻 MATLAB
function [w1,b1,te,tr] = tpso1(w1,b1,f1,p,t,tp,wv,bv,es,v)
%TPSO1 - Train 1-layer (0 hidden layer) feed-forward network 
%        with particle swarm optimization (PSO)
%
% Brian Birge
% rev 1.0
% 01/01/03
%
%  [W1,B1,TE,TR] = TPSO1(W1,B1,F1,P,T,TP)
%    W1  - SxR weight matrix.
%    B1  - Sx1 bias vector.
%    F1  - Transfer function (string).
%    P  - RxQ matrix of input vectors.
%    T  - SxQ matrix of target vectors.
%    TP - Training parameters (optional).
%  Returns:
%    W1  - new weights.
%    B1  - new biases.
%    TE - the actual number of epochs trained.
%    TR - training record: [row of errors]
%
%	Training parameters are:
%    TP(1) - Epochs between updating display, default = 100.
%    TP(2) - Maximum number of iterations (epochs) to train, default = 2000.
%    TP(3) - Sum-squared error goal, default = 0.02.
%    TP(4) - population size, default = 20
%    TP(5) - maximum particle velocity, default = 4
%    TP(6) - acceleration constant 1, default = 2
%    TP(7) - acceleration constant 2, default = 2
%    TP(8) - Initial inertia weight, default = 0.9
%    TP(9) - Final inertia weight, default = 0.2
%    TP(10)- Iteration (epoch) by which inertia weight should be at final value, default = 1500
%    TP(11)- maximum initial network weight absolute value, default = 100
%    TP(12)- randomization flag (flagg), default = 2:
%                      flagg = 0, same random numbers used for each particle (different at each epoch - least random)
%                      flagg = 1, separate randomized numbers for each particle at each epoch
%                      flagg = 2, separate random #'s at each component of each particle at each epoch (most random)
%    TP(13)- minimum global error gradient (if |SSE(i+1)-SSE(i)| < gradient over
%               a certain number of epochs, terminate run), default = 1e-9
%    TP(14)- epochs before error gradient criterion terminates run, default = 200
%               i.e., if the SSE does not change over 200 epochs, quit program
%    TP(15) - plot flag, if =1 then training progress shown, otherwise no display, default =1
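%
%  Example (an illustrative sketch, not from the original file: the data
%  and the 'purelin' transfer function below are made-up assumptions;
%  SIMUFF, NNDEF, SUMSQR, UNWRAPMAT and WRAPMAT must be on the path,
%  as this function already requires):
%
%    p  = 0:5;                    % 1x6 input vectors
%    t  = 2*p + 1;                % 1x6 targets (linear map)
%    w1 = 2*rand(1,1)-1;          % random initial weight in [-1,1]
%    b1 = 2*rand(1,1)-1;          % random initial bias in [-1,1]
%    [w1,b1,te,tr] = tpso1(w1,b1,'purelin',p,t);   % tp omitted: defaults used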

rand('state',sum(100*clock));    

if nargin < 5
   error('Not enough arguments.');
end

% TRAINING PARAMETERS
 if nargin == 5
    tp = []; 
 end

 tp = nndef(tp,[100 2000 0.02 20 4 2 2 0.9 0.2 1500 100 2 1e-9 200 1]);
 df  = tp(1);
 me  = tp(2);
 eg  = tp(3);
 ps  = tp(4);
 mv  = tp(5);
 ac1 = tp(6);
 ac2 = tp(7);
 iw1 = tp(8);
 iw2 = tp(9);
 iwe = tp(10);
 mwav= tp(11);
 flagg=tp(12);
 ergrd=tp(13);
 ergrdep=tp(14);
 pltflg=tp(15);
 
 cnt2=0;
 cnt3=0;
 cnt4=0;
 
 mvcrit=50;
 mvmag=1e-5;
 
% PLOTTING
 message = sprintf('TRAINPSO: %%g/%g epochs, GBest SSE = %%g.\n',me);

% initialize population
 
 % unwrap wts & biases into position vector for 1st population member
   [pos(1,:),row,col]=unwrapmat(w1,b1);
   D=length(pos(1,:));  % dimension D of optimization problem (fly through this hyperspace)
 
 % get the other particles and their velocities at time zero
   pos(2:ps,:)=mwav*(2*rand(ps-1,D)-1); % construct rest of random population pos between -mwav,mwav
   vel(:,:)=mv*(2*rand(ps,D)-1); % construct initial random velocities between -mv,mv
 
 % initial pbest positions vals
   pbest(1:ps,:)=pos;
 
 % evaluate initial pbest values for each particle
   for j=1:ps  % start particle loop
    [w1,b1]=wrapmat(pos(j,:),row,col);     % put particle j into weight/bias format   
    out(:,:,j)=simuff(p,w1,b1,f1);        % simulate neural network
    e = t-out(:,:,j);                              % error between target and net output
    SSEhist(j) = sumsqr(e);                      % sum squared error for jth particle
   end
   pbestval=SSEhist;
 
 % assign initial gbest here also (gbest and gbestval)
   [gbestval,idx1]=min(pbestval);
   gbest=pbest(idx1,:);
   tr(1)=gbestval;
  
% start PSO iterative procedures
  cnt=0; % counter used for updating display according to df in the options
  cnt2=0; % counter used for the stopping subroutine based on error convergence
  gbhist=gbest;
  for i=1:me  % start epoch loop (iterations)
    for j=1:ps  % start particle loop
      [w1,b1]=wrapmat(pos(j,:),row,col);     % put particle j into weight/bias format   
      out(:,:,j)=simuff(p,w1,b1,f1);        % simulate neural network
      e = t-out(:,:,j);                              % error between target and net output
      SSEhist(j) = sumsqr(e);                      % sum squared error for jth particle, keep history
     
    % update pbest
       if pbestval(j)>=SSEhist(j)
          pbestval(j)=SSEhist(j);
          pbest(j,:)=pos(j,:);
       end
          
    % assign gbest by finding minimum of all particle pbests 
       [iterbestval,idx1]=min(pbestval);  % global best value for this iteration
       if gbestval>=iterbestval
          gbestval=iterbestval;
          gbest=pbest(idx1,:);
       end
    
      tr(i+1)=gbestval; % keep track of global best SSE
      te=i;             % this will return the epoch number to calling program when done

    % get inertia weight, just a linear funct w.r.t. epoch parameter iwe  
     if i<=iwe
       iwt(i)=((iw2-iw1)/(iwe-1))*(i-1)+iw1;
     else
       iwt(i)=iw2;
     end
     
    % this for loop is the heart of the PSO algorithm, updates position and velocity across dimension D 
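    % (This is the standard inertia-weight PSO update of Shi & Eberhart:
    %    v_k <- w*v_k + c1*r1*(pbest_k - x_k) + c2*r2*(gbest_k - x_k)
    %  with r1, r2 ~ U(0,1), followed by x_k <- x_k + v_k and |v_k|
    %  clamped to mv below.)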
     for k=1:D
       rannum1=rand(1);
       rannum2=rand(1);
       
      % update velocity for each dimension of each particle
       vel(j,k)=iwt(i)*vel(j,k)+ac1*rannum1*(pbest(j,k)-pos(j,k))+ac2*rannum2*(gbest(1,k)-pos(j,k));
       
      % limit velocities
       if vel(j,k)>mv
          vel(j,k)=mv;
       end
       if vel(j,k)<-mv
          vel(j,k)=-mv;
       end      
     end
   
    % update position for each dimension of each particle
     pos(j,:)=pos(j,:)+vel(j,:);     
     
 end         % end particle loop

 % check for stopping criterion based on speed of convergence to desired error   
  tmp1=abs(tr(i)-gbestval);
  if tmp1>ergrd
     cnt2=0;
  elseif tmp1<=ergrd
     cnt2=cnt2+1;
     if cnt2>=ergrdep
        disp('***************************** global error gradient too small for too long');     
        break
     end       
  end
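  % (i.e., quit once the epoch-to-epoch change in gbest SSE stays at or
  %  below ergrd for ergrdep consecutive epochs)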

%%************************************************************************************  
% 
%% this section is for modifying the maximum allowable velocity
%
%% dynamically decrease maximum velocity if gbestval
%% doesn't change over mvcrit iterations (set at top of function)
% if tr(i)~=gbestval
%    cnt3=0;
% elseif tr(i)==gbestval
%     cnt3=cnt3+1;
%     if cnt3>=mvcrit
%        mv=(mv*.999);
%        cnt3=0;
%     end
%  end
%  
% % dynamically increase maximum velocity if gbestval
% % changes by only a small amount over mvcrit iterations
%  if tr(i)~=gbestval
%    if tmp1<=mvmag
%       cnt4=cnt4+1;
%       if cnt4>=mvcrit          
%          mv=(mv*1.001);
%          cnt4=0;
%       end       
%    end
%  end
% 
% %************************************************************************************
 
 % CHECK ERROR PHASE
  if gbestval < eg 
    gbhist=[gbhist;gbest];      
    disp(['***************************************  Reached Goal ******************']); 
    disp(['TRAINPSO: ',num2str(i),'/',num2str(me),' epochs,  gbest SSE = ',num2str(gbestval,10)]);
    disp(['  mv  = ',num2str(mv,10),',  iwt = ',num2str(iwt(i),10)]);
    disp(['*************************************** end of training ****************']);
   if pltflg==1
    subplot(2,1,1)
      semilogy(1:te+1,tr(1:end));
      xlabel('epoch');
      ylabel('gbest');
      title(['mv=',num2str(mv),' inertia wt=',num2str(iwt(i)),', ',num2str(D),' dimensions, GbestVal= ',num2str(gbestval,10)]);     
      hold on
      semilogy(1:te+1,ones(size(1:te+1))*eg,'r-.');
      hold off
      drawnow
    subplot(2,1,2)
     % plot(pos(:,1),pos(:,D),'b.')
      plot(pbest(:,1),pbest(:,D),'b.');
      xlabel('pos dim 1');
      ylabel(['pos dim ',num2str(D)]);
      grid on
      hold on
     % plot([pbest(:,1),pos(:,1)]',[pbest(:,D),pos(:,D)]','y-');
      plot(gbest(1),gbest(D),'r*');    
      plot(gbhist(:,1),gbhist(:,D),'y');
      hold off
      drawnow     
%    subplot(2,2,4)
%      plot(vel(:,1),vel(:,D),'b.');
%      xlabel('vel dim 1');
%      ylabel(['vel dim ',num2str(D)]);
%      grid on
%      hold on
%      plot(vel(idx1,1),vel(idx1,D),'r*');
%      hold off
%      drawnow   
   end
   break
  end

 % PLOTTING
  if rem(i,df) == 0
    gbhist=[gbhist;gbest];
    disp(['TRAINPSO: ',num2str(i),'/',num2str(me),' epochs,  gbest SSE = ',num2str(gbestval,10)]);
    disp(['  mv  = ',num2str(mv,10),',  iwt = ',num2str(iwt(i),10)]);
   if pltflg==1
     goplotpso;    % plotting call
   end
  end      

end     % end epoch loop

% return the best weights and biases found by the swarm
% (without this, w1/b1 would hold the last particle evaluated, not gbest)
[w1,b1]=wrapmat(gbest,row,col);

% WARNINGS
if gbestval > eg
  disp('TRAINPSO: Network error did not reach the error goal.')
  disp('************* end of training ***************************************************');
end
