
📄 gdtrain.m

📁 Neural network model based on the BP (backpropagation) model
💻 MATLAB (M)
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
%
%  gdtrain(network,data,iterations) - gradient descent procedure for matrix
%  neural network training.
%  
%  Parameters: network - matrix neural network structure
%              data - training data sample
%              iterations - maximum number of training iterations to perform
%
%  Author: Povilas Daniušis, paralax@hacker.lt
%  http://ai.hacker.lt - Lithuanian site about Artificial Intelligence.
%
%  TODO: weighted MSE minimization, maximum likelihood method, multiple
%  activation function support.
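%
%  Example - a minimal sketch of a call; the struct fields below are
%  inferred from the code in this file, and mNN_sim / updateParameters
%  are assumed to ship with the same package:
%
%      net.eta = 0.01;  net.alpha = 0.9;          % learning rate, momentum
%      net.epsilon = 1e-3;  net.epsilon1 = 1e-8;  % stopping tolerances
%      net.earlyStop = 10;       % allowed consecutive error increases
%      % ...plus regressors, weights, left/right projection vectors and the
%      % d, d_left, d_right, d_b derivative accumulators...
%      net = gdtrain(net, data, 100);  % data.training(i).mat, data.target(i)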
%  ----------------------------------------------------------------------
function f=gdtrain(network,data,iterations)

ETA = network.eta;         % initial learning rate
ALPHA = network.alpha;     % momentum factor
EPSILON1 = network.epsilon1; % minimal error change (to detect slow convergence)
EPSILON = network.epsilon; % if error < EPSILON exit with success
C1 = 0.80;                 % factor to slow learning down (after an error increase)
C2 = 1.05;                 % factor to speed learning up (after an error decrease)
failed = 0;                % counts consecutive error increases; used to stop on divergence

left_size = length(network.left(1).w);
right_size = length(network.right(1).w);


%
% Find derivatives of error function
%
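% A sketch of the model these gradients assume (inferred from this file;
% mNN_sim itself is defined elsewhere in the package):
%
%     y(X) = w_1 + sum_{j>1} w_j * tansig( u_j * X * v_j + b_j )
%
% where u_j = network.left(j).w (row vector), v_j = network.right(j).w
% (column vector) and w_j = network.weights(j). The error minimized is the
% sum of squared errors E = sum_i (y(X_i) - t_i)^2; the constant factor of 2
% from dE/dparam is absorbed into the learning rate.
%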
       
for it=1:iterations  % loop over training epochs
    for j=1:network.regressors      
        for i=1:length(data.target)  
            X.training(1).mat = data.training(i).mat;
            X.target(1) = data.target(i);
            delta = (mNN_sim(network,X) - X.target(1));   % TODO: optimize
            
            % derivative w.r.t. the output weights (alpha)
            if (j > 1)
                network.d(j) = network.d(j) + delta*tansig(network.left(j).w * X.training(1).mat * network.right(j).w);             
            else
                network.d(j) = network.d(j) + delta;
            end                       
            % derivatives w.r.t. the projection vectors u, v and the bias b
            if (j > 1)
                activation_signal = 1 - tansig(network.left(j).w * X.training(1).mat * network.right(j).w)^2;  % tansig'(z) = 1 - tansig(z)^2
                for k=1:left_size
                    network.d_left(j,k) = network.d_left(j,k) + network.weights(j)*delta*activation_signal*(network.right(j).w' * X.training(1).mat(k,:)');
                end

                for k=1:right_size
                    network.d_right(j,k) = network.d_right(j,k) + network.weights(j)*delta*activation_signal*(network.left(j).w * X.training(1).mat(:,k));
                end
                network.d_b(j) = network.d_b(j) + delta*network.weights(j)*activation_signal;
            end        
             
        end      
    end    
  
%  
%  Update parameters and set derivatives to 0 
%
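% (Assumed behaviour of updateParameters, defined elsewhere in this package:
%  a momentum step of roughly
%      step = -eta*d + ALPHA*step_prev;  param = param + step;
%  applied to every parameter group, after which the d, d_left, d_right and
%  d_b accumulators are reset to zero. ETA is passed already divided by the
%  sample count, so the accumulated sums act as averages.)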
  network = updateParameters(network,ETA/length(data.target),ALPHA);  

%
% Simulate and test
%
  s = mNN_sim(network,data);
  sse1 = sum((s - data.target).^2);   % sum of squared errors over the whole training set
  
  if (it > 1)
      if (sse1 > sse0)
          ETA = ETA * C1;          % error went up: slow down
          failed = failed + 1;
      else
          ETA = ETA * C2;          % error went down: speed up
          failed = 0;
      end
  end
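  % (The ETA adaptation above is the classic "bold driver" heuristic:
  %  shrink the learning rate after an error increase, grow it slightly
  %  after a decrease.)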

  fprintf('Iteration: %d of %d. Eta = %f, Performance (MSE): %f \n',it,iterations, ETA, sse1/length(data.target));
  if (it > 1)
      if (abs(sse1 - sse0) < EPSILON1)
          disp('Convergence too slow. Stopping procedure.');
          break;
      else
          sse0 = sse1;
      end
  else
      sse0 = sse1;
  end
  
  if ( (sse0 / length(data.target)) <= EPSILON)
      fprintf('Training complete: MSE <= %f \n', EPSILON);
      break;
  end
    
  if (failed > network.earlyStop)
      fprintf('Divergence detected: %d consecutive error increases (earlyStop = %d)\n', failed, network.earlyStop);
      break;
  end
      
end % for it=1:iterations

f = network;   % return the trained network
