nniol.m

Collection: Neural Networks - Applications of MATLAB (example programs)

% (The derivative of fhat with respect to each weight in the f net)
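% Each column of PSIf holds the derivative of one network output with respect
% to the f-net weight vector, evaluated over all training samples. The rules
% below follow from the chain rule: for a linear unit the derivative w.r.t.
% an incoming weight is simply the input to that unit, while a tanh unit
% contributes an extra factor tanh'(x) = 1 - tanh(x)^2, which is why the
% (1-y.*y) terms appear.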

    % ==========   Elements corresponding to the linear output units   ============
    for i = L_outputf'
      index1 = (i-1) * (hiddenf + 1) + 1;

      % -- The part of PSI corresponding to hidden-to-output layer weights --
      PSIf(index1:index1+hiddenf,index2+i) = y1f;
      % ---------------------------------------------------------------------
 
      % -- The part of PSI corresponding to input-to-hidden layer weights ---
      for j = L_hiddenf',
        PSIf(indexf(j):indexf(j)+inputs,index2+i) = W2f(i,j)*PHI_aug;
      end
     
      for j = H_hiddenf',
        tmp = W2f(i,j)*(1-y1f(j,:).*y1f(j,:)); 
        PSIf(indexf(j):indexf(j)+inputs,index2+i) = tmp(onesf_i,:).*PHI_aug;
      end 
      % ---------------------------------------------------------------------    
    end

    % ============  Elements corresponding to the tanh output units   =============
    for i = H_outputf',
      index1 = (i-1) * (hiddenf + 1) + 1;

      % -- The part of PSI corresponding to hidden-to-output layer weights --
      tmp = 1 - y2f(i,:).*y2f(i,:);
      PSIf(index1:index1+hiddenf,index2+i) = y1f.*tmp(onesf_h,:);
      % ---------------------------------------------------------------------
         
      % -- The part of PSI corresponding to input-to-hidden layer weights ---
      for j = L_hiddenf',
        tmp = W2f(i,j)*(1-y2f(i,:).*y2f(i,:));
        PSIf(indexf(j):indexf(j)+inputs,index2+i) = tmp(onesf_i,:).* PHI_aug;
      end
      
      for j = H_hiddenf',
        tmp  = W2f(i,j)*(1-y1f(j,:).*y1f(j,:));
        tmp2 = (1-y2f(i,:).*y2f(i,:));
        PSIf(indexf(j):indexf(j)+inputs,index2+i) = tmp(onesf_i,:)...
                                                  .*tmp2(onesf_i,:).* PHI_aug;
      end
      % ---------------------------------------------------------------------
    end


% (The derivative of ghat with respect to each weight in the g net)
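% (Same scheme as for the f net above, applied to the g-net weights and
% hidden/output signals.)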

    % ==========   Elements corresponding to the linear output units   ============
    for i = L_outputg'
      index1 = (i-1) * (hiddeng + 1) + 1;

      % -- The part of PSI corresponding to hidden-to-output layer weights --
      PSIg(index1:index1+hiddeng,index2+i) = y1g;
      % ---------------------------------------------------------------------
 
      % -- The part of PSI corresponding to input-to-hidden layer weights ---
      for j = L_hiddeng',
        PSIg(indexg(j):indexg(j)+inputs,index2+i) = W2g(i,j)*PHI_aug;
      end
     
      for j = H_hiddeng',
        tmp = W2g(i,j)*(1-y1g(j,:).*y1g(j,:)); 
        PSIg(indexg(j):indexg(j)+inputs,index2+i) = tmp(onesg_i,:).*PHI_aug;
      end 
      % ---------------------------------------------------------------------    
    end

    % ============  Elements corresponding to the tanh output units   =============
    for i = H_outputg',
      index1 = (i-1) * (hiddeng + 1) + 1;

      % -- The part of PSI corresponding to hidden-to-output layer weights --
      tmp = 1 - y2g(i,:).*y2g(i,:);
      PSIg(index1:index1+hiddeng,index2+i) = y1g.*tmp(onesg_h,:);
      % ---------------------------------------------------------------------
         
      % -- The part of PSI corresponding to input-to-hidden layer weights ---
      for j = L_hiddeng',
        tmp = W2g(i,j)*(1-y2g(i,:).*y2g(i,:));
        PSIg(indexg(j):indexg(j)+inputs,index2+i) = tmp(onesg_i,:).* PHI_aug;
      end
      
      for j = H_hiddeng',
        tmp  = W2g(i,j)*(1-y1g(j,:).*y1g(j,:));
        tmp2 = (1-y2g(i,:).*y2g(i,:));
        PSIg(indexg(j):indexg(j)+inputs,index2+i) = tmp(onesg_i,:)...
                                                  .*tmp2(onesg_i,:).* PHI_aug;
      end
      % ---------------------------------------------------------------------
    end
    PSI=[PSIf;PSIg.*U(ones_u,:)];
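    % Since the model output is yhat = fhat + ghat.*u (see y2 below), the
    % chain rule gives d(yhat)/d(theta_g) = u.*d(ghat)/d(theta_g); hence the
    % g-net rows of the stacked Jacobian are scaled by the control signal U.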
    PSI_red = PSI(theta_index,:);
    dw = 0;
  end
  
   
% >>>>>>>>>>>>>>>>>>>>>>>>>>>        COMPUTE h_k        <<<<<<<<<<<<<<<<<<<<<<<<<<<
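  % The search direction h is a Levenberg-Marquardt step: with reduced
  % Jacobian PSI_red, gradient G = PSI_red*E_vector - D.*theta_red and
  % Gauss-Newton Hessian approximation R = PSI_red*PSI_red', it solves
  %   (R + (lambda + D)*I)*h = G.
  % The damping lambda interpolates between a Gauss-Newton step (lambda
  % small) and a short gradient step (lambda large).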
  % -- Gradient --
  G = PSI_red*E_vector-D.*theta_red;

  % -- Hessian  --
  H = PSI_red*PSI_red';
  H(index3) = H(index3)'+lambda+D;                  % Add lambda and weight decay D to the diagonal

  % -- Search direction --
  h = H\G;                                          % Solve for search direction

  % -- Compute 'a priori' iterate --
  theta_red_new = theta_red + h;                    % Update parameter vector
  theta(theta_index) = theta_red_new;

  % -- Put the parameters back into the weight matrices --
  W1f_new = reshape(theta(params2f+1:paramsf),inputs+1,hiddenf)';
  W2f_new = reshape(theta(1:params2f),hiddenf+1,outputs)';
  W1g_new = reshape(theta(paramsf+params2g+1:parameters),inputs+1,hiddeng)';
  W2g_new = reshape(theta(paramsf+1:paramsf+params2g),hiddeng+1,outputs)';
    
% >>>>>>>>>>>>>>>>>>>>   COMPUTE NETWORK OUTPUT  y2(theta+h)   <<<<<<<<<<<<<<<<<<<<
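  % Trial forward pass with the updated weights. pmntanh is the toolbox's
  % tanh-type activation (a fast tanh implementation); units listed in the
  % H_* index vectors use it, units in the L_* vectors stay linear.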
  h1f = W1f_new*PHI_aug;  
  y1f(H_hiddenf,:) = pmntanh(h1f(H_hiddenf,:));
  y1f(L_hiddenf,:) = h1f(L_hiddenf,:);
  h2f = W2f_new*y1f;
  y2f(H_outputf,:) = pmntanh(h2f(H_outputf,:));
  y2f(L_outputf,:) = h2f(L_outputf,:);

  h1g = W1g_new*PHI_aug;  
  y1g(H_hiddeng,:) = pmntanh(h1g(H_hiddeng,:));
  y1g(L_hiddeng,:) = h1g(L_hiddeng,:);   
  h2g = W2g_new*y1g;
  y2g(H_outputg,:) = pmntanh(h2g(H_outputg,:));
  y2g(L_outputg,:) = h2g(L_outputg,:);

  y2           = y2f + y2g.*U;
  E_new        = Y - y2;                 % Training error
  E_new_vector = E_new(:);               % Reshape E into a long vector
  SSE_new  = E_new_vector'*E_new_vector; % Sum of squared errors (SSE)
  PI_new   = (SSE_new + theta_red_new'*(D.*theta_red_new))/(2*N); % Regularized criterion
    

% >>>>>>>>>>>>>>>>>>>>>>>>>>>       UPDATE  lambda     <<<<<<<<<<<<<<<<<<<<<<<<<<<<
  L = h'*G + h'*(h.*(D+lambda));
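  % L is the reduction in 2*N*PI predicted by the quadratic model around the
  % current iterate. Comparing it with the actual reduction 2*N*(PI - PI_new)
  % gives a trust-region style ratio test: the damping is halved when the
  % model predicts well (ratio > 0.75) and doubled when it does not
  % (ratio <= 0.25).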

  % Decrease lambda if SSE has fallen 'sufficiently'
  if 2*N*(PI - PI_new) > (0.75*L),
    lambda = lambda/2;
  
  % Increase lambda if SSE has grown 'sufficiently'
  elseif 2*N*(PI-PI_new) <= (0.25*L),
    lambda = 2*lambda;
  end

% >>>>>>>>>>>>>>>>>>>>       UPDATES FOR NEXT ITERATION        <<<<<<<<<<<<<<<<<<<<
  % Update only if criterion has decreased
  if PI_new < PI,                      
    W1f = W1f_new;
    W2f = W2f_new;
    W1g = W1g_new;
    W2g = W2g_new;
    theta_red = theta_red_new;
    E_vector = E_new_vector;
    PI = PI_new;
    dw = 1;
    iteration = iteration + 1;
    PI_vector(iteration-1) = PI;                             % Collect PI in vector
    fprintf('iteration # %i   PI = %4.3e\r',iteration-1,PI); % Print on-line information
  end

  % Check if stop condition is fulfilled
  if (PI < stop_crit) | (lambda>1e7), break, end             
end
%----------------------------------------------------------------------------------
%--------------              END OF NETWORK TRAINING                  -------------
%----------------------------------------------------------------------------------
PI_vector = PI_vector(1:iteration-1);
c=fix(clock);
fprintf('\n\nNetwork training ended at %2i.%2i.%2i\n',c(4),c(5),c(6));
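
% Once trained, the two networks define the control-affine model
%   yhat(t) = fhat(phi(t)) + ghat(phi(t))*u(t)
% (cf. y2 = y2f + y2g.*U above). A hypothetical discrete-time input-output
% linearizing controller tracking a reference r would invert this relation:
%   u(t) = (r(t+1) - fhat(phi(t)))/ghat(phi(t))
% This sketch assumes ghat stays bounded away from zero along the trajectory.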
