nniol.m
From a MATLAB-based neural network software package for nonlinear system identification.
function [W1f,W2f,W1g,W2g,PI_vector,iteration,lambda]=nniol(NetDeff,NetDefg,...
                                                  NN,W1f,W2f,W1g,W2g,trparms,Y,U)
%  NNIOL
%  -----
%       Train a neural network to model a dynamic system, assuming the 
%       following structure:
%         y(t) = f(y(t-1),..,y(t-na),u(t-nk-1),...,u(t-nk-nb)) 
%                   +  g(y(t-1),..,y(t-na),u(t-nk-1),...,u(t-nk-nb))*u(t-nk)
%       with the Levenberg-Marquardt method. This type of model description
%       is particularly relevant in control by discrete input-output
%       linearization.
%
%  CALL:
%  [W1f,W2f,W1g,W2g,critvec,iteration,lambda]=...
%                     nniol(NetDeff,NetDefg,NN,W1f,W2f,W1g,W2g,trparms,Y,U)
%
%  INPUTS:
%  U       : Input signal (= control signal) (left out in the nnarma case)
%            dim(U) = [(inputs) * (# of data)]
%  Y       : Output signal. dim(Y) = [1 * # of data]
%  NN      : NN=[na nb nk].
%            na = # of past outputs used for determining the prediction
%            nb = # of past inputs used for determining prediction
%            nk = time delay (usually 1)
%  NetDeff : Architecture of network used for modelling the function f
%  NetDefg : Architecture of network used for modelling the function g
%  W1f,W2f : Input-to-hidden layer and hidden-to-output layer weights for 
%  W1g,W2g   the "f" and "g" nets, respectively.
%            If they are passed as [], they will be initialized automatically
%  trparms:  Data structure with parameters associated with the
%            training algorithm (optional). Use the function SETTRAIN if
%            you do not want to use the default values.
%
%            For time series (NNARMA models), use only NN=[na nc].
% 
%  See the function MARQ for an explanation of the remaining input arguments
%  as well as of the returned variables.
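%
%  EXAMPLE (an illustrative sketch only; the architectures, parameter choices,
%  and data vectors y and u below are placeholders, not part of this package's
%  documentation):
%    NetDeff = ['HHHHH';'L----'];   % f-net: 5 tanh hidden units, linear output
%    NetDefg = ['HHH';'L--'];       % g-net: 3 tanh hidden units, linear output
%    NN      = [2 2 1];             % na=2, nb=2, time delay nk=1
%    trparms = settrain;            % default training parameters
%    [W1f,W2f,W1g,W2g,PI_vec,iter,lam] = ...
%             nniol(NetDeff,NetDefg,NN,[],[],[],[],trparms,y,u);
%    % Passing [] for the weights makes nniol initialize them at random.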

%  Programmed by : Magnus Norgaard, IAU/IMM, Technical University of Denmark
%  LastEditDate  : January 2, 2000

%----------------------------------------------------------------------------------
%--------------             NETWORK INITIALIZATIONS                   -------------
%----------------------------------------------------------------------------------

% >>>>>>>>>>>>>>>>>>>>>>>>>>>     REGRESSOR STRUCTURE     <<<<<<<<<<<<<<<<<<<<<<<<<   
[nu,N]      = size(U);                   % # of inputs and # of data
na = NN(1);                              % # of past y's to be used in TDL
nb = NN(2:1+nu);                         % # of past u's to be used in TDL
nk = NN(2+nu:1+2*nu);                    % Time delay in system
nmax        = max([na nb+nk-1]);
nab         = na+sum(nb);                % Number of "signal inputs" to each net
outputs  = 1;                            % # of outputs
inputs   = nab-1;                        % # of inputs
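
% Illustration (hypothetical numbers, not computed by this file): with nu=1
% and NN=[2 2 1] the lines above give na=2, nb=2, nk=1, nmax=2, nab=4 and
% inputs=3, i.e. each net sees three regressors while u(t-nk) enters the
% model separately through the g-net gain.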


% >>>>>>>>>>>>>>>>>>>>>>>>>>    OPTIONAL INITIALIZATIONS    <<<<<<<<<<<<<<<<<<<<<<<
% -- Initialize weights if necessary --
if isempty(W1f) | isempty(W2f) | isempty(W1g) | isempty(W2g)
  hiddenf = length(NetDeff(1,:));        % Number of hidden neurons in f-net
  W1f = rand(hiddenf,nab)-0.5;
  W2f = rand(1,hiddenf+1)-0.5;
  hiddeng = length(NetDefg(1,:));        % Number of hidden neurons in g-net
  W1g = rand(hiddeng,nab)-0.5;
  W2g = rand(1,hiddeng+1)-0.5;
end


% >>>>>>>>>>>>>>>>>>>>  CONSTRUCT THE REGRESSION MATRIX PHI   <<<<<<<<<<<<<<<<<<<<<
PHI = zeros(nab-1,N-nmax);
jj  = nmax+1:N;
for k = 1:na, PHI(k,:)    = Y(jj-k); end
index = na;
for kk = 1:nu,                          % Here nu=1, always!
  for k = 1:nb(kk)-1, PHI(k+index,:) = U(kk,jj-k-nk(kk)); end
  index = index + nb(kk);
end
Y = Y(nmax+1:N);
U = U(nmax+1-nk:N-nk);
N = N-nmax;
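
% Continuing the hypothetical NN=[2 2 1] illustration: each column of PHI now
% holds one regression vector [y(t-1); y(t-2); u(t-2)], Y and U have been
% trimmed to the usable samples (N is updated accordingly), and U is shifted
% so that U(t) corresponds to u(t-nk) in the model y(t) = f(.) + g(.)*u(t-nk).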


% >>>>>>>>>>>>>>>>>>>>>     DETERMINE NETWORK STRUCTURE      <<<<<<<<<<<<<<<<<<<<<<
% ---------- f-net structure ----------
L_hiddenf = find(NetDeff(1,:)=='L')';   % Location of linear hidden neurons
H_hiddenf = find(NetDeff(1,:)=='H')';   % Location of tanh hidden neurons
L_outputf = find(NetDeff(2,:)=='L')';   % Location of linear output neurons
H_outputf = find(NetDeff(2,:)=='H')';   % Location of tanh output neurons
y1f       =[zeros(hiddenf,N);ones(1,N)];% Hidden layer outputs
y2f       = zeros(outputs,N);           % Network output          
indexf = outputs*(hiddenf+1) + 1 + [0:hiddenf-1]*(inputs+1); % Position of each hidden unit's first W1f weight
params1f  = hiddenf*(inputs+1);         % # of input-to-hidden weights
params2f  = outputs*(hiddenf+1);        % # of hidden-to-output weights
paramsf   = params1f + params2f;        % Total # of weights in net f
PSIf      = zeros(paramsf,outputs*N);   % Deriv. of fhat w.r.t. each weight
onesf_h   = ones(hiddenf+1,1);          % A vector of ones
onesf_i   = ones(inputs+1,1);           % Another vector of ones

% ---------- g-net structure ----------
L_hiddeng = find(NetDefg(1,:)=='L')';   % Location of linear hidden neurons
H_hiddeng = find(NetDefg(1,:)=='H')';   % Location of tanh hidden neurons
L_outputg = find(NetDefg(2,:)=='L')';   % Location of linear output neurons
H_outputg = find(NetDefg(2,:)=='H')';   % Location of tanh output neurons
y1g       =[zeros(hiddeng,N);ones(1,N)];% Hidden layer outputs
y2g       = zeros(outputs,N);           % Network output
indexg = outputs*(hiddeng+1) + 1 + [0:hiddeng-1]*(inputs+1); % Position of each hidden unit's first W1g weight
params1g  = hiddeng*(inputs+1);         % # of input-to-hidden weights
params2g  = outputs*(hiddeng+1);        % # of hidden-to-output weights
paramsg   = params1g + params2g;        % Total # of weights in net g
PSIg      = zeros(paramsg,outputs*N);   % Deriv. of ghat w.r.t. each weight
onesg_h   = ones(hiddeng+1,1);          % A vector of ones
onesg_i   = ones(inputs+1,1);           % Another vector of ones
ones_u    = ones(paramsg,1);            % Yet another vector of ones
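
% The f-net and g-net blocks above are structured identically: row 1 of the
% NetDef string describes the hidden layer and row 2 the output layer, with
% 'H' marking tanh units and 'L' linear units. PSIf and PSIg are preallocated
% to hold the derivative of the respective network output with respect to
% every weight, one column per (output, sample) pair.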


% >>>>>>>>>>>>>>>>>>>>     MISCELLANEOUS INITIALIZATIONS      <<<<<<<<<<<<<<<<<<<<<
index2 = (0:N-1)*outputs;               % Column offsets: one output block per sample
iteration = 1;                          % Counter variable
dw        = 1;                          % Flag telling that the weights are new
PHI_aug   = [PHI;ones(1,N)];            % Augment PHI with a row containing ones
parameters = paramsf + paramsg;         % Total # of weights
PSI      = zeros(parameters,outputs*N); % Deriv. of each output w.r.t. each weight
identity = eye(parameters);             % Identity matrix
                                        % Parameter vector containing all weights
theta = [reshape(W2f',params2f,1) ; reshape(W1f',params1f,1)];
theta = [theta ; reshape(W2g',params2g,1) ; reshape(W1g',params1g,1)];
theta_index = find(theta);              % Index to weights<>0
theta_red = theta(theta_index);         % Reduced parameter vector
reduced  = length(theta_index);         % The # of parameters in theta_red
index3   = 1:(reduced+1):(reduced^2);   % Diagonal indices of a reduced-by-reduced matrix
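
% Packing order used above: theta stacks the f-net's hidden-to-output weights,
% then its input-to-hidden weights, followed by the same two blocks for the
% g-net (each matrix reshaped row by row). Weights that are exactly zero are
% treated as pruned: theta_red, and the quantities sized from 'reduced',
% involve only the nonzero entries.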


% >>>>>>>>>>>>>>>>>>>>>>>>>    TRAINING PARAMETERS     <<<<<<<<<<<<<<<<<<<<<<<<<<<
% -- Initialize 'trparms' if necessary --
lambda_old = 0;
if ~exist('trparms') | isempty(trparms) % Default training parameters
  trparms = settrain;
  lambda  = trparms.lambda;
  D       = trparms.D;
else                                    % User specified values
  if ~isstruct(trparms),
     error('''trparms'' must be a structure variable.');
  end
  if ~isfield(trparms,'infolevel')
     trparms = settrain(trparms,'infolevel','default');
  end
  if ~isfield(trparms,'maxiter')
     trparms = settrain(trparms,'maxiter','default');
  end
  if ~isfield(trparms,'critmin')
     trparms = settrain(trparms,'critmin','default');
  end
  if ~isfield(trparms,'critterm')
     trparms = settrain(trparms,'critterm','default');
  end
  if ~isfield(trparms,'gradterm')
     trparms = settrain(trparms,'gradterm','default');
  end
  if ~isfield(trparms,'paramterm')
     trparms = settrain(trparms,'paramterm','default');
  end
  if ~isfield(trparms,'lambda')
     trparms = settrain(trparms,'lambda','default');
  end
  lambda    = trparms.lambda;
  if ~isfield(trparms,'D')
     trparms = settrain(trparms,'D','default');
     D = trparms.D;
  else
    if length(trparms.D)==1,              % Scalar weight decay parameter
      D = trparms.D(ones(1,reduced))';      
    elseif length(trparms.D)==2,          % Two weight decay parameters
      D = trparms.D([ones(1,params2f) 2*ones(1,params1f) ...
                     ones(1,params2g) 2*ones(1,params1g)])'; % D(1): hidden-to-output, D(2): input-to-hidden
      D = D(theta_index);
    elseif length(trparms.D)>2,           % Individual weight decay
      D = trparms.D(:);
    end
  end
end
D = D(:);
critdif  = trparms.critterm+1;            % Initialize stopping variables
gradmax  = trparms.gradterm+1;
paramdif = trparms.paramterm+1;
PI_vector = zeros(trparms.maxiter,1);     % Vector for storing criterion values
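
% Presumably, as in the companion function MARQ, the values stored in
% PI_vector are the regularized criterion
%   PI = (E*E' + theta_red'*(D.*theta_red)) / (2*N),
% with D acting as a (possibly per-weight) weight-decay vector and lambda as
% the Levenberg-Marquardt step-control parameter.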

%----------------------------------------------------------------------------------
%--------------                   TRAIN NETWORK                       -------------
%----------------------------------------------------------------------------------
clc;
c=fix(clock);
fprintf('Network training started at %2i.%2i.%2i\n\n',c(4),c(5),c(6));


% >>>>>>>>>>>>>>>>>>>>>  COMPUTE NETWORK OUTPUT  y2(theta)   <<<<<<<<<<<<<<<<<<<<<<
h1f = W1f*PHI_aug;  
y1f(H_hiddenf,:) = pmntanh(h1f(H_hiddenf,:));
y1f(L_hiddenf,:) = h1f(L_hiddenf,:);    
h2f = W2f*y1f;
y2f(H_outputf,:) = pmntanh(h2f(H_outputf,:));
y2f(L_outputf,:) = h2f(L_outputf,:);

h1g = W1g*PHI_aug;  
y1g(H_hiddeng,:) = pmntanh(h1g(H_hiddeng,:));
y1g(L_hiddeng,:) = h1g(L_hiddeng,:);    
h2g = W2g*y1g;
y2g(H_outputg,:) = pmntanh(h2g(H_outputg,:));
y2g(L_outputg,:) = h2g(L_outputg,:);

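% Both forward passes follow the same pattern: the regressors augmented with a
% bias row are propagated through W1 and the selected hidden activations, the
% hidden outputs (already carrying a bias row) are multiplied by W2, and one
% output per sample results. By the model structure stated in the header,
% these outputs combine into the one-step-ahead prediction yhat = y2f + y2g.*U
% (given here only as a reading of that structure).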