nniol.m — from "类神经网路─MATLAB的应用" (Neural Networks: Applications in MATLAB, example programs)
function [W1f,W2f,W1g,W2g,PI_vector,iteration,lambda]=nniol(NetDeff,NetDefg,...
                                                  NN,W1f,W2f,W1g,W2g,trparms,Y,U)
%  NNIOL
%  -----
%       Train a neural network to model a dynamic system, assuming the 
%       following structure:
%         y(t) = f(y(t-1),...,y(t-na),u(t-nk-1),...,u(t-nk-nb+1)) 
%                   +  g(y(t-1),...,y(t-na),u(t-nk-1),...,u(t-nk-nb+1))*u(t-nk)
%       with the Levenberg-Marquardt method. This type of model description
%       is particularly relevant in control by discrete input-output
%       linearization.
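%       (Illustrative instance, not from the original file: with na=2, nb=2,
%       nk=1 the structure becomes
%         y(t) = f(y(t-1),y(t-2),u(t-2)) + g(y(t-1),y(t-2),u(t-2))*u(t-1).)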
%
%  CALL:
%  [W1f,W2f,W1g,W2g,PI_vector,iteration,lambda]=...
%                     nniol(NetDeff,NetDefg,NN,W1f,W2f,W1g,W2g,trparms,Y,U)
%
%  INPUTS:
%  U       : Input signal (= control signal)
%            dim(U) = [(inputs) * (# of data)]
%  Y       : Output signal. dim(Y) = [1 * # of data]
%  NN      : NN=[na nb nk].
%            na = # of past outputs used for determining the prediction
%            nb = # of past inputs used for determining the prediction
%            nk = time delay (usually 1)
%  NetDeff : Architecture of network used for modelling the function f
%  NetDefg : Architecture of network used for modelling the function g
%  W1f,W2f : Input-to-hidden layer and hidden-to-output layer weights for 
%  W1g,W2g   the "f" and "g" nets, respectively.
%            If they are passed as [], they will be initialized automatically
%  trparms : Contains parameters associated with the training (see MARQ)
%            if trparms=[] it is reset to trparms = [500 0 1 0]
% 
%  NB NB NB!
%  ---------
%  See the function MARQ for more information about the inputs and outputs.
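%
%  EXAMPLE (a minimal usage sketch; the data vectors y,u and the network
%  sizes are illustrative assumptions, not part of this file):
%    NetDeff = ['HHHHH';'L----'];       % f-net: 5 tanh hidden units, linear output
%    NetDefg = ['HHHHH';'L----'];       % g-net: same architecture
%    NN      = [2 2 1];                 % na=2, nb=2, nk=1
%    trparms = [500 0 1 0];             % max 500 iterations, no weight decay
%    [W1f,W2f,W1g,W2g,PI_vec,iter,lam] = ...
%                        nniol(NetDeff,NetDefg,NN,[],[],[],[],trparms,y,u);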
%
%  Programmed by : Magnus Norgaard, IAU/IMM, Technical University of Denmark
%  LastEditDate  : July 17, 1996

%----------------------------------------------------------------------------------
%--------------             NETWORK INITIALIZATIONS                   -------------
%----------------------------------------------------------------------------------

% >>>>>>>>>>>>>>>>>>>>>>>>>>>     REGRESSOR STRUCTURE     <<<<<<<<<<<<<<<<<<<<<<<<<   
[nu,N]      = size(U);                   % # of inputs and # of data
na = NN(1);                              % # of past y's to be used in TDL
nb = NN(2:1+nu);                         % # of past u's to be used in TDL
nk = NN(2+nu:1+2*nu);                    % Time delay in system
nmax        = max([na nb+nk-1]);
nab         = na+sum(nb);                % Number of "signal inputs" to each net
outputs  = 1;                            % # of outputs
inputs   = nab-1;                        % # of inputs
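% (Illustrative check of the bookkeeping above, assuming na=2, nb=2, nk=1:
%  nmax = max(2,2+1-1) = 2, nab = 4, inputs = 3.  Each net sees one regressor
%  fewer than nab because u(t-nk) enters the model directly as the factor on g.)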


% >>>>>>>>>>>>>>>>>>>>>>>>>>    OPTIONAL INITIALIZATIONS    <<<<<<<<<<<<<<<<<<<<<<<
% -- Initialize weights if necessary --
if isempty(W1f) | isempty(W2f) | isempty(W1g) | isempty(W2g)
  hiddenf = length(NetDeff(1,:));        % Number of hidden neurons in f-net
  W1f = rand(hiddenf,nab)-0.5;
  W2f = rand(1,hiddenf+1)-0.5;
  hiddeng = length(NetDefg(1,:));        % Number of hidden neurons in g-net
  W1g = rand(hiddeng,nab)-0.5;
  W2g = rand(1,hiddeng+1)-0.5;
end

% -- Initialize 'trparms' if necessary --
if isempty(trparms), trparms=[500 0 1 0]; end


% >>>>>>>>>>>>>>>>>>>>  CONSTRUCT THE REGRESSION MATRIX PHI   <<<<<<<<<<<<<<<<<<<<<
PHI = zeros(nab-1,N-nmax);
jj  = nmax+1:N;
for k = 1:na, PHI(k,:)    = -Y(jj-k); end
index = na;
for kk = 1:nu,                          % Here nu=1, always!
  for k = 1:nb(kk)-1, PHI(k+index,:) = U(kk,jj-k-nk(kk)); end
  index = index + nb(kk);
end
Y = Y(nmax+1:N);
U = U(nmax+1-nk:N-nk);
N = N-nmax;
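% (Sketch of the resulting layout for nu=1: each column of PHI is now
%    [-y(t-1); ... ; -y(t-na); u(t-nk-1); ... ; u(t-nk-nb+1)]
%  and U has been realigned so its i'th element is u(t-nk), the input that
%  multiplies the g-net output at sample i.)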


% >>>>>>>>>>>>>>>>>>>>>     DETERMINE NETWORK STRUCTURE      <<<<<<<<<<<<<<<<<<<<<<
% ---------- f-net structure ----------
L_hiddenf = find(NetDeff(1,:)=='L')';   % Location of linear hidden neurons
H_hiddenf = find(NetDeff(1,:)=='H')';   % Location of tanh hidden neurons
L_outputf = find(NetDeff(2,:)=='L')';   % Location of linear output neurons
H_outputf = find(NetDeff(2,:)=='H')';   % Location of tanh output neurons
y1f       =[zeros(hiddenf,N);ones(1,N)];% Hidden layer outputs
y2f       = zeros(outputs,N);           % Network output          
indexf = outputs*(hiddenf+1) + 1 + [0:hiddenf-1]*(inputs+1); % A useful vector!
params1f  = hiddenf*(inputs+1);         % # of input-to-hidden weights
params2f  = outputs*(hiddenf+1);        % # of hidden-to-output weights
paramsf   = params1f + params2f;        % Total # of weights in net f
PSIf      = zeros(paramsf,outputs*N);   % Deriv. of fhat w.r.t. each weight
onesf_h   = ones(hiddenf+1,1);          % A vector of ones
onesf_i   = ones(inputs+1,1);           % Another vector of ones

% ---------- g-net structure ----------
L_hiddeng = find(NetDefg(1,:)=='L')';   % Location of linear hidden neurons
H_hiddeng = find(NetDefg(1,:)=='H')';   % Location of tanh hidden neurons
L_outputg = find(NetDefg(2,:)=='L')';   % Location of linear output neurons
H_outputg = find(NetDefg(2,:)=='H')';   % Location of tanh output neurons
y1g       =[zeros(hiddeng,N);ones(1,N)];% Hidden layer outputs
y2g       = zeros(outputs,N);           % Network output
indexg = outputs*(hiddeng+1) + 1 + [0:hiddeng-1]*(inputs+1); % A useful vector!
params1g  = hiddeng*(inputs+1);         % # of input-to-hidden weights
params2g  = outputs*(hiddeng+1);        % # of hidden-to-output weights
paramsg   = params1g + params2g;        % Total # of weights in net g
PSIg      = zeros(paramsg,outputs*N);   % Deriv. of ghat w.r.t. each weight
onesg_h   = ones(hiddeng+1,1);          % A vector of ones
onesg_i   = ones(inputs+1,1);           % Another vector of ones
ones_u    = ones(paramsg,1);            % Yet another vector of ones


% >>>>>>>>>>>>>>>>>>>>     MISCELLANEOUS INITIALIZATIONS     <<<<<<<<<<<<<<<<<<<<<
index2 = (0:N-1)*outputs;               % Yet another useful vector
iteration = 1;                          % Counter variable
dw        = 1;                          % Flag telling that the weights are new
PHI_aug   = [PHI;ones(1,N)];            % Augment PHI with a row containing ones
parameters = paramsf + paramsg;         % Total # of weights
PSI      = zeros(parameters,outputs*N); % Deriv. of each output w.r.t. each weight
identity = eye(parameters);             % Identity matrix
                                        % Parameter vector containing all weights
theta = [reshape(W2f',params2f,1) ; reshape(W1f',params1f,1)];
theta = [theta ; reshape(W2g',params2g,1) ; reshape(W1g',params1g,1)];
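% (Packing order, row-wise reshape of each matrix: [W2f; W1f; W2g; W1g], i.e.
%  each net's hidden-to-output weights precede its input-to-hidden weights.)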
theta_index = find(theta);              % Index to weights<>0
theta_red = theta(theta_index);         % Reduced parameter vector
reduced  = length(theta_index);         % The # of parameters in theta_red
index3   = 1:(reduced+1):(reduced^2);   % A third useful vector


% >>>>>>>>>>>>>>>>>>>>>>>>>    TRAINING PARAMETERS     <<<<<<<<<<<<<<<<<<<<<<<<<<<
max_iter  = trparms(1);                 % Max no. of iterations
stop_crit = trparms(2);                 % Stop if cost function gets below this
lambda    = trparms(3);                 % Levenberg-Marquardt parameter
if length(trparms)==4,                  % Scalar weight decay parameter
  D = trparms(4*ones(1,reduced))';      
elseif length(trparms)==5,              % Two weight decay parameters
  D = trparms([4*ones(1,params2f) 5*ones(1,params1f) ...
               4*ones(1,params2g) 5*ones(1,params1g)])'; % Match theta ordering
  D = D(theta_index);
elseif length(trparms)>5,               % Individual weight decay
  D = trparms(4:length(trparms))';
end
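% (Illustrative trparms settings for the three cases above:
%    [500 0 1 1e-4]      -> decay 1e-4 on every weight
%    [500 0 1 1e-4 1e-3] -> 1e-4 on hidden-to-output, 1e-3 on input-to-hidden)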
PI_vector = zeros(max_iter,1);          % A vector containing the accumulated SSE


%----------------------------------------------------------------------------------
%--------------                   TRAIN NETWORK                       -------------
%----------------------------------------------------------------------------------
clc;
c=fix(clock);
fprintf('Network training started at %2i.%2i.%2i\n\n',c(4),c(5),c(6));


% >>>>>>>>>>>>>>>>>>>>>  COMPUTE NETWORK OUTPUT  y2(theta)   <<<<<<<<<<<<<<<<<<<<<<
h1f = W1f*PHI_aug;  
y1f(H_hiddenf,:) = pmntanh(h1f(H_hiddenf,:));
y1f(L_hiddenf,:) = h1f(L_hiddenf,:);    
h2f = W2f*y1f;
y2f(H_outputf,:) = pmntanh(h2f(H_outputf,:));
y2f(L_outputf,:) = h2f(L_outputf,:);

h1g = W1g*PHI_aug;  
y1g(H_hiddeng,:) = pmntanh(h1g(H_hiddeng,:));
y1g(L_hiddeng,:) = h1g(L_hiddeng,:);    
h2g = W2g*y1g;
y2g(H_outputg,:) = pmntanh(h2g(H_outputg,:));
y2g(L_outputg,:) = h2g(L_outputg,:);

y2       = y2f + y2g.*U;                % Network output
E        = Y - y2;                      % Training error
E_vector = E(:);                        % Reshape E into a long vector
SSE      = E_vector'*E_vector;          % Sum of squared errors (SSE)
PI       = (SSE+theta_red'*(D.*theta_red))/(2*N); % Performance index
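% (I.e. the regularized criterion minimized below:
%    PI = ( sum_t e(t)^2 + theta'*diag(D)*theta ) / (2*N)  )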

while iteration<=max_iter    
if dw==1,
% >>>>>>>>>>>>>>>>>>>>>>>>>>>   COMPUTE THE PSI MATRIX   <<<<<<<<<<<<<<<<<<<<<<<<<<
