% --------------------------------- SPECIAL3 ------------------------------
%
% Program for training an inverse model of a process by so-called specialized
% training (see Psaltis, Sideris & Yamamura: "A Multilayered Neural Network
% Controller").
%
% The inverse model is trained with a recursive Gauss-Newton algorithm
% ("recursive prediction error method"). Three different versions have been
% implemented: Exponential forgetting, constant trace, and the
% exponential forgetting and resetting algorithm (EFRA).
%
% The user must provide a neural network model of the process to be controlled
% and an initial inverse model. This can be created by choosing the weights at
% random or by training the network with general training. The latter is
% recommended when possible.
%
% All parameters associated with the training procedure are set in the
% file 'invinit2.m'
%
% See the program "special2" for an implementation inspired by recursive
% pseudo-linear regression.
%
% Made by Magnus Norgaard, IAU, Technical University of Denmark.
% LastEditDate: Oct. 28, 1997
%----------------------------------------------------------------------------------
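%
% Sketch of the core recursive Gauss-Newton update carried out in the main
% loop, shown here for the exponential forgetting version (PSI denotes the
% derivative of the inverse model output w.r.t. the weights, ey the error):
%
%    K     = P*PSI/(lambda + PSI'*P*PSI);    % Gain vector
%    theta = theta + K*ey;                   % Update parameter vector
%    P     = (P - K*PSI'*P)/lambda;          % Update covariance matrix
%
% The constant trace and EFRA versions modify only the covariance update
% (see the notes where the three methods are initialized below).
%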
%------------------- >>> INITIALIZATIONS <<< ---------------------
%----------------------------------------------------------------------------------
%>>>>>>>>>>>>>>>>>>>>>> READ VARIABLES FROM FILE <<<<<<<<<<<<<<<<<<<<<<<
clear plot_a plot_b
global ugl
invinit2 % Run user-specified initializations
eval(['load ' nninv]); % Load inverse neural model
% >>>>>>>>>>>>>>>>> DETERMINE STRUCTURE OF FORWARD MODEL <<<<<<<<<<<<<<<<<<<<
outputs = 1; % # of outputs
eval(['load ' nnforw]); % Load forward neural model of system
hiddenf = length(NetDeff(1,:)); % Number of hidden neurons
L_hiddenf = find(NetDeff(1,:)=='L')'; % Location of linear hidden neurons
H_hiddenf = find(NetDeff(1,:)=='H')'; % Location of tanh hidden neurons
L_outputf = find(NetDeff(2,:)=='L')'; % Location of linear output neurons
H_outputf = find(NetDeff(2,:)=='H')'; % Location of tanh output neurons
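% (NetDeff is assumed to follow the usual two-row toolbox format, e.g.
%  NetDeff = ['HHHHH';'L----'], first row = hidden layer, second row =
%  output layer, with 'H' = tanh unit and 'L' = linear unit.)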
y1f = [zeros(hiddenf,1);1]; % Hidden layer outputs (with appended bias)
yhat = zeros(outputs,1); % Network output
% >>>>>>>>>>>>>>>>>>>>>>>> DETERMINE REGRESSOR STRUCTURE <<<<<<<<<<<<<<<<<<<<<<
na = NN(1); % # of past y's to be used in TDL
nb = NN(2); % # of past u's to be used in TDL
nk = NN(3); % Time delay in system
nab = na+sum(nb); % Number of "signal inputs" to each net
inputs = nab; % # of inputs
phi = zeros(inputs+1,1); % Initialize regressor vector
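% (Convention used below: phi stacks the na most recent outputs, the nb most
%  recent controls, and a constant bias input, i.e.
%  phi = [y_old(1:na); u_old(1:nb); 1].)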
% >>>>>>>>>>>>>>>>>> DETERMINE STRUCTURE OF INVERSE MODEL <<<<<<<<<<<<<<<<<<<
hiddeni = length(NetDefi(1,:)); % Number of hidden neurons
L_hiddeni = find(NetDefi(1,:)=='L')'; % Location of linear hidden neurons
H_hiddeni = find(NetDefi(1,:)=='H')'; % Location of tanh hidden neurons
L_outputi = find(NetDefi(2,:)=='L')'; % Location of linear output neurons
H_outputi = find(NetDefi(2,:)=='H')'; % Location of tanh output neurons
y1i = [zeros(hiddeni,1);1]; % Hidden layer outputs (with appended bias)
d21 = W2f(1:hiddenf); % dyhat/dy1f (holds as-is for a linear output unit)
d10 = W1f(:,na+1); % dy1f/du (holds as-is for linear hidden units)
d20 = 0; % Derivative of output w.r.t. control
%>>>>>>>>>>>>>>>>> CALCULATE REFERENCE SIGNAL & FILTER IT <<<<<<<<<<<<<<<<<<
if strcmp(refty,'siggener'),
ref = zeros(samples+1,1);
for ii = 1:samples,
ref(ii) = siggener(Ts*(ii-1),sq_amp,sq_freq,sin_amp,sin_freq,dc,sqrt(Nvar));
end
else
eval(['ref = ' refty ';']);
ref=ref(:);
i=length(ref);
if i>samples+1,
ref=ref(1:samples+1);
else
ref=[ref;ref(i)*ones(samples+1-i,1)];
end
end
ym = filter(Bm,Am,ref); % Filter the reference
ym(samples+1) = ym(1); % Necessary because the reference is repeated
ref(samples+1) = ref(1);
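% (Bm and Am specify the desired reference model for the closed-loop system.
%  As a hypothetical example, Bm = [0.3], Am = [1 -0.7] gives a first-order
%  model with unit static gain, since sum(Bm)/sum(Am) = 0.3/0.3 = 1.)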
%>>>>>>>>>>>>>>>>>>>>>>> INITIALIZE VARIABLES <<<<<<<<<<<<<<<<<<<<<<<
% Initialization of vectors containing past signals
maxlength = 5;
y_old = zeros(maxlength,1);
u_old = zeros(maxlength,1);
% Variables associated with the weight update algorithm
SSE = 0; % Sum of squared error in current epoch
epochs = 0; % Epoch counter
first = max(na,nb+nk-1)+10; % Update weights only when iteration>first (delay lines filled, plus a 10-sample margin)
index = (hiddeni+1) + 1 + [0:hiddeni-1]*(inputs+1); % Positions in theta of W1i(:,1)
parameters1= hiddeni*(inputs+1); % # of input-to-hidden weights
parameters2= (hiddeni+1); % # of hidden-to-output weights
parameters = parameters1 + parameters2; % Total # of weights
PSI = zeros(parameters,1); % Deriv. of inv. net output w.r.t. weights
PSIold = zeros(parameters,nb-1); % Past PSI vectors
PSI_red = zeros(parameters,1); % Deriv. of forw. net output w.r.t. weights
PSIold_red = zeros(parameters,na); % Past PSI_red vectors
% Parametervector containing all weights
theta = [reshape(W2i',parameters2,1) ; reshape(W1i',parameters1,1)];
index3= 1:(parameters+1):(parameters^2);% Linear indices of the diagonal of P
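% (theta stacks the 1 x (hiddeni+1) output-layer matrix W2i first, then the
%  rows of the hiddeni x (inputs+1) matrix W1i, so weight W1i(j,k) sits at
%  position (hiddeni+1) + (j-1)*(inputs+1) + k; "index" above exploits this.)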
if strcmp(method,'ff'), % Forgetting factor method
mflag = 1; % Method flag
lambda = trparms(1); % Forgetting factor
p0 = trparms(2); % Diagonal element of covariance matrix
P = p0 * eye(parameters); % Initialize covariance matrix
elseif strcmp(method,'ct'), % Constant trace method
mflag = 2; % Method flag
lambda = trparms(1); % Forgetting factor
alpha_max = trparms(2); % Max. eigenvalue
alpha_min = trparms(3); % Min. eigenvalue
P = alpha_max * eye(parameters); % Initialize covariance matrix
Pbar = P; % Initialize covariance matrix
elseif strcmp(method,'efra'), % EFRA method
mflag = 3; % Method flag
alpha = trparms(1); % EFRA parameters
beta = trparms(2);
delta = trparms(3);
lambda = trparms(4);
gamma = (1-lambda)/lambda;
maxeig = gamma/(2*delta)*(1+sqrt(1+4*beta*delta/(gamma*gamma)));% Max. eigenvalue
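% (maxeig is the positive root of delta*s^2 - gamma*s - beta = 0, i.e. the
%  stationary eigenvalue of P under no excitation; EFRA keeps the
%  eigenvalues of P below this bound.)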
P = maxeig * eye(parameters); % Initialize covariance matrix
betaI = beta*eye(parameters); % Useful diagonal matrix
end
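% (How the covariance update in the main loop differs per method, assuming
%  the usual formulations: 'ff' uses P = (P - K*PSI'*P)/lambda as sketched in
%  the header; 'ct' applies the same update to Pbar and then rescales so the
%  eigenvalues of P stay between alpha_min and alpha_max; 'efra' adds the
%  terms betaI - delta*P*P to a forgetting-type update, bounding the
%  eigenvalues of P from both sides.)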
% Initialization of Simulink system
if strcmp(simul,'simulink')
simoptions = simset('Solver',integrator,'MaxRows',0); % Set integrator opt.
eval(['[sizes,x0] = ' sim_model '([],[],[],0);']); % Get initial states
end
% Initializations of vectors used for storing old data
ref_data = ref(1:samples);
ym_data = ym(1:samples);
u_data = zeros(samples,1);
y_data = zeros(samples,1);
yhat_data = zeros(samples,1);
% Miscellaneous initializations
maxiter = maxiter*samples; % Total # of iterations (epochs*samples)
u = 0;
y = 0;
t = -Ts;
i = 0; % Iteration counter within current epoch
fighandle=progress;
%----------------------------------------------------------------------------------
%--------------------- >>> MAIN LOOP <<< --------------------
%----------------------------------------------------------------------------------
for iter=1:maxiter,
i = i+1;
t = t + Ts;
%>>>>>>>>>>>>>> PREDICT OUTPUT OF SYSTEM USING THE FORWARD MODEL <<<<<<<<<<<<<
phi = [y_old(1:na);u_old(1:nb);1];
h1f = W1f*phi;
y1f(H_hiddenf) = pmntanh(h1f(H_hiddenf));
y1f(L_hiddenf) = h1f(L_hiddenf);
h2f = W2f*y1f;
yhat(H_outputf) = pmntanh(h2f(H_outputf));
yhat(L_outputf) = h2f(L_outputf);
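% (Compactly: yhat = g(W2f*[f(W1f*phi);1]), where f and g are tanh for 'H'
%  units and the identity for 'L' units; this is the standard two-layer
%  forward pass.)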
%>>>>>>>>>>>>>>>>>>>> READ OUTPUT FROM THE PHYSICAL SYSTEM <<<<<<<<<<<<<<<<<<<
if strcmp(simul,'simulink')
utmp=[t-Ts,u_old(1);t,u_old(1)];
simoptions.InitialState=x0;
[time,x0,y] = sim(sim_model,[t-Ts t],simoptions,utmp);
x0 = x0(size(x0,1),:)';
y = y(size(y,1),:)';
elseif strcmp(simul,'matlab')
ugl = u_old(1);
[time,x] = ode45(mat_model,[t-Ts t],x0);
x0 = x(length(time),:)';
eval(['y = ' model_out '(x0);']);
elseif strcmp(simul,'nnet')
y = yhat;
end
%>>>>>>>>>>>>>>>>>>>>>>> CALCULATE PREDICTION ERROR <<<<<<<<<<<<<<<<<<<<<<<
ey = ym(i) - y;
%>>>>>>>>>>>> COMPUTE DERIVATIVE OF PREDICTED OUTPUT W.R.T. CONTROL <<<<<<<<<<<<
% Matrix containing the partial derivative of the output w.r.t
% each of the outputs from the hidden units
if H_outputf,
d21 = (1-yhat*yhat)*W2f(1:hiddenf);
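% (For tanh output units the chain rule gives dyhat/dh2f = 1 - yhat.^2,
%  hence the rescaling of d21 here; for linear outputs the value from the
%  initialization section is used unchanged.)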