demo_mc.m
% PURPOSE : Demonstrate the differences between the following filters on the same problem:
%
%           1) Extended Kalman Filter  (EKF)
%           2) Unscented Kalman Filter (UKF)
%           3) Particle Filter         (PF)
%           4) PF with EKF proposal    (PFEKF)
%           5) PF with UKF proposal    (PFUKF)
%
% For more details refer to:
%
% AUTHORS : Nando de Freitas      (jfgf@cs.berkeley.edu)
%           Rudolph van der Merwe (rvdmerwe@ece.ogi.edu)
% DATE    : 17 August 2000

clear all;
clc;
echo off;
path('./ukf',path);

% INITIALISATION AND PARAMETERS:
% ==============================

no_of_runs = 100;        % Number of experiments to generate statistical averages.
doPlot = 0;              % 1 = plot online. 0 = only plot at the end.
sigma = 1e-5;            % Variance of the Gaussian measurement noise.
g1 = 3;                  % Parameter of Gamma transition prior.
g2 = 2;                  % Parameter of Gamma transition prior.
                         % Thus mean = 3/2 and var = 3/4.
T = 60;                  % Number of time steps.
R = 1e-5;                % EKF's measurement noise variance.
Q = 3/4;                 % EKF's process noise variance.
P0 = 3/4;                % EKF's initial variance of the states.
N = 200;                 % Number of particles.

resamplingScheme = 1;    % The possible choices are
                         % systematic sampling (2),
                         % residual (1)
                         % and multinomial (3).
                         % They're all O(N) algorithms.

Q_pfekf = 10*3/4;
R_pfekf = 1e-1;
Q_pfukf = 2*3/4;
R_pfukf = 1e-1;

alpha = 1;               % UKF : point scaling parameter.
beta  = 0;               % UKF : scaling parameter for higher order terms of Taylor series expansion.
kappa = 2;               % UKF : sigma point selection scaling parameter (best to leave this = 0).

%*************************************************************************

% SETUP BUFFERS TO STORE PERFORMANCE RESULTS
% ==========================================

rmsError_ekf     = zeros(1,no_of_runs);
rmsError_ukf     = zeros(1,no_of_runs);
rmsError_pf      = zeros(1,no_of_runs);
rmsError_pfMC    = zeros(1,no_of_runs);
rmsError_pfekf   = zeros(1,no_of_runs);
rmsError_pfekfMC = zeros(1,no_of_runs);
rmsError_pfukf   = zeros(1,no_of_runs);
rmsError_pfukfMC = zeros(1,no_of_runs);

time_pf      = zeros(1,no_of_runs);
time_pfMC    = zeros(1,no_of_runs);
time_pfekf   = zeros(1,no_of_runs);
time_pfekfMC = zeros(1,no_of_runs);
time_pfukf   = zeros(1,no_of_runs);
time_pfukfMC = zeros(1,no_of_runs);

%*************************************************************************

% MAIN LOOP

for j=1:no_of_runs,

  rand('state',sum(100*clock));    % Shuffle the pack!
  randn('state',sum(100*clock));   % Shuffle the pack!
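
  % NOTE: the data generated below follow a scalar nonlinear state-space model
  % of the form
  %
  %     x(t) = ffun(x(t-1),t) + v(t),   v(t) ~ Gamma(g1,g2)
  %     y(t) = hfun(x(t),t)   + n(t),   n(t) ~ N(0,sigma)
  %
  % ffun.m and hfun.m are separate files that are not part of this listing.
  % Judging from the Jacobians used in the EKF section further down
  % (Jx = 0.5, and Jy = 2*0.2*x for t <= 30, Jy = 0.5 afterwards), they appear
  % to match the usual unscented-particle-filter benchmark, roughly
  % ffun(x,t) = 1 + sin(0.04*pi*t) + 0.5*x and hfun(x,t) = 0.2*x^2 for
  % t <= 30 with a linear observation of slope 0.5 afterwards; treat these
  % exact forms as an assumption rather than as part of this file.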

  % GENERATE THE DATA:
  % ==================
  x = zeros(T,1);
  y = zeros(T,1);
  processNoise = zeros(T,1);
  measureNoise = zeros(T,1);
  x(1) = 1;                                            % Initial state.
  for t=2:T
    processNoise(t) = gengamma(g1,g2);
    measureNoise(t) = sqrt(sigma)*randn(1,1);
    x(t) = feval('ffun',x(t-1),t) + processNoise(t);   % Gamma transition prior.
    y(t) = feval('hfun',x(t),t) + measureNoise(t);     % Gaussian likelihood.
  end;

  % PLOT THE GENERATED DATA:
  % ========================
  figure(1)
  clf;
  plot(1:T,x,'r',1:T,y,'b');
  ylabel('Data','fontsize',15);
  xlabel('Time','fontsize',15);
  legend('States (x)','Observations(y)');

  %%%%%%%%%%%%%%%  PERFORM EKF and UKF ESTIMATION  %%%%%%%%%%%%%%%%%%%%%%%
  %%%%%%%%%%%%%%%  ==============================  %%%%%%%%%%%%%%%%%%%%%%%

  % INITIALISATION:
  % ===============
  mu_ekf = ones(T,1);       % EKF estimate of the mean of the states.
  P_ekf  = P0*ones(T,1);    % EKF estimate of the variance of the states.
  mu_ukf = mu_ekf;          % UKF estimate of the mean of the states.
  P_ukf  = P_ekf;           % UKF estimate of the variance of the states.
  yPred      = ones(T,1);   % One-step-ahead predicted values of y.
  mu_ekfPred = ones(T,1);   % EKF O-s-a estimate of the mean of the states.
  PPred      = ones(T,1);   % EKF O-s-a estimate of the variance of the states.

  disp(' ');

  for t=2:T,
    fprintf('run = %i / %i :  EKF & UKF : t = %i / %i  \r',j,no_of_runs,t,T);
    fprintf('\n')

    % PREDICTION STEP:
    % ================
    mu_ekfPred(t) = feval('ffun',mu_ekf(t-1),t);
    Jx = 0.5;                                    % Jacobian for ffun.
    PPred(t) = Q + Jx*P_ekf(t-1)*Jx';

    % CORRECTION STEP:
    % ================
    yPred(t) = feval('hfun',mu_ekfPred(t),t);
    if t<=30,
      Jy = 2*0.2*mu_ekfPred(t);                  % Jacobian for hfun.
    else
      Jy = 0.5;
      % Jy = cos(mu_ekfPred(t))/2;               % Jacobian for hfun.
      % Jy = 2*mu_ekfPred(t)/4;
    end;
    M = R + Jy*PPred(t)*Jy';                     % Innovations covariance.
    K = PPred(t)*Jy'*inv(M);                     % Kalman gain.
    mu_ekf(t) = mu_ekfPred(t) + K*(y(t)-yPred(t));
    P_ekf(t)  = PPred(t) - K*Jy*PPred(t);

    % Full Unscented Kalman Filter step
    % =================================
    [mu_ukf(t),P_ukf(t)] = ukf(mu_ukf(t-1),P_ukf(t-1),[],Q,'ukf_ffun',y(t),R,'ukf_hfun',t,alpha,beta,kappa);

  end;   % End of t loop.
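
  % NOTE: the plain particle filter and the particle filter with an MCMC move
  % below both use the transition prior p(x(t) | x(t-1)) as proposal. For that
  % choice the unnormalised importance weight of each particle reduces to its
  % likelihood,
  %
  %     w(t,i)  proportional to  p(y(t) | x(t,i)) = N(y(t); hfun(x(t,i),t), sigma),
  %
  % so the weight computation only has to evaluate the Gaussian density of the
  % innovation y(t) - yPred(t,i), up to a constant factor that cancels when the
  % weights are normalised.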

  %%%%%%%%%%%%%%%  PERFORM SEQUENTIAL MONTE CARLO  %%%%%%%%%%%%%%%%%%%%%%%
  %%%%%%%%%%%%%%%  ==============================  %%%%%%%%%%%%%%%%%%%%%%%

  % INITIALISATION:
  % ===============
  xparticle_pf = ones(T,N);       % These are the particles for the estimate
                                  % of x. Note that there's no need to store
                                  % them for all t. We're only doing this to
                                  % show you all the nice plots at the end.
  xparticlePred_pf = ones(T,N);   % One-step-ahead predicted values of the states.
  yPred_pf = ones(T,N);           % One-step-ahead predicted values of y.
  w = ones(T,N);                  % Importance weights.

  disp(' ');
  tic;                            % Initialize timer for benchmarking.

  for t=2:T,
    fprintf('run = %i / %i :  PF : t = %i / %i  \r',j,no_of_runs,t,T);
    fprintf('\n')

    % PREDICTION STEP:
    % ================
    % We use the transition prior as proposal.
    for i=1:N,
      xparticlePred_pf(t,i) = feval('ffun',xparticle_pf(t-1,i),t) + gengamma(g1,g2);
    end;

    % EVALUATE IMPORTANCE WEIGHTS:
    % ============================
    % For our choice of proposal, the importance weights are given by:
    for i=1:N,
      yPred_pf(t,i) = feval('hfun',xparticlePred_pf(t,i),t);
      lik = inv(sqrt(sigma)) * exp(-0.5*inv(sigma)*((y(t)-yPred_pf(t,i))^(2))) ...
            + 1e-99;                            % Deal with ill-conditioning.
      w(t,i) = lik;
    end;
    w(t,:) = w(t,:)./sum(w(t,:));               % Normalise the weights.

    % SELECTION STEP:
    % ===============
    % Here, we give you the choice to try three different types of
    % resampling algorithms. Note that the code for these algorithms
    % applies to any problem!
    if resamplingScheme == 1
      outIndex = residualR(1:N,w(t,:)');        % Residual resampling.
    elseif resamplingScheme == 2
      outIndex = systematicR(1:N,w(t,:)');      % Systematic resampling.
    else
      outIndex = multinomialR(1:N,w(t,:)');     % Multinomial resampling.
    end;
    xparticle_pf(t,:) = xparticlePred_pf(t,outIndex);   % Keep particles with
                                                        % resampled indices.
  end;   % End of t loop.

  time_pf(j) = toc;    % How long did this take?

  %%%%%%%%%%%%%  PERFORM SEQUENTIAL MONTE CARLO WITH MCMC  %%%%%%%%%%%%%%%
  %%%%%%%%%%%%%  ========================================  %%%%%%%%%%%%%%%

  % INITIALISATION:
  % ===============
  xparticle_pfMC = ones(T,N);       % These are the particles for the estimate
                                    % of x. Note that there's no need to store
                                    % them for all t. We're only doing this to
                                    % show you all the nice plots at the end.
  xparticlePred_pfMC = ones(T,N);   % One-step-ahead predicted values of the states.
  yPred_pfMC = ones(T,N);           % One-step-ahead predicted values of y.
  w = ones(T,N);                    % Importance weights.
  previousXMC = ones(T,N);          % Particles at the previous time step.
  previousXResMC = ones(T,N);       % Resampled previousX.

  disp(' ');
  tic;                              % Initialize timer for benchmarking.

  for t=2:T,
    fprintf('run = %i / %i :  PF-MCMC : t = %i / %i  \r',j,no_of_runs,t,T);
    fprintf('\n')

    % PREDICTION STEP:
    % ================
    % We use the transition prior as proposal.
    for i=1:N,
      xparticlePred_pfMC(t,i) = feval('ffun',xparticle_pfMC(t-1,i),t) + gengamma(g1,g2);
    end;
    previousXMC(t,:) = xparticle_pfMC(t-1,:);   % Store the particles at t-1.

    % EVALUATE IMPORTANCE WEIGHTS:
    % ============================
    % For our choice of proposal, the importance weights are given by:
    for i=1:N,
      yPred_pfMC(t,i) = feval('hfun',xparticlePred_pfMC(t,i),t);
      lik = inv(sqrt(sigma)) * exp(-0.5*inv(sigma)*((y(t)-yPred_pfMC(t,i))^(2))) ...
            + 1e-99;                            % Deal with ill-conditioning.
      w(t,i) = lik;
    end;
    w(t,:) = w(t,:)./sum(w(t,:));               % Normalise the weights.

    % SELECTION STEP:
    % ===============
    % Here, we give you the choice to try three different types of
    % resampling algorithms. Note that the code for these algorithms
    % applies to any problem!
    if resamplingScheme == 1
      outIndex = residualR(1:N,w(t,:)');        % Residual resampling.
    elseif resamplingScheme == 2
      outIndex = systematicR(1:N,w(t,:)');      % Systematic resampling.
    else
      outIndex = multinomialR(1:N,w(t,:)');     % Multinomial resampling.
    end;
    xparticle_pfMC(t,:) = xparticlePred_pfMC(t,outIndex);   % Keep particles with
                                                            % resampled indices.
    previousXResMC(t,:) = previousXMC(t,outIndex);          % Resample particles at t-1.

    % METROPOLIS-HASTINGS STEP:
    % =========================
    u = rand(N,1);
    accepted = 0;
    rejected = 0;
    for i=1:N,
      xProp   = feval('ffun',previousXResMC(t,i),t) + gengamma(g1,g2);
      mProp   = feval('hfun',xProp,t);
      likProp = inv(sqrt(sigma)) * exp(-0.5*inv(sigma)*((y(t)-mProp)^(2))) + 1e-99;
      m   = feval('hfun',xparticle_pfMC(t,i),t);
      lik = inv(sqrt(sigma)) * exp(-0.5*inv(sigma)*((y(t)-m)^(2))) + 1e-99;
      acceptance = min(1,likProp/lik);
      if u(i,1) <= acceptance
        xparticle_pfMC(t,i) = xProp;
        accepted = accepted+1;
      else
        xparticle_pfMC(t,i) = xparticle_pfMC(t,i);
        rejected = rejected+1;
      end;
    end;

  end;   % End of t loop.

  time_pfMC(j) = toc;    % How long did this take?
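
  % NOTE on the Metropolis-Hastings step above: each move proposes a fresh draw
  % from the transition prior, xProp = ffun(previousXResMC(t,i),t) + Gamma noise.
  % With the filtering posterior p(x(t) | x(t-1), y(t)) as target and the
  % transition prior as proposal, the prior terms cancel in the MH ratio, so the
  % acceptance probability reduces to the likelihood ratio
  %
  %     acceptance = min(1, p(y(t) | xProp) / p(y(t) | x(t,i))),
  %
  % which is what min(1, likProp/lik) computes.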

  %%%%%%%%%%%%%%%  PERFORM SEQUENTIAL MONTE CARLO  %%%%%%%%%%%%%%%%%%%%%%%
  %%%%%%%%%%%%%%%  ========  EKF proposal  =======  %%%%%%%%%%%%%%%%%%%%%%

  % INITIALISATION:
  % ===============
  xparticle_pfekf = ones(T,N);       % These are the particles for the estimate
                                     % of x. Note that there's no need to store
                                     % them for all t. We're only doing this to
                                     % show you all the nice plots at the end.
  Pparticle_pfekf = P0*ones(T,N);    % Particles for the covariance of x.
  xparticlePred_pfekf = ones(T,N);   % One-step-ahead predicted values of the states.
  PparticlePred_pfekf = ones(T,N);   % One-step-ahead predicted values of P.
  yPred_pfekf = ones(T,N);           % One-step-ahead predicted values of y.
  w = ones(T,N);                     % Importance weights.
  muPred_pfekf = ones(T,1);          % EKF O-s-a estimate of the mean of the states.
  PPred_pfekf = ones(T,1);           % EKF O-s-a estimate of the variance of the states.
  mu_pfekf = ones(T,1);              % EKF estimate of the mean of the states.
  P_pfekf = P0*ones(T,1);            % EKF estimate of the variance of the states.

  disp(' ');
  tic;                               % Initialize timer for benchmarking.

  for t=2:T,
    fprintf('run = %i / %i :  PF-EKF : t = %i / %i  \r',j,no_of_runs,t,T);
    fprintf('\n')

    % PREDICTION STEP:
    % ================
    % We use the EKF as proposal.
    for i=1:N,
      muPred_pfekf(t) = feval('ffun',xparticle_pfekf(t-1,i),t);
      Jx = 0.5;                                      % Jacobian for ffun.
      PPred_pfekf(t) = Q_pfekf + Jx*Pparticle_pfekf(t-1,i)*Jx';
      yPredTmp = feval('hfun',muPred_pfekf(t),t);
      if t<=30,
        Jy = 2*0.2*muPred_pfekf(t);                  % Jacobian for hfun.
      else