📄 SIRFlt.cpp
/*
 * Bayes++ the Bayesian Filtering Library
 * Copyright (c) 2002 Michael Stevens
 * See accompanying Bayes++.htm for terms and conditions of use.
 *
 * $Header: /cvsroot/bayesclasses/Bayes++/BayesFilter/SIRFlt.cpp,v 1.31.2.6 2005/01/18 15:49:05 mistevens Exp $
 * $NoKeywords: $
 */

/*
 * Sampling Importance Resampling Filter.
 *
 * Bootstrap filter (Sequential Importance Resampling).
 */
#include "SIRFlt.hpp"
#include "matSup.hpp"
#include "models.hpp"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <limits>
#include <vector>

namespace {
	template <class scalar>
	inline scalar sqr(scalar x)
	// Square
	{
		return x*x;
	}
}//namespace

/* Filter namespace */
namespace Bayesian_filter
{
	using namespace Bayesian_filter_matrix;


Standard_resampler::Float
 Standard_resampler::resample (Resamples_t& presamples, std::size_t& uresamples, FM::DenseVec& w, SIR_random& r) const
/*
 * Standard resampler from [1]
 * Algorithm:
 *  A particle is chosen once for each time its cumulative weight intersects with a uniform random draw.
 *  Complexity O(n*log(n))
 *  This complexity is required to sort the uniform random draws made,
 *  which allows comparison of the two ordered lists w (cumulative) and ur (the sorted random draws).
 * Output:
 *  presamples number of times this particle should be resampled
 *  uresamples number of unique particles (number of non-zeros in presamples)
 *  w becomes a normalised cumulative sum
 * Side effects:
 *  A draw is made from 'r' for each particle
 */
{
	assert (presamples.size() == w.size());
	DenseVec::iterator wi, wi_end = w.end();
					// Normalised cumulative sum of likelihood weights (Kahan summation), and find the smallest weight
	Float wmin = std::numeric_limits<Float>::max();
	Float wcum = 0;
	{
		Float c = 0, y, t;
		for (wi = w.begin(); wi != wi_end; ++wi) {
			if (*wi < wmin) {
				wmin = *wi;
			}
			y = *wi - c;
			t = wcum + y;
			c = t - wcum - y;
			wcum = *wi = t;
		}
	}
	if (wmin < 0)		// bad weights
		error (Numeric_exception("negative weight"));
	if (wcum <= 0)		// bad cumulative weights (the previous check should actually prevent -ve)
		error (Numeric_exception("zero cumulative weight sum"));
					// Any numerical failure should cascade into the cumulative sum
	if (wcum != wcum)	// inequality due to NaN
		error (Numeric_exception("NaN cumulative weight sum"));

					// Sorted uniform random distribution [0..1) for each resample
	DenseVec ur(w.size());
	r.uniform_01(ur);
	std::sort (ur.begin(), ur.end());
	assert (ur[0] >= 0 && ur[ur.size()-1] < 1);	// very bad if random is incorrect
					// Scale ur to the cumulative sum
	ur *= wcum;
					// Resamples based on cumulative weights from sorted resample random values
	Resamples_t::iterator pri = presamples.begin();
	wi = w.begin();
	DenseVec::iterator ui = ur.begin(), ui_end = ur.end();
	std::size_t unique = 0;

	while (wi != wi_end)
	{
		std::size_t Pres = 0;	// assume P not resampled until we find out otherwise
		if (ui != ui_end && *ui < *wi)
		{
			++unique;
			do		// count resamples
			{
				++Pres;
				++ui;
			} while (ui != ui_end && *ui < *wi);
		}
		++wi;
		*pri++ = Pres;
	}
	assert (pri == presamples.end());	// must traverse all of P
	if (ui != ui_end)			// resample failed due to non-numeric weights
		error (Numeric_exception("weights are not numeric and cannot be resampled"));

	uresamples = unique;
	return wmin / wcum;
}
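/*
 * Illustration (not part of Bayes++): a self-contained sketch of the same merge
 * between a cumulative weight sum and sorted uniform draws, using std::mt19937
 * in place of the library's SIR_random helper, and plain summation where the
 * library uses Kahan. All names here are hypothetical, chosen for the example.
 */
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <random>
#include <vector>

std::vector<std::size_t> multinomial_counts (std::vector<double> w, std::mt19937& rng)
{
	const std::size_t n = w.size();
	double wcum = 0;		// cumulative sum of weights, formed in place
	for (double& wi : w) { wcum += wi; wi = wcum; }
	assert (wcum > 0);
					// n sorted uniform draws scaled onto [0..wcum)
	std::uniform_real_distribution<double> u01 (0., 1.);
	std::vector<double> ur (n);
	for (double& u : ur) u = u01(rng) * wcum;
	std::sort (ur.begin(), ur.end());
					// count the draws falling in each particle's cumulative interval
	std::vector<std::size_t> counts (n, 0);
	std::size_t ui = 0;
	for (std::size_t i = 0; i < n; ++i)
		while (ui < n && ur[ui] < w[i]) { ++counts[i]; ++ui; }
	return counts;			// counts sum to n; non-zero entries are the unique survivors
}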
Systematic_resampler::Float
 Systematic_resampler::resample (Resamples_t& presamples, std::size_t& uresamples, FM::DenseVec& w, SIR_random& r) const
/*
 * Systematic resample algorithm from [2]
 * Algorithm:
 *  A particle is chosen once for each time its cumulative weight intersects with an equidistant grid.
 *  A uniform random draw is chosen to position the grid within the cumulative weights.
 *  Complexity O(n)
 * Output:
 *  presamples number of times this particle should be resampled
 *  uresamples number of unique particles (number of non-zeros in presamples)
 *  w becomes a normalised cumulative sum
 * Side effects:
 *  A single draw is made from 'r'
 */
{
	std::size_t nParticles = presamples.size();
	assert (nParticles == w.size());
	DenseVec::iterator wi, wi_end = w.end();
					// Normalised cumulative sum of likelihood weights (Kahan summation), and find the smallest weight
	Float wmin = std::numeric_limits<Float>::max();
	Float wcum = 0;
	{
		Float c = 0, y, t;
		for (wi = w.begin(); wi != wi_end; ++wi) {
			if (*wi < wmin) {
				wmin = *wi;
			}
			y = *wi - c;
			t = wcum + y;
			c = t - wcum - y;
			wcum = *wi = t;
		}
	}
	if (wmin < 0)		// bad weights
		error (Numeric_exception("negative weight"));
	if (wcum <= 0)		// bad cumulative weights (the previous check should actually prevent -ve)
		error (Numeric_exception("total likelihood zero"));
					// Any numerical failure should cascade into the cumulative sum
	if (wcum != wcum)	// inequality due to NaN
		error (Numeric_exception("total likelihood numerical error"));

					// Stratified step
	Float wstep = wcum / Float(nParticles);

	DenseVec ur(1);		// single uniform for initialisation
	r.uniform_01(ur);
	assert (ur[0] >= 0 && ur[0] < 1);	// very bad if random is incorrect
					// Resamples based on cumulative weights
	Importance_resampler::Resamples_t::iterator pri = presamples.begin();
	wi = w.begin();
	std::size_t unique = 0;
	Float s = ur[0] * wstep;	// random initialisation

	while (wi != wi_end)
	{
		std::size_t Pres = 0;	// assume P not resampled until we find out otherwise
		if (s < *wi)
		{
			++unique;
			do		// count resamples
			{
				++Pres;
				s += wstep;
			} while (s < *wi);
		}
		++wi;
		*pri++ = Pres;
	}
	assert (pri == presamples.end());	// must traverse all of P

	uresamples = unique;
	return wmin / wcum;
}
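/*
 * Illustration (not part of Bayes++): the systematic counterpart of the sketch
 * above. A single draw positions an equidistant grid of n points over the
 * cumulative sum, so the whole resample is O(n) with one random draw, where
 * the standard resampler needs n draws and an O(n*log(n)) sort. The grid also
 * lowers resampling variance, which is why it is often preferred in practice.
 */
#include <cassert>
#include <cstddef>
#include <random>
#include <vector>

std::vector<std::size_t> systematic_counts (std::vector<double> w, std::mt19937& rng)
{
	const std::size_t n = w.size();
	double wcum = 0;		// cumulative sum of weights, formed in place
	for (double& wi : w) { wcum += wi; wi = wcum; }
	assert (wcum > 0);

	const double wstep = wcum / double(n);
	std::uniform_real_distribution<double> u01 (0., 1.);
	double s = u01(rng) * wstep;	// random grid offset in [0..wstep)

	std::vector<std::size_t> counts (n, 0);
	for (std::size_t i = 0; i < n; ++i)
		while (s < w[i]) { ++counts[i]; s += wstep; }
	return counts;			// exactly n grid points lie below wcum, so counts sum to n
}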
/*
 * SIR filter implementation
 */
const SIR_scheme::Float SIR_scheme::rougheningKinit = 1;	// use 1 std.dev. per sample as default roughening

SIR_scheme::SIR_scheme (std::size_t x_size, std::size_t s_size, SIR_random& random_helper) :
	Sample_state_filter (x_size, s_size),
	Sample_filter (x_size, s_size),
	random (random_helper),
	resamples (s_size), wir (s_size)
/*
 * Initialise the filter and set the size of things we know about
 */
{
	SIR_scheme::x_size = x_size;
	rougheningK = rougheningKinit;
}

SIR_scheme& SIR_scheme::operator= (const SIR_scheme& a)
/*
 * Optimise copy assignment to only copy the public filter state (particles and weights);
 * the random helper is not part of the filter state.
 * Precond: matrix size conformance
 */
{
	Sample_filter::operator= (a);
	stochastic_samples = a.stochastic_samples;
					// Copy weights
	wir_update = a.wir_update;
	return *this;
}

void SIR_scheme::init_S ()
/*
 * Initialise sampling
 *  Pre: S
 *  Post: stochastic_samples := samples in S
 */
{
	stochastic_samples = S.size2();
	std::fill (wir.begin(), wir.end(), Float(1));	// initial uniform weights
	wir_update = false;
}

SIR_scheme::Float SIR_scheme::update_resample (const Importance_resampler& resampler)
/*
 * Resample particles using weights and roughen
 * Pre : S represents the predicted distribution
 * Post: S represents the fused distribution, n_resampled from weighted_resample
 * Exceptions:
 *  Bayes_filter_exception from resampler
 *   unchanged: S, stochastic_samples
 * Return:
 *  lcond, the smallest normalised weight, which represents the conditioning of the resampling solution
 *  lcond == 1 if no resampling was performed
 *  This should be multiplied by the number of samples to get the likelihood function conditioning
 */
{
	Float lcond = 1;
	if (wir_update)		// resampling is only required if weights have been updated
	{
					// Resample based on likelihood weights
		std::size_t R_unique;
		lcond = resampler.resample (resamples, R_unique, wir, random);

					// No resampling exceptions: update S
		copy_resamples (S, resamples);
		stochastic_samples = R_unique;

		roughen ();	// roughen samples
		std::fill (wir.begin(), wir.end(), Float(1));	// resampling results in uniform weights
		wir_update = false;
	}
	return lcond;
}

void SIR_scheme::predict (Sampled_predict_model& f)
/*
 * Predict state posterior with sampled noise model
 *  Pre : S represents the prior distribution
 *  Post: S represents the predicted distribution, stochastic_samples := samples in S
 */
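/*
 * Illustration (not part of Bayes++): roughen(), called by update_resample
 * above, is defined beyond this excerpt. A common roughening scheme (due to
 * Gordon, Salmond and Smith) jitters each resampled particle with noise whose
 * standard deviation in state dimension d is K * E_d * N^(-1/D), where E_d is
 * the sample range of that dimension, N the particle count and D the state
 * dimension; rougheningK above plays the role of K. A hypothetical sketch of
 * that scheme, not the library's implementation:
 */
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <random>
#include <vector>

void roughen_sketch (std::vector<std::vector<double> >& particles, double K, std::mt19937& rng)
{
	const std::size_t N = particles.size();
	if (N < 2) return;
	const std::size_t D = particles[0].size();
	const double scale = K * std::pow (double(N), -1. / double(D));

	for (std::size_t d = 0; d < D; ++d)
	{				// sample range E_d of dimension d across all particles
		double lo = particles[0][d], hi = lo;
		for (const auto& p : particles) { lo = std::min(lo, p[d]); hi = std::max(hi, p[d]); }
		if (hi > lo)
		{			// add independent jitter, std.dev. K * E_d * N^(-1/D)
			std::normal_distribution<double> jitter (0., scale * (hi - lo));
			for (auto& p : particles) p[d] += jitter(rng);
		}
	}
}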