sirflt.cpp
 */
{
    // Predict particles S using supplied predict model
    const std::size_t nSamples = S.size2();
    for (std::size_t i = 0; i != nSamples; ++i) {
        FM::ColMatrix::Column Si(S,i);
        noalias(Si) = f.fw(Si);
    }
    stochastic_samples = S.size2();
}

void SIR_scheme::observe (Likelihood_observe_model& h, const Vec& z)
/*
 * Observation fusion using Likelihood at z
 * Pre : wir previous particle likelihood weights
 * Post: wir fused (multiplicative) particle likelihood weights
 */
{
    h.Lz (z);        // Observe likelihood at z

    // Weight particles, fused with previous weight
    const std::size_t nSamples = S.size2();
    for (std::size_t i = 0; i != nSamples; ++i) {
        wir[i] *= h.L (FM::column(S,i));
    }
    wir_update = true;
}

void SIR_scheme::observe_likelihood (const Vec& lw)
/*
 * Observation fusion directly from likelihood weights
 * lw may be smaller than the number of particles; weights for additional particles are assumed to be 1
 * Pre : wir previous particle likelihood weights
 * Post: wir fused (multiplicative) particle likelihood weights
 */
{
    // Weight particles, fused with previous weight
    Vec::const_iterator lw_end = lw.end();
    for (Vec::const_iterator lw_i = lw.begin(); lw_i != lw_end; ++lw_i) {
        wir[lw_i.index()] *= *lw_i;
    }
    wir_update = true;
}

void SIR_scheme::copy_resamples (ColMatrix& P, const Importance_resampler::Resamples_t& presamples)
/*
 * Update P by selectively copying presamples
 * Uses an in-place copying algorithm:
 *  First copy the live samples (those resampled) to the end of P
 *  Then replicate the live samples in-place
 */
{
    // reverse_copy_if live
    std::size_t si = P.size2(), livei = si;
    Importance_resampler::Resamples_t::const_reverse_iterator pri, pri_end = presamples.rend();
    for (pri = presamples.rbegin(); pri != pri_end; ++pri) {
        --si;
        if (*pri > 0) {
            --livei;
            noalias(FM::column(P,livei)) = FM::column(P,si);
        }
    }
    assert (si == 0);
    // Replicate live samples
    si = 0;
    Importance_resampler::Resamples_t::const_iterator pi, pi_end = presamples.end();
    for (pi = presamples.begin(); pi != pi_end; ++pi) {
        std::size_t res = *pi;
        if (res > 0) {
            do {
                noalias(FM::column(P,si)) = FM::column(P,livei);
                ++si; --res;
            } while (res > 0);
            ++livei;
        }
    }
    assert (si == P.size2());
    assert (livei == P.size2());
}
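/*
 * Illustrative trace (not part of the original source): with four particles
 * P = [p0 p1 p2 p3] and resample counts presamples = {2,0,0,2}, the reverse
 * pass packs the live columns p0 and p3 into the trailing columns, and the
 * forward pass then replicates them in-place, leaving P = [p0 p0 p3 p3].
 * The resample counts are assumed to sum to the number of particles, as the
 * trailing asserts require.
 */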
void SIR_scheme::roughen_minmax (ColMatrix& P, Float K) const
/*
 * Roughening
 *  Uses the algorithm from Ref[1] based on the max-min range of each state of P
 *  K is the scaling factor for the roughening noise
 *  unique_samples is unchanged as roughening is used to postprocess observe resamples
 * Numerical collapse of P
 *  P with very small or zero range results in minimal roughening
 * Exceptions:
 *  none
 *  unchanged: P
 */
{
    using namespace std;
    // Scale sigma by constant and state dimensions
    Float SigmaScale = K * pow (Float(P.size2()), -1/Float(x_size));

    // Find min and max states in all P; precondition: P not empty
    Vec xmin(x_size); noalias(xmin) = column(P,0);
    Vec xmax(x_size); noalias(xmax) = xmin;
    ColMatrix::iterator2 pi = P.begin2();
    while (pi != P.end2())        // Loop includes column 0 to simplify code
    {
        Vec::iterator mini = xmin.begin();
        Vec::iterator maxi = xmax.begin();
#ifdef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
        for (ColMatrix::iterator1 xpi = begin(pi, ublas::iterator2_tag()); xpi != end(pi, ublas::iterator2_tag()); ++xpi)
#else
        for (ColMatrix::iterator1 xpi = pi.begin(); xpi != pi.end(); ++xpi)
#endif
        {
            if (*xpi < *mini) *mini = Float(*xpi);        // ISSUE mixed type proxy assignment
            if (*xpi > *maxi) *maxi = Float(*xpi);
            ++mini; ++maxi;
        }
        ++pi;
    }
    // Roughening st.dev. from max-min range
    Vec rootq(x_size);
    rootq = xmax;
    noalias(rootq) -= xmin;
    rootq *= SigmaScale;
    // Apply roughening prediction based on scaled variance
    DenseVec n(x_size);
    for (pi = P.begin2(); pi != P.end2(); ++pi) {
        random.normal (n);        // independent zero mean normal
                                  // multiply elements by std dev
        for (DenseVec::iterator ni = n.begin(); ni != n.end(); ++ni) {
            *ni *= rootq[ni.index()];
        }
        ColMatrix::Column Pi(P,pi.index2());
        noalias(n) += Pi;         // add to P
        Pi = n;
    }
}
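/*
 * Explanatory note (derived from the code above): each state k of every particle
 * receives independent zero-mean normal noise with standard deviation
 *     sigma_k = K * (max_k - min_k) * N^(-1/d)
 * where N = P.size2() is the number of particles and d = x_size is the state
 * dimension, i.e. rootq = SigmaScale * (xmax - xmin).
 */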
/*
 * SIR implementation of a Kalman filter
 */
SIR_kalman_scheme::SIR_kalman_scheme (std::size_t x_size, std::size_t s_size, SIR_random& random_helper) :
    Sample_state_filter (x_size, s_size),
    Kalman_state_filter (x_size),
    SIR_scheme (x_size, s_size, random_helper),
    roughen_model (x_size, x_size, random_helper)
{
}

void SIR_kalman_scheme::init ()
/*
 * Initialise sampling from Kalman statistics
 * Pre : x,X
 * Post: x,X,S
 */
{
    // Samples at mean
    const std::size_t nSamples = S.size2();
    for (std::size_t i = 0; i != nSamples; ++i) {
        FM::ColMatrix::Column Si(S,i);
        noalias(Si) = x;
    }

    // Decorrelate initial state noise
    Matrix UD(x_size,x_size);
    Float rcond = UdUfactor (UD, X);
    rclimit.check_PSD(rcond, "Init X not PSD");

    // Sampled predict model for initial noise variance
    FM::identity (roughen_model.Fx);
    UdUseperate (roughen_model.G, roughen_model.q, UD);
    roughen_model.init_GqG ();
    // Predict using the model to apply the initial noise
    predict (roughen_model);

    SIR_scheme::init_S ();
}

void SIR_kalman_scheme::mean ()
/*
 * Update state mean
 * Pre : S
 * Post: x,S
 */
{
    // Mean of distribution: mean of particles
    x.clear();
    const std::size_t nSamples = S.size2();
    for (std::size_t i = 0; i != nSamples; ++i) {
        FM::ColMatrix::Column Si(S,i);
        x.plus_assign (Si);
    }
    x /= Float(S.size2());
}

Bayes_base::Float SIR_kalman_scheme::update_resample (const Importance_resampler& resampler)
/*
 * Modified SIR_scheme update implementation
 *  Updates the mean and covariance of the sampled distribution with update_statistics
 *  Normal numerical circumstances (such as all identical samples) may lead to
 *  an ill-conditioned X which is not PSD when factorised
 */
{
    Float lcond = SIR_scheme::update_resample(resampler);    // Resample particles

    update_statistics();    // Estimate sample mean and covariance

    return lcond;
}

void SIR_kalman_scheme::update_statistics ()
/*
 * Update Kalman statistics without resampling
 * Updates the mean and covariance estimate of the sampled distribution => mean and covariance of particles
 * Pre : S (s_size >= 1 enforced by state_filter base)
 * Post: x,X,S (X may be non PSD)
 *
 * Sample covariance := Sum_i [(S[i]-mean) * transpose(S[i]-mean)] / s_size
 *  This is the Maximum Likelihood (biased) estimate of covariance given samples
 *  with unknown (estimated) mean
 * Numerics
 *  No check is made for the conditioning of samples with regard to mean and covariance
 *  Extreme ranges or very large sample sizes will result in inaccuracy
 *  The covariance should, however, always remain PSD
 */
{
    mean ();
    X.clear();            // Covariance
    const std::size_t nSamples = S.size2();
    for (std::size_t i = 0; i != nSamples; ++i) {
        FM::ColMatrix::Column Si(S,i);
        X.plus_assign (FM::outer_prod(Si-x, Si-x));
    }
    X /= Float(nSamples);
}

void SIR_kalman_scheme::roughen_correlated (ColMatrix& P, Float K)
/*
 * Roughening
 *  Uses a roughening noise based on the covariance of P
 *  This is a more sophisticated algorithm than Ref[1] as it takes
 *  the correlation of P into account
 *  K is the scaling factor for the roughening noise
 * Numerical collapse of P
 *  Numerically, when the covariance of P is semi-definite (or close to it),
 *  X's UdU factorisation may be negative
 * Exceptions:
 *  Bayes_filter_exception due to collapse of P
 *  unchanged: P
 */
{
    using namespace std;
    // Scale variance by constant and state dimensions
    Float VarScale = sqr(K) * pow (Float(P.size2()), Float(-2.)/Float(x_size));

    update_statistics();    // Estimate sample mean and covariance

    // Decorrelate states
    Matrix UD(x_size,x_size);
    Float rcond = UdUfactor (UD, X);
    rclimit.check_PSD(rcond, "Roughening X not PSD");

    // Sampled predict model for roughening
    FM::identity (roughen_model.Fx);
    // Roughening prediction based on scaled variance
    UdUseperate (roughen_model.G, roughen_model.q, UD);
    roughen_model.q *= VarScale;
    roughen_model.init_GqG();
    // Predict using the roughening model
    predict (roughen_model);
}

}//namespace
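/*
 * Illustrative usage sketch (not part of the original source). It assumes the
 * application supplies concrete model and resampler objects: my_predict (a
 * sampled predict model), my_likelihood (a Likelihood_observe_model),
 * my_resampler (an Importance_resampler) and rnd (a SIR_random helper); these
 * names and the sizes used are hypothetical placeholders.
 *
 *   SIR_kalman_scheme filter (x_size, n_samples, rnd);
 *   filter.x = x0;                              // prior mean
 *   filter.X = X0;                              // prior covariance
 *   filter.init ();                             // draw initial particles about x using X
 *   for (;;) {                                  // one filter cycle per observation z
 *       filter.predict (my_predict);            // propagate each particle through the predict model
 *       filter.observe (my_likelihood, z);      // multiply weights by the likelihood of z
 *       filter.update_resample (my_resampler);  // resample particles and re-estimate x,X
 *   }
 */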