⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 srifilter.cpp

📁 linux的gps应用
💻 CPP
📖 第 1 页 / 共 3 页
字号:
#pragma ident "$Id: SRIFilter.cpp 327 2006-11-30 18:37:30Z ehagen $"//============================================================================////  This file is part of GPSTk, the GPS Toolkit.////  The GPSTk is free software; you can redistribute it and/or modify//  it under the terms of the GNU Lesser General Public License as published//  by the Free Software Foundation; either version 2.1 of the License, or//  any later version.////  The GPSTk is distributed in the hope that it will be useful,//  but WITHOUT ANY WARRANTY; without even the implied warranty of//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the//  GNU Lesser General Public License for more details.////  You should have received a copy of the GNU Lesser General Public//  License along with GPSTk; if not, write to the Free Software Foundation,//  Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA//  //  Copyright 2004, The University of Texas at Austin////============================================================================//============================================================================////This software developed by Applied Research Laboratories at the University of//Texas at Austin, under contract to an agency or agencies within the U.S. //Department of Defense. The U.S. Government retains all rights to use,//duplicate, distribute, disclose, or release this software. ////Pursuant to DoD Directive 523024 //// DISTRIBUTION STATEMENT A: This software has been approved for public //                           release, distribution is unlimited.////=============================================================================/** * @file SRIFilter.cpp * Implementation of class SRIFilter. * class SRIFilter implements the square root information matrix form of the * Kalman filter. * * Reference: "Factorization Methods for Discrete Sequential Estimation," *             G.J. Bierman, Academic Press, 1977. 
*///------------------------------------------------------------------------------------
// GPSTk includes
#include "SRIFilter.hpp"
#include "RobustStats.hpp"
#include "StringUtils.hpp"

using namespace std;

namespace gpstk
{
//------------------------------------------------------------------------------------
// empty constructor
// Initializes members via defaults(); presumably sets control parameters
// (iteration/convergence limits, do* flags) to their default values -- see defaults().
SRIFilter::SRIFilter(void)
   throw()     // NOTE(review): dynamic exception specs are deprecated (removed in C++17)
{
   defaults();
}

//------------------------------------------------------------------------------------
// constructor given the dimension N.
// Allocates an NxN information matrix R (zeroed), a length-N information
// state Z (zeroed), and an N-element Namelist.
SRIFilter::SRIFilter(const unsigned int N)
   throw()
{
   defaults();
   R = Matrix<double>(N,N,0.0);
   Z = Vector<double>(N,0.0);
   names = Namelist(N);
}

//------------------------------------------------------------------------------------
// constructor given a Namelist, its dimension determines the SRI dimension.
// An empty Namelist yields a default (zero-sized) SRIFilter.
SRIFilter::SRIFilter(const Namelist& NL)
   throw()
{
   defaults();
   if(NL.size() <= 0) return;
   R = Matrix<double>(NL.size(),NL.size(),0.0);
   Z = Vector<double>(NL.size(),0.0);
   names = NL;
}

//------------------------------------------------------------------------------------
// explicit constructor - throw if the dimensions are inconsistent.
// R must be square, and R, Z and the Namelist must all share the same dimension.
SRIFilter::SRIFilter(const Matrix<double>& Rin,
                     const Vector<double>& Zin,
                     const Namelist& NLin)
   throw(MatrixException)
{
   defaults();
   if(Rin.rows() != Rin.cols() ||
      Rin.rows() != Zin.size() ||
      Rin.rows() != NLin.size()) {
      using namespace StringUtils;
      MatrixException me("Invalid input dimensions: R is "
         + asString<int>(Rin.rows()) + "x"
         + asString<int>(Rin.cols()) + ", Z has length "
         + asString<int>(Zin.size()) + ", and NL has length "
         + asString<int>(NLin.size())
         );
      GPSTK_THROW(me);
   }
   R = Rin;
   Z = Zin;
   names = NLin;
}

//------------------------------------------------------------------------------------
// operator=
// Member-wise copy of the SRI data (R, Z, names), all control parameters and
// flags, and the accumulated statistics; returns *this.
SRIFilter& SRIFilter::operator=(const SRIFilter& right)
   throw()
{
   R = right.R;
   Z = right.Z;
   names = right.names;
   iterationsLimit = right.iterationsLimit;
   convergenceLimit = right.convergenceLimit;
   divergenceLimit = right.divergenceLimit;
   doWeight = right.doWeight;
   doRobust = right.doRobust;
   doLinearize = right.doLinearize;
   doSequential = right.doSequential;
   doVerbose = right.doVerbose;
   valid = right.valid;
   number_iterations = right.number_iterations;
   number_batches = right.number_batches;
   rms_convergence = right.rms_convergence;
   condition_number = right.condition_number;
   Xsave = right.Xsave;
   return *this;
}

//------------------------------------------------------------------------------------
// SRIF (Kalman) measurement update, or least squares update
// Returns (whitened) residuals in D
// @param H  partials matrix (M x N), M = number of data, N = state dimension
// @param D  data vector (length M); on output, the post-update residuals
// @param CM measurement covariance (M x M), or the sentinel SRINullMatrix
// NB &CM == &SRINullMatrix (an address comparison) marks "no measurement
// covariance supplied" -- presumably SRINullMatrix is the default argument
// declared in the header; verify in SRIFilter.hpp.
void SRIFilter::measurementUpdate(const Matrix<double>& H,
                                  Vector<double>& D,
                                  const Matrix<double>& CM)
   throw(MatrixException,VectorException)
{
   // validate dimensions: H must match the SRI state size and the data length,
   // and CM (when supplied) must be M x M
   if(H.cols() != R.cols() || H.rows() != D.size() ||
      (&CM != &SRINullMatrix && (CM.rows() != D.size() || CM.cols() != D.size())) ) {
      using namespace StringUtils;
      MatrixException me("Invalid input dimensions:\n  SRI is "
         + asString<int>(R.rows()) + "x"
         + asString<int>(R.cols()) + ",\n  Partials is "
         + asString<int>(H.rows()) + "x"
         + asString<int>(H.cols()) + ",\n  Data has length "
         + asString<int>(D.size())
         );
      if(&CM != &SRINullMatrix) me.addText(",  and Cov is "
         + asString<int>(CM.rows()) + "x"
         + asString<int>(CM.cols()));
      GPSTK_THROW(me);
   }
   try {
      Matrix<double> P(H);
      Cholesky<double> Ch;
         // whiten partials and data
         // (Ch.L is presumably the lower-triangular Cholesky factor of CM;
         // multiplying by its inverse whitens the measurement equation)
      if(&CM != &SRINullMatrix) {
         Matrix<double> L;
         Ch(CM);
         L = inverse(Ch.L);
         P = L * P;
         D = L * D;
      }
         // update *this with the whitened information
      SrifMU(R, Z, P, D);
         // un-whiten residuals
      if(&CM != &SRINullMatrix) {
         D = Ch.L * D;
      }
   }
   catch(MatrixException& me) { GPSTK_RETHROW(me); }
   catch(VectorException& ve) { GPSTK_RETHROW(ve); }
}

//------------------------------------------------------------------------------------
// SRIF (Kalman) measurement update, or least squares update
// Given data and measurement covariance, compute a solution and
// covariance using the appropriate least squares algorithm.
// @param D   Data vector, length M
//               Input:  raw data
//               Output: post-fit residuals
// @param X   Solution vector, length N
//               Input:  nominal solution X0 (zero when doLinearize is false)
//               Output: final solution
// @param Cov Covariance matrix, dimension (N,N)
//               Input:  (If doWeight is true) inverse measurement covariance
//                       or weight matrix(M,M)
//               Output: Solution covariance matrix (N,N)
// @param LSF Pointer to a function which is used to define the equation to be solved.
// LSF arguments are:
//            X  Nominal solution (input)
//            f  Values of the equation f(X) (length M) (output)
//            P  Partials matrix df/dX evaluated at X (dimension M,N) (output)
//        When doLinearize is false, LSF should ignore X and return the (constant)
//        partials matrix in P and zero in f.
//
// Return values: 0 ok
//               -1 Problem is underdetermined (M<N) // TD -- naturalized sol?
//               -2 Problem is singular
//               -3 Algorithm failed to converge
//               -4 Algorithm diverged
//
// Reference for robust least squares: Mason, Gunst and Hess,
// "Statistical Design and Analysis of Experiments," Wiley, New York, 1989, pg 593.
//
// Notes on the algorithm:
// Least squares, including linearized (iterative) and sequential processing.
// This class will solve the equation f(X) = D, a vector equation in which the
// solution vector X is of length N, and the data vector D is of length M.
// The function f(X) may be linear, in which case it is of the form
// P*X=D where P is a constant matrix,
// or non-linear, in which case it will be linearized by expanding about a given
// nominal solution X0:
//          df |
//          -- |     * dX = D - f(X0),
//          dX |X=X0
// where dX is defined as (X-X0), the new solution is X, and the partials matrix is
// P=(df/dX)|X=X0. Dimensions are P(M,N)*dX(N) = D(M) - f(X0)(M).
// Linearized problems are iterated until the solution converges (stops changing).
//
// The solution may be weighted by a measurement covariance matrix MCov,
// or weight matrix W (in which case MCov = inverse(W)). MCov must be non-singular.
//
// Options are to make the algorithm linearized (via the boolean input variable
// doLinearize) and/or sequential (doSequential).
//
//    - linearized. When doLinearize is true, the algorithm solves the linearized
//    version of the measurement equation (see above), rather than the simple
//    linear version P*X=D. Also when doLinearize is true, the code will iterate
//    (repeat until convergence) the linearized algorithm; if you don't want to
//    iterate, set the limit on the number of iterations to zero.
//    NB In this case, a solution must be found for each nominal solution
//    (i.e. the information matrix must be non-singular); otherwise there can be
//    no iteration.
//
//    - sequential. When doSequential is true, the class will save the accumulated
//    information from all the calls to Compute() and Add() since the last reset()
//    within the class. This means the resulting solution is determined by ALL the
//    data fed to the class since the last reset(). In this case the data is fed
//    to the algorithm in 'batches', which may be of any size.
//
//    NB When doLinearize is true, the information stored in the class has a
//    different interpretation than it does in the linear case.
//    Calling Solve(X,Cov) will NOT give the solution vector X, but rather the
//    latest update (X-X0) = (X-Xsave).
//
//    NB In the linear case, the result you get from sequentially processing
//    a large dataset in many small batches is identical to what you would get
//    by processing all the data in one big batch. This is NOT true in the
//    linearized case, because the information at each batch is dependent on the
//    nominal state. See the next comment.
//
//    NB Sequential, linearized LS really makes sense only when the state is
//    changing. It is difficult to get a good solution in this case with small
//    batches, because the stored information is dependent on the (final) state
//    solution at each batch. Start with a good nominal state, or with a large
//    batch of data that will produce one.
//
// The general Least Squares algorithm is:
//    0. set i=0.
//    1. If non-sequential, or if this is the first call, set R=0=z
//    2. Let X = X0 = initial nominal solution (input). if(linear, X0==0).
//    3. Save SRIsave=SRI and X0save=X0
//    4. start iteration i here.
//    5. Compute partials matrix P and f(X0) by calling LSF(X0,f,P).
//          if(linear), LSF returns the constant P and f(X0)=0.
//    6. Set R = SRIsave.R + P(T)*inverse(MCov)*P
//    7. Set z = SRIsave.Z + P(T)*inverse(MCov)*(D-f(X0))
//    8. (The measurement equation is now
//          P(X-X0save)=d-F(X0)
//       which is, in the linear case,
//          PX = d )
//    9. Compute RMS change in X: rms = ||X-X0||/N
// 10. Solve z=Rx to get
//          Cov = inverse(R)
//       and
//          X = X0save + inverse(R)*z [or in the linear case X = inverse(R)*z]
// 11. if(linear) goto quit
//          [else linearized]
// 12. increment the number of iterations
// 13. If rms > divergence limit, goto quit (failure).
// 14. If i > 1 and rms < convergence limit, goto quit (success)
// 15. If i (number of iterations) >= iteration limit, goto quit (failure)
// 16. Set X0 = X
// 17. Return to step 4.
// 18. quit: if(sequential and failed) set SRI=SRIsave.
//
int SRIFilter::leastSquaresEstimation(Vector<double>& D,
                                      Vector<double>& X,
                                      Matrix<double>& Cov,
                                      void (LSF)(Vector<double>& X,
                                                 Vector<double>& f,
                                                 Matrix<double>& P)
   )
   throw(MatrixException)
{
   int M = D.size();
   int N = R.rows();
   if(doVerbose) cout << "\nSRIFilter::leastSquaresUpdate : M,N are "
      << M << "," << N << endl;

   // errors -- reject degenerate or inconsistent problem setups up front
   if(N == 0) {
      MatrixException me("Called with zero-sized SRIFilter");
      GPSTK_THROW(me);
   }
   if(doLinearize && M < N) {
      MatrixException me(
            string("When linearizing, problem must not be underdetermined:\n")
            + string("   data dimension is ") + StringUtils::asString(M)
            + string(" while state dimension is ") + StringUtils::asString(N));
      GPSTK_THROW(me);
   }
   if(doSequential && R.rows() != X.size()) {
      using namespace StringUtils;
      MatrixException me("Sequential problem has inconsistent dimensions:\n  SRI is "
         + asString<int>(R.rows()) + "x"
         + asString<int>(R.cols()) + " while X has length "
         + asString<int>(X.size()));
      GPSTK_THROW(me);
   }
   if(doWeight && doRobust) {
      MatrixException me("Cannot have doWeight and doRobust both true.");
      GPSTK_THROW(me);
   }
   // TD disallow Robust and Linearized ?
   // TD disallow Robust and Sequential ?

   int i,j,iret;
   double big,small;
   Vector<double> f(M),Xsol(N),NominalX,Res(M),Wts(M,1.0),OldWts(M,1.0);
   Matrix<double> Partials(M,N),MeasCov(M,M);
   // apriori information, saved so it can be restored each iteration
   const Matrix<double> Rapriori(R);
   const Vector<double> Zapriori(Z);

   // save measurement covariance matrix
   if(doWeight) MeasCov=Cov;

   // NO ... this prevents you from giving it apriori information...
   // if the first time, clear the stored information
   //if(!doSequential || number_batches==0)
   //   zeroAll();

   // if sequential and not the first call, NominalX must be the last solution
   if(doSequential && number_batches != 0) X = Xsave;

   // nominal solution (zero in the purely linear case)
   if(!doLinearize) {
      if(X.size() != N) X=Vector<double>(N);
      X = 0.0;
   }
   NominalX = X;

   valid = false;
   condition_number = 0.0;
   rms_convergence = 0.0;
   number_iterations = 0;
   iret = 0;

   // iteration loop
   do {
      number_iterations++;

      // call LSF to get f(NominalX) and Partials(NominalX)
      LSF(NominalX,f,Partials);

      // Res will be both pre- and post-fit data residuals
      Res = D-f;
      if(doVerbose) {
         cout << "\nSRIFilter::leastSquaresUpdate :";
         if(doLinearize || doRobust)
            cout << " Iteration " << number_iterations;
         cout << endl;
         LabelledVector LNX(names,NominalX);
         LNX.message(" Nominal X:");
         cout << LNX << endl;
         cout << " Pre-fit data residuals:  "
            << fixed << setprecision(6) << Res << endl;
      }

      // build measurement covariance matrix for robust LS
      // (diagonal, from the current robust weights: cov_ii = 1/w_i^2)
      if(doRobust) {
         MeasCov = 0.0;
         for(i=0; i<M; i++) MeasCov(i,i) = 1.0 / (Wts(i)*Wts(i));
      }

      // restore apriori information
      if(number_iterations > 1) {
// NOTE(review): source chunk ends here (page 1 of 3); the body of
// leastSquaresEstimation continues on the next page.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -