
📄 tracker.cc

📁 This is software for visual SLAM.
💻 C++ (.cc)
📖 Page 1 of 3
      v6LastUpdate = v6Update;
    };

  if(mbDraw)
    {
      glPointSize(6);
      glEnable(GL_BLEND);
      glEnable(GL_POINT_SMOOTH);
      glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
      glBegin(GL_POINTS);
      for(vector<TrackerData*>::reverse_iterator it = vIterationSet.rbegin();
          it != vIterationSet.rend();
          it++)
        {
          if(! (*it)->bFound)
            continue;
          glColor(gavLevelColors[(*it)->nSearchLevel]);
          glVertex((*it)->v2Image);
        }
      glEnd();
      glDisable(GL_BLEND);
    }

  // Update the current keyframe with info on what was found in the frame.
  // Strictly speaking this is unnecessary to do every frame, it'll only be
  // needed if the KF gets added to MapMaker. Do it anyway.
  // Export pose to current keyframe:
  mCurrentKF.se3CfromW = mse3CamFromWorld;

  // Record successful measurements. Use the KeyFrame-Measurement struct for this.
  mCurrentKF.mMeasurements.clear();
  for(vector<TrackerData*>::iterator it = vIterationSet.begin();
      it != vIterationSet.end();
      it++)
    {
      if(! (*it)->bFound)
        continue;
      Measurement m;
      m.v2RootPos = (*it)->v2Found;
      m.nLevel = (*it)->nSearchLevel;
      m.bSubPix = (*it)->bDidSubPix;
      mCurrentKF.mMeasurements[& ((*it)->Point)] = m;
    }

  // Finally, find the mean scene depth from tracked features
  {
    double dSum = 0;
    double dSumSq = 0;
    int nNum = 0;
    for(vector<TrackerData*>::iterator it = vIterationSet.begin();
        it != vIterationSet.end();
        it++)
      if((*it)->bFound)
        {
          double z = (*it)->v3Cam[2];
          dSum += z;
          dSumSq += z*z;
          nNum++;
        };
    if(nNum > 20)
      {
        mCurrentKF.dSceneDepthMean = dSum/nNum;
        mCurrentKF.dSceneDepthSigma = sqrt((dSumSq / nNum) - (mCurrentKF.dSceneDepthMean) * (mCurrentKF.dSceneDepthMean));
      }
  }
}

// Find points in the image. Uses the PatchFinder struct stored in TrackerData
int Tracker::SearchForPoints(vector<TrackerData*> &vTD, int nRange, int nSubPixIts)
{
  int nFound = 0;
  for(unsigned int i=0; i<vTD.size(); i++)   // for each point..
    {
      // First, attempt a search at pixel locations which are FAST corners.
      // (PatchFinder::FindPatchCoarse)
      TrackerData &TD = *vTD[i];
      PatchFinder &Finder = TD.Finder;
      Finder.MakeTemplateCoarseCont(TD.Point);
      if(Finder.TemplateBad())
        {
          TD.bInImage = TD.bPotentiallyVisible = TD.bFound = false;
          continue;
        }
      manMeasAttempted[Finder.GetLevel()]++;  // Stats for tracking quality assessment

      bool bFound =
        Finder.FindPatchCoarse(ir(TD.v2Image), mCurrentKF, nRange);
      TD.bSearched = true;
      if(!bFound)
        {
          TD.bFound = false;
          continue;
        }

      TD.bFound = true;
      TD.dSqrtInvNoise = (1.0 / Finder.GetLevelScale());

      nFound++;
      manMeasFound[Finder.GetLevel()]++;

      // Found the patch in coarse search - are Sub-pixel iterations wanted too?
      if(nSubPixIts > 0)
        {
          TD.bDidSubPix = true;
          Finder.MakeSubPixTemplate();
          bool bSubPixConverges = Finder.IterateSubPixToConvergence(mCurrentKF, nSubPixIts);
          if(!bSubPixConverges)
            { // If subpix doesn't converge, the patch location is probably very dubious!
              TD.bFound = false;
              nFound--;
              manMeasFound[Finder.GetLevel()]--;
              continue;
            }
          TD.v2Found = Finder.GetSubPixPos();
        }
      else
        {
          TD.v2Found = Finder.GetCoarsePosAsVector();
          TD.bDidSubPix = false;
        }
    }
  return nFound;
};

// Calculate a pose update 6-vector from a bunch of image measurements.
// User-selectable M-Estimator.
// Normally this robustly estimates a sigma-squared for all the measurements
// to reduce outlier influence, but this can be overridden if
// dOverrideSigma is positive. Also, bMarkOutliers set to true
// records any instances of a point being marked an outlier measurement
// by the Tukey MEstimator.
Vector<6> Tracker::CalcPoseUpdate(vector<TrackerData*> vTD, double dOverrideSigma, bool bMarkOutliers)
{
  // Which M-estimator are we using?
  int nEstimator = 0;
  static gvar3<string> gvsEstimator("TrackerMEstimator", "Tukey", SILENT);
  if(*gvsEstimator == "Tukey")
    nEstimator = 0;
  else if(*gvsEstimator == "Cauchy")
    nEstimator = 1;
  else if(*gvsEstimator == "Huber")
    nEstimator = 2;
  else
    {
      cout << "Invalid TrackerMEstimator, choices are Tukey, Cauchy, Huber" << endl;
      nEstimator = 0;
      *gvsEstimator = "Tukey";
    };

  // Find the covariance-scaled reprojection error for each measurement.
  // Also, store the square of these quantities for M-Estimator sigma squared estimation.
  vector<double> vdErrorSquared;
  for(unsigned int f=0; f<vTD.size(); f++)
    {
      TrackerData &TD = *vTD[f];
      if(!TD.bFound)
        continue;
      TD.v2Error_CovScaled = TD.dSqrtInvNoise * (TD.v2Found - TD.v2Image);
      vdErrorSquared.push_back(TD.v2Error_CovScaled * TD.v2Error_CovScaled);
    };

  // No valid measurements? Return null update.
  if(vdErrorSquared.size() == 0)
    return (make_Vector, 0,0,0,0,0,0);

  // What is the distribution of errors?
  double dSigmaSquared;
  if(dOverrideSigma > 0)
    dSigmaSquared = dOverrideSigma; // Bit of a waste having stored the vector of square errors in this case!
  else
    {
      if (nEstimator == 0)
        dSigmaSquared = Tukey::FindSigmaSquared(vdErrorSquared);
      else if(nEstimator == 1)
        dSigmaSquared = Cauchy::FindSigmaSquared(vdErrorSquared);
      else
        dSigmaSquared = Huber::FindSigmaSquared(vdErrorSquared);
    }

  // The TooN WLSCholesky class handles reweighted least squares.
  // It just needs errors and jacobians.
  WLSCholesky<6> wls;
  wls.add_prior(100.0); // Stabilising prior
  for(unsigned int f=0; f<vTD.size(); f++)
    {
      TrackerData &TD = *vTD[f];
      if(!TD.bFound)
        continue;
      Vector<2> &v2 = TD.v2Error_CovScaled;
      double dErrorSq = v2 * v2;
      double dWeight;

      if(nEstimator == 0)
        dWeight = Tukey::Weight(dErrorSq, dSigmaSquared);
      else if(nEstimator == 1)
        dWeight = Cauchy::Weight(dErrorSq, dSigmaSquared);
      else
        dWeight = Huber::Weight(dErrorSq, dSigmaSquared);

      // Inlier/outlier accounting, only really works for cut-off estimators such as Tukey.
      if(dWeight == 0.0)
        {
          if(bMarkOutliers)
            TD.Point.nMEstimatorOutlierCount++;
          continue;
        }
      else
        if(bMarkOutliers)
          TD.Point.nMEstimatorInlierCount++;

      Matrix<2,6> &m26Jac = TD.m26Jacobian;
      wls.add_df(v2[0], TD.dSqrtInvNoise * m26Jac[0], dWeight); // These two lines are currently
      wls.add_df(v2[1], TD.dSqrtInvNoise * m26Jac[1], dWeight); // the slowest bit of pose estimation
    }

  wls.compute();
  return wls.get_mu();
}

// Just add the current velocity to the current pose.
// N.b. this doesn't actually use time in any way, i.e. it assumes
// a one-frame-per-second camera. Skipped frames etc
// are not handled properly here.
void Tracker::ApplyMotionModel()
{
  mse3StartPos = mse3CamFromWorld;
  mse3CamFromWorld = SE3::exp(mv6CameraVelocity) * mse3StartPos;
};

// The motion model is entirely the tracker's, and is kept as a decaying
// constant velocity model.
void Tracker::UpdateMotionModel()
{
  SE3 se3NewFromOld = mse3CamFromWorld * mse3StartPos.inverse();
  Vector<6> v6Motion = SE3::ln(se3NewFromOld);
  Vector<6> v6OldVel = mv6CameraVelocity;

  mv6CameraVelocity = 0.9 * (0.5 * v6Motion + 0.5 * v6OldVel);
  mdVelocityMagnitude = sqrt(mv6CameraVelocity * mv6CameraVelocity);

  // Also make an estimate of this which has been scaled by the mean scene depth.
  // This is used to decide if we should use a coarse tracking stage.
  // We can tolerate more translational vel when far away from scene!
  Vector<6> v6 = mv6CameraVelocity;
  v6.slice<0,3>() *= 1.0 / mCurrentKF.dSceneDepthMean;
  mdMSDScaledVelocityMagnitude = sqrt(v6*v6);
}

// Time to add a new keyframe? The MapMaker handles most of this.
void Tracker::AddNewKeyFrame()
{
  mMapMaker.AddKeyFrame(mCurrentKF);
  mnLastKeyFrameDropped = mnFrame;
}

// Some heuristics to decide if tracking is any good, for this frame.
// This influences decisions to add key-frames, and eventually
// causes the tracker to attempt relocalisation.
void Tracker::AssessTrackingQuality()
{
  int nTotalAttempted = 0;
  int nTotalFound = 0;
  int nLargeAttempted = 0;
  int nLargeFound = 0;

  for(int i=0; i<LEVELS; i++)
    {
      nTotalAttempted += manMeasAttempted[i];
      nTotalFound += manMeasFound[i];
      if(i>=2) nLargeAttempted += manMeasAttempted[i];
      if(i>=2) nLargeFound += manMeasFound[i];
    }

  if(nTotalFound == 0 || nTotalAttempted == 0)
    mTrackingQuality = BAD;
  else
    {
      double dTotalFracFound = (double) nTotalFound / nTotalAttempted;
      double dLargeFracFound;
      if(nLargeAttempted > 10)
        dLargeFracFound = (double) nLargeFound / nLargeAttempted;
      else
        dLargeFracFound = dTotalFracFound;

      static gvar3<double> gvdQualityGood("Tracker.TrackingQualityGood", 0.3, SILENT);
      static gvar3<double> gvdQualityLost("Tracker.TrackingQualityLost", 0.13, SILENT);

      if(dTotalFracFound > *gvdQualityGood)
        mTrackingQuality = GOOD;
      else if(dLargeFracFound < *gvdQualityLost)
        mTrackingQuality = BAD;
      else
        mTrackingQuality = DODGY;
    }

  if(mTrackingQuality == DODGY)
    {
      // Further heuristics to see if it's actually bad, not just dodgy...
      // If the camera pose estimate has run miles away, it's probably bad.
      if(mMapMaker.IsDistanceToNearestKeyFrameExcessive(mCurrentKF))
        mTrackingQuality = BAD;
    }

  if(mTrackingQuality==BAD)
    mnLostFrames++;
  else
    mnLostFrames = 0;
}

string Tracker::GetMessageForUser()
{
  return mMessageForUser.str();
}

ImageRef TrackerData::irImageSize;  // Static member of TrackerData lives here
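Note: the M-estimator helpers called by CalcPoseUpdate above (Tukey::FindSigmaSquared, Tukey::Weight, and the Cauchy/Huber variants) are defined elsewhere in the project, not in tracker.cc. The sketch below is a minimal, self-contained illustration of the Tukey case only, assuming the standard biweight cut-off form and a simple median-based scale estimate; the struct name, tuning constant, and details are illustrative and do not reproduce the project's actual implementation.

// Illustrative sketch only -- standard Tukey biweight, not the project's MEstimator code.
#include <algorithm>
#include <cmath>
#include <vector>

struct TukeySketch
{
  // Weight for one squared, covariance-scaled error given a sigma-squared.
  // Errors whose square exceeds sigma-squared get zero weight (hard cut-off),
  // which is what lets the caller count them as outliers.
  static double Weight(double dErrorSquared, double dSigmaSquared)
  {
    if(dErrorSquared > dSigmaSquared)
      return 0.0;
    double d = 1.0 - dErrorSquared / dSigmaSquared;
    return d * d;
  }

  // Simple robust scale estimate: median of the squared errors, inflated by
  // the usual Tukey tuning constant so typical inliers keep non-zero weight.
  static double FindSigmaSquared(std::vector<double> vdErrorSquared)
  {
    if(vdErrorSquared.empty())
      return 1e-10;
    size_t nMid = vdErrorSquared.size() / 2;
    std::nth_element(vdErrorSquared.begin(), vdErrorSquared.begin() + nMid,
                     vdErrorSquared.end());
    double dMedianSquared = vdErrorSquared[nMid];
    const double dTuning = 4.6851;   // standard Tukey biweight tuning constant
    return std::max(dTuning * dTuning * dMedianSquared, 1e-10);
  }
};

The Cauchy and Huber options differ only in the weight formula (roughly 1/(1 + e^2/sigma^2) and a unit weight that decays beyond the threshold, respectively) and have no hard zero cut-off, which is why the in-code comment notes that the inlier/outlier accounting "only really works for cut-off estimators such as Tukey".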
