
📄 tracker.cc

📁 Tracker module for a visual SLAM system
💻 C++ (.cc)
📖 Page 1 of 3
void Tracker::TrailTracking_Start()
{
  mCurrentKF.MakeKeyFrame_Rest();  // This populates the Candidates list, which is Shi-Tomasi thresholded.
  mFirstKF = mCurrentKF;
  vector<pair<double,ImageRef> > vCornersAndSTScores;
  for(unsigned int i=0; i<mCurrentKF.aLevels[0].vCandidates.size(); i++)  // Copy candidates into a trivially sortable vector
    {                                                                     // so that we can choose the image corners with max ST score
      Candidate &c = mCurrentKF.aLevels[0].vCandidates[i];
      if(!mCurrentKF.aLevels[0].im.in_image_with_border(c.irLevelPos, MiniPatch::mnHalfPatchSize))
        continue;
      vCornersAndSTScores.push_back(pair<double,ImageRef>(-1.0 * c.dSTScore, c.irLevelPos)); // negative so highest score first in sorted list
    };
  sort(vCornersAndSTScores.begin(), vCornersAndSTScores.end());  // Sort according to Shi-Tomasi score
  int nToAdd = GV2.GetInt("MaxInitialTrails", 1000, SILENT);
  for(unsigned int i = 0; i<vCornersAndSTScores.size() && nToAdd > 0; i++)
    {
      if(!mCurrentKF.aLevels[0].im.in_image_with_border(vCornersAndSTScores[i].second, MiniPatch::mnHalfPatchSize))
        continue;
      Trail t;
      t.mPatch.SampleFromImage(vCornersAndSTScores[i].second, mCurrentKF.aLevels[0].im);
      t.irInitialPos = vCornersAndSTScores[i].second;
      t.irCurrentPos = t.irInitialPos;
      mlTrails.push_back(t);
      nToAdd--;
    }
  mPreviousFrameKF = mFirstKF;  // Always store the previous frame so married-matching can work.
}
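// The trail-advance routine below keeps a trail only if its match survives a
// forward-backward ("married match") consistency check. A minimal sketch of
// the idea, with FindBestMatch standing in (hypothetically) for
// MiniPatch::FindPatch:
//
//   ImageRef irFwd = irPrev;
//   bool bOK = FindBestMatch(patchFromPrevFrame, imCurrent, irFwd);     // previous -> current
//   ImageRef irBack = irFwd;
//   bOK = bOK && FindBestMatch(patchFromCurFrame, imPrevious, irBack);  // current -> previous
//   bOK = bOK && (irBack - irPrev).mag_squared() <= 2;                  // must land back within ~1.4 pixels of the start
//
// Trails whose matches do not survive the round trip are erased.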
// Steady-state trail tracking: Advance from the previous frame, remove duds.
int Tracker::TrailTracking_Advance()
{
  int nGoodTrails = 0;
  if(mbDraw)
    {
      glPointSize(5);
      glLineWidth(2);
      glEnable(GL_POINT_SMOOTH);
      glEnable(GL_LINE_SMOOTH);
      glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
      glEnable(GL_BLEND);
      glBegin(GL_LINES);
    }

  MiniPatch BackwardsPatch;
  Level &lCurrentFrame = mCurrentKF.aLevels[0];
  Level &lPreviousFrame = mPreviousFrameKF.aLevels[0];

  for(list<Trail>::iterator i = mlTrails.begin(); i!=mlTrails.end();)
    {
      list<Trail>::iterator next = i; next++;
      Trail &trail = *i;
      ImageRef irStart = trail.irCurrentPos;
      ImageRef irEnd = irStart;
      bool bFound = trail.mPatch.FindPatch(irEnd, lCurrentFrame.im, 10, lCurrentFrame.vCorners);
      if(bFound)
        {
          // Also find backwards in a married-matches check
          BackwardsPatch.SampleFromImage(irEnd, lCurrentFrame.im);
          ImageRef irBackWardsFound = irEnd;
          bFound = BackwardsPatch.FindPatch(irBackWardsFound, lPreviousFrame.im, 10, lPreviousFrame.vCorners);
          if((irBackWardsFound - irStart).mag_squared() > 2)
            bFound = false;

          trail.irCurrentPos = irEnd;
          nGoodTrails++;  // NB: still counted even if the backwards check just failed; the trail is nonetheless erased below.
        }
      if(mbDraw)
        {
          if(!bFound)
            glColor3f(0,1,1); // Failed trails flash cyan before dying.
          else
            glColor3f(1,1,0);
          glVertex(trail.irInitialPos);
          if(bFound) glColor3f(1,0,0);
          glVertex(trail.irCurrentPos);
        }
      if(!bFound) // Erase from list of trails if not found this frame.
        {
          mlTrails.erase(i);
        }
      i = next;
    }
  if(mbDraw)
    glEnd();
  mPreviousFrameKF = mCurrentKF;
  return nGoodTrails;
}

// TrackMap is the main purpose of the Tracker.
// It first projects all map points into the image to find a potentially-visible-set (PVS);
// Then it tries to find some points of the PVS in the image;
// Then it updates camera pose according to any points found.
// Above may happen twice if a coarse tracking stage is performed.
// Finally it updates the tracker's current-frame-KeyFrame struct with any
// measurements made.
// A lot of low-level functionality is split into helper classes:
// class TrackerData handles the projection of a MapPoint and stores intermediate results;
// class PatchFinder finds a projected MapPoint in the current-frame-KeyFrame.
void Tracker::TrackMap()
{
  // Some accounting which will be used for tracking quality assessment:
  for(int i=0; i<LEVELS; i++)
    manMeasAttempted[i] = manMeasFound[i] = 0;

  // The Potentially-Visible-Set (PVS) is split into pyramid levels.
  vector<TrackerData*> avPVS[LEVELS];
  for(int i=0; i<LEVELS; i++)
    avPVS[i].reserve(500);

  // For all points in the map..
  for(unsigned int i=0; i<mMap.vpPoints.size(); i++)
    {
      MapPoint &p = *(mMap.vpPoints[i]);
      // Ensure that this map point has an associated TrackerData struct.
      if(!p.pTData) p.pTData = new TrackerData(&p);
      TrackerData &TData = *p.pTData;

      // Project according to current view, and if it's not in the image, skip.
      TData.Project(mse3CamFromWorld, mCamera);
      if(!TData.bInImage)
        continue;

      // Calculate camera projection derivatives of this point.
      TData.GetDerivsUnsafe(mCamera);

      // And check what the PatchFinder (included in TrackerData) makes of the mappoint in this view..
      TData.nSearchLevel = TData.Finder.CalcSearchLevelAndWarpMatrix(TData.Point, mse3CamFromWorld, TData.m2CamDerivs);
      if(TData.nSearchLevel == -1)
        continue;   // a negative search pyramid level indicates an inappropriate warp for this view, so skip.

      // Otherwise, this point is suitable to be searched in the current image! Add to the PVS.
      TData.bSearched = false;
      TData.bFound = false;
      avPVS[TData.nSearchLevel].push_back(&TData);
    };

  // Next: A large degree of faffing about and deciding which points are going to be measured!
  // First, randomly shuffle the individual levels of the PVS.
  for(int i=0; i<LEVELS; i++)
    random_shuffle(avPVS[i].begin(), avPVS[i].end());

  // The next two data structs contain the list of points which will next
  // be searched for in the image, and then used in pose update.
  vector<TrackerData*> vNextToSearch;
  vector<TrackerData*> vIterationSet;

  // Tunable parameters to do with the coarse tracking stage:
  static gvar3<unsigned int> gvnCoarseMin("Tracker.CoarseMin", 20, SILENT);          // Min number of large-scale features for coarse stage
  static gvar3<unsigned int> gvnCoarseMax("Tracker.CoarseMax", 60, SILENT);          // Max number of large-scale features for coarse stage
  static gvar3<unsigned int> gvnCoarseRange("Tracker.CoarseRange", 30, SILENT);      // Pixel search radius for coarse features
  static gvar3<int> gvnCoarseSubPixIts("Tracker.CoarseSubPixIts", 8, SILENT);        // Max sub-pixel iterations for coarse features
  static gvar3<int> gvnCoarseDisabled("Tracker.DisableCoarse", 0, SILENT);           // Set this to 1 to disable coarse stage (except after recovery)
  static gvar3<double> gvdCoarseMinVel("Tracker.CoarseMinVelocity", 0.006, SILENT);  // Speed above which coarse stage is used.
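  // The gvar3<> declarations above are GVars3 runtime-tunable parameters: each
  // is registered under a dotted name with a default value, and can be
  // overridden at startup from a settings file or adjusted live at the GVars
  // console. For example, settings lines such as the following (assuming the
  // usual GVars3 "name=value" syntax) would widen the coarse search:
  //
  //   Tracker.CoarseRange=60
  //   Tracker.CoarseMax=100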
  unsigned int nCoarseMax = *gvnCoarseMax;
  unsigned int nCoarseRange = *gvnCoarseRange;

  mbDidCoarse = false;

  // Set of heuristics to check if we should do a coarse tracking stage.
  bool bTryCoarse = true;
  if(*gvnCoarseDisabled ||
     mdMSDScaledVelocityMagnitude < *gvdCoarseMinVel ||
     nCoarseMax == 0)
    bTryCoarse = false;
  if(mbJustRecoveredSoUseCoarse)
    {
      bTryCoarse = true;
      nCoarseMax *= 2;
      nCoarseRange *= 2;
      mbJustRecoveredSoUseCoarse = false;
    };

  // If we do want to do a coarse stage, also check that there's enough high-level
  // PV map points. We use the lowest-res two pyramid levels (LEVELS-1 and LEVELS-2),
  // with preference to LEVELS-1.
  if(bTryCoarse && avPVS[LEVELS-1].size() + avPVS[LEVELS-2].size() > *gvnCoarseMin)
    {
      // Now, fill the vNextToSearch struct with an appropriate number of
      // TrackerDatas corresponding to coarse map points! This depends on how many
      // there are in different pyramid levels compared to CoarseMin and CoarseMax.

      if(avPVS[LEVELS-1].size() <= nCoarseMax)
        { // Fewer than CoarseMax in LEVELS-1? then take all of them, and remove them from the PVS list.
          vNextToSearch = avPVS[LEVELS-1];
          avPVS[LEVELS-1].clear();
        }
      else
        { // ..otherwise choose nCoarseMax at random, again removing from the PVS list.
          for(unsigned int i=0; i<nCoarseMax; i++)
            vNextToSearch.push_back(avPVS[LEVELS-1][i]);
          avPVS[LEVELS-1].erase(avPVS[LEVELS-1].begin(), avPVS[LEVELS-1].begin() + nCoarseMax);
        }

      // If we didn't source enough from LEVELS-1, get some from LEVELS-2... same as above.
      if(vNextToSearch.size() < nCoarseMax)
        {
          unsigned int nMoreCoarseNeeded = nCoarseMax - vNextToSearch.size();
          if(avPVS[LEVELS-2].size() <= nMoreCoarseNeeded)
            {
              vNextToSearch = avPVS[LEVELS-2];
              avPVS[LEVELS-2].clear();
            }
          else
            {
              for(unsigned int i=0; i<nMoreCoarseNeeded; i++)
                vNextToSearch.push_back(avPVS[LEVELS-2][i]);
              avPVS[LEVELS-2].erase(avPVS[LEVELS-2].begin(), avPVS[LEVELS-2].begin() + nMoreCoarseNeeded);
            }
        }

      // Now go and attempt to find these points in the image!
      unsigned int nFound = SearchForPoints(vNextToSearch, nCoarseRange, *gvnCoarseSubPixIts);
      vIterationSet = vNextToSearch;  // Copy over into the to-be-optimised list.
      if(nFound >= *gvnCoarseMin)  // Were enough found to do any meaningful optimisation?
        {
          mbDidCoarse = true;
          for(int iter = 0; iter<10; iter++) // If so: do ten Gauss-Newton pose update iterations.
            {
              if(iter != 0)
                { // Re-project the points on all but the first iteration.
                  for(unsigned int i=0; i<vIterationSet.size(); i++)
                    if(vIterationSet[i]->bFound)
                      vIterationSet[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
                }
              for(unsigned int i=0; i<vIterationSet.size(); i++)
                if(vIterationSet[i]->bFound)
                  vIterationSet[i]->CalcJacobian();
              double dOverrideSigma = 0.0;
              // Hack: force the MEstimator to be pretty brutal
              // with outliers beyond the fifth iteration.
              if(iter > 5)
                dOverrideSigma = 1.0;

              // Calculate and apply the pose update...
              Vector<6> v6Update =
                CalcPoseUpdate(vIterationSet, dOverrideSigma);
              mse3CamFromWorld = SE3::exp(v6Update) * mse3CamFromWorld;
            };
        }
    };
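  // On dOverrideSigma: CalcPoseUpdate (not shown on this page) re-weights each
  // residual with a robust M-estimator whose scale is normally estimated from
  // the data; passing a small fixed sigma instead makes the estimator clamp
  // down hard on outliers in the late iterations. As an illustration only (not
  // the exact weighting code used here), a Tukey biweight with scale sigma
  // would down-weight a residual r like this:
  //
  //   double TukeyWeight(double r, double sigma)
  //   {
  //     double c = 4.6851 * sigma;   // standard Tukey tuning constant
  //     if(fabs(r) > c) return 0.0;  // beyond c: outright outlier, zero weight
  //     double u = r / c;
  //     double w = 1.0 - u * u;
  //     return w * w;                // decays smoothly to zero at |r| = c
  //   }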
  // So, at this stage, we may or may not have done a coarse tracking stage.
  // Now do the fine tracking stage. This needs many more points!

  int nFineRange = 10;  // Pixel search range for the fine stage.
  if(mbDidCoarse)       // Can use a tighter search if the coarse stage was already done.
    nFineRange = 5;

  // What patches shall we use this time? The high-level ones are quite important,
  // so do all of these, with sub-pixel refinement.
  {
    int l = LEVELS - 1;
    for(unsigned int i=0; i<avPVS[l].size(); i++)
      avPVS[l][i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
    SearchForPoints(avPVS[l], nFineRange, 8);
    for(unsigned int i=0; i<avPVS[l].size(); i++)
      vIterationSet.push_back(avPVS[l][i]);  // Again, plonk all searched points onto the (maybe already populated) vIterationSet.
  };

  // All the other levels: Initially, put all remaining potentially visible patches onto vNextToSearch.
  vNextToSearch.clear();
  for(int l=LEVELS - 2; l>=0; l--)
    for(unsigned int i=0; i<avPVS[l].size(); i++)
      vNextToSearch.push_back(avPVS[l][i]);

  // But we haven't got CPU to track _all_ patches in the map - arbitrarily limit
  // ourselves to 1000, and choose these randomly.
  static gvar3<int> gvnMaxPatchesPerFrame("Tracker.MaxPatchesPerFrame", 1000, SILENT);
  int nFinePatchesToUse = *gvnMaxPatchesPerFrame - vIterationSet.size();
  if((int) vNextToSearch.size() > nFinePatchesToUse)
    {
      random_shuffle(vNextToSearch.begin(), vNextToSearch.end());
      vNextToSearch.resize(nFinePatchesToUse); // Chop!
    };

  // If we did a coarse tracking stage: re-project and find derivs of fine points
  if(mbDidCoarse)
    for(unsigned int i=0; i<vNextToSearch.size(); i++)
      vNextToSearch[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);

  // Find fine points in image:
  SearchForPoints(vNextToSearch, nFineRange, 0);
  // And attach them all to the end of the optimisation-set.
  for(unsigned int i=0; i<vNextToSearch.size(); i++)
    vIterationSet.push_back(vNextToSearch[i]);

  // Again, ten Gauss-Newton pose update iterations.
  Vector<6> v6LastUpdate;
  Zero(v6LastUpdate);
  for(int iter = 0; iter<10; iter++)
    {
      bool bNonLinearIteration; // For a bit of time-saving: don't do full nonlinear
                                // reprojection at every iteration - it really isn't necessary!
      if(iter == 0 || iter == 4 || iter == 9)
        bNonLinearIteration = true;   // Even this is probably overkill; the reason we do many
      else                            // iterations is for M-Estimator convergence rather than
        bNonLinearIteration = false;  // linearisation effects.

      if(iter != 0)   // Either way: first iteration doesn't need projection update.
        {
          if(bNonLinearIteration)
            {
              for(unsigned int i=0; i<vIterationSet.size(); i++)
                if(vIterationSet[i]->bFound)
                  vIterationSet[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
            }
          else
            {
              for(unsigned int i=0; i<vIterationSet.size(); i++)
                if(vIterationSet[i]->bFound)
                  vIterationSet[i]->LinearUpdate(v6LastUpdate);
            };
        }

      if(bNonLinearIteration)
        for(unsigned int i=0; i<vIterationSet.size(); i++)
          if(vIterationSet[i]->bFound)
            vIterationSet[i]->CalcJacobian();

      // Again, an M-Estimator hack beyond the fifth iteration.
      double dOverrideSigma = 0.0;
      if(iter > 5)
        dOverrideSigma = 16.0;

      // Calculate and update pose; also store update vector for linear iteration updates.
      Vector<6> v6Update =
        CalcPoseUpdate(vIterationSet, dOverrideSigma, iter==9);
      mse3CamFromWorld = SE3::exp(v6Update) * mse3CamFromWorld;
      // (Listing cut off here by the page break; continued on page 2 of 3.)
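The page breaks off inside the fine-stage update loop, before CalcPoseUpdate itself appears. For orientation, below is a minimal, self-contained sketch of the kind of robustly weighted Gauss-Newton step such a routine performs: accumulate the M-estimator-weighted normal equations (J^T W J) x = -J^T W r over all found measurements, then solve the 6x6 system for the pose twist. The names (Measurement, SolvePoseStep, TukeyWeight) and the plain-array linear algebra are hypothetical stand-ins for illustration, not this listing's actual TooN-based code.

#include <cmath>
#include <utility>
#include <vector>

// One image measurement: a 2-vector reprojection residual and its 2x6
// Jacobian with respect to the camera-pose twist.
struct Measurement
{
    double r[2];     // reprojection residual in pixels
    double J[2][6];  // d(residual)/d(pose twist)
};

// Tukey biweight: zero weight beyond c, smooth decay inside.
static double TukeyWeight(double r, double sigma)
{
    double c = 4.6851 * sigma;
    if (std::fabs(r) > c) return 0.0;
    double u = r / c, w = 1.0 - u * u;
    return w * w;
}

// One Gauss-Newton step: accumulate the weighted normal equations
// (J^T W J) x = -J^T W r over all measurements, then solve the 6x6
// system by Gaussian elimination with partial pivoting. Writes the
// 6-vector pose update into x. (A real implementation must also guard
// against degenerate geometry making the system singular.)
void SolvePoseStep(const std::vector<Measurement>& vMeas, double dSigma, double x[6])
{
    double A[6][6] = {}, b[6] = {};
    for (const Measurement& m : vMeas)
    {
        double dErr = std::sqrt(m.r[0] * m.r[0] + m.r[1] * m.r[1]);
        double w = TukeyWeight(dErr, dSigma);  // robust weight for this measurement
        for (int i = 0; i < 6; i++)
        {
            for (int j = 0; j < 6; j++)
                A[i][j] += w * (m.J[0][i] * m.J[0][j] + m.J[1][i] * m.J[1][j]);
            b[i] -= w * (m.J[0][i] * m.r[0] + m.J[1][i] * m.r[1]);
        }
    }
    // Forward elimination with partial pivoting.
    for (int col = 0; col < 6; col++)
    {
        int piv = col;
        for (int row = col + 1; row < 6; row++)
            if (std::fabs(A[row][col]) > std::fabs(A[piv][col])) piv = row;
        for (int j = 0; j < 6; j++) std::swap(A[col][j], A[piv][j]);
        std::swap(b[col], b[piv]);
        for (int row = col + 1; row < 6; row++)
        {
            double f = A[row][col] / A[col][col];
            for (int j = col; j < 6; j++) A[row][j] -= f * A[col][j];
            b[row] -= f * b[col];
        }
    }
    // Back substitution.
    for (int i = 5; i >= 0; i--)
    {
        double s = b[i];
        for (int j = i + 1; j < 6; j++) s -= A[i][j] * x[j];
        x[i] = s / A[i][i];
    }
}

The solved 6-vector would then be applied exactly as in the loop above, mse3CamFromWorld = SE3::exp(v6Update) * mse3CamFromWorld: a small left-composed incremental motion in the camera frame.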
