
v3d_optimization.cpp

A version of a sparse bundle adjustment implementation (C++) with adjustable intrinsics and distortion parameters.
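The listing below is the core of one Levenberg-Marquardt iteration of SparseLevenbergOptimizer::minimize(): it accumulates the blocks of the Gauss-Newton normal matrix from the per-measurement Jacobians Ak, Bk and Ck, damps the diagonal with lambda, solves the sparse normal equations via SuiteSparse's LDL routines, and accepts or rejects the step based on the gain ratio rho. The block names follow the usual bundle-adjustment layout, with A/B/C denoting the three parameter groups (typically cameras, points, and shared intrinsics/distortion). A sketch of the assumed structure, up to the sign convention used for the residual vector e:

$$
J^\top J \;=\;
\begin{pmatrix}
U & W & X\\
W^\top & V & Y\\
X^\top & Y^\top & Z
\end{pmatrix},
\qquad
\bigl(J^\top J + \lambda I\bigr)\,\delta \;=\; J^\top e,
$$

where the multiplicative variant (USE_MULTIPLICATIVE_UPDATE) scales each diagonal entry by (1 + lambda) instead of adding lambda to it.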
               for (int k = 0; k < _nMeasurements; ++k)
               {
                  int const i = _correspondingParamA[k] - _nNonvaryingA;
                  if (i < 0) continue;
                  multiply_At_A(Ak[k], U);
                  addMatricesIP(U, Ui[i]);
               } // end for (k)
            } // end if

            if (nVaryingB > 0)
            {
               // Compute Vj
               Matrix<double> V(_paramDimensionB, _paramDimensionB);

               for (int j = 0; j < nVaryingB; ++j) makeZeroMatrix(Vj[j]);

               for (int k = 0; k < _nMeasurements; ++k)
               {
                  int const j = _correspondingParamB[k] - _nNonvaryingB;
                  if (j < 0) continue;
                  multiply_At_A(Bk[k], V);
                  addMatricesIP(V, Vj[j]);
               } // end for (k)
            } // end if

            if (nVaryingC > 0)
            {
               Matrix<double> ZZ(_paramDimensionC, _paramDimensionC);
               Matrix<double> Zsum(_paramDimensionC, _paramDimensionC);
               makeZeroMatrix(Zsum);

               for (int k = 0; k < _nMeasurements; ++k)
               {
                  multiply_At_A(Ck[k], ZZ);
                  addMatricesIP(ZZ, Zsum);
               } // end for (k)

               // Ignore the non-varying parameters
               for (int i = 0; i < nVaryingC; ++i)
                  for (int j = 0; j < nVaryingC; ++j)
                     Z[i][j] = Zsum[i+_nNonvaryingC][j+_nNonvaryingC];
            } // end if

            if (nVaryingA > 0 && nVaryingB > 0)
            {
               for (int n = 0; n < Wn.count(); ++n) makeZeroMatrix(Wn[n]);

               Matrix<double> W(_paramDimensionA, _paramDimensionB);

               for (int k = 0; k < _nMeasurements; ++k)
               {
                  int const n = _jointIndexW[k];
                  if (n >= 0)
                  {
                     int const i0 = _jointNonzerosW[n].first;
                     int const j0 = _jointNonzerosW[n].second;

                     multiply_At_B(Ak[k], Bk[k], W);
                     addMatricesIP(W, Wn[n]);
                  } // end if
               } // end for (k)
            } // end if

            if (nVaryingA > 0 && nVaryingC > 0)
            {
               Matrix<double> XX(_paramDimensionA, _paramDimensionC);
               makeZeroMatrix(X);

               for (int k = 0; k < _nMeasurements; ++k)
               {
                  int const i = _correspondingParamA[k] - _nNonvaryingA;
                  // Ignore the non-varying parameters
                  if (i < 0) continue;

                  multiply_At_B(Ak[k], Ck[k], XX);

                  for (int r = 0; r < _paramDimensionA; ++r)
                     for (int c = 0; c < nVaryingC; ++c)
                        X[r+i*_paramDimensionA][c] += XX[r][c+_nNonvaryingC];
               } // end for (k)
            } // end if

            if (nVaryingB > 0 && nVaryingC > 0)
            {
               Matrix<double> YY(_paramDimensionB, _paramDimensionC);
               makeZeroMatrix(Y);

               for (int k = 0; k < _nMeasurements; ++k)
               {
                  int const j = _correspondingParamB[k] - _nNonvaryingB;
                  // Ignore the non-varying parameters
                  if (j < 0) continue;

                  multiply_At_B(Bk[k], Ck[k], YY);

                  for (int r = 0; r < _paramDimensionB; ++r)
                     for (int c = 0; c < nVaryingC; ++c)
                        Y[r+j*_paramDimensionB][c] += YY[r][c+_nNonvaryingC];
               } // end for (k)
            } // end if
            if (currentIteration == 0)
            {
               // Initialize lambda as tau*max(JtJ[i][i])
               double maxEl = -1e30;
               if (nVaryingA > 0)
               {
                  for (int i = 0; i < nVaryingA; ++i)
                     for (int l = 0; l < _paramDimensionA; ++l)
                        maxEl = std::max(maxEl, Ui[i][l][l]);
               }
               if (nVaryingB > 0)
               {
                  for (int j = 0; j < nVaryingB; ++j)
                     for (int l = 0; l < _paramDimensionB; ++l)
                        maxEl = std::max(maxEl, Vj[j][l][l]);
               }
               if (nVaryingC > 0)
               {
                  for (int l = 0; l < nVaryingC; ++l)
                     maxEl = std::max(maxEl, Z[l][l]);
               }

               lambda = tau * maxEl;

               if (optimizerVerbosenessLevel >= 2)
                  cout << "SparseLevenbergOptimizer: initial lambda = " << lambda << endl;
            } // end if (currentIteration == 0)
         } // end if (computeDerivatives)

         for (int i = 0; i < nVaryingA; ++i)
         {
            for (int l = 0; l < _paramDimensionA; ++l) diagUi[i][l] = Ui[i][l][l];
         } // end for (i)

         for (int j = 0; j < nVaryingB; ++j)
         {
            for (int l = 0; l < _paramDimensionB; ++l) diagVj[j][l] = Vj[j][l][l];
         } // end for (j)

         for (int l = 0; l < nVaryingC; ++l) diagZ[l] = Z[l][l];

         // Augment the diagonals with lambda (either by the standard additive update or by multiplication).
#if !defined(USE_MULTIPLICATIVE_UPDATE)
         for (int i = 0; i < nVaryingA; ++i)
            for (unsigned l = 0; l < _paramDimensionA; ++l)
               Ui[i][l][l] += lambda;

         for (int j = 0; j < nVaryingB; ++j)
            for (unsigned l = 0; l < _paramDimensionB; ++l)
               Vj[j][l][l] += lambda;

         for (unsigned l = 0; l < nVaryingC; ++l)
            Z[l][l] += lambda;
#else
         for (int i = 0; i < nVaryingA; ++i)
            for (unsigned l = 0; l < _paramDimensionA; ++l)
               Ui[i][l][l] = std::max(Ui[i][l][l] * (1.0 + lambda), 1e-15);

         for (int j = 0; j < nVaryingB; ++j)
            for (unsigned l = 0; l < _paramDimensionB; ++l)
               Vj[j][l][l] = std::max(Vj[j][l][l] * (1.0 + lambda), 1e-15);

         for (unsigned l = 0; l < nVaryingC; ++l)
            Z[l][l] = std::max(Z[l][l] * (1.0 + lambda), 1e-15);
#endif

         this->fillSparseJtJ(Ui, Vj, Wn, Z, X, Y);

         bool success = true;
         double rho = 0.0;
         {
            int const nCols = _JtJ_Parent.size();
            int const nnz   = _JtJ.getNonzeroCount();
            int const lnz   = _JtJ_Lp.back();

            vector<int> Li(lnz);
            vector<double> Lx(lnz);
            vector<double> D(nCols), Y(nCols);
            vector<int> workPattern(nCols), workFlag(nCols);

            int * colStarts = (int *)_JtJ.getColumnStarts();
            int * rowIdxs   = (int *)_JtJ.getRowIndices();
            double * values = _JtJ.getValues();

            int const d = ldl_numeric(nCols, colStarts, rowIdxs, values,
                                      &_JtJ_Lp[0], &_JtJ_Parent[0], &_JtJ_Lnz[0],
                                      &Li[0], &Lx[0], &D[0],
                                      &Y[0], &workPattern[0], &workFlag[0],
                                      NULL, NULL);

            if (d == nCols)
            {
               ldl_perm(nCols, &deltaPerm[0], &Jt_e[0], &_perm_JtJ[0]);
               ldl_lsolve(nCols, &deltaPerm[0], &_JtJ_Lp[0], &Li[0], &Lx[0]);
               ldl_dsolve(nCols, &deltaPerm[0], &D[0]);
               ldl_ltsolve(nCols, &deltaPerm[0], &_JtJ_Lp[0], &Li[0], &Lx[0]);
               ldl_permt(nCols, &delta[0], &deltaPerm[0], &_perm_JtJ[0]);
            }
            else
            {
               if (optimizerVerbosenessLevel >= 2)
                  cout << "SparseLevenbergOptimizer: LDL decomposition failed. Increasing lambda." << endl;
               success = false;
            }
         }

         if (success)
         {
            double const deltaSqrLength = sqrNorm_L2(delta);

            if (optimizerVerbosenessLevel >= 2)
               cout << "SparseLevenbergOptimizer: ||delta||^2 = " << deltaSqrLength << endl;

            double const paramLength = this->getParameterLength();
            if (this->applyUpdateStoppingCriteria(paramLength, sqrt(deltaSqrLength)))
            {
               status = LEVENBERG_OPTIMIZER_SMALL_UPDATE;
               goto end;
            }

            // Copy the updates from delta to the respective arrays
            int pos = 0;

            for (int i = 0; i < _nNonvaryingA; ++i) makeZeroVector(deltaAi[i]);
            for (int i = _nNonvaryingA; i < _nParametersA; ++i)
               for (int l = 0; l < _paramDimensionA; ++l, ++pos)
                  deltaAi[i][l] = delta[pos];

            for (int j = 0; j < _nNonvaryingB; ++j) makeZeroVector(deltaBj[j]);
            for (int j = _nNonvaryingB; j < _nParametersB; ++j)
               for (int l = 0; l < _paramDimensionB; ++l, ++pos)
                  deltaBj[j][l] = delta[pos];

            makeZeroVector(deltaC);
            for (int l = _nNonvaryingC; l < _paramDimensionC; ++l, ++pos)
               deltaC[l] = delta[pos];

            saveAllParameters();
            if (nVaryingA > 0) updateParametersA(deltaAi);
            if (nVaryingB > 0) updateParametersB(deltaBj);
            if (nVaryingC > 0) updateParametersC(deltaC);

            this->evalResidual(residuals2);
            for (int k = 0; k < _nMeasurements; ++k)
               scaleVectorIP(weights[k], residuals2[k]);

            double const newErr = squaredResidual(residuals2);
            rho = err - newErr;

            if (optimizerVerbosenessLevel >= 2)
               cout << "SparseLevenbergOptimizer: |new residual|^2 = " << newErr << endl;

#if !defined(USE_MULTIPLICATIVE_UPDATE)
            double const denom1 = lambda * deltaSqrLength;
#else
            double denom1 = 0.0f;
            for (int i = _nNonvaryingA; i < _nParametersA; ++i)
               for (int l = 0; l < _paramDimensionA; ++l)
                  denom1 += deltaAi[i][l] * deltaAi[i][l] * diagUi[i-_nNonvaryingA][l];

            for (int j = _nNonvaryingB; j < _nParametersB; ++j)
               for (int l = 0; l < _paramDimensionB; ++l)
                  denom1 += deltaBj[j][l] * deltaBj[j][l] * diagVj[j-_nNonvaryingB][l];

            for (int l = _nNonvaryingC; l < _paramDimensionC; ++l)
               denom1 += deltaC[l] * deltaC[l] * diagZ[l-_nNonvaryingC];

            denom1 *= lambda;
#endif
            double const denom2 = innerProduct(delta, Jt_e);

            rho = rho / (denom1 + denom2);

            if (optimizerVerbosenessLevel >= 2)
               cout << "SparseLevenbergOptimizer: rho = " << rho
                    << " denom1 = " << denom1 << " denom2 = " << denom2 << endl;
         } // end if (success)

         if (success && rho > 0)
         {
            if (optimizerVerbosenessLevel >= 2)
               cout << "SparseLevenbergOptimizer: Improved solution - decreasing lambda." << endl;
            // Improvement in the new solution
            decreaseLambda(rho);
            computeDerivatives = true;
         }
         else
         {
            if (optimizerVerbosenessLevel >= 2)
               cout << "SparseLevenbergOptimizer: Inferior solution - increasing lambda." << endl;
            restoreAllParameters();
            increaseLambda();
            computeDerivatives = false;

            // Restore diagonal elements in Ui, Vj and Z.
            for (int i = 0; i < nVaryingA; ++i)
            {
               for (int l = 0; l < _paramDimensionA; ++l) Ui[i][l][l] = diagUi[i][l];
            } // end for (i)

            for (int j = 0; j < nVaryingB; ++j)
            {
               for (int l = 0; l < _paramDimensionB; ++l) Vj[j][l][l] = diagVj[j][l];
            } // end for (j)

            for (int l = 0; l < nVaryingC; ++l) Z[l][l] = diagZ[l];
         } // end if
      } // end for

     end:;

      if (optimizerVerbosenessLevel >= 2)
         cout << "Leaving SparseLevenbergOptimizer::minimize()." << endl;
   } // end SparseLevenbergOptimizer::minimize()

#endif // defined(V3DLIB_ENABLE_SUITESPARSE)

} // end namespace V3D
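The factorization and solve above follow the standard call pattern of the LDL package from SuiteSparse: ldl_numeric builds the L*D*L' factors from the column-compressed upper triangle of J^T J, and ldl_lsolve / ldl_dsolve / ldl_ltsolve apply the three triangular/diagonal solves, with ldl_perm / ldl_permt handling the fill-reducing permutation. As a self-contained illustration of that pattern, here is a minimal sketch on a hand-made 3x3 symmetric positive-definite system, without a permutation; the matrix values, the right-hand side, and the extern "C" wrapper around ldl.h are assumptions for the example only, not part of this file:

```cpp
// Minimal sketch of the SuiteSparse LDL call pattern used by the optimizer above.
#include <cstdio>
#include <vector>

extern "C" {
#include "ldl.h"   // LDL package (Tim Davis); the wrapper is redundant if ldl.h already has C++ guards
}

int main()
{
   int const n = 3;

   // Upper triangle of the symmetric positive-definite matrix
   //   A = [4 1 0; 1 5 2; 0 2 6]
   // stored column-compressed, as expected by ldl_symbolic/ldl_numeric.
   int    Ap[] = { 0, 1, 3, 5 };
   int    Ai[] = { 0,   0, 1,   1, 2 };
   double Ax[] = { 4.0, 1.0, 5.0, 2.0, 6.0 };
   double b [] = { 1.0, 2.0, 3.0 };   // right-hand side, overwritten with the solution

   std::vector<int>    Lp(n + 1), Parent(n), Lnz(n), Flag(n), Pattern(n);
   std::vector<double> D(n), Y(n);

   // Symbolic analysis: elimination tree and column counts of L (no fill-reducing permutation here).
   ldl_symbolic(n, Ap, Ai, &Lp[0], &Parent[0], &Lnz[0], &Flag[0], NULL, NULL);

   int const lnz = Lp[n];
   std::vector<int>    Li(lnz);
   std::vector<double> Lx(lnz);

   // Numeric factorization A = L*D*L'; returns n on success (checked as 'd == nCols' above).
   int const d = ldl_numeric(n, Ap, Ai, Ax, &Lp[0], &Parent[0], &Lnz[0],
                             &Li[0], &Lx[0], &D[0], &Y[0], &Pattern[0], &Flag[0],
                             NULL, NULL);
   if (d == n)
   {
      // Solve A x = b in place: forward solve, diagonal solve, backward solve.
      ldl_lsolve (n, b, &Lp[0], &Li[0], &Lx[0]);
      ldl_dsolve (n, b, &D[0]);
      ldl_ltsolve(n, b, &Lp[0], &Li[0], &Lx[0]);
      for (int i = 0; i < n; ++i) printf("x[%d] = %g\n", i, b[i]);
   }
   else
   {
      printf("LDL factorization failed at column %d\n", d);
   }
   return 0;
}
```

In the optimizer, the only differences are that the sparsity pattern and _JtJ_Lp/_JtJ_Parent/_JtJ_Lnz come from a symbolic analysis done once outside the iteration loop, and that the right-hand side Jt_e is permuted with ldl_perm before the solves and the result permuted back with ldl_permt.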
