⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 v3d_optimization.cpp

📁 A version of a sparse bundle adjustment implementation with adjustable intrinsics and distortion parameters
💻 CPP
📖 第 1 页 / 共 3 页
字号:
   // NOTE(review): this chunk begins inside a method body — the signature line
   // is above the visible region. The closing comment below identifies it as
   // SparseLevenbergOptimizer::serializeNonZerosJtJ(). From the visible code it
   // records, via dst.push_back(make_pair(row, col)), the position of every
   // structurally non-zero entry of the upper-triangular part of J^t*J into
   // `dst` — presumably a std::vector of index pairs; confirm in the header.
   // The emission ORDER here is a contract: fillSparseJtJ() below replays the
   // exact same order through its `serial` counter.
   {
      // Counts of varying (non-fixed) parameter blocks/dimensions.
      int const nVaryingA = _nParametersA - _nNonvaryingA;
      int const nVaryingB = _nParametersB - _nNonvaryingB;
      int const nVaryingC = _paramDimensionC - _nNonvaryingC;

      // Column offsets of the B-block and C-block sections in the combined
      // parameter ordering [ A-blocks | B-blocks | C ].
      int const bColumnStart = nVaryingA*_paramDimensionA;
      int const cColumnStart = bColumnStart + nVaryingB*_paramDimensionB;

      dst.clear();

      // Add the diagonal block matrices (only the upper triangular part).

      // Ui submatrices of JtJ
      for (int i = 0; i < nVaryingA; ++i)
      {
         int const i0 = i * _paramDimensionA;
         for (int c = 0; c < _paramDimensionA; ++c)
            for (int r = 0; r <= c; ++r)
               dst.push_back(make_pair(i0 + r, i0 + c));
      }

      // Vj submatrices of JtJ
      for (int j = 0; j < nVaryingB; ++j)
      {
         int const j0 = j*_paramDimensionB + bColumnStart;
         for (int c = 0; c < _paramDimensionB; ++c)
            for (int r = 0; r <= c; ++r)
               dst.push_back(make_pair(j0 + r, j0 + c));
      }

      // Z submatrix of JtJ
      for (int c = 0; c < nVaryingC; ++c)
         for (int r = 0; r <= c; ++r)
            dst.push_back(make_pair(cColumnStart + r, cColumnStart + c));

      // Add the elements i and j linked by an observation k.
      // W submatrix of JtJ: one dense _paramDimensionA x _paramDimensionB
      // block per (i, j) pair listed in _jointNonzerosW.
      for (size_t n = 0; n < _jointNonzerosW.size(); ++n)
      {
         int const i0 = _jointNonzerosW[n].first;
         int const j0 = _jointNonzerosW[n].second;
         int const r0 = i0*_paramDimensionA;
         int const c0 = j0*_paramDimensionB + bColumnStart;
         for (int r = 0; r < _paramDimensionA; ++r)
            for (int c = 0; c < _paramDimensionB; ++c)
               dst.push_back(make_pair(r0 + r, c0 + c));
      } // end for (n)

      if (nVaryingC > 0)
      {
         // Finally, add the dense columns linking i (resp. j) with the global parameters.

         // X submatrix of JtJ
         for (int i = 0; i < nVaryingA; ++i)
         {
            int const i0 = i*_paramDimensionA;
            for (int r = 0; r < _paramDimensionA; ++r)
               for (int c = 0; c < nVaryingC; ++c)
                  dst.push_back(make_pair(i0 + r, cColumnStart + c));
         }

         // Y submatrix of JtJ
         for (int j = 0; j < nVaryingB; ++j)
         {
            int const j0 = j*_paramDimensionB + bColumnStart;
            for (int r = 0; r < _paramDimensionB; ++r)
               for (int c = 0; c < nVaryingC; ++c)
                  dst.push_back(make_pair(j0 + r, cColumnStart + c));
         }
      } // end if
   } // end SparseLevenbergOptimizer::serializeNonZerosJtJ()

   // Copy the numeric values of the submatrices Ui, Vj, Wn, Z, X and Y into
   // the sparse matrix _JtJ. Entries are visited in exactly the same serial
   // order in which serializeNonZerosJtJ() emitted their positions, and
   // destIdxs[serial] maps each serial position to its slot in _JtJ's value
   // array — the two functions MUST be kept in lock-step.
   void
   SparseLevenbergOptimizer::fillSparseJtJ(MatrixArray<double> const& Ui,
                                           MatrixArray<double> const& Vj,
                                           MatrixArray<double> const& Wn,
                                           Matrix<double> const& Z,
                                           Matrix<double> const& X,
                                           Matrix<double> const& Y)
   {
      int const nVaryingA = _nParametersA - _nNonvaryingA;
      int const nVaryingB = _nParametersB - _nNonvaryingB;
      int const nVaryingC = _paramDimensionC - _nNonvaryingC;

      int const bColumnStart = nVaryingA*_paramDimensionA;
      int const cColumnStart = bColumnStart + nVaryingB*_paramDimensionB;

      int const nCols = _JtJ.num_cols();        // NOTE(review): unused below
      int const nnz   = _JtJ.getNonzeroCount(); // NOTE(review): unused below

      // The following has to replicate the procedure as in serializeNonZerosJtJ()
      int serial = 0;

      double * values = _JtJ.getValues();
      int const * destIdxs = _JtJ.getDestIndices();

      // Add the diagonal block matrices (only the upper triangular part).

      // Ui submatrices of JtJ
      for (int i = 0; i < nVaryingA; ++i)
      {
         int const i0 = i * _paramDimensionA; // NOTE(review): unused in this loop
         for (int c = 0; c < _paramDimensionA; ++c)
            for (int r = 0; r <= c; ++r, ++serial)
               values[destIdxs[serial]] = Ui[i][r][c];
      }

      // Vj submatrices of JtJ
      for (int j = 0; j < nVaryingB; ++j)
      {
         int const j0 = j*_paramDimensionB + bColumnStart; // NOTE(review): unused in this loop
         for (int c = 0; c < _paramDimensionB; ++c)
            for (int r = 0; r <= c; ++r, ++serial)
               values[destIdxs[serial]] = Vj[j][r][c];
      }

      // Z submatrix of JtJ
      for (int c = 0; c < nVaryingC; ++c)
         for (int r = 0; r <= c; ++r, ++serial)
            values[destIdxs[serial]] = Z[r][c];

      // Add the elements i and j linked by an observation k.
      // W submatrix of JtJ
      for (size_t n = 0; n < _jointNonzerosW.size(); ++n)
      {
         for (int r = 0; r < _paramDimensionA; ++r)
            for (int c = 0; c < _paramDimensionB; ++c, ++serial)
               values[destIdxs[serial]] = Wn[n][r][c];
      } // end for (k)

      if (nVaryingC > 0)
      {
         // Finally, add the dense columns linking i (resp. j) with the global parameters.

         // X submatrix of JtJ
         for (int i = 0; i < nVaryingA; ++i)
         {
            int const r0 = i * _paramDimensionA;
            for (int r = 0; r < _paramDimensionA; ++r)
               for (int c = 0; c < nVaryingC; ++c, ++serial)
                  values[destIdxs[serial]] = X[r0+r][c];
         }

         // Y submatrix of JtJ
         for (int j = 0; j < nVaryingB; ++j)
         {
            int const r0 = j * _paramDimensionB;
            for (int r = 0; r < _paramDimensionB; ++r)
               for (int c = 0; c < nVaryingC; ++c, ++serial)
                  values[destIdxs[serial]] = Y[r0+r][c];
         }
      } // end if
   } // end SparseLevenbergOptimizer::fillSparseJtJ()

   // Main Levenberg-Marquardt-style iteration loop. Sets `status` to TIMEOUT
   // up front and upgrades it to CONVERGED when a stopping criterion fires.
   // NOTE(review): this function is TRUNCATED here — the chunk is page 1 of 3;
   // the body continues (and the `end:` label targeted by `goto end;` lives)
   // beyond the visible region.
   void
   SparseLevenbergOptimizer::minimize()
   {
      status = LEVENBERG_OPTIMIZER_TIMEOUT; // pessimistic default; overwritten on convergence

      bool computeDerivatives = true;

      int const nVaryingA = _nParametersA - _nNonvaryingA;
      int const nVaryingB = _nParametersB - _nNonvaryingB;
      int const nVaryingC = _paramDimensionC - _nNonvaryingC;

      if (nVaryingA == 0 && nVaryingB == 0 && nVaryingC == 0)
      {
         // No degrees of freedom, nothing to optimize.
         status = LEVENBERG_OPTIMIZER_CONVERGED;
         return;
      }

      this->setupSparseJtJ();

      // Per-measurement weights and Jacobian blocks w.r.t. the A, B and C parameters.
      Vector<double> weights(_nMeasurements);

      MatrixArray<double> Ak(_nMeasurements, _measurementDimension, _paramDimensionA);
      MatrixArray<double> Bk(_nMeasurements, _measurementDimension, _paramDimensionB);
      MatrixArray<double> Ck(_nMeasurements, _measurementDimension, _paramDimensionC);

      // Diagonal blocks of J^t*J.
      MatrixArray<double> Ui(nVaryingA, _paramDimensionA, _paramDimensionA);
      MatrixArray<double> Vj(nVaryingB, _paramDimensionB, _paramDimensionB);

      // Wn = Ak^t*Bk
      MatrixArray<double> Wn(_jointNonzerosW.size(), _paramDimensionA, _paramDimensionB);

      Matrix<double> Z(nVaryingC, nVaryingC);

      // X = A^t*C
      Matrix<double> X(nVaryingA*_paramDimensionA, nVaryingC);
      // Y = B^t*C
      Matrix<double> Y(nVaryingB*_paramDimensionB, nVaryingC);

      VectorArray<double> residuals(_nMeasurements, _measurementDimension);
      VectorArray<double> residuals2(_nMeasurements, _measurementDimension);

      // Saved diagonals (presumably for the damping/augmentation step — the
      // code that uses them is beyond this chunk; confirm on pages 2-3).
      VectorArray<double> diagUi(nVaryingA, _paramDimensionA);
      VectorArray<double> diagVj(nVaryingB, _paramDimensionB);
      Vector<double> diagZ(nVaryingC);

      // Pieces of the gradient J^t*e, and the assembled vector.
      VectorArray<double> At_e(nVaryingA, _paramDimensionA);
      VectorArray<double> Bt_e(nVaryingB, _paramDimensionB);
      Vector<double> Ct_e(nVaryingC);
      Vector<double> Jt_e(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC);

      // Update vector (and a permuted copy) over all varying parameters.
      Vector<double> delta(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC);
      Vector<double> deltaPerm(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC);

      // Per-block parameter updates; sized over ALL blocks (incl. non-varying).
      VectorArray<double> deltaAi(_nParametersA, _paramDimensionA);
      VectorArray<double> deltaBj(_nParametersB, _paramDimensionB);
      Vector<double> deltaC(_paramDimensionC);

      double err = 0.0;

      for (currentIteration = 0; currentIteration < maxIterations; ++currentIteration)
      {
         if (optimizerVerbosenessLevel >= 2)
            cout << "SparseLevenbergOptimizer: currentIteration: " << currentIteration << endl;

         if (computeDerivatives)
         {
            // Evaluate and weight the residuals.
            this->evalResidual(residuals);
            this->fillWeights(residuals, weights);
            for (int k = 0; k < _nMeasurements; ++k)
               scaleVectorIP(weights[k], residuals[k]);

            err = squaredResidual(residuals);

            if (optimizerVerbosenessLevel >= 1) cout << "SparseLevenbergOptimizer: |residual|^2 = " << err << endl;
            if (optimizerVerbosenessLevel >= 2) cout << "SparseLevenbergOptimizer: lambda = " << lambda << endl;

            // Flip the sign of the residuals before forming J^t*e.
            for (int k = 0; k < residuals.count(); ++k) scaleVectorIP(-1.0, residuals[k]);

            this->setupJacobianGathering();
            this->fillAllJacobians(weights, Ak, Bk, Ck);

            // Compute the different parts of J^t*e

            if (nVaryingA > 0)
            {
               for (int i = 0; i < nVaryingA; ++i) makeZeroVector(At_e[i]);
               Vector<double> tmp(_paramDimensionA);
               for (int k = 0; k < _nMeasurements; ++k)
               {
                  int const i = _correspondingParamA[k] - _nNonvaryingA;
                  if (i < 0) continue; // measurement attached to a fixed A block
                  multiply_At_v(Ak[k], residuals[k], tmp);
                  addVectors(tmp, At_e[i], At_e[i]);
               } // end for (k)
            } // end if

            if (nVaryingB > 0)
            {
               for (int j = 0; j < nVaryingB; ++j) makeZeroVector(Bt_e[j]);
               Vector<double> tmp(_paramDimensionB);
               for (int k = 0; k < _nMeasurements; ++k)
               {
                  int const j = _correspondingParamB[k] - _nNonvaryingB;
                  if (j < 0) continue; // measurement attached to a fixed B block
                  multiply_At_v(Bk[k], residuals[k], tmp);
                  addVectors(tmp, Bt_e[j], Bt_e[j]);
               } // end for (k)
            } // end if

            if (nVaryingC > 0)
            {
               makeZeroVector(Ct_e);
               Vector<double> tmp(_paramDimensionC);
               for (int k = 0; k < _nMeasurements; ++k)
               {
                  multiply_At_v(Ck[k], residuals[k], tmp);
                  // Only the varying tail of the C gradient is accumulated.
                  for (int l = 0; l < nVaryingC; ++l) Ct_e[l] += tmp[_nNonvaryingC + l];
               }
            } // end if

            // Serialize At_e, Bt_e and Ct_e into the single gradient vector Jt_e,
            // in the same [ A-blocks | B-blocks | C ] ordering used for J^t*J.
            int pos = 0;
            for (int i = 0; i < nVaryingA; ++i)
               for (int l = 0; l < _paramDimensionA; ++l, ++pos)
                  Jt_e[pos] = At_e[i][l];
            for (int j = 0; j < nVaryingB; ++j)
               for (int l = 0; l < _paramDimensionB; ++l, ++pos)
                  Jt_e[pos] = Bt_e[j][l];
            for (int l = 0; l < nVaryingC; ++l, ++pos)
               Jt_e[pos] = Ct_e[l];

//             cout << "Jt_e = ";
//             for (int k = 0; k < Jt_e.size(); ++k) cout << Jt_e[k] << " ";
//             cout << endl;

            if (this->applyGradientStoppingCriteria(norm_Linf(Jt_e)))
            {
               status = LEVENBERG_OPTIMIZER_CONVERGED;
               goto end; // `end:` label is beyond this chunk
            }

            // The lhs J^t*J consists of several parts:
            //         [ U     W   X ]
            // J^t*J = [ W^t   V   Y ]
            //         [ X^t  Y^t  Z ],
            // where U, V and W are block-sparse matrices (due to the sparsity of A and B).
            // X, Y and Z contain only a few columns (the number of global parameters).

            if (nVaryingA > 0)
            {
               // Compute Ui
               Matrix<double> U(_paramDimensionA, _paramDimensionA);
               for (int i = 0; i < nVaryingA; ++i) makeZeroMatrix(Ui[i]);
               // NOTE(review): TRUNCATED — the remainder of minimize() is on
               // the following pages of this file.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -