📄 mmat_16.cc
字号:
} if (p > 0) { s = Integral::sqrt(p * p + q * q + r * r); } else { s = -Integral::sqrt(p * p + q * q + r * r); } if (s != 0) { if (k == m) { if (l != m) { this_a.setValue(k - 1, k - 2, -this_a(k - 1, k - 2)); } } else { this_a.setValue(k - 1, k - 2, -s * x); } p += s; x = p / s; y = q / s; z = r / s; q /= p; r /= p; for (long j = k; j <= nn; j++) { p = this_a(k - 1, j - 1) + q * this_a(k, j - 1); if (k != (nn - 1)) { p += r * this_a(k + 1, j - 1); this_a.setValue(k + 1, j - 1, this_a(k + 1, j - 1) - p * z); } this_a.setValue(k, j - 1, this_a(k, j - 1) - p * y); this_a.setValue(k - 1, j - 1, this_a(k - 1, j - 1) - p * x); } long mmin = nn < k + 3 ? nn : k + 3; for (long i = l; i <= mmin; i++) { p = x * this_a(i - 1, k - 1) + y * this_a(i - 1, k); if (k != (nn - 1)) { p += z * this_a(i - 1, k + 1); this_a.setValue(i - 1, k + 1, this_a(i - 1, k + 1) - p * r); } this_a.setValue(i - 1, k, this_a(i - 1, k) - p * q); this_a.setValue(i - 1, k - 1, this_a(i - 1, k - 1) - p); } } } } } } while (l < nn - 1); } // copy results to output vector // eigval_r_a.assign(wr); eigval_i_a.assign(wi); // exit gracefully // return true;}// method: eigenComputeVector//// arguments:// MMatrix<TScalar, TIntegral>& this: (input) input matrix// MVector<TScalar, TIntegral>& eigvect: (output) eigenvector// TIntegral eigval: (input) eigenvalue//// return: a boolean value indicating status//// this method computes the eigenvalues of a matrix. see://// W. Press, S. Teukolsky, W. Vetterling, B. Flannery,// Numerical Recipes in C, Second Edition, Cambridge University Press,// New York, New York, USA, pp. 493-495, 1995.//// for more details. 
// there are a lot of local constants to this code, and the algorithm is
// fairly dependent on these.
//
template<class TScalar, class TIntegral>
boolean
MMatrixMethods::eigenComputeVector(MMatrix<TScalar, TIntegral>& this_a,
				   MVector<TScalar, TIntegral>& eigvect_a,
				   TIntegral eigval_a) {

  // get the dimension of input matrix
  //
  long n = this_a.getNumRows();

  // initialize constants and variables:
  //  MIN_INITIAL_GROWTH: minimum acceptable growth of |y| over |xi| for
  //                      the randomly chosen starting vector
  //  MAX_INITIAL_ITER:   maximum random restarts for the starting vector
  //  MAX_ITER:           maximum inverse-iteration sweeps per eigenvalue
  //  MAX_L_UPDATE:       maximum number of eigenvalue refinements
  //  EPSILON:            convergence threshold on |y - xi|
  //  MIN_DECREASE:       below this rate of decrease the eigenvalue
  //                      estimate is refined
  //
  const double MIN_INITIAL_GROWTH = (double)1000.0;
  const long MAX_INITIAL_ITER = (long)10;
  const long MAX_ITER = (long)8;
  const long MAX_L_UPDATE = (long)5;
  const double EPSILON = (double)0.0000001;
  const double MIN_DECREASE = (double)0.01;

  // initialize the output vector
  //
  eigvect_a.setLength(n);

  // treat diagonal matrix in a different way
  //
  if (this_a.isDiagonal()) {

    // fill output eigenvector with zero
    //
    eigvect_a.clear(Integral::RETAIN);

    // find the exact eigenvalue in diagonal elements of the matrix
    //
    for (long i = 0; i < n; i++) {
      Double temp_val = this_a(i, i);

      // if eigenvalue found, the corresponding eigenvector value
      // should be set to 1, otherwise the value should keep 0
      //
      if (temp_val.almostEqual(eigval_a)) {
	eigvect_a(i) = 1;
      }
    }
  }

  // for FULL, SYMMETRIC, TRIANGULAR or SPARSE matrix
  //
  else {

    // subtract eigenvalue from the diagonal elements,
    // i.e. form (A - lambda * I), the matrix used by inverse iteration
    // NOTE(review): this modifies this_a in place and it is not restored
    // afterwards -- presumably the caller works on a scratch copy; confirm
    //
    double eig_val = (double) eigval_a;
    for (long i = 0; i < n; i++) {
      this_a.setValue(i, i, this_a(i, i) - eig_val);
    }

    // declare local variables
    //
    MVector<TScalar, TIntegral> xi(n);
    MVector<TScalar, TIntegral> xi_init;
    MVector<TScalar, TIntegral> y;
    MVector<TScalar, TIntegral> y_init_max;
    VectorLong index(n);
    long sign;

    // choose a pivot threshold appropriate for the numeric precision of
    // the template type (single vs. double precision)
    //
    double tiny_num = 0;
    if (typeid(TIntegral) == typeid(float)) {
      tiny_num = 1.e-10;
    }
    else if (typeid(TIntegral) == typeid(double)) {
      tiny_num = 1.e-20;
    }
    else {
      tiny_num = 1.e-10;
    }

    // get the LU decomposition of temporary matrix
    //
    MMatrix<TScalar, TIntegral> lower_trig, upper_trig;
    this_a.decompositionLU(lower_trig, upper_trig, index, sign, tiny_num);

    // choose a random normalized vector xi as the initial guess for the
    // eigenvector and solve A * y = xi, new vector y should be bigger than
    // xi by a "growth factor", which is now MIN_INITIAL_GROWTH
    //
    long iter_num = 0;
    double len_y;
    double maxGF = -1;

    do {
      iter_num++;

      // random and normalize a vector xi
      // make xi(0)*xi(0) + xi(1)*xi(1) + .... == 1
      //
      xi.rand(-50, 50);
      xi.div((double)((Double)(xi.sumSquare())).sqrt());

      // use LU matrix to solve equation L * U * y = xi
      //
      lower_trig.luSolve(y, lower_trig, upper_trig, index, xi);

      // get the norm of vector y
      //
      len_y = (double)((Double)y.sumSquare()).sqrt();

      // record max len_y and the corresponding vector y
      //
      if (len_y > maxGF) {
	maxGF = len_y;
	xi_init.assign(xi);
	y_init_max.assign(y);
      }
    } while((len_y < MIN_INITIAL_GROWTH) && (iter_num < MAX_INITIAL_ITER));

    // copy and normalize vector y
    //
    y.div(y_init_max, maxGF);

    iter_num = 0;
    long update_num = 0;
    double di = 0;
    double di1;

    do {

      // get the value of |y - xi| and if it is too small, no further
      // improvement is needed
      //
      di1 = (TScalar)xi.distance(y);
      if (di1 < EPSILON) {
	break;
      }

      // if |y - xi| is not decreasing rapidly enough, we will try
      // updating the eigenvalue. if the eigenvalue can not be updated
      // to a new one because of machine accuracy, no further
      // improvement for the eigenvector is needed, we just quit
      //
      iter_num++;
      double decrease = (double) Integral::abs(di1 - di);
      double delta_val = 0;

      if (decrease < MIN_DECREASE) {

	// calculate the improvement for eigenvalue as 1/|xi * y * len_y|
	//
	delta_val = 1 / (xi.dotProduct(y) * len_y);
	eig_val += delta_val;
	update_num++;

	if (delta_val == 0) {

	  // no further iteration is need since the delta value is zero
	  //
	  break;
	}
	else {
	  iter_num = 0;

	  // update new diagonal elements with updated eigenvalue:
	  // (A - (lambda + delta) * I) = (A - lambda * I) - delta * I
	  //
	  for (long i = 0; i < n; i++) {
	    this_a.setValue(i, i, this_a(i, i) - delta_val);
	  }

	  // get LU decomposition result of updated temporary matrix
	  //
	  this_a.decompositionLU(lower_trig, upper_trig, index, sign,tiny_num);
	}
      }

      // this is iteration algorithm, we update xi with y and by solving
      // equation A * y = xi we get new vector y
      //
      xi.assign(y);
      di = di1;

      // solve y with the LU decomposition results
      //
      lower_trig.luSolve(y, lower_trig, upper_trig, index, xi);

      // normalize vector y
      //
      len_y = (double)((Double)y.sumSquare()).sqrt();
      y.div((double)len_y);

    } while ((iter_num < MAX_ITER) && (update_num < MAX_L_UPDATE));

    // copy the result vector to output vector
    //
    eigvect_a.assign(y);
  }

  // exit gracefully
  //
  return true;
}
#endif

// method: luSolve
//
// arguments:
//  MVector<TScalar, TIntegral>& out_vec: (output) output vector
//  const MMatrix<TScalar, TIntegral>& l: (input) lower triangular
//  const MMatrix<TScalar, TIntegral>& u: (input) upper triangular
//  const VectorLong& index: (input) input pivot index
//  const MVector<TScalar, TIntegral>& in_vec: (input) input vector
//
// return: a boolean value indicating status
//
// this method solves the linear equation L * U * x = b, where L, U are
// lower and upper triangular matrices respectively and b is an input vector.
// the solution proceeds in two triangular substitutions: forward
// substitution with L, then back substitution with U.
//
template<class TScalar, class TIntegral>
boolean MMatrixMethods::luSolve(MVector<TScalar, TIntegral>& out_vec_a,
				const MMatrix<TScalar, TIntegral>& l_a,
				const MMatrix<TScalar, TIntegral>& u_a,
				const VectorLong& index_a,
				const MVector<TScalar, TIntegral>& in_vec_a) {

  // get the dimensions of input matrix
  //
  long nrows = u_a.getNumRows();

  // reorder the input vector elements' sequence caused by pivoting
  // and copy it to temporary vector
  // NOTE(review): index_a is applied as a direct permutation here --
  // presumably decompositionLU stores a full permutation vector rather
  // than Numerical-Recipes-style sequential row swaps; verify
  //
  out_vec_a.setLength(in_vec_a.length());
  for (long i = 0; i < nrows; i++) {
    out_vec_a(i) = in_vec_a(index_a(i));
  }

  // solve lower triangular matrix first, we start loop from the second
  // element of vector because out_vec_a(0) == in_vec_a(0)
  // (no division: L is assumed unit lower triangular here)
  //
  for (long i = 1; i < nrows; i++) {

    // declare an accumulator
    //
    TIntegral sum = out_vec_a(i);

    // subtract every previous element
    //
    for (long j = 0; j < i; j++) {
      sum -= (TIntegral)(l_a(i, j)) * out_vec_a(j);
    }

    // output the result
    //
    out_vec_a(i) = (TIntegral) sum;
  }

  // solve for the upper triangular matrix
  //
  for (long i = nrows - 1; i >= 0; i--) {

    // declare an accumulator
    //
    TIntegral sum = (TIntegral) out_vec_a(i);

    // subtract every previous element
    //
    for (long j = i + 1; j < nrows; j++) {
      sum -= ((TIntegral)(u_a(i, j))) * (TIntegral)out_vec_a(j);
    }

    // output the result (divide by the diagonal element of U)
    //
    out_vec_a(i) = (TIntegral) sum / u_a(i, i);
  }

  // exit gracefully
  //
  return true;
}

// explicit instantiations
//
template boolean
MMatrixMethods::luSolve(MVector<ISIP_TEMPLATE_TARGET>&,
			const MMatrix<ISIP_TEMPLATE_TARGET>&,
			const MMatrix<ISIP_TEMPLATE_TARGET>&,
			const VectorLong&,
			const MVector<ISIP_TEMPLATE_TARGET>&);

// method: choleskySolve
//
// arguments:
//  MVector<TScalar, TIntegral>& out_vec: (output) output vector
//  const MMatrix<TScalar, TIntegral>& l: (input) lower triangular cholesky
//                                        decomposition matrix
//  const MVector<TScalar, TIntegral>& in_vec: (input) input vector
//
// return: a boolean value indicating status
//
// this method solves the linear equation l * l' * x = b, where L is the
// cholesky decomposition matrix and b is an input vector. it performs
// forward substitution with L followed by back substitution with L'.
//
template<class TScalar, class TIntegral>
boolean
MMatrixMethods::choleskySolve(MVector<TScalar, TIntegral>& out_vec_a,
			      const MMatrix<TScalar, TIntegral>& l_a,
			      const MVector<TScalar, TIntegral>& in_vec_a) {

  // get the dimensions of input matrix
  //
  long nrows = l_a.getNumRows();
  out_vec_a.setLength(nrows);

  // solve L * y = in_vec, result is stored in out_vec
  //
  for (long i = 0; i < nrows; i++) {

    // declare an accumulator
    //
    TIntegral sum = in_vec_a(i);

    // subtract every previous element
    //
    for (long j = i - 1; j >= 0; j--) {
      sum -= (TIntegral)(l_a(i, j)) * out_vec_a(j);
    }

    // output the result
    //
    out_vec_a(i) = (TIntegral) sum / l_a(i,i);
  }

  // solve for the L' * x = y
  // (the transpose is realized by indexing l_a(j, i) instead of l_a(i, j))
  //
  for (long i = nrows - 1; i >= 0; i--) {

    // declare an accumulator
    //
    TIntegral sum = (TIntegral) out_vec_a(i);

    // subtract every previous element
    //
    for (long j = i + 1; j < nrows; j++) {
      sum -= ((TIntegral)(l_a(j, i))) * (TIntegral)out_vec_a(j);
    }

    // output the result
    //
    out_vec_a(i) = (TIntegral) sum / l_a(i, i);
  }

  // exit gracefully
  //
  return true;
}

// explicit instantiations
//
template boolean
MMatrixMethods::choleskySolve(MVector<ISIP_TEMPLATE_TARGET>&,
			      const MMatrix<ISIP_TEMPLATE_TARGET>&,
			      const MVector<ISIP_TEMPLATE_TARGET>&);

// method: svdSolve
//
// arguments:
//  MVector<TScalar, TIntegral>& out_vec: (output) output vector
//  const MMatrix<TScalar, TIntegral>& u: (input) row orthonormal
//  const MMatrix<TScalar, TIntegral>& w: (input) singular value matrix
//  const MMatrix<TScalar, TIntegral>& v: (input) column orthonormal
//  const MVector<TScalar, TIntegral>& in_vec: (input) input vector
//  boolean zero_singulars : (input) if true then near-zero singular values
//                           are ignored
//
// return: a boolean value indicating status
//
// this method solves the linear equation u * w * transpose(v) * x = b,
// where u and v are orthonormal column matrices and w is the
// singular value matrix. this routine follows the techniques from:
//
//  W. Press, S. Teukolsky, W. Vetterling, B. Flannery,
//  Numerical Recipes in C, Second Edition, Cambridge University Press,
//  New York, New York, USA, pp. 61-64, 1995.
//
// Particularly important is equation (2.6.7)
// If zero_singulars is set to false then this routine assumes that the
// near-singular values in the w matrix have already been zeroed.
//
template<class TScalar, class TIntegral>
boolean MMatrixMethods::svdSolve(MVector<TScalar, TIntegral>& out_vec_a,
				 const MMatrix<TScalar, TIntegral>& u_a,
				 const MMatrix<TScalar, TIntegral>& w_a,
				 const MMatrix<TScalar, TIntegral>& v_a,
				 const MVector<TScalar, TIntegral>& in_vec_a,
				 boolean zero_singulars_a) {

  // this routine is only meaningful for floating-point template types
  //
#ifdef ISIP_TEMPLATE_fp

  // declare local variables.
  //
  long rows = w_a.getNumRows();    // the original matrix number of rows
  long cols = w_a.getNumColumns(); // the original matrix number of columns
  MVector<TScalar, TIntegral> tmp_vec;
  MVector<TScalar, TIntegral> tmp_w; // vector of singular values

  // check dimensions:
  //  u should be a rows x rows matrix
  //  v should be a cols x cols matrix
  //  in_vec should be a rows-length vector
  //
  if (!u_a.isSquare() || (u_a.getNumRows() != rows) ||
      !v_a.isSquare() || (v_a.getNumRows() != cols) ||
      (in_vec_a.length() != rows)) {
    u_a.debug(L"u_a");
    v_a.debug(L"v_a");
    in_vec_a.debug(L"in_vec_a");
    return Error::handle(name(), L"svdSolve", u_a.ERR_DIM,
			 __FILE__,__LINE__);
  }

  // zero out near-singular values if requested: wmin is the standard
  // max(rows, cols) * wmax * threshold cutoff
  //
  TIntegral wmin = 0.0;
  if (zero_singulars_a) {
    TIntegral wmax = w_a.max();
    wmin = wmax * Integral::max(rows, cols) * w_a.THRESH_SINGULAR;
  }

  // compute the inverse of the singular values for all that are non-zero
  //
  long min_dimension = (rows < cols) ? rows : cols;
  tmp_w.assign((long)cols, (TIntegral)0.0);
  for (long i = 0; i < min_dimension; i++) {
    if (w_a(i, i) > wmin) {
      tmp_w(i) = 1.0 / w_a(i, i);
    }
  }

  // compute transpose(U) * b from equation (2.6.7)
  // note that only those columns of U which correspond to non-zero
  // entries in the w matrix are used. also note that the outer loop
  // is over the columns of U not the rows.
  //
  tmp_vec.assign(cols, (TIntegral)0.0);
  for (long j = 0; j < cols; j++) {
    if (tmp_w(j) != (TIntegral)0.0) {
      for (long i = 0; i < rows; i++) {
	tmp_vec(j) += u_a(i, j) * in_vec_a(i);
      }
      tmp_vec(j) *= tmp_w(j);
    }
  }

  // multiply v by the temporary vector to get the final result
  //
  v_a.multv(out_vec_a, tmp_vec);

  // exit gracefully
  //
  return true;

  // for matrices other than float and double, return error
  //
#else
  return Error::handle(name(), L"svdSolve", Error::TEMPLATE_TYPE,
		       __FILE__, __LINE__);
#endif
}

// explicit instantiations
//
template boolean
MMatrixMethods::svdSolve(MVector<ISIP_TEMPLATE_TARGET>&,
			 const MMatrix<ISIP_TEMPLATE_TARGET>&,
			 const MMatrix<ISIP_TEMPLATE_TARGET>&,
			 const MMatrix<ISIP_TEMPLATE_TARGET>&,
			 const MVector<ISIP_TEMPLATE_TARGET>&,
			 boolean);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -