// file: mmat_07.cc
//
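// note: the code below is the tail of the Gauss-Jordan elimination branch
// of MMatrixMethods::inverse() for FULL matrices. the working copy a
// appears to be reduced to the identity while this_a accumulates the
// inverse: once the pivot row ipass has been scaled by 1 / pivot, every
// other row irow is replaced by
//
//   row(irow) = row(irow) - a(irow, ipass) * row(ipass)
//
// in both matrices, which zeroes the pivot column everywhere except on
// the diagonal. (this note is a hedged reading of the visible fragment,
// not a statement from the original source.)
//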
          a.setValue(ipass, icol, a.getValue(ipass, icol) / pivot);
        }
      }

      // now replace each row by the row plus a multiple of the
      // pivot row with a factor chosen so that the element of a on
      // the pivot column is 0
      //
      complexdouble factor = 0;

      for (long irow = 0; irow < nrows; irow++) {
        if (irow != ipass) {
          factor = a.getValue(irow, ipass);
        }
        for (long icol = 0; icol < nrows; icol++) {
          if (irow != ipass) {
            this_a.setValue(irow, icol, this_a.getValue(irow, icol) -
                            factor * this_a.getValue(ipass, icol));
            a.setValue(irow, icol, a.getValue(irow, icol) -
                       factor * a.getValue(ipass, icol));
          }
        }
      }
    }
  }

  // type: SYMMETRIC, LOWER_TRIANGULAR, UPPER_TRIANGULAR
  // for symmetric and triangular matrices, change them into a full matrix
  // and get the inverse
  //
  else if ((type == Integral::SYMMETRIC) ||
           (type == Integral::LOWER_TRIANGULAR) ||
           (type == Integral::UPPER_TRIANGULAR)) {
    this_a.assign(m_a, Integral::FULL);
    this_a.inverse();
  }

  // type: SPARSE
  //
  else {

    // declare a temporary sparse matrix to hold the minor, and use this_a
    // to hold the cofactor matrix
    //
    MMatrix<TScalar, TIntegral> mat_minor(nrows - 1, ncols - 1,
                                          Integral::SPARSE);
    this_a.clear(Integral::RETAIN);

    // declare local variable
    //
    long length = 0;

    // compute the determinant of the input matrix
    //
    complexdouble det_in = m_a.determinant();

    // compute the matrix of cofactors for the input matrix
    //
    // loop over all the elements of the matrix
    //
    for (long i = 0; i < nrows; i++) {
      for (long j = 0; j < ncols; j++) {

        // get the minor of the element at this (i, j) position
        //
        m_a.getMinor(mat_minor, i, j);

        // calculate the determinant of the minor matrix
        //
        complexdouble det = mat_minor.determinant();

        // check that the minor is non-singular; if it is, compute the
        // cofactor
        //
        if (det != 0) {

          // increment the count of non-zero elements
          //
          length++;

          // check the sign of the element and apply it to the minor's
          // determinant to build the cofactor matrix
          //
          if ((i + j) % 2 == 0) {

            // assign a positive sign if the sum of the row index and
            // column index is even
            //
            this_a.setValue(i, j, (TScalar)det);
          }
          else {

            // assign a negative sign if the sum of the row index and
            // column index is odd
            //
            this_a.setValue(i, j, (TScalar)-det);
          }
        }
      }
    }

    // form the adjoint matrix, which is the transpose of the cofactor matrix
    //
    this_a.transpose();
    this_a.div(det_in);
  }
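  // note: the SPARSE branch above is the classical adjugate formula,
  //
  //   inverse(A) = adjoint(A) / det(A)
  //
  // where the cofactor matrix C is built element by element as
  // C(i, j) = (-1)^(i + j) * det(minor(i, j)) and the adjoint is its
  // transpose, which is exactly what the transpose() and div() calls
  // produce.
  //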
  // solve the round-off problem during inverse computation:
  //  1. for symmetric, copy the lower triangular part to the upper
  //     triangular part, making it completely symmetric;
  //  2. for lower triangular, set zeros in the complete upper part, in case
  //     some non-zero but very close to zero elements exist;
  //  3. for upper triangular, set zeros in the complete lower part, in case
  //     some non-zero but very close to zero elements exist;
  //
  if (out_type == Integral::SYMMETRIC) {
    for (long row = 0; row < nrows; row++) {
      for (long col = row + 1; col < ncols; col++) {
        this_a.setValue(row, col, this_a(col, row));
      }
    }
  }
  else if (out_type == Integral::LOWER_TRIANGULAR) {
    for (long row = 0; row < nrows; row++) {
      for (long col = row + 1; col < ncols; col++) {
        this_a.setValue(row, col, (double)0);
      }
    }
  }
  else if (out_type == Integral::UPPER_TRIANGULAR) {
    for (long row = 0; row < nrows; row++) {
      for (long col = 0; col < row; col++) {
        this_a.setValue(row, col, (double)0);
      }
    }
  }

  // change back to the original type
  //
  if (!this_a.changeType(out_type)) {
    this_a.debug(L"this_a");
    return Error::handle(name(), L"inverse",
                         MMatrix<TScalar, TIntegral>::ERR_OPTYPE,
                         __FILE__, __LINE__);
  }

  // exit gracefully
  //
  return true;
#endif
#endif

  // for matrices other than float, double, complexfloat and
  // complexdouble this should be an error
  //
  return Error::handle(name(), L"inverse", Error::TEMPLATE_TYPE,
                       __FILE__, __LINE__);
}

// explicit instantiations
//
template boolean
MMatrixMethods::inverse<ISIP_TEMPLATE_TARGET>(MMatrix<ISIP_TEMPLATE_TARGET>&,
                                              const MMatrix<ISIP_TEMPLATE_TARGET>&);

// method: determinant
//
// arguments:
//  const MMatrix<TScalar, TIntegral>& this: (input) class operand
//
// return: a TIntegral value containing the determinant
//
// this method calculates the determinant of the matrix, attempting to
// do this in an efficient way. a number of conditions are hardcoded,
// and the fallback algorithm is an LU decomposition.
//
template<class TScalar, class TIntegral>
TIntegral MMatrixMethods::determinant(const MMatrix<TScalar, TIntegral>& this_a) {

  // return an error for unsigned matrices
  //
#ifdef ISIP_TEMPLATE_unsigned
  this_a.debug(L"this_a");
  return Error::handle(name(), L"determinant", Error::TEMPLATE_TYPE,
                       __FILE__, __LINE__);
#else

  // declare local variables
  //
  long nrows = this_a.nrows_d;
  TIntegral det = 1;

  // check arguments: the matrix must be square
  //
  if (!this_a.isSquare()) {
    this_a.debug(L"this_a");
    Error::handle(name(), L"determinant()", Error::ARG, __FILE__, __LINE__);
  }

  // condition: nrows = 1 (one element), return its value
  //
  else if (nrows == 1) {
    det = this_a.getValue(0, 0);
  }

  // condition: (nrows == 2) and type is FULL
  // do this efficiently using direct indexing
  //
  else if ((nrows == 2) && (this_a.type_d == Integral::FULL)) {
    det = this_a.m_d(3) * this_a.m_d(0) - this_a.m_d(1) * this_a.m_d(2);
  }

  // condition: (nrows == 2) and type is not FULL
  // do this efficiently using indirect indexing
  //
  else if ((nrows == 2) && (this_a.type_d != Integral::FULL)) {
    //    TIntegral v0 = this_a(0, 0);
    //    TIntegral v1 = this_a(0, 1);
    //    TIntegral v2 = this_a(1, 0);
    //    TIntegral v3 = this_a(1, 1);
    det = this_a(1, 1) * this_a(0, 0) - this_a(1, 0) * this_a(0, 1);
  }

  // condition: (nrows == 3) and type is FULL
  // do this using direct indexing
  //
  else if ((nrows == 3) && (this_a.type_d == Integral::FULL)) {
    det = this_a.m_d(0) * this_a.m_d(4) * this_a.m_d(8) +
          this_a.m_d(1) * this_a.m_d(5) * this_a.m_d(6) +
          this_a.m_d(2) * this_a.m_d(3) * this_a.m_d(7) -
          this_a.m_d(6) * this_a.m_d(4) * this_a.m_d(2) -
          this_a.m_d(7) * this_a.m_d(5) * this_a.m_d(0) -
          this_a.m_d(8) * this_a.m_d(3) * this_a.m_d(1);
  }
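  // note: the branch above is the rule of Sarrus written against the
  // storage vector. assuming row-major storage, m_d(3 * row + col) holds
  // element (row, col), so the expression expands to
  //
  //   det = a00*a11*a22 + a01*a12*a20 + a02*a10*a21
  //       - a20*a11*a02 - a21*a12*a00 - a22*a10*a01
  //
  // (the same value results under column-major storage, since
  // det(A) = det(transpose(A)))
  //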
  // condition: (nrows == 3) and type is not FULL
  // do this efficiently using indirect indexing. this is still faster
  // than LU decomposition.
  //
  else if ((nrows == 3) && (this_a.type_d != Integral::FULL)) {
    TIntegral val0 = this_a.getValue(0, 0);
    TIntegral val1 = this_a.getValue(0, 1);
    TIntegral val2 = this_a.getValue(0, 2);
    TIntegral val3 = this_a.getValue(1, 0);
    TIntegral val4 = this_a.getValue(1, 1);
    TIntegral val5 = this_a.getValue(1, 2);
    TIntegral val6 = this_a.getValue(2, 0);
    TIntegral val7 = this_a.getValue(2, 1);
    TIntegral val8 = this_a.getValue(2, 2);

    TIntegral v0 = val0 * val4 * val8;
    TIntegral v1 = val1 * val5 * val6;
    TIntegral v2 = val2 * val3 * val7;
    TIntegral v3 = val6 * val4 * val2;
    TIntegral v4 = val7 * val5 * val0;
    TIntegral v5 = val8 * val3 * val1;

    det = v0 + v1 + v2 - v3 - v4 - v5;
  }

  // condition: type is DIAGONAL
  // the determinant is equal to the product of the diagonal elements
  //
  else if (this_a.type_d == Integral::DIAGONAL) {
    for (long row = 0; row < nrows; row++) {
      det *= this_a.m_d(row);
    }
  }

  // type: LOWER_TRIANGULAR, UPPER_TRIANGULAR
  // compute the product of all the diagonal elements. we can do this
  // using direct indexing into the storage vector since the diagonal
  // elements occupy the same positions in the storage vector for both
  // types. the increment is initialized to a value of 2 for matrices
  // of order 2 or greater.
  //
  else if (this_a.type_d == Integral::LOWER_TRIANGULAR ||
           this_a.type_d == Integral::UPPER_TRIANGULAR) {
    for (long row = 0, index = 0; row < nrows; row++) {
      det *= this_a.m_d(index);
      index += 2 + row;
    }
  }

  // for floating point types, use LU decomposition to quickly arrive
  // at the determinant.
  //
  // type: (type is FULL or SYMMETRIC) and (nrows > 3)
  //
  else if (this_a.type_d == Integral::FULL ||
           this_a.type_d == Integral::SYMMETRIC) {

    // for float and double matrices, we use LU decomposition to
    // compute the determinant as it is much more efficient
    //
#ifdef ISIP_TEMPLATE_fp

    // use the LU decomposition method to compute the determinant efficiently
    //
    det = this_a.determinantLU();

    // for other matrices, we use the minor method as LU decomposition is
    // defined only for float and double matrices
    //
#else
    det = this_a.determinantMinor();
#endif
  }

  // type: (type is SPARSE) and (nrows > 3)
  //
  else {

    // use the getMinor method to compute the determinant
    //
    det = this_a.determinantMinor();
  }

  // exit gracefully: return the value
  //
  return (TIntegral)det;
#endif
}

// explicit instantiations
//
template ISIP_TEMPLATE_T1
MMatrixMethods::determinant<ISIP_TEMPLATE_TARGET>(const MMatrix<ISIP_TEMPLATE_TARGET>&);
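// note: the following helper is an illustrative sketch and is not part of
// the ISIP library. it shows the idea assumed to be behind determinantLU():
// reduce the matrix with Gaussian elimination and partial pivoting, then
// take the product of the pivots, flipping the sign once per row swap.
// the function name and the plain row-major double array interface are
// assumptions made only for this example.
//
static double example_determinant_lu(double* a, long n) {

  // running determinant: product of pivots times (-1)^(number of swaps)
  //
  double det = 1.0;

  for (long k = 0; k < n; k++) {

    // partial pivoting: pick the row with the largest magnitude in column k
    //
    long p = k;
    for (long i = k + 1; i < n; i++) {
      double cur = (a[i * n + k] < 0) ? -a[i * n + k] : a[i * n + k];
      double best = (a[p * n + k] < 0) ? -a[p * n + k] : a[p * n + k];
      if (cur > best) {
        p = i;
      }
    }

    // a zero pivot means the matrix is singular
    //
    if (a[p * n + k] == 0.0) {
      return 0.0;
    }

    // swap rows k and p; each swap changes the sign of the determinant
    //
    if (p != k) {
      for (long j = 0; j < n; j++) {
        double tmp = a[k * n + j];
        a[k * n + j] = a[p * n + j];
        a[p * n + j] = tmp;
      }
      det = -det;
    }

    // accumulate the pivot and eliminate the entries below it
    //
    det *= a[k * n + k];
    for (long i = k + 1; i < n; i++) {
      double factor = a[i * n + k] / a[k * n + k];
      for (long j = k; j < n; j++) {
        a[i * n + j] -= factor * a[k * n + j];
      }
    }
  }

  // example: the 2 x 2 array {4, 7, 2, 6} gives 4 * 6 - 7 * 2 = 10
  //
  return det;
}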
// method: transpose
//
// arguments:
//  MMatrix<TScalar, TIntegral>& this: (output) class operand
//  const MMatrix<TScalar, TIntegral>& m: (input) matrix operand
//
// return: a boolean value indicating status
//
// this method computes the transpose of the input matrix and assigns
// it to the current matrix. note that this method will change the
// type of the matrix: an upper triangular matrix will become lower
// triangular, and a lower will become upper. all other types remain
// unchanged.
//
template<class TScalar, class TIntegral>
boolean MMatrixMethods::transpose(MMatrix<TScalar, TIntegral>& this_a,
                                  const MMatrix<TScalar, TIntegral>& m_a) {

  // type: DIAGONAL, SYMMETRIC
  // just assign the input matrix to the current matrix since the
  // transpose is the same.
  //
  if (m_a.type_d == Integral::DIAGONAL ||
      m_a.type_d == Integral::SYMMETRIC) {
    return this_a.assign(m_a);
  }

  // condition: the two input matrices are the same
  // if the current matrix is the same as the input matrix, assign the
  // current matrix to a temporary matrix and call transpose
  //
  if (&this_a == &m_a) {
    MMatrix<TScalar, TIntegral> tmp(this_a);
    return this_a.transpose(tmp);
  }

  // swap the row dimension and column dimension of the input matrix
  //
  long nrows = m_a.getNumColumns();
  long ncols = m_a.getNumRows();

  // type: FULL
  // for a full matrix, create space in the output matrix,
  // and manually copy the elements.
  //
  if (m_a.type_d == Integral::FULL) {
    this_a.setDimensions(nrows, ncols, false, m_a.type_d);
    for (long row = 0; row < nrows; row++) {
      for (long col = 0; col < ncols; col++) {
        this_a.m_d(this_a.index(row, col)) = m_a.m_d(m_a.index(col, row));
      }
    }
  }

  // type: LOWER_TRIANGULAR
  // our storage mode for lower and upper triangular matrices guarantees
  // that one is the transpose of the other.
  //
  else if (m_a.type_d == Integral::LOWER_TRIANGULAR) {
    this_a.setDimensions(nrows, ncols, false, Integral::UPPER_TRIANGULAR);
    this_a.m_d.assign(m_a.m_d);
  }

  // type: UPPER_TRIANGULAR
  // our storage mode for lower and upper triangular matrices guarantees
  // that one is the transpose of the other.
  //
  else if (m_a.type_d == Integral::UPPER_TRIANGULAR) {
    this_a.setDimensions(nrows, ncols, false, Integral::LOWER_TRIANGULAR);
    this_a.m_d.assign(m_a.m_d);
  }

  // type: SPARSE
  // to reach this point for a sparse matrix, we must already have
  // a copy of the matrix, so we simply have to swap rows and columns.
  //
  else {

    // set up a SPARSE matrix and copy the data. transform rows
    // and columns as needed.
    //
    this_a.setDimensions(nrows, ncols, false, Integral::SPARSE);
    this_a.m_d.assign(m_a.m_d);
    this_a.row_index_d.assign(m_a.col_index_d);
    this_a.col_index_d.assign(m_a.row_index_d);

    // sort the rows and columns
    //
    this_a.sortSparse();
  }

  // exit gracefully
  //
  return true;
}

// explicit instantiations
//
template boolean
MMatrixMethods::transpose<ISIP_TEMPLATE_TARGET>(MMatrix<ISIP_TEMPLATE_TARGET>&,
                                                const MMatrix<ISIP_TEMPLATE_TARGET>&);

// method: trace
//
// arguments:
//  const MMatrix<TScalar, TIntegral>& this: (input) class operand
//
// return: a TIntegral value containing the value of the trace
//
template<class TScalar, class TIntegral>
TIntegral MMatrixMethods::trace(const MMatrix<TScalar, TIntegral>& this_a) {

  // condition: non-square matrix
  //
  if (!this_a.isSquare()) {
    this_a.debug(L"this_a");
    return Error::handle(name(), L"trace", Error::ARG, __FILE__, __LINE__);
  }

  // declare local variables
  //
  long nrows = this_a.nrows_d;
  long ncols = this_a.ncols_d;
  TIntegral sum = 0;

  // type: FULL
  // do this efficiently using direct indexing