petsc_vector.c
                          v_local->_vec, is, &scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  // Perform the scatter
#if PETSC_VERSION_LESS_THAN(2,3,3)

  ierr = VecScatterBegin(_vec, v_local->_vec, INSERT_VALUES,
                         SCATTER_FORWARD, scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterEnd  (_vec, v_local->_vec, INSERT_VALUES,
                         SCATTER_FORWARD, scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

#else

  // API argument order change in PETSc 2.3.3
  ierr = VecScatterBegin(scatter, _vec, v_local->_vec,
                         INSERT_VALUES, SCATTER_FORWARD);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterEnd  (scatter, _vec, v_local->_vec,
                         INSERT_VALUES, SCATTER_FORWARD);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

#endif

  // Clean up
  ierr = ISDestroy (is);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterDestroy(scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);
}



template <typename T>
void PetscVector<T>::localize (NumericVector<T>& v_local_in,
                               const std::vector<unsigned int>& send_list) const
{
  PetscVector<T>* v_local = dynamic_cast<PetscVector<T>*>(&v_local_in);

  libmesh_assert (v_local != NULL);
  libmesh_assert (v_local->local_size() == this->size());
  libmesh_assert (send_list.size() <= v_local->size());

  int ierr=0;
  const int n_sl = send_list.size();

  IS is;
  VecScatter scatter;

  std::vector<int> idx(n_sl);

  for (int i=0; i<n_sl; i++)
    idx[i] = static_cast<int>(send_list[i]);

  // Create the index set & scatter object
  if (idx.empty())
    ierr = ISCreateGeneral(libMesh::COMM_WORLD, n_sl, PETSC_NULL, &is);
  else
    ierr = ISCreateGeneral(libMesh::COMM_WORLD, n_sl, &idx[0], &is);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterCreate(_vec,          is,
                          v_local->_vec, is,
                          &scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  // Perform the scatter
#if PETSC_VERSION_LESS_THAN(2,3,3)

  ierr = VecScatterBegin(_vec, v_local->_vec, INSERT_VALUES,
                         SCATTER_FORWARD, scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterEnd  (_vec, v_local->_vec, INSERT_VALUES,
                         SCATTER_FORWARD, scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

#else

  // API argument order change in PETSc 2.3.3
  ierr = VecScatterBegin(scatter, _vec, v_local->_vec,
                         INSERT_VALUES, SCATTER_FORWARD);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterEnd  (scatter, _vec, v_local->_vec,
                         INSERT_VALUES, SCATTER_FORWARD);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

#endif

  // Clean up
  ierr = ISDestroy (is);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterDestroy(scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);
}



template <typename T>
void PetscVector<T>::localize (const unsigned int first_local_idx,
                               const unsigned int last_local_idx,
                               const std::vector<unsigned int>& send_list)
{
  // Only good for serial vectors.
  libmesh_assert (this->size() == this->local_size());
  libmesh_assert (last_local_idx > first_local_idx);
  libmesh_assert (send_list.size() <= this->size());
  libmesh_assert (last_local_idx < this->size());

  const unsigned int size       = this->size();
  const unsigned int local_size = (last_local_idx - first_local_idx + 1);

  int ierr=0;

  // Don't bother for serial cases
  if ((first_local_idx == 0) &&
      (local_size == size))
    return;

  // Build a parallel vector, initialize it with the local
  // parts of (*this)
  PetscVector<T> parallel_vec;

  parallel_vec.init (size, local_size);

  // Copy part of *this into the parallel_vec
  {
    IS is;
    VecScatter scatter;

    // Create idx, idx[i] = i+first_local_idx;
    std::vector<int> idx(local_size);
    Utility::iota (idx.begin(), idx.end(), first_local_idx);

    // Create the index set & scatter object
    ierr = ISCreateGeneral(libMesh::COMM_WORLD, local_size, &idx[0], &is);
    CHKERRABORT(libMesh::COMM_WORLD,ierr);

    ierr = VecScatterCreate(_vec,              is,
                            parallel_vec._vec, is,
                            &scatter);
    CHKERRABORT(libMesh::COMM_WORLD,ierr);

    // Perform the scatter
#if PETSC_VERSION_LESS_THAN(2,3,3)

    ierr = VecScatterBegin(_vec, parallel_vec._vec, INSERT_VALUES,
                           SCATTER_FORWARD, scatter);
    CHKERRABORT(libMesh::COMM_WORLD,ierr);

    ierr = VecScatterEnd  (_vec, parallel_vec._vec, INSERT_VALUES,
                           SCATTER_FORWARD, scatter);
    CHKERRABORT(libMesh::COMM_WORLD,ierr);

#else

    // API argument order change in PETSc 2.3.3
    ierr = VecScatterBegin(scatter, _vec, parallel_vec._vec,
                           INSERT_VALUES, SCATTER_FORWARD);
    CHKERRABORT(libMesh::COMM_WORLD,ierr);

    ierr = VecScatterEnd  (scatter, _vec, parallel_vec._vec,
                           INSERT_VALUES, SCATTER_FORWARD);
    CHKERRABORT(libMesh::COMM_WORLD,ierr);

#endif

    // Clean up
    ierr = ISDestroy (is);
    CHKERRABORT(libMesh::COMM_WORLD,ierr);

    ierr = VecScatterDestroy(scatter);
    CHKERRABORT(libMesh::COMM_WORLD,ierr);
  }

  // localize like normal
  parallel_vec.close();
  parallel_vec.localize (*this, send_list);
  this->close();
}



template <typename T>
void PetscVector<T>::localize (std::vector<T>& v_local) const
{
  // This function must be run on all processors at once
  parallel_only();

  int ierr=0;
  const int n  = this->size();
  const int nl = this->local_size();
  PetscScalar *values;

  v_local.clear();
  v_local.resize(n, 0.);

  ierr = VecGetArray (_vec, &values);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  unsigned int ioff = first_local_index();

  for (int i=0; i<nl; i++)
    v_local[i+ioff] = static_cast<T>(values[i]);

  ierr = VecRestoreArray (_vec, &values);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  Parallel::sum(v_local);
}



// Full specialization for Real datatypes
#ifdef USE_REAL_NUMBERS

template <>
void PetscVector<Real>::localize_to_one (std::vector<Real>& v_local,
                                         const unsigned int pid) const
{
  int ierr=0;
  const int n  = size();
  const int nl = local_size();
  PetscScalar *values;

  v_local.resize(n);

  // only one processor
  if (n == nl)
    {
      ierr = VecGetArray (_vec, &values);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);

      for (int i=0; i<n; i++)
        v_local[i] = static_cast<Real>(values[i]);

      ierr = VecRestoreArray (_vec, &values);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);
    }

  // otherwise multiple processors
  else
    {
      unsigned int ioff = this->first_local_index();
      std::vector<Real> local_values (n, 0.);

      {
        ierr = VecGetArray (_vec, &values);
        CHKERRABORT(libMesh::COMM_WORLD,ierr);

        for (int i=0; i<nl; i++)
          local_values[i+ioff] = static_cast<Real>(values[i]);

        ierr = VecRestoreArray (_vec, &values);
        CHKERRABORT(libMesh::COMM_WORLD,ierr);
      }

      MPI_Reduce (&local_values[0], &v_local[0], n, MPI_REAL, MPI_SUM,
                  pid, libMesh::COMM_WORLD);
    }
}
#endif



// Full specialization for Complex datatypes
#ifdef USE_COMPLEX_NUMBERS

template <>
void PetscVector<Complex>::localize_to_one (std::vector<Complex>& v_local,
                                            const unsigned int pid) const
{
  int ierr=0;
  const int n  = size();
  const int nl = local_size();
  PetscScalar *values;

  v_local.resize(n);

  for (int i=0; i<n; i++)
    v_local[i] = 0.;

  // only one processor
  if (n == nl)
    {
      ierr = VecGetArray (_vec, &values);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);

      for (int i=0; i<n; i++)
        v_local[i] = static_cast<Complex>(values[i]);

      ierr = VecRestoreArray (_vec, &values);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);
    }

  // otherwise multiple processors
  else
    {
      unsigned int ioff = this->first_local_index();

      /* in here the local values are stored, acting as send buffer for MPI
       * initialize to zero, since we collect using MPI_SUM
       */
      std::vector<Real> real_local_values(n, 0.);
      std::vector<Real> imag_local_values(n, 0.);

      {
        ierr = VecGetArray (_vec, &values);
        CHKERRABORT(libMesh::COMM_WORLD,ierr);

        // provide my local share to the real and imag buffers
        for (int i=0; i<nl; i++)
          {
            real_local_values[i+ioff] = static_cast<Complex>(values[i]).real();
            imag_local_values[i+ioff] = static_cast<Complex>(values[i]).imag();
          }

        ierr = VecRestoreArray (_vec, &values);
        CHKERRABORT(libMesh::COMM_WORLD,ierr);
      }

      /* have buffers of the real and imaginary part of v_local.
       * Once MPI_Reduce() collected all the real and imaginary
       * parts in these std::vector<double>, the values can be
       * copied to v_local
       */
      std::vector<Real> real_v_local(n);
      std::vector<Real> imag_v_local(n);

      // collect entries from other proc's in real_v_local, imag_v_local
      MPI_Reduce (&real_local_values[0], &real_v_local[0], n,
                  MPI_DOUBLE, MPI_SUM, pid, libMesh::COMM_WORLD);

      MPI_Reduce (&imag_local_values[0], &imag_v_local[0], n,
                  MPI_DOUBLE, MPI_SUM, pid, libMesh::COMM_WORLD);

      // copy real_v_local and imag_v_local to v_local
      for (int i=0; i<n; i++)
        v_local[i] = Complex(real_v_local[i], imag_v_local[i]);
    }
}
#endif



template <typename T>
void PetscVector<T>::print_matlab (const std::string name) const
{
  libmesh_assert (this->initialized());
  libmesh_assert (this->closed());

  int ierr=0;
  PetscViewer petsc_viewer;

  ierr = PetscViewerCreate (libMesh::COMM_WORLD, &petsc_viewer);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  /**
   * Create an ASCII file containing the vector
   * if a filename was provided.
   */
  if (name != "NULL")
    {
      ierr = PetscViewerASCIIOpen( libMesh::COMM_WORLD,
                                   name.c_str(),
                                   &petsc_viewer);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);

      ierr = PetscViewerSetFormat (petsc_viewer,
                                   PETSC_VIEWER_ASCII_MATLAB);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);

      ierr = VecView (_vec, petsc_viewer);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);
    }

  /**
   * Otherwise the vector will be dumped to the screen.
   */
  else
    {
      ierr = PetscViewerSetFormat (PETSC_VIEWER_STDOUT_WORLD,
                                   PETSC_VIEWER_ASCII_MATLAB);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);

      ierr = VecView (_vec, PETSC_VIEWER_STDOUT_WORLD);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);
    }

  /**
   * Destroy the viewer.
   */
  ierr = PetscViewerDestroy (petsc_viewer);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);
}



template <typename T>
void PetscVector<T>::create_subvector(NumericVector<T>& subvector,
                                      const std::vector<unsigned int>& rows) const
{
  // PETSc data structures
  IS parent_is, subvector_is;
  VecScatter scatter;
  int ierr = 0;

  // Make sure the passed in subvector is really a PetscVector
  PetscVector<T>* petsc_subvector = dynamic_cast<PetscVector<T>*>(&subvector);
  libmesh_assert(petsc_subvector != NULL);

  // If the petsc_subvector is already initialized, we assume that the
  // user has already allocated the *correct* amount of space for it.
  // If not, we use the appropriate PETSc routines to initialize it.
  if (!petsc_subvector->initialized())
    {
      // Initialize the petsc_subvector to have enough space to hold
      // the entries which will be scattered into it.  Note: such an
      // init() function (where we let PETSc decide the number of local
      // entries) is not currently offered by the PetscVector
      // class.  Should we differentiate here between sequential and
      // parallel vector creation based on libMesh::n_processors() ?
      ierr = VecCreateMPI(libMesh::COMM_WORLD,
                          PETSC_DECIDE,               // n_local
                          rows.size(),                // n_global
                          &(petsc_subvector->_vec));
      CHKERRABORT(libMesh::COMM_WORLD,ierr);

      ierr = VecSetFromOptions (petsc_subvector->_vec);
      CHKERRABORT(libMesh::COMM_WORLD,ierr);

      // Mark the subvector as initialized
      petsc_subvector->_is_initialized = true;
    }

  // Use iota to fill an array with entries [0,1,2,...,rows.size()-1]
  std::vector<int> idx(rows.size());
  Utility::iota (idx.begin(), idx.end(), 0);

  // Construct index sets
  ierr = ISCreateGeneral(libMesh::COMM_WORLD,
                         rows.size(),
                         (int*) &rows[0],
                         &parent_is);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = ISCreateGeneral(libMesh::COMM_WORLD,
                         rows.size(),
                         (int*) &idx[0],
                         &subvector_is);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  // Construct the scatter object
  ierr = VecScatterCreate(this->_vec,
                          parent_is,
                          petsc_subvector->_vec,
                          subvector_is,
                          &scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  // Actually perform the scatter
#if PETSC_VERSION_LESS_THAN(2,3,3)

  ierr = VecScatterBegin(this->_vec, petsc_subvector->_vec,
                         INSERT_VALUES, SCATTER_FORWARD, scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterEnd(this->_vec, petsc_subvector->_vec,
                       INSERT_VALUES, SCATTER_FORWARD, scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

#else

  // API argument order change in PETSc 2.3.3
  ierr = VecScatterBegin(scatter, this->_vec, petsc_subvector->_vec,
                         INSERT_VALUES, SCATTER_FORWARD);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterEnd(scatter, this->_vec, petsc_subvector->_vec,
                       INSERT_VALUES, SCATTER_FORWARD);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

#endif

  // Clean up
  ierr = ISDestroy(parent_is);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = ISDestroy(subvector_is);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);

  ierr = VecScatterDestroy(scatter);
  CHKERRABORT(libMesh::COMM_WORLD,ierr);
}



//------------------------------------------------------------------
// Explicit instantiations
template class PetscVector<Number>;


#endif // #ifdef HAVE_PETSC
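For context, a minimal usage sketch of the localization and output routines defined above; the driver function name, the header path, and the output filename are assumptions for illustration and are not part of the original file:

// Hypothetical driver (not from petsc_vector.c): gather a distributed
// PetscVector onto every processor, onto processor 0 only, and dump it
// in MATLAB-readable ASCII.  Header path and names are assumed.
#include "petsc_vector.h"
#include <vector>

void localize_example (PetscVector<Number>& global_vec)
{
  // Full copy of the global vector on every processor.
  std::vector<Number> all_values;
  global_vec.localize (all_values);

  // Copy gathered on processor 0 only (e.g. for serial post-processing).
  std::vector<Number> root_values;
  global_vec.localize_to_one (root_values, 0);

  // Write the vector to an ASCII file in MATLAB format.
  global_vec.print_matlab ("global_vec.m");
}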