📄 parmetis_partitioner.C
字号:
// $Id: parmetis_partitioner.C 2915 2008-07-08 19:14:50Z jwpeterson $

// The libMesh Finite Element Library.
// Copyright (C) 2002-2007  Benjamin S. Kirk, John W. Peterson

// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA



// C++ Includes   -----------------------------------

// Local Includes -----------------------------------
#include "libmesh_config.h"
#include "mesh_base.h"
#include "parallel.h"
#include "mesh_tools.h"
#include "mesh_communication.h"
#include "parmetis_partitioner.h"
#include "metis_partitioner.h"
#include "libmesh_logging.h"
#include "elem.h"

#ifdef HAVE_PARMETIS

// Include the MPI header files, which must be accessible for
// ParMETIS to work properly.
#include "mpi.h"

// Include the ParMETIS header files.  ParMETIS is a C library, so the
// declarations need C linkage; wrapping them in a namespace keeps its
// unprefixed names from colliding with ours.
namespace Parmetis {
  extern "C" {
#   include "parmetis.h"
  }
}

#endif // #ifdef HAVE_PARMETIS
else ...// ------------------------------------------------------------// ParmetisPartitioner implementationvoid ParmetisPartitioner::_do_partition (MeshBase& mesh, const unsigned int n_sbdmns){ this->_do_repartition (mesh, n_sbdmns);// libmesh_assert (n_sbdmns > 0);// // Check for an easy return// if (n_sbdmns == 1)// {// this->single_partition (mesh);// return;// }// // This function must be run on all processors at once// parallel_only();// // What to do if the Parmetis library IS NOT present// #ifndef HAVE_PARMETIS// here();// std::cerr << "ERROR: The library has been built without" << std::endl// << "Parmetis support. Using a Metis" << std::endl// << "partitioner instead!" << std::endl;// MetisPartitioner mp;// mp.partition (mesh, n_sbdmns); // // What to do if the Parmetis library IS present// #else// START_LOG("partition()", "ParmetisPartitioner");// // Initialize the data structures required by ParMETIS// this->initialize (mesh, n_sbdmns);// // build the graph corresponding to the mesh// this->build_graph (mesh); // // Partition the graph// MPI_Comm mpi_comm = libMesh::COMM_WORLD; // // Call the ParMETIS k-way partitioning algorithm.// Parmetis::ParMETIS_V3_PartKway(&_vtxdist[0], &_xadj[0], &_adjncy[0], &_vwgt[0], NULL,// &_wgtflag, &_numflag, &_ncon, &_nparts, &_tpwgts[0],// &_ubvec[0], &_options[0], &_edgecut,// &_part[0],// &mpi_comm);// // Assign the returned processor ids// this->assign_partitioning (mesh); // STOP_LOG ("partition()", "ParmetisPartitioner"); // #endif // #ifndef HAVE_PARMETIS ... else ... }void ParmetisPartitioner::_do_repartition (MeshBase& mesh, const unsigned int n_sbdmns){ libmesh_assert (n_sbdmns > 0); // Check for an easy return if (n_sbdmns == 1) { this->single_partition(mesh); return; } // This function must be run on all processors at once parallel_only();// What to do if the Parmetis library IS NOT present#ifndef HAVE_PARMETIS here(); std::cerr << "ERROR: The library has been built without" << std::endl << "Parmetis support. 
Using a Metis" << std::endl << "partitioner instead!" << std::endl; MetisPartitioner mp; mp.partition (mesh, n_sbdmns); // What to do if the Parmetis library IS present#else // Revert to METIS on one processor. if (libMesh::n_processors() == 1) { MetisPartitioner mp; mp.partition (mesh, n_sbdmns); return; } START_LOG("repartition()", "ParmetisPartitioner"); // Initialize the data structures required by ParMETIS this->initialize (mesh, n_sbdmns); // Make sure all processors have some active local elements. { bool all_have_active_elements = true; for (unsigned int pid=0; pid<_n_active_elem_on_proc.size(); pid++) if (!_n_active_elem_on_proc[pid]) all_have_active_elements = false; // Parmetis will not work unless each processor has some // elements. Specifically, it will abort when passed a NULL // partition array on *any* of the processors. if (!all_have_active_elements) { if (!mesh.is_serial()) libmesh_error(); // Cowardly revert to METIS, although this requires a serial mesh STOP_LOG ("repartition()", "ParmetisPartitioner"); MetisPartitioner mp; mp.partition (mesh, n_sbdmns); return; } } // build the graph corresponding to the mesh this->build_graph (mesh); // Partition the graph std::vector<int> vsize(_vwgt.size(), 1); float itr = 1000000.0; MPI_Comm mpi_comm = libMesh::COMM_WORLD; // Call the ParMETIS adaptive repartitioning method. This respects the // original partitioning when computing the new partitioning so as to // minimize the required data redistribution. Parmetis::ParMETIS_V3_AdaptiveRepart(_vtxdist.empty() ? NULL : &_vtxdist[0], _xadj.empty() ? NULL : &_xadj[0], _adjncy.empty() ? NULL : &_adjncy[0], _vwgt.empty() ? NULL : &_vwgt[0], vsize.empty() ? NULL : &vsize[0], NULL, &_wgtflag, &_numflag, &_ncon, &_nparts, _tpwgts.empty() ? NULL : &_tpwgts[0], _ubvec.empty() ? NULL : &_ubvec[0], &itr, &_options[0], &_edgecut, _part.empty() ? 
NULL : &_part[0], &mpi_comm); // Assign the returned processor ids this->assign_partitioning (mesh); STOP_LOG ("repartition()", "ParmetisPartitioner"); #endif // #ifndef HAVE_PARMETIS ... else ... }// Only need to compile these methods if ParMETIS is present#ifdef HAVE_PARMETISvoid ParmetisPartitioner::initialize (const MeshBase& mesh, const unsigned int n_sbdmns){ const unsigned int n_active_local_elem = mesh.n_active_local_elem(); // Set parameters. _wgtflag = 2; // weights on vertices only _ncon = 1; // one weight per vertex _numflag = 0; // C-style 0-based numbering _nparts = static_cast<int>(n_sbdmns); // number of subdomains to create _edgecut = 0; // the numbers of edges cut by the // partition // Initialize data structures for ParMETIS _vtxdist.resize (libMesh::n_processors()+1); std::fill (_vtxdist.begin(), _vtxdist.end(), 0); _tpwgts.resize (_nparts); std::fill (_tpwgts.begin(), _tpwgts.end(), 1./_nparts); _ubvec.resize (_ncon); std::fill (_ubvec.begin(), _ubvec.end(), 1.); _part.resize (n_active_local_elem); std::fill (_part.begin(), _part.end(), 0); _options.resize (5); _vwgt.resize (n_active_local_elem); // Set the options _options[0] = 1; // don't use default options _options[1] = 0; // default (level of timing) _options[2] = 15; // random seed (default) _options[3] = 2; // processor distribution and subdomain distribution are decoupled // Find the number of active elements on each processor. We cannot use // mesh.n_active_elem_on_proc(pid) since that only returns the number of // elements assigned to pid which are currently stored on the calling // processor. This will not in general be correct for parallel meshes // when (pid!=libMesh::processor_id()). _n_active_elem_on_proc.resize(libMesh::n_processors()); Parallel::allgather(n_active_local_elem, _n_active_elem_on_proc); // count the total number of active elements in the mesh. 
Note we cannot // use mesh.n_active_elem() in general since this only returns the number // of active elements which are stored on the calling processor. // We should not use n_active_elem for any allocation because that will // be inheritly unscalable, but it can be useful for libmesh_assertions. unsigned int n_active_elem=0; // Set up the vtxdist array. This will be the same on each processor. // ***** Consult the Parmetis documentation. ***** libmesh_assert (_vtxdist.size() == libMesh::n_processors()+1); libmesh_assert (_vtxdist[0] == 0); for (unsigned int pid=0; pid<libMesh::n_processors(); pid++) { _vtxdist[pid+1] = _vtxdist[pid] + _n_active_elem_on_proc[pid]; n_active_elem += _n_active_elem_on_proc[pid]; } libmesh_assert (_vtxdist.back() == static_cast<int>(n_active_elem)); // ParMetis expects the elements to be numbered in contiguous blocks // by processor, i.e. [0, ne0), [ne0, ne0+ne1), ... // Since we only partition active elements we should have no expectation // that we currently have such a distribution. So we need to create it. // Also, at the same time we are going to map all the active elements into a globally // unique range [0,n_active_elem) which is *independent* of the current partitioning. // This can be fed to ParMetis as the initial partitioning of the subdomains (decoupled // from the partitioning of the objects themselves). This allows us to get the same // resultant partitioning independed of the input partitioning. MeshTools::BoundingBox bbox = MeshTools::bounding_box(mesh); _global_index_by_pid_map.clear(); // Maps active element ids into a contiguous range independent of partitioning. 
// (only needs local scope) std::map<unsigned int, unsigned int> global_index_map; { std::vector<unsigned int> global_index; // create the mapping which is contiguous by processor for (unsigned int pid=0, pid_offset=0; pid<libMesh::n_processors(); pid++) { MeshBase::const_element_iterator it = mesh.active_pid_elements_begin(pid); const MeshBase::const_element_iterator end = mesh.active_pid_elements_end(pid); // note that we may not have all (or any!) the active elements which belong on this processor, // but by calling this on all processors a unique range in [0,_n_active_elem_on_proc[pid]) // is constructed. Only the indices for the elements we pass in are returned in the array. MeshCommunication().find_global_indices (bbox, it, end, global_index); for (unsigned int cnt=0; it != end; ++it) { const Elem *elem = *it; libmesh_assert (!_global_index_by_pid_map.count(elem->id())); libmesh_assert (cnt < global_index.size()); libmesh_assert (global_index[cnt] < _n_active_elem_on_proc[pid]); _global_index_by_pid_map[elem->id()] = global_index[cnt++] + pid_offset; } pid_offset += _n_active_elem_on_proc[pid]; }
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -