mesh_communication.c
#ifdef HAVE_MPI
  // This function must be run on all processors at once
  parallel_only();

  START_LOG("broadcast_bcs()","MeshCommunication");

  // Explicitly clear the boundary conditions on all
  // but processor 0.
  if (libMesh::processor_id() != 0)
    boundary_info.clear();

  // Build up the list of elements with boundary conditions
  {
    std::vector<unsigned int>       el_id;
    std::vector<unsigned short int> side_id;
    std::vector<short int>          bc_id;

    if (libMesh::processor_id() == 0)
      boundary_info.build_side_list (el_id, side_id, bc_id);

    libmesh_assert (el_id.size() == side_id.size());
    libmesh_assert (el_id.size() == bc_id.size());

    unsigned int n_bcs = el_id.size();

    // Broadcast the number of bcs to expect from processor 0.
    Parallel::broadcast (n_bcs);

    // Only continue if we have element BCs
    if (n_bcs > 0)
      {
        // Allocate space.  On CPU 0, these vectors should already have size n_bcs.
        el_id.resize   (n_bcs);
        side_id.resize (n_bcs);
        bc_id.resize   (n_bcs);

        // Broadcast the element identities
        Parallel::broadcast (el_id);

        // Broadcast the side ids for those elements
        Parallel::broadcast (side_id);

        // Broadcast the bc ids for each side
        Parallel::broadcast (bc_id);

        // Build the boundary_info structure if we aren't processor 0
        if (libMesh::processor_id() != 0)
          for (unsigned int e=0; e<n_bcs; e++)
            {
              libmesh_assert (el_id[e] < mesh.n_elem());

              const Elem* elem = mesh.elem(el_id[e]);

              libmesh_assert (elem != NULL);

              // sanity: be sure that the element returned by mesh.elem() really has id()==el_id[e]
              libmesh_assert (elem->id() == el_id[e]);

              libmesh_assert (side_id[e] < elem->n_sides());

              boundary_info.add_side (elem, side_id[e], bc_id[e]);
            }
      }
  }

  // Build up the list of nodes with boundary conditions
  {
    std::vector<unsigned int> node_id;
    std::vector<short int>    bc_id;

    if (libMesh::processor_id() == 0)
      boundary_info.build_node_list (node_id, bc_id);

    libmesh_assert (node_id.size() == bc_id.size());

    unsigned int n_bcs = node_id.size();

    // Broadcast the number of bcs to expect from processor 0.
    Parallel::broadcast (n_bcs);

    // Only continue if we have nodal BCs
    if (n_bcs > 0)
      {
        // Allocate space, again on CPU 0 this should be a no-op.
        node_id.resize (n_bcs);
        bc_id.resize   (n_bcs);

        // Broadcast the node ids
        Parallel::broadcast (node_id);

        // Broadcast the bc ids for each side
        Parallel::broadcast (bc_id);

        // Build the boundary_info structure if we aren't processor 0
        if (libMesh::processor_id() != 0)
          for (unsigned int n=0; n<n_bcs; n++)
            {
              libmesh_assert (node_id[n] < mesh.n_nodes());

              const Node* node = mesh.node_ptr (node_id[n]);

              libmesh_assert (node != NULL);

              // sanity: be sure that the node returned by mesh.node_ptr() really has id()==node_id[n]
              libmesh_assert (node->id() == node_id[n]);

              boundary_info.add_node (node, bc_id[n]);
            }
      }
  }

  STOP_LOG("broadcast_bcs()","MeshCommunication");

#else

  // no MPI but multiple processors?  Huh??
  libmesh_error();

#endif
}
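
// Illustrative aside (not part of the original file): broadcast_bcs() above
// uses the usual root-fills / everyone-resizes / everyone-receives idiom.
// A minimal sketch of that pattern, with a hypothetical vector "data":
//
//   unsigned int n = 0;
//   if (libMesh::processor_id() == 0)
//     n = data.size();           // only the root knows the true count
//   Parallel::broadcast (n);     // now every processor knows it
//   data.resize (n);             // no-op on the root, allocation elsewhere
//   Parallel::broadcast (data);  // identical contents everywhere afterwards
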
void MeshCommunication::allgather (ParallelMesh& mesh) const
{
  START_LOG("allgather()","MeshCommunication");

  // The mesh should know it's about to be serialized
  libmesh_assert (mesh.is_serial());

  this->allgather_mesh (mesh);
  this->allgather_bcs  (mesh, *(mesh.boundary_info));

  // Inform new elements of their neighbors,
  // while resetting all remote_elem links
  mesh.find_neighbors(true);

  STOP_LOG("allgather()","MeshCommunication");
}



#ifndef HAVE_MPI

void MeshCommunication::allgather_mesh (ParallelMesh&) const
{
  // NO MPI == one processor, no need for this method
  return;
}

void MeshCommunication::allgather_bcs (const ParallelMesh&, BoundaryInfo&) const
{
  // NO MPI == one processor, no need for this method
  return;
}

#else

void MeshCommunication::allgather_mesh (ParallelMesh& mesh) const
{
  // Check for quick return
  if (libMesh::n_processors() == 1)
    return;

  // This function must be run on all processors at once
  parallel_only();

  START_LOG ("allgather_mesh()","MeshCommunication");

  // Gather the number of nodes and elements on each processor.
  std::vector<unsigned int> n_nodes(libMesh::n_processors()),
                            n_elem (libMesh::n_processors());
  {
    std::vector<unsigned int> n_objects(2);
    n_objects[0] = mesh.n_local_nodes();
    n_objects[1] = mesh.n_local_elem();

    Parallel::allgather(n_objects);

    for (unsigned int p=0, idx=0; p<libMesh::n_processors(); p++)
      {
        n_nodes[p] = n_objects[idx++];
        n_elem[p]  = n_objects[idx++];
      }

    libmesh_assert (mesh.n_local_nodes() == n_nodes[libMesh::processor_id()]);
    libmesh_assert (mesh.n_local_elem()  == n_elem[libMesh::processor_id()]);
  }

  std::vector<unsigned int> node_offsets(libMesh::n_processors(), 0),
                            elem_offsets(libMesh::n_processors(), 0);

  // Compute the global sizes to cross-check the results of the
  // operations that follow.
  unsigned int global_n_nodes = n_nodes[0],
               global_n_elem  = n_elem[0];

  for (unsigned int p=1; p<libMesh::n_processors(); p++)
    {
      node_offsets[p] = node_offsets[p-1] + n_nodes[p-1];
      elem_offsets[p] = elem_offsets[p-1] + n_elem[p-1];

      global_n_nodes += n_nodes[p];
      global_n_elem  += n_elem[p];
    }

  //-------------------------------------------------
  // Gather the nodal coordinates from each processor.
  {
    std::vector<Real> xyz;
    xyz.reserve(3*n_nodes[libMesh::processor_id()]);

    ParallelMesh::node_iterator       it  = mesh.local_nodes_begin();
    const ParallelMesh::node_iterator end = mesh.local_nodes_end();

    for (; it != end; ++it)
      {
        libmesh_assert (*it != NULL);
        const Point &p = **it;

        xyz.push_back(p(0));
        xyz.push_back(p(1));
        xyz.push_back(p(2));
      }

    libmesh_assert (xyz.size() == 3*n_nodes[libMesh::processor_id()]);

    // Get values from other processors
    Parallel::allgather (xyz);

    // And add them to our mesh.
    for (unsigned int p=0; p<libMesh::n_processors(); p++)
      if (p == libMesh::processor_id()) continue; // We've already got our
                                                  // own local nodes!
      else
        {
          const unsigned int first_global_idx = node_offsets[p],
                             last_global_idx  = first_global_idx + n_nodes[p];

          // Extract the coordinates for each node belonging to processor p
          // and add it to our mesh.
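          // Illustrative note (not in the original): node_offsets[] holds prefix
          // sums of n_nodes[].  With, say, three processors owning 4, 6 and 5
          // nodes, n_nodes = {4,6,5} and node_offsets = {0,4,10}, so processor
          // 1's nodes carry global ids 4..9 and their coordinates occupy
          // xyz[3*4 .. 3*10-1].  (The counts here are made-up example values.)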
          for (unsigned int global_idx = first_global_idx; global_idx<last_global_idx; global_idx++)
            {
              libmesh_assert ((3*global_idx + 2) < xyz.size());

              Node *node = Node::build(xyz[3*global_idx + 0],
                                       xyz[3*global_idx + 1],
                                       xyz[3*global_idx + 2],
                                       global_idx).release();

              libmesh_assert (node != NULL);
              libmesh_assert (node->id() == global_idx);

              node->processor_id() = p;

              mesh.insert_node(node);
            }
        }

    // Check the result
    libmesh_assert (global_n_nodes == mesh.n_nodes());
  }

  //----------------------------------------------------
  // Gather the element connectivity from each processor.
  {
    // Get the sum of elem->n_nodes() for all local elements.  This
    // will allow for efficient preallocation.
    const unsigned int local_weight   = MeshTools::weight(mesh),
                       local_n_levels = MeshTools::n_local_levels(mesh);

    unsigned int global_n_levels = local_n_levels;
    Parallel::max (global_n_levels);

    // The conn array contains the information needed to construct each element.
    std::vector<int> conn;
    conn.reserve (Elem::PackedElem::header_size*n_elem[libMesh::processor_id()] + local_weight);

    for (unsigned int level=0; level<=local_n_levels; level++)
      {
        ParallelMesh::element_iterator       it  = mesh.local_level_elements_begin(level);
        const ParallelMesh::element_iterator end = mesh.local_level_elements_end(level);

        for (; it != end; ++it)
          {
            const Elem* elem = *it;
            libmesh_assert (elem != NULL);
            libmesh_assert (elem->level() == level);

            // We're not handling unpartitioned elements!
            libmesh_assert (elem->processor_id() != DofObject::invalid_processor_id);

            // Only local elements!
            libmesh_assert (elem->processor_id() == libMesh::processor_id());

            Elem::PackedElem::pack (conn, elem);
          }
      }

    // ...that was easy.
    libmesh_assert (conn.size() == Elem::PackedElem::header_size*n_elem[libMesh::processor_id()] + local_weight);

    // Get the size of the connectivity array on each processor
    std::vector<unsigned int> conn_size   (libMesh::n_processors(), 0),
                              conn_offset (libMesh::n_processors(), 0);

    Parallel::allgather (static_cast<unsigned int>(conn.size()), conn_size);

    for (unsigned int p=1; p<libMesh::n_processors(); p++)
      conn_offset[p] = conn_offset[p-1] + conn_size[p-1];

    // Get the element connectivity from all the other processors
    Parallel::allgather (conn);

    // ...and add them to our mesh.
    // This is a little tricky.  We need to ensure that parents are added before children.
    // So we need to add elements level-wise to handle, for example, the case where a child on
    // processor [0] has a parent on processor [1].  But we also need to add the elements
    // processor-wise so that we can set the processor_id() properly.
    // So, loop on levels/processors
    for (unsigned int level=0; level<=global_n_levels; level++)
      for (unsigned int p=0; p<libMesh::n_processors(); p++)
        if (p != libMesh::processor_id()) // We've already got our
          {                               // own local elements!
            unsigned int cnt = conn_offset[p]; // counter into the conn[] array.

#ifndef NDEBUG
            // The first and last global indices are only used for error-checking.
            // Avoid declaring them unless asserts are enabled.
            const unsigned int first_global_idx = elem_offsets[p],
                               last_global_idx  = first_global_idx + n_elem[p];
#endif

            // Process each element for processor p.
            // Note this must work in the case when conn_size[p] == 0.
            while (cnt < (conn_offset[p] + conn_size[p]))
              {
                // Unpack the element
                Elem::PackedElem packed_elem (conn.begin()+cnt);

                // We require contiguous numbering on each processor
                // for elements.
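                // Illustrative note (not in the original): "contiguous
                // numbering" means processor p's elements occupy the id range
                // [elem_offsets[p], elem_offsets[p] + n_elem[p]), which is
                // exactly what the two asserts below check.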
                libmesh_assert (packed_elem.id() >= first_global_idx);
                libmesh_assert (packed_elem.id() <  last_global_idx);

#ifdef ENABLE_AMR
                libmesh_assert ((packed_elem.level() == 0) || (packed_elem.parent_id() != -1));

                // Ignore elements not matching the current level.
                if (packed_elem.level() > level)
                  // skip all entries in the conn array for this element.
                  cnt += Elem::PackedElem::header_size + packed_elem.n_nodes();
                else
#endif
                  if (packed_elem.level() < level ||    // we should already have
                      (packed_elem.level() == level &&  // lower level and some
                       mesh.elem(packed_elem.id())))    // ghost elements
                    {
                      // No need to construct a dummy element of this type
#ifndef NDEBUG
                      // The elem information need not be extracted unless asserts are enabled.
                      const Elem* elem = mesh.elem(packed_elem.id());
#endif
                      libmesh_assert (elem);
#ifdef ENABLE_AMR
                      libmesh_assert (!elem->parent() ||
                                      elem->parent()->id() == static_cast<unsigned int>(packed_elem.parent_id()));
                      libmesh_assert (elem->p_level()           == packed_elem.p_level());
                      libmesh_assert (elem->refinement_flag()   == packed_elem.refinement_flag());
                      libmesh_assert (elem->p_refinement_flag() == packed_elem.p_refinement_flag());
#endif
                      libmesh_assert (elem->type()         == packed_elem.type());
                      libmesh_assert (elem->subdomain_id() == packed_elem.subdomain_id());
                      libmesh_assert (elem->id()           == packed_elem.id());
                      libmesh_assert (elem->n_nodes()      == packed_elem.n_nodes());

                      cnt += Elem::PackedElem::header_size + packed_elem.n_nodes();
                    }
                  // Those are the easy cases...
                  // now elem_level == level and we don't have it
                  else
                    {
                      // Declare the element we will add
                      Elem* elem = NULL;

#ifdef ENABLE_AMR
                      // Maybe find its parent
                      if (level > 0)
                        {
                          Elem* my_parent = mesh.elem(packed_elem.parent_id());

                          // If the parent was not previously added, we
                          // cannot continue.
                          if (my_parent == NULL)
                            {
                              std::cerr << "Parent element with ID " << packed_elem.parent_id()
                                        << " not found." << std::endl;
                              libmesh_error();
                            }

                          elem = packed_elem.unpack (mesh, my_parent);
                        }
                      else
                        {
                          libmesh_assert (packed_elem.parent_id() == -1);
#endif // ENABLE_AMR

                          elem = packed_elem.unpack (mesh);

#ifdef ENABLE_AMR
                        }
#endif

                      // Good to go.  Add to the mesh.
                      libmesh_assert (elem);
                      mesh.insert_elem(elem);
                      libmesh_assert (elem->n_nodes() == packed_elem.n_nodes());

                      cnt += Elem::PackedElem::header_size + packed_elem.n_nodes();
                    } // end elem_level == level
              }
          }

    // Check the result
    libmesh_assert (global_n_elem == mesh.n_elem());
  }

  // All done!
  STOP_LOG ("allgather_mesh()","MeshCommunication");
}



void MeshCommunication::allgather_bcs (const ParallelMesh& mesh,
                                       BoundaryInfo& boundary_info) const
{
  // Check for quick return
  if (libMesh::n_processors() == 1)
    return;

  // This function must be run on all processors at once
  parallel_only();

  START_LOG ("allgather_bcs()","MeshCommunication");

  std::vector<int> xfer_elem_bcs, xfer_node_bcs;

  // Get the element boundary conditions