📄 partitioner.c
  for (unsigned int blk=0, last_elem_id=0; last_elem_id<max_elem_id; blk++)
    {
      last_elem_id = std::min((blk+1)*communication_blocksize, max_elem_id);
      const unsigned int first_elem_id = blk*communication_blocksize;

      std::fill (parent_processor_ids.begin(), parent_processor_ids.end(),
                 DofObject::invalid_processor_id);

      // first build up local contributions to parent_processor_ids
      MeshBase::element_iterator       not_it  = mesh.ancestor_elements_begin();
      const MeshBase::element_iterator not_end = mesh.ancestor_elements_end();

      bool have_parent_in_block = false;

      for ( ; not_it != not_end; ++not_it)
        {
#ifdef ENABLE_AMR
          Elem *parent = *not_it;

          const unsigned int parent_idx = parent->id();
          libmesh_assert (parent_idx < max_elem_id);

          if ((parent_idx >= first_elem_id) &&
              (parent_idx <  last_elem_id))
            {
              have_parent_in_block = true;
              unsigned short int parent_pid = DofObject::invalid_processor_id;

              for (unsigned int c=0; c<parent->n_children(); c++)
                parent_pid = std::min (parent_pid,
                                       parent->child(c)->processor_id());

              const unsigned int packed_idx = parent_idx - first_elem_id;
              libmesh_assert (packed_idx < parent_processor_ids.size());

              parent_processor_ids[packed_idx] = parent_pid;
            }
#else
          // without AMR there should be no inactive elements
          libmesh_error();
#endif
        }

      // then find the global minimum
      Parallel::min (parent_processor_ids);

      // and assign the ids, if we have a parent in this block.
      if (have_parent_in_block)
        for (not_it = mesh.ancestor_elements_begin();
             not_it != not_end; ++not_it)
          {
            Elem *parent = *not_it;

            const unsigned int parent_idx = parent->id();

            if ((parent_idx >= first_elem_id) &&
                (parent_idx <  last_elem_id))
              {
                const unsigned int packed_idx = parent_idx - first_elem_id;
                libmesh_assert (packed_idx < parent_processor_ids.size());

                const unsigned short int parent_pid =
                  parent_processor_ids[packed_idx];

                libmesh_assert (parent_pid != DofObject::invalid_processor_id);

                parent->processor_id() = parent_pid;
              }
          }
    }

  STOP_LOG("set_parent_processor_ids()","Partitioner");
}



void Partitioner::set_node_processor_ids(MeshBase& mesh)
{
  START_LOG("set_node_processor_ids()","Partitioner");

  // This function must be run on all processors at once
  parallel_only();

  // If we have any unpartitioned elements at this
  // stage there is a problem
  libmesh_assert (MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                                    mesh.unpartitioned_elements_end()) == 0);

//   const unsigned int orig_n_local_nodes = mesh.n_local_nodes();

//   std::cerr << "[" << libMesh::processor_id() << "]: orig_n_local_nodes="
//             << orig_n_local_nodes << std::endl;

  // Build up request sets.  Each node is currently owned by a processor because
  // it is connected to an element owned by that processor.  However, during the
  // repartitioning phase that element may have been assigned a new processor id, but
  // it is still resident on the original processor.  We need to know where to look
  // for new ids before assigning new ids, otherwise we may be asking the wrong processors
  // for the wrong information.
  //
  // The only remaining issue is what to do with unpartitioned nodes.  Since they are required
  // to live on all processors we can simply rely on ourselves to number them properly.
  std::vector<std::vector<unsigned int> >
    requested_node_ids(libMesh::n_processors());

  // Loop over all the nodes, count the ones on each processor.  We can skip ourself
  std::vector<unsigned int> ghost_nodes_from_proc(libMesh::n_processors(), 0);

  MeshBase::node_iterator       node_it  = mesh.nodes_begin();
  const MeshBase::node_iterator node_end = mesh.nodes_end();

  for (; node_it != node_end; ++node_it)
    {
      Node *node = *node_it;
      libmesh_assert(node);
      const unsigned int current_pid = node->processor_id();
      if (current_pid != libMesh::processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert(current_pid < ghost_nodes_from_proc.size());
          ghost_nodes_from_proc[current_pid]++;
        }
    }

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (unsigned int pid=0; pid != libMesh::n_processors(); ++pid)
    requested_node_ids[pid].reserve(ghost_nodes_from_proc[pid]);

  // We need to get the new pid for each node from the processor
  // which *currently* owns the node.  We can safely skip ourself
  for (node_it = mesh.nodes_begin(); node_it != node_end; ++node_it)
    {
      Node *node = *node_it;
      libmesh_assert(node);
      const unsigned int current_pid = node->processor_id();
      if (current_pid != libMesh::processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert(current_pid < requested_node_ids.size());
          libmesh_assert(requested_node_ids[current_pid].size() <
                         ghost_nodes_from_proc[current_pid]);
          requested_node_ids[current_pid].push_back(node->id());
        }

      // Unset any previously-set node processor ids
      node->invalidate_processor_id();
    }

  // Loop over all the active elements
  MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
  const MeshBase::element_iterator elem_end = mesh.active_elements_end();

  for ( ; elem_it != elem_end; ++elem_it)
    {
      Elem* elem = *elem_it;
      libmesh_assert(elem);

      libmesh_assert (elem->processor_id() != DofObject::invalid_processor_id);

      // For each node, set the processor ID to the min of
      // its current value and this Element's processor id.
      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        elem->get_node(n)->processor_id() = std::min(elem->get_node(n)->processor_id(),
                                                     elem->processor_id());
    }

  // And loop over the subactive elements, but don't reassign
  // nodes that are already active on another processor.
  MeshBase::element_iterator       sub_it  = mesh.subactive_elements_begin();
  const MeshBase::element_iterator sub_end = mesh.subactive_elements_end();

  for ( ; sub_it != sub_end; ++sub_it)
    {
      Elem* elem = *sub_it;
      libmesh_assert(elem);

      libmesh_assert (elem->processor_id() != DofObject::invalid_processor_id);

      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        if (elem->get_node(n)->processor_id() == DofObject::invalid_processor_id)
          elem->get_node(n)->processor_id() = elem->processor_id();
    }

  // Same for the inactive elements -- we will have already gotten most of these
  // nodes, *except* for the case of a parent with a subset of children which are
  // ghost elements.  In that case some of the parent nodes will not have been
  // properly handled yet
  MeshBase::element_iterator       not_it  = mesh.not_active_elements_begin();
  const MeshBase::element_iterator not_end = mesh.not_active_elements_end();

  for ( ; not_it != not_end; ++not_it)
    {
      Elem* elem = *not_it;
      libmesh_assert(elem);

      libmesh_assert (elem->processor_id() != DofObject::invalid_processor_id);

      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        if (elem->get_node(n)->processor_id() == DofObject::invalid_processor_id)
          elem->get_node(n)->processor_id() = elem->processor_id();
    }

#ifndef NDEBUG
  {
    // make sure all the nodes connected to any element have received a
    // valid processor id
    std::set<const Node*> used_nodes;

    MeshBase::element_iterator       all_it  = mesh.elements_begin();
    const MeshBase::element_iterator all_end = mesh.elements_end();

    for ( ; all_it != all_end; ++all_it)
      {
        Elem* elem = *all_it;
        libmesh_assert(elem);
        libmesh_assert(elem->processor_id() != DofObject::invalid_processor_id);

        for (unsigned int n=0; n<elem->n_nodes(); ++n)
          used_nodes.insert(elem->get_node(n));
      }

    for (node_it = mesh.nodes_begin(); node_it != node_end; ++node_it)
      {
        Node *node = *node_it;
        libmesh_assert(node);
        libmesh_assert(used_nodes.count(node));
        libmesh_assert(node->processor_id() != DofObject::invalid_processor_id);
      }
  }
#endif

  // Next set node ids from other processors, excluding self
  for (unsigned int p=1; p != libMesh::n_processors(); ++p)
    {
      // Trade my requests with processor procup and procdown
      unsigned int procup   = (libMesh::processor_id() + p) %
                               libMesh::n_processors();
      unsigned int procdown = (libMesh::n_processors() +
                               libMesh::processor_id() - p) %
                               libMesh::n_processors();
      std::vector<unsigned int> request_to_fill;
      Parallel::send_receive(procup, requested_node_ids[procup],
                             procdown, request_to_fill);

      // Fill those requests in-place
      for (unsigned int i=0; i != request_to_fill.size(); ++i)
        {
          Node *node = mesh.node_ptr(request_to_fill[i]);
          libmesh_assert(node);
          const unsigned int new_pid = node->processor_id();
          libmesh_assert (new_pid != DofObject::invalid_processor_id);
          libmesh_assert (new_pid < mesh.n_partitions()); // this is the correct test --
          request_to_fill[i] = new_pid;                   // the number of partitions may
        }                                                 // not equal the number of processors

      // Trade back the results
      std::vector<unsigned int> filled_request;
      Parallel::send_receive(procdown, request_to_fill,
                             procup,   filled_request);
      libmesh_assert(filled_request.size() == requested_node_ids[procup].size());

      // And copy the id changes we've now been informed of
      for (unsigned int i=0; i != filled_request.size(); ++i)
        {
          Node *node = mesh.node_ptr(requested_node_ids[procup][i]);
          libmesh_assert(node);

          libmesh_assert(filled_request[i] < mesh.n_partitions()); // this is the correct test --
          node->processor_id(filled_request[i]);                   // the number of partitions may
        }                                                          // not equal the number of processors
    }

  STOP_LOG("set_node_processor_ids()","Partitioner");
}
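The request-exchange loop at the end of set_node_processor_ids() pairs processors in a ring: on pass p, each processor sends its request list to the rank p places "up" and receives one from the rank p places "down", so every ordered pair communicates exactly once and no pass deadlocks. The following standalone sketch reproduces only that pattern with raw MPI, as a rough stand-in for what libMesh's Parallel::send_receive() does internally. It is not part of libMesh; the file name, dummy payload, and the explicit size/payload handshake are assumptions made for illustration, and in the real code the payload would be node ids going out and new processor ids coming back.

// ring_exchange_sketch.cpp -- illustrative only, not part of libMesh.
// A minimal MPI approximation of the procup/procdown exchange above,
// assuming an ordinary MPI environment; the payload here is dummy data.
#include <mpi.h>
#include <vector>
#include <cstdio>

int main (int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  int my_pid = 0, n_procs = 1;
  MPI_Comm_rank(MPI_COMM_WORLD, &my_pid);
  MPI_Comm_size(MPI_COMM_WORLD, &n_procs);

  // Stand-in for requested_node_ids: one request list per processor.
  // Here each list simply holds our own rank a few times.
  std::vector<std::vector<unsigned int> > requests(n_procs);
  for (int pid=0; pid<n_procs; ++pid)
    requests[pid].assign(3, static_cast<unsigned int>(my_pid));

  for (int p=1; p<n_procs; ++p)
    {
      // Same offsets as the libMesh loop: send to the processor p ranks
      // "up" from us, receive from the one p ranks "down", so every
      // ordered pair of processors is matched exactly once per sweep.
      const int procup   = (my_pid + p) % n_procs;
      const int procdown = (n_procs + my_pid - p) % n_procs;

      // Trade vector sizes first, then the payloads themselves --
      // roughly the bookkeeping Parallel::send_receive() hides.
      int send_size = static_cast<int>(requests[procup].size());
      int recv_size = 0;
      MPI_Sendrecv(&send_size, 1, MPI_INT, procup,   0,
                   &recv_size, 1, MPI_INT, procdown, 0,
                   MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      std::vector<unsigned int> request_to_fill(recv_size);
      MPI_Sendrecv(send_size ? &requests[procup][0] : NULL, send_size,
                   MPI_UNSIGNED, procup,   1,
                   recv_size ? &request_to_fill[0]  : NULL, recv_size,
                   MPI_UNSIGNED, procdown, 1,
                   MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      std::printf("[%d] received %d entries from %d\n",
                  my_pid, recv_size, procdown);
    }

  MPI_Finalize();
  return 0;
}

Because each pass uses a single MPI_Sendrecv in each direction, the exchange stays deadlock-free regardless of message sizes, which is the same property the original loop relies on when trading request_to_fill and filled_request.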