📄 parallel_ghost_sync.h
      // Trade back the results
      std::vector<typename SyncFunctor::datum> received_data;
      Parallel::send_receive(procdown, data, procup, received_data);
      libmesh_assert(requested_objs_x[procup].size() == received_data.size());

      // Let the user process the results
      sync.act_on_data(requested_objs_id[procup], received_data);
    }
}



template <typename Iterator, typename SyncFunctor>
void sync_dofobject_data_by_id(const Iterator& range_begin,
                               const Iterator& range_end,
                               SyncFunctor&    sync)
{
  // This function must be run on all processors at once
  parallel_only();

  // Count the objects to ask each processor about
  std::vector<unsigned int>
    ghost_objects_from_proc(libMesh::n_processors(), 0);

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObject *obj = *it;
      libmesh_assert (obj);
      unsigned int obj_procid = obj->processor_id();
      libmesh_assert (obj_procid != DofObject::invalid_processor_id);

      ghost_objects_from_proc[obj_procid]++;
    }

  // Request sets to send to each processor
  std::vector<std::vector<unsigned int> >
    requested_objs_id(libMesh::n_processors());

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (unsigned int p=0; p != libMesh::n_processors(); ++p)
    if (p != libMesh::processor_id())
      {
        requested_objs_id[p].reserve(ghost_objects_from_proc[p]);
      }

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObject *obj = *it;
      unsigned int obj_procid = obj->processor_id();
      if (obj_procid == libMesh::processor_id())
        continue;

      requested_objs_id[obj_procid].push_back(obj->id());
    }

  // Trade requests with other processors
  for (unsigned int p=1; p != libMesh::n_processors(); ++p)
    {
      // Trade my requests with processor procup and procdown
      unsigned int procup = (libMesh::processor_id() + p) %
                             libMesh::n_processors();
      unsigned int procdown = (libMesh::n_processors() +
                               libMesh::processor_id() - p) %
                              libMesh::n_processors();

      std::vector<unsigned int> request_to_fill_id;
      Parallel::send_receive(procup, requested_objs_id[procup],
                             procdown, request_to_fill_id);

      // Gather whatever data the user wants
      std::vector<typename SyncFunctor::datum> data;
      sync.gather_data(request_to_fill_id, data);

      // Trade back the results
      std::vector<typename SyncFunctor::datum> received_data;
      Parallel::send_receive(procdown, data, procup, received_data);
      libmesh_assert(requested_objs_id[procup].size() == received_data.size());

      // Let the user process the results
      sync.act_on_data(requested_objs_id[procup], received_data);
    }
}



// If there's no refined elements, there's nothing to sync
#ifdef ENABLE_AMR
template <typename Iterator, typename SyncFunctor>
void sync_element_data_by_parent_id(MeshBase&       mesh,
                                    const Iterator& range_begin,
                                    const Iterator& range_end,
                                    SyncFunctor&    sync)
{
  // This function must be run on all processors at once
  parallel_only();

  // Count the objects to ask each processor about
  std::vector<unsigned int>
    ghost_objects_from_proc(libMesh::n_processors(), 0);

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObject *obj = *it;
      libmesh_assert (obj);
      unsigned int obj_procid = obj->processor_id();
      libmesh_assert (obj_procid != DofObject::invalid_processor_id);

      ghost_objects_from_proc[obj_procid]++;
    }

  // Request sets to send to each processor
  std::vector<std::vector<unsigned int> >
    requested_objs_id(libMesh::n_processors()),
    requested_objs_parent_id(libMesh::n_processors()),
    requested_objs_child_num(libMesh::n_processors());

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (unsigned int p=0; p != libMesh::n_processors(); ++p)
    if (p != libMesh::processor_id())
      {
        requested_objs_id[p].reserve(ghost_objects_from_proc[p]);
        requested_objs_parent_id[p].reserve(ghost_objects_from_proc[p]);
        requested_objs_child_num[p].reserve(ghost_objects_from_proc[p]);
      }

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      Elem *elem = *it;
      unsigned int obj_procid = elem->processor_id();
      if (obj_procid == libMesh::processor_id())
        continue;

      const Elem *parent = elem->parent();
      if (!parent || !elem->active())
        continue;

      requested_objs_id[obj_procid].push_back(elem->id());
      requested_objs_parent_id[obj_procid].push_back(parent->id());
      requested_objs_child_num[obj_procid].push_back
        (parent->which_child_am_i(elem));
    }

  // Trade requests with other processors
  for (unsigned int p=1; p != libMesh::n_processors(); ++p)
    {
      // Trade my requests with processor procup and procdown
      unsigned int procup = (libMesh::processor_id() + p) %
                             libMesh::n_processors();
      unsigned int procdown = (libMesh::n_processors() +
                               libMesh::processor_id() - p) %
                              libMesh::n_processors();

      std::vector<unsigned int> request_to_fill_parent_id,
                                request_to_fill_child_num;
      Parallel::send_receive(procup, requested_objs_parent_id[procup],
                             procdown, request_to_fill_parent_id);
      Parallel::send_receive(procup, requested_objs_child_num[procup],
                             procdown, request_to_fill_child_num);

      // Find the id of each requested element
      unsigned int request_size = request_to_fill_parent_id.size();
      std::vector<unsigned int> request_to_fill_id(request_size);
      for (unsigned int i=0; i != request_size; ++i)
        {
          Elem *parent = mesh.elem(request_to_fill_parent_id[i]);
          libmesh_assert(parent);
          libmesh_assert(parent->has_children());
          Elem *child = parent->child(request_to_fill_child_num[i]);
          libmesh_assert(child);
          libmesh_assert(child->active());
          request_to_fill_id[i] = child->id();
        }

      // Gather whatever data the user wants
      std::vector<typename SyncFunctor::datum> data;
      sync.gather_data(request_to_fill_id, data);

      // Trade back the results
      std::vector<typename SyncFunctor::datum> received_data;
      Parallel::send_receive(procdown, data, procup, received_data);
      libmesh_assert(requested_objs_id[procup].size() == received_data.size());

      // Let the user process the results
      sync.act_on_data(requested_objs_id[procup], received_data);
    }
}
#else
template <typename Iterator, typename SyncFunctor>
void sync_element_data_by_parent_id(MeshBase&,
                                    const Iterator&,
                                    const Iterator&,
                                    SyncFunctor&)
{}
#endif // ENABLE_AMR

}

#endif // #define __parallel_ghost_sync_h__
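The routines above expect a user-supplied SyncFunctor that provides a `datum` type, a `gather_data()` method (called on the processor that owns the requested ids) and an `act_on_data()` method (called on the requesting processor with the returned values). A minimal sketch of such a functor follows, assuming the values to be synchronized live in a `std::map` keyed by DofObject id; the class name `SyncIdValueMap` and its `values` member are illustrative assumptions, not part of this header.

// Illustrative SyncFunctor sketch (not part of parallel_ghost_sync.h):
// synchronizes double values stored per DofObject id.
#include <map>
#include <vector>

struct SyncIdValueMap
{
  // One unit of data exchanged per requested object
  typedef double datum;

  // Locally-known values, keyed by DofObject id (filled by the caller)
  std::map<unsigned int, datum> values;

  // On the owning processor: produce one datum per requested id
  void gather_data (const std::vector<unsigned int>& ids,
                    std::vector<datum>& data) const
  {
    data.resize(ids.size());
    for (unsigned int i=0; i != ids.size(); ++i)
      {
        std::map<unsigned int, datum>::const_iterator pos = values.find(ids[i]);
        data[i] = (pos == values.end()) ? 0. : pos->second;
      }
  }

  // On the requesting processor: store the value returned for each id
  void act_on_data (const std::vector<unsigned int>& ids,
                    const std::vector<datum>& data)
  {
    for (unsigned int i=0; i != ids.size(); ++i)
      values[ids[i]] = data[i];
  }
};

With a functor like this, calling, on every processor, something such as `sync_dofobject_data_by_id(mesh.nodes_begin(), mesh.nodes_end(), sync)` (assuming iterators that dereference to DofObject pointers) would fill in `sync.values` for ghost objects using the values held on their owning processors.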