
📄 mesh_communication_global_indices.c

📁 A computational library that implements meshes for solving partial differential equations
💻 C++
	      std::distance (node_upper_bounds.begin(),
			     std::lower_bound(node_upper_bounds.begin(),
					      node_upper_bounds.end(),
					      hi));
	    libmesh_assert (pid < libMesh::n_processors());
	    libmesh_assert (next_obj_on_proc[pid] != filled_request[pid].end());

	    const unsigned int global_index = *next_obj_on_proc[pid];
	    libmesh_assert (global_index < mesh.n_nodes());
	    node->set_id() = global_index;

	    ++next_obj_on_proc[pid];
	  }
      }
    }

    //---------------------------------------------------
    // elements next -- all elements, not just local ones
    {
      // Request sets to send to each processor
      std::vector<std::vector<Hilbert::HilbertIndices> >
	requested_ids (libMesh::n_processors());
      // Results to gather from each processor
      std::vector<std::vector<unsigned int> >
	filled_request (libMesh::n_processors());

      MeshBase::const_element_iterator       it  = mesh.elements_begin();
      const MeshBase::const_element_iterator end = mesh.elements_end();

      for (; it != end; ++it)
	{
	  const Elem* elem = (*it);
	  libmesh_assert (elem != NULL);

	  const Hilbert::HilbertIndices hi =
	    get_hilbert_index (elem->centroid(), bbox);

	  const unsigned int pid =
	    std::distance (elem_upper_bounds.begin(),
			   std::lower_bound(elem_upper_bounds.begin(),
					    elem_upper_bounds.end(),
					    hi));

	  libmesh_assert (pid < libMesh::n_processors());

	  requested_ids[pid].push_back(hi);
	}

      // The number of objects in my_elem_bin on each processor
      std::vector<unsigned int> elem_bin_sizes(libMesh::n_processors());
      Parallel::allgather (static_cast<unsigned int>(my_elem_bin.size()), elem_bin_sizes);

      // The offset of my first global index
      unsigned int my_offset = 0;
      for (unsigned int pid=0; pid<libMesh::processor_id(); pid++)
	my_offset += elem_bin_sizes[pid];

      // start with pid=0, so that we will trade with ourself
      for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
	{
          // Trade my requests with processor procup and procdown
          const unsigned int procup = (libMesh::processor_id() + pid) %
                                       libMesh::n_processors();
          const unsigned int procdown = (libMesh::n_processors() +
                                         libMesh::processor_id() - pid) %
                                         libMesh::n_processors();

          std::vector<Hilbert::HilbertIndices> request_to_fill;
          Parallel::send_receive(procup, requested_ids[procup],
                                 procdown, request_to_fill,
				 hilbert_type);

	  // Fill the requests
	  std::vector<unsigned int> global_ids; /**/ global_ids.reserve(request_to_fill.size());
	  for (unsigned int idx=0; idx<request_to_fill.size(); idx++)
	    {
	      const Hilbert::HilbertIndices &hi = request_to_fill[idx];
	      libmesh_assert (hi <= elem_upper_bounds[libMesh::processor_id()]);

	      // find the requested index in my elem bin
	      std::vector<Hilbert::HilbertIndices>::const_iterator pos =
		std::lower_bound (my_elem_bin.begin(), my_elem_bin.end(), hi);
	      libmesh_assert (pos != my_elem_bin.end());
	      libmesh_assert (*pos == hi);

	      // Finally, assign the global index based off the position of the index
	      // in my array, properly offset.
	      global_ids.push_back (std::distance(my_elem_bin.begin(), pos) + my_offset);
	    }

	  // and trade back
	  Parallel::send_receive (procdown, global_ids,
				  procup,   filled_request[procup]);
	}

      // We now have all the filled requests, so we can loop through our
      // elements once and assign the global index to each one.
      {
	std::vector<std::vector<unsigned int>::const_iterator>
	  next_obj_on_proc; next_obj_on_proc.reserve(libMesh::n_processors());
	for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
	  next_obj_on_proc.push_back(filled_request[pid].begin());

	MeshBase::element_iterator       it  = mesh.elements_begin();
	const MeshBase::element_iterator end = mesh.elements_end();

	for (; it != end; ++it)
	  {
	    Elem* elem = (*it);
	    libmesh_assert (elem != NULL);

	    const Hilbert::HilbertIndices hi =
	      get_hilbert_index (elem->centroid(), bbox);

	    const unsigned int pid =
	      std::distance (elem_upper_bounds.begin(),
			     std::lower_bound(elem_upper_bounds.begin(),
					      elem_upper_bounds.end(),
					      hi));

	    libmesh_assert (pid < libMesh::n_processors());
	    libmesh_assert (next_obj_on_proc[pid] != filled_request[pid].end());

	    const unsigned int global_index = *next_obj_on_proc[pid];
	    libmesh_assert (global_index < mesh.n_elem());
	    elem->set_id() = global_index;

	    ++next_obj_on_proc[pid];
	  }
      }
    }
  }

  // Clean up
  MPI_Type_free (&hilbert_type);

  STOP_LOG ("assign_global_indices()", "MeshCommunication");
}
#else // HAVE_LIBHILBERT, HAVE_MPI
void MeshCommunication::assign_global_indices (MeshBase&) const
{
}
#endif // HAVE_LIBHILBERT, HAVE_MPI



#if defined(HAVE_LIBHILBERT) && defined(HAVE_MPI)
template <typename ForwardIterator>
void MeshCommunication::find_global_indices (const MeshTools::BoundingBox &bbox,
					     const ForwardIterator &begin,
					     const ForwardIterator &end,
					     std::vector<unsigned int> &index_map) const
{
  START_LOG ("find_global_indices()", "MeshCommunication");

  // This method determines partition-agnostic global indices
  // for nodes and elements.

  // Algorithm:
  // (1) compute the Hilbert key for each local node/element
  // (2) perform a parallel sort of the Hilbert key
  // (3) get the min/max value on each processor
  // (4) determine the position in the global ranking for
  //     each local object

  index_map.clear();
  index_map.reserve(std::distance (begin, end));

  // Set up a derived MPI datatype to handle communication of HilbertIndices
  MPI_Datatype hilbert_type;
  MPI_Type_contiguous (3, MPI_UNSIGNED, &hilbert_type);
  MPI_Type_commit     (&hilbert_type);

  //-------------------------------------------------------------
  // (1) compute Hilbert keys
  // These aren't trivial to compute, and we will need them again.
  // But the binsort will sort the input vector, trashing the order
  // that we'd like to rely on.  So, two vectors...
  std::vector<Hilbert::HilbertIndices>
    sorted_hilbert_keys,
    hilbert_keys;
  sorted_hilbert_keys.reserve(index_map.capacity());
  hilbert_keys.reserve(index_map.capacity());
  {
    START_LOG("compute_hilbert_indices()", "MeshCommunication");
    for (ForwardIterator it=begin; it!=end; ++it)
      {
	const Hilbert::HilbertIndices hi(get_hilbert_index (*it, bbox));
	hilbert_keys.push_back(hi);

	if ((*it)->processor_id() == libMesh::processor_id())
	  sorted_hilbert_keys.push_back(hi);

	// someone needs to take care of unpartitioned objects!
	if ((libMesh::processor_id() == 0) &&
	    ((*it)->processor_id() == DofObject::invalid_processor_id))
	  sorted_hilbert_keys.push_back(hi);
      }

    STOP_LOG("compute_hilbert_indices()", "MeshCommunication");
  }

  //-------------------------------------------------------------
  // (2) parallel sort the Hilbert keys
  START_LOG ("parallel_sort()", "MeshCommunication");

  Parallel::Sort<Hilbert::HilbertIndices> sorter (sorted_hilbert_keys);
  sorter.sort();

  STOP_LOG ("parallel_sort()", "MeshCommunication");

  const std::vector<Hilbert::HilbertIndices> &my_bin = sorter.bin();

  // The number of objects in my_bin on each processor
  std::vector<unsigned int> bin_sizes(libMesh::n_processors());
  Parallel::allgather (static_cast<unsigned int>(my_bin.size()), bin_sizes);

  // The offset of my first global index
  unsigned int my_offset = 0;
  for (unsigned int pid=0; pid<libMesh::processor_id(); pid++)
    my_offset += bin_sizes[pid];

  //-------------------------------------------------------------
  // (3) get the max value on each processor
  std::vector<Hilbert::HilbertIndices>
    upper_bounds(libMesh::n_processors());

  // limit scope of temporaries
  {
    Hilbert::HilbertIndices my_max;

    if (!my_bin.empty()) my_max = my_bin.back();

    MPI_Allgather (&my_max,          1, hilbert_type,
		   &upper_bounds[0], 1, hilbert_type,
		   libMesh::COMM_WORLD);

    // Be careful here.  The *_upper_bounds will be used to find the processor
    // a given object belongs to.  So, if a processor contains no objects (possible!)
    // then copy the bound from the lower processor id.
    for (unsigned int p=1; p<libMesh::n_processors(); p++)
      if (!bin_sizes[p]) upper_bounds[p] = upper_bounds[p-1];
  }

  //-------------------------------------------------------------
  // (4) determine the position in the global ranking for
  //     each local object
  {
    //----------------------------------------------
    // all objects, not just local ones

    // Request sets to send to each processor
    std::vector<std::vector<Hilbert::HilbertIndices> >
      requested_ids (libMesh::n_processors());
    // Results to gather from each processor
    std::vector<std::vector<unsigned int> >
      filled_request (libMesh::n_processors());

    // build up list of requests
    std::vector<Hilbert::HilbertIndices>::const_iterator hi =
      hilbert_keys.begin();
    for (ForwardIterator it = begin; it != end; ++it)
      {
	libmesh_assert (hi != hilbert_keys.end());
	const unsigned int pid =
	  std::distance (upper_bounds.begin(),
			 std::lower_bound(upper_bounds.begin(),
					  upper_bounds.end(),
					  *hi));

	libmesh_assert (pid < libMesh::n_processors());

	requested_ids[pid].push_back(*hi);

	hi++;
	// go ahead and put pid in index_map, that way we
	// don't have to repeat the std::lower_bound()
	index_map.push_back(pid);
      }

    // start with pid=0, so that we will trade with ourself
    std::vector<Hilbert::HilbertIndices> request_to_fill;
    std::vector<unsigned int> global_ids;
    for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
      {
	// Trade my requests with processor procup and procdown
	const unsigned int procup = (libMesh::processor_id() + pid) %
                                     libMesh::n_processors();
	const unsigned int procdown = (libMesh::n_processors() +
                                       libMesh::processor_id() - pid) %
                                       libMesh::n_processors();

	Parallel::send_receive(procup, requested_ids[procup],
			       procdown, request_to_fill,
			       hilbert_type);

	// Fill the requests
	global_ids.clear(); /**/ global_ids.reserve(request_to_fill.size());
	for (unsigned int idx=0; idx<request_to_fill.size(); idx++)
	  {
	    const Hilbert::HilbertIndices &hi = request_to_fill[idx];
	    libmesh_assert (hi <= upper_bounds[libMesh::processor_id()]);

	    // find the requested index in my node bin
	    std::vector<Hilbert::HilbertIndices>::const_iterator pos =
	      std::lower_bound (my_bin.begin(), my_bin.end(), hi);
	    libmesh_assert (pos != my_bin.end());
	    libmesh_assert (*pos == hi);

	    // Finally, assign the global index based off the position of the index
	    // in my array, properly offset.
	    global_ids.push_back (std::distance(my_bin.begin(), pos) + my_offset);
	  }

	// and trade back
	Parallel::send_receive (procdown, global_ids,
				procup,   filled_request[procup]);
      }

    // We now have all the filled requests, so we can loop through our
    // nodes once and assign the global index to each one.
    {
      std::vector<std::vector<unsigned int>::const_iterator>
	next_obj_on_proc; next_obj_on_proc.reserve(libMesh::n_processors());
      for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
	next_obj_on_proc.push_back(filled_request[pid].begin());

      unsigned int cnt=0;
      for (ForwardIterator it = begin; it != end; ++it, cnt++)
	{
	  const unsigned int pid = index_map[cnt];

	  libmesh_assert (pid < libMesh::n_processors());
	  libmesh_assert (next_obj_on_proc[pid] != filled_request[pid].end());

	  const unsigned int global_index = *next_obj_on_proc[pid];
	  index_map[cnt] = global_index;

	  ++next_obj_on_proc[pid];
	}
    }
  }

  // Clean up
  MPI_Type_free (&hilbert_type);

  STOP_LOG ("find_global_indices()", "MeshCommunication");
}
#else // HAVE_LIBHILBERT, HAVE_MPI
template <typename ForwardIterator>
void MeshCommunication::find_global_indices (const MeshTools::BoundingBox &,
					     const ForwardIterator &,
					     const ForwardIterator &,
					     std::vector<unsigned int> &) const
{
}
#endif // HAVE_LIBHILBERT, HAVE_MPI



//------------------------------------------------------------------
template void MeshCommunication::find_global_indices<MeshBase::const_node_iterator> (const MeshTools::BoundingBox &,
										      const MeshBase::const_node_iterator &,
										      const MeshBase::const_node_iterator &,
										      std::vector<unsigned int> &) const;

template void MeshCommunication::find_global_indices<MeshBase::const_element_iterator> (const MeshTools::BoundingBox &,
											 const MeshBase::const_element_iterator &,
											 const MeshBase::const_element_iterator &,
											 std::vector<unsigned int> &) const;

template void MeshCommunication::find_global_indices<MeshBase::node_iterator> (const MeshTools::BoundingBox &,
										const MeshBase::node_iterator &,
										const MeshBase::node_iterator &,
										std::vector<unsigned int> &) const;

template void MeshCommunication::find_global_indices<MeshBase::element_iterator> (const MeshTools::BoundingBox &,
										   const MeshBase::element_iterator &,
										   const MeshBase::element_iterator &,
										   std::vector<unsigned int> &) const;
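The listing above rests on two small invariants: once the Hilbert keys have been parallel-sorted into per-processor bins, (a) the global index of a key is its position in the owning bin plus the total size of all lower-ranked bins (my_offset), found with std::lower_bound; and (b) in round pid of the exchange loop, each rank sends its requests to procup and receives from procdown, and the rank it sends to computes the sender as its own procdown in the same round, so every ordered pair of ranks is matched exactly once. The standalone sketch below simulates both ideas serially, with no MPI and no libMesh; the names Key, bins, and global_index_of are hypothetical and introduced only for illustration, not part of the library.

// Serial sketch of the offset-plus-lower_bound numbering used above.
// Assumption: keys are already "parallel sorted" into per-rank bins, i.e.
// concatenating bins[0], bins[1], ... yields one globally sorted sequence.
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>

typedef unsigned long Key;   // stand-in for Hilbert::HilbertIndices

// Global index of 'hi': its position inside the owner's bin, offset by the
// sizes of all lower-ranked bins (the role of my_offset in the real code).
unsigned int global_index_of (const std::vector<std::vector<Key> > &bins, Key hi)
{
  unsigned int offset = 0;
  for (unsigned int pid = 0; pid < bins.size(); ++pid)
    {
      const std::vector<Key> &bin = bins[pid];
      if (!bin.empty() && hi <= bin.back())   // plays the role of the *_upper_bounds search
        {
          std::vector<Key>::const_iterator pos =
            std::lower_bound (bin.begin(), bin.end(), hi);
          assert (pos != bin.end() && *pos == hi);   // requested key must exist in the bin
          return offset + std::distance (bin.begin(), pos);
        }
      offset += bin.size();                   // accumulate the offset rank by rank
    }
  assert (false);                             // every key lives in some bin
  return 0;
}

int main ()
{
  // Three "processors", bins already sorted and globally ordered.
  std::vector<std::vector<Key> > bins(3);
  Key b0[] = {2, 5, 9};    bins[0].assign (b0, b0 + 3);
  Key b1[] = {11, 14};     bins[1].assign (b1, b1 + 2);
  Key b2[] = {17, 21, 30}; bins[2].assign (b2, b2 + 3);

  // Every key receives a unique, contiguous index in 0..7.
  std::cout << global_index_of (bins, 2)  << "\n";   // prints 0
  std::cout << global_index_of (bins, 11) << "\n";   // prints 3
  std::cout << global_index_of (bins, 30) << "\n";   // prints 7

  // The ring schedule from the exchange loop: in round pid, rank r sends to
  // procup and receives from procdown; the rank r sends to computes r as its
  // procdown in the same round, so the send/receive pairing is consistent.
  const unsigned int n_procs = 3;
  for (unsigned int pid = 0; pid < n_procs; ++pid)
    for (unsigned int r = 0; r < n_procs; ++r)
      {
        const unsigned int procup   = (r + pid) % n_procs;
        const unsigned int procdown = (n_procs + r - pid) % n_procs;
        std::cout << "round " << pid << ": rank " << r
                  << " sends to " << procup << ", receives from " << procdown << "\n";
      }
  return 0;
}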
