
📄 parallel.h

📁 A computational library implementing meshes for partial differential equations
💻 C/C++ header
📖 Page 1 of 4
  // (The listing begins partway through gather() for vectors of
  // std::complex<T> values.)
        globalsize += sendlengths[i];
      }

    // Check for quick return
    if (globalsize == 0)
      {
        STOP_LOG("gather()", "Parallel");
        return;
      }

    // Make temporary buffers for the input/output data
    std::vector<std::complex<T> > r_src(r);

    // now resize r to hold the global data
    // on the receiving processor
    if (root_id == libMesh::processor_id())
      r.resize(globalsize);

    // and get the data from the remote processors
    const int ierr =
      MPI_Gatherv (r_src.empty() ? NULL : &r_src[0], mysize, datatype<T>(),
                   r.empty() ? NULL : &r[0], &sendlengths[0],
                   &displacements[0], datatype<T>(),
                   root_id, libMesh::COMM_WORLD);

    libmesh_assert (ierr == MPI_SUCCESS);

    STOP_LOG("gather()", "Parallel");
  }


  template <typename T>
  inline void allgather(T send,
                        std::vector<T> &recv)
  {
    START_LOG ("allgather()","Parallel");

    recv.resize(libMesh::n_processors());

    if (libMesh::n_processors() > 1)
      {
        MPI_Allgather (&send,
                       1,
                       datatype<T>(),
                       &recv[0],
                       1,
                       datatype<T>(),
                       libMesh::COMM_WORLD);
      }
    else
      recv[0] = send;

    STOP_LOG ("allgather()","Parallel");
  }


  template <typename T>
  inline void allgather(std::complex<T> send,
                        std::vector<std::complex<T> > &recv)
  {
    START_LOG ("allgather()","Parallel");

    recv.resize(libMesh::n_processors());

    if (libMesh::n_processors() > 1)
      {
        // Each std::complex<T> travels as two contiguous values of
        // the underlying type T.
        MPI_Allgather (&send,
                       2,
                       datatype<T>(),
                       &recv[0],
                       2,
                       datatype<T>(),
                       libMesh::COMM_WORLD);
      }
    else
      recv[0] = send;

    STOP_LOG ("allgather()","Parallel");
  }


  /**
   * This function provides a convenient method
   * for combining vectors from each processor into one
   * contiguous chunk.  This handles the case where the
   * lengths of the vectors may vary.  Specifically, this
   * function transforms this:
   \verbatim
    Processor 0: [ ... N_0 ]
    Processor 1: [ ....... N_1 ]
      ...
    Processor M: [ .. N_M ]
   \endverbatim
   *
   * into this:
   *
   \verbatim
   [ [ ... N_0 ] [ ....... N_1 ] ... [ .. N_M ] ]
   \endverbatim
   *
   * on each processor.  This function is collective and therefore
   * must be called by all processors.
   */
  template <typename T>
  inline void allgather(std::vector<T> &r)
  {
    if (libMesh::n_processors() == 1)
      return;

    START_LOG("allgather()", "Parallel");

    std::vector<int>
      sendlengths  (libMesh::n_processors(), 0),
      displacements(libMesh::n_processors(), 0);

    const int mysize = r.size();
    Parallel::allgather(mysize, sendlengths);

    // Find the total size of the final array and
    // set up the displacement offsets for each processor.
    unsigned int globalsize = 0;
    for (unsigned int i=0; i != libMesh::n_processors(); ++i)
      {
        displacements[i] = globalsize;
        globalsize += sendlengths[i];
      }

    // Check for quick return
    if (globalsize == 0)
      {
        STOP_LOG("allgather()", "Parallel");
        return;
      }

    // copy the input buffer
    std::vector<T> r_src(r);

    // now resize it to hold the global data
    r.resize(globalsize);

    // and get the data from the remote processors.
    // Pass NULL if our vector is empty.
#ifndef NDEBUG
    // Only catch the return value when asserts are active.
    const int ierr =
#endif
      MPI_Allgatherv (r_src.empty() ? NULL : &r_src[0], mysize, datatype<T>(),
                      r.empty()     ? NULL : &r[0],     &sendlengths[0],
                      &displacements[0], datatype<T>(), libMesh::COMM_WORLD);

    libmesh_assert (ierr == MPI_SUCCESS);

    STOP_LOG("allgather()", "Parallel");
  }
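  // ------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): the vector
  // overload of allgather() above concatenates per-processor vectors
  // of possibly different lengths.  The function name and data below
  // are hypothetical; the block is disabled so it cannot be mistaken
  // for library code.
#if 0
  void gather_all_local_ids (std::vector<unsigned int> &ids)
  {
    // On entry "ids" holds only this processor's entries, e.g.
    //   processor 0: [ a0 a1 ]      (N_0 = 2)
    //   processor 1: [ b0 b1 b2 ]   (N_1 = 3)
    Parallel::allgather (ids);
    // On exit every processor holds the concatenation
    //   [ a0 a1 b0 b1 b2 ... ]
    // ordered by processor id.
  }
#endif
  // ------------------------------------------------------------------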
  template <typename T>
  inline void allgather(std::vector<std::complex<T> > &r)
  {
    if (libMesh::n_processors() == 1)
      return;

    START_LOG("allgather()", "Parallel");

    std::vector<int>
      sendlengths  (libMesh::n_processors(), 0),
      displacements(libMesh::n_processors(), 0);

    // Two entries of the underlying type T per complex value.
    const int mysize = r.size() * 2;
    Parallel::allgather(mysize, sendlengths);

    // Find the total size of the final array and
    // set up the displacement offsets for each processor.
    unsigned int globalsize = 0;
    for (unsigned int i=0; i != libMesh::n_processors(); ++i)
      {
        displacements[i] = globalsize;
        globalsize += sendlengths[i];
      }

    // Check for quick return
    if (globalsize == 0)
      {
        STOP_LOG("allgather()", "Parallel");
        return;
      }

    // copy the input buffer
    std::vector<std::complex<T> > r_src(r);

    // now resize it to hold the global data
    r.resize(globalsize);

    // and get the data from the remote processors.
    // Pass NULL if our vector is empty.
    const int ierr =
      MPI_Allgatherv (r_src.empty() ? NULL : &r_src[0], mysize, datatype<T>(),
                      r.empty()     ? NULL : &r[0],     &sendlengths[0],
                      &displacements[0], datatype<T>(),
                      libMesh::COMM_WORLD);

    libmesh_assert (ierr == MPI_SUCCESS);

    STOP_LOG("allgather()", "Parallel");
  }


  /**
   * Replaces the input buffer with the result of MPI_Alltoall.
   * The vector size must be of the form N*n_procs, where N is
   * the number of elements to be sent to/received from each
   * processor.
   */
  template <typename T>
  inline void alltoall(std::vector<T> &buf)
  {
    if (libMesh::n_processors() == 1)
      return;

    START_LOG("alltoall()", "Parallel");

    // the per-processor size.  this is the same for all
    // processors using MPI_Alltoall, could be variable
    // using MPI_Alltoallv
    const unsigned int size_per_proc =
      buf.size()/libMesh::n_processors();

    libmesh_assert (buf.size()%libMesh::n_processors() == 0);

    std::vector<T> tmp(buf);

#ifndef NDEBUG
    // Only catch the return value when asserts are active.
    const int ierr =
#endif
      MPI_Alltoall (tmp.empty() ? NULL : &tmp[0],
                    size_per_proc,
                    datatype<T>(),
                    buf.empty() ? NULL : &buf[0],
                    size_per_proc,
                    datatype<T>(),
                    libMesh::COMM_WORLD);

    libmesh_assert (ierr == MPI_SUCCESS);

    STOP_LOG("alltoall()", "Parallel");
  }
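  // ------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): alltoall()
  // requires N entries per processor, laid out contiguously by
  // destination rank.  Here N = 1; the function name is hypothetical
  // and the block is disabled.
#if 0
  void exchange_counts (std::vector<unsigned int> &counts)
  {
    // On entry counts.size() == libMesh::n_processors() and
    // counts[p] is the value this processor sends to processor p.
    Parallel::alltoall (counts);
    // On exit counts[p] is the value processor p sent to us.
  }
#endif
  // ------------------------------------------------------------------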
  template <typename T>
  inline void broadcast (T &data, const unsigned int root_id)
  {
    if (libMesh::n_processors() == 1)
      {
        libmesh_assert (libMesh::processor_id() == root_id);
        return;
      }

    START_LOG("broadcast()", "Parallel");

    // Spread data to remote processors.
#ifndef NDEBUG
    // Only catch the return value when asserts are active.
    const int ierr =
#endif
      MPI_Bcast (&data, 1, datatype<T>(), root_id, libMesh::COMM_WORLD);

    libmesh_assert (ierr == MPI_SUCCESS);

    STOP_LOG("broadcast()", "Parallel");
  }


  template <typename T>
  inline void broadcast (std::complex<T> &data, const unsigned int root_id)
  {
    if (libMesh::n_processors() == 1)
      {
        libmesh_assert (libMesh::processor_id() == root_id);
        return;
      }

    START_LOG("broadcast()", "Parallel");

    // Spread data to remote processors, sending the complex value
    // as two contiguous entries of the underlying type T.
    const int ierr =
      MPI_Bcast (&data, 2, datatype<T>(), root_id, libMesh::COMM_WORLD);

    libmesh_assert (ierr == MPI_SUCCESS);

    STOP_LOG("broadcast()", "Parallel");
  }


  template <>
  inline void broadcast (std::string &data, const unsigned int root_id)
  {
    if (libMesh::n_processors() == 1)
      {
        libmesh_assert (libMesh::processor_id() == root_id);
        return;
      }

    START_LOG("broadcast()", "Parallel");

    // Broadcast the string length first, then its characters via the
    // std::vector<char> overload.
    unsigned int data_size = data.size();
    Parallel::broadcast(data_size, root_id);

    std::vector<char> data_c(data_size);
    std::string orig(data);

    if (libMesh::processor_id() == root_id)
      for (unsigned int i=0; i<data.size(); i++)
        data_c[i] = data[i];

    Parallel::broadcast (data_c, root_id);

    data.clear();
    data.reserve(data_c.size());
    for (unsigned int i=0; i<data_c.size(); i++)
      data.push_back(data_c[i]);

    if (libMesh::processor_id() == root_id)
      libmesh_assert(data == orig);

    STOP_LOG("broadcast()", "Parallel");
  }


  template <typename T>
  inline void broadcast (std::vector<T> &data, const unsigned int root_id)
  {
    if (libMesh::n_processors() == 1)
      {
        libmesh_assert (libMesh::processor_id() == root_id);
        return;
      }

    START_LOG("broadcast()", "Parallel");

    // and get the data from the remote processors.
    // Pass NULL if our vector is empty.
#ifndef NDEBUG
    // Only catch the return value when asserts are active.
    const int ierr =
#endif
      MPI_Bcast (data.empty() ? NULL : &data[0], data.size(), datatype<T>(),
                 root_id, libMesh::COMM_WORLD);

    libmesh_assert (ierr == MPI_SUCCESS);

    STOP_LOG("broadcast()", "Parallel");
  }


  template <typename T>
  inline void broadcast (std::vector<std::complex<T> > &data,
                         const unsigned int root_id)
  {
    if (libMesh::n_processors() == 1)
      {
        libmesh_assert (libMesh::processor_id() == root_id);
        return;
      }

    START_LOG("broadcast()", "Parallel");

    // and get the data from the remote processors.
    // Pass NULL if our vector is empty.
    const int ierr =
      MPI_Bcast (data.empty() ? NULL : &data[0], data.size() * 2, datatype<T>(),
                 root_id, libMesh::COMM_WORLD);

    libmesh_assert (ierr == MPI_SUCCESS);

    STOP_LOG("broadcast()", "Parallel");
  }
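  // ------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): every
  // broadcast() overload above is collective, so all processors must
  // call it with the same root_id.  The file name and function name
  // are hypothetical; the block is disabled.
#if 0
  void distribute_filename (std::string &name)
  {
    const unsigned int root_id = 0;

    if (libMesh::processor_id() == root_id)
      name = "mesh.xda";   // only the root starts with the real value

    Parallel::broadcast (name, root_id);
    // "name" now compares equal on every processor.
  }
#endif
  // ------------------------------------------------------------------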
#else // HAVE_MPI

  // Non-MPI fallbacks: on a single processor every global operation
  // is either a no-op or a trivial local copy, so the same calling
  // code compiles with or without MPI.

  template <typename T>
  inline bool verify(const T &) { return true; }

  template <typename T>
  inline void min(T &) {}

  template <typename T>
  inline void min(std::vector<T> &) {}

  template <typename T>
  inline void max(T &) {}

  template <typename T>
  inline void max(std::vector<T> &) {}

  template <typename T>
  inline void sum(T &) {}

  template <typename T>
  inline void sum(std::vector<T> &) {}

  // Blocking sends don't make sense on one processor
  template <typename T>
  inline void send (const unsigned int,
                    std::vector<T> &,
                    const unsigned int) { libmesh_error(); }

  template <typename T>
  inline void isend (const unsigned int,
                     std::vector<T> &,
                     request &,
                     const int) {}

  // Blocking receives don't make sense on one processor
  template <typename T>
  inline Status recv (const int,
                      std::vector<T> &,
                      const int) { libmesh_error(); return Status(); }

  template <typename T>
  inline void irecv (const int,
                     std::vector<T> &,
                     request &,
                     const int) {}

  inline void wait (request &) {}

  inline void wait (std::vector<request> &) {}

  template <typename T>
  inline void send_receive (const unsigned int send_tgt,
                            T &send,
                            const unsigned int recv_source,
                            T &recv)
  {
    libmesh_assert (send_tgt == recv_source);
    recv = send;
  }

  template <typename T>
  inline void gather(const unsigned int root_id,
                     T send,
                     std::vector<T> &recv)
  {
    libmesh_assert (!root_id);
    recv.resize(1);
    recv[0] = send;
  }

  template <typename T>
  inline void gather(const unsigned int, std::vector<T> &) {}

  template <typename T>
  inline void allgather(T send,
                        std::vector<T> &recv)
  {
    recv.resize(1);
    recv[0] = send;
  }

  template <typename T>
  inline void allgather(std::vector<T> &) {}

  template <typename T>
  inline void alltoall(std::vector<T> &) {}

  template <typename T>
  inline void broadcast (T &, const unsigned int =0) {}

  template <typename T>
  inline void broadcast (std::vector<T> &, const unsigned int =0) {}

#endif // HAVE_MPI

}

#endif // #define __parallel_h__
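A note on the serial branch: the no-MPI stubs keep the same signatures as their MPI counterparts, so calling code compiles and runs unchanged whether or not HAVE_MPI is defined. A minimal sketch, assuming the MPI branch provides the matching Parallel::max() reduction that the stub mirrors (the function name global_max_error is hypothetical):

    void global_max_error (double &err)
    {
      // With MPI this reduces err over all processors;
      // without MPI it is a no-op, since the local value
      // already is the global one.
      Parallel::max (err);
    }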
