
📄 parallel.h

📁 A computational library implementing the meshes used in partial differential equations
💻 C/C++ header
📖 Page 1 of 4
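This page shows the parallel reduction and point-to-point wrappers from parallel.h. They all rely on a datatype<T>() helper defined earlier in the header (not shown on this page) that maps a C++ type to its MPI datatype. For orientation only, a minimal sketch of what such a mapping presumably looks like; the actual specializations in parallel.h may differ:

// Hypothetical sketch of the datatype<T>() trait the wrappers below rely on.
// parallel.h provides its own specializations, which are not shown on this page.
#include <mpi.h>

template <typename T> inline MPI_Datatype datatype();
template <> inline MPI_Datatype datatype<int>()          { return MPI_INT; }
template <> inline MPI_Datatype datatype<unsigned int>() { return MPI_UNSIGNED; }
template <> inline MPI_Datatype datatype<double>()       { return MPI_DOUBLE; }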
}  // (closes a function begun on the previous page)

// Take the minimum of r across all processors and broadcast the result
// back to every processor (MPI_Allreduce with MPI_MIN).
template <typename T>
inline void min(T &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("min()", "Parallel");

      T temp;
      MPI_Allreduce (&r, &temp, 1, datatype<T>(),
                     MPI_MIN, libMesh::COMM_WORLD);
      r = temp;

      STOP_LOG("min()", "Parallel");
    }
}

// bool specialization: reduce through an unsigned int temporary,
// since there is no MPI datatype for bool.
template <>
inline void min(bool &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("min()", "Parallel");

      unsigned int tempsend = r;
      unsigned int temp;
      MPI_Allreduce (&tempsend, &temp, 1, datatype<unsigned int>(),
                     MPI_MIN, libMesh::COMM_WORLD);
      r = temp;

      STOP_LOG("min()", "Parallel");
    }
}

// Element-wise minimum of a vector across all processors.
template <typename T>
inline void min(std::vector<T> &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("min()", "Parallel");

      std::vector<T> temp(r.size());
      MPI_Allreduce (&r[0], &temp[0], r.size(), datatype<T>(),
                     MPI_MIN, libMesh::COMM_WORLD);
      r = temp;

      STOP_LOG("min()", "Parallel");
    }
}

// vector<bool> specialization: pack the bits into unsigned ints and
// reduce with a bitwise AND (the minimum of booleans).
template <>
inline void min(std::vector<bool> &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("min()", "Parallel");

      std::vector<unsigned int> rchar;
      pack_vector_bool(r, rchar);
      std::vector<unsigned int> temp(rchar.size());
      MPI_Allreduce (&rchar[0], &temp[0], rchar.size(), datatype<unsigned int>(),
                     MPI_BAND, libMesh::COMM_WORLD);
      unpack_vector_bool(temp, r);

      STOP_LOG("min()", "Parallel");
    }
}

// Take the maximum of r across all processors (MPI_Allreduce with MPI_MAX).
template <typename T>
inline void max(T &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("max()", "Parallel");

      T temp;
      MPI_Allreduce (&r, &temp, 1, datatype<T>(),
                     MPI_MAX, libMesh::COMM_WORLD);
      r = temp;

      STOP_LOG("max()", "Parallel");
    }
}

// bool specialization, again routed through an unsigned int temporary.
template <>
inline void max(bool &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("max()", "Parallel");

      unsigned int tempsend = r;
      unsigned int temp;
      MPI_Allreduce (&tempsend, &temp, 1, datatype<unsigned int>(),
                     MPI_MAX, libMesh::COMM_WORLD);
      r = temp;

      STOP_LOG("max()", "Parallel");
    }
}

// Element-wise maximum of a vector across all processors.
template <typename T>
inline void max(std::vector<T> &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("max()", "Parallel");

      std::vector<T> temp(r.size());
      MPI_Allreduce (&r[0], &temp[0], r.size(), datatype<T>(),
                     MPI_MAX, libMesh::COMM_WORLD);
      r = temp;

      STOP_LOG("max()", "Parallel");
    }
}

// vector<bool> specialization: packed reduction with a bitwise OR
// (the maximum of booleans).
template <>
inline void max(std::vector<bool> &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("max()", "Parallel");

      std::vector<unsigned int> rchar;
      pack_vector_bool(r, rchar);
      std::vector<unsigned int> temp(rchar.size());
      MPI_Allreduce (&rchar[0], &temp[0], rchar.size(), datatype<unsigned int>(),
                     MPI_BOR, libMesh::COMM_WORLD);
      unpack_vector_bool(temp, r);

      STOP_LOG("max()", "Parallel");
    }
}

// Sum r over all processors and broadcast the result back to everyone.
template <typename T>
inline void sum(T &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("sum()", "Parallel");

      T temp = r;
      MPI_Allreduce (&temp, &r, 1, datatype<T>(),
                     MPI_SUM, libMesh::COMM_WORLD);

      STOP_LOG("sum()", "Parallel");
    }
}

// Element-wise sum of a vector across all processors.
template <typename T>
inline void sum(std::vector<T> &r)
{
  if (libMesh::n_processors() > 1 && !r.empty())
    {
      START_LOG("sum()", "Parallel");

      std::vector<T> temp(r);
      MPI_Allreduce (&temp[0], &r[0], r.size(), datatype<T>(),
                     MPI_SUM, libMesh::COMM_WORLD);

      STOP_LOG("sum()", "Parallel");
    }
}

// Complex values are reduced as two reals (count = 2).
template <typename T>
inline void sum(std::complex<T> &r)
{
  if (libMesh::n_processors() > 1)
    {
      START_LOG("sum()", "Parallel");

      std::complex<T> temp(r);
      MPI_Allreduce (&temp, &r, 2, datatype<T>(),
                     MPI_SUM, libMesh::COMM_WORLD);

      STOP_LOG("sum()", "Parallel");
    }
}

// Element-wise sum of a vector of complex values (count = 2 * size).
template <typename T>
inline void sum(std::vector<std::complex<T> > &r)
{
  if (libMesh::n_processors() > 1 && !r.empty())
    {
      START_LOG("sum()", "Parallel");

      std::vector<std::complex<T> > temp(r);
      MPI_Allreduce (&temp[0], &r[0], r.size() * 2, datatype<T>(),
                     MPI_SUM, libMesh::COMM_WORLD);

      STOP_LOG("sum()", "Parallel");
    }
}

// Blocking send of a vector to dest_processor_id.
template <typename T>
inline void send (const unsigned int dest_processor_id,
                  std::vector<T> &buf,
                  const int tag)
{
  START_LOG("send()", "Parallel");

#ifndef NDEBUG
  // Only catch the return value when asserts are active.
  const int ierr =
#endif
    MPI_Send (buf.empty() ? NULL : &buf[0], buf.size(), datatype<T>(),
              dest_processor_id, tag, libMesh::COMM_WORLD);

  libmesh_assert (ierr == MPI_SUCCESS);

  STOP_LOG("send()", "Parallel");
}

// Blocking send of a vector of complex values (sent as 2 * size reals).
template <typename T>
inline void send (const unsigned int dest_processor_id,
                  std::vector<std::complex<T> > &buf,
                  const int tag)
{
  START_LOG("send()", "Parallel");

  const int ierr =
    MPI_Send (buf.empty() ? NULL : &buf[0], buf.size() * 2, datatype<T>(),
              dest_processor_id, tag, libMesh::COMM_WORLD);

  libmesh_assert (ierr == MPI_SUCCESS);

  STOP_LOG("send()", "Parallel");
}

// Non-blocking send; the request r must later be waited on.
template <typename T>
inline void isend (const unsigned int dest_processor_id,
                   std::vector<T> &buf,
                   request &r,
                   const int tag)
{
  START_LOG("isend()", "Parallel");

#ifndef NDEBUG
  // Only catch the return value when asserts are active.
  const int ierr =
#endif
    MPI_Isend (buf.empty() ? NULL : &buf[0], buf.size(), datatype<T>(),
               dest_processor_id, tag, libMesh::COMM_WORLD, &r);

  libmesh_assert (ierr == MPI_SUCCESS);

  STOP_LOG("isend()", "Parallel");
}

// Non-blocking send of a vector of complex values.
template <typename T>
inline void isend (const unsigned int dest_processor_id,
                   std::vector<std::complex<T> > &buf,
                   request &r,
                   const int tag)
{
  START_LOG("isend()", "Parallel");

  const int ierr =
    MPI_Isend (buf.empty() ? NULL : &buf[0], buf.size() * 2, datatype<T>(),
               dest_processor_id, tag, libMesh::COMM_WORLD, &r);

  libmesh_assert (ierr == MPI_SUCCESS);

  STOP_LOG("isend()", "Parallel");
}

// Non-blocking send with a user-supplied MPI datatype.
template <typename T>
inline void isend (const unsigned int dest_processor_id,
                   std::vector<T> &buf,
                   MPI_Datatype &type,
                   request &r,
                   const int tag)
{
  START_LOG("isend()", "Parallel");

#ifndef NDEBUG
  // Only catch the return value when asserts are active.
  const int ierr =
#endif
    MPI_Isend (buf.empty() ? NULL : &buf[0], buf.size(), type,
               dest_processor_id, tag, libMesh::COMM_WORLD, &r);

  libmesh_assert (ierr == MPI_SUCCESS);

  STOP_LOG("isend()", "Parallel");
}

// Blocking receive of a vector from src_processor_id.
template <typename T>
inline Status recv (const int src_processor_id,
                    std::vector<T> &buf,
                    const int tag)
{
  START_LOG("recv()", "Parallel");

  MPI_Status status;

#ifndef NDEBUG
  // Only catch the return value when asserts are active.
  const int ierr =
#endif
    MPI_Recv (buf.empty() ? NULL : &buf[0], buf.size(), datatype<T>(),
              src_processor_id, tag, libMesh::COMM_WORLD, &status);

  libmesh_assert (ierr == MPI_SUCCESS);

  STOP_LOG("recv()", "Parallel");

  return Status(status, datatype<T>());
}

// Blocking receive with a user-supplied MPI datatype.
template <typename T>
inline Status recv (const int src_processor_id,
                    std::vector<T> &buf,
                    MPI_Datatype &type,
                    const int tag)
{
  START_LOG("recv()", "Parallel");

  MPI_Status status;

#ifndef NDEBUG
  // Only catch the return value when asserts are active.
  const int ierr =
#endif
    MPI_Recv (buf.empty() ? NULL : &buf[0], buf.size(), type,
              src_processor_id, tag, libMesh::COMM_WORLD, &status);

  libmesh_assert (ierr == MPI_SUCCESS);

  STOP_LOG("recv()", "Parallel");

  return Status(status, type);
}

// Blocking receive of a vector of complex values (received as 2 * size reals).
template <typename T>
inline Status recv (const int src_processor_id,
                    std::vector<std::complex<T> > &buf,
                    const int tag)
{
  START_LOG("recv()", "Parallel");

  MPI_Status status;

  const int ierr =
    MPI_Recv (buf.empty() ? NULL : &buf[0], buf.size() * 2, datatype<T>(),
              src_processor_id, tag, libMesh::COMM_WORLD, &status);

  libmesh_assert (ierr == MPI_SUCCESS);

  STOP_LOG("recv()", "Parallel");

  return Status(status, datatype<T>());
}

// Non-blocking receive; the request r must later be waited on.
template <typename T>
inline void irecv (const int src_processor_id,
                   std::vector<T> &buf,
                   request &r,
                   const int tag)
{
  START_LOG("irecv()", "Parallel");

#ifndef NDEBUG
  // Only catch the return value when asserts are active.
  const int ierr =
#endif
    MPI_Irecv (buf.empty() ? NULL : &buf[0], buf.size(), datatype<T>(),
               src_processor_id, tag, libMesh::COMM_WORLD, &r);

  libmesh_assert (ierr == MPI_SUCCESS);

  STOP_LOG("irecv()", "Parallel");
}

// (The listing is truncated here; the file continues on the following pages.)
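A minimal usage sketch, assuming these wrappers live in libMesh's Parallel namespace (as the logging category suggests) and that MPI has already been initialized by libMesh's startup code; the local values and the tag are stand-ins, not part of the header:

// Illustrative only: assumes the usual libMesh headers are included and
// that the wrappers above sit in namespace Parallel.
#include <vector>

void gather_statistics ()
{
  // Global reduction: every processor contributes a local value and,
  // after the call, holds the global result.
  double max_error = 0.5 * libMesh::processor_id();  // stand-in local value
  Parallel::max (max_error);                         // global maximum

  std::vector<unsigned int> counts (10, 1);
  Parallel::sum (counts);                            // element-wise global sum

  // Point-to-point: processor 0 sends a buffer to processor 1 with tag 100.
  // The receiver's buffer must already be sized to hold the message.
  std::vector<double> buf (100, 0.);
  if (libMesh::processor_id() == 0)
    Parallel::send (1, buf, 100);
  else if (libMesh::processor_id() == 1)
    Parallel::recv (0, buf, 100);
}

Because every collective here is an MPI_Allreduce, the reduced value ends up on all processors, which is why the wrappers can simply overwrite their argument in place.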
