
gather.c

The previous upload was the documentation; this one is the installer: the MPICH installation files for MPI programming under Linux. The C source below is MPICH's implementation of MPI_Gather; the listing picks up partway through MPIR_Gather (the intracommunicator gather) and continues with MPIR_Gather_inter and the MPI_Gather entry point.
Language: C
                position = nbytes;
                NMPI_Unpack(tmp_buf, tmp_buf_size, &position,
                            ((char *) recvbuf + extent*recvcnt*(rank+1)),
                            recvcnt*(comm_size-rank-1), recvtype, comm);
            }
            if (root != 0)
                NMPI_Unpack(tmp_buf, tmp_buf_size, &position, recvbuf,
                            recvcnt*rank, recvtype, comm);
        }

        MPIU_Free(tmp_buf);
    }

    /* Unlock for collective operation */
    MPID_Comm_thread_unlock( comm_ptr );

    return (mpi_errno);
}
/* end:nested */

/* begin:nested */
PMPI_LOCAL int MPIR_Gather_inter (
    void *sendbuf,
    int sendcnt,
    MPI_Datatype sendtype,
    void *recvbuf,
    int recvcnt,
    MPI_Datatype recvtype,
    int root,
    MPID_Comm *comm_ptr )
{
/*  Intercommunicator gather.
    For short messages, remote group does a local intracommunicator
    gather to rank 0. Rank 0 then sends data to root.
    Cost: (lgp+1).alpha + n.((p-1)/p).beta + n.beta

    For long messages, we use linear gather to avoid the extra n.beta.
    Cost: p.alpha + n.beta
*/
    int rank, local_size, remote_size, mpi_errno=MPI_SUCCESS;
    int i, nbytes, sendtype_size, recvtype_size;
    MPI_Status status;
    MPI_Aint extent, true_extent, true_lb;
    void *tmp_buf=NULL;
    MPID_Comm *newcomm_ptr = NULL;
    MPI_Comm comm;

    if (root == MPI_PROC_NULL) {
        /* local processes other than root do nothing */
        return MPI_SUCCESS;
    }

    comm = comm_ptr->handle;
    remote_size = comm_ptr->remote_size;
    local_size = comm_ptr->local_size;

    if (root == MPI_ROOT) {
        MPID_Datatype_get_size_macro(recvtype, recvtype_size);
        nbytes = recvtype_size * recvcnt * remote_size;
    }
    else {
        /* remote side */
        MPID_Datatype_get_size_macro(sendtype, sendtype_size);
        nbytes = sendtype_size * sendcnt * local_size;
    }

    if (nbytes < MPIR_GATHER_SHORT_MSG) {
        if (root == MPI_ROOT) {
            /* root receives data from rank 0 on remote group */
            MPID_Comm_thread_lock( comm_ptr );
            mpi_errno = MPIC_Recv(recvbuf, recvcnt*remote_size,
                                  recvtype, 0, MPIR_GATHER_TAG, comm,
                                  &status);
            MPID_Comm_thread_unlock( comm_ptr );
            return mpi_errno;
        }
        else {
            /* remote group. Rank 0 allocates temporary buffer, does
               local intracommunicator gather, and then sends the data
               to root. */

            rank = comm_ptr->rank;

            if (rank == 0) {
                mpi_errno = NMPI_Type_get_true_extent(sendtype, &true_lb,
                                                      &true_extent);
                if (mpi_errno) return mpi_errno;
                tmp_buf = MPIU_Malloc(true_extent*sendcnt*local_size);
                if (!tmp_buf) {
                    mpi_errno = MPIR_Err_create_code( MPI_ERR_OTHER, "**nomem", 0 );
                    return mpi_errno;
                }
                /* adjust for potential negative lower bound in datatype */
                tmp_buf = (void *)((char*)tmp_buf - true_lb);
            }

            /* all processes in remote group form new intracommunicator */
            if (!comm_ptr->local_comm)
                MPIR_Setup_intercomm_localcomm( comm_ptr );

            newcomm_ptr = comm_ptr->local_comm;

            /* now do a local gather on this intracommunicator */
            mpi_errno = MPIR_Gather(sendbuf, sendcnt, sendtype,
                                    tmp_buf, sendcnt, sendtype, 0,
                                    newcomm_ptr);
            if (rank == 0) {
                MPID_Comm_thread_lock( comm_ptr );
                mpi_errno = MPIC_Send(tmp_buf, sendcnt*local_size,
                                      sendtype, root,
                                      MPIR_GATHER_TAG, comm);
                MPID_Comm_thread_unlock( comm_ptr );
                if (mpi_errno) return mpi_errno;
                MPIU_Free(((char*)tmp_buf+true_lb));
            }
        }
    }
    else {
        /* long message. use linear algorithm. */
        MPID_Comm_thread_lock( comm_ptr );
        if (root == MPI_ROOT) {
            MPID_Datatype_get_extent_macro(recvtype, extent);
            for (i=0; i<remote_size; i++) {
                mpi_errno = MPIC_Recv(((char *)recvbuf+recvcnt*i*extent),
                                      recvcnt, recvtype, i,
                                      MPIR_GATHER_TAG, comm, &status);
                if (mpi_errno) return mpi_errno;
            }
        }
        else {
            mpi_errno = MPIC_Send(sendbuf, sendcnt, sendtype, root,
                                  MPIR_GATHER_TAG, comm);
        }
        MPID_Comm_thread_unlock( comm_ptr );
    }

    return mpi_errno;
}
/* end:nested */
#endif

#undef FUNCNAME
#define FUNCNAME MPI_Gather

/*@
   MPI_Gather - gather

   Arguments:
+  void *sendbuf - send buffer
.  int sendcnt - send count
.  MPI_Datatype sendtype - send datatype
.  void *recvbuf - receive buffer
.  int recvcnt - receive count
.  MPI_Datatype recvtype - receive datatype
.  int root - root
-  MPI_Comm comm - communicator

   Notes:

.N Fortran

.N Errors
.N MPI_SUCCESS
@*/
int MPI_Gather(void *sendbuf, int sendcnt, MPI_Datatype sendtype,
               void *recvbuf, int recvcnt, MPI_Datatype recvtype,
               int root, MPI_Comm comm)
{
    static const char FCNAME[] = "MPI_Gather";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_GATHER);

    MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_GATHER);

    /* Verify that MPI has been initialized */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_INITIALIZED(mpi_errno);
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) {
                return MPIR_Err_return_comm( 0, FCNAME, mpi_errno );
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Get handles to MPI objects. */
    MPID_Comm_get_ptr( comm, comm_ptr );

#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Datatype *sendtype_ptr=NULL, *recvtype_ptr=NULL;
            int rank;

            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno != MPI_SUCCESS) {
                MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_GATHER);
                return MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
            }

            MPIR_ERRTEST_COUNT(sendcnt, mpi_errno);
            MPIR_ERRTEST_DATATYPE(sendcnt, sendtype, mpi_errno);
            MPIR_ERRTEST_INTRA_ROOT(comm_ptr, root, mpi_errno);

            rank = comm_ptr->rank;
            if (rank == root) {
                MPIR_ERRTEST_COUNT(recvcnt, mpi_errno);
                MPIR_ERRTEST_DATATYPE(recvcnt, recvtype, mpi_errno);
                if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
                    MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
                    MPID_Datatype_valid_ptr( recvtype_ptr, mpi_errno );
                }
            }

            if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
                MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
                MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
            }

            if (mpi_errno != MPI_SUCCESS) {
                MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_GATHER);
                return MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    if (comm_ptr->coll_fns != NULL && comm_ptr->coll_fns->Gather != NULL)
    {
        mpi_errno = comm_ptr->coll_fns->Gather(sendbuf, sendcnt,
                                               sendtype, recvbuf, recvcnt,
                                               recvtype, root, comm_ptr);
    }
    else
    {
        MPIR_Nest_incr();
        if (comm_ptr->comm_kind == MPID_INTRACOMM)
            /* intracommunicator */
            mpi_errno = MPIR_Gather(sendbuf, sendcnt, sendtype,
                                    recvbuf, recvcnt, recvtype, root,
                                    comm_ptr);
        else {
            /* intercommunicator */
            mpi_errno = MPIR_Err_create_code( MPI_ERR_COMM,
                                              "**intercommcoll",
                                              "**intercommcoll %s", FCNAME );
            /*mpi_errno = MPIR_Gather_inter(sendbuf, sendcnt, sendtype,
                                            recvbuf, recvcnt, recvtype, root,
                                            comm_ptr);*/
        }
        MPIR_Nest_decr();
    }

    if (mpi_errno == MPI_SUCCESS)
    {
        MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_GATHER);
        return MPI_SUCCESS;
    }
    else
    {
        MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_GATHER);
        return MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    }

    /* ... end of body of routine ... */
}
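The listing above is the library-side implementation of the routine documented in the /*@ MPI_Gather @*/ block; an application reaches it through the standard MPI_Gather call. The following is a minimal usage sketch (not part of gather.c) that gathers one integer from every rank to rank 0; with a working MPICH installation it would be built with mpicc and launched with mpirun.

/* Minimal usage sketch (not part of gather.c): every rank contributes
   one int, rank 0 collects them in rank order. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, sendval, i;
    int *recvbuf = NULL;
    const int root = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    sendval = rank;    /* each process sends its own rank */

    /* Only the root needs a receive buffer; the receive arguments are
       ignored on all other ranks. */
    if (rank == root)
        recvbuf = (int *) malloc(size * sizeof(int));

    MPI_Gather(&sendval, 1, MPI_INT, recvbuf, 1, MPI_INT, root,
               MPI_COMM_WORLD);

    if (rank == root) {
        for (i = 0; i < size; i++)
            printf("slot %d holds the value sent by rank %d: %d\n",
                   i, i, recvbuf[i]);
        free(recvbuf);
    }

    MPI_Finalize();
    return 0;
}

Only the root's recvbuf, recvcnt, and recvtype are significant, which is why the error-checking block in MPI_Gather validates the receive arguments only when rank == root.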
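The comment at the top of MPIR_Gather_inter records the cost model behind the MPIR_GATHER_SHORT_MSG switch: the short-message path costs roughly (lg p + 1)*alpha + n*((p-1)/p)*beta + n*beta, while the linear long-message path costs p*alpha + n*beta, where p is the size of the sending group, n the total number of bytes gathered, alpha the per-message latency, and beta the per-byte transfer cost. The sketch below (not part of MPICH; alpha, beta, and p are hypothetical values chosen only for illustration) evaluates both expressions to show the crossover: the tree-style path saves latency for small n, but its extra n*beta of forwarding through rank 0 makes the linear path cheaper once messages are large.

/* Illustration of the cost model quoted in the MPIR_Gather_inter
   comment.  alpha, beta and p are hypothetical values, not taken
   from MPICH. */
#include <stdio.h>

/* number of communication rounds in a binomial tree of p processes */
static double ceil_log2(double p)
{
    double r = 0.0;
    while (p > 1.0) { p /= 2.0; r += 1.0; }
    return r;
}

int main(void)
{
    const double alpha = 20e-6;  /* per-message latency: 20 us (assumed)  */
    const double beta  = 1e-9;   /* per-byte cost: ~1 GB/s link (assumed) */
    const double p     = 64.0;   /* processes in the sending group        */
    double n;

    for (n = 1024.0; n <= 32.0 * 1024.0 * 1024.0; n *= 8.0) {
        /* short-message path: (lg p + 1).alpha + n.((p-1)/p).beta + n.beta */
        double tree   = (ceil_log2(p) + 1.0) * alpha
                        + n * ((p - 1.0) / p) * beta + n * beta;
        /* long-message (linear) path: p.alpha + n.beta */
        double linear = p * alpha + n * beta;

        printf("n = %9.0f bytes: tree %.6f s, linear %.6f s -> %s wins\n",
               n, tree, linear, tree < linear ? "tree" : "linear");
    }
    return 0;
}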
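One detail worth noting in the short-message branch is the temporary-buffer allocation: rank 0 sizes tmp_buf from the datatype's true extent, then subtracts true_lb so that datatypes whose data begins below the nominal origin still fall inside the allocation, and later frees (char*)tmp_buf + true_lb to undo the shift. The sketch below (not part of gather.c; the vector type and element count are made up for illustration) shows the same pattern through the public MPI_Type_get_true_extent interface.

/* Sketch of the tmp_buf sizing pattern via the public MPI interface
   (not part of gather.c; the vector type and count are made up). */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Datatype vec;
    MPI_Aint true_lb, true_extent;
    int nelems = 8;              /* hypothetical element count */
    char *raw, *tmp_buf;

    MPI_Init(&argc, &argv);

    /* A derived datatype: 3 ints taken with a stride of 2 ints. */
    MPI_Type_vector(3, 1, 2, MPI_INT, &vec);
    MPI_Type_commit(&vec);

    /* Same recipe as rank 0 in MPIR_Gather_inter: size the scratch
       space from the *true* extent, then shift the base pointer by
       -true_lb.  If a datatype's data starts below its nominal origin
       (true_lb < 0), the shift keeps every displacement inside the
       allocation; for this vector type true_lb is 0 and the shift is
       a no-op. */
    MPI_Type_get_true_extent(vec, &true_lb, &true_extent);
    raw = (char *) malloc((size_t) true_extent * nelems);
    tmp_buf = raw - true_lb;

    printf("true_lb = %ld bytes, true_extent = %ld bytes\n",
           (long) true_lb, (long) true_extent);

    /* ... tmp_buf could now receive nelems elements of type vec ... */
    (void) tmp_buf;

    free(raw);               /* free the original pointer, not tmp_buf */
    MPI_Type_free(&vec);
    MPI_Finalize();
    return 0;
}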
