⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 allgatherv.c

📁 fortran并行计算包
💻 C
📖 第 1 页 / 共 3 页
字号:
        MPIU_Free((char*)tmp_buf + recvtype_true_lb);
    }
    else {  /* long message or medium-size message and non-power-of-two
             * no. of processes. Use ring algorithm. */
        if (sendbuf != MPI_IN_PLACE) {
            /* First, load the "local" version in the recvbuf. */
            mpi_errno = MPIR_Localcopy(sendbuf, sendcount, sendtype,
                               ((char *)recvbuf + displs[rank]*recvtype_extent),
                                       recvcounts[rank], recvtype);
	    /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno)
	    {
		mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
		return mpi_errno;
	    }
	    /* --END ERROR HANDLING-- */
        }

        /* Ring: each step forwards the block received in the previous step
           to the right neighbor while receiving the next block from the
           left neighbor; after comm_size-1 steps everyone has all blocks. */
        left  = (comm_size + rank - 1) % comm_size;
        right = (rank + 1) % comm_size;

        j     = rank;
        jnext = left;
        for (i=1; i<comm_size; i++) {
            mpi_errno = MPIC_Sendrecv(((char *)recvbuf+displs[j]*recvtype_extent),
                                      recvcounts[j], recvtype, right,
                                      MPIR_ALLGATHERV_TAG,
                                  ((char *)recvbuf + displs[jnext]*recvtype_extent),
                                      recvcounts[jnext], recvtype, left,
                                      MPIR_ALLGATHERV_TAG, comm, &status );
	    /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno)
	    {
		mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
		return mpi_errno;
	    }
	    /* --END ERROR HANDLING-- */
            j	    = jnext;
            jnext = (comm_size + jnext - 1) % comm_size;
        }
    }

    /* check if multiple threads are calling this collective function */
    MPIDU_ERR_CHECK_MULTIPLE_THREADS_EXIT( comm_ptr );

    return (mpi_errno);
}
/* end:nested */

/* begin:nested */
/* not declared static because a machine-specific function may call this one
   in some cases */
/* Intercommunicator allgatherv: every process in each group receives the
   concatenated contributions of all processes in the remote group. */
int MPIR_Allgatherv_inter (
    void *sendbuf,
    int sendcount,
    MPI_Datatype sendtype,
    void *recvbuf,
    int *recvcounts,
    int *displs,
    MPI_Datatype recvtype,
    MPID_Comm *comm_ptr )
{
/* Intercommunicator Allgatherv.
   This is done differently from the intercommunicator allgather
   because we don't have all the information to do a local
   intracommunicator gather (sendcount can be different on each
   process). Therefore, we do the following:
   Each group first does an intercommunicator gather to rank 0
   and then does an intracommunicator broadcast. */
    static const char FCNAME[] = "MPIR_Allgatherv_inter";
    int remote_size, mpi_errno, root, rank;
    MPID_Comm *newcomm_ptr = NULL;
    MPI_Datatype newtype;

    remote_size = comm_ptr->remote_size;
    rank = comm_ptr->rank;

    /* first do an intercommunicator gatherv from left to right group,
       then from right to left group */
    if (comm_ptr->is_low_group) {
        /* gatherv from right group: local rank 0 is the receiving root
           (MPI_ROOT); all other local ranks pass MPI_PROC_NULL. */
        root = (rank == 0) ? MPI_ROOT : MPI_PROC_NULL;
        mpi_errno = MPIR_Gatherv(sendbuf, sendcount, sendtype, recvbuf,
                                 recvcounts, displs, recvtype, root,
                                 comm_ptr);
	/* --BEGIN ERROR HANDLING-- */
        if (mpi_errno)
	{
	    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
	    return mpi_errno;
	}
	/* --END ERROR HANDLING-- */
        /* gatherv to right group: rank 0 of the remote group receives */
        root = 0;
        mpi_errno = MPIR_Gatherv(sendbuf, sendcount, sendtype, recvbuf,
                                 recvcounts, displs, recvtype, root,
                                 comm_ptr);
	/* --BEGIN ERROR HANDLING-- */
        if (mpi_errno)
	{
	    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
	    return mpi_errno;
	}
	/* --END ERROR HANDLING-- */
    }
    else {
        /* gatherv to left group: rank 0 of the remote group receives */
        root = 0;
        mpi_errno = MPIR_Gatherv(sendbuf, sendcount,
                                 sendtype, recvbuf,
                                 recvcounts, displs, recvtype, root,
                                 comm_ptr);
	/* --BEGIN ERROR HANDLING-- */
        if (mpi_errno)
	{
	    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
	    return mpi_errno;
	}
	/* --END ERROR HANDLING-- */
        /* gatherv from left group: local rank 0 is the receiving root */
        root = (rank == 0) ? MPI_ROOT : MPI_PROC_NULL;
        mpi_errno = MPIR_Gatherv(sendbuf, sendcount, sendtype, recvbuf,
                                 recvcounts, displs, recvtype, root,
                                 comm_ptr);
	/* --BEGIN ERROR HANDLING-- */
        if (mpi_errno)
	{
	    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
	    return mpi_errno;
	}
	/* --END ERROR HANDLING-- */
    }

    /* now do an intracommunicator broadcast within each group. we use
       a derived datatype to handle the displacements */

    /* Get the local intracommunicator */
    if (!comm_ptr->local_comm)
	MPIR_Setup_intercomm_localcomm( comm_ptr );

    newcomm_ptr = comm_ptr->local_comm;

    /* One indexed datatype describes the whole scattered layout of recvbuf,
       so a single bcast of count 1 moves all remote-group blocks at once. */
    NMPI_Type_indexed(remote_size, recvcounts, displs, recvtype,
                      &newtype);
    NMPI_Type_commit(&newtype);

    /* NOTE(review): return values of NMPI_Type_indexed/commit/free are not
       checked here; only the bcast status is propagated to the caller. */
    mpi_errno = MPIR_Bcast(recvbuf, 1, newtype, 0, newcomm_ptr);

    NMPI_Type_free(&newtype);

    return mpi_errno;
}
/* end:nested */
#endif

#undef FUNCNAME
#define FUNCNAME MPI_Allgatherv

/*@
MPI_Allgatherv - Gathers data from all tasks and delivers the combined data
                 to all tasks

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. sendcount - number of elements in send buffer (integer)
. sendtype - data type of send buffer elements (handle)
. recvcounts - integer array (of length group size)
containing the number of elements that are to be received from each process
. displs - integer array (of length group size).
Entry  'i'  specifies the displacement (relative to recvbuf ) at
which to place the incoming data from process  'i'
. recvtype - data type of receive buffer elements (handle)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice)

Notes:
 The MPI standard (1.0 and 1.1) says that
.n.n
 The jth block of data sent from
 each process is received by every process and placed in the jth block of the
 buffer 'recvbuf'.
.n.n
 This is misleading; a better description is
.n.n
 The block of data sent from the jth process is received by every process
 and placed in the jth block of the buffer 'recvbuf'.
.n.n
 This text was suggested by Rajeev Thakur, and has been adopted as a
 clarification to the MPI standard by the MPI-Forum.

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_ERR_BUFFER
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
@*/
int MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                   void *recvbuf, int *recvcounts, int *displs,
                   MPI_Datatype recvtype, MPI_Comm comm)
{
    static const char FCNAME[] = "MPI_Allgatherv";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_ALLGATHERV);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_SINGLE_CS_ENTER("coll");
    MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_ALLGATHERV);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
	    MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
	}
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Datatype *recvtype_ptr=NULL, *sendtype_ptr=NULL;
            int i, comm_size;

            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;

            /* MPI_IN_PLACE as sendbuf is invalid on intercommunicators */
	    if (comm_ptr->comm_kind == MPID_INTERCOMM)
                MPIR_ERRTEST_SENDBUF_INPLACE(sendbuf, sendcount, mpi_errno);

            if (sendbuf != MPI_IN_PLACE) {
                MPIR_ERRTEST_COUNT(sendcount, mpi_errno);
                MPIR_ERRTEST_DATATYPE(sendtype, "sendtype", mpi_errno);
                if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
                    MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
                    MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
                    MPID_Datatype_committed_ptr( sendtype_ptr, mpi_errno );
                }
                MPIR_ERRTEST_USERBUFFER(sendbuf,sendcount,sendtype,mpi_errno);
            }

            /* recvcounts/displs have one entry per receiving process:
               local group size for intra-, remote group size for inter- */
            if (comm_ptr->comm_kind == MPID_INTRACOMM)
                comm_size = comm_ptr->local_size;
            else
                comm_size = comm_ptr->remote_size;

            for (i=0; i<comm_size; i++) {
                MPIR_ERRTEST_COUNT(recvcounts[i], mpi_errno);
                MPIR_ERRTEST_DATATYPE(recvtype, "recvtype", mpi_errno);
            }
            if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
                MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
                MPID_Datatype_valid_ptr( recvtype_ptr, mpi_errno );
                MPID_Datatype_committed_ptr( recvtype_ptr, mpi_errno );
            }

            /* recvbuf checks only apply if at least one count is nonzero */
            for (i=0; i<comm_size; i++) {
                if (recvcounts[i] > 0) {
                    MPIR_ERRTEST_RECVBUF_INPLACE(recvbuf,recvcounts[i],mpi_errno);
                    MPIR_ERRTEST_USERBUFFER(recvbuf,recvcounts[i],recvtype,mpi_errno);
                    break;
                }
            }
	    if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    /* A device/collective-override function table takes precedence over
       the generic implementations below. */
    if (comm_ptr->coll_fns != NULL && comm_ptr->coll_fns->Allgatherv != NULL)
    {
	mpi_errno = comm_ptr->coll_fns->Allgatherv(sendbuf, sendcount,
                                                   sendtype, recvbuf,
                                                   recvcounts, displs,
                                                   recvtype, comm_ptr);
    }
    else
    {
	MPIU_THREADPRIV_DECL;
	MPIU_THREADPRIV_GET;
	MPIR_Nest_incr();
        if (comm_ptr->comm_kind == MPID_INTRACOMM)
            /* intracommunicator */
            mpi_errno = MPIR_Allgatherv(sendbuf, sendcount,
                                        sendtype, recvbuf,
                                        recvcounts, displs,
                                        recvtype, comm_ptr);
        else {
            /* intercommunicator */
            mpi_errno = MPIR_Allgatherv_inter(sendbuf, sendcount,
					      sendtype, recvbuf,
					      recvcounts, displs,
					      recvtype, comm_ptr);
        }
	MPIR_Nest_decr();
    }

    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLGATHERV);
    MPIU_THREAD_SINGLE_CS_EXIT("coll");
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
	mpi_errno = MPIR_Err_create_code(
	    mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_allgatherv",
	    "**mpi_allgatherv %p %d %D %p %p %p %D %C", sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -