⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 alltoall.c

📁 MPICH 安装文件:用于在 Linux 环境下进行 MPI 并行编程的安装程序(前文为说明文档)
💻 C
📖 第 1 页 / 共 2 页
字号:
            /* NOTE(review): this chunk begins mid-way through the
               intracommunicator MPIR_Alltoall; the declarations and the
               short-message irecv/isend posting loops precede this view.
               Below is the completion of the short-message branch. */
            if (mpi_errno) return mpi_errno;
        }
        
        /* ... then wait for *all* of them to finish: */
        mpi_errno = NMPI_Waitall(2*comm_size,reqarray,starray);
        if (mpi_errno == MPI_ERR_IN_STATUS) {
            /* At least one request failed: scan the status array and
               surface the (last) specific per-request error code rather
               than the generic MPI_ERR_IN_STATUS. */
            for (j=0; j<2*comm_size; j++) {
                if (starray[j].MPI_ERROR != MPI_SUCCESS) 
                    mpi_errno = starray[j].MPI_ERROR;
            }
        }
        MPIU_Free(starray);
        MPIU_Free(reqarray);
    }
    else {
        /* Long message. Use pairwise exchange. If comm_size is a
           power-of-two, use exclusive-or to create pairs. Else send
           to rank+i, receive from rank-i. */
        
        /* Make local copy first (own block: sendbuf -> recvbuf) */
        mpi_errno = MPIR_Localcopy(((char *)sendbuf + 
                                    rank*sendcount*sendtype_extent), 
                                   sendcount, sendtype, 
                                   ((char *)recvbuf +
                                    rank*recvcount*recvtype_extent),
                                   recvcount, recvtype);
        if (mpi_errno) return mpi_errno;
        
        /* Is comm_size a power-of-two? */
        i = 1;
        while (i < comm_size)
            i *= 2;
        if (i == comm_size)
            pof2 = 1;
        else 
            pof2 = 0;
        
        /* Do the pairwise exchanges */
        for (i=1; i<comm_size; i++) {
            if (pof2 == 1) {
                /* use exclusive-or algorithm */
                src = dst = rank ^ i;
            }
            else {
                /* non-power-of-two: ring-style pairing */
                src = (rank - i + comm_size) % comm_size;
                dst = (rank + i) % comm_size;
            }
            /* Simultaneously send block dst and receive block src */
            mpi_errno = MPIC_Sendrecv(((char *)sendbuf +
                                       dst*sendcount*sendtype_extent), 
                                      sendcount, sendtype, dst,
                                      MPIR_ALLTOALL_TAG, 
                                      ((char *)recvbuf +
                                       src*recvcount*recvtype_extent),
                                      recvcount, recvtype, src,
                                      MPIR_ALLTOALL_TAG, comm, &status);
            if (mpi_errno) return mpi_errno;
        }
    }
    
    /* Unlock for collective operation */
    MPID_Comm_thread_unlock( comm_ptr );
    
    return (mpi_errno);
}
/* end:nested */
/* begin:nested */
PMPI_LOCAL int MPIR_Alltoall_inter( 
    void *sendbuf, 
    int sendcount, 
    MPI_Datatype sendtype, 
    void *recvbuf, 
    int recvcount, 
    MPI_Datatype recvtype, 
    MPID_Comm *comm_ptr )
{
/* Intercommunicator alltoall. We use a pairwise exchange algorithm
   similar to the one used in intracommunicator alltoall for long
   messages. Since the local and remote groups can be of different
   sizes, we first compute the max of local_group_size,
   remote_group_size. At step i, 0 <= i < max_size, each process
   receives from src = (rank - i + max_size) % max_size if src <
   remote_size, and sends to dst = (rank + i) % max_size if dst <
   remote_size. 
*/
    int          local_size, remote_size, max_size, i;
    MPI_Aint     sendtype_extent, recvtype_extent;
    int          mpi_errno = MPI_SUCCESS;
    MPI_Status status;
    int src, dst, rank;
    char *sendaddr, *recvaddr;
    MPI_Comm comm;
    
    local_size = comm_ptr->local_size; 
    remote_size = comm_ptr->remote_size;
    rank = comm_ptr->rank;
    comm = comm_ptr->handle;
    
    /* Get extent of send and recv types */
    MPID_Datatype_get_extent_macro(sendtype, sendtype_extent);
    MPID_Datatype_get_extent_macro(recvtype, recvtype_extent);
    
    /* Lock for collective operation */
    MPID_Comm_thread_lock( comm_ptr );
    
    /* Do the pairwise exchanges */
    max_size = MPIR_MAX(local_size, remote_size);
    for (i=0; i<max_size; i++) {
        src = (rank - i + max_size) % max_size;
        dst = (rank + i) % max_size;
        if (src >= remote_size) {
            /* no peer at this step: receive from MPI_PROC_NULL (no-op) */
            src = MPI_PROC_NULL;
            recvaddr = NULL;
        }
        else {
            recvaddr = (char *)recvbuf + src*recvcount*recvtype_extent;
        }
        if (dst >= remote_size) {
            /* no peer at this step: send to MPI_PROC_NULL (no-op) */
            dst = MPI_PROC_NULL;
            sendaddr = NULL;
        }
        else {
            sendaddr = (char *)sendbuf + dst*sendcount*sendtype_extent;
        }
        mpi_errno = MPIC_Sendrecv(sendaddr, sendcount, sendtype, dst, 
                                  MPIR_ALLTOALL_TAG, recvaddr,
                                  recvcount, recvtype, src,
                                  MPIR_ALLTOALL_TAG, comm, &status);
        if (mpi_errno) return mpi_errno;
    }
    
    /* Unlock for collective operation */
    MPID_Comm_thread_unlock( comm_ptr );
    
    return (mpi_errno);
}
/* end:nested */
#endif

#undef FUNCNAME
#define FUNCNAME MPI_Alltoall

/*@
   MPI_Alltoall - alltoall

   Arguments:
+  void *sendbuf - send buffer
.  int sendcount - send count
.  MPI_Datatype sendtype - send datatype
.  void *recvbuf - receive buffer
.  int recvcount - receive count
.  
MPI_Datatype recvtype - receive datatype
-  MPI_Comm comm - communicator

   Notes:
.N Fortran
.N Errors
.N MPI_SUCCESS
@*/
/* Public entry point for MPI_Alltoall: validates arguments (when
   HAVE_ERROR_CHECKING is enabled) and dispatches either to a
   device-provided collective override or to the generic
   MPIR_Alltoall implementation.  Intercommunicators are not yet
   supported and yield an "**intercommcoll" error. */
int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                 void *recvbuf, int recvcount, MPI_Datatype recvtype,
                 MPI_Comm comm)
{
    static const char FCNAME[] = "MPI_Alltoall";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_ALLTOALL);

    MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_ALLTOALL);

    /* Verify that MPI has been initialized */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
	    MPIR_ERRTEST_INITIALIZED(mpi_errno);
	    MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) {
                /* FIX: call the state-exit macro on this early-return
                   path too, matching every other error return below. */
                MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLTOALL);
                return MPIR_Err_return_comm( 0, FCNAME, mpi_errno );
            }
	}
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Get handles to MPI objects. */
    MPID_Comm_get_ptr( comm, comm_ptr );

#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
	    MPID_Datatype *sendtype_ptr=NULL, *recvtype_ptr=NULL;

            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno != MPI_SUCCESS) {
                MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLTOALL);
                return MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
            }

	    MPIR_ERRTEST_COUNT(sendcount, mpi_errno);
	    MPIR_ERRTEST_COUNT(recvcount, mpi_errno);
	    MPIR_ERRTEST_DATATYPE(sendcount, sendtype, mpi_errno);
	    MPIR_ERRTEST_DATATYPE(recvcount, recvtype, mpi_errno);

            /* Builtin datatype handles need no object validation */
            if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
                MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
                MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
            }
            if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
                MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
                MPID_Datatype_valid_ptr( recvtype_ptr, mpi_errno );
            }
            if (mpi_errno != MPI_SUCCESS) {
                MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLTOALL);
                return MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */
    if (comm_ptr->coll_fns != NULL && comm_ptr->coll_fns->Alltoall != NULL)
    {
        /* Device layer supplied its own alltoall; use it */
	mpi_errno = comm_ptr->coll_fns->Alltoall(sendbuf, sendcount,
                                                 sendtype, recvbuf, recvcount,
                                                 recvtype, comm_ptr);
    }
    else
    {
	MPIR_Nest_incr();
        if (comm_ptr->comm_kind == MPID_INTRACOMM)
            /* intracommunicator */
            mpi_errno = MPIR_Alltoall(sendbuf, sendcount, sendtype,
                                      recvbuf, recvcount, recvtype, comm_ptr);
        else {
            /* intercommunicator: not supported yet */
	    mpi_errno = MPIR_Err_create_code( MPI_ERR_COMM,
					      "**intercommcoll",
					      "**intercommcoll %s", FCNAME );
            /*mpi_errno = MPIR_Alltoall_inter(sendbuf, sendcount,
                                            sendtype, recvbuf,
                                            recvcount, recvtype,
                                            comm_ptr); */
        }
	MPIR_Nest_decr();
    }
    /* ... end of body of routine ... */

    /* Single exit point (the original duplicated the exit macro in
       both branches and left an unreachable trailing return). */
    MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLTOALL);
    if (mpi_errno == MPI_SUCCESS)
    {
	return MPI_SUCCESS;
    }
    return MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -