
allgatherv.c

From the MPICH installation package used for MPI programming in a Linux environment (C source).
                   that is being divided into two. k is the number of
                   least-significant bits in this process's rank that
                   must be zeroed out to find the rank of the root */
                j = mask;
                k = 0;
                while (j) {
                    j >>= 1;
                    k++;
                }
                k--;

                offset = 0;
                for (j=0; j<(my_tree_root+mask); j++)
                    offset += recvcounts[j];
                offset *= nbytes;

                tmp_mask = mask >> 1;

                while (tmp_mask) {
                    dst = rank ^ tmp_mask;

                    tree_root = rank >> k;
                    tree_root <<= k;

                    /* send only if this proc has data and destination
                       doesn't have data. at any step, multiple processes
                       can send if they have the data */
                    if ((dst > rank) &&
                        (rank < tree_root + nprocs_completed) &&
                        (dst >= tree_root + nprocs_completed)) {

                        mpi_errno = MPIC_Send(((char *)tmp_buf + offset),
                                              last_recv_cnt, MPI_BYTE,
                                              dst, MPIR_ALLGATHERV_TAG,
                                              comm);
                        /* last_recv_cnt was set in the previous
                           receive. that's the amount of data to be
                           sent now. */
                        if (mpi_errno) return mpi_errno;
                    }
                    /* recv only if this proc. doesn't have data and sender
                       has data */
                    else if ((dst < rank) &&
                             (dst < tree_root + nprocs_completed) &&
                             (rank >= tree_root + nprocs_completed)) {
                        mpi_errno = MPIC_Recv(((char *)tmp_buf + offset),
                                              nbytes*total_count, MPI_BYTE,
                                              dst,
                                              MPIR_ALLGATHERV_TAG,
                                              comm, &status);
                        /* for convenience, recv is posted for a bigger amount
                           than will be sent */
                        if (mpi_errno) return mpi_errno;
                        NMPI_Get_count(&status, MPI_BYTE, &last_recv_cnt);
                        curr_cnt += last_recv_cnt;
                    }
                    tmp_mask >>= 1;
                    k--;
                }
            }
            mask <<= 1;
            i++;
        }

        position = 0;
        for (j=0; j<comm_size; j++) {
            if ((sendbuf != MPI_IN_PLACE) || (j != rank)) {
                /* not necessary to unpack if in_place and
                   j==rank. otherwise unpack. */
                NMPI_Unpack(tmp_buf, tmp_buf_size, &position,
                            ((char *)recvbuf + displs[j]*recv_extent),
                            recvcounts[j], recvtype, comm);
            }
        }

        MPIU_Free(tmp_buf);
    }

    /* Unlock for collective operation */
    MPID_Comm_thread_unlock( comm_ptr );

    return (mpi_errno);
}
/* end:nested */

/* begin:nested */
PMPI_LOCAL int MPIR_Allgatherv_inter (
    void *sendbuf,
    int sendcount,
    MPI_Datatype sendtype,
    void *recvbuf,
    int *recvcounts,
    int *displs,
    MPI_Datatype recvtype,
    MPID_Comm *comm_ptr )
{
/* Intercommunicator Allgatherv.
   This is done differently from the intercommunicator allgather
   because we don't have all the information to do a local
   intracommunicator gather (sendcount can be different on each
   process). Therefore, we do the following:

   Each group first does an intercommunicator gather to rank 0
   and then does an intracommunicator broadcast.
*/
    int remote_size, mpi_errno, root, rank;
    MPID_Comm *newcomm_ptr = NULL;
    MPI_Datatype newtype;

    remote_size = comm_ptr->remote_size;
    rank = comm_ptr->rank;

    /* first do an intercommunicator gatherv from left to right group,
       then from right to left group */
    if (comm_ptr->is_low_group) {
        /* gatherv from right group */
        root = (rank == 0) ? MPI_ROOT : MPI_PROC_NULL;
        mpi_errno = MPIR_Gatherv(sendbuf, sendcount, sendtype, recvbuf,
                                 recvcounts, displs, recvtype, root,
                                 comm_ptr);
        if (mpi_errno) return mpi_errno;

        /* gatherv to right group */
        root = 0;
        mpi_errno = MPIR_Gatherv(sendbuf, sendcount, sendtype, recvbuf,
                                 recvcounts, displs, recvtype, root,
                                 comm_ptr);
        if (mpi_errno) return mpi_errno;
    }
    else {
        /* gatherv to left group */
        root = 0;
        mpi_errno = MPIR_Gatherv(sendbuf, sendcount, sendtype, recvbuf,
                                 recvcounts, displs, recvtype, root,
                                 comm_ptr);
        if (mpi_errno) return mpi_errno;

        /* gatherv from left group */
        root = (rank == 0) ? MPI_ROOT : MPI_PROC_NULL;
        mpi_errno = MPIR_Gatherv(sendbuf, sendcount, sendtype, recvbuf,
                                 recvcounts, displs, recvtype, root,
                                 comm_ptr);
        if (mpi_errno) return mpi_errno;
    }

    /* now do an intracommunicator broadcast within each group. we use
       a derived datatype to handle the displacements */

    /* Get the local intracommunicator */
    if (!comm_ptr->local_comm)
        MPIR_Setup_intercomm_localcomm( comm_ptr );

    newcomm_ptr = comm_ptr->local_comm;

    NMPI_Type_indexed(remote_size, recvcounts, displs, recvtype,
                      &newtype);
    NMPI_Type_commit(&newtype);

    mpi_errno = MPIR_Bcast(recvbuf, 1, newtype, 0, newcomm_ptr);

    NMPI_Type_free(&newtype);

    return mpi_errno;
}
/* end:nested */
#endif

#undef FUNCNAME
#define FUNCNAME MPI_Allgatherv

/*@
   MPI_Allgatherv - allgatherv

   Arguments:
+  void *sendbuf - send buffer
.  int sendcount - send count
.  MPI_Datatype sendtype - send datatype
.  void *recvbuf - receive buffer
.  int *recvcounts - receive counts
.  int *displs - receive displacements
.  MPI_Datatype recvtype - receive datatype
-  MPI_Comm comm - communicator

   Notes:

.N Fortran

.N Errors
.N MPI_SUCCESS
@*/
int MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                   void *recvbuf, int *recvcounts, int *displs,
                   MPI_Datatype recvtype, MPI_Comm comm)
{
    static const char FCNAME[] = "MPI_Allgatherv";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_ALLGATHERV);

    MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_ALLGATHERV);

    /* Verify that MPI has been initialized */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_INITIALIZED(mpi_errno);
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) {
                return MPIR_Err_return_comm( 0, FCNAME, mpi_errno );
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Get handles to MPI objects. */
    MPID_Comm_get_ptr( comm, comm_ptr );

#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Datatype *recvtype_ptr=NULL, *sendtype_ptr=NULL;
            int i, comm_size;

            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno != MPI_SUCCESS) {
                MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLGATHERV);
                return MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
            }

            MPIR_ERRTEST_COUNT(sendcount, mpi_errno);
            MPIR_ERRTEST_DATATYPE(sendcount, sendtype, mpi_errno);

            comm_size = comm_ptr->local_size;
            for (i=0; i<comm_size; i++) {
                MPIR_ERRTEST_COUNT(recvcounts[i], mpi_errno);
                MPIR_ERRTEST_DATATYPE(recvcounts[i], recvtype, mpi_errno);
            }

            if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
                MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
                MPID_Datatype_valid_ptr( recvtype_ptr, mpi_errno );
            }
            if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
                MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
                MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
            }

            if (mpi_errno != MPI_SUCCESS) {
                MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLGATHERV);
                return MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */
    if (comm_ptr->coll_fns != NULL && comm_ptr->coll_fns->Allgatherv != NULL)
    {
        mpi_errno = comm_ptr->coll_fns->Allgatherv(sendbuf, sendcount,
                                                   sendtype, recvbuf,
                                                   recvcounts, displs,
                                                   recvtype, comm_ptr);
    }
    else
    {
        MPIR_Nest_incr();
        if (comm_ptr->comm_kind == MPID_INTRACOMM)
            /* intracommunicator */
            mpi_errno = MPIR_Allgatherv(sendbuf, sendcount,
                                        sendtype, recvbuf,
                                        recvcounts, displs,
                                        recvtype, comm_ptr);
        else {
            /* intercommunicator */
            mpi_errno = MPIR_Err_create_code( MPI_ERR_COMM,
                                              "**intercommcoll",
                                              "**intercommcoll %s", FCNAME );
            /* mpi_errno = MPIR_Allgatherv_inter(sendbuf, sendcount,
                                        sendtype, recvbuf,
                                        recvcounts, displs,
                                        recvtype, comm_ptr); */
        }
        MPIR_Nest_decr();
    }

    if (mpi_errno == MPI_SUCCESS)
    {
        MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLGATHERV);
        return MPI_SUCCESS;
    }
    else
    {
        MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLGATHERV);
        return MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    }

    /* ... end of body of routine ... */
}
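For context, a minimal standalone usage sketch of MPI_Allgatherv (not part of allgatherv.c; the counts and values are invented for illustration): each rank contributes rank+1 integers, and recvcounts/displs describe where every rank's contribution lands in the shared receive buffer.

/* Hypothetical example, not from the original file. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, i;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *recvcounts = malloc(size * sizeof(int));
    int *displs     = malloc(size * sizeof(int));
    int total = 0;
    for (i = 0; i < size; i++) {
        recvcounts[i] = i + 1;     /* rank i contributes i+1 ints   */
        displs[i]     = total;     /* contributions packed in order */
        total        += recvcounts[i];
    }

    int  sendcount = rank + 1;
    int *sendbuf   = malloc(sendcount * sizeof(int));
    int *recvbuf   = malloc(total * sizeof(int));
    for (i = 0; i < sendcount; i++) sendbuf[i] = rank;

    MPI_Allgatherv(sendbuf, sendcount, MPI_INT,
                   recvbuf, recvcounts, displs, MPI_INT,
                   MPI_COMM_WORLD);

    if (rank == 0) {
        for (i = 0; i < total; i++) printf("%d ", recvbuf[i]);
        printf("\n");
    }

    free(sendbuf); free(recvbuf); free(recvcounts); free(displs);
    MPI_Finalize();
    return 0;
}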

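The recursive-doubling branch above deliberately posts each receive for more bytes than may arrive and then asks MPI how much actually came in (MPIC_Recv followed by NMPI_Get_count). A sketch of the same pattern with the public MPI API; the helper name is hypothetical.

/* Hypothetical sketch of "post a bigger receive, then query the
   actual count", mirroring the MPIC_Recv + NMPI_Get_count usage above. */
#include <mpi.h>

int recv_up_to(void *buf, int max_bytes, int src, int tag,
               MPI_Comm comm, int *actual_bytes)
{
    MPI_Status status;
    int err;

    /* the receive is posted for the largest amount that could arrive */
    err = MPI_Recv(buf, max_bytes, MPI_BYTE, src, tag, comm, &status);
    if (err != MPI_SUCCESS) return err;

    /* MPI_Get_count reports how many MPI_BYTEs were actually received */
    return MPI_Get_count(&status, MPI_BYTE, actual_bytes);
}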
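MPIR_Allgatherv_inter finishes by broadcasting the gathered-but-scattered receive buffer after wrapping the (recvcounts, displs) layout in a single indexed datatype. A sketch of that derived-datatype broadcast written against the public MPI API under the assumption of an ordinary intracommunicator; the function name is mine, not from the file.

/* Sketch of the derived-datatype broadcast used in MPIR_Allgatherv_inter
   (NMPI_Type_indexed + MPIR_Bcast there), expressed with the public API. */
#include <mpi.h>

int bcast_scattered(void *recvbuf, int nblocks,
                    int *recvcounts, int *displs,
                    MPI_Datatype basetype, int root, MPI_Comm comm)
{
    MPI_Datatype layout;
    int err;

    /* one datatype that covers every block at its displacement */
    MPI_Type_indexed(nblocks, recvcounts, displs, basetype, &layout);
    MPI_Type_commit(&layout);

    /* broadcasting "1 element" of the layout moves all the blocks,
       leaving the gaps between displacements untouched */
    err = MPI_Bcast(recvbuf, 1, layout, root, comm);

    MPI_Type_free(&layout);
    return err;
}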