allgather.c
               subtree that have all the data. Send data to others
               in a tree fashion. First find root of current tree
               that is being divided into two. k is the number of
               least-significant bits in this process's rank that
               must be zeroed out to find the rank of the root */
            j = mask;
            k = 0;
            while (j) {
                j >>= 1;
                k++;
            }
            k--;

            offset = nbytes * (my_tree_root + mask);
            tmp_mask = mask >> 1;

            while (tmp_mask) {
                dst = rank ^ tmp_mask;

                tree_root = rank >> k;
                tree_root <<= k;

                /* send only if this proc has data and destination
                   doesn't have data. at any step, multiple processes
                   can send if they have the data */
                if ((dst > rank) &&
                    (rank < tree_root + nprocs_completed) &&
                    (dst >= tree_root + nprocs_completed)) {
                    /* last_recv_cnt was set in the previous receive;
                       that's the amount of data to be sent now. */
                    mpi_errno = MPIC_Send(((char *)tmp_buf + offset),
                                          last_recv_cnt, MPI_BYTE,
                                          dst, MPIR_ALLGATHER_TAG,
                                          comm);
                    if (mpi_errno) return mpi_errno;
                }
                /* recv only if this proc doesn't have data and
                   sender has data */
                else if ((dst < rank) &&
                         (dst < tree_root + nprocs_completed) &&
                         (rank >= tree_root + nprocs_completed)) {
                    /* nprocs_completed is also equal to the no. of
                       processes whose data we don't have */
                    mpi_errno = MPIC_Recv(((char *)tmp_buf + offset),
                                          nbytes * nprocs_completed,
                                          MPI_BYTE, dst,
                                          MPIR_ALLGATHER_TAG, comm,
                                          &status);
                    if (mpi_errno) return mpi_errno;
                    NMPI_Get_count(&status, MPI_BYTE, &last_recv_cnt);
                    curr_cnt += last_recv_cnt;
                }

                tmp_mask >>= 1;
                k--;
            }
        }
        mask <<= 1;
        i++;
    }

    position = 0;
    NMPI_Unpack(tmp_buf, tmp_buf_size, &position, recvbuf,
                recvcount * comm_size, recvtype, comm);
    MPIU_Free(tmp_buf);
    }

    /* Unlock for collective operation */
    MPID_Comm_thread_unlock( comm_ptr );

    return (mpi_errno);
}
/* end:nested */
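/* Illustrative sketch, not part of MPICH: the subtree-root bit trick
   used above. Zeroing out the k least-significant bits of a rank
   yields the lowest rank (the root) of the 2^k-sized subtree that
   contains it. The guard macro ALLGATHER_TREE_ROOT_EXAMPLE and the
   helper example_tree_root() are hypothetical names, so this
   standalone demonstration is never compiled into the file. */
#ifdef ALLGATHER_TREE_ROOT_EXAMPLE
#include <stdio.h>

/* Zero out the k least-significant bits of rank: the result is the
   smallest rank in the same subtree of size 2^k. */
static int example_tree_root(int rank, int k)
{
    return (rank >> k) << k;
}

int main(void)
{
    int rank;

    /* With mask = 4 the loop above computes k = 2: ranks 0..3 share
       root 0 and ranks 4..7 share root 4. */
    for (rank = 0; rank < 8; rank++)
        printf("rank %d -> subtree root %d\n",
               rank, example_tree_root(rank, 2));
    return 0;
}
#endif /* ALLGATHER_TREE_ROOT_EXAMPLE */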
/* begin:nested */
PMPI_LOCAL int MPIR_Allgather_inter ( void *sendbuf, int sendcount,
                                      MPI_Datatype sendtype,
                                      void *recvbuf, int recvcount,
                                      MPI_Datatype recvtype,
                                      MPID_Comm *comm_ptr )
{
    /* Intercommunicator Allgather. Each group does a gather to the
       local root with the local intracommunicator, and then does an
       intercommunicator broadcast. */

    int rank, local_size, remote_size, mpi_errno, root;
    MPI_Comm newcomm;
    MPI_Aint true_extent, true_lb;
    void *tmp_buf = NULL;
    MPID_Comm *newcomm_ptr = NULL;

    local_size = comm_ptr->local_size;
    remote_size = comm_ptr->remote_size;
    rank = comm_ptr->rank;

    if (rank == 0) {
        /* In each group, rank 0 allocates a temp. buffer for the
           local gather */
        mpi_errno = NMPI_Type_get_true_extent(sendtype, &true_lb,
                                              &true_extent);
        if (mpi_errno) return mpi_errno;

        tmp_buf = MPIU_Malloc(true_extent * sendcount * local_size);
        if (!tmp_buf) {
            mpi_errno = MPIR_Err_create_code( MPI_ERR_OTHER, "**nomem", 0 );
            return mpi_errno;
        }
        /* adjust for potential negative lower bound in datatype */
        tmp_buf = (void *)((char *)tmp_buf - true_lb);
    }

    /* Get the local intracommunicator */
    if (!comm_ptr->local_comm)
        MPIR_Setup_intercomm_localcomm( comm_ptr );

    newcomm_ptr = comm_ptr->local_comm;
    newcomm = newcomm_ptr->handle;

    mpi_errno = MPIR_Gather(sendbuf, sendcount, sendtype,
                            tmp_buf, sendcount, sendtype, 0,
                            newcomm_ptr);
    if (mpi_errno) return mpi_errno;

    /* first broadcast from left to right group, then from right to
       left group */
    if (comm_ptr->is_low_group) {
        /* bcast to right */
        root = (rank == 0) ? MPI_ROOT : MPI_PROC_NULL;
        mpi_errno = MPIR_Bcast(tmp_buf, sendcount * local_size,
                               sendtype, root, comm_ptr);
        if (mpi_errno) return mpi_errno;

        /* receive bcast from right */
        root = 0;
        mpi_errno = MPIR_Bcast(recvbuf, recvcount * remote_size,
                               recvtype, root, comm_ptr);
        if (mpi_errno) return mpi_errno;
    }
    else {
        /* receive bcast from left */
        root = 0;
        mpi_errno = MPIR_Bcast(recvbuf, recvcount * remote_size,
                               recvtype, root, comm_ptr);
        if (mpi_errno) return mpi_errno;

        /* bcast to left */
        root = (rank == 0) ? MPI_ROOT : MPI_PROC_NULL;
        mpi_errno = MPIR_Bcast(tmp_buf, sendcount * local_size,
                               sendtype, root, comm_ptr);
        if (mpi_errno) return mpi_errno;
    }

    if (rank == 0)
        MPIU_Free((char *)tmp_buf + true_lb);

    return mpi_errno;
}
/* end:nested */
#endif
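/* Illustrative sketch, not part of MPICH: the same gather-then-
   broadcast pattern expressed with the public MPI API. Each group
   gathers to its local rank 0, then the two roots exchange the
   gathered blocks with intercommunicator broadcasts (MPI_ROOT on the
   sending root, MPI_PROC_NULL on its peers, 0 on every receiver in
   the remote group). The guard macro ALLGATHER_INTER_EXAMPLE, the
   function allgather_over_intercomm(), and the caller-supplied
   is_low_group flag are hypothetical; the intercommunicator is
   assumed to come from, e.g., MPI_Intercomm_create. */
#ifdef ALLGATHER_INTER_EXAMPLE
#include <mpi.h>
#include <stdlib.h>

static void allgather_over_intercomm(int sendval, int *recvbuf,
                                     int local_size, int remote_size,
                                     int is_low_group,
                                     MPI_Comm localcomm,
                                     MPI_Comm intercomm)
{
    int rank, root;
    int *tmp = NULL;

    MPI_Comm_rank(localcomm, &rank);
    if (rank == 0)
        tmp = malloc(local_size * sizeof(int));

    /* Phase 1: gather this group's contributions to its local root. */
    MPI_Gather(&sendval, 1, MPI_INT, tmp, 1, MPI_INT, 0, localcomm);

    /* Phase 2: exchange the gathered blocks. The two groups must
       issue the broadcasts in opposite orders so they match up. */
    if (is_low_group) {
        root = (rank == 0) ? MPI_ROOT : MPI_PROC_NULL;
        MPI_Bcast(tmp, local_size, MPI_INT, root, intercomm);
        MPI_Bcast(recvbuf, remote_size, MPI_INT, 0, intercomm);
    }
    else {
        MPI_Bcast(recvbuf, remote_size, MPI_INT, 0, intercomm);
        root = (rank == 0) ? MPI_ROOT : MPI_PROC_NULL;
        MPI_Bcast(tmp, local_size, MPI_INT, root, intercomm);
    }

    free(tmp);
}
#endif /* ALLGATHER_INTER_EXAMPLE */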
#undef FUNCNAME
#define FUNCNAME MPI_Allgather

/*@
   MPI_Allgather - allgather

   Arguments:
+  void *sendbuf - send buffer
.  int sendcount - send count
.  MPI_Datatype sendtype - send datatype
.  void *recvbuf - receive buffer
.  int recvcount - receive count
.  MPI_Datatype recvtype - receive datatype
-  MPI_Comm comm - communicator

   Notes:

.N Fortran

.N Errors
.N MPI_SUCCESS
@*/
int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                  void *recvbuf, int recvcount, MPI_Datatype recvtype,
                  MPI_Comm comm)
{
    static const char FCNAME[] = "MPI_Allgather";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_ALLGATHER);

    MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_ALLGATHER);

    /* Verify that MPI has been initialized */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_INITIALIZED(mpi_errno);
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) {
                return MPIR_Err_return_comm( 0, FCNAME, mpi_errno );
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Get handles to MPI objects. */
    MPID_Comm_get_ptr( comm, comm_ptr );

#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Datatype *recvtype_ptr = NULL, *sendtype_ptr = NULL;

            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno != MPI_SUCCESS) {
                MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLGATHER);
                return MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
            }

            MPIR_ERRTEST_COUNT(sendcount, mpi_errno);
            MPIR_ERRTEST_COUNT(recvcount, mpi_errno);
            MPIR_ERRTEST_DATATYPE(sendcount, sendtype, mpi_errno);
            MPIR_ERRTEST_DATATYPE(recvcount, recvtype, mpi_errno);

            if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
                MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
                MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
            }
            if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
                MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
                MPID_Datatype_valid_ptr( recvtype_ptr, mpi_errno );
            }
            if (mpi_errno != MPI_SUCCESS) {
                MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLGATHER);
                return MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    if (comm_ptr->coll_fns != NULL && comm_ptr->coll_fns->Allgather != NULL) {
        mpi_errno = comm_ptr->coll_fns->Allgather(sendbuf, sendcount,
                                                  sendtype, recvbuf,
                                                  recvcount, recvtype,
                                                  comm_ptr);
    }
    else {
        MPIR_Nest_incr();
        if (comm_ptr->comm_kind == MPID_INTRACOMM)
            /* intracommunicator */
            mpi_errno = MPIR_Allgather(sendbuf, sendcount, sendtype,
                                       recvbuf, recvcount, recvtype,
                                       comm_ptr);
        else {
            /* intercommunicator */
            mpi_errno = MPIR_Err_create_code( MPI_ERR_COMM,
                                              "**intercommcoll",
                                              "**intercommcoll %s",
                                              FCNAME );
            /* mpi_errno = MPIR_Allgather_inter(sendbuf, sendcount,
                                                sendtype, recvbuf,
                                                recvcount, recvtype,
                                                comm_ptr); */
        }
        MPIR_Nest_decr();
    }

    if (mpi_errno == MPI_SUCCESS) {
        MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLGATHER);
        return MPI_SUCCESS;
    }
    else {
        MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLGATHER);
        return MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    }
    /* ... end of body of routine ... */
}
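/* Illustrative usage, not part of MPICH: a minimal program in which
   every rank contributes one integer and every rank receives the
   full vector. The guard macro ALLGATHER_USAGE_EXAMPLE is a
   hypothetical name, so this sketch is never compiled into the file;
   build it separately with an MPI wrapper such as mpicc. */
#ifdef ALLGATHER_USAGE_EXAMPLE
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, i;
    int *all;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    all = malloc(size * sizeof(int));

    /* Each rank sends one int; afterwards all[i] == i on every rank. */
    MPI_Allgather(&rank, 1, MPI_INT, all, 1, MPI_INT, MPI_COMM_WORLD);

    if (rank == 0) {
        for (i = 0; i < size; i++)
            printf("all[%d] = %d\n", i, all[i]);
    }

    free(all);
    MPI_Finalize();
    return 0;
}
#endif /* ALLGATHER_USAGE_EXAMPLE */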