scatterv.c
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
 *
 *  (C) 2001 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#include "mpiimpl.h"

/* -- Begin Profiling Symbol Block for routine MPI_Scatterv */
#if defined(HAVE_PRAGMA_WEAK)
#pragma weak MPI_Scatterv = PMPI_Scatterv
#elif defined(HAVE_PRAGMA_HP_SEC_DEF)
#pragma _HP_SECONDARY_DEF PMPI_Scatterv  MPI_Scatterv
#elif defined(HAVE_PRAGMA_CRI_DUP)
#pragma _CRI duplicate MPI_Scatterv as PMPI_Scatterv
#endif
/* -- End Profiling Symbol Block */

/* Define MPICH_MPI_FROM_PMPI if weak symbols are not supported to build
   the MPI routines */
#ifndef MPICH_MPI_FROM_PMPI
#define MPI_Scatterv PMPI_Scatterv

/* This is the default implementation of scatterv. The algorithm is:

   Algorithm: MPI_Scatterv

   Since the array of sendcounts is valid only on the root, we cannot
   do a tree algorithm without first communicating the sendcounts to
   the other processes. Therefore, we simply use a linear algorithm for
   the scatter, which takes (p-1) steps versus lg(p) steps for the tree
   algorithm. The bandwidth requirement is the same for both algorithms.

   Cost = (p-1).alpha + n.((p-1)/p).beta

   Possible improvements:

   End Algorithm: MPI_Scatterv
*/

/* not declared static because it is called in intercomm. reduce_scatter */
#undef FUNCNAME
#define FUNCNAME MPIR_Scatterv

int MPIR_Scatterv (
    void *sendbuf,
    int *sendcnts,
    int *displs,
    MPI_Datatype sendtype,
    void *recvbuf,
    int recvcnt,
    MPI_Datatype recvtype,
    int root,
    MPID_Comm *comm_ptr )
{
    static const char FCNAME[] = "MPIR_Scatterv";
    int rank, mpi_errno = MPI_SUCCESS;
    MPI_Comm comm;
    MPI_Aint extent;
    int      i;

    comm = comm_ptr->handle;
    rank = comm_ptr->rank;

    /* check if multiple threads are calling this collective function */
    MPIDU_ERR_CHECK_MULTIPLE_THREADS_ENTER( comm_ptr );

    /* If I'm the root, then scatter */
    if ((comm_ptr->comm_kind == MPID_INTRACOMM) && (rank == root)) {
        /* intracomm root */
        int comm_size;

        comm_size = comm_ptr->local_size;
        MPID_Datatype_get_extent_macro(sendtype, extent);

        /* We could use Isend here, but since the receivers need to execute
           a simple Recv, it may not make much difference in performance,
           and using the blocking version is simpler */
        for ( i=0; i<root; i++ ) {
            if (sendcnts[i]) {
                mpi_errno = MPIC_Send(((char *)sendbuf+displs[i]*extent),
                                      sendcnts[i], sendtype, i,
                                      MPIR_SCATTERV_TAG, comm);
                /* --BEGIN ERROR HANDLING-- */
                if (mpi_errno) {
                    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                        FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                    return mpi_errno;
                }
                /* --END ERROR HANDLING-- */
            }
        }
        if (recvbuf != MPI_IN_PLACE) {
            if (sendcnts[rank]) {
                mpi_errno = MPIR_Localcopy(((char *)sendbuf+displs[rank]*extent),
                                           sendcnts[rank], sendtype,
                                           recvbuf, recvcnt, recvtype);
                /* --BEGIN ERROR HANDLING-- */
                if (mpi_errno) {
                    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                        FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                    return mpi_errno;
                }
                /* --END ERROR HANDLING-- */
            }
        }
        for ( i=root+1; i<comm_size; i++ ) {
            if (sendcnts[i]) {
                mpi_errno = MPIC_Send(((char *)sendbuf+displs[i]*extent),
                                      sendcnts[i], sendtype, i,
                                      MPIR_SCATTERV_TAG, comm);
                /* --BEGIN ERROR HANDLING-- */
                if (mpi_errno) {
                    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                        FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                    return mpi_errno;
                }
                /* --END ERROR HANDLING-- */
            }
        }
    }

    else if ((comm_ptr->comm_kind == MPID_INTERCOMM) && (root == MPI_ROOT)) {
        /* intercommunicator root */
        int remote_comm_size;

        remote_comm_size = comm_ptr->remote_size;
        MPID_Datatype_get_extent_macro(sendtype, extent);

        for (i=0; i<remote_comm_size; i++) {
            if (sendcnts[i]) {
                mpi_errno = MPIC_Send(((char *)sendbuf+displs[i]*extent),
                                      sendcnts[i], sendtype, i,
                                      MPIR_SCATTERV_TAG, comm);
                /* --BEGIN ERROR HANDLING-- */
                if (mpi_errno) {
                    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                        FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                    return mpi_errno;
                }
                /* --END ERROR HANDLING-- */
            }
        }
    }

    else if (root != MPI_PROC_NULL) {
        /* non-root nodes, and in the intercomm. case, non-root nodes on
           the remote side */
        if (recvcnt)
            mpi_errno = MPIC_Recv(recvbuf, recvcnt, recvtype, root,
                                  MPIR_SCATTERV_TAG, comm, MPI_STATUS_IGNORE);
    }

    /* check if multiple threads are calling this collective function */
    MPIDU_ERR_CHECK_MULTIPLE_THREADS_EXIT( comm_ptr );

    return (mpi_errno);
}
#endif

#undef FUNCNAME
#define FUNCNAME MPI_Scatterv

/*@
   MPI_Scatterv - Scatters a buffer in parts to all processes in a communicator

Input Parameters:
+ sendbuf - address of send buffer (choice, significant only at 'root')
. sendcounts - integer array (of length group size) specifying the number of elements to send to each process
. displs - integer array (of length group size). Entry 'i' specifies the displacement (relative to 'sendbuf') from which to take the outgoing data to process 'i'
. sendtype - data type of send buffer elements (handle)
. recvcount - number of elements in receive buffer (integer)
. recvtype - data type of receive buffer elements (handle)
. root - rank of sending process (integer)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Scatterv( void *sendbuf, int *sendcnts, int *displs,
                  MPI_Datatype sendtype, void *recvbuf, int recvcnt,
                  MPI_Datatype recvtype, int root, MPI_Comm comm)
{
    static const char FCNAME[] = "MPI_Scatterv";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_SCATTERV);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_CS_ENTER();
    MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_SCATTERV);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Datatype *sendtype_ptr=NULL, *recvtype_ptr=NULL;
            int i, comm_size, rank;

            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;

            if (comm_ptr->comm_kind == MPID_INTRACOMM) {
                MPIR_ERRTEST_INTRA_ROOT(comm_ptr, root, mpi_errno);

                rank = comm_ptr->rank;
                comm_size = comm_ptr->local_size;

                if (rank == root) {
                    for (i=0; i<comm_size; i++) {
                        MPIR_ERRTEST_COUNT(sendcnts[i], mpi_errno);
                        MPIR_ERRTEST_DATATYPE(sendtype, "sendtype", mpi_errno);
                    }
                    if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
                        MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
                        MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
                        MPID_Datatype_committed_ptr( sendtype_ptr, mpi_errno );
                    }
                    for (i=0; i<comm_size; i++) {
                        if (sendcnts[i] > 0) {
                            MPIR_ERRTEST_USERBUFFER(sendbuf,sendcnts[i],sendtype,mpi_errno);
                            break;
                        }
                    }
                    for (i=0; i<comm_size; i++) {
                        if (sendcnts[i] > 0) {
                            MPIR_ERRTEST_SENDBUF_INPLACE(sendbuf, sendcnts[i], mpi_errno);
                            break;
                        }
                    }
                }
                else
                    MPIR_ERRTEST_RECVBUF_INPLACE(recvbuf, recvcnt, mpi_errno);

                if (recvbuf != MPI_IN_PLACE) {
                    MPIR_ERRTEST_COUNT(recvcnt, mpi_errno);
                    MPIR_ERRTEST_DATATYPE(recvtype, "recvtype", mpi_errno);
                    if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
                        MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
                        MPID_Datatype_valid_ptr( recvtype_ptr, mpi_errno );
                        MPID_Datatype_committed_ptr( recvtype_ptr, mpi_errno );
                    }
                    MPIR_ERRTEST_USERBUFFER(recvbuf,recvcnt,recvtype,mpi_errno);
                }
            }

            if (comm_ptr->comm_kind == MPID_INTERCOMM) {
                MPIR_ERRTEST_INTER_ROOT(comm_ptr, root, mpi_errno);

                if (root == MPI_ROOT) {
                    comm_size = comm_ptr->remote_size;
                    for (i=0; i<comm_size; i++) {
                        MPIR_ERRTEST_COUNT(sendcnts[i], mpi_errno);
                        MPIR_ERRTEST_DATATYPE(sendtype, "sendtype", mpi_errno);
                    }
                    if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
                        MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
                        MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
                        MPID_Datatype_committed_ptr( sendtype_ptr, mpi_errno );
                    }
                    for (i=0; i<comm_size; i++) {
                        if (sendcnts[i] > 0) {
                            MPIR_ERRTEST_SENDBUF_INPLACE(sendbuf, sendcnts[i], mpi_errno);
                            MPIR_ERRTEST_USERBUFFER(sendbuf,sendcnts[i],sendtype,mpi_errno);
                            break;
                        }
                    }
                }
                else if (root != MPI_PROC_NULL) {
                    MPIR_ERRTEST_COUNT(recvcnt, mpi_errno);
                    MPIR_ERRTEST_DATATYPE(recvtype, "recvtype", mpi_errno);
                    if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
                        MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
                        MPID_Datatype_valid_ptr( recvtype_ptr, mpi_errno );
                        MPID_Datatype_committed_ptr( recvtype_ptr, mpi_errno );
                    }
                    MPIR_ERRTEST_RECVBUF_INPLACE(recvbuf, recvcnt, mpi_errno);
                    MPIR_ERRTEST_USERBUFFER(recvbuf,recvcnt,recvtype,mpi_errno);
                }
            }

            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    /* The device override is used only if the Scatterv entry itself is set;
       checking the function pointer that is actually invoked avoids calling
       through a NULL Scatterv pointer. */
    if (comm_ptr->coll_fns != NULL && comm_ptr->coll_fns->Scatterv != NULL)
    {
        mpi_errno = comm_ptr->coll_fns->Scatterv(sendbuf, sendcnts, displs,
                                                 sendtype, recvbuf, recvcnt,
                                                 recvtype, root, comm_ptr);
    }
    else
    {
        /* if (comm_ptr->comm_kind == MPID_INTERCOMM) {
               mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                   FCNAME, __LINE__, MPI_ERR_COMM, "**intercommcoll",
                   "**intercommcoll %s", FCNAME );
           } else */
        MPIR_Nest_incr();
        mpi_errno = MPIR_Scatterv(sendbuf, sendcnts, displs, sendtype,
                                  recvbuf, recvcnt, recvtype, root, comm_ptr);
        MPIR_Nest_decr();
    }

    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_SCATTERV);
    MPID_CS_EXIT();
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_scatterv", "**mpi_scatterv %p %p %p %D %p %d %D %d %C",
            sendbuf, sendcnts, displs, sendtype, recvbuf, recvcnt, recvtype,
            root, comm);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
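For reference, below is a minimal caller-side sketch of how MPI_Scatterv is typically used, matching the documented parameters above (sendcounts and displs significant only at the root). It is not part of scatterv.c; the buffer names and the choice of sending i+1 integers to rank i are illustrative assumptions, not MPICH internals.

/* scatterv_example.c - illustrative usage sketch, not part of the MPICH source.
   Root (rank 0) sends i+1 integers to each rank i. */
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main(int argc, char *argv[])
{
    int rank, size, i;
    int *sendbuf = NULL, *sendcnts = NULL, *displs = NULL;
    int recvcnt, *recvbuf;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    recvcnt = rank + 1;                       /* rank i expects i+1 elements */
    recvbuf = (int *) malloc(recvcnt * sizeof(int));

    if (rank == 0) {
        /* per-rank counts and displacements are needed only at the root,
           as noted in the algorithm comment in scatterv.c */
        int total = size * (size + 1) / 2;
        sendbuf  = (int *) malloc(total * sizeof(int));
        sendcnts = (int *) malloc(size * sizeof(int));
        displs   = (int *) malloc(size * sizeof(int));
        for (i = 0; i < size; i++) {
            sendcnts[i] = i + 1;
            displs[i]   = (i == 0) ? 0 : displs[i-1] + sendcnts[i-1];
        }
        for (i = 0; i < total; i++) sendbuf[i] = i;
    }

    MPI_Scatterv(sendbuf, sendcnts, displs, MPI_INT,
                 recvbuf, recvcnt, MPI_INT, 0, MPI_COMM_WORLD);

    printf("rank %d received %d element(s), first = %d\n",
           rank, recvcnt, recvbuf[0]);

    if (rank == 0) { free(sendbuf); free(sendcnts); free(displs); }
    free(recvbuf);
    MPI_Finalize();
    return 0;
}

Each non-root rank simply posts a receive for its own count, which is why the implementation above can use a linear loop of sends at the root without distributing the sendcounts array first.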