📄 comm.c
    rc = ompi_request_wait_all (1, &sendreq, MPI_STATUS_IGNORE);
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    /* Step 2: the inter-bcast step */
    rc = MCA_PML_CALL(irecv (outbuf, size*outcount, outtype, 0,
                             OMPI_COMM_ALLGATHER_TAG, comm, &sendreq));
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    if ( 0 == rank ) {
        for ( i=0; i < rsize; i++ ){
            rc = MCA_PML_CALL(send (tmpbuf, rsize*outcount, outtype, i,
                                    OMPI_COMM_ALLGATHER_TAG,
                                    MCA_PML_BASE_SEND_STANDARD, comm));
            if ( OMPI_SUCCESS != rc ) {
                goto exit;
            }
        }
    }

    rc = ompi_request_wait_all (1, &sendreq, MPI_STATUS_IGNORE );

 exit:
    if ( NULL != req ) {
        free ( req );
    }
    if ( NULL != tmpbuf ) {
        free ( tmpbuf );
    }

    return (rc);
}
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
/*
** Counterpart to MPI_Comm_free. To be used within OMPI.
** The freeing of all attached objects (groups, errhandlers
** etc.) has moved to the destructor.
*/
int ompi_comm_free ( ompi_communicator_t **comm )
{
    int ret;

    /* Release attributes.  We do this now instead of during the
       communicator destructor for 2 reasons:

       1. The destructor will only NOT be called immediately during
          ompi_comm_free() if the reference count is still greater
          than zero at that point, meaning that there are ongoing
          communications.  However, pending communications will never
          need attributes, so it's safe to release them directly here.

       2. Releasing attributes in ompi_comm_free() enables us to check
          the return status of the attribute delete functions.  At
          least one interpretation of the MPI standard (i.e., the one
          of the Intel test suite) is that if any of the attribute
          deletion functions fail, then MPI_COMM_FREE /
          MPI_COMM_DISCONNECT should also fail.  We can't do that if
          we delay releasing the attributes -- we need to release the
          attributes right away so that we can report the error right
          away. */
    if (NULL != (*comm)->c_keyhash) {
        ret = ompi_attr_delete_all(COMM_ATTR, *comm, (*comm)->c_keyhash);
        if (OMPI_SUCCESS != ret) {
            return ret;
        }
        OBJ_RELEASE((*comm)->c_keyhash);
    }

    /* Special case: if we are freeing the parent handle, then we
       need to set our internal handle to the parent to be equal to
       COMM_NULL.  This is according to MPI-2:88-89. */
    if (*comm == ompi_mpi_comm_parent && comm != &ompi_mpi_comm_parent) {
        ompi_mpi_comm_parent = &ompi_mpi_comm_null;
    }

    /* Release the communicator */
    if ( OMPI_COMM_IS_DYNAMIC (*comm) ) {
        ompi_comm_num_dyncomm --;
    }
    OBJ_RELEASE ( (*comm) );

    *comm = MPI_COMM_NULL;
    return OMPI_SUCCESS;
}
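The second point in that comment has a user-visible consequence: because ompi_comm_free() deletes attributes before releasing the communicator, a failing attribute delete callback can be reported by MPI_COMM_FREE itself. A minimal sketch at the public MPI level, under that interpretation of the standard; the keyval, callback, and error code below are illustrative and not part of comm.c:

#include <mpi.h>
#include <stdio.h>

/* Hypothetical delete callback that refuses to let the attribute go away. */
static int failing_delete_fn(MPI_Comm comm, int keyval,
                             void *attr_val, void *extra_state)
{
    return MPI_ERR_OTHER;           /* any non-MPI_SUCCESS value */
}

int main(int argc, char **argv)
{
    MPI_Comm dup;
    int keyval, rc;

    MPI_Init(&argc, &argv);
    MPI_Comm_dup(MPI_COMM_WORLD, &dup);

    MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, failing_delete_fn,
                           &keyval, NULL);
    MPI_Comm_set_attr(dup, keyval, NULL);

    /* Under the interpretation described above, the failure of
       failing_delete_fn surfaces here, because the attributes are
       deleted before the communicator object is released. */
    MPI_Comm_set_errhandler(dup, MPI_ERRORS_RETURN);
    rc = MPI_Comm_free(&dup);
    if (MPI_SUCCESS != rc) {
        printf("MPI_Comm_free reported the delete callback's error\n");
    }

    MPI_Comm_free_keyval(&keyval);
    MPI_Finalize();
    return 0;
}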
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
                                     ompi_communicator_t *bridge_comm,
                                     int local_leader,
                                     int remote_leader,
                                     orte_rml_tag_t tag,
                                     int rsize)
{
    MPI_Request req;
    int rc;
    int local_rank, local_size;
    ompi_proc_t **rprocs=NULL;
    orte_std_cntr_t size_len;
    int int_len, rlen;
    orte_buffer_t *sbuf=NULL, *rbuf=NULL;
    void *sendbuf;
    char *recvbuf;

    local_rank = ompi_comm_rank (local_comm);
    local_size = ompi_comm_size (local_comm);

    if (local_rank == local_leader) {
        sbuf = OBJ_NEW(orte_buffer_t);
        if (NULL == sbuf) {
            rc = ORTE_ERROR;
            goto err_exit;
        }
        rc = ompi_proc_pack(local_comm->c_local_group->grp_proc_pointers,
                            local_size, sbuf);
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }
        if (ORTE_SUCCESS != (rc = orte_dss.unload(sbuf, &sendbuf, &size_len))) {
            goto err_exit;
        }

        /* exchange the buffer lengths with the remote leader */
        rc = MCA_PML_CALL(irecv (&rlen, 1, MPI_INT, remote_leader, tag,
                                 bridge_comm, &req ));
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }
        int_len = (int)size_len;

        rc = MCA_PML_CALL(send (&int_len, 1, MPI_INT, remote_leader, tag,
                                MCA_PML_BASE_SEND_STANDARD, bridge_comm ));
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }
        rc = ompi_request_wait_all ( 1, &req, MPI_STATUS_IGNORE );
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }
    }

    /* broadcast the buffer length to all processes in local_comm */
    rc = local_comm->c_coll.coll_bcast( &rlen, 1, MPI_INT,
                                        local_leader, local_comm );
    if ( OMPI_SUCCESS != rc ) {
        goto err_exit;
    }

    /* Allocate temporary buffer */
    recvbuf = (char *)malloc(rlen);
    if ( NULL == recvbuf ) {
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto err_exit;
    }

    if ( local_rank == local_leader ) {
        /* local leaders exchange the packed process-name lists */
        rc = MCA_PML_CALL(irecv (recvbuf, rlen, MPI_BYTE, remote_leader, tag,
                                 bridge_comm, &req ));
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }
        rc = MCA_PML_CALL(send(sendbuf, int_len, MPI_BYTE, remote_leader, tag,
                               MCA_PML_BASE_SEND_STANDARD, bridge_comm ));
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }
        rc = ompi_request_wait_all ( 1, &req, MPI_STATUS_IGNORE );
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }

        OBJ_RELEASE(sbuf);
        sbuf = NULL;    /* avoid a second release at err_exit */
    }

    /* broadcast the name list to all processes in local_comm */
    rc = local_comm->c_coll.coll_bcast( recvbuf, rlen, MPI_BYTE,
                                        local_leader, local_comm );
    if ( OMPI_SUCCESS != rc ) {
        goto err_exit;
    }

    rbuf = OBJ_NEW(orte_buffer_t);
    if (NULL == rbuf) {
        rc = ORTE_ERROR;
        goto err_exit;
    }
    if (ORTE_SUCCESS != (rc = orte_dss.load(rbuf, recvbuf, rlen))) {
        goto err_exit;
    }

    /* decode the names into a proc-list */
    rc = ompi_proc_unpack(rbuf, rsize, &rprocs );
    OBJ_RELEASE(rbuf);
    rbuf = NULL;

 err_exit:
    /* rprocs isn't freed unless we have an error,
       since it is used in the communicator */
    if ( OMPI_SUCCESS != rc ) {
        opal_output(0, "%d: Error in ompi_get_rprocs\n", local_rank);
        if ( NULL != rprocs ) {
            free ( rprocs );
            rprocs=NULL;
        }
    }

    /* make sure the buffers have been released */
    if (NULL != sbuf) {
        OBJ_RELEASE(sbuf);
    }
    if (NULL != rbuf) {
        OBJ_RELEASE(rbuf);
    }

    return rprocs;
}
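ompi_comm_get_rprocs() follows a common "leaders exchange, then broadcast" pattern: the two local leaders first swap the sizes of their packed process-name lists over the bridge communicator, then swap the payloads themselves, and each leader finally broadcasts what it received inside its own group. The sketch below shows the same pattern with public MPI calls; the helper name, buffer handling, and tag are assumptions for illustration, not code from comm.c.

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical helper: 'payload'/'payload_len' stand in for the packed
   process-name list that ompi_comm_get_rprocs() ships around. */
static char *exchange_with_remote_group(MPI_Comm local_comm, MPI_Comm bridge_comm,
                                        int local_leader, int remote_leader,
                                        int tag, char *payload, int payload_len,
                                        int *recv_len)
{
    int rank;
    char *recvbuf;
    MPI_Request req;

    MPI_Comm_rank(local_comm, &rank);

    if (rank == local_leader) {
        /* Step 1: leaders swap payload lengths (irecv posted first so the
           exchange cannot deadlock, mirroring the MCA_PML irecv/send pair). */
        MPI_Irecv(recv_len, 1, MPI_INT, remote_leader, tag, bridge_comm, &req);
        MPI_Send(&payload_len, 1, MPI_INT, remote_leader, tag, bridge_comm);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    }

    /* Step 2: everybody in the local group learns the remote length. */
    MPI_Bcast(recv_len, 1, MPI_INT, local_leader, local_comm);

    recvbuf = (char *) malloc(*recv_len);

    if (rank == local_leader) {
        /* Step 3: leaders swap the payloads themselves. */
        MPI_Irecv(recvbuf, *recv_len, MPI_BYTE, remote_leader, tag,
                  bridge_comm, &req);
        MPI_Send(payload, payload_len, MPI_BYTE, remote_leader, tag, bridge_comm);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    }

    /* Step 4: broadcast the remote payload inside the local group. */
    MPI_Bcast(recvbuf, *recv_len, MPI_BYTE, local_leader, local_comm);
    return recvbuf;
}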
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
/**
 * This routine verifies whether the local group and the remote group
 * overlap in intercomm_create.
 */
int ompi_comm_overlapping_groups (int size, ompi_proc_t **lprocs,
                                  int rsize, ompi_proc_t ** rprocs)
{
    int rc=OMPI_SUCCESS;
    int i,j;

    for (i=0; i<size; i++) {
        for ( j=0; j<rsize; j++) {
            if ( lprocs[i] == rprocs[j] ) {
                rc = MPI_ERR_COMM;
                return rc;
            }
        }
    }

    return rc;
}
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
int ompi_comm_determine_first ( ompi_communicator_t *intercomm, int high )
{
    int flag, rhigh;
    int rank, rsize;
    int *rcounts;
    int *rdisps;
    int scount=0;
    int rc;
    ompi_proc_t *ourproc, *theirproc;
    orte_ns_cmp_bitmask_t mask;

    rank  = ompi_comm_rank        (intercomm);
    rsize = ompi_comm_remote_size (intercomm);

    rdisps  = (int *) calloc ( rsize, sizeof(int));
    rcounts = (int *) calloc ( rsize, sizeof(int));
    if ( NULL == rdisps || NULL == rcounts ){
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    rcounts[0] = 1;
    if ( 0 == rank ) {
        scount = 1;
    }

    rc = intercomm->c_coll.coll_allgatherv(&high, scount, MPI_INT,
                                           &rhigh, rcounts, rdisps,
                                           MPI_INT, intercomm);
    if ( rc != OMPI_SUCCESS ) {
        flag = MPI_UNDEFINED;
    }

    if ( NULL != rdisps ) {
        free ( rdisps );
    }
    if ( NULL != rcounts ) {
        free ( rcounts );
    }

    /* This is the logic for determining who is first, who is second */
    if ( high && !rhigh ) {
        flag = false;
    }
    else if ( !high && rhigh ) {
        flag = true;
    }
    else {
        ourproc   = intercomm->c_local_group->grp_proc_pointers[0];
        theirproc = intercomm->c_remote_group->grp_proc_pointers[0];

        mask = ORTE_NS_CMP_CELLID | ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
        rc = orte_ns.compare_fields(mask, &(ourproc->proc_name),
                                    &(theirproc->proc_name));
        if ( 0 > rc ) {
            flag = true;
        }
        else {
            flag = false;
        }
    }

    return flag;
}
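The high/rhigh logic above decides which group is ordered first when the two sides of an inter-communicator are combined; when both sides pass the same value, the tie is broken by comparing the two leader process names. This ordering decision corresponds to the high argument that users pass to MPI_Intercomm_merge (the call path into this routine is not shown in this excerpt). A minimal user-level illustration; the function name and the server/client convention are made up:

#include <mpi.h>

/* Both groups of an inter-communicator call this; the group that passed
   high == 0 ends up with the lower ranks in the merged intra-communicator.
   If both groups pass the same value, the library breaks the tie itself,
   which is what the leader-name comparison above does. */
void merge_example(MPI_Comm intercomm, int i_am_server)
{
    MPI_Comm merged;
    int high = i_am_server ? 0 : 1;   /* illustrative convention */

    MPI_Intercomm_merge(intercomm, high, &merged);

    /* ... use 'merged' as a normal intra-communicator ... */
    MPI_Comm_free(&merged);
}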
/********************************************************************************/
/********************************************************************************/
/********************************************************************************/
int ompi_comm_dump ( ompi_communicator_t *comm )
{
    opal_output(0, "Dumping information for comm_cid %d\n", comm->c_contextid);
    opal_output(0," f2c index:%d cube_dim: %d\n",
                comm->c_f_to_c_index, comm->c_cube_dim);
    opal_output(0," Local group: size = %d my_rank = %d\n",
                comm->c_local_group->grp_proc_count,
                comm->c_local_group->grp_my_rank );

    opal_output(0," Communicator is:");
    /* Display flags */
    if ( OMPI_COMM_IS_INTER(comm) )
        opal_output(0," inter-comm,");
    if ( OMPI_COMM_IS_CART(comm))
        opal_output(0," topo-cart,");
    if ( OMPI_COMM_IS_GRAPH(comm))
        opal_output(0," topo-graph");
    opal_output(0,"\n");

    if (OMPI_COMM_IS_INTER(comm)) {
        opal_output(0," Remote group size:%d\n",
                    comm->c_remote_group->grp_proc_count);
    }
    return MPI_SUCCESS;
}
/********************************************************************************/
/********************************************************************************/
/********************************************************************************/
/* static functions */
/*
** rankkeycompare() compares a (rank, key) tuple, producing the
** sorted lists that match the ordering rules needed for MPI_Comm_split.
*/
static int rankkeycompare (const void *p, const void *q)
{
    int *a, *b;

    /* ranks at [0], keys at [1] */
    /* i.e. we cast and just compare the keys and then the original ranks */
    a = (int*)p;
    b = (int*)q;

    /* simple tests are those where the keys are different */
    if (a[1] < b[1]) {
        return (-1);
    }
    if (a[1] > b[1]) {
        return (1);
    }

    /* the keys are equal: fall back to comparing the original ranks */
    if (a[0] < b[0]) {
        return (-1);
    }
    if (a[0] > b[0]) {
        return (1);
    }
    return (0);
}
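rankkeycompare() has the qsort() comparator signature: each element is a (rank, key) pair, sorted by key with the original rank breaking ties, which is exactly the rank ordering MPI_Comm_split requires within one color. A small standalone sketch of that ordering; the sample pairs are made up and not taken from Open MPI:

#include <stdio.h>
#include <stdlib.h>

/* Same comparison rule as rankkeycompare() above: sort by key ([1]),
   break ties with the original rank ([0]). */
static int rankkeycompare(const void *p, const void *q)
{
    const int *a = (const int *)p;
    const int *b = (const int *)q;

    if (a[1] != b[1]) {
        return (a[1] < b[1]) ? -1 : 1;
    }
    if (a[0] != b[0]) {
        return (a[0] < b[0]) ? -1 : 1;
    }
    return 0;
}

int main(void)
{
    /* Made-up (rank, key) pairs, e.g. as collected for one color
       during MPI_Comm_split. */
    int pairs[4][2] = { {0, 7}, {1, 3}, {2, 7}, {3, 3} };
    int i;

    qsort(pairs, 4, 2 * sizeof(int), rankkeycompare);

    /* Prints old ranks in the order 1, 3, 0, 2, i.e. the new rank order. */
    for (i = 0; i < 4; i++) {
        printf("old rank %d (key %d) -> new rank %d\n",
               pairs[i][0], pairs[i][1], i);
    }
    return 0;
}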