
📄 comm.c

📁 MPI stands for the Message Passing Interface. Written by the MPI Forum (a large committee comprising
💻 C
📖 Page 1 of 4
    /* ompi_comm_split(), continued: the function's opening lines and the
     * declarations of myinfo, size and my_size precede this excerpt. */
    int my_rsize;
    int mode;
    int rsize;
    int i, loc;
    int inter;
    int *results=NULL, *sorted=NULL;
    int *rresults=NULL, *rsorted=NULL;
    int rc=OMPI_SUCCESS;
    ompi_proc_t **procs=NULL, **rprocs=NULL;
    ompi_communicator_t *newcomp;
    ompi_comm_allgatherfct *allgatherfct=NULL;

    /* Step 1: determine all the information for the local group */
    /* --------------------------------------------------------- */

    /* sort according to color and rank. Gather information from everyone */
    myinfo[0] = color;
    myinfo[1] = key;

    size  = ompi_comm_size ( comm );
    inter = OMPI_COMM_IS_INTER(comm);
    if ( inter ) {
        allgatherfct = (ompi_comm_allgatherfct *)ompi_comm_allgather_emulate_intra;
    }
    else {
        allgatherfct = (ompi_comm_allgatherfct *)comm->c_coll.coll_allgather;
    }

    results = (int*) malloc ( 2 * size * sizeof(int));
    if ( NULL == results ) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    rc = allgatherfct( myinfo, 2, MPI_INT, results, 2, MPI_INT, comm );
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    /* how many have the same color like me */
    for ( my_size = 0, i = 0; i < size; i++) {
        if ( results[(2*i)+0] == color) my_size++;
    }

    sorted = (int *) malloc ( sizeof( int ) * my_size * 2);
    if ( NULL == sorted) {
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }

    /* ok we can now fill this info */
    for( loc = 0, i = 0; i < size; i++ ) {
        if ( results[(2*i)+0] == color) {
            sorted[(2*loc)+0] = i;                 /* copy org rank */
            sorted[(2*loc)+1] = results[(2*i)+1];  /* copy key */
            loc++;
        }
    }

    /* the new array needs to be sorted so that it is in 'key' order */
    /* if two keys are equal then it is sorted in original rank order! */
    if ( my_size > 1 ) {
        qsort ((int*)sorted, my_size, sizeof(int)*2, rankkeycompare);
    }

    /* put group elements in a list */
    procs = (ompi_proc_t **) malloc ( sizeof(ompi_proc_t *) * my_size);
    if ( NULL == procs ) {
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }
    for (i = 0; i < my_size; i++) {
        procs[i] = comm->c_local_group->grp_proc_pointers[sorted[i*2]];
    }

    /* Step 2: determine all the information for the remote group */
    /* --------------------------------------------------------- */
    if ( inter ) {
        rsize    = comm->c_remote_group->grp_proc_count;
        rresults = (int *) malloc ( rsize * 2 * sizeof(int));
        if ( NULL == rresults ) {
            rc = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        /* this is an allgather on an inter-communicator */
        rc = comm->c_coll.coll_allgather( myinfo, 2, MPI_INT, rresults, 2,
                                          MPI_INT, comm );
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }

        /* how many have the same color like me */
        for ( my_rsize = 0, i = 0; i < rsize; i++) {
            if ( rresults[(2*i)+0] == color) my_rsize++;
        }

        rsorted = (int *) malloc ( sizeof( int ) * my_rsize * 2);
        if ( NULL == rsorted) {
            rc = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        /* ok we can now fill this info */
        for( loc = 0, i = 0; i < rsize; i++ ) {
            if ( rresults[(2*i)+0] == color) {
                rsorted[(2*loc)+0] = i;                  /* org rank */
                rsorted[(2*loc)+1] = rresults[(2*i)+1];  /* key */
                loc++;
            }
        }

        /* the new array needs to be sorted so that it is in 'key' order */
        /* if two keys are equal then it is sorted in original rank order! */
        if ( my_rsize > 1 ) {
            qsort ((int*)rsorted, my_rsize, sizeof(int)*2, rankkeycompare);
        }

        /* put group elements in a list */
        rprocs = (ompi_proc_t **) malloc ( sizeof(ompi_proc_t *) * my_rsize);
        if ( NULL == rprocs ) {   /* check the buffer just allocated (was "NULL == procs") */
            rc = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }
        for (i = 0; i < my_rsize; i++) {
            rprocs[i] = comm->c_remote_group->grp_proc_pointers[rsorted[i*2]];
        }

        mode = OMPI_COMM_CID_INTER;
    }
    else {
        my_rsize = 0;
        rprocs   = NULL;
        mode     = OMPI_COMM_CID_INTRA;
    }

    /* Step 3: set up the communicator                           */
    /* --------------------------------------------------------- */
    /* Create the communicator finally */
    newcomp = ompi_comm_allocate (my_size, my_rsize );
    if ( NULL == newcomp ) {
        rc = MPI_ERR_INTERN;
        goto exit;
    }

    /* Determine context id. It is identical to f_2_c_handle */
    rc = ompi_comm_nextcid ( newcomp,  /* new communicator */
                             comm,     /* old comm */
                             NULL,     /* bridge comm */
                             NULL,     /* local leader */
                             NULL,     /* remote_leader */
                             mode,     /* mode */
                             -1 );     /* send first, doesn't matter */
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    rc = ompi_comm_set ( newcomp,            /* new comm */
                         comm,               /* old comm */
                         my_size,            /* local_size */
                         procs,              /* local_procs */
                         my_rsize,           /* remote_size */
                         rprocs,             /* remote_procs */
                         NULL,               /* attrs */
                         comm->error_handler,/* error handler */
                         (pass_on_topo)?
                             (mca_base_component_t *)comm->c_topo_component:
                             NULL);          /* topo component */
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    /* Set name for debugging purposes */
    snprintf(newcomp->c_name, MPI_MAX_OBJECT_NAME, "MPI COMMUNICATOR %d SPLIT FROM %d",
             newcomp->c_contextid, comm->c_contextid );

    /* Activate the communicator and init coll-component */
    rc = ompi_comm_activate ( newcomp,  /* new communicator */
                              comm,     /* old comm */
                              NULL,     /* bridge comm */
                              NULL,     /* local leader */
                              NULL,     /* remote_leader */
                              mode,     /* mode */
                              -1,       /* send first */
                              NULL );   /* coll component */
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

 exit:
    if ( NULL != results ) {
        free ( results );
    }
    if ( NULL != sorted  ) {
        free ( sorted );
    }
    if ( NULL != rresults) {
        free ( rresults );
    }
    if ( NULL != rsorted ) {
        free ( rsorted );
    }
    if ( NULL != procs   ) {
        free ( procs );
    }
    if ( NULL != rprocs  ) {
        free ( rprocs );
    }

    /* Step 4: if we are not part of the comm, free the struct   */
    /* --------------------------------------------------------- */
    if ( MPI_UNDEFINED == color ) {
        ompi_comm_free ( &newcomp );
    }

    *newcomm = newcomp;
    return ( rc );
}
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
int ompi_comm_dup ( ompi_communicator_t * comm, ompi_communicator_t **newcomm )
{
    ompi_communicator_t *comp=NULL;
    ompi_communicator_t *newcomp=NULL;
    int rsize, mode, rc=MPI_SUCCESS;
    ompi_proc_t **rprocs;

    comp = (ompi_communicator_t *) comm;
    if ( OMPI_COMM_IS_INTER ( comp ) ){
        rsize  = comp->c_remote_group->grp_proc_count;
        rprocs = comp->c_remote_group->grp_proc_pointers;
        mode   = OMPI_COMM_CID_INTER;
    }
    else {
        rsize  = 0;
        rprocs = NULL;
        mode   = OMPI_COMM_CID_INTRA;
    }

    *newcomm = MPI_COMM_NULL;
    newcomp = ompi_comm_allocate (comp->c_local_group->grp_proc_count, rsize );
    if ( NULL == newcomp ) {
        return MPI_ERR_INTERN;
    }

    /* Determine context id. It is identical to f_2_c_handle */
    rc = ompi_comm_nextcid ( newcomp,  /* new communicator */
                             comp,     /* old comm */
                             NULL,     /* bridge comm */
                             NULL,     /* local leader */
                             NULL,     /* remote_leader */
                             mode,     /* mode */
                             -1 );     /* send_first */
    if ( MPI_SUCCESS != rc ) {
        return rc;
    }

    rc = ompi_comm_set ( newcomp,                                /* new comm */
                         comp,                                   /* old comm */
                         comp->c_local_group->grp_proc_count,    /* local_size */
                         comp->c_local_group->grp_proc_pointers, /* local_procs */
                         rsize,                                  /* remote_size */
                         rprocs,                                 /* remote_procs */
                         comp->c_keyhash,                        /* attrs */
                         comp->error_handler,                    /* error handler */
                         (mca_base_component_t *) comp->c_topo_component /* topo component */
                         );
    if ( MPI_SUCCESS != rc) {
        return rc;
    }

    /* Set name for debugging purposes */
    snprintf(newcomp->c_name, MPI_MAX_OBJECT_NAME, "MPI COMMUNICATOR %d DUP FROM %d",
             newcomp->c_contextid, comm->c_contextid );

    /* activate communicator and init coll-module */
    rc = ompi_comm_activate (newcomp,  /* new communicator */
                             comp,     /* old comm */
                             NULL,     /* bridge comm */
                             NULL,     /* local leader */
                             NULL,     /* remote_leader */
                             mode,     /* mode */
                             -1,       /* send_first */
                             (mca_base_component_t *) comp->c_coll_selected_component /* coll component */
                             );
    if ( MPI_SUCCESS != rc ) {
        return rc;
    }

    *newcomm = newcomp;
    return MPI_SUCCESS;
}
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
int ompi_comm_set_name (ompi_communicator_t *comm, char *name )
{
#ifdef USE_MUTEX_FOR_COMMS
    OPAL_THREAD_LOCK(&(comm->c_lock));
#endif
    memset(comm->c_name, 0, MPI_MAX_OBJECT_NAME);
    strncpy(comm->c_name, name, MPI_MAX_OBJECT_NAME);
    comm->c_name[MPI_MAX_OBJECT_NAME - 1] = 0;
    comm->c_flags |= OMPI_COMM_NAMEISSET;
#ifdef USE_MUTEX_FOR_COMMS
    OPAL_THREAD_UNLOCK(&(comm->c_lock));
#endif
    return OMPI_SUCCESS;
}
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
/*
 * Implementation of MPI_Allgather for the local_group in an inter-comm.
 * The algorithm consists of two steps:
 * 1. an inter-gather to rank 0 in remote group
 * 2. an inter-bcast from rank 0 in remote_group.
 */
static int ompi_comm_allgather_emulate_intra( void *inbuf, int incount,
                                              MPI_Datatype intype, void* outbuf,
                                              int outcount, MPI_Datatype outtype,
                                              ompi_communicator_t *comm)
{
    int rank, size, rsize, i, rc;
    int *tmpbuf=NULL;
    MPI_Request *req=NULL, sendreq;

    rsize = ompi_comm_remote_size(comm);
    size  = ompi_comm_size(comm);
    rank  = ompi_comm_rank(comm);

    /* Step 1: the gather-step */
    if ( 0 == rank ) {
        tmpbuf = (int *) malloc (rsize*outcount*sizeof(int));
        req = (MPI_Request *)malloc (rsize*outcount*sizeof(MPI_Request));
        if ( NULL == tmpbuf || NULL == req ) {
            return (OMPI_ERR_OUT_OF_RESOURCE);
        }

        for ( i = 0; i < rsize; i++) {
            rc = MCA_PML_CALL(irecv( &tmpbuf[outcount*i], outcount, outtype, i,
                                     OMPI_COMM_ALLGATHER_TAG, comm, &req[i] ));
            if ( OMPI_SUCCESS != rc ) {
                goto exit;
            }
        }
    }

    rc = MCA_PML_CALL(isend( inbuf, incount, intype, 0, OMPI_COMM_ALLGATHER_TAG,
                             MCA_PML_BASE_SEND_STANDARD, comm, &sendreq ));
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    if ( 0 == rank ) {
        rc = ompi_request_wait_all (rsize, req, MPI_STATUSES_IGNORE);
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }
    }
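Note: rankkeycompare() is referenced by both qsort() calls above, but its definition falls outside this page of the listing. Purely as an illustration of the contract described in the comments (primary order by key, ties broken by original rank, applied to {rank, key} int pairs), a comparator of that shape could look like the sketch below; the name rankkeycompare_sketch and its body are assumptions, not the file's actual definition.

/* Sketch only: mimics the ordering described above for the {original rank,
 * key} pairs stored in 'sorted' and 'rsorted'.  The real comparator,
 * rankkeycompare(), is defined elsewhere in comm.c. */
static int rankkeycompare_sketch (const void *p, const void *q)
{
    const int *a = (const int *) p;   /* a[0] = original rank, a[1] = key */
    const int *b = (const int *) q;

    if (a[1] != b[1]) {               /* primary order: ascending key */
        return (a[1] < b[1]) ? -1 : 1;
    }
    /* equal keys: fall back to the original rank order */
    return (a[0] < b[0]) ? -1 : (a[0] > b[0]) ? 1 : 0;
}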

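For context, these internal routines are the engines behind the user-facing calls: in Open MPI, MPI_Comm_split() and MPI_Comm_dup() ultimately reach ompi_comm_split() and ompi_comm_dup() shown above. The small program below is an illustration (not part of comm.c) of the color/key semantics that Steps 1-4 implement: ranks sharing a color end up in the same new communicator, ordered by key with ties broken by original rank.

#include <mpi.h>
#include <stdio.h>

int main (int argc, char **argv)
{
    int world_rank, world_size;
    MPI_Comm newcomm;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size (MPI_COMM_WORLD, &world_size);

    /* color: ranks with the same color land in the same new communicator.
     * key:   orders ranks within that communicator (ties keep the original
     *        rank order, matching the sort performed in ompi_comm_split). */
    int color = world_rank % 2;      /* split even and odd ranks apart   */
    int key   = -world_rank;         /* reverse the original ordering    */

    MPI_Comm_split (MPI_COMM_WORLD, color, key, &newcomm);

    int new_rank, new_size;
    MPI_Comm_rank (newcomm, &new_rank);
    MPI_Comm_size (newcomm, &new_size);
    printf ("world rank %d: color %d, new rank %d of %d\n",
            world_rank, color, new_rank, new_size);

    MPI_Comm_free (&newcomm);
    MPI_Finalize ();
    return 0;
}

Passing MPI_UNDEFINED as the color makes the caller receive MPI_COMM_NULL, which is the case Step 4 above handles by freeing the newly built structure. MPI_Comm_dup() takes no color/key at all and simply produces a congruent communicator, mirroring the shorter ompi_comm_dup() path.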