
mpe_proff.c

From the collection: Fortran parallel computing package
Language: C
Page 1 of 5
/* Fortran binding for MPI_Type_indexed.  The page break falls inside this
   wrapper; its opening (the prototype and the declarations of i and
   l_blocklens) is restored here from the pattern of the wrappers below. */
void mpi_type_indexed_ ( MPI_Fint *, MPI_Fint [], MPI_Fint [], MPI_Fint *,
                         MPI_Fint *, MPI_Fint * );

void mpi_type_indexed_( MPI_Fint *count, MPI_Fint blocklens[],
                        MPI_Fint indices[], MPI_Fint *old_type,
                        MPI_Fint *newtype, MPI_Fint *__ierr )
{
    int          i;
    int          *l_blocklens = 0;
    int          local_l_blocklens[MPIR_USE_LOCAL_ARRAY];
    int          *l_indices = 0;
    int          local_l_indices[MPIR_USE_LOCAL_ARRAY];
    MPI_Datatype ldatatype;
    static char  myname[] = "MPI_TYPE_INDEXED";

    if ((int)*count > 0) {
        /* Copy the MPI_Fint arrays into int arrays; small counts use the
           local arrays, larger counts use the heap. */
        if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
            MPIR_FALLOC(l_blocklens,(int *) MALLOC( *count * sizeof(int) ),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, myname );
            MPIR_FALLOC(l_indices,(int *) MALLOC( *count * sizeof(int) ),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, myname );
        }
        else {
            l_blocklens = local_l_blocklens;
            l_indices   = local_l_indices;
        }
        for (i=0; i<(int)*count; i++) {
            l_indices[i]   = (int)indices[i];
            l_blocklens[i] = (int)blocklens[i];
        }
    }

    *__ierr = MPI_Type_indexed((int)*count, l_blocklens, l_indices,
                               MPI_Type_f2c(*old_type),
                               &ldatatype);

    if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
        FREE( l_indices );
        FREE( l_blocklens );
    }
    *newtype = MPI_Type_c2f(ldatatype);
}

/* Fortran binding for MPI_Type_lb */
void mpi_type_lb_ ( MPI_Fint *, MPI_Fint *, MPI_Fint * );

void mpi_type_lb_ ( MPI_Fint *datatype, MPI_Fint *displacement,
                    MPI_Fint *__ierr )
{
    MPI_Aint c_displacement;

    *__ierr = MPI_Type_lb(MPI_Type_f2c(*datatype), &c_displacement);
    /* Should check for truncation */
    *displacement = (MPI_Fint)c_displacement;
}

/* Fortran binding for MPI_Type_size */
void mpi_type_size_ ( MPI_Fint *, MPI_Fint *, MPI_Fint * );

void mpi_type_size_ ( MPI_Fint *datatype, MPI_Fint *size, MPI_Fint *__ierr )
{
    /* MPI_Aint c_size; */
    int c_size;

    *__ierr = MPI_Type_size(MPI_Type_f2c(*datatype), &c_size);
    /* Should check for truncation */
    *size = (MPI_Fint)c_size;
}

/* Fortran binding for MPI_Type_struct */
void mpi_type_struct_ ( MPI_Fint *, MPI_Fint [], MPI_Fint [],
                        MPI_Fint [], MPI_Fint *, MPI_Fint * );

void mpi_type_struct_( MPI_Fint *count, MPI_Fint blocklens[],
                       MPI_Fint indices[], MPI_Fint old_types[],
                       MPI_Fint *newtype, MPI_Fint *__ierr )
{
    MPI_Aint     *c_indices;
    MPI_Aint     local_c_indices[MPIR_USE_LOCAL_ARRAY];
    MPI_Datatype *l_datatype;
    MPI_Datatype local_l_datatype[MPIR_USE_LOCAL_ARRAY];
    MPI_Datatype l_newtype;
    int          *l_blocklens;
    int          local_l_blocklens[MPIR_USE_LOCAL_ARRAY];
    int          i;
    int          mpi_errno;
    static char  myname[] = "MPI_TYPE_STRUCT";

    if ((int)*count > 0) {
        if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
            /* Since indices come from MPI_ADDRESS (the FORTRAN VERSION),
               they are currently relative to MPIR_F_MPI_BOTTOM.
               Convert them back */
            MPIR_FALLOC(c_indices,
                        (MPI_Aint *) MALLOC( *count * sizeof(MPI_Aint) ),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, myname );
            MPIR_FALLOC(l_blocklens,(int *) MALLOC( *count * sizeof(int) ),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, myname );
            MPIR_FALLOC(l_datatype,
                        (MPI_Datatype *)
                        MALLOC( *count * sizeof(MPI_Datatype) ),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, myname );
        }
        else {
            c_indices   = local_c_indices;
            l_blocklens = local_l_blocklens;
            l_datatype  = local_l_datatype;
        }
        for (i=0; i<(int)*count; i++) {
            c_indices[i]   = (MPI_Aint) indices[i] /* + (MPI_Aint)MPIR_F_MPI_BOTTOM */;
            l_blocklens[i] = (int) blocklens[i];
            l_datatype[i]  = MPI_Type_f2c(old_types[i]);
        }
        *__ierr = MPI_Type_struct((int)*count, l_blocklens, c_indices,
                                  l_datatype, &l_newtype);
        if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
            FREE( c_indices );
            FREE( l_blocklens );
            FREE( l_datatype );
        }
    }
    else if ((int)*count == 0) {
        *__ierr = MPI_SUCCESS;
        *newtype = 0;
        return;   /* no datatype was created; skip the conversion below */
    }
    else {
        mpi_errno = MPIR_Err_setmsg( MPI_ERR_COUNT, MPIR_ERR_DEFAULT, myname,
                                     (char *)0, (char *)0, (int)(*count) );
        *__ierr = MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
        return;
    }
    *newtype = MPI_Type_c2f(l_newtype);
}

/* Fortran binding for MPI_Type_ub */
void mpi_type_ub_ ( MPI_Fint *, MPI_Fint *, MPI_Fint * );

void mpi_type_ub_ ( MPI_Fint *datatype, MPI_Fint *displacement,
                    MPI_Fint *__ierr )
{
    MPI_Aint c_displacement;

    *__ierr = MPI_Type_ub(MPI_Type_f2c(*datatype), &c_displacement);
    /* Should check for truncation */
    *displacement = (MPI_Fint)c_displacement;
}

/* Fortran binding for MPI_Type_vector */
void mpi_type_vector_ ( MPI_Fint *, MPI_Fint *, MPI_Fint *,
                        MPI_Fint *, MPI_Fint *, MPI_Fint * );

void mpi_type_vector_( MPI_Fint *count, MPI_Fint *blocklen, MPI_Fint *stride,
                       MPI_Fint *old_type, MPI_Fint *newtype,
                       MPI_Fint *__ierr )
{
    MPI_Datatype l_datatype;

    *__ierr = MPI_Type_vector((int)*count, (int)*blocklen, (int)*stride,
                              MPI_Type_f2c(*old_type),
                              &l_datatype);
    *newtype = MPI_Type_c2f(l_datatype);
}

/* Fortran binding for MPI_Unpack */
void mpi_unpack_ ( void *, MPI_Fint *, MPI_Fint *, void *,
                   MPI_Fint *, MPI_Fint *, MPI_Fint *,
                   MPI_Fint * );

void mpi_unpack_ ( void *inbuf, MPI_Fint *insize, MPI_Fint *position,
                   void *outbuf, MPI_Fint *outcount, MPI_Fint *type,
                   MPI_Fint *comm, MPI_Fint *__ierr )
{
    int l_position;

    l_position = (int)*position;
    *__ierr = MPI_Unpack(inbuf, (int)*insize, &l_position,
                         MPIR_F_PTR(outbuf), (int)*outcount,
                         MPI_Type_f2c(*type), MPI_Comm_f2c(*comm) );
    *position = (MPI_Fint)l_position;
}

/* Fortran binding for MPI_Waitall */
void mpi_waitall_ ( MPI_Fint *, MPI_Fint [],
                    MPI_Fint [][MPI_STATUS_SIZE], MPI_Fint *);

void mpi_waitall_( MPI_Fint *count, MPI_Fint array_of_requests[],
                   MPI_Fint array_of_statuses[][MPI_STATUS_SIZE],
                   MPI_Fint *__ierr )
{
    int i;
    MPI_Request *lrequest = 0;
    MPI_Request local_lrequest[MPIR_USE_LOCAL_ARRAY];
    MPI_Status  *c_status = 0;
    MPI_Status  local_c_status[MPIR_USE_LOCAL_ARRAY];

    if ((int)*count > 0) {
        if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
            MPIR_FALLOC(lrequest,(MPI_Request*)MALLOC(sizeof(MPI_Request) *
                        (int)*count), MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
                        "MPI_WAITALL" );
            MPIR_FALLOC(c_status,(MPI_Status*)MALLOC(sizeof(MPI_Status) *
                        (int)*count), MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
                        "MPI_WAITALL" );
        }
        else {
            lrequest = local_lrequest;
            c_status = local_c_status;
        }
        for (i=0; i<(int)*count; i++) {
            lrequest[i] = MPI_Request_f2c( array_of_requests[i] );
        }

        *__ierr = MPI_Waitall((int)*count,lrequest,c_status);

        /* By checking for lrequest[i] = 0, we handle persistent requests */
        for (i=0; i<(int)*count; i++) {
            array_of_requests[i] = MPI_Request_c2f( lrequest[i] );
        }
    }
    else
        *__ierr = MPI_Waitall((int)*count,(MPI_Request *)0, c_status );

#if defined( HAVE_MPI_F_STATUSES_IGNORE )
    if ( (MPI_Fint *) array_of_statuses != MPI_F_STATUSES_IGNORE )
#endif
        for (i=0; i<(int)*count; i++)
            MPI_Status_c2f(&(c_status[i]), &(array_of_statuses[i][0]) );

    if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
        FREE( lrequest );
        FREE( c_status );
    }
}

/* Fortran binding for MPI_Waitany */
void mpi_waitany_ ( MPI_Fint *, MPI_Fint [], MPI_Fint *,
                    MPI_Fint *, MPI_Fint * );

void mpi_waitany_( MPI_Fint *count, MPI_Fint array_of_requests[],
                   MPI_Fint *index, MPI_Fint *status, MPI_Fint *__ierr )
{
    int lindex;
    MPI_Request *lrequest;
    MPI_Request local_lrequest[MPIR_USE_LOCAL_ARRAY];
    MPI_Status  c_status;
    int i;

    if ((int)*count > 0) {
        if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
            MPIR_FALLOC(lrequest,
                        (MPI_Request*)
                        MALLOC(sizeof(MPI_Request) * (int)*count),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
                        "MPI_WAITANY" );
        }
        else
            lrequest = local_lrequest;

        for (i=0; i<(int)*count; i++)
            lrequest[i] = MPI_Request_f2c( array_of_requests[i] );
    }
    else
        lrequest = 0;

    *__ierr = MPI_Waitany((int)*count,lrequest,&lindex,&c_status);

    if (lindex != -1) {
        if (!*__ierr) {
            array_of_requests[lindex] = MPI_Request_c2f(lrequest[lindex]);
        }
    }

    if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
        FREE( lrequest );
    }

    /* See the description of waitany in the standard; the Fortran index
       ranges are from 1, not zero */
    *index = (MPI_Fint)lindex;
    if ((int)*index >= 0) *index = (MPI_Fint)*index + 1;

#if defined( HAVE_MPI_F_STATUS_IGNORE )
    if ( status != MPI_F_STATUS_IGNORE )
#endif
        MPI_Status_c2f(&c_status, status);
}

/* Fortran binding for MPI_Wait */
void mpi_wait_ ( MPI_Fint *, MPI_Fint *, MPI_Fint * );

void mpi_wait_ ( MPI_Fint *request, MPI_Fint *status, MPI_Fint *__ierr )
{
    MPI_Request lrequest;
    MPI_Status  c_status;

    lrequest = MPI_Request_f2c(*request);
    *__ierr = MPI_Wait(&lrequest, &c_status);
    *request = MPI_Request_c2f(lrequest);

#if defined( HAVE_MPI_F_STATUS_IGNORE )
    if ( status != MPI_F_STATUS_IGNORE )
#endif
        MPI_Status_c2f(&c_status, status);
}

/* Fortran binding for MPI_Waitsome */
void mpi_waitsome_ ( MPI_Fint *, MPI_Fint [], MPI_Fint *,
                     MPI_Fint [], MPI_Fint [][MPI_STATUS_SIZE],
                     MPI_Fint * );

void mpi_waitsome_( MPI_Fint *incount, MPI_Fint array_of_requests[],
                    MPI_Fint *outcount, MPI_Fint array_of_indices[],
                    MPI_Fint array_of_statuses[][MPI_STATUS_SIZE],
                    MPI_Fint *__ierr )
{
    int i, j, found;
    int loutcount;
    int *l_indices = 0;
    int local_l_indices[MPIR_USE_LOCAL_ARRAY];
    MPI_Request *lrequest = 0;
    MPI_Request local_lrequest[MPIR_USE_LOCAL_ARRAY];
    MPI_Status  *c_status = 0;
    MPI_Status  local_c_status[MPIR_USE_LOCAL_ARRAY];

    if ((int)*incount > 0) {
        if ((int)*incount > MPIR_USE_LOCAL_ARRAY) {
            MPIR_FALLOC(lrequest,
                        (MPI_Request*)
                        MALLOC(sizeof(MPI_Request)* (int)*incount),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
                        "MPI_WAITSOME" );
            MPIR_FALLOC(l_indices,(int*)MALLOC(sizeof(int) * (int)*incount),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
                        "MPI_WAITSOME" );
            MPIR_FALLOC(c_status,
                        (MPI_Status*)
                        MALLOC(sizeof(MPI_Status) * (int)*incount),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
                        "MPI_WAITSOME" );
        }
        else {
            lrequest  = local_lrequest;
            l_indices = local_l_indices;
            c_status  = local_c_status;
        }

        for (i=0; i<(int)*incount; i++)
            lrequest[i] = MPI_Request_f2c( array_of_requests[i] );

        *__ierr = MPI_Waitsome((int)*incount,lrequest,&loutcount,l_indices,
                               c_status);

        /* By checking for lrequest[l_indices[i]] = 0,
           we handle persistent requests */
        for (i=0; i<(int)*incount; i++) {
            if ( i < loutcount) {
                if (l_indices[i] >= 0) {
                    array_of_requests[l_indices[i]]
                        = MPI_Request_c2f( lrequest[l_indices[i]] );
                }
            }
            else {
                found = 0;
                j = 0;
                while ( (!found) && (j<loutcount) ) {
                    if (l_indices[j++] == i)
                        found = 1;
                }
                if (!found)
                    array_of_requests[i] = MPI_Request_c2f( lrequest[i] );
            }
        }
    }
    else
        *__ierr = MPI_Waitsome( (int)*incount, (MPI_Request *)0, &loutcount,
                                l_indices, c_status );

    for (i=0; i<loutcount; i++) {
#if defined( HAVE_MPI_F_STATUSES_IGNORE )
        if ( (MPI_Fint *) array_of_statuses != MPI_F_STATUSES_IGNORE )
#endif
            MPI_Status_c2f( &c_status[i], &(array_of_statuses[i][0]) );
        if (l_indices[i] >= 0)
            array_of_indices[i] = l_indices[i] + 1;
    }
    *outcount = (MPI_Fint)loutcount;

    if ((int)*incount > MPIR_USE_LOCAL_ARRAY) {
        FREE( l_indices );
        FREE( lrequest );
        FREE( c_status );
    }
}

void mpi_allgather_ ( void *, MPI_Fint *, MPI_Fint *, void *,
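The listing breaks off inside the prototype for mpi_allgather_; the remaining pages are not shown here. Every wrapper in the fragment follows the same pattern: the Fortran-callable entry point receives all arguments by reference as MPI_Fint, converts Fortran handles to C handles with the MPI_*_f2c routines, calls the C implementation, converts any returned handles back with MPI_*_c2f, and stores the error code through the trailing __ierr argument. As a minimal sketch of that pattern (not part of mpe_proff.c, and using only standard MPI calls rather than the MPICH-internal MPIR_* helpers), a hand-written wrapper for MPI_Type_contiguous could look like this:

#include "mpi.h"

/* Illustrative only; not from mpe_proff.c.  A Fortran-callable wrapper for
   MPI_Type_contiguous written in the same style as the wrappers above. */
void mpi_type_contiguous_ ( MPI_Fint *, MPI_Fint *, MPI_Fint *, MPI_Fint * );

void mpi_type_contiguous_( MPI_Fint *count, MPI_Fint *old_type,
                           MPI_Fint *newtype, MPI_Fint *__ierr )
{
    MPI_Datatype l_datatype;

    /* Convert the Fortran handle to a C handle, call the C routine, and
       return the new handle to Fortran as an integer. */
    *__ierr = MPI_Type_contiguous((int)*count,
                                  MPI_Type_f2c(*old_type),
                                  &l_datatype);
    *newtype = MPI_Type_c2f(l_datatype);
}

No scratch arrays are needed in this sketch because MPI_Type_contiguous takes only scalar arguments; the MPIR_FALLOC/FREE pattern in the listing exists to copy MPI_Fint arrays into int or MPI_Aint arrays when the Fortran and C integer types differ in width.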
