
mpe_proff.c
Package: fortran并行计算包 (Fortran parallel computing package)
Language: C
Page 1 of 5
/* (continued) name mappings from trailing-underscore to no-underscore externals */
#define mpi_sendrecv_ mpi_sendrecv
#define mpi_sendrecv_replace_ mpi_sendrecv_replace
#define mpi_ssend_init_ mpi_ssend_init
#define mpi_ssend_ mpi_ssend
#define mpi_startall_ mpi_startall
#define mpi_start_ mpi_start
#define mpi_testall_ mpi_testall
#define mpi_testany_ mpi_testany
#define mpi_test_cancelled_ mpi_test_cancelled
#define mpi_test_ mpi_test
#define mpi_testsome_ mpi_testsome
#define mpi_type_commit_ mpi_type_commit
#define mpi_type_contiguous_ mpi_type_contiguous
#define mpi_type_extent_ mpi_type_extent
#define mpi_type_free_ mpi_type_free
#define mpi_type_hindexed_ mpi_type_hindexed
#define mpi_type_hvector_ mpi_type_hvector
#define mpi_type_indexed_ mpi_type_indexed
#define mpi_type_lb_ mpi_type_lb
#define mpi_type_size_ mpi_type_size
#define mpi_type_struct_ mpi_type_struct
#define mpi_type_ub_ mpi_type_ub
#define mpi_type_vector_ mpi_type_vector
#define mpi_unpack_ mpi_unpack
#define mpi_waitall_ mpi_waitall
#define mpi_waitany_ mpi_waitany
#define mpi_wait_ mpi_wait
#define mpi_waitsome_ mpi_waitsome
#define mpi_allgather_ mpi_allgather
#define mpi_allgatherv_ mpi_allgatherv
#define mpi_allreduce_ mpi_allreduce
#define mpi_alltoall_ mpi_alltoall
#define mpi_alltoallv_ mpi_alltoallv
#define mpi_barrier_ mpi_barrier
#define mpi_bcast_ mpi_bcast
#define mpi_gather_ mpi_gather
#define mpi_gatherv_ mpi_gatherv
#define mpi_op_create_ mpi_op_create
#define mpi_op_free_ mpi_op_free
#define mpi_reduce_scatter_ mpi_reduce_scatter
#define mpi_reduce_ mpi_reduce
#define mpi_scan_ mpi_scan
#define mpi_scatter_ mpi_scatter
#define mpi_scatterv_ mpi_scatterv
#define mpi_finalize_ mpi_finalize
#endif

/*
 * Define prototypes next to the fortran2c wrapper to keep the compiler happy
 */
#if defined(USE_STDARG) && !defined(USE_OLDSTYLE_STDARG)
int MPER_Err_setmsg( int errclass, int errkind,
                     const char *routine_name,
                     const char *generic_string,
                     const char *default_string, ... )
{
    va_list Argp;
    va_start( Argp, default_string );
#else
/* This assumes old-style varargs support */
int MPER_Err_setmsg( errclass, errkind, routine_name,
                     generic_string, default_string, va_alist )
int errclass, errkind;
const char *routine_name, *generic_string, *default_string;
va_dcl
{
    va_list Argp;
    va_start( Argp );
#endif
    va_end( Argp );
    fprintf( stderr, __FILE__":MPER_Err_setmsg(%s) in MPE\n", routine_name );
    return errclass;
}

/****************************************************************************/
/* Fortran-callable wrappers begin here.  Most convert the Fortran integer
   handles to C handles (MPI_Comm_f2c and friends), call the C binding, and
   pass results back through the trailing arguments. */

void mpi_init_( MPI_Fint * );
void mpi_init_( MPI_Fint *ierr )
{
    int    Argc;
    int    i, argsize = 1024;
    char **Argv, *p;
    int    ArgcSave;         /* Save the argument count */
    char **ArgvSave;         /* Save the pointer to the argument vector */

    /* Recover the args with the Fortran routines iargc_ and getarg_ */
    ArgcSave = Argc = mpir_iargc_() + 1;
    ArgvSave = Argv = (char **)MALLOC( Argc * sizeof(char *) );
    if (!Argv) {
        *ierr = MPE_ErrPrint( (MPI_Comm)0, MPI_ERR_OTHER,
                              "Out of space in MPI_INIT" );
        return;
    }
    for (i=0; i<Argc; i++) {
        ArgvSave[i] = Argv[i] = (char *)MALLOC( argsize + 1 );
        if (!Argv[i]) {
            *ierr = MPE_ErrPrint( (MPI_Comm)0, MPI_ERR_OTHER,
                                  "Out of space in MPI_INIT" );
            return;
        }
        mpir_getarg_( &i, Argv[i], argsize );
        /* Trim trailing blanks */
        p = Argv[i] + argsize - 1;
        while (p > Argv[i]) {
            if (*p != ' ') {
                p[1] = '\0';
                break;
            }
            p--;
        }
    }
    *ierr = MPI_Init( &Argc, &Argv );

    /* Recover space */
    for (i=0; i<ArgcSave; i++) {
        FREE( ArgvSave[i] );
    }
    FREE( ArgvSave );
}

#if defined( HAVE_MPI_INIT_THREAD )
void mpi_init_thread_( MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr );
void mpi_init_thread_( MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr )
{
    *ierr = MPI_Init_thread( NULL, NULL, *required, provided );
}
#endif

void mpi_pcontrol_( MPI_Fint *icontrol, MPI_Fint *__ierr );
void mpi_pcontrol_( MPI_Fint *icontrol, MPI_Fint *__ierr )
{
    *__ierr = MPI_Pcontrol( *icontrol );
}

void mpi_comm_create_( MPI_Fint *comm, MPI_Fint *group,
                       MPI_Fint *comm_out, MPI_Fint *__ierr );
void mpi_comm_create_( MPI_Fint *comm, MPI_Fint *group,
                       MPI_Fint *comm_out, MPI_Fint *__ierr )
{
    MPI_Comm l_comm_out;
    *__ierr = MPI_Comm_create( MPI_Comm_f2c(*comm), MPI_Group_f2c(*group),
                               &l_comm_out );
    if (*__ierr == MPI_SUCCESS)
        *comm_out = MPI_Comm_c2f(l_comm_out);
}

void mpi_comm_dup_( MPI_Fint *comm, MPI_Fint *comm_out, MPI_Fint *__ierr );
void mpi_comm_dup_( MPI_Fint *comm, MPI_Fint *comm_out, MPI_Fint *__ierr )
{
    MPI_Comm l_comm_out;
    *__ierr = MPI_Comm_dup( MPI_Comm_f2c(*comm), &l_comm_out );
    if (*__ierr == MPI_SUCCESS)
        *comm_out = MPI_Comm_c2f(l_comm_out);
}

void mpi_comm_free_( MPI_Fint *comm, MPI_Fint *__ierr );
void mpi_comm_free_( MPI_Fint *comm, MPI_Fint *__ierr )
{
    MPI_Comm l_comm = MPI_Comm_f2c(*comm);
    *__ierr = MPI_Comm_free( &l_comm );
    if (*__ierr == MPI_SUCCESS)
        *comm = MPI_Comm_c2f(l_comm);
}

void mpi_comm_split_( MPI_Fint *comm, MPI_Fint *color, MPI_Fint *key,
                      MPI_Fint *comm_out, MPI_Fint *__ierr );
void mpi_comm_split_( MPI_Fint *comm, MPI_Fint *color, MPI_Fint *key,
                      MPI_Fint *comm_out, MPI_Fint *__ierr )
{
    MPI_Comm l_comm_out;
    *__ierr = MPI_Comm_split( MPI_Comm_f2c(*comm), (int)*color, (int)*key,
                              &l_comm_out );
    if (*__ierr == MPI_SUCCESS)
        *comm_out = MPI_Comm_c2f(l_comm_out);
}

void mpi_intercomm_create_( MPI_Fint *local_comm, MPI_Fint *local_leader,
                            MPI_Fint *peer_comm, MPI_Fint *remote_leader,
                            MPI_Fint *tag, MPI_Fint *comm_out,
                            MPI_Fint *__ierr );
void mpi_intercomm_create_( MPI_Fint *local_comm, MPI_Fint *local_leader,
                            MPI_Fint *peer_comm, MPI_Fint *remote_leader,
                            MPI_Fint *tag, MPI_Fint *comm_out,
                            MPI_Fint *__ierr )
{
    MPI_Comm l_comm_out;
    *__ierr = MPI_Intercomm_create( MPI_Comm_f2c(*local_comm),
                                    (int)*local_leader,
                                    MPI_Comm_f2c(*peer_comm),
                                    (int)*remote_leader, (int)*tag,
                                    &l_comm_out );
    if (*__ierr == MPI_SUCCESS)
        *comm_out = MPI_Comm_c2f(l_comm_out);
}

void mpi_intercomm_merge_( MPI_Fint *comm, MPI_Fint *high, MPI_Fint *comm_out,
                           MPI_Fint *__ierr );
void mpi_intercomm_merge_( MPI_Fint *comm, MPI_Fint *high, MPI_Fint *comm_out,
                           MPI_Fint *__ierr )
{
    MPI_Comm l_comm_out;
    *__ierr = MPI_Intercomm_merge( MPI_Comm_f2c(*comm), (int)*high,
                                   &l_comm_out );
    if (*__ierr == MPI_SUCCESS)
        *comm_out = MPI_Comm_c2f(l_comm_out);
}

void mpi_cart_create_( MPI_Fint *comm_old, MPI_Fint *ndims, MPI_Fint *dims,
                       MPI_Fint *periods, MPI_Fint *reorder,
                       MPI_Fint *comm_cart, MPI_Fint *ierr );
void mpi_cart_create_( MPI_Fint *comm_old, MPI_Fint *ndims, MPI_Fint *dims,
                       MPI_Fint *periods, MPI_Fint *reorder,
                       MPI_Fint *comm_cart, MPI_Fint *ierr )
{
    MPI_Comm   l_comm_cart;
    int       *lperiods, *ldims;
    int        ls_ints[40];     /* local static int[] */
    int       *la_ints;         /* local allocated int[] */
    int        is_malloced;
    int        i;

    /* Use the static buffer for small ndims; otherwise allocate */
    is_malloced = 0;
    if ( *ndims > 20 ) {
#if ! defined( HAVE_ALLOCA )
        la_ints  = (int *) malloc( 2 * (*ndims) * sizeof(int) );
        is_malloced = 1;
#else
        la_ints  = (int *) alloca( 2 * (*ndims) * sizeof(int) );
#endif
        lperiods = &(la_ints[0]);
        ldims    = &(la_ints[*ndims]);
    }
    else { /* if ( *ndims <= 20 ) */
        lperiods = &(ls_ints[0]);
        ldims    = &(ls_ints[20]);
    }
    for (i=0; i<(int)*ndims; i++) {
        lperiods[i] = MPIR_FROM_FLOG(periods[i]);
        ldims[i]    = (int)dims[i];
    }
#if defined(_TWO_WORD_FCD)
    {
        int tmp = *reorder;
        *ierr = MPI_Cart_create( MPI_Comm_f2c(*comm_old),
                                 (int)*ndims, ldims,
                                 lperiods, MPIR_FROM_FLOG(tmp),
                                 &l_comm_cart );
    }
#else
    *ierr = MPI_Cart_create( MPI_Comm_f2c(*comm_old),
                             (int)*ndims, ldims,
                             lperiods, MPIR_FROM_FLOG(*reorder),
                             &l_comm_cart );
#endif
#if ! defined( HAVE_ALLOCA )
    if ( is_malloced == 1 )
        free( la_ints );
#endif
    if (*ierr == MPI_SUCCESS)
        *comm_cart = MPI_Comm_c2f(l_comm_cart);
}

void mpi_cart_sub_( MPI_Fint *comm, MPI_Fint *remain_dims,
                    MPI_Fint *comm_new, MPI_Fint *__ierr );
void mpi_cart_sub_( MPI_Fint *comm, MPI_Fint *remain_dims,
                    MPI_Fint *comm_new, MPI_Fint *__ierr )
{
    MPI_Comm   lcomm_new;
    int        ls_ints[20];     /* local static int[] */
    int       *la_ints;         /* local allocated int[] */
    int        is_malloced;
    int       *lremain_dims;
    int        ndims, i;

    MPI_Cartdim_get( MPI_Comm_f2c(*comm), &ndims );
    is_malloced = 0;
    if ( ndims > 20 ) {
#if ! defined( HAVE_ALLOCA )
        la_ints  = (int *) malloc( ndims * sizeof(int) );
        is_malloced = 1;
#else
        la_ints  = (int *) alloca( ndims * sizeof(int) );
#endif
        lremain_dims = la_ints;
    }
    else { /* if ( ndims <= 20 ) */
        lremain_dims = ls_ints;
    }
    for (i=0; i<ndims; i++)
        lremain_dims[i] = MPIR_FROM_FLOG(remain_dims[i]);
    *__ierr = MPI_Cart_sub( MPI_Comm_f2c(*comm), lremain_dims,
                            &lcomm_new );
#if ! defined( HAVE_ALLOCA )
    if ( is_malloced == 1 )
        free( la_ints );
#endif
    if (*__ierr == MPI_SUCCESS)
        *comm_new = MPI_Comm_c2f(lcomm_new);
}

void mpi_graph_create_( MPI_Fint *comm_old, MPI_Fint *nnodes,
                        MPI_Fint *index, MPI_Fint *edges, MPI_Fint *reorder,
                        MPI_Fint *comm_graph, MPI_Fint *__ierr );
void mpi_graph_create_( MPI_Fint *comm_old, MPI_Fint *nnodes,
                        MPI_Fint *index, MPI_Fint *edges, MPI_Fint *reorder,
                        MPI_Fint *comm_graph, MPI_Fint *__ierr )
{
    MPI_Comm lcomm_graph;

    /* When MPI_Fint and int have the same size, the Fortran arrays can be
       passed through directly; otherwise copy them into int arrays. */
    if (sizeof(MPI_Fint) == sizeof(int)) {
#if defined(_TWO_WORD_FCD)
        int tmp = *reorder;
        *__ierr = MPI_Graph_create( MPI_Comm_f2c(*comm_old), (int)*nnodes,
                                    (int *)index, (int *)edges,
                                    MPIR_FROM_FLOG(tmp),
                                    &lcomm_graph );
#else
        *__ierr = MPI_Graph_create( MPI_Comm_f2c(*comm_old), (int)*nnodes,
                                    (int *)index, (int *)edges,
                                    MPIR_FROM_FLOG(*reorder),
                                    &lcomm_graph );
#endif
    }
    else {
        int  i;
        int  nedges;
        int *lindex;
        int *ledges;
        MPI_Graphdims_get( MPI_Comm_f2c(*comm_old), nnodes, &nedges );
        MPIR_FALLOC( lindex, (int*)MALLOC(sizeof(int) * (int)*nnodes),
                     MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
                     "MPI_Graph_create" );
        MPIR_FALLOC( ledges, (int*)MALLOC(sizeof(int) * (int)nedges),
                     MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
                     "MPI_Graph_create" );
        for (i=0; i<(int)*nnodes; i++)
            lindex[i] = (int)index[i];
        for (i=0; i<nedges; i++)
            ledges[i] = (int)edges[i];
#if defined(_TWO_WORD_FCD)
        int tmp = *reorder;
        *__ierr = MPI_Graph_create( MPI_Comm_f2c(*comm_old), (int)*nnodes,
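
The listing above is page 1 of 5 and breaks off inside mpi_graph_create_. For reference, the handle-conversion idiom that nearly every wrapper here follows can be summarized in one self-contained sketch. This is illustrative only and not part of mpe_proff.c: MPI_Comm_rank, MPI_Comm_f2c, and MPI_Fint are standard MPI, but this particular wrapper body is an assumption about what a wrapper on the remaining pages would look like.

/* Illustrative sketch -- not from mpe_proff.c.  A Fortran call to
   MPI_COMM_RANK(comm, rank, ierr) resolves, under the trailing-underscore
   convention remapped by the #define block at the top of this page, to a
   C symbol like this one: convert the Fortran handle with MPI_Comm_f2c,
   call the C binding, and return the error code via the trailing argument. */
#include <mpi.h>

void mpi_comm_rank_( MPI_Fint *comm, MPI_Fint *rank, MPI_Fint *__ierr );
void mpi_comm_rank_( MPI_Fint *comm, MPI_Fint *rank, MPI_Fint *__ierr )
{
    int l_rank;
    *__ierr = MPI_Comm_rank( MPI_Comm_f2c(*comm), &l_rank );
    if (*__ierr == MPI_SUCCESS)
        *rank = (MPI_Fint)l_rank;
}

The only state that crosses the language boundary is the MPI_Fint handle, which is why no wrapper in this file needs to know the internal layout of MPI_Comm.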
