
📄 log_mpi_core.c

📁 Fortran parallel computing package
💻 C
📖 Page 1 of 5
    state->format = NULL;

    state = &states[MPE_CART_SHIFT_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cart_shift";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_CART_SUB_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cart_sub";
    state->color = "DarkOliveGreen2";
    state->format = NULL;

    state = &states[MPE_CARTDIM_GET_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cartdim_get";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_DIMS_CREATE_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Dims_create";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPH_CREATE_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_create";
    state->color = "DarkOliveGreen3";
    state->format = NULL;

    state = &states[MPE_GRAPH_GET_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_get";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPH_MAP_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_map";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPH_NEIGHBORS_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_neighbors";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPH_NEIGHBORS_COUNT_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_neighbors_count";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPHDIMS_GET_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graphdims_get";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_TOPO_TEST_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Topo_test";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_RECV_IDLE_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Recv_idle";
    state->color = "SeaGreen1";
    state->format = NULL;
}

void MPE_Init_internal_logging( void )
{
    MPE_State  *state;
    MPE_Event  *event;

    /* These are MPE internal states */
    state = &states[MPE_ISEND_WAITED_ID];
    state->kind_mask = MPE_KIND_INTERNAL;
    state->name = "MPE_Isend_waited";
    state->color = "magenta";
    state->format = NULL;

    state = &states[MPE_IRECV_WAITED_ID];
    state->kind_mask = MPE_KIND_INTERNAL;
    state->name = "MPE_Irecv_waited";
    state->color = "DarkOrange";
    state->format = NULL;

    /* These are MPE internal Events */
    event = &events[MPE_COMM_INIT_ID];
    event->kind_mask = MPE_KIND_INTERNAL;
    event->name = "MPE_Comm_init";
    event->color = "red";
    event->format = NULL;

    event = &events[MPE_COMM_FINALIZE_ID];
    event->kind_mask = MPE_KIND_INTERNAL;
    event->name = "MPE_Comm_finalize";
    event->color = "orange";
    event->format = NULL;
}

/*
 * Here begin the individual routines.  We may eventually want to
 * break them up, at least by class (no need to load the MPI_CART/GRAPH
 * routines if the application doesn't use them).
 */
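/*
 * A note on the pattern shared by every wrapper below (a sketch of the
 * standard MPI profiling-interface technique, as suggested by the macro
 * names; the exact macro expansions live elsewhere in MPE):
 *
 *     int MPI_Xxx( ... )
 *     {
 *         MPE_LOG_STATE_BEGIN(comm, MPE_XXX_ID)     -- start of logged state
 *         returnVal = PMPI_Xxx( ... );              -- the real, name-shifted call
 *         MPE_LOG_STATE_END(comm, bytebuf or NULL)  -- end of logged state
 *         return returnVal;
 *     }
 *
 * When MAKE_SAFE_PMPI_CALL is defined, MPE_LOG_OFF/MPE_LOG_ON bracket the
 * PMPI call, presumably so that MPI calls made internally by the MPI
 * implementation are not logged recursively.
 */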
int MPI_Allgather( sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm )
void * sendbuf;
int sendcount;
MPI_Datatype sendtype;
void * recvbuf;
int recvcount;
MPI_Datatype recvtype;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Allgather - prototyping replacement for MPI_Allgather
    Log the beginning and ending of the time spent in MPI_Allgather calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ALLGATHER_ID)
  MPE_LOG_THREAD_UNLOCK
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Allgather( sendbuf, sendcount, sendtype,
                              recvbuf, recvcount, recvtype, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int MPI_Allgatherv( sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm )
void * sendbuf;
int sendcount;
MPI_Datatype sendtype;
void * recvbuf;
int * recvcounts;
int * displs;
MPI_Datatype recvtype;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Allgatherv - prototyping replacement for MPI_Allgatherv
    Log the beginning and ending of the time spent in MPI_Allgatherv calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ALLGATHERV_ID)
  MPE_LOG_THREAD_UNLOCK
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Allgatherv( sendbuf, sendcount, sendtype,
                               recvbuf, recvcounts, displs, recvtype, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int MPI_Allreduce( sendbuf, recvbuf, count, datatype, op, comm )
void * sendbuf;
void * recvbuf;
int count;
MPI_Datatype datatype;
MPI_Op op;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Allreduce - prototyping replacement for MPI_Allreduce
    Log the beginning and ending of the time spent in MPI_Allreduce calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ALLREDUCE_ID)
  MPE_LOG_THREAD_UNLOCK
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Allreduce( sendbuf, recvbuf, count, datatype, op, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int MPI_Alltoall( sendbuf, sendcnt, sendtype, recvbuf, recvcnt, recvtype, comm )
void * sendbuf;
int sendcnt;
MPI_Datatype sendtype;
void * recvbuf;
int recvcnt;
MPI_Datatype recvtype;
MPI_Comm comm;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL
  MPE_LOG_BYTEBUF_DECL
  int  comm_size, type_sz, msg_sz;

/*
    MPI_Alltoall - prototyping replacement for MPI_Alltoall
    Log the beginning and ending of the time spent in MPI_Alltoall calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ALLTOALL_ID)
  MPE_LOG_THREAD_UNLOCK
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Alltoall( sendbuf, sendcnt, sendtype,
                             recvbuf, recvcnt, recvtype, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif
  MPE_LOG_THREAD_LOCK
     PMPI_Comm_size( comm, &comm_size );
     bytebuf_pos = 0;
     PMPI_Type_size( sendtype, &type_sz );
     msg_sz = comm_size * sendcnt * type_sz;
     MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &msg_sz );
     PMPI_Type_size( recvtype, &type_sz );
     msg_sz = comm_size * recvcnt * type_sz;
     MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &msg_sz );
  MPE_LOG_STATE_END(comm,bytebuf)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}
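/*
 * Note on the MPE_Log_pack calls above: the Alltoall/Alltoallv wrappers
 * attach the total send and receive volumes (in bytes) to the logged
 * state, while the simpler collectives pass NULL instead of bytebuf.
 * As a worked example (illustrative numbers, not from this file): with
 * comm_size = 4, sendcnt = 1024, and sendtype = MPI_DOUBLE (8 bytes),
 * the packed send volume is 4 * 1024 * 8 = 32768 bytes per process.
 */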
int MPI_Alltoallv( sendbuf, sendcnts, sdispls, sendtype, recvbuf, recvcnts, rdispls, recvtype, comm )
void * sendbuf;
int * sendcnts;
int * sdispls;
MPI_Datatype sendtype;
void * recvbuf;
int * recvcnts;
int * rdispls;
MPI_Datatype recvtype;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL
  MPE_LOG_BYTEBUF_DECL
  int  idx, comm_size, type_sz, msg_sz;

/*
    MPI_Alltoallv - prototyping replacement for MPI_Alltoallv
    Log the beginning and ending of the time spent in MPI_Alltoallv calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ALLTOALLV_ID)
  MPE_LOG_THREAD_UNLOCK
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Alltoallv( sendbuf, sendcnts, sdispls, sendtype,
                              recvbuf, recvcnts, rdispls, recvtype, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif
  MPE_LOG_THREAD_LOCK
     PMPI_Comm_size( comm, &comm_size );
     bytebuf_pos = 0;
     PMPI_Type_size( sendtype, &type_sz );
     msg_sz = 0;
     for ( idx = 0; idx < comm_size; idx++ )
         msg_sz += sendcnts[idx] * type_sz;
     MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &msg_sz );
     PMPI_Type_size( recvtype, &type_sz );
     msg_sz = 0;
     for ( idx = 0; idx < comm_size; idx++ )
         msg_sz += recvcnts[idx] * type_sz;
     MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &msg_sz );
  MPE_LOG_STATE_END(comm,bytebuf)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int MPI_Barrier( comm )
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Barrier - prototyping replacement for MPI_Barrier
    Log the beginning and ending of the time spent in MPI_Barrier calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_BARRIER_ID)
  MPE_LOG_THREAD_UNLOCK
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Barrier( comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int MPI_Bcast( buffer, count, datatype, root, comm )
void * buffer;
int count;
MPI_Datatype datatype;
int root;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Bcast - prototyping replacement for MPI_Bcast
    Log the beginning and ending of the time spent in MPI_Bcast calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_BCAST_ID)
  MPE_LOG_THREAD_UNLOCK
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Bcast( buffer, count, datatype, root, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int MPI_Gather( sendbuf, sendcnt, sendtype, recvbuf, recvcount, recvtype, root, comm )
void * sendbuf;
int sendcnt;
MPI_Datatype sendtype;
void * recvbuf;
int recvcount;
MPI_Datatype recvtype;
int root;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Gather - prototyping replacement for MPI_Gather
    Log the beginning and ending of the time spent in MPI_Gather calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_GATHER_ID)
  MPE_LOG_THREAD_UNLOCK
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Gather( sendbuf, sendcnt, sendtype,
                           recvbuf, recvcount, recvtype, root, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}
int MPI_Gatherv( sendbuf, sendcnt, sendtype, recvbuf, recvcnts, displs, recvtype, root, comm )
void * sendbuf;
int sendcnt;
MPI_Datatype sendtype;
void * recvbuf;
int * recvcnts;
int * displs;
MPI_Datatype recvtype;
int root;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Gatherv - prototyping replacement for MPI_Gatherv
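The listing is cut off here by pagination (page 1 of 5). For context, a minimal usage sketch, with assumed build flags (the MPE library names and link order vary by installation; demo.c, the flags, and the comments below are illustrative, not part of log_mpi_core.c): because each wrapper above defines MPI_Xxx in terms of the name-shifted PMPI_Xxx, an unmodified MPI program picks up the logging simply by linking against the profiling library.

/* demo.c -- hypothetical example, not part of log_mpi_core.c.
 * Build (assumed flags):  mpicc demo.c -o demo -llmpe -lmpe
 * Run:                    mpiexec -n 4 ./demo
 * Each MPI call below is intercepted by a wrapper like those above,
 * which logs a timed state around the underlying PMPI_* call.
 */
#include <stdio.h>
#include <mpi.h>

int main( int argc, char *argv[] )
{
    int rank, value = 0;

    MPI_Init( &argc, &argv );                 /* logging is initialized here */
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    if (rank == 0) value = 42;
    MPI_Bcast( &value, 1, MPI_INT, 0, MPI_COMM_WORLD );  /* logged as the MPI_Bcast state */
    MPI_Barrier( MPI_COMM_WORLD );                       /* logged as the MPI_Barrier state */

    printf( "rank %d got %d\n", rank, value );
    MPI_Finalize();                           /* log file is written here */
    return 0;
}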
