log_mpi_core.c
#define MPE_REQ_START(request) \
    MPE_Req_start( request, state, THREADID, IS_MPELOG_ON );

#define MPE_REQ_WAIT_TEST(request,status,note) \
    MPE_Req_wait_test( request, status, note, state, THREADID, IS_MPELOG_ON );

/*
   Update commIDs after CLOG_CommSet_add_intracomm() which may have invoked
   realloc() on CLOG_CommSet's table[] of commIDs, because invocation of
   realloc() may invalidate all commIDs handed out by CLOG_CommSet.
*/
/* if (is_mpilog_on && IS_MPELOG_ON && state->is_active) { \ */
#define MPE_LOG_INTRACOMM(comm,new_comm,comm_etype) \
    if (is_thisfn_logged) { \
        if ( new_comm != MPI_COMM_NULL ) { \
            IS_MPELOG_ON = 0; \
            new_commIDs = CLOG_CommSet_add_intracomm( CLOG_CommSet, \
                                                      new_comm ); \
            IS_MPELOG_ON = 1; \
            commIDs = CLOG_CommSet_get_IDs( CLOG_CommSet, comm ); \
            MPE_Log_commIDs_intracomm( commIDs, THREADID, \
                                       comm_etype, new_commIDs ); \
            MPE_LOG_SOLO_EVENT( new_commIDs, THREADID, MPE_COMM_INIT_ID ) \
        } \
        else { \
            MPE_Log_commIDs_nullcomm( commIDs, THREADID, comm_etype ); \
            MPE_LOG_SOLO_EVENT( commIDs, THREADID, MPE_COMM_FINALIZE_ID ) \
        } \
    }

/*
   Update commIDs after CLOG_CommSet_add_intercomm() which may have invoked
   realloc() on CLOG_CommSet's table[] of commIDs, because invocation of
   realloc() may invalidate all commIDs handed out by CLOG_CommSet.
*/
/* if (is_mpilog_on && IS_MPELOG_ON && state->is_active) { \ */
#define MPE_LOG_INTERCOMM(comm,new_comm,comm_etype) \
    if (is_thisfn_logged) { \
        if ( new_comm != MPI_COMM_NULL ) { \
            IS_MPELOG_ON = 0; \
            new_commIDs = CLOG_CommSet_add_intercomm( CLOG_CommSet, \
                                                      new_comm, commIDs ); \
            IS_MPELOG_ON = 1; \
            commIDs = CLOG_CommSet_get_IDs( CLOG_CommSet, comm ); \
            MPE_Log_commIDs_intercomm( commIDs, THREADID, \
                                       comm_etype, new_commIDs ); \
            MPE_LOG_SOLO_EVENT( new_commIDs, THREADID, MPE_COMM_INIT_ID ) \
        } \
        else { \
            MPE_Log_commIDs_nullcomm( commIDs, THREADID, comm_etype ); \
            MPE_LOG_SOLO_EVENT( commIDs, THREADID, MPE_COMM_FINALIZE_ID ) \
        } \
    }

#define MPE_LOG_ON \
    if (is_thisfn_logged) IS_MPELOG_ON = 1;

#define MPE_LOG_OFF \
    if (is_thisfn_logged) IS_MPELOG_ON = 0;
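/*
 * Illustrative sketch (not part of the original source): one way a profiling
 * wrapper for a communicator-creating call such as MPI_Comm_dup could drive
 * MPE_LOG_INTRACOMM.  The wrapper-local variables the macro relies on
 * (is_thisfn_logged, IS_MPELOG_ON, commIDs, new_commIDs, THREADID) are
 * assumed to come from the usual wrapper preamble, and COMM_DUP_ETYPE stands
 * in for whatever event-type constant the real wrappers pass; both are
 * assumptions, not the library's definitions.
 */
#if 0
int MPI_Comm_dup( MPI_Comm comm, MPI_Comm *newcomm )
{
    int returnVal;
    /* ... wrapper preamble declares commIDs, new_commIDs, THREADID,
           is_thisfn_logged and IS_MPELOG_ON ... */
    returnVal = PMPI_Comm_dup( comm, newcomm );
    /* Register the new intracommunicator with CLOG and log the event;
       the macro also re-fetches commIDs in case CLOG_CommSet realloc'ed. */
    MPE_LOG_INTRACOMM( comm, *newcomm, COMM_DUP_ETYPE )
    return returnVal;
}
#endif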
/* Service routines for managing requests .... */
/*
   If there are large numbers of requests, we should probably use a better
   search structure, such as a hash table or tree.
*/
void MPE_Req_add_send( request, datatype, count, dest, tag, commIDs,
                       is_persistent )
MPI_Request           request;
MPI_Datatype          datatype;
const CLOG_CommIDs_t *commIDs;
int                   count, dest, tag, is_persistent;
{
    request_list *newrq;
    int           typesize;

    rq_alloc( requests_avail_0, newrq );
    if (newrq) {
        PMPI_Type_size( datatype, &typesize );
        newrq->request       = request;
        newrq->commIDs       = commIDs;
        newrq->status        = RQ_SEND;
        newrq->size          = count * typesize;
        newrq->tag           = tag;
        newrq->mate          = dest;
        newrq->next          = 0;
        newrq->is_persistent = is_persistent;
        rq_add( requests_head_0, requests_tail_0, newrq );
    }
}

void MPE_Req_add_recv( request, datatype, count, source, tag, commIDs,
                       is_persistent )
MPI_Request           request;
MPI_Datatype          datatype;
const CLOG_CommIDs_t *commIDs;
int                   count, source, tag, is_persistent;
{
    request_list *newrq;

    /* We could pre-allocate request_list members, or allocate in blocks.
       Do this if we see this is a bottleneck. */
    rq_alloc( requests_avail_0, newrq );
    if (newrq) {
        newrq->request       = request;
        newrq->commIDs       = commIDs;
        newrq->status        = RQ_RECV;
        newrq->next          = 0;
        newrq->is_persistent = is_persistent;
        rq_add( requests_head_0, requests_tail_0, newrq );
    }
}

void MPE_Req_cancel( request )
MPI_Request request;
{
    request_list *rq;

    rq_find( requests_head_0, request, rq );
    if (rq)
        rq->status |= RQ_CANCEL;
}

void MPE_Req_remove( request )
MPI_Request request;
{
    rq_remove( requests_head_0, requests_tail_0, requests_avail_0, request );
}

/* Persistent sends and receives are handled with this routine
   (called by start or startall) */
void MPE_Req_start( request, state, thdID, is_logging_on )
MPI_Request  request;
MPE_State   *state;
int          thdID;
int          is_logging_on;
{
    request_list *rq;
    MPE_State    *istate;

    /* look for request */
    rq = requests_head_0;
    while (rq && (rq->request != request)) {
        rq = rq->next;
    }
    if (!rq) {
#ifdef PRINT_PROBLEMS
        fprintf( stderr, __FILE__":MPE_Req_start(), request not found.\n" );
#endif
        return;    /* request not found */
    }

    if ((rq->status & RQ_SEND) && rq->mate != MPI_PROC_NULL) {
        if (is_mpilog_on && is_logging_on && state->is_active) {
            istate = &states[MPE_ISEND_WAITED_ID];
            if (istate->is_active) {
                MPE_Log_commIDs_event( rq->commIDs, thdID,
                                       istate->start_evtID, NULL );
                MPE_Log_commIDs_send( rq->commIDs, thdID,
                                      rq->mate, rq->tag, rq->size );
                MPE_Log_commIDs_event( rq->commIDs, thdID,
                                       istate->final_evtID, NULL );
                istate->n_calls += 2;
            }
            else {
                MPE_Log_commIDs_send( rq->commIDs, thdID,
                                      rq->mate, rq->tag, rq->size );
            }
        }
    }
}

void MPE_Req_wait_test( request, status, note, state, thdID, is_logging_on )
MPI_Request  request;
MPI_Status  *status;
char        *note;
MPE_State   *state;
int          thdID;
int          is_logging_on;
{
    request_list *rq, *last;
    int           flag, size;
    MPE_State    *istate;

    /* look for request */
    rq   = requests_head_0;
    last = 0;
    while (rq && (rq->request != request)) {
        last = rq;
        rq   = rq->next;
    }
    if (!rq) {
#ifdef PRINT_PROBLEMS
        fprintf( stderr, __FILE__":MPE_Req_wait_test(), "
                         "Request not found in '%s'.\n", note );
        fflush( stderr );
#endif
        return;    /* request not found */
    }

#ifdef HAVE_MPI_STATUS_IGNORE
    if (status == MPI_STATUS_IGNORE) {
        fprintf( stderr, __FILE__":MPE_Req_wait_test() cannot process "
                         "incoming MPI_Status, MPI_STATUS_IGNORE\n" );
        fflush( stderr );
        return;
    }
#endif

    if (status->MPI_TAG != MPI_ANY_TAG || (rq->status & RQ_SEND)) {
        /* if the request was not invalid */
        if (rq->status & RQ_CANCEL) {
            PMPI_Test_cancelled( status, &flag );
            if (flag)
                return;    /* the request has been cancelled */
        }

        /* Receives conclude at the END of Wait/Test.
           Sends start at the beginning. */
        if ((rq->status & RQ_RECV) && (status->MPI_SOURCE != MPI_PROC_NULL)) {
            PMPI_Get_count( status, MPI_BYTE, &size );
            if (is_mpilog_on && is_logging_on && state->is_active) {
                istate = &states[MPE_IRECV_WAITED_ID];
                if (istate->is_active) {
                    MPE_Log_commIDs_event( rq->commIDs, thdID,
                                           istate->start_evtID, NULL );
                    MPE_Log_commIDs_receive( rq->commIDs, thdID,
                                             status->MPI_SOURCE,
                                             status->MPI_TAG, size );
                    MPE_Log_commIDs_event( rq->commIDs, thdID,
                                           istate->final_evtID, NULL );
                    istate->n_calls += 2;
                }
                else {
                    MPE_Log_commIDs_receive( rq->commIDs, thdID,
                                             status->MPI_SOURCE,
                                             status->MPI_TAG, size );
                }
            }
        }
    }

    /* Since the request has already been found, remove it. */
    if (!rq->is_persistent) {
        rq_remove_at( requests_head_0, requests_tail_0, requests_avail_0,
                      rq, last );
    }
}
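/*
 * Illustrative sketch (not part of the original source): the intended
 * life-cycle of a request-list entry.  A nonblocking-send wrapper records
 * the request with MPE_Req_add_send(); the matching Wait/Test wrapper later
 * logs and retires it through MPE_REQ_WAIT_TEST().  The wrapper-local
 * variables used implicitly by the macros (commIDs, state, THREADID,
 * IS_MPELOG_ON) are assumed to be declared by the usual wrapper preamble;
 * the sketch only shows the bookkeeping flow.
 */
#if 0
int MPI_Isend( void *buf, int count, MPI_Datatype datatype, int dest,
               int tag, MPI_Comm comm, MPI_Request *request )
{
    int returnVal;
    /* ... wrapper preamble ... */
    returnVal = PMPI_Isend( buf, count, datatype, dest, tag, comm, request );
    /* remember the request so the Wait/Test wrapper can log the send */
    MPE_Req_add_send( *request, datatype, count, dest, tag, commIDs, 0 );
    return returnVal;
}

int MPI_Wait( MPI_Request *request, MPI_Status *status )
{
    int         returnVal;
    MPI_Request lreq = *request;   /* PMPI_Wait may overwrite *request,
                                      e.g. with MPI_REQUEST_NULL */
    /* ... wrapper preamble ... */
    returnVal = PMPI_Wait( request, status );
    /* log the completed send/receive and drop the bookkeeping entry */
    MPE_REQ_WAIT_TEST( lreq, status, "MPI_Wait" )
    return returnVal;
}
#endif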
void MPE_Init_states_events( void )
{
    MPE_State *state;
    MPE_Event *event;
    int        allow_mask;
    int        idx;

    /* Initialize all internal events */
    for ( idx = 0; idx < MPE_MAX_KNOWN_EVENTS; idx++ ) {
        event            = &events[idx];
        event->eventID   = MPE_Log_get_known_solo_eventID();
        event->n_calls   = 0;
        event->is_active = 0;
        event->name      = NULL;
        event->kind_mask = 0;
        event->color     = "white";
    }

    /* Initialize all internal states */
    for ( idx = 0; idx < MPE_MAX_KNOWN_STATES; idx++ ) {
        state              = &states[idx];
        state->stateID     = MPE_Log_get_known_stateID();
        state->start_evtID = MPE_Log_get_known_eventID();
        state->final_evtID = MPE_Log_get_known_eventID();
        state->n_calls     = 0;
        state->is_active   = 0;
        state->name        = NULL;
        state->kind_mask   = 0;
        state->color       = "white";
        state->format      = NULL;
    }

    /* Should check environment and command-line for changes to allow_mask */
    /* By default, log only message-passing (pt-to-pt and collective) */
    allow_mask  = MPE_KIND_MSG | MPE_KIND_MSG_INIT | MPE_KIND_COLL;
    allow_mask |= MPE_KIND_COMM | MPE_KIND_COMM_INFO;
    allow_mask |= MPE_KIND_TOPO;
    MPE_Init_mpi_core();

#ifdef HAVE_MPI_IO
    allow_mask |= MPE_KIND_FILE;
    MPE_Init_mpi_io();
#endif

#ifdef HAVE_MPI_RMA
    allow_mask |= MPE_KIND_RMA;
    MPE_Init_mpi_rma();
#endif

#ifdef HAVE_MPI_SPAWN
    allow_mask |= MPE_KIND_SPAWN;
    MPE_Init_mpi_spawn();
#endif

    /* The internal flag is always ON */
    allow_mask |= MPE_KIND_INTERNAL;
    MPE_Init_internal_logging();

    /* Activate the basic states */
    for ( idx = 0; idx < MPE_MAX_KNOWN_STATES; idx++ ) {
        if ( (states[idx].kind_mask & allow_mask) != 0 )
            states[idx].is_active = 1;
    }

    /* Activate the basic events */
    for ( idx = 0; idx < MPE_MAX_KNOWN_EVENTS; idx++ ) {
        if ( (events[idx].kind_mask & allow_mask) != 0 )
            events[idx].is_active = 1;
    }
}
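/*
 * Illustrative sketch (not part of the original source): MPE_Init_states_events()
 * above notes that allow_mask should eventually be adjustable from the
 * environment or command line.  One minimal way to do that is shown below,
 * using a made-up MPE_LOG_KINDS environment variable; the helper would be
 * called from MPE_Init_states_events() before the activation loops and
 * assumes <stdlib.h> and <string.h> are included.  The variable name and
 * helper are assumptions, not part of the library.
 */
#if 0
static int MPE_Adjust_allow_mask( int allow_mask )
{
    const char *kinds = getenv( "MPE_LOG_KINDS" );   /* hypothetical name */
    if (kinds != NULL) {
        /* OR in extra MPE_KIND_* bits named in the variable */
        if (strstr( kinds, "attr" ) != NULL)
            allow_mask |= MPE_KIND_ATTR;
        if (strstr( kinds, "env" ) != NULL)
            allow_mask |= MPE_KIND_ENV;
    }
    return allow_mask;
}
#endif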
void MPE_Init_mpi_core( void )
{
    MPE_State *state;

    /*
       We COULD read these definitions from a file, but accessing the file
       in PARALLEL can be a problem, and even if one process accessed it and
       broadcast the contents, we'd still have to find the file.  Is this a
       problem?  (We have to WRITE the file, after all.)

       We only need to load the name and kind_mask; is_active is derived
       from kind_mask and the allowed mask.
    */
    state = &states[MPE_ALLGATHER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Allgather";
    state->color     = "purple3";
    state->format    = NULL;

    state = &states[MPE_ALLGATHERV_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Allgatherv";
    state->color     = "purple3";
    state->format    = NULL;

    state = &states[MPE_ALLREDUCE_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Allreduce";
    state->color     = "purple";
    state->format    = NULL;

    state = &states[MPE_ALLTOALL_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Alltoall";
    state->color     = "DarkViolet";
    state->format    = "send_msg_sz/proc=%d, recv_msg_sz/proc=%d.";

    state = &states[MPE_ALLTOALLV_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Alltoallv";
    state->color     = "DarkViolet";
    state->format    = "send_msg_sz/proc=%d, recv_msg_sz/proc=%d.";

    state = &states[MPE_BARRIER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Barrier";
    state->color     = "yellow";
    state->format    = NULL;

    state = &states[MPE_BCAST_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Bcast";
    state->color     = "cyan";
    state->format    = NULL;

    state = &states[MPE_GATHER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Gather";
    state->format    = NULL;

    state = &states[MPE_GATHERV_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Gatherv";
    state->format    = NULL;

    state = &states[MPE_OP_CREATE_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name      = "MPI_Op_create";
    state->format    = NULL;

    state = &states[MPE_OP_FREE_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name      = "MPI_Op_free";
    state->format    = NULL;

    state = &states[MPE_REDUCE_SCATTER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Reduce_scatter";
    state->format    = NULL;

    state = &states[MPE_REDUCE_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Reduce";
    state->color     = "MediumPurple";
    state->format    = NULL;

    state = &states[MPE_SCAN_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Scan";
    state->format    = NULL;

    state = &states[MPE_SCATTER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Scatter";
    state->color     = "orchid";
    state->format    = NULL;

    state = &states[MPE_SCATTERV_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name      = "MPI_Scatterv";
    state->color     = "orchid";
    state->format    = NULL;

    state = &states[MPE_ATTR_DELETE_ID];
    state->kind_mask = MPE_KIND_ATTR;
    state->name      = "MPI_Attr_delete";
    state->format    = NULL;

    state = &states[MPE_ATTR_GET_ID];
    state->kind_mask = MPE_KIND_ATTR;
    state->name      = "MPI_Attr_get";
    state->format    = NULL;

    state = &states[MPE_ATTR_PUT_ID];
    state->kind_mask = MPE_KIND_ATTR;
    state->name      = "MPI_Attr_put";
    state->format    = NULL;

    state = &states[MPE_COMM_COMPARE_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name      = "MPI_Comm_compare";
    state->color     = "white";
    state->format    = NULL;

    state = &states[MPE_COMM_CREATE_ID];
    state->kind_mask = MPE_KIND_COMM;
    state->name      = "MPI_Comm_create";
    state->color     = "DarkOliveGreen1";
    state->format    = NULL;

    state = &states[MPE_COMM_DUP_ID];
    state->kind_mask = MPE_KIND_COMM;
    state->name      = "MPI_Comm_dup";
    state->color     = "OliveDrab1";
    state->format    = NULL;

    state = &states[MPE_COMM_FREE_ID];
    state->kind_mask = MPE_KIND_COMM;
    state->name      = "MPI_Comm_free";
    state->color     = "LightSeaGreen";
    state->format    = NULL;

    state = &states[MPE_COMM_GROUP_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name      = "MPI_Comm_group";
    state->color     = "white";
    state->format    = NULL;

    state = &states[MPE_COMM_RANK_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name      = "MPI_Comm_rank";
    state->color     = "white";
    state->format    = NULL;