📄 communicator.h
/**
 * extract the local group from a communicator
 */
int ompi_comm_group (ompi_communicator_t *comm, ompi_group_t **group);

/**
 * create a communicator based on a group
 */
int ompi_comm_create (ompi_communicator_t* comm, ompi_group_t *group,
                      ompi_communicator_t** newcomm);

/**
 * create a cartesian communicator
 */
int ompi_topo_create (ompi_communicator_t *old_comm,
                      int ndims_or_nnodes,
                      int *dims_or_index,
                      int *periods_or_edges,
                      bool reorder,
                      ompi_communicator_t **comm_cart,
                      int cart_or_graph);

/**
 * split a communicator based on color and key. Parameters
 * are identical to the MPI counterpart of the function.
 *
 * @param comm: input communicator
 * @param color
 * @param key
 */
int ompi_comm_split (ompi_communicator_t *comm, int color, int key,
                     ompi_communicator_t** newcomm, bool pass_on_topo);

/**
 * dup a communicator. Parameters are identical to the MPI counterpart
 * of the function. It has been extracted since we need to be able
 * to dup a communicator internally as well.
 *
 * @param comm: input communicator
 */
int ompi_comm_dup (ompi_communicator_t *comm, ompi_communicator_t **newcomm);

/**
 * free a communicator
 */
int ompi_comm_free (ompi_communicator_t **comm);

/**
 * allocate a new communicator structure
 * @param local_group_size
 * @param remote_group_size
 *
 * This routine allocates the structure, the corresponding local and
 * remote groups, and the proc arrays in the local and remote group.
 * It furthermore sets the fortran index correctly
 * and sets all other elements to zero.
 */
ompi_communicator_t* ompi_comm_allocate (int local_group_size,
                                         int remote_group_size);

/**
 * allocate a new communicator ID
 * @param newcomm:    pointer to the new communicator
 * @param oldcomm:    original comm
 * @param bridgecomm: bridge comm for intercomm_create
 * @param mode:       combination of input
 *                    OMPI_COMM_CID_INTRA:        intra-comm
 *                    OMPI_COMM_CID_INTER:        inter-comm
 *                    OMPI_COMM_CID_INTRA_BRIDGE: 2 intracomms connected by
 *                                                a bridge comm. local_leader
 *                                                and remote_leader are in this
 *                                                case an int (rank in bridge-comm).
 *                    OMPI_COMM_CID_INTRA_OOB:    2 intracomms, leaders talk
 *                                                through OOB. lleader and rleader
 *                                                are the required contact information.
 * @param send_first: to avoid a potential deadlock for
 *                    the OOB version.
 * This routine has to be thread safe in the final version.
 */
int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
                        ompi_communicator_t* oldcomm,
                        ompi_communicator_t* bridgecomm,
                        void* local_leader,
                        void* remote_leader,
                        int mode,
                        int send_first);

/**
 * shut down the communicator infrastructure.
 */
int ompi_comm_finalize (void);

/**
 * This is THE routine where all the communicator stuff
 * is really set.
 */
int ompi_comm_set ( ompi_communicator_t* newcomm,
                    ompi_communicator_t* oldcomm,
                    int local_size,
                    struct ompi_proc_t **local_procs,
                    int remote_size,
                    struct ompi_proc_t **remote_procs,
                    opal_hash_table_t *attr,
                    ompi_errhandler_t *errh,
                    mca_base_component_t *topocomponent );
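/*
 * Editorial usage sketch, not part of the original header: how the
 * allocation, set, CID and activation routines declared in this file
 * appear to be combined when an intra-communicator is built internally
 * (ompi_comm_activate is declared further below).  The helper name, the
 * choice of -1 for send_first, the error constants and the
 * comm->error_handler field are assumptions made for illustration only.
 *
 * @code
 *   static int example_build_intracomm (ompi_communicator_t *comm, int size,
 *                                       struct ompi_proc_t **procs,
 *                                       ompi_communicator_t **newcomm)
 *   {
 *       ompi_communicator_t *newcomp;
 *       int rc;
 *
 *       // allocate the shell: local group of 'size' procs, no remote group
 *       newcomp = ompi_comm_allocate (size, 0);
 *       if (NULL == newcomp) {
 *           return OMPI_ERR_OUT_OF_RESOURCE;
 *       }
 *
 *       // fill in groups, attributes and error handler on the new comm
 *       rc = ompi_comm_set (newcomp, comm, size, procs, 0, NULL,
 *                           NULL, comm->error_handler, NULL);
 *       if (OMPI_SUCCESS != rc) {
 *           return rc;
 *       }
 *
 *       // agree on a context ID; intra-comm, so no bridge comm or leaders
 *       rc = ompi_comm_nextcid (newcomp, comm, NULL, NULL, NULL,
 *                               OMPI_COMM_CID_INTRA, -1);
 *       if (OMPI_SUCCESS != rc) {
 *           return rc;
 *       }
 *
 *       // select and enable the collective component
 *       rc = ompi_comm_activate (newcomp, comm, NULL, NULL, NULL,
 *                                OMPI_COMM_CID_INTRA, -1, NULL);
 *       if (OMPI_SUCCESS != rc) {
 *           return rc;
 *       }
 *
 *       *newcomm = newcomp;
 *       return OMPI_SUCCESS;
 *   }
 * @endcode
 */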
/**
 * This is a short-hand routine used in intercomm_create.
 * The routine makes sure that all processes afterwards have
 * a list of ompi_proc_t pointers for the remote group.
 */
struct ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
                                            ompi_communicator_t *bridge_comm,
                                            int local_leader,
                                            int remote_leader,
                                            orte_rml_tag_t tag,
                                            int rsize);

/**
 * This routine verifies whether the local group and the remote group
 * overlap in intercomm_create.
 */
int ompi_comm_overlapping_groups (int size, struct ompi_proc_t ** lprocs,
                                  int rsize, struct ompi_proc_t ** rprocs);

/**
 * This routine determines whether the local or the
 * remote group will be first in the new intra-comm.
 * Just used from within MPI_Intercomm_merge.
 */
int ompi_comm_determine_first ( ompi_communicator_t *intercomm,
                                int high );

int ompi_comm_activate ( ompi_communicator_t* newcomm,
                         ompi_communicator_t* oldcomm,
                         ompi_communicator_t* bridgecomm,
                         void* local_leader,
                         void* remote_leader,
                         int mode,
                         int send_first,
                         mca_base_component_t *collcomponent );

/**
 * a simple function to dump the structure
 */
int ompi_comm_dump ( ompi_communicator_t *comm );

/**
 * a simple function to determine a port number
 */
int ompi_open_port (char *port_name);

/**
 * takes a port_name and returns the oob contact information
 * and the tag
 */
char * ompi_parse_port (char *port_name, orte_rml_tag_t *tag);

/**
 * routines handling name publishing, lookup and unpublishing
 */
int ompi_comm_namepublish ( char *service_name, char *port_name );
char* ompi_comm_namelookup ( char *service_name );
int ompi_comm_nameunpublish ( char *service_name );
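/*
 * Editorial usage sketch, not part of the original header: how the port and
 * name-publishing helpers above fit together.  The service name "ocean",
 * the MPI_MAX_PORT_NAME buffer size and the server/client split are
 * assumptions for illustration only; return codes are ignored for brevity.
 *
 * @code
 *   char port_name[MPI_MAX_PORT_NAME];
 *   orte_rml_tag_t tag;
 *   char *found, *oob_contact;
 *
 *   // server side: create a port string and publish it under a service name
 *   ompi_open_port (port_name);
 *   ompi_comm_namepublish ("ocean", port_name);
 *
 *   // client side: look the port up and split it into contact info and tag
 *   found       = ompi_comm_namelookup ("ocean");
 *   oob_contact = ompi_parse_port (found, &tag);
 *
 *   // server side: withdraw the name once the service goes away
 *   ompi_comm_nameunpublish ("ocean");
 * @endcode
 */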
/* setting name */
int ompi_comm_set_name (ompi_communicator_t *comm, char *name );

/* THE routine for dynamic process management. This routine sets up the
   connection between two independent applications. */
int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
                               orte_process_name_t *port, int send_first,
                               ompi_communicator_t **newcomm,
                               orte_rml_tag_t tag);

/*
 * these are the init and finalize functions for the comm_reg
 * stuff. These routines are necessary for handling multi-threading
 * scenarios in the communicator_cid allocation.
 */
void ompi_comm_reg_init(void);
void ompi_comm_reg_finalize(void);

/* start the new processes from MPI_Comm_spawn_multiple. Initial
 * version, very rough */
int ompi_comm_start_processes(int count, char **array_of_commands,
                              char ***array_of_argv,
                              int *array_of_maxprocs,
                              MPI_Info *array_of_info,
                              char *port_name);

/*
 * This routine checks whether an application has been spawned
 * by another MPI application, or has been started independently.
 * If it has been spawned, it establishes the parent communicator.
 * Since the routine has to communicate, it should be among the last
 * steps in MPI_Init, to be sure that everything is already set up.
 */
int ompi_comm_dyn_init(void);

/**
 * Internally executes a disconnect on all dynamic communicators
 * in case the user did not disconnect them.
 */
int ompi_comm_dyn_finalize(void);

/* this routine counts the number of different jobids of the processes
   given in a certain communicator. If there is more than one jobid,
   we mark the communicator as 'dynamic'. This is especially relevant
   for MPI_Comm_disconnect *and* for MPI_Finalize, where we have to
   wait for all still connected processes. */
extern int ompi_comm_num_dyncomm;
void ompi_comm_mark_dyncomm (ompi_communicator_t *comm);

/* the next two routines implement a kind of non-blocking barrier.
   The only difference is that you can wait for the completion of more
   than one initiated ibarrier. This is required for waiting for all
   still connected processes in MPI_Finalize.

   ompi_comm_disconnect_init returns a handle, which has to be passed
   in to ompi_comm_disconnect_waitall. The second routine blocks until
   all non-blocking barriers described by the handles are finished.
   The communicators can then be released. */
struct ompi_comm_disconnect_obj {
    ompi_communicator_t *comm;
    int size;
    struct ompi_request_t **reqs;
    int buf;
};
typedef struct ompi_comm_disconnect_obj ompi_comm_disconnect_obj;

ompi_comm_disconnect_obj *ompi_comm_disconnect_init (ompi_communicator_t *comm);
void ompi_comm_disconnect_waitall (int count, ompi_comm_disconnect_obj **objs );

#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif /* OMPI_COMMUNICATOR_H */
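/*
 * Editorial usage sketch, placed after the include guard and not part of the
 * original header: draining several still-connected communicators with the
 * non-blocking barrier pair declared above.  The 'comms' array and 'count'
 * bookkeeping are hypothetical placeholders for whatever the caller maintains;
 * whether the handles themselves must be freed is left open here.
 *
 * @code
 *   ompi_comm_disconnect_obj **objs;
 *   int i;
 *
 *   objs = (ompi_comm_disconnect_obj **) malloc (count *
 *                                                sizeof (ompi_comm_disconnect_obj *));
 *   if (NULL != objs) {
 *       // start one non-blocking barrier per still-connected communicator ...
 *       for (i = 0; i < count; i++) {
 *           objs[i] = ompi_comm_disconnect_init (comms[i]);
 *       }
 *
 *       // ... then wait for all of them at once; afterwards the
 *       // communicators can be released
 *       ompi_comm_disconnect_waitall (count, objs);
 *       free (objs);
 *   }
 * @endcode
 */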