⭐ 虫虫下载站

📄 intercomm_create.c

📁 C source code for MPI parallel computing; it compiles with VC or gcc and can be used to set up a parallel-computing test environment (a quick verification sketch follows the file details below).
💻 C
📖 Page 1 of 2
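The description above says this kind of code compiles with VC or gcc and can be used to build a parallel-computing test environment. As a quick sanity check of such an environment, here is a minimal sketch (illustrative only, not part of the downloaded file; it assumes an MPI implementation such as MPICH that provides the standard mpicc and mpiexec wrappers):

/* check_env.c -- hypothetical file name; a minimal sketch for verifying an
   MPI test environment.  Build and run with, for example:
       mpicc check_env.c -o check_env
       mpiexec -n 4 ./check_env                                            */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rank, size;
    MPI_Init(&argc, &argv);                 /* start the MPI runtime       */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);   /* this process's rank         */
    MPI_Comm_size(MPI_COMM_WORLD, &size);   /* total number of processes   */
    printf("process %d of %d is alive\n", rank, size);
    MPI_Finalize();                         /* shut the runtime down       */
    return 0;
}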
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
 *
 *  (C) 2001 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#include "mpiimpl.h"
#include "mpicomm.h"

/* -- Begin Profiling Symbol Block for routine MPI_Intercomm_create */
#if defined(HAVE_PRAGMA_WEAK)
#pragma weak MPI_Intercomm_create = PMPI_Intercomm_create
#elif defined(HAVE_PRAGMA_HP_SEC_DEF)
#pragma _HP_SECONDARY_DEF PMPI_Intercomm_create  MPI_Intercomm_create
#elif defined(HAVE_PRAGMA_CRI_DUP)
#pragma _CRI duplicate MPI_Intercomm_create as PMPI_Intercomm_create
#endif
/* -- End Profiling Symbol Block */

/* Define MPICH_MPI_FROM_PMPI if weak symbols are not supported to build
   the MPI routines */
PMPI_LOCAL int MPIR_CheckDisjointLpids( int [], int, int [], int );

#ifndef MPICH_MPI_FROM_PMPI
#define MPI_Intercomm_create PMPI_Intercomm_create

/* 128 allows us to handle up to 4k processes */
#ifdef HAVE_ERROR_CHECKING
#define MAX_LPID32_ARRAY 128
PMPI_LOCAL int MPIR_CheckDisjointLpids( int lpids1[], int n1,
                                        int lpids2[], int n2 )
{
    static const char FCNAME[] = "MPIR_CheckDisjointLpids";
    int i, maxi, idx, bit, maxlpid = -1;
    int mpi_errno = MPI_SUCCESS;
    int32_t lpidmask[MAX_LPID32_ARRAY];

    /* Find the max lpid */
    for (i=0; i<n1; i++) {
        if (lpids1[i] > maxlpid) maxlpid = lpids1[i];
    }
    for (i=0; i<n2; i++) {
        if (lpids2[i] > maxlpid) maxlpid = lpids2[i];
    }

    /* --BEGIN ERROR HANDLING-- */
    if (maxlpid >= MAX_LPID32_ARRAY * 32) {
        /* FIXME: internationalize */
        MPIU_ERR_SET1(mpi_errno, MPI_ERR_OTHER, "**intern",
                      "**intern %s",
                      "Too many processes in intercomm_create" );
        return mpi_errno;
    }
    /* --END ERROR HANDLING-- */

    /* Compute the max index and zero the pids array */
    maxi = (maxlpid + 31) / 32;
    for (i=0; i<maxi; i++) lpidmask[i] = 0;

    /* Set the bits for the first array */
    for (i=0; i<n1; i++) {
        idx = lpids1[i] / 32;
        bit = lpids1[i] % 32;
        lpidmask[idx] = lpidmask[idx] | (1 << bit);
    }

    /* Look for any duplicates in the second array */
    for (i=0; i<n2; i++) {
        idx = lpids2[i] / 32;
        bit = lpids2[i] % 32;
        /* --BEGIN ERROR HANDLING-- */
        if (lpidmask[idx] & (1 << bit)) {
            MPIU_ERR_SET1(mpi_errno, MPI_ERR_COMM,
                          "**dupprocesses", "**dupprocesses %d", lpids2[i] );
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */
        /* Add a check on duplicates *within* group 2 */
        lpidmask[idx] = lpidmask[idx] | (1 << bit);
    }

    return 0;
}
#endif /* HAVE_ERROR_CHECKING */

#ifndef HAVE_GPID_ROUTINES
/* FIXME: A temporary version for lpids within my comm world */
PMPI_LOCAL int MPID_GPID_GetAllInComm( MPID_Comm *comm_ptr, int local_size,
                                       int local_gpids[], int *singlePG )
{
    int i;
    int *gpid = local_gpids;

    for (i=0; i<comm_ptr->local_size; i++) {
        *gpid++ = 0;
        (void)MPID_VCR_Get_lpid( comm_ptr->vcr[i], gpid );
        gpid++;
    }
    *singlePG = 1;
    return 0;
}

/* FIXME: A temporary version for lpids within my comm world */
PMPI_LOCAL int MPID_GPID_ToLpidArray( int size, int gpid[], int lpid[] )
{
    int i;

    for (i=0; i<size; i++) {
        lpid[i] = *++gpid;  gpid++;
    }
    return 0;
}

/* FIXME: for MPI1, all process ids are relative to MPI_COMM_WORLD.
   For MPI2, we'll need to do something more complex */
PMPI_LOCAL int MPID_VCR_CommFromLpids( MPID_Comm *newcomm_ptr,
                                       int size, const int lpids[] )
{
    MPID_Comm *commworld_ptr;
    int i;

    commworld_ptr = MPIR_Process.comm_world;
    /* Setup the communicator's vc table: remote group */
    MPID_VCRT_Create( size, &newcomm_ptr->vcrt );
    MPID_VCRT_Get_ptr( newcomm_ptr->vcrt, &newcomm_ptr->vcr );
    for (i=0; i<size; i++) {
        /* For rank i in the new communicator, find the corresponding
           rank in the comm world (FIXME FOR MPI2) */
        /* printf( "[%d] Remote rank %d has lpid %d\n",
           MPIR_Process.comm_world->rank, i, lpids[i] ); */
        if (lpids[i] < commworld_ptr->remote_size) {
            MPID_VCR_Dup( commworld_ptr->vcr[lpids[i]],
                          &newcomm_ptr->vcr[i] );
        }
        else {
            /* We must find the corresponding vcr for a given lpid */
            /* FIXME: Error */
            return 1;
            /* MPID_VCR_Dup( ???, &newcomm_ptr->vcr[i] ); */
        }
    }
    return 0;
}
#endif /* HAVE_GPID_ROUTINES */

PMPI_LOCAL int MPID_LPID_GetAllInComm( MPID_Comm *comm_ptr, int local_size,
                                       int local_lpids[] )
{
    int i;

    for (i=0; i<comm_ptr->local_size; i++) {
        (void)MPID_VCR_Get_lpid( comm_ptr->vcr[i], &local_lpids[i] );
    }
    return 0;
}
#endif /* MPICH_MPI_FROM_PMPI */

#undef FUNCNAME
#define FUNCNAME MPI_Intercomm_create

/*@
MPI_Intercomm_create - Creates an intercommunicator from two intracommunicators

Input Parameters:
+ local_comm - Local (intra)communicator
. local_leader - Rank in local_comm of leader (often 0)
. peer_comm - Communicator used to communicate between a
              designated process in the other communicator.
              Significant only at the process in 'local_comm' with
              rank 'local_leader'.
. remote_leader - Rank in peer_comm of remote leader (often 0)
- tag - Message tag to use in constructing intercommunicator; if multiple
  'MPI_Intercomm_creates' are being made, they should use different tags (more
  precisely, ensure that the local and remote leaders are using different
  tags for each 'MPI_Intercomm_create').

Output Parameter:
. comm_out - Created intercommunicator

Notes:
  'peer_comm' is significant only for the process designated the
  'local_leader' in the 'local_comm'.

  The MPI 1.1 Standard contains two mutually exclusive comments on the
  input intracommunicators.  One says that their respective groups must be
  disjoint; the other that the leaders can be the same process.  After
  some discussion by the MPI Forum, it has been decided that the groups must
  be disjoint.  Note that the `reason` given for this in the standard is
  `not` the reason for this choice; rather, the `other` operations on
  intercommunicators (like 'MPI_Intercomm_merge') do not make sense if the
  groups are not disjoint.

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TAG
.N MPI_ERR_EXHAUSTED
.N MPI_ERR_RANK

.seealso: MPI_Intercomm_merge, MPI_Comm_free, MPI_Comm_remote_group,
          MPI_Comm_remote_size
@*/
int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
                         MPI_Comm peer_comm, int remote_leader, int tag,
                         MPI_Comm *newintercomm)
{
    static const char FCNAME[] = "MPI_Intercomm_create";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPID_Comm *peer_comm_ptr = NULL;
    int context_id, final_context_id;
    int remote_size, *remote_lpids=0, *remote_gpids=0, singlePG;
    int local_size, *local_gpids=0, *local_lpids=0;
    int comm_info[3];
    int is_low_group = 0;
    int i;
    MPID_Comm *newcomm_ptr;
    MPIU_CHKLMEM_DECL(4);
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_INTERCOMM_CREATE);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_CS_ENTER();
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_INTERCOMM_CREATE);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(local_comm, mpi_errno);
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( local_comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (comm_ptr) {
                /* Only check if comm_ptr is valid */
                MPIR_ERRTEST_COMM_INTRA(comm_ptr, mpi_errno );
                if ((local_leader < 0 ||
                     local_leader >= comm_ptr->local_size)) {
                    MPIU_ERR_SET2(mpi_errno, MPI_ERR_RANK,
                                  "**ranklocal", "**ranklocal %d %d",
                                  local_leader, comm_ptr->local_size );
                }
                if (comm_ptr->rank == local_leader) {
                    MPIR_ERRTEST_COMM(peer_comm, mpi_errno);
                }
            }
            /* If comm_ptr is not valid, it will be reset to null */
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /*
     * Error checking for this routine requires care.  Because this
     * routine is collective over two different sets of processes,
     * it is relatively easy for the user to try to create an
     * intercommunicator from two overlapping groups of processes.
     * This is made more likely by inconsistencies in the MPI-1
     * specification (clarified in MPI-2) that seemed to allow
     * the groups to overlap.  Because of that, we first check that the
     * groups are in fact disjoint before performing any collective
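The listing above (page 1 of 2; it breaks off at the start of the disjointness-check comment) is the implementation of MPI_Intercomm_create from an MPICH-style MPI library. For callers of the routine, here is a minimal usage sketch, not taken from this file: MPI_COMM_WORLD is split into two disjoint halves, and an intercommunicator is built between them with rank 0 of each half as the local leader and MPI_COMM_WORLD as the peer communicator. The two leaders must use the same tag, and the two groups must be disjoint, which is exactly what MPIR_CheckDisjointLpids above enforces.

/* intercomm_demo.c -- hypothetical example, run with at least 2 processes. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int world_rank, world_size;
    MPI_Comm half_comm, intercomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    /* Split the world into a "low" half and a "high" half (disjoint groups). */
    int color = (world_rank < world_size / 2) ? 0 : 1;
    MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &half_comm);

    /* Remote leader = rank (in the peer comm) of the other half's rank 0.    */
    int remote_leader = (color == 0) ? world_size / 2 : 0;

    /* Tag 1 must match on both leaders; it is used on the peer communicator. */
    MPI_Intercomm_create(half_comm, 0, MPI_COMM_WORLD, remote_leader,
                         1, &intercomm);

    int remote_size;
    MPI_Comm_remote_size(intercomm, &remote_size);
    printf("rank %d: remote group has %d processes\n", world_rank, remote_size);

    MPI_Comm_free(&intercomm);
    MPI_Comm_free(&half_comm);
    MPI_Finalize();
    return 0;
}

If a single intracommunicator spanning both halves is wanted afterwards, MPI_Intercomm_merge (listed in the .seealso block of the documentation above) can merge the intercommunicator back into one intracommunicator.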
