
📄 bcast.c

📁 C source for MPI parallel computing; it compiles with VC or gcc and can be used to set up a parallel computing test environment (a minimal environment check is sketched below).
💻 C
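
The listing below is the tail end of an MPICH-style bcast.c (it begins partway through the file): the non-power-of-two correction of the scatter/allgather broadcast, the ring allgather for long messages, the intercommunicator variant MPIR_Bcast_inter, and the public MPI_Bcast entry point. Before digging into it, a quick way to check that the parallel test environment itself works is a minimal standalone program such as the sketch below. It assumes an MPI installation that provides mpi.h, the mpicc wrapper, and mpirun (or mpiexec); none of these come from bcast.c itself.

/* check_env.c -- minimal sketch to verify an MPI test environment.
   Assumes an MPI installation providing <mpi.h>, mpicc and mpirun.
   Build:  mpicc -o check_env check_env.c
   Run:    mpirun -np 4 ./check_env                                   */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, size;

    MPI_Init(&argc, &argv);                 /* start the MPI runtime    */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);   /* this process's rank      */
    MPI_Comm_size(MPI_COMM_WORLD, &size);   /* total number of processes */

    printf("Hello from rank %d of %d\n", rank, size);

    MPI_Finalize();                         /* shut the runtime down    */
    return 0;
}

The bcast.c listing itself follows.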
                 executed because we are not using recursive
                 doubling for non power of two. Mark it as experimental
                 so that it doesn't show up as red in the coverage tests. */

              /* --BEGIN EXPERIMENTAL-- */
              if (dst_tree_root + mask > comm_size)
              {
                  nprocs_completed = comm_size - my_tree_root - mask;
                  /* nprocs_completed is the number of processes in this
                     subtree that have all the data. Send data to others
                     in a tree fashion. First find root of current tree
                     that is being divided into two. k is the number of
                     least-significant bits in this process's rank that
                     must be zeroed out to find the rank of the root */
                  j = mask;
                  k = 0;
                  while (j)
                  {
                      j >>= 1;
                      k++;
                  }
                  k--;

                  offset = scatter_size * (my_tree_root + mask);
                  tmp_mask = mask >> 1;

                  while (tmp_mask)
                  {
                      relative_dst = relative_rank ^ tmp_mask;
                      dst = (relative_dst + root) % comm_size;

                      tree_root = relative_rank >> k;
                      tree_root <<= k;

                      /* send only if this proc has data and destination
                         doesn't have data. */

                      /* if (rank == 3) {
                         printf("rank %d, dst %d, root %d, nprocs_completed %d\n", relative_rank, relative_dst, tree_root, nprocs_completed);
                         fflush(stdout);
                         }*/

                      if ((relative_dst > relative_rank) &&
                          (relative_rank < tree_root + nprocs_completed) &&
                          (relative_dst >= tree_root + nprocs_completed))
                      {
                          /* printf("Rank %d, send to %d, offset %d, size %d\n", rank, dst, offset, recv_size);
                             fflush(stdout); */
                          mpi_errno = MPIC_Send(((char *)tmp_buf + offset),
                                                recv_size, MPI_BYTE, dst,
                                                MPIR_BCAST_TAG, comm);
                          /* recv_size was set in the previous
                             receive. that's the amount of data to be
                             sent now. */
                          if (mpi_errno != MPI_SUCCESS) {
                              MPIU_ERR_POP(mpi_errno);
                          }
                      }
                      /* recv only if this proc. doesn't have data and sender
                         has data */
                      else if ((relative_dst < relative_rank) &&
                               (relative_dst < tree_root + nprocs_completed) &&
                               (relative_rank >= tree_root + nprocs_completed))
                      {
                          /* printf("Rank %d waiting to recv from rank %d\n",
                             relative_rank, dst); */
                          mpi_errno = MPIC_Recv(((char *)tmp_buf + offset),
                                                scatter_size*nprocs_completed,
                                                MPI_BYTE, dst, MPIR_BCAST_TAG,
                                                comm, &status);
                          /* nprocs_completed is also equal to the no. of processes
                             whose data we don't have */
                          if (mpi_errno != MPI_SUCCESS) {
                              MPIU_ERR_POP(mpi_errno);
                          }
                          NMPI_Get_count(&status, MPI_BYTE, &recv_size);
                          curr_size += recv_size;
                          /* printf("Rank %d, recv from %d, offset %d, size %d\n", rank, dst, offset, recv_size);
                             fflush(stdout);*/
                      }
                      tmp_mask >>= 1;
                      k--;
                  }
              }
              /* --END EXPERIMENTAL-- */

              mask <<= 1;
              i++;
          }
      }
      else
      {
          /* long-message allgather or medium-size but non-power-of-two.
             use ring algorithm. */

          recvcnts = MPIU_Malloc(comm_size*sizeof(int));
          /* --BEGIN ERROR HANDLING-- */
          if (!recvcnts)
          {
              mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem",
                                                "**nomem %d", comm_size * sizeof(int));
              return mpi_errno;
          }
          /* --END ERROR HANDLING-- */
          displs = MPIU_Malloc(comm_size*sizeof(int));
          /* --BEGIN ERROR HANDLING-- */
          if (!displs)
          {
              mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem",
                                                "**nomem %d", comm_size * sizeof(int));
              return mpi_errno;
          }
          /* --END ERROR HANDLING-- */

          for (i=0; i<comm_size; i++)
          {
              recvcnts[i] = nbytes - i*scatter_size;
              if (recvcnts[i] > scatter_size)
                  recvcnts[i] = scatter_size;
              if (recvcnts[i] < 0)
                  recvcnts[i] = 0;
          }

          displs[0] = 0;
          for (i=1; i<comm_size; i++)
              displs[i] = displs[i-1] + recvcnts[i-1];

          left  = (comm_size + rank - 1) % comm_size;
          right = (rank + 1) % comm_size;

          j     = rank;
          jnext = left;
          for (i=1; i<comm_size; i++)
          {
              mpi_errno =
                  MPIC_Sendrecv((char *)tmp_buf +
                                displs[(j-root+comm_size)%comm_size],
                                recvcnts[(j-root+comm_size)%comm_size],
                                MPI_BYTE, right, MPIR_BCAST_TAG,
                                (char *)tmp_buf +
                                displs[(jnext-root+comm_size)%comm_size],
                                recvcnts[(jnext-root+comm_size)%comm_size],
                                MPI_BYTE, left,
                                MPIR_BCAST_TAG, comm, MPI_STATUS_IGNORE);
              if (mpi_errno != MPI_SUCCESS) {
                  MPIU_ERR_POP(mpi_errno);
              }
              j     = jnext;
              jnext = (comm_size + jnext - 1) % comm_size;
          }

          MPIU_Free(recvcnts);
          MPIU_Free(displs);
      }

      if (!is_contig || !is_homogeneous)
      {
          if (rank != root)
          {
              position = 0;
              NMPI_Unpack(tmp_buf, nbytes, &position, buffer, count,
                          datatype, comm);
          }
          MPIU_Free(tmp_buf);
      }
  }

 fn_exit:
  /* check if multiple threads are calling this collective function */
  MPIDU_ERR_CHECK_MULTIPLE_THREADS_EXIT( comm_ptr );
  return mpi_errno;
 fn_fail:
  goto fn_exit;
}
/* end:nested */

/* begin:nested */
/* Not PMPI_LOCAL because it is called in intercomm allgather */
int MPIR_Bcast_inter (
    void *buffer,
    int count,
    MPI_Datatype datatype,
    int root,
    MPID_Comm *comm_ptr )
{
/*  Intercommunicator broadcast.
    Root sends to rank 0 in remote group. Remote group does local
    intracommunicator broadcast.
*/
    static const char FCNAME[] = "MPIR_Bcast_inter";
    int rank, mpi_errno;
    MPI_Status status;
    MPID_Comm *newcomm_ptr = NULL;
    MPI_Comm comm;

    comm = comm_ptr->handle;

    if (root == MPI_PROC_NULL)
    {
        /* local processes other than root do nothing */
        mpi_errno = MPI_SUCCESS;
    }
    else if (root == MPI_ROOT)
    {
        /* root sends to rank 0 on remote group and returns */
        MPIDU_ERR_CHECK_MULTIPLE_THREADS_ENTER( comm_ptr );
        mpi_errno = MPIC_Send(buffer, count, datatype, 0,
                              MPIR_BCAST_TAG, comm);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno != MPI_SUCCESS)
        {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL,
                                             FCNAME, __LINE__, MPI_ERR_OTHER,
                                             "**fail", 0);
        }
        /* --END ERROR HANDLING-- */
        MPIDU_ERR_CHECK_MULTIPLE_THREADS_EXIT( comm_ptr );
        return mpi_errno;
    }
    else
    {
        /* remote group. rank 0 on remote group receives from root */

        rank = comm_ptr->rank;

        if (rank == 0)
        {
            mpi_errno = MPIC_Recv(buffer, count, datatype, root,
                                  MPIR_BCAST_TAG, comm, &status);
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno != MPI_SUCCESS)
            {
                mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL,
                                                 FCNAME, __LINE__,
                                                 MPI_ERR_OTHER, "**fail", 0);
                return mpi_errno;
            }
            /* --END ERROR HANDLING-- */
        }

        /* Get the local intracommunicator */
        if (!comm_ptr->local_comm)
            MPIR_Setup_intercomm_localcomm( comm_ptr );

        newcomm_ptr = comm_ptr->local_comm;

        /* now do the usual broadcast on this intracommunicator
           with rank 0 as root. */
        mpi_errno = MPIR_Bcast(buffer, count, datatype, 0, newcomm_ptr);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno != MPI_SUCCESS)
        {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
        }
        /* --END ERROR HANDLING-- */
    }

    return mpi_errno;
}
/* end:nested */
#endif

#undef FUNCNAME
#define FUNCNAME MPI_Bcast

/*@
MPI_Bcast - Broadcasts a message from the process with rank "root" to
            all other processes of the communicator

Input/Output Parameter:
. buffer - starting address of buffer (choice)

Input Parameters:
+ count - number of entries in buffer (integer)
. datatype - data type of buffer (handle)
. root - rank of broadcast root (integer)
- comm - communicator (handle)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
.N MPI_ERR_ROOT
@*/
int MPI_Bcast( void *buffer, int count, MPI_Datatype datatype, int root,
               MPI_Comm comm )
{
    static const char FCNAME[] = "MPI_Bcast";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_BCAST);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_CS_ENTER();
    MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_BCAST);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Datatype *datatype_ptr = NULL;

            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;

            MPIR_ERRTEST_COUNT(count, mpi_errno);
            MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno);
            if (comm_ptr->comm_kind == MPID_INTRACOMM) {
                MPIR_ERRTEST_INTRA_ROOT(comm_ptr, root, mpi_errno);
            }
            else {
                MPIR_ERRTEST_INTER_ROOT(comm_ptr, root, mpi_errno);
            }

            if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
                MPID_Datatype_get_ptr(datatype, datatype_ptr);
                MPID_Datatype_valid_ptr( datatype_ptr, mpi_errno );
                MPID_Datatype_committed_ptr( datatype_ptr, mpi_errno );
            }
            MPIR_ERRTEST_BUF_INPLACE(buffer, count, mpi_errno);
            MPIR_ERRTEST_USERBUFFER(buffer,count,datatype,mpi_errno);

            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    if (comm_ptr->coll_fns != NULL && comm_ptr->coll_fns->Bcast != NULL)
    {
        mpi_errno = comm_ptr->coll_fns->Bcast(buffer, count,
                                              datatype, root, comm_ptr);
    }
    else
    {
        MPIR_Nest_incr();
        if (comm_ptr->comm_kind == MPID_INTRACOMM)
        {
            /* intracommunicator */
            mpi_errno = MPIR_Bcast( buffer, count, datatype, root, comm_ptr );
        }
        else
        {
            /* intercommunicator */
            mpi_errno = MPIR_Bcast_inter( buffer, count, datatype,
                                          root, comm_ptr );
        }
        MPIR_Nest_decr();
    }

    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_BCAST);
    MPID_CS_EXIT();
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_bcast",
            "**mpi_bcast %p %d %D %d %C", buffer, count, datatype, root, comm);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
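
As documented above, MPI_Bcast takes the buffer address, the element count, the datatype, the root rank, and the communicator, and every process in the communicator must call it with matching count, datatype, root, and communicator. A minimal usage sketch follows; the array length and root rank are arbitrary illustration values, not anything prescribed by bcast.c.

/* bcast_demo.c -- sketch of calling MPI_Bcast as documented above.
   Only standard MPI calls are used; the buffer size (4 ints) and
   root rank (0) are arbitrary illustration values.                   */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, i;
    int data[4] = {0, 0, 0, 0};

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {                 /* root fills the buffer           */
        for (i = 0; i < 4; i++)
            data[i] = i + 1;
    }

    /* every rank calls MPI_Bcast with the same count, datatype, root
       and communicator; afterwards all ranks hold root's data         */
    MPI_Bcast(data, 4, MPI_INT, 0, MPI_COMM_WORLD);

    printf("rank %d received: %d %d %d %d\n",
           rank, data[0], data[1], data[2], data[3]);

    MPI_Finalize();
    return 0;
}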
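
MPIR_Bcast_inter above implements the intercommunicator case: the root passes MPI_ROOT, the other processes in the root's group pass MPI_PROC_NULL, and processes in the remote group pass the root's rank within the remote group. The following is a hedged sketch of how user code exercises those semantics, using only standard MPI calls and assuming at least two processes; the split into two halves and the tag value 99 are arbitrary choices for illustration.

/* intercomm_bcast_demo.c -- sketch of the intercommunicator broadcast
   semantics described in MPIR_Bcast_inter above. Run with at least
   two processes, e.g. mpirun -np 4 ./intercomm_bcast_demo            */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int world_rank, world_size, color, value = 0;
    MPI_Comm local_comm, inter_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    /* split MPI_COMM_WORLD into two halves */
    color = (world_rank < world_size / 2) ? 0 : 1;
    MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &local_comm);

    /* build an intercommunicator between the two halves; the remote
       leader is identified by its rank in MPI_COMM_WORLD             */
    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD,
                         (color == 0) ? world_size / 2 : 0,
                         99, &inter_comm);

    if (color == 0) {
        /* sending group: local rank 0 is the broadcast root; everyone
           else in this group passes MPI_PROC_NULL                    */
        int local_rank;
        MPI_Comm_rank(local_comm, &local_rank);
        value = 42;
        MPI_Bcast(&value, 1, MPI_INT,
                  (local_rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
                  inter_comm);
    } else {
        /* receiving group: pass the root's rank in the remote group  */
        MPI_Bcast(&value, 1, MPI_INT, 0, inter_comm);
        printf("world rank %d received %d over the intercomm\n",
               world_rank, value);
    }

    MPI_Comm_free(&inter_comm);
    MPI_Comm_free(&local_comm);
    MPI_Finalize();
    return 0;
}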
