⭐ 虫虫下载站

📄 alltoall.c

📁 C++ code for MPI parallel computing; it compiles with VC or gcc and can be used to set up a parallel-computing experiment environment (a minimal test driver is sketched after the listing below).
💻 C
📖 Page 1 of 3
        /* Blocks are in the reverse order now (comm_size-1 to 0).
         * Reorder them to (0 to comm_size-1) and store them in recvbuf. */
        for (i=0; i<comm_size; i++)
            MPIR_Localcopy((char *) tmp_buf + i*recvcount*recvtype_extent,
                           recvcount, recvtype,
                           (char *) recvbuf + (comm_size-i-1)*recvcount*recvtype_extent,
                           recvcount, recvtype);

        MPIU_Free((char*)tmp_buf + recvtype_true_lb);

#ifdef OLD
        /* Short message. Use recursive doubling. Each process sends all
           its data at each step along with all data it received in
           previous steps. */

        /* need to allocate temporary buffer of size
           sendbuf_extent*comm_size */

        /* get true extent of sendtype */
        mpi_errno = NMPI_Type_get_true_extent(sendtype, &sendtype_true_lb,
                                              &sendtype_true_extent);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno)
        {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */

        sendbuf_extent = sendcount * comm_size *
            (MPIR_MAX(sendtype_true_extent, sendtype_extent));
        tmp_buf = MPIU_Malloc(sendbuf_extent*comm_size);
        /* --BEGIN ERROR HANDLING-- */
        if (!tmp_buf) {
            mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem", 0 );
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */

        /* adjust for potential negative lower bound in datatype */
        tmp_buf = (void *)((char*)tmp_buf - sendtype_true_lb);

        /* copy local sendbuf into tmp_buf at location indexed by rank */
        curr_cnt = sendcount*comm_size;
        mpi_errno = MPIR_Localcopy(sendbuf, curr_cnt, sendtype,
                                   ((char *)tmp_buf + rank*sendbuf_extent),
                                   curr_cnt, sendtype);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno)
        {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */

        mask = 0x1;
        i = 0;
        while (mask < comm_size) {
            dst = rank ^ mask;

            dst_tree_root = dst >> i;
            dst_tree_root <<= i;

            my_tree_root = rank >> i;
            my_tree_root <<= i;

            if (dst < comm_size) {
                mpi_errno = MPIC_Sendrecv(((char *)tmp_buf +
                                           my_tree_root*sendbuf_extent),
                                          curr_cnt, sendtype,
                                          dst, MPIR_ALLTOALL_TAG,
                                          ((char *)tmp_buf +
                                           dst_tree_root*sendbuf_extent),
                                          sendcount*comm_size*mask,
                                          sendtype, dst, MPIR_ALLTOALL_TAG,
                                          comm, &status);
                /* --BEGIN ERROR HANDLING-- */
                if (mpi_errno)
                {
                    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                    return mpi_errno;
                }
                /* --END ERROR HANDLING-- */

                /* in case of non-power-of-two nodes, less data may be
                   received than specified */
                NMPI_Get_count(&status, sendtype, &last_recv_cnt);
                curr_cnt += last_recv_cnt;
            }

            /* if some processes in this process's subtree in this step
               did not have any destination process to communicate with
               because of non-power-of-two, we need to send them the
               result. We use a logarithmic recursive-halfing algorithm
               for this. */

            if (dst_tree_root + mask > comm_size) {
                nprocs_completed = comm_size - my_tree_root - mask;
                /* nprocs_completed is the number of processes in this
                   subtree that have all the data. Send data to others
                   in a tree fashion. First find root of current tree
                   that is being divided into two. k is the number of
                   least-significant bits in this process's rank that
                   must be zeroed out to find the rank of the root */
                j = mask;
                k = 0;
                while (j) {
                    j >>= 1;
                    k++;
                }
                k--;

                tmp_mask = mask >> 1;
                while (tmp_mask) {
                    dst = rank ^ tmp_mask;

                    tree_root = rank >> k;
                    tree_root <<= k;

                    /* send only if this proc has data and destination
                       doesn't have data. at any step, multiple processes
                       can send if they have the data */
                    if ((dst > rank) &&
                        (rank < tree_root + nprocs_completed)
                        && (dst >= tree_root + nprocs_completed)) {
                        /* send the data received in this step above */
                        mpi_errno = MPIC_Send(((char *)tmp_buf +
                                               dst_tree_root*sendbuf_extent),
                                              last_recv_cnt, sendtype,
                                              dst, MPIR_ALLTOALL_TAG,
                                              comm);
                        /* --BEGIN ERROR HANDLING-- */
                        if (mpi_errno)
                        {
                            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                            return mpi_errno;
                        }
                        /* --END ERROR HANDLING-- */
                    }
                    /* recv only if this proc. doesn't have data and sender
                       has data */
                    else if ((dst < rank) &&
                             (dst < tree_root + nprocs_completed) &&
                             (rank >= tree_root + nprocs_completed)) {
                        mpi_errno = MPIC_Recv(((char *)tmp_buf +
                                               dst_tree_root*sendbuf_extent),
                                              sendcount*comm_size*mask,
                                              sendtype,
                                              dst, MPIR_ALLTOALL_TAG,
                                              comm, &status);
                        /* --BEGIN ERROR HANDLING-- */
                        if (mpi_errno)
                        {
                            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                            return mpi_errno;
                        }
                        /* --END ERROR HANDLING-- */
                        NMPI_Get_count(&status, sendtype, &last_recv_cnt);
                        curr_cnt += last_recv_cnt;
                    }
                    tmp_mask >>= 1;
                    k--;
                }
            }

            mask <<= 1;
            i++;
        }

        /* now copy everyone's contribution from tmp_buf to recvbuf */
        for (p=0; p<comm_size; p++) {
            mpi_errno = MPIR_Localcopy(((char *)tmp_buf +
                                        p*sendbuf_extent +
                                        rank*sendcount*sendtype_extent),
                                       sendcount, sendtype,
                                       ((char*)recvbuf +
                                        p*recvcount*recvtype_extent),
                                       recvcount, recvtype);
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno)
            {
                mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                return mpi_errno;
            }
            /* --END ERROR HANDLING-- */
        }

        MPIU_Free((char *)tmp_buf+sendtype_true_lb);
#endif
    }
    else if (nbytes <= MPIR_ALLTOALL_MEDIUM_MSG) {
        /* Medium-size message. Use isend/irecv with scattered
           destinations */
        reqarray = (MPI_Request *) MPIU_Malloc(2*comm_size*sizeof(MPI_Request));
        /* --BEGIN ERROR HANDLING-- */
        if (!reqarray) {
            mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem", 0 );
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */

        starray = (MPI_Status *) MPIU_Malloc(2*comm_size*sizeof(MPI_Status));
        /* --BEGIN ERROR HANDLING-- */
        if (!starray) {
            mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem", 0 );
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */

        /* do the communication -- post all sends and receives: */
        for ( i=0; i<comm_size; i++ ) {
            dst = (rank+i) % comm_size;
            mpi_errno = MPIC_Irecv((char *)recvbuf +
                                   dst*recvcount*recvtype_extent,
                                   recvcount, recvtype, dst,
                                   MPIR_ALLTOALL_TAG, comm,
                                   &reqarray[i]);
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno)
            {
                mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                return mpi_errno;
            }
            /* --END ERROR HANDLING-- */
        }

        for ( i=0; i<comm_size; i++ ) {
            dst = (rank+i) % comm_size;
            mpi_errno = MPIC_Isend((char *)sendbuf +
                                   dst*sendcount*sendtype_extent,
                                   sendcount, sendtype, dst,
                                   MPIR_ALLTOALL_TAG, comm,
                                   &reqarray[i+comm_size]);
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno)
            {
                mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
                return mpi_errno;
            }
            /* --END ERROR HANDLING-- */
        }

        /* ... then wait for *all* of them to finish: */
        mpi_errno = NMPI_Waitall(2*comm_size,reqarray,starray);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno == MPI_ERR_IN_STATUS) {
            for (j=0; j<2*comm_size; j++) {
                if (starray[j].MPI_ERROR != MPI_SUCCESS)
                    mpi_errno = starray[j].MPI_ERROR;
            }
        }
        /* --END ERROR HANDLING-- */

        MPIU_Free(starray);
        MPIU_Free(reqarray);
    }
    else {
        /* Long message. If comm_size is a power-of-two, do a pairwise
           exchange using exclusive-or to create pairs. Else send to
           rank+i, receive from rank-i. */

        /* Make local copy first */
        mpi_errno = MPIR_Localcopy(((char *)sendbuf +
                                    rank*sendcount*sendtype_extent),
                                   sendcount, sendtype,
                                   ((char *)recvbuf +
                                    rank*recvcount*recvtype_extent),
                                   recvcount, recvtype);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno)
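The listing above is internal MPICH code; the description at the top says it can be compiled with VC or gcc and used to set up a parallel-computing experiment environment. As a minimal sketch of how one might exercise it, the hypothetical driver below calls the public MPI_Alltoall entry point that this file implements internally. The buffer layout, file name, and the mpicc/mpiexec commands are assumptions about a typical MPICH installation, not part of the original listing.

/* alltoall_test.c -- minimal, hypothetical test driver (illustration only).
 * Build and run, e.g.:
 *     mpicc -o alltoall_test alltoall_test.c
 *     mpiexec -n 4 ./alltoall_test
 */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, i;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* each process sends one int to every process (including itself) */
    int *sendbuf = malloc(size * sizeof(int));
    int *recvbuf = malloc(size * sizeof(int));
    for (i = 0; i < size; i++)
        sendbuf[i] = rank * 100 + i;    /* value destined for rank i */

    MPI_Alltoall(sendbuf, 1, MPI_INT, recvbuf, 1, MPI_INT, MPI_COMM_WORLD);

    /* after the exchange, recvbuf[i] holds the value rank i sent to us */
    for (i = 0; i < size; i++)
        printf("rank %d received %d from rank %d\n", rank, recvbuf[i], i);

    free(sendbuf);
    free(recvbuf);
    MPI_Finalize();
    return 0;
}

With a per-destination count of 1 the total message is small, so the exchange stays on the short-message path; increasing the count per destination should eventually cross the nbytes thresholds in the listing (MPIR_ALLTOALL_MEDIUM_MSG and above) and exercise the isend/irecv and pairwise-exchange paths as well.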
