
📄 type_create_darray.c

📁 MPI parallel-computing source code (C/C++); compiles with VC or gcc; can be used to set up a parallel-computing test environment
💻 C
📖 Page 1 of 2
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
 *  (C) 2001 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#include "mpiimpl.h"

/* -- Begin Profiling Symbol Block for routine MPI_Type_create_darray */
#if defined(HAVE_PRAGMA_WEAK)
#pragma weak MPI_Type_create_darray = PMPI_Type_create_darray
#elif defined(HAVE_PRAGMA_HP_SEC_DEF)
#pragma _HP_SECONDARY_DEF PMPI_Type_create_darray  MPI_Type_create_darray
#elif defined(HAVE_PRAGMA_CRI_DUP)
#pragma _CRI duplicate MPI_Type_create_darray as PMPI_Type_create_darray
#endif
/* -- End Profiling Symbol Block */

#ifndef MIN
#define MIN(__a, __b) (((__a) < (__b)) ? (__a) : (__b))
#endif

/* Define MPICH_MPI_FROM_PMPI if weak symbols are not supported to build
   the MPI routines */
#ifndef MPICH_MPI_FROM_PMPI
#define MPI_Type_create_darray PMPI_Type_create_darray

PMPI_LOCAL int MPIR_Type_block(int *array_of_gsizes, int dim, int ndims,
                               int nprocs, int rank, int darg, int order,
                               MPI_Aint orig_extent, MPI_Datatype type_old,
                               MPI_Datatype *type_new, MPI_Aint *st_offset);

PMPI_LOCAL int MPIR_Type_cyclic(int *array_of_gsizes, int dim, int ndims,
                                int nprocs, int rank, int darg, int order,
                                MPI_Aint orig_extent, MPI_Datatype type_old,
                                MPI_Datatype *type_new, MPI_Aint *st_offset);

PMPI_LOCAL int MPIR_Type_block(int *array_of_gsizes, int dim, int ndims,
                               int nprocs, int rank, int darg, int order,
                               MPI_Aint orig_extent, MPI_Datatype type_old,
                               MPI_Datatype *type_new, MPI_Aint *st_offset)
{
    /* nprocs = no. of processes in dimension dim of grid
       rank = coordinate of this process in dimension dim */
    static const char FCNAME[] = "MPIR_Type_block";
    int mpi_errno, blksize, global_size, mysize, i, j;
    MPI_Aint stride;

    global_size = array_of_gsizes[dim];

    if (darg == MPI_DISTRIBUTE_DFLT_DARG)
        blksize = (global_size + nprocs - 1) / nprocs;
    else {
        blksize = darg;

#ifdef HAVE_ERROR_CHECKING
        if (blksize <= 0) {
            mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                             FCNAME, __LINE__, MPI_ERR_ARG,
                                             "**darrayblock",
                                             "**darrayblock %d", blksize);
            return mpi_errno;
        }
        if (blksize * nprocs < global_size) {
            mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                             FCNAME, __LINE__, MPI_ERR_ARG,
                                             "**darrayblock2",
                                             "**darrayblock2 %d %d",
                                             blksize * nprocs, global_size);
            return mpi_errno;
        }
#endif
    }

    j = global_size - blksize * rank;
    mysize = MIN(blksize, j);
    if (mysize < 0) mysize = 0;

    stride = orig_extent;
    if (order == MPI_ORDER_FORTRAN) {
        if (dim == 0) {
            mpi_errno = MPID_Type_contiguous(mysize, type_old, type_new);
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno != MPI_SUCCESS) {
                mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                                 FCNAME, __LINE__, MPI_ERR_OTHER,
                                                 "**fail", 0);
                return mpi_errno;
            }
            /* --END ERROR HANDLING-- */
        }
        else {
            for (i = 0; i < dim; i++) stride *= array_of_gsizes[i];
            mpi_errno = MPID_Type_vector(mysize, 1, stride,
                                         1, /* stride in bytes */
                                         type_old, type_new);
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno != MPI_SUCCESS) {
                mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                                 FCNAME, __LINE__, MPI_ERR_OTHER,
                                                 "**fail", 0);
                return mpi_errno;
            }
            /* --END ERROR HANDLING-- */
        }
    }
    else {
        if (dim == ndims - 1) {
            mpi_errno = MPID_Type_contiguous(mysize, type_old, type_new);
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno != MPI_SUCCESS) {
                mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                                 FCNAME, __LINE__, MPI_ERR_OTHER,
                                                 "**fail", 0);
                return mpi_errno;
            }
            /* --END ERROR HANDLING-- */
        }
        else {
            for (i = ndims - 1; i > dim; i--) stride *= array_of_gsizes[i];
            mpi_errno = MPID_Type_vector(mysize, 1, stride,
                                         1, /* stride in bytes */
                                         type_old, type_new);
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno != MPI_SUCCESS) {
                mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                                 FCNAME, __LINE__, MPI_ERR_OTHER,
                                                 "**fail", 0);
                return mpi_errno;
            }
            /* --END ERROR HANDLING-- */
        }
    }

    *st_offset = blksize * rank;
    /* in terms of no. of elements of type oldtype in this dimension */
    if (mysize == 0) *st_offset = 0;

    return MPI_SUCCESS;
}


PMPI_LOCAL int MPIR_Type_cyclic(int *array_of_gsizes, int dim, int ndims,
                                int nprocs, int rank, int darg, int order,
                                MPI_Aint orig_extent, MPI_Datatype type_old,
                                MPI_Datatype *type_new, MPI_Aint *st_offset)
{
    /* nprocs = no. of processes in dimension dim of grid
       rank = coordinate of this process in dimension dim */
    static const char FCNAME[] = "MPIR_Type_cyclic";
    int mpi_errno, blksize, i, blklens[3], st_index, end_index,
        local_size, rem, count;
    MPI_Aint stride, disps[3];
    MPI_Datatype type_tmp, types[3];

    if (darg == MPI_DISTRIBUTE_DFLT_DARG) blksize = 1;
    else blksize = darg;

#ifdef HAVE_ERROR_CHECKING
    if (blksize <= 0) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                         FCNAME, __LINE__, MPI_ERR_ARG,
                                         "**darraycyclic",
                                         "**darraycyclic %d", blksize);
        return mpi_errno;
    }
#endif

    st_index = rank * blksize;
    end_index = array_of_gsizes[dim] - 1;

    if (end_index < st_index) local_size = 0;
    else {
        local_size = ((end_index - st_index + 1) / (nprocs * blksize)) * blksize;
        rem = (end_index - st_index + 1) % (nprocs * blksize);
        local_size += MIN(rem, blksize);
    }

    count = local_size / blksize;
    rem = local_size % blksize;

    stride = nprocs * blksize * orig_extent;
    if (order == MPI_ORDER_FORTRAN)
        for (i = 0; i < dim; i++) stride *= array_of_gsizes[i];
    else
        for (i = ndims - 1; i > dim; i--) stride *= array_of_gsizes[i];

    mpi_errno = MPID_Type_vector(count, blksize, stride,
                                 1, /* stride in bytes */
                                 type_old, type_new);
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno != MPI_SUCCESS) {
        mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                         FCNAME, __LINE__, MPI_ERR_OTHER,
                                         "**fail", 0);
        return mpi_errno;
    }
    /* --END ERROR HANDLING-- */

    if (rem) {
        /* if the last block is of size less than blksize, include
           it separately using MPI_Type_struct */
        types[0] = *type_new;
        types[1] = type_old;
        disps[0] = 0;
        disps[1] = count * stride;
        blklens[0] = 1;
        blklens[1] = rem;

        mpi_errno = MPID_Type_struct(2, blklens, disps, types, &type_tmp);
        MPIR_Nest_incr();
        NMPI_Type_free(type_new);
        MPIR_Nest_decr();
        *type_new = type_tmp;

        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno != MPI_SUCCESS) {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                             FCNAME, __LINE__, MPI_ERR_OTHER,
                                             "**fail", 0);
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */
    }

    /* In the first iteration, we need to set the displacement in that
       dimension correctly. */
    if ( ((order == MPI_ORDER_FORTRAN) && (dim == 0)) ||
         ((order == MPI_ORDER_C) && (dim == ndims - 1)) )
    {
        types[0] = MPI_LB;
        disps[0] = 0;
        types[1] = *type_new;
        disps[1] = rank * blksize * orig_extent;
        types[2] = MPI_UB;
        disps[2] = orig_extent * array_of_gsizes[dim];
        blklens[0] = blklens[1] = blklens[2] = 1;

        mpi_errno = MPID_Type_struct(3, blklens, disps, types, &type_tmp);
        MPIR_Nest_incr();
        NMPI_Type_free(type_new);
        MPIR_Nest_decr();
        *type_new = type_tmp;

        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno != MPI_SUCCESS) {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                             FCNAME, __LINE__, MPI_ERR_OTHER,
                                             "**fail", 0);
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */

        *st_offset = 0;  /* set it to 0 because it is taken care of in
                            the struct above */
    }
    else {
        *st_offset = rank * blksize;
        /* st_offset is in terms of no. of elements of type oldtype in
         * this dimension */
    }

    if (local_size == 0) *st_offset = 0;

    return MPI_SUCCESS;
}
#endif

#undef FUNCNAME
#define FUNCNAME MPI_Type_create_darray

/*@
   MPI_Type_create_darray - Create a datatype representing a distributed array

   Input Parameters:
+ size - size of process group (positive integer)
. rank - rank in process group (nonnegative integer)
. ndims - number of array dimensions as well as process grid dimensions (positive integer)
. array_of_gsizes - number of elements of type oldtype in each dimension of global array (array of positive integers)
. array_of_distribs - distribution of array in each dimension (array of state)
. array_of_dargs - distribution argument in each dimension (array of positive integers)
. array_of_psizes - size of process grid in each dimension (array of positive integers)
. order - array storage order flag (state)
- oldtype - old datatype (handle)

   Output Parameter:
. newtype - new datatype (handle)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_create_darray(int size, int rank, int ndims,
                           int array_of_gsizes[], int array_of_distribs[],
                           int array_of_dargs[], int array_of_psizes[],
                           int order, MPI_Datatype oldtype,
                           MPI_Datatype *newtype)
{
    static const char FCNAME[] = "MPI_Type_create_darray";
    int mpi_errno = MPI_SUCCESS, i;
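
The listing above is the MPICH implementation side of MPI_Type_create_darray. For reference, here is a minimal usage sketch from an application's point of view; it is not part of the MPICH source. The 100x100 global array, the int element type, and the use of MPI_Dims_create to choose the process grid are illustrative assumptions.

/* Minimal sketch: each process builds a datatype describing its block of a
 * block-distributed 100x100 global array of ints (sizes are assumptions). */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int gsizes[2]   = {100, 100};                       /* global array is 100x100 */
    int distribs[2] = {MPI_DISTRIBUTE_BLOCK, MPI_DISTRIBUTE_BLOCK};
    int dargs[2]    = {MPI_DISTRIBUTE_DFLT_DARG, MPI_DISTRIBUTE_DFLT_DARG};
    int psizes[2]   = {0, 0};                           /* let MPI choose the grid */
    MPI_Datatype darray_type;

    MPI_Dims_create(size, 2, psizes);                   /* e.g. 4 procs -> 2x2 grid */
    MPI_Type_create_darray(size, rank, 2, gsizes, distribs, dargs, psizes,
                           MPI_ORDER_C, MPI_INT, &darray_type);
    MPI_Type_commit(&darray_type);

    /* darray_type now describes this process's portion of the global array;
       a common use is as the filetype in MPI_File_set_view for parallel I/O. */
    if (rank == 0) printf("darray type created on %d processes\n", size);

    MPI_Type_free(&darray_type);
    MPI_Finalize();
    return 0;
}

With MPICH or another MPI implementation installed, a program like this is typically compiled with mpicc and launched with mpiexec (for example on 4 processes), which is one way to exercise the routine in a small parallel test environment.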
