
📄 type_create_darray.c

📁 C/C++ code for MPI parallel computing; compiles with VC or gcc and can be used to set up a parallel-computing test environment
💻 C
    int procs, tmp_rank, tmp_size, blklens[3], *coords;
    MPI_Aint *st_offsets, orig_extent, disps[3];
    MPI_Datatype type_old, type_new = MPI_DATATYPE_NULL, types[3];
    int *ints;
    MPID_Datatype *datatype_ptr = NULL;
    MPIU_CHKLMEM_DECL(3);
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_TYPE_CREATE_DARRAY);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_CS_ENTER();
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_TYPE_CREATE_DARRAY);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_DATATYPE(oldtype, "datatype", mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Datatype_get_ptr(oldtype, datatype_ptr);
    MPID_Datatype_get_extent_macro(oldtype, orig_extent);

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Check parameters */
            MPIR_ERRTEST_ARGNEG(rank, "rank", mpi_errno);
            MPIR_ERRTEST_ARGNONPOS(size, "size", mpi_errno);
            MPIR_ERRTEST_ARGNONPOS(ndims, "ndims", mpi_errno);
            MPIR_ERRTEST_ARGNULL(array_of_gsizes, "array_of_gsizes", mpi_errno);
            MPIR_ERRTEST_ARGNULL(array_of_distribs, "array_of_distribs", mpi_errno);
            MPIR_ERRTEST_ARGNULL(array_of_dargs, "array_of_dargs", mpi_errno);
            MPIR_ERRTEST_ARGNULL(array_of_psizes, "array_of_psizes", mpi_errno);
            if (order != MPI_ORDER_C && order != MPI_ORDER_FORTRAN) {
                mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
                                                 MPIR_ERR_RECOVERABLE,
                                                 FCNAME,
                                                 __LINE__,
                                                 MPI_ERR_ARG,
                                                 "**arg",
                                                 "**arg %s",
                                                 "order");
            }
            for (i=0; mpi_errno == MPI_SUCCESS && i < ndims; i++) {
                MPIR_ERRTEST_ARGNONPOS(array_of_gsizes[i], "gsize", mpi_errno);
                MPIR_ERRTEST_ARGNONPOS(array_of_psizes[i], "psize", mpi_errno);
                if ((array_of_distribs[i] != MPI_DISTRIBUTE_NONE) &&
                    (array_of_distribs[i] != MPI_DISTRIBUTE_BLOCK) &&
                    (array_of_distribs[i] != MPI_DISTRIBUTE_CYCLIC))
                {
                    mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
                                                     MPIR_ERR_RECOVERABLE,
                                                     FCNAME,
                                                     __LINE__,
                                                     MPI_ERR_ARG,
                                                     "**darrayunknown",
                                                     0);
                }
                if ((array_of_dargs[i] != MPI_DISTRIBUTE_DFLT_DARG) &&
                    (array_of_dargs[i] <= 0))
                {
                    mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
                                                     MPIR_ERR_RECOVERABLE,
                                                     FCNAME,
                                                     __LINE__,
                                                     MPI_ERR_ARG,
                                                     "**arg",
                                                     "**arg %s",
                                                     "array_of_dargs");
                }
                if ((array_of_distribs[i] == MPI_DISTRIBUTE_NONE) &&
                    (array_of_psizes[i] != 1))
                {
                    mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
                                                     MPIR_ERR_RECOVERABLE,
                                                     FCNAME,
                                                     __LINE__,
                                                     MPI_ERR_ARG,
                                                     "**darraydist",
                                                     "**darraydist %d %d",
                                                     i, array_of_psizes[i]);
                }
            }

            /* TODO: GET THIS CHECK IN ALSO */
#if 0
            /* check if MPI_Aint is large enough for size of global array.
               if not, complain. */
            size_with_aint = orig_extent;
            for (i=0; i<ndims; i++) size_with_aint *= array_of_gsizes[i];
            size_with_offset = orig_extent;
            for (i=0; i<ndims; i++) size_with_offset *= array_of_gsizes[i];
            if (size_with_aint != size_with_offset) {
                FPRINTF(stderr, "MPI_Type_create_darray: Can't use an array of this size unless the MPI implementation defines a 64-bit MPI_Aint\n");
                MPI_Abort(MPI_COMM_WORLD, 1);
            }
#endif

            /* Validate datatype_ptr */
            MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno);
            /* If datatype_ptr is not valid, it will be reset to null */
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno) goto fn_fail;
            /* --END ERROR HANDLING-- */
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* calculate position in Cartesian grid as MPI would (row-major
       ordering) */
    MPIU_CHKLMEM_MALLOC_ORJUMP(coords, int *, ndims * sizeof(int),
                               mpi_errno, "position is Cartesian grid");

    procs = size;
    tmp_rank = rank;
    for (i=0; i<ndims; i++) {
        procs = procs/array_of_psizes[i];
        coords[i] = tmp_rank/procs;
        tmp_rank = tmp_rank % procs;
    }

    MPIU_CHKLMEM_MALLOC_ORJUMP(st_offsets, MPI_Aint *, ndims * sizeof(MPI_Aint),
                               mpi_errno, "st_offsets");

    type_old = oldtype;

    if (order == MPI_ORDER_FORTRAN) {
        /* dimension 0 changes fastest */
        for (i=0; i<ndims; i++) {
            switch(array_of_distribs[i]) {
            case MPI_DISTRIBUTE_BLOCK:
                mpi_errno = MPIR_Type_block(array_of_gsizes,
                                            i,
                                            ndims,
                                            array_of_psizes[i],
                                            coords[i],
                                            array_of_dargs[i],
                                            order,
                                            orig_extent,
                                            type_old,
                                            &type_new,
                                            st_offsets+i);
                break;
            case MPI_DISTRIBUTE_CYCLIC:
                mpi_errno = MPIR_Type_cyclic(array_of_gsizes,
                                             i,
                                             ndims,
                                             array_of_psizes[i],
                                             coords[i],
                                             array_of_dargs[i],
                                             order,
                                             orig_extent,
                                             type_old,
                                             &type_new,
                                             st_offsets+i);
                break;
            case MPI_DISTRIBUTE_NONE:
                /* treat it as a block distribution on 1 process */
                mpi_errno = MPIR_Type_block(array_of_gsizes,
                                            i,
                                            ndims,
                                            1,
                                            0,
                                            MPI_DISTRIBUTE_DFLT_DARG,
                                            order,
                                            orig_extent,
                                            type_old,
                                            &type_new,
                                            st_offsets+i);
                break;
            }
            if (i)
            {
                MPIR_Nest_incr();
                NMPI_Type_free(&type_old);
                MPIR_Nest_decr();
            }
            type_old = type_new;

            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
            /* --END ERROR HANDLING-- */
        }

        /* add displacement and UB */
        disps[1] = st_offsets[0];
        tmp_size = 1;
        for (i=1; i<ndims; i++) {
            tmp_size *= array_of_gsizes[i-1];
            disps[1] += tmp_size*st_offsets[i];
        }
        /* rest done below for both Fortran and C order */
    }
    else /* order == MPI_ORDER_C */ {
        /* dimension ndims-1 changes fastest */
        for (i=ndims-1; i>=0; i--) {
            switch(array_of_distribs[i]) {
            case MPI_DISTRIBUTE_BLOCK:
                mpi_errno = MPIR_Type_block(array_of_gsizes,
                                            i,
                                            ndims,
                                            array_of_psizes[i],
                                            coords[i],
                                            array_of_dargs[i],
                                            order,
                                            orig_extent,
                                            type_old,
                                            &type_new,
                                            st_offsets+i);
                break;
            case MPI_DISTRIBUTE_CYCLIC:
                mpi_errno = MPIR_Type_cyclic(array_of_gsizes,
                                             i,
                                             ndims,
                                             array_of_psizes[i],
                                             coords[i],
                                             array_of_dargs[i],
                                             order,
                                             orig_extent,
                                             type_old,
                                             &type_new,
                                             st_offsets+i);
                break;
            case MPI_DISTRIBUTE_NONE:
                /* treat it as a block distribution on 1 process */
                mpi_errno = MPIR_Type_block(array_of_gsizes,
                                            i,
                                            ndims,
                                            array_of_psizes[i],
                                            coords[i],
                                            MPI_DISTRIBUTE_DFLT_DARG,
                                            order,
                                            orig_extent,
                                            type_old,
                                            &type_new,
                                            st_offsets+i);
                break;
            }
            if (i != ndims-1)
            {
                MPIR_Nest_incr();
                NMPI_Type_free(&type_old);
                MPIR_Nest_decr();
            }
            type_old = type_new;

            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
            /* --END ERROR HANDLING-- */
        }

        /* add displacement and UB */
        disps[1] = st_offsets[ndims-1];
        tmp_size = 1;
        for (i=ndims-2; i>=0; i--) {
            tmp_size *= array_of_gsizes[i+1];
            disps[1] += tmp_size*st_offsets[i];
        }
    }

    disps[1] *= orig_extent;

    disps[2] = orig_extent;
    for (i=0; i<ndims; i++) disps[2] *= array_of_gsizes[i];

    disps[0] = 0;
    blklens[0] = blklens[1] = blklens[2] = 1;
    types[0] = MPI_LB;
    types[1] = type_new;
    types[2] = MPI_UB;

    mpi_errno = MPID_Type_struct(3,
                                 blklens,
                                 disps,
                                 types,
                                 newtype);
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;
    /* --END ERROR HANDLING-- */

    MPIR_Nest_incr();
    NMPI_Type_free(&type_new);
    MPIR_Nest_decr();

    /* at this point we have the new type, and we've cleaned up any
     * intermediate types created in the process.  we just need to save
     * all our contents/envelope information.
     */

    /* Save contents */
    MPIU_CHKLMEM_MALLOC_ORJUMP(ints, int *, (4 * ndims + 4) * sizeof(int),
                               mpi_errno, "content description");

    ints[0] = size;
    ints[1] = rank;
    ints[2] = ndims;
    for (i=0; i < ndims; i++) {
        ints[i + 3] = array_of_gsizes[i];
    }
    for (i=0; i < ndims; i++) {
        ints[i + ndims + 3] = array_of_distribs[i];
    }
    for (i=0; i < ndims; i++) {
        ints[i + 2*ndims + 3] = array_of_dargs[i];
    }
    for (i=0; i < ndims; i++) {
        ints[i + 3*ndims + 3] = array_of_psizes[i];
    }
    ints[4*ndims + 3] = order;

    MPID_Datatype_get_ptr(*newtype, datatype_ptr);
    mpi_errno = MPID_Datatype_set_contents(datatype_ptr,
                                           MPI_COMBINER_DARRAY,
                                           4*ndims + 4,
                                           0,
                                           1,
                                           ints,
                                           NULL,
                                           &oldtype);
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;
    /* --END ERROR HANDLING-- */

    /* ... end of body of routine ... */

  fn_exit:
    MPIU_CHKLMEM_FREEALL();
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_TYPE_CREATE_DARRAY);
    MPID_CS_EXIT();
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_type_create_darray",
            "**mpi_type_create_darray %d %d %d %p %p %p %p %d %D %p",
            size, rank, ndims, array_of_gsizes,
            array_of_distribs, array_of_dargs, array_of_psizes, order, oldtype, newtype);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
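
For readers using this package to set up a parallel-computing test environment, the sketch below shows how an application would typically call the MPI_Type_create_darray routine implemented above. It is a minimal example, not part of the library source: the 100x100 global array of doubles, the (BLOCK, BLOCK) distribution, and the process grid chosen by MPI_Dims_create are all arbitrary illustration values.

/* example_darray.c - minimal sketch of calling MPI_Type_create_darray
 * (assumed example values: 100x100 global array, BLOCK/BLOCK distribution) */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int gsizes[2]   = {100, 100};                 /* global array is 100 x 100 */
    int distribs[2] = {MPI_DISTRIBUTE_BLOCK, MPI_DISTRIBUTE_BLOCK};
    int dargs[2]    = {MPI_DISTRIBUTE_DFLT_DARG, MPI_DISTRIBUTE_DFLT_DARG};
    int psizes[2]   = {0, 0};                     /* let MPI pick the process grid */
    MPI_Dims_create(size, 2, psizes);

    /* describe this rank's portion of the distributed global array */
    MPI_Datatype darray;
    MPI_Type_create_darray(size, rank, 2, gsizes, distribs, dargs, psizes,
                           MPI_ORDER_C, MPI_DOUBLE, &darray);
    MPI_Type_commit(&darray);

    MPI_Aint lb, extent;
    MPI_Type_get_extent(darray, &lb, &extent);
    printf("rank %d: darray lb = %ld, extent = %ld bytes\n",
           rank, (long)lb, (long)extent);

    MPI_Type_free(&darray);
    MPI_Finalize();
    return 0;
}

A type built this way is normally passed to MPI_File_set_view so each process reads or writes only its own block of the global array; the example above just commits the type, prints its extent, and frees it. With an MPI installation it can be built and run with the usual wrappers, e.g. mpicc followed by mpirun.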
