
msg2.c

From: Parallel Programming / Lou Baker, Bradley J. Smith. New York: McGraw-Hill Book Co.
Language: C
/* PSPH - Parallel SPH program
 * Bradley Smith and Lou Baker, Dagonet Software
 */
static char sccs_id[] = "@(#) /home/bsmith/src/psph/SCCS/s.msg2.c 1.6 94/01/06";

/* Message passing routines - Collective
 * (see also MPI_Reduce and MPI_Barrier in msg.c)
 *
 * Note that Allgather may be used in SPH so that each processor knows
 * the maximum smoothing length of all processors (see the usage sketch
 * after MPI_Allgather below).
 */

#include "all.h"

#define BCAST_MSG_TAG   16382
#define SCATTER_MSG_TAG 16383
/* GATHER_MSG_TAG is assumed to be the biggest base tag and a power of
 * two, so the sending processor's number can be carried in the
 * low-order bits of the tag. */
#define GATHER_MSG_TAG  16384
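
/* Illustrative sketch (not in the original source): because
 * GATHER_MSG_TAG is 16384 == 1<<14 and the other base tags sit just
 * below it, any rank below 16384 fits in the low-order bits of a gather
 * tag and can be recovered on receipt.  These two hypothetical helper
 * macros merely restate the scheme the code below uses inline. */
#define _MSG_TAG_FOR_RANK(base, rank)	((base) | (rank))		/* compose */
#define _MSG_RANK_FROM_TAG(tag)		((tag) & (GATHER_MSG_TAG - 1))	/* decode */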

/* one (root) to all broadcast */

int
MPI_Bcast(void *buf, int count, MPI_Datatype datatype,
	int root, MPI_Comm comm)
{
 int rank, nproc, i;
 MPI_Status status;

 /* For right now - do this with generic sends and receives */
 MPI_Comm_size(comm, &nproc);
 MPI_Comm_rank(comm, &rank);
 if(nproc <= 1)
 	return(MPI_SUCCESS);
 if(rank != root)	/* all non-root ranks receive from root */
	{
	MPI_Recv(buf, count, datatype, root, BCAST_MSG_TAG, comm, &status);
	}
 else
	{
	/* root sends to everyone else */
	for(i = 0; i < nproc; i++)
		{
		if(i == root) continue;	/* no need to send to self */
		MPI_Send(buf, count, datatype, i, BCAST_MSG_TAG, comm);
		}
	}
 return(MPI_SUCCESS);
}
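
/* Minimal usage sketch (added for illustration; PSPH_EXAMPLES is a
 * hypothetical guard so this does not affect the build, and
 * MPI_COMM_WORLD / MPI_DOUBLE are assumed to be defined in all.h as in
 * the standard MPI subset): rank 0 broadcasts one double to everyone. */
#ifdef PSPH_EXAMPLES
static void
example_bcast(void)
{
 double h = 0.0;
 int rank;

 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 if(rank == 0)
	h = 1.5;	/* only the root fills the buffer */
 MPI_Bcast(&h, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
 /* after the call every rank holds h == 1.5 */
}
#endif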


/* all send to root */
int
MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
	void *recvbuf, int recvcount, MPI_Datatype recvtype,
	int root, MPI_Comm comm)
{
 int rank, nproc, i;
 void *buff;
 MPI_Status status;

 /* For right now - do this with generic sends and receives */
 MPI_Comm_size(comm, &nproc);
 MPI_Comm_rank(comm, &rank);
 if(nproc <= 1)
	{
	/* single process: still copy the send buffer into the receive buffer */
	memcpy(recvbuf, sendbuf, recvcount * _MPI_SizeDataType(recvtype));
	return(MPI_SUCCESS);
	}
 if(rank == root)	/* root receives from everyone, in rank order */
	{
	for(i = 0; i < nproc; i++)
		{
		/* offset into recvbuf for rank i (no arithmetic on void*) */
		buff = (char *)recvbuf + i * recvcount * _MPI_SizeDataType(recvtype);
		if(i == root)
			{
			/* no self-message needed - just copy */
			memcpy(buff, sendbuf, recvcount * _MPI_SizeDataType(recvtype));
			}
		else
			MPI_Recv(buff, recvcount, recvtype, MPI_ANY_SOURCE,
				(GATHER_MSG_TAG | i), comm, &status);
		}
	}
 else
	{
	/* not root - send to root; the tag carries the sender's rank */
	MPI_Send(sendbuf, sendcount, sendtype, root, (GATHER_MSG_TAG | rank), comm);
	}
 return(MPI_SUCCESS);
}
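
/* Usage sketch (illustration only, under the same hypothetical
 * PSPH_EXAMPLES guard; MPI_COMM_WORLD and MPI_INT are assumed to come
 * from all.h): each rank contributes one int and rank 0 collects them
 * in rank order. */
#ifdef PSPH_EXAMPLES
static void
example_gather(void)
{
 int rank, nproc;
 int mine, all[64];	/* assumes nproc <= 64 for this sketch */

 MPI_Comm_size(MPI_COMM_WORLD, &nproc);
 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 mine = rank * rank;	/* some per-process value */
 MPI_Gather(&mine, 1, MPI_INT, all, 1, MPI_INT, 0, MPI_COMM_WORLD);
 /* on rank 0: all[i] == i*i for each i < nproc */
}
#endif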



/* root sends to all */
int
MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
	void *recvbuf, int recvcount, MPI_Datatype recvtype,
	int root, MPI_Comm comm)
{
 int rank, nproc, i;
 void *buff;
 MPI_Status status;

 /* For right now - do this with generic sends and receives */
 MPI_Comm_size(comm, &nproc);
 MPI_Comm_rank(comm, &rank);
 if(nproc <= 1)
	{
	/* single process: still copy the send buffer into the receive buffer */
	memcpy(recvbuf, sendbuf, sendcount * _MPI_SizeDataType(sendtype));
	return(MPI_SUCCESS);
	}
 if(rank != root)	/* non-root ranks receive their piece from root */
	{
	MPI_Recv(recvbuf, recvcount, recvtype, root, SCATTER_MSG_TAG,
		comm, &status);
	}
 else
	{
	/* root sends each rank its own piece of sendbuf */
	for(i = 0; i < nproc; i++)
		{
		/* offset of rank i's piece (no arithmetic on void*, and
		 * casting a pointer through int is not 64-bit safe) */
		buff = (char *)sendbuf + i * sendcount * _MPI_SizeDataType(sendtype);
		if(i == root)	/* no self-message needed - just copy */
			memcpy(recvbuf, buff, sendcount * _MPI_SizeDataType(sendtype));
		else
			MPI_Send(buff, sendcount, sendtype, i, SCATTER_MSG_TAG, comm);
		}
	}
 return(MPI_SUCCESS);
}
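
/* Usage sketch (illustration only, under the hypothetical PSPH_EXAMPLES
 * guard; MPI_COMM_WORLD and MPI_INT are assumed to come from all.h):
 * rank 0 hands each process one int from a table. */
#ifdef PSPH_EXAMPLES
static void
example_scatter(void)
{
 int rank, nproc, i;
 int chunk, work[64];	/* assumes nproc <= 64 for this sketch */

 MPI_Comm_size(MPI_COMM_WORLD, &nproc);
 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 if(rank == 0)
	for(i = 0; i < nproc; i++)
		work[i] = 10 * i;	/* one element per process */
 MPI_Scatter(work, 1, MPI_INT, &chunk, 1, MPI_INT, 0, MPI_COMM_WORLD);
 /* each rank now holds chunk == 10 * rank */
}
#endif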



/* all-to-all exchange: every process ends up with every process's data */

int
MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
	void *recvbuf, int recvcount, MPI_Datatype recvtype,
	int root, MPI_Comm comm)	/* root is unused here */
{
 int rank, nproc, i;
 void *buff;
 MPI_Status status;

 /* For right now - do this with generic sends and receives */
 MPI_Comm_size(comm, &nproc);
 MPI_Comm_rank(comm, &rank);
 if(nproc <= 1)
	{
	/* single process: still copy the send buffer into the receive buffer */
	memcpy(recvbuf, sendbuf, recvcount * _MPI_SizeDataType(recvtype));
	return(MPI_SUCCESS);
	}
 /* everyone sends to everyone else (assumes the underlying sends can
  * complete without a matching receive being posted first) */
 for(i = 0; i < nproc; i++)
	{
	if(i == rank) continue;	/* skip send to self */
	MPI_Send(sendbuf, sendcount, sendtype, i, (GATHER_MSG_TAG | rank), comm);
	}
 /* ALL ranks receive - including root!  Receive each other processor's
  * contribution in rank order. */
 for(i = 0; i < nproc; i++)
	{
	/* offset into recvbuf for rank i (no arithmetic on void*) */
	buff = (char *)recvbuf + i * recvcount * _MPI_SizeDataType(recvtype);
	if(i == rank)
		{
		/* memcpy stands in for the send to self */
		memcpy(buff, sendbuf, recvcount * _MPI_SizeDataType(recvtype));
		}
	else
		MPI_Recv(buff, recvcount, recvtype, MPI_ANY_SOURCE,
			(GATHER_MSG_TAG | i), comm, &status);
	}
 /* buff points into recvbuf, so it must not be free()'d */
 return(MPI_SUCCESS);
}
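
/* Usage sketch tying this back to the SPH note at the top of the file
 * (illustration only, under the hypothetical PSPH_EXAMPLES guard;
 * MPI_COMM_WORLD and MPI_DOUBLE are assumed to come from all.h): every
 * processor contributes its local maximum smoothing length, then scans
 * the gathered array, so all processors agree on the global maximum. */
#ifdef PSPH_EXAMPLES
static double
example_global_hmax(double local_hmax)
{
 int nproc, i;
 double hall[64], hmax;	/* assumes nproc <= 64 for this sketch */

 MPI_Comm_size(MPI_COMM_WORLD, &nproc);
 MPI_Allgather(&local_hmax, 1, MPI_DOUBLE, hall, 1, MPI_DOUBLE,
	0, MPI_COMM_WORLD);	/* root argument is ignored */
 hmax = hall[0];
 for(i = 1; i < nproc; i++)
	if(hall[i] > hmax)
		hmax = hall[i];
 return hmax;	/* same value on every rank */
}
#endif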
