
📄 pair.c

📁 MPICH is a major implementation of MPI; it provides a suite of interface functions that supply a programming environment for parallel computing. This file, pair.c, contains round-trip (ping-pong) timing tests between a pair of processes, covering blocking, ready-mode, nonblocking, persistent, and derived-datatype transfers, both cache-resident and cache-avoiding.
💻 C
  if(ctx->is_slave){
    MPI_Send(sbuffer,len,MPI_BYTE,from,0,MPI_COMM_WORLD);
    for(i=0;i<reps;i++){
      MPI_Recv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i),
               MPI_COMM_WORLD,&status);
      MPI_Send(sbuffer,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
    }
  }
  free(sbuffer);
  free(rbuffer);
  return(elapsed_time);
}

/*    Ready-receiver round trip */
double round_trip_force( int reps, int len, PairData ctx)
{
  double elapsed_time;
  int  i, to = ctx->destination, from = ctx->source;
  char *rbuffer,*sbuffer;
  double t0, t1;
  MPI_Request rid;
  MPI_Status  status;

  sbuffer = (char *)malloc(len);
  rbuffer = (char *)malloc(len);
  elapsed_time = 0;
  if(ctx->is_master){
    /* Handshake: wait until the slave has preposted its first receive,
       so that every MPI_Rsend below has a matching receive outstanding */
    MPI_Recv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,0,MPI_COMM_WORLD,&status);
    t0=MPI_Wtime();
    for(i=0;i<reps;i++){
      MPI_Irecv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i),
                MPI_COMM_WORLD,&rid);
      MPI_Rsend(sbuffer,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
      MPI_Wait(&rid,&status);
    }
    t1=MPI_Wtime();
    elapsed_time = t1 - t0;
  }
  if(ctx->is_slave){
    /* Prepost the receive for message 0 before signalling the master
       (the original read MSG_TAG(i) here with i uninitialized) */
    MPI_Irecv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(0),
              MPI_COMM_WORLD,&rid);
    MPI_Send(sbuffer,len,MPI_BYTE,from,0,MPI_COMM_WORLD);
    for(i=0;i<reps-1;i++){
      MPI_Wait(&rid,&status);
      /* Prepost the next receive before replying, keeping the
         ready-send protocol valid on both sides */
      MPI_Irecv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i+1),
                MPI_COMM_WORLD,&rid);
      MPI_Rsend(sbuffer,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
    }
    MPI_Wait(&rid,&status);
    MPI_Rsend(sbuffer,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
  }
  free(sbuffer);
  free(rbuffer);
  return(elapsed_time);
}

/*    Nonblocking round trip */
double round_trip_async( int reps, int len, PairData ctx)
{
  double elapsed_time;
  int  i, to = ctx->destination, from = ctx->source;
  char *rbuffer,*sbuffer;
  MPI_Status status;
  double t0, t1;
  MPI_Request rid;

  sbuffer = (char *)malloc(len);
  rbuffer = (char *)malloc(len);
  elapsed_time = 0;
  if(ctx->is_master){
    MPI_Recv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,0,MPI_COMM_WORLD,&status);
    t0=MPI_Wtime();
    for(i=0;i<reps;i++){
      MPI_Irecv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i),
                MPI_COMM_WORLD,&rid);
      MPI_Send(sbuffer,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
      MPI_Wait(&rid,&status);
    }
    t1=MPI_Wtime();
    elapsed_time = t1 - t0;
  }
  if(ctx->is_slave){
    MPI_Irecv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(0),
              MPI_COMM_WORLD,&rid);
    MPI_Send(sbuffer,len,MPI_BYTE,from,0,MPI_COMM_WORLD);
    for(i=0;i<reps-1;i++){
      MPI_Wait(&rid,&status);
      MPI_Irecv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i+1),
                MPI_COMM_WORLD,&rid);
      MPI_Send(sbuffer,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
    }
    MPI_Wait(&rid,&status);
    /* Final reply; use MSG_TAG(i) as in the other variants
       (the original used a literal tag of 1 here) */
    MPI_Send(sbuffer,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
  }
  free(sbuffer);
  free(rbuffer);
  return(elapsed_time);
}
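/* Persistent requests pay the per-message setup cost (argument checking,
   building the internal request object) only once: MPI_Send_init and
   MPI_Recv_init create the requests, MPI_Startall reactivates the same
   pair on every iteration, and MPI_Waitall completes both before the
   next round trip. */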
/*    Persistent communication (only in MPI)  */
double round_trip_persis( int reps, int len, PairData ctx)
{
  double elapsed_time;
  int  i, to = ctx->destination, from = ctx->source;
  char *rbuffer,*sbuffer;
  double t0, t1;
  MPI_Request sid, rid, rq[2];
  MPI_Status status, statuses[2];

  sbuffer = (char *)malloc(len);
  rbuffer = (char *)malloc(len);
  elapsed_time = 0;
  if(ctx->is_master){
    MPI_Send_init( sbuffer, len, MPI_BYTE, to, 1, MPI_COMM_WORLD, &sid );
    MPI_Recv_init( rbuffer, len, MPI_BYTE, to, 1, MPI_COMM_WORLD, &rid );
    rq[0] = rid;
    rq[1] = sid;
    MPI_Recv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,0,MPI_COMM_WORLD,&status);
    t0=MPI_Wtime();
    for(i=0;i<reps;i++){
      MPI_Startall( 2, rq );
      MPI_Waitall( 2, rq, statuses );
    }
    t1=MPI_Wtime();
    elapsed_time = t1 - t0;
    MPI_Request_free( &rid );
    MPI_Request_free( &sid );
  }
  if(ctx->is_slave){
    MPI_Send_init( sbuffer, len, MPI_BYTE, from, 1, MPI_COMM_WORLD, &sid );
    MPI_Recv_init( rbuffer, len, MPI_BYTE, from, 1, MPI_COMM_WORLD, &rid );
    rq[0] = rid;
    rq[1] = sid;
    MPI_Start( &rid );
    MPI_Send(sbuffer,len,MPI_BYTE,from,0,MPI_COMM_WORLD);
    for(i=0;i<reps-1;i++){
      MPI_Wait( &rid, &status );
      MPI_Startall( 2, rq );
      MPI_Wait( &sid, &status );
    }
    MPI_Wait( &rid, &status );
    MPI_Start( &sid );
    MPI_Wait( &sid, &status );
    MPI_Request_free( &rid );
    MPI_Request_free( &sid );
  }
  free(sbuffer);
  free(rbuffer);
  return(elapsed_time);
}

static int VectorStride = 10;

int set_vector_stride( int n )
{
    VectorStride = n;
    return 0;
}

double round_trip_vector(int reps, int len, PairData ctx)
{
  double elapsed_time;
  int  i, to = ctx->destination, from = ctx->source;
  double *rbuffer,*sbuffer;
  double t0, t1;
  MPI_Datatype vec, types[2];
  int          blens[2];
  MPI_Aint     displs[2];
  MPI_Status   status;
  MPI_Comm     comm;

  /* Convert len from bytes to a count of doubles */
  len = len / sizeof(double);
  comm = MPI_COMM_WORLD;

  /* One double per element, with MPI_UB padding the extent to
     VectorStride doubles (the MPI-1 way to build a strided type) */
  blens[0] = 1; displs[0] = 0; types[0] = MPI_DOUBLE;
  blens[1] = 1; displs[1] = VectorStride * sizeof(double); types[1] = MPI_UB;
  MPI_Type_struct( 2, blens, displs, types, &vec );
  MPI_Type_commit( &vec );

  sbuffer = (double *)malloc((unsigned)(VectorStride * len * sizeof(double)));
  rbuffer = (double *)malloc((unsigned)(VectorStride * len * sizeof(double)));
  if (!sbuffer) return 0;
  if (!rbuffer) return 0;
  elapsed_time = 0;
  if(ctx->is_master){
    MPI_Recv( rbuffer, len, vec, to, 0, comm, &status );
    t0=MPI_Wtime();
    for(i=0;i<reps;i++){
      MPI_Send( sbuffer, len, vec, to, MSG_TAG(i), comm );
      MPI_Recv( rbuffer, len, vec, from, MSG_TAG(i), comm, &status );
    }
    t1=MPI_Wtime();
    elapsed_time = t1 - t0;
  }
  if(ctx->is_slave){
    MPI_Send( sbuffer, len, vec, from, 0, comm );
    for(i=0;i<reps;i++){
      MPI_Recv( rbuffer, len, vec, from, MSG_TAG(i), comm, &status );
      MPI_Send( sbuffer, len, vec, to, MSG_TAG(i), comm );
    }
  }
  free(sbuffer);
  free(rbuffer);
  MPI_Type_free( &vec );
  return(elapsed_time);
}

double round_trip_vectortype( int reps, int len, PairData ctx)
{
  double elapsed_time;
  int  i, to = ctx->destination, from = ctx->source;
  double *rbuffer,*sbuffer;
  double t0, t1;
  MPI_Datatype vec;
  MPI_Status   status;
  MPI_Comm     comm;

  /* Convert len from bytes to a count of doubles */
  len = len / sizeof(double);
  comm = MPI_COMM_WORLD;
  /* The same strided layout, built with MPI_Type_vector and sent
     with count 1 */
  MPI_Type_vector( len, 1, VectorStride, MPI_DOUBLE, &vec );
  MPI_Type_commit( &vec );

  sbuffer = (double *)malloc((unsigned)(VectorStride * len * sizeof(double)));
  rbuffer = (double *)malloc((unsigned)(VectorStride * len * sizeof(double)));
  if (!sbuffer) return 0;
  if (!rbuffer) return 0;
  elapsed_time = 0;
  if(ctx->is_master){
    MPI_Recv( rbuffer, 1, vec, to, 0, comm, &status );
    t0=MPI_Wtime();
    for(i=0;i<reps;i++){
      MPI_Send( sbuffer, 1, vec, to, MSG_TAG(i), comm );
      MPI_Recv( rbuffer, 1, vec, from, MSG_TAG(i), comm, &status );
    }
    t1=MPI_Wtime();
    elapsed_time = t1 - t0;
  }
  if(ctx->is_slave){
    MPI_Send( sbuffer, 1, vec, from, 0, comm );
    for(i=0;i<reps;i++){
      MPI_Recv( rbuffer, 1, vec, from, MSG_TAG(i), comm, &status );
      MPI_Send( sbuffer, 1, vec, to, MSG_TAG(i), comm );
    }
  }
  free(sbuffer);
  free(rbuffer);
  MPI_Type_free( &vec );
  return(elapsed_time);
}
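/* MPI_Type_struct and the MPI_UB marker used above were deprecated in
   MPI-2 and removed in MPI-3.  A minimal sketch of the modern equivalent
   for the same stride; this helper is illustrative only and not part of
   the original file: */
static void make_strided_double_type( int stride, MPI_Datatype *vec )
{
    /* A single double whose extent is padded to stride doubles, so
       element i of a count-N message starts i*stride doubles into
       the buffer */
    MPI_Type_create_resized( MPI_DOUBLE, 0,
                             (MPI_Aint)(stride * sizeof(double)), vec );
    MPI_Type_commit( vec );
}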
/*    These versions try NOT to operate out of cache; rather, they
      send/receive into a moving window, so that successive repetitions
      touch different (usually cache-cold) memory. */

/*    Blocking round trip (always unidirectional)  */
double round_trip_nc_sync( int reps, int len, PairData ctx)
{
    double elapsed_time;
    int  i, to = ctx->destination, from = ctx->source;
    char *rbuffer,*sbuffer, *rp, *sp, *rlast, *slast;
    MPI_Status status;
    double t0, t1;

    /* Buffers of twice the cache size; sp/rp sweep through them and
       wrap, so most transfers start from uncached memory */
    sbuffer = (char *)malloc((unsigned)(2 * CacheSize));
    slast   = sbuffer + 2 * CacheSize - len;
    rbuffer = (char *)malloc((unsigned)(2 * CacheSize));
    rlast   = rbuffer + 2 * CacheSize - len;
    if (!sbuffer || !rbuffer) {
        fprintf( stderr, "Could not allocate %d bytes\n", 4 * CacheSize );
        exit(1);
    }
    sp = sbuffer;
    rp = rbuffer;
    elapsed_time = 0;
    if(ctx->is_master){
        MPI_Recv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,0,MPI_COMM_WORLD,&status);
        t0=MPI_Wtime();
        for(i=0;i<reps;i++){
            MPI_Send(sp,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
            MPI_Recv(rp,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i),
                     MPI_COMM_WORLD,&status);
            sp += len;
            rp += len;
            if (sp > slast) sp = sbuffer;
            if (rp > rlast) rp = rbuffer;
        }
        t1=MPI_Wtime();
        elapsed_time = t1 - t0;
    }
    if(ctx->is_slave){
        MPI_Send(sbuffer,len,MPI_BYTE,from,0,MPI_COMM_WORLD);
        for(i=0;i<reps;i++){
            MPI_Recv(rp,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i),
                     MPI_COMM_WORLD,&status);
            MPI_Send(sp,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
            sp += len;
            rp += len;
            if (sp > slast) sp = sbuffer;
            if (rp > rlast) rp = rbuffer;
        }
    }
    free(sbuffer);
    free(rbuffer);
    return(elapsed_time);
}

/*    Ready-receiver round trip */
double round_trip_nc_force( int reps, int len, PairData ctx)
{
    double elapsed_time;
    int  i, to = ctx->destination, from = ctx->source;
    char *rbuffer,*sbuffer, *rp, *sp, *rlast, *slast;
    double t0, t1;
    MPI_Request rid;
    MPI_Status  status;

    sbuffer = (char *)malloc((unsigned)(2 * CacheSize));
    slast   = sbuffer + 2 * CacheSize - len;
    rbuffer = (char *)malloc((unsigned)(2 * CacheSize));
    rlast   = rbuffer + 2 * CacheSize - len;
    if (!sbuffer || !rbuffer) {
        fprintf( stderr, "Could not allocate %d bytes\n", 4 * CacheSize );
        exit(1);
    }
    sp = sbuffer;
    rp = rbuffer;
    elapsed_time = 0;
    if(ctx->is_master){
        MPI_Recv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,0,MPI_COMM_WORLD,&status);
        t0=MPI_Wtime();
        for(i=0;i<reps;i++){
            MPI_Irecv(rp,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i),
                      MPI_COMM_WORLD,&rid);
            MPI_Rsend(sp,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
            MPI_Wait(&rid,&status);
            sp += len;
            rp += len;
            if (sp > slast) sp = sbuffer;
            if (rp > rlast) rp = rbuffer;
        }
        t1=MPI_Wtime();
        elapsed_time = t1 - t0;
    }
    if(ctx->is_slave){
        /* Prepost the receive for message 0 (the original read MSG_TAG(i)
           here with i uninitialized) */
        MPI_Irecv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(0),
                  MPI_COMM_WORLD,&rid);
        MPI_Send(sbuffer,len,MPI_BYTE,from,0,MPI_COMM_WORLD);
        for(i=0;i<reps-1;i++){
            MPI_Wait(&rid,&status);
            rp += len;
            if (rp > rlast) rp = rbuffer;
            MPI_Irecv(rp,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i+1),
                      MPI_COMM_WORLD,&rid);
            MPI_Rsend(sp,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
            sp += len;
            if (sp > slast) sp = sbuffer;
        }
        MPI_Wait(&rid,&status);
        MPI_Rsend(sp,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
    }
    free(sbuffer);
    free(rbuffer);
    return(elapsed_time);
}
/*    Nonblocking round trip */
double round_trip_nc_async( int reps, int len, PairData ctx)
{
    double elapsed_time;
    int  i, to = ctx->destination, from = ctx->source;
    char *rbuffer,*sbuffer, *rp, *sp, *rlast, *slast;
    double t0, t1;
    MPI_Request rid;
    MPI_Status  status;

    sbuffer = (char *)malloc((unsigned)(2 * CacheSize));
    slast   = sbuffer + 2 * CacheSize - len;
    rbuffer = (char *)malloc((unsigned)(2 * CacheSize));
    rlast   = rbuffer + 2 * CacheSize - len;
    if (!sbuffer || !rbuffer) {
        fprintf( stderr, "Could not allocate %d bytes\n", 4 * CacheSize );
        exit(1);
    }
    sp = sbuffer;
    rp = rbuffer;
    elapsed_time = 0;
    if(ctx->is_master){
        MPI_Recv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,0,MPI_COMM_WORLD,&status);
        t0=MPI_Wtime();
        for(i=0;i<reps;i++){
            MPI_Irecv(rp,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i),
                      MPI_COMM_WORLD,&rid);
            MPI_Send(sp,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
            MPI_Wait(&rid,&status);
            sp += len;
            rp += len;
            if (sp > slast) sp = sbuffer;
            if (rp > rlast) rp = rbuffer;
        }
        t1=MPI_Wtime();
        elapsed_time = t1 - t0;
    }
    if(ctx->is_slave){
        MPI_Irecv(rbuffer,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(0),
                  MPI_COMM_WORLD,&rid);
        MPI_Send(sbuffer,len,MPI_BYTE,from,0,MPI_COMM_WORLD);
        for(i=0;i<reps-1;i++){
            MPI_Wait(&rid,&status);
            rp += len;
            if (rp > rlast) rp = rbuffer;
            MPI_Irecv(rp,len,MPI_BYTE,MPI_ANY_SOURCE,MSG_TAG(i+1),
                      MPI_COMM_WORLD,&rid);
            MPI_Send(sp,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
            sp += len;
            if (sp > slast) sp = sbuffer;
        }
        MPI_Wait(&rid,&status);
        MPI_Send(sp,len,MPI_BYTE,to,MSG_TAG(i),MPI_COMM_WORLD);
    }
    free(sbuffer);
    free(rbuffer);
    return(elapsed_time);
}
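The listing does not include the definition of PairData, the MSG_TAG macro, or a driver. As a rough illustration only, the following self-contained sketch shows how one of these tests might be driven from a separate file linked against pair.c; the PairCtx layout is an assumption reconstructed from the four fields the code reads (is_master, is_slave, source, destination), and the real benchmark's definitions may differ.

#include <stdio.h>
#include <mpi.h>

/* Assumed context layout (hypothetical; reconstructed from the fields
   that pair.c reads, not taken from the benchmark's real header) */
typedef struct { int is_master, is_slave, source, destination; } PairCtx;
typedef PairCtx *PairData;

double round_trip_async( int reps, int len, PairData ctx );  /* in pair.c */

int main( int argc, char *argv[] )
{
    PairCtx ctx;
    int     rank, size;
    int     reps = 1000, len = 1024;  /* repetitions, message size in bytes */
    double  t;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    if (size < 2) {
        if (rank == 0) fprintf( stderr, "run with at least 2 processes\n" );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    if (rank < 2) {
        /* Rank 0 times the exchange; rank 1 echoes each message back */
        ctx.is_master   = (rank == 0);
        ctx.is_slave    = (rank == 1);
        ctx.source      = 1 - rank;
        ctx.destination = 1 - rank;
        t = round_trip_async( reps, len, &ctx );
        if (ctx.is_master)
            printf( "%d round trips of %d bytes: %f s (%.2f us one way)\n",
                    reps, len, t, 1e6 * t / (2.0 * reps) );
    }
    MPI_Finalize();
    return 0;
}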
