
transpose_mpi.c

FFTW, a collection of fast C routines to compute the Discrete Fourier Transform in one or more dimensions.

Language: C
static void exchange_elements(TRANSPOSE_EL_TYPE *buf1,
                              TRANSPOSE_EL_TYPE *buf2, int n)
{
     int i;
     TRANSPOSE_EL_TYPE t0,t1,t2,t3;

     for (i = 0; i < (n & 3); ++i) {
          t0 = buf1[i];
          buf1[i] = buf2[i];
          buf2[i] = t0;
     }
     for (; i < n; i += 4) {
          t0 = buf1[i];
          t1 = buf1[i+1];
          t2 = buf1[i+2];
          t3 = buf1[i+3];
          buf1[i] = buf2[i];
          buf1[i+1] = buf2[i+1];
          buf1[i+2] = buf2[i+2];
          buf1[i+3] = buf2[i+3];
          buf2[i] = t0;
          buf2[i+1] = t1;
          buf2[i+2] = t2;
          buf2[i+3] = t3;
     }
}

static void do_permutation(TRANSPOSE_EL_TYPE *data,
                           int *perm_block_dest,
                           int num_perm_blocks,
                           int perm_block_size)
{
     int start_block;

     /* Perform the permutation in the perm_block_dest array, following
        the cycles around and *changing* the perm_block_dest array
        to reflect the permutations that have already been performed.
        At the end of this routine, we change the perm_block_dest
        array back to its original state. (ugh) */

     for (start_block = 0; start_block < num_perm_blocks; ++start_block) {
          int cur_block = start_block;
          int new_block = perm_block_dest[start_block];

          while (new_block > 0 && new_block < num_perm_blocks &&
                 new_block != start_block) {
               exchange_elements(data + perm_block_size*start_block,
                                 data + perm_block_size*new_block,
                                 perm_block_size);

               perm_block_dest[cur_block] = -1 - new_block;
               cur_block = new_block;
               new_block = perm_block_dest[cur_block];
          }

          if (new_block == start_block)
               perm_block_dest[cur_block] = -1 - new_block;
     }

     /* reset the permutation array (ugh): */
     for (start_block = 0; start_block < num_perm_blocks; ++start_block)
          perm_block_dest[start_block] = -1 - perm_block_dest[start_block];
}

TRANSPOSE_EL_TYPE *transpose_allocate_send_buf(transpose_mpi_plan p,
                                               int el_size)
{
     TRANSPOSE_EL_TYPE *send_buf = 0;

     /* allocate the send buffer: */
     if (p->send_block_size > 0) {
          send_buf = (TRANSPOSE_EL_TYPE *)
               fftw_malloc(p->send_block_size * el_size
                           * sizeof(TRANSPOSE_EL_TYPE));
          if (!send_buf)
               fftw_mpi_die("Out of memory!\n");
     }

     return send_buf;
}

void transpose_in_place_local(transpose_mpi_plan p,
                              int el_size, TRANSPOSE_EL_TYPE *local_data,
                              transpose_in_place_which which)
{
     switch (which) {
         case BEFORE_TRANSPOSE:
              if (el_size == 1)
                   TOMS_transpose_2d(local_data,
                                     p->local_nx, p->ny,
                                     p->move, p->move_size);
              else
                   TOMS_transpose_2d_arbitrary(local_data,
                                               p->local_nx, p->ny,
                                               el_size,
                                               p->move, p->move_size);
              break;
         case AFTER_TRANSPOSE:
              do_permutation(local_data, p->perm_block_dest,
                             p->num_perm_blocks, p->perm_block_size * el_size);
              break;
     }
}

/**************************************************************************/

static void local_transpose_copy(TRANSPOSE_EL_TYPE *src,
                                 TRANSPOSE_EL_TYPE *dest,
                                 int el_size, int nx, int ny)
{
     int x, y;

     if (el_size == 1)
          for (x = 0; x < nx; ++x)
               for (y = 0; y < ny; ++y)
                    dest[y * nx + x] = src[x * ny + y];
     else if (el_size == 2)
          for (x = 0; x < nx; ++x)
               for (y = 0; y < ny; ++y) {
                    dest[y * (2 * nx) + 2*x]     = src[x * (2 * ny) + 2*y];
                    dest[y * (2 * nx) + 2*x + 1] = src[x * (2 * ny) + 2*y + 1];
               }
     else
          for (x = 0; x < nx; ++x)
               for (y = 0; y < ny; ++y)
                    memcpy(&dest[y * (el_size*nx) + (el_size*x)],
                           &src[x * (el_size*ny) + (el_size*y)],
                           el_size * sizeof(TRANSPOSE_EL_TYPE));
}

/* Out-of-place version of transpose_mpi (or rather, in place using
   a scratch array): */
static void transpose_mpi_out_of_place(transpose_mpi_plan p, int el_size,
                                       TRANSPOSE_EL_TYPE *local_data,
                                       TRANSPOSE_EL_TYPE *work)
{
     local_transpose_copy(local_data, work, el_size, p->local_nx, p->ny);

     if (p->all_blocks_equal)
          MPI_Alltoall(work, p->send_block_size * el_size, p->el_type,
                       local_data, p->recv_block_size * el_size, p->el_type,
                       p->comm);
     else {
          int i, n_pes = p->n_pes;

          for (i = 0; i < n_pes; ++i) {
               p->send_block_sizes[i] *= el_size;
               p->recv_block_sizes[i] *= el_size;
               p->send_block_offsets[i] *= el_size;
               p->recv_block_offsets[i] *= el_size;
          }
          MPI_Alltoallv(work, p->send_block_sizes, p->send_block_offsets,
                        p->el_type,
                        local_data, p->recv_block_sizes, p->recv_block_offsets,
                        p->el_type,
                        p->comm);
          for (i = 0; i < n_pes; ++i) {
               p->send_block_sizes[i] /= el_size;
               p->recv_block_sizes[i] /= el_size;
               p->send_block_offsets[i] /= el_size;
               p->recv_block_offsets[i] /= el_size;
          }
     }

     do_permutation(local_data, p->perm_block_dest, p->num_perm_blocks,
                    p->perm_block_size * el_size);
}

/**************************************************************************/

void transpose_mpi(transpose_mpi_plan p, int el_size,
                   TRANSPOSE_EL_TYPE *local_data,
                   TRANSPOSE_EL_TYPE *work)
{
     /* if local_data and work are both NULL, we have no way of knowing
        whether we should use in-place or out-of-place transpose routine;
        if we guess wrong, MPI_Alltoall will block.  We prevent this
        by making sure that transpose_mpi_get_local_storage_size returns
        at least 1. */

     if (!local_data && !work)
          fftw_mpi_die("local_data and work are both NULL!");

     if (work)
          transpose_mpi_out_of_place(p, el_size, local_data, work);
     else if (p->local_nx > 0 || p->local_ny > 0) {
          int step;
          TRANSPOSE_EL_TYPE *send_buf = transpose_allocate_send_buf(p,el_size);

          transpose_in_place_local(p, el_size, local_data, BEFORE_TRANSPOSE);

          for (step = 0; step < p->num_steps; ++step) {
               transpose_finish_exchange_step(p, step - 1);
               transpose_start_exchange_step(p, el_size, local_data,
                                             send_buf, step, TRANSPOSE_SYNC);
          }

          transpose_finish_exchange_step(p, step - 1);

          transpose_in_place_local(p, el_size, local_data, AFTER_TRANSPOSE);

          if (send_buf)
               fftw_free(send_buf);
     } /* if (local_nx > 0 || local_ny > 0) */
}

/**************************************************************************/

/* non-blocking routines for overlapping communication and computation: */

#define USE_SYNCHRONOUS_ISEND 1

#if USE_SYNCHRONOUS_ISEND
#define ISEND MPI_Issend
#else
#define ISEND MPI_Isend
#endif

void transpose_get_send_block(transpose_mpi_plan p, int step,
                              int *block_y_start, int *block_ny)
{
     if (p->local_nx > 0) {
          *block_y_start =
               p->send_block_size / p->local_nx * p->exchange[step].block_num;
          *block_ny = p->exchange[step].send_size / p->local_nx;
     }
     else {
          *block_y_start = 0;
          *block_ny = 0;
     }
}

void transpose_start_exchange_step(transpose_mpi_plan p,
                                   int el_size,
                                   TRANSPOSE_EL_TYPE *local_data,
                                   TRANSPOSE_EL_TYPE *send_buf,
                                   int step,
                                   transpose_sync_type sync_type)
{
     if (p->local_nx > 0 || p->local_ny > 0) {
          transpose_mpi_exchange *exchange = p->exchange;
          int block = exchange[step].block_num;
          int send_block_size = p->send_block_size;
          int recv_block_size = p->recv_block_size;

          if (exchange[step].dest_pe != p->my_pe) {

               /* first, copy to send buffer: */
               if (exchange[step].send_size > 0)
                    memcpy(send_buf,
                           local_data + el_size*send_block_size*block,
                           el_size * exchange[step].send_size *
                           sizeof(TRANSPOSE_EL_TYPE));

#define DO_ISEND  \
               if (exchange[step].send_size > 0) {  \
                    ISEND(send_buf, \
                          exchange[step].send_size * el_size, \
                          p->el_type, \
                          exchange[step].dest_pe, 0, \
                          p->comm, \
                          &p->request[0]); \
               }

               p->request[0] = MPI_REQUEST_NULL;
               p->request[1] = MPI_REQUEST_NULL;

               if (sync_type == TRANSPOSE_ASYNC) {
                    /* Note that we impose an ordering on the sends and
                       receives (lower pe sends first) so that we won't
                       have deadlock if Isend & Irecv are blocking in some
                       MPI implementation: */

                    if (p->my_pe < exchange[step].dest_pe)
                         DO_ISEND;

                    if (exchange[step].recv_size > 0) {
                         MPI_Irecv(local_data + el_size*recv_block_size*block,
                                   exchange[step].recv_size * el_size,
                                   p->el_type,
                                   exchange[step].dest_pe, MPI_ANY_TAG,
                                   p->comm,
                                   &p->request[1]);
                    }

                    if (p->my_pe > exchange[step].dest_pe)
                         DO_ISEND;
               }
               else /* (sync_type == TRANSPOSE_SYNC) */ {
                    MPI_Status status;

                    MPI_Sendrecv(send_buf,
                                 exchange[step].send_size * el_size,
                                 p->el_type,
                                 exchange[step].dest_pe, 0,
                                 local_data + el_size*recv_block_size*block,
                                 exchange[step].recv_size * el_size,
                                 p->el_type,
                                 exchange[step].dest_pe, MPI_ANY_TAG,
                                 p->comm, &status);
               }
          }
          else if (exchange[step].recv_size > 0 &&
                   recv_block_size != send_block_size)
               memmove(local_data + el_size*recv_block_size*block,
                       local_data + el_size*send_block_size*block,
                       exchange[step].recv_size * el_size *
                       sizeof(TRANSPOSE_EL_TYPE));
     }
}

void transpose_finish_exchange_step(transpose_mpi_plan p, int step)
{
     if ((p->local_nx > 0 || p->local_ny > 0) && step >= 0
         && p->exchange[step].dest_pe != p->my_pe) {
          MPI_Status status[2];

          MPI_Waitall(2,p->request,status);
     }
}
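
Note: the cycle-following technique used by do_permutation above can be illustrated on its own. The following is a minimal standalone sketch, not part of FFTW (the names permute_in_place and swap_ints are made up for illustration): it follows each permutation cycle, swapping entries as it goes, and temporarily marks visited slots by storing -1 - dest so the destination array can be restored afterwards.

#include <stdio.h>

static void swap_ints(int *a, int *b) { int t = *a; *a = *b; *b = t; }

/* dest[i] is the index where the element originally at i should end up. */
static void permute_in_place(int *data, int *dest, int n)
{
     int start;

     for (start = 0; start < n; ++start) {
          int cur = start;
          int next = dest[start];

          while (next > 0 && next < n && next != start) {
               swap_ints(&data[start], &data[next]);
               dest[cur] = -1 - next;   /* mark this slot as done */
               cur = next;
               next = dest[cur];
          }
          if (next == start)
               dest[cur] = -1 - next;
     }

     /* restore the destination array to its original state */
     for (start = 0; start < n; ++start)
          dest[start] = -1 - dest[start];
}

int main(void)
{
     int data[5] = {10, 20, 30, 40, 50};
     int dest[5] = {2, 0, 1, 4, 3};   /* element at i moves to dest[i] */
     int i;

     permute_in_place(data, dest, 5);
     for (i = 0; i < 5; ++i)
          printf("%d ", data[i]);
     printf("\n");
     return 0;
}

With this input the program prints "20 30 10 50 40": every element lands at its destination index, and dest is left unchanged, mirroring what do_permutation does with blocks of perm_block_size elements.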
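The deadlock-avoidance ordering in the TRANSPOSE_ASYNC branch (the lower pe posts its send before its receive, the higher pe after) can also be shown in isolation. The sketch below is not part of FFTW; it simply pairs up ranks and has each pair exchange one double with MPI_Issend/MPI_Irecv in that order, so the exchange completes even if the nonblocking calls were to behave synchronously in some MPI implementation.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
     int my_pe, n_pes, partner;
     double send_val, recv_val = -1.0;
     MPI_Request request[2];
     MPI_Status status[2];

     MPI_Init(&argc, &argv);
     MPI_Comm_rank(MPI_COMM_WORLD, &my_pe);
     MPI_Comm_size(MPI_COMM_WORLD, &n_pes);

     partner = my_pe ^ 1;             /* pair up ranks 0-1, 2-3, ... */
     send_val = (double) my_pe;
     request[0] = MPI_REQUEST_NULL;
     request[1] = MPI_REQUEST_NULL;

     if (partner < n_pes) {
          if (my_pe < partner)        /* lower rank sends first */
               MPI_Issend(&send_val, 1, MPI_DOUBLE, partner, 0,
                          MPI_COMM_WORLD, &request[0]);

          MPI_Irecv(&recv_val, 1, MPI_DOUBLE, partner, MPI_ANY_TAG,
                    MPI_COMM_WORLD, &request[1]);

          if (my_pe > partner)        /* higher rank sends last */
               MPI_Issend(&send_val, 1, MPI_DOUBLE, partner, 0,
                          MPI_COMM_WORLD, &request[0]);

          MPI_Waitall(2, request, status);
          printf("pe %d received %g from pe %d\n", my_pe, recv_val, partner);
     }

     MPI_Finalize();
     return 0;
}

In transpose_start_exchange_step the same pattern appears with the send buffer and the destination block of local_data, and transpose_finish_exchange_step plays the role of the MPI_Waitall here.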
