
fdtdmpi1.f90

Parallel computation for the finite-difference time-domain (FDTD) method. The program mainly handles the connection problem between processes in the parallel run, i.e. the exchange of ghost-layer field values across the domain cut.

Language: F90
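The central difficulty the listing below addresses: when the FDTD grid is cut along x or y, the ghost-layer slice of a 3-D Fortran array is not contiguous in memory, so the code describes it with MPI derived datatypes (MPI_TYPE_VECTOR, assembled plane by plane with MPI_TYPE_STRUCT) instead of packing a buffer by hand. As a minimal, self-contained sketch of the same idiom on a 2-D array (all names and sizes here are illustrative, not taken from the original file):

	! Sketch: send one non-contiguous x=const column of a(nx,ny) from
	! rank 0 to rank 1 using MPI_TYPE_VECTOR. Run with 2 MPI processes.
	program vector_slice_demo
	implicit none
	include 'mpif.h'
	integer, parameter :: nx=6, ny=4
	real*4 a(nx,ny)
	integer colType, rank, ierr, status(MPI_STATUS_SIZE)
	integer i, j

	call MPI_INIT(ierr)
	call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)

	! ny blocks of length 1, stride nx elements: the column a(i0,1..ny).
	call MPI_TYPE_VECTOR(ny, 1, nx, MPI_REAL, colType, ierr)
	call MPI_TYPE_COMMIT(colType, ierr)

	if (rank.eq.0) then
	   do j=1,ny
	      do i=1,nx
	         a(i,j) = real(10*i+j)
	      end do
	   end do
	   ! Send the interior column i=2 of rank 0 ...
	   call MPI_SEND(a(2,1), 1, colType, 1, 0, MPI_COMM_WORLD, ierr)
	else if (rank.eq.1) then
	   a = 0.0
	   ! ... into the ghost column i=1 of rank 1.
	   call MPI_RECV(a(1,1), 1, colType, 0, 0, MPI_COMM_WORLD, status, ierr)
	   print *, 'ghost column on rank 1:', (a(1,j), j=1,ny)
	end if

	call MPI_TYPE_FREE(colType, ierr)
	call MPI_FINALIZE(ierr)
	end program vector_slice_demo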


	program fdtdmpi1

	implicit none

	contains

	! H-field ghost-layer exchange between neighbouring processes.
	! Assumed host-associated (declared in the main program or a module,
	! not shown in this listing): nx, ny, nz, n_cut_dir_flag, single_flag,
	! double_flag, ind_process, num_process, and the fields hx, hy, hz.
	SUBROUTINE PARALLEL_H_EXCHANGE()

	include 'mpif.h'

	integer i, j, k, tag, count, newType, ierr, req(8), tempType
	integer statuses(MPI_STATUS_SIZE,8)
	integer X_DirCutType, oldType(-1:nz+1), blocklens(-1:nz+1), offsets(-1:nz+1)
	real*4 r1    ! dummy, used only for sizeof() in single precision
	real*8 r2    ! dummy, used only for sizeof() in double precision

	! Null out all requests so MPI_WAITALL is safe on boundary processes
	! that post only one side of the exchange.
	req = MPI_REQUEST_NULL
   
	IF (n_cut_dir_flag.eq.1) THEN
	   ! Cut along x: a slice at fixed i is one line of ny+3 values
	   ! (stride nx+3) per z-plane; the nz+3 planes are assembled with
	   ! byte offsets via MPI_TYPE_STRUCT (MPI-1 form; newer codes use
	   ! MPI_TYPE_CREATE_STRUCT). sizeof() is a common compiler extension.
	   IF (single_flag) THEN
	      CALL MPI_TYPE_VECTOR(ny+3,1,nx+3,MPI_REAL,tempType,ierr)
	      CALL MPI_TYPE_COMMIT(tempType,ierr)
	      DO i=-1,nz+1
	         blocklens(i)=1
	         oldType(i)=tempType
	         offsets(i)=(i+1)*sizeof(r1)*(nx+3)*(ny+3)
	      END DO

	   ELSEIF (double_flag) THEN
	      CALL MPI_TYPE_VECTOR(ny+3,1,nx+3,MPI_DOUBLE_PRECISION,tempType,ierr)
	      CALL MPI_TYPE_COMMIT(tempType,ierr)
	      DO i=-1,nz+1
	         blocklens(i)=1
	         oldType(i)=tempType
	         offsets(i)=(i+1)*sizeof(r2)*(nx+3)*(ny+3)
	      END DO
	   END IF

	   CALL MPI_TYPE_STRUCT(nz+3,blocklens,offsets,oldType,X_DirCutType,ierr)
	   CALL MPI_TYPE_COMMIT(X_DirCutType,ierr)
	   IF (ind_process.ne.0) THEN
	      ! Exchange tangential H (hy, hz) with the lower-x neighbour.
	      CALL MPI_ISEND(hy(1,-1,-1),1,X_DirCutType,ind_process-1,0,MPI_COMM_WORLD,req(1),ierr)
	      CALL MPI_IRECV(hy(0,-1,-1),1,X_DirCutType,ind_process-1,1,MPI_COMM_WORLD,req(2),ierr)
	      CALL MPI_ISEND(hz(1,-1,-1),1,X_DirCutType,ind_process-1,2,MPI_COMM_WORLD,req(3),ierr)
	      CALL MPI_IRECV(hz(0,-1,-1),1,X_DirCutType,ind_process-1,3,MPI_COMM_WORLD,req(4),ierr)
	   END IF
	   IF (ind_process.ne.num_process-1) THEN
	      ! Matching exchange with the upper-x neighbour; the tags pair up
	      ! with the calls above (hy on tags 0/1, hz on tags 2/3).
	      CALL MPI_IRECV(hy(nx-1,-1,-1),1,X_DirCutType,ind_process+1,0,MPI_COMM_WORLD,req(5),ierr)
	      CALL MPI_ISEND(hy(nx-2,-1,-1),1,X_DirCutType,ind_process+1,1,MPI_COMM_WORLD,req(6),ierr)
	      CALL MPI_IRECV(hz(nx-1,-1,-1),1,X_DirCutType,ind_process+1,2,MPI_COMM_WORLD,req(7),ierr)
	      CALL MPI_ISEND(hz(nx-2,-1,-1),1,X_DirCutType,ind_process+1,3,MPI_COMM_WORLD,req(8),ierr)
	   END IF
	   CALL MPI_WAITALL(8,req,statuses,ierr)
	   CALL MPI_TYPE_FREE(X_DirCutType,ierr)
	   CALL MPI_TYPE_FREE(tempType,ierr)
	   CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
	ELSEIF (n_cut_dir_flag.eq.2) THEN
	   ! Cut along y: a slice at fixed j is nx+3 contiguous values per
	   ! z-plane, repeated nz+3 times at stride (nx+3)*(ny+3), so a single
	   ! MPI_TYPE_VECTOR suffices.
	   IF (single_flag) THEN
	      CALL MPI_TYPE_VECTOR(nz+3,nx+3,(nx+3)*(ny+3),MPI_REAL,newType,ierr)
	      CALL MPI_TYPE_COMMIT(newType,ierr)
	   ELSEIF (double_flag) THEN
	      CALL MPI_TYPE_VECTOR(nz+3,nx+3,(nx+3)*(ny+3),MPI_DOUBLE_PRECISION,newType,ierr)
	      CALL MPI_TYPE_COMMIT(newType,ierr)
	   END IF
	   IF (ind_process.ne.0) THEN
	      ! Exchange tangential H (hx, hz) with the lower-y neighbour.
	      CALL MPI_ISEND(hx(-1,1,-1),1,newType,ind_process-1,0,MPI_COMM_WORLD,req(1),ierr)
	      CALL MPI_IRECV(hx(-1,0,-1),1,newType,ind_process-1,1,MPI_COMM_WORLD,req(2),ierr)
	      CALL MPI_ISEND(hz(-1,1,-1),1,newType,ind_process-1,2,MPI_COMM_WORLD,req(3),ierr)
	      CALL MPI_IRECV(hz(-1,0,-1),1,newType,ind_process-1,3,MPI_COMM_WORLD,req(4),ierr)
	   END IF
	   IF (ind_process.ne.num_process-1) THEN
	      CALL MPI_IRECV(hx(-1,ny-1,-1),1,newType,ind_process+1,0,MPI_COMM_WORLD,req(5),ierr)
	      CALL MPI_ISEND(hx(-1,ny-2,-1),1,newType,ind_process+1,1,MPI_COMM_WORLD,req(6),ierr)
	      CALL MPI_IRECV(hz(-1,ny-1,-1),1,newType,ind_process+1,2,MPI_COMM_WORLD,req(7),ierr)
	      CALL MPI_ISEND(hz(-1,ny-2,-1),1,newType,ind_process+1,3,MPI_COMM_WORLD,req(8),ierr)
	   END IF
	   CALL MPI_WAITALL(8,req,statuses,ierr)
	   CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
	   CALL MPI_TYPE_FREE(newType,ierr)
	ELSEIF (n_cut_dir_flag.eq.3) THEN
	   ! Cut along z: a slice at fixed k is one whole contiguous xy-plane,
	   ! so no derived datatype is needed, only an element count.
	   count=(nx+3)*(ny+3)
	   IF (single_flag) THEN
	      IF (ind_process.ne.0) THEN
	         ! Exchange tangential H (hx, hy) with the lower-z neighbour.
	         CALL MPI_ISEND(hx(-1,-1,1),count,MPI_REAL,ind_process-1,0,MPI_COMM_WORLD,req(1),ierr)
	         CALL MPI_IRECV(hx(-1,-1,0),count,MPI_REAL,ind_process-1,1,MPI_COMM_WORLD,req(2),ierr)
	         CALL MPI_ISEND(hy(-1,-1,1),count,MPI_REAL,ind_process-1,2,MPI_COMM_WORLD,req(3),ierr)
	         CALL MPI_IRECV(hy(-1,-1,0),count,MPI_REAL,ind_process-1,3,MPI_COMM_WORLD,req(4),ierr)
	      END IF
	      IF (ind_process.ne.num_process-1) THEN
	         CALL MPI_IRECV(hx(-1,-1,nz-1),count,MPI_REAL,ind_process+1,0,MPI_COMM_WORLD,req(5),ierr)
	         CALL MPI_ISEND(hx(-1,-1,nz-2),count,MPI_REAL,ind_process+1,1,MPI_COMM_WORLD,req(6),ierr)
	         CALL MPI_IRECV(hy(-1,-1,nz-1),count,MPI_REAL,ind_process+1,2,MPI_COMM_WORLD,req(7),ierr)
	         CALL MPI_ISEND(hy(-1,-1,nz-2),count,MPI_REAL,ind_process+1,3,MPI_COMM_WORLD,req(8),ierr)
	      END IF
	   ELSEIF (double_flag) THEN
	      IF (ind_process.ne.0) THEN
	         CALL MPI_ISEND(hx(-1,-1,1),count,MPI_DOUBLE_PRECISION,ind_process-1,0,MPI_COMM_WORLD,req(1),ierr)
	         CALL MPI_IRECV(hx(-1,-1,0),count,MPI_DOUBLE_PRECISION,ind_process-1,1,MPI_COMM_WORLD,req(2),ierr)
	         CALL MPI_ISEND(hy(-1,-1,1),count,MPI_DOUBLE_PRECISION,ind_process-1,2,MPI_COMM_WORLD,req(3),ierr)
	         CALL MPI_IRECV(hy(-1,-1,0),count,MPI_DOUBLE_PRECISION,ind_process-1,3,MPI_COMM_WORLD,req(4),ierr)
	      END IF
	      IF (ind_process.ne.num_process-1) THEN
	         CALL MPI_IRECV(hx(-1,-1,nz-1),count,MPI_DOUBLE_PRECISION,ind_process+1,0,MPI_COMM_WORLD,req(5),ierr)
	         CALL MPI_ISEND(hx(-1,-1,nz-2),count,MPI_DOUBLE_PRECISION,ind_process+1,1,MPI_COMM_WORLD,req(6),ierr)
	         CALL MPI_IRECV(hy(-1,-1,nz-1),count,MPI_DOUBLE_PRECISION,ind_process+1,2,MPI_COMM_WORLD,req(7),ierr)
	         CALL MPI_ISEND(hy(-1,-1,nz-2),count,MPI_DOUBLE_PRECISION,ind_process+1,3,MPI_COMM_WORLD,req(8),ierr)
	      END IF
	   END IF
	   CALL MPI_WAITALL(8,req,statuses,ierr)
	   CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
   
  END IF
  END SUBROUTINE PARALLEL_H_EXCHANGE


       


	

	end program fdtdmpi1
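The file does not show the time loop that drives the exchange. As a hedged sketch of how such a routine is typically used in an FDTD-MPI code (UPDATE_H, UPDATE_E, and nmax are hypothetical stand-ins, not names from the original file), the H ghost layers are refreshed between the H update and the E update of each step; a matching exchange for the E components would be needed if the E update also reads ghost values:

	! Hedged usage sketch; UPDATE_H, UPDATE_E and nmax are hypothetical.
	do n = 1, nmax
	   call UPDATE_H()              ! advance H on the local subdomain
	   call PARALLEL_H_EXCHANGE()   ! refresh H ghost layers across the cut
	   call UPDATE_E()              ! advance E using the refreshed H values
	end do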
