
📄 fcoll_test.f.in

📁 MPI parallel-computing code in C/C++; it compiles with Visual C++ or gcc and can be used to set up a parallel-computing test environment.
      program main
      implicit none
      include 'mpif.h'
      @F77MPIOINC@

!     Fortran equivalent of coll_test.c

      integer FILESIZE
      parameter (FILESIZE=32*32*32*4)

!     A 32^3 array. For other array sizes, change FILESIZE above and
!     array_of_gsizes below.

!     Uses collective I/O. Writes a 3D block-distributed array to a file
!     corresponding to the global array in row-major (C) order, reads it
!     back, and checks that the data read is correct.
!     Note that the file access pattern is noncontiguous.

      integer newtype, i, ndims, array_of_gsizes(3)
      integer order, intsize, nprocs, j, array_of_distribs(3)
      integer array_of_dargs(3), array_of_psizes(3)
      integer readbuf(FILESIZE), writebuf(FILESIZE), bufcount
      integer mynod, tmpbuf(FILESIZE), array_size, argc, iargc
      integer fh, status(MPI_STATUS_SIZE), request, ierr
      character*1024 str    ! used to store the filename
      integer errs, toterrs
      @FORTRAN_MPI_OFFSET@ disp
      @FTESTDEFINE@

      errs = 0
      call MPI_INIT(ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, mynod, ierr)

!     process 0 takes the file name as a command-line argument and
!     broadcasts it to other processes

      if (mynod .eq. 0) then
         argc = @F77IARGC@
         i = 0
         @F77GETARG@
         do while ((i .lt. argc) .and. (str .ne. '-fname'))
            i = i + 1
            @F77GETARG@
         end do
         if (i .ge. argc) then
            print *
            print *, '*#  Usage: fcoll_test -fname filename'
            print *
            call MPI_ABORT(MPI_COMM_WORLD, 1, ierr)
         end if
         i = i + 1
         @F77GETARG@
         call MPI_BCAST(str, 1024, MPI_CHARACTER, 0,                    &
     &        MPI_COMM_WORLD, ierr)
      else
         call MPI_BCAST(str, 1024, MPI_CHARACTER, 0,                    &
     &        MPI_COMM_WORLD, ierr)
      end if

!     create the distributed array filetype

      ndims = 3
      order = MPI_ORDER_FORTRAN
      array_of_gsizes(1) = 32
      array_of_gsizes(2) = 32
      array_of_gsizes(3) = 32
      array_of_distribs(1) = MPI_DISTRIBUTE_BLOCK
      array_of_distribs(2) = MPI_DISTRIBUTE_BLOCK
      array_of_distribs(3) = MPI_DISTRIBUTE_BLOCK
      array_of_dargs(1) = MPI_DISTRIBUTE_DFLT_DARG
      array_of_dargs(2) = MPI_DISTRIBUTE_DFLT_DARG
      array_of_dargs(3) = MPI_DISTRIBUTE_DFLT_DARG
      do i=1, ndims
         array_of_psizes(i) = 0
      end do
      call MPI_DIMS_CREATE(nprocs, ndims, array_of_psizes, ierr)
      call MPI_TYPE_CREATE_DARRAY(nprocs, mynod, ndims,                 &
     &     array_of_gsizes, array_of_distribs, array_of_dargs,          &
     &     array_of_psizes, order, MPI_INTEGER, newtype, ierr)
      call MPI_TYPE_COMMIT(newtype, ierr)

!     initialize writebuf

      call MPI_TYPE_SIZE(newtype, bufcount, ierr)
      call MPI_TYPE_SIZE(MPI_INTEGER, intsize, ierr)
      bufcount = bufcount/intsize
      do i=1, bufcount
         writebuf(i) = 1
      end do
      do i=1, FILESIZE
         tmpbuf(i) = 0
      end do
      call MPI_IRECV(tmpbuf, 1, newtype, mynod, 10, MPI_COMM_WORLD,     &
     &     request, ierr)
      call MPI_SEND(writebuf, bufcount, MPI_INTEGER, mynod, 10,         &
     &     MPI_COMM_WORLD, ierr)
      call MPI_WAIT(request, status, ierr)
      j = 1
      array_size = array_of_gsizes(1) * array_of_gsizes(2) *            &
     &     array_of_gsizes(3)
      do i=1, array_size
         if (tmpbuf(i) .ne. 0) then
            writebuf(j) = i
            j = j + 1
         end if
      end do

!     end of initialization

!     write the array to the file

      call MPI_FILE_OPEN(MPI_COMM_WORLD, str,                           &
     &     MPI_MODE_CREATE+MPI_MODE_RDWR, MPI_INFO_NULL, fh, ierr)
      disp = 0
      call MPI_FILE_SET_VIEW(fh, disp, MPI_INTEGER, newtype, "native",  &
     &     MPI_INFO_NULL, ierr)
      call MPI_FILE_WRITE_ALL(fh, writebuf, bufcount, MPI_INTEGER,      &
     &     status, ierr)
      call MPI_FILE_CLOSE(fh, ierr)

!     now read it back

      call MPI_FILE_OPEN(MPI_COMM_WORLD, str,                           &
     &     MPI_MODE_CREATE+MPI_MODE_RDWR, MPI_INFO_NULL, fh, ierr)
      call MPI_FILE_SET_VIEW(fh, disp, MPI_INTEGER, newtype, "native",  &
     &     MPI_INFO_NULL, ierr)
      call MPI_FILE_READ_ALL(fh, readbuf, bufcount, MPI_INTEGER,        &
     &     status, ierr)
      call MPI_FILE_CLOSE(fh, ierr)

!     check the data read

      do i=1, bufcount
         if (readbuf(i) .ne. writebuf(i)) then
            errs = errs + 1
            print *, 'Node ', mynod, '  readbuf ', readbuf(i),          &
     &          '  writebuf ', writebuf(i), '  i', i
         end if
      end do

      call MPI_TYPE_FREE(newtype, ierr)
      call MPI_Allreduce( errs, toterrs, 1, MPI_INTEGER, MPI_SUM,       &
     $     MPI_COMM_WORLD, ierr )
      if (mynod .eq. 0) then
        if( toterrs .gt. 0 ) then
           print *, 'Found ', toterrs, ' errors'
        else
           print *, ' No Errors'
        endif
      endif

      call MPI_FINALIZE(ierr)
      stop
      end
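For readers following the C/C++ side of the package, the sketch below shows the core pattern that the Fortran test above exercises: build a block-distributed darray filetype, set it as the file view, write each process's local block collectively, then read it back and verify. This is a minimal illustrative sketch, not the original coll_test.c; the output filename "testfile", the rank-based fill values, and the simplified error reporting are assumptions made here for the example.

/* Minimal sketch (not the original coll_test.c) of collective I/O with
 * a block-distributed darray filetype, mirroring fcoll_test.f.in above.
 * The filename "testfile" and the fill pattern are assumptions. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define N 32   /* 32^3 global array, as in the Fortran test */

int main(int argc, char **argv)
{
    int nprocs, rank, i, errs = 0, toterrs;
    int gsizes[3]   = {N, N, N};
    int distribs[3] = {MPI_DISTRIBUTE_BLOCK, MPI_DISTRIBUTE_BLOCK,
                       MPI_DISTRIBUTE_BLOCK};
    int dargs[3]    = {MPI_DISTRIBUTE_DFLT_DARG, MPI_DISTRIBUTE_DFLT_DARG,
                       MPI_DISTRIBUTE_DFLT_DARG};
    int psizes[3]   = {0, 0, 0};
    int bufcount, intsize;
    int *writebuf, *readbuf;
    MPI_Datatype filetype;
    MPI_File fh;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Build this rank's block-distributed darray filetype. */
    MPI_Dims_create(nprocs, 3, psizes);
    MPI_Type_create_darray(nprocs, rank, 3, gsizes, distribs, dargs,
                           psizes, MPI_ORDER_C, MPI_INT, &filetype);
    MPI_Type_commit(&filetype);

    /* Local element count = (filetype size in bytes) / sizeof(int). */
    MPI_Type_size(filetype, &bufcount);
    MPI_Type_size(MPI_INT, &intsize);
    bufcount /= intsize;

    writebuf = malloc(bufcount * sizeof(int));
    readbuf  = malloc(bufcount * sizeof(int));
    for (i = 0; i < bufcount; i++)
        writebuf[i] = rank * 100000 + i;   /* assumed fill pattern */

    /* Collective write through the darray file view. */
    MPI_File_open(MPI_COMM_WORLD, "testfile",
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, filetype, "native", MPI_INFO_NULL);
    MPI_File_write_all(fh, writebuf, bufcount, MPI_INT, MPI_STATUS_IGNORE);
    MPI_File_close(&fh);

    /* Collective read back through the same view, then verify. */
    MPI_File_open(MPI_COMM_WORLD, "testfile", MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, filetype, "native", MPI_INFO_NULL);
    MPI_File_read_all(fh, readbuf, bufcount, MPI_INT, MPI_STATUS_IGNORE);
    MPI_File_close(&fh);

    for (i = 0; i < bufcount; i++)
        if (readbuf[i] != writebuf[i]) errs++;

    MPI_Allreduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0) {
        if (toterrs > 0) printf("Found %d errors\n", toterrs);
        else             printf(" No Errors\n");
    }

    MPI_Type_free(&filetype);
    free(writebuf);
    free(readbuf);
    MPI_Finalize();
    return 0;
}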
