! parutilitiesmodule.f90
!
! NOTE(extraction artifact): the lines below were a code-viewer header, not
! part of the source. Original text (translated from Chinese): "From 'CCSM
! Research Tools: Community Atmospher[e]' - F90 code - 1,943 lines total -
! page 1 of 5". Font-size / language chrome removed into this comment so the
! file remains valid Fortran.
      print *, iam, "BlockSizes", BlockSizesA(1:ja), DisplacementsA(1:ja), PeA(1:ja), Count
#endif
! NOTE(review): the physical line structure of this region was reconstructed
! after an extraction step collapsed it onto a few lines, fusing comments,
! cpp directives and statements.  Code tokens are unchanged; only line
! breaks and comment delimiters were restored.
      CPP_ASSERT_F90( JA .LE. GlobalSize )
!
! Now create the pattern from the displacements and block sizes
!
      Inc = 0
      DO ipe = 1, GroupSize
!
! Find the segments which are relevant for the sender ipe
! Make compact arrays BlockSizes and Displacements
!
        DO j = 1, ja
          IF ( PeA(j) == ipe-1 ) THEN
            Inc = Inc + 1
            BlockSizesB(Inc) = BlockSizesA(j)
            DisplacementsB(Inc) = DisplacementsA(j)
            GlobalB(Inc)      = GlobalA(j)
          ENDIF
        ENDDO
      ENDDO
!
! Create the receiver communication pattern
!
      Off = 0
      DO ipe = 1, GroupSize
        Num = Count(ipe)
        DO i=1, Num
        ENDDO
#if defined(DEBUG_PARPATTERNGHOSTTOGHOST)
        print *, "Receiver Iam", Iam, "Ipe", Ipe-1, "Num", Num, &
                 "Displacements", DisplacementsB(Off+1:Off+Num), &
                 "BlockSizes", BlockSizesB(Off+1:Off+Num)
#endif
#if defined( USE_ARENAS )
        ALLOCATE( Pattern%RecvDesc(ipe)%Displacements(Num) )
        ALLOCATE( Pattern%RecvDesc(ipe)%BlockSizes(Num) )
        DO i=1, Num
          Pattern%RecvDesc(ipe)%Displacements(i) = DisplacementsB(i+Off)
          Pattern%RecvDesc(ipe)%BlockSizes(i)    = BlockSizesB(i+Off)
        ENDDO
#else
        CALL MPI_TYPE_INDEXED( Num, BlockSizesB(Off+1),DisplacementsB(Off+1), &
     &                         CPP_MPI_REAL8, Ptr, Ierror )
        Pattern%RecvDesc( ipe ) = Ptr
#endif
        Off = Off + Num
      ENDDO
!
! Now communicate what the receiver is expecting to the sender
!
      CALL ParExchangeVectorInt( InComm, Count, GlobalB,                 &
     &                           CountOut, GlobalA  )
      CALL ParExchangeVectorInt( InComm, Count, BlockSizesB,            &
     &                           CountOut, BlockSizesA )
!
! Sender A: BlockSizes and Displacements can now be stored
!
      Off = 0
      DO ipe=1, GroupSize
        Num = CountOut(ipe)
        DO i=1, Num
          CALL DecompGlobalToLocal( GA%Local, GlobalA(i+Off), Local, Pe )
          DisplacementsA(i+Off) = Local-1    ! zero-based displacement
        ENDDO
#if defined(DEBUG_PARPATTERNGHOSTTOGHOST)
        print *, "Sender Iam", Iam, "Ipe", Ipe-1, "Num", Num,  &
                 "Displacements", DisplacementsA(Off+1:Off+Num), &
                 "BlockSizes", BlockSizesA(Off+1:Off+Num)
#endif
#if defined( USE_ARENAS )
        ALLOCATE( Pattern%SendDesc(ipe)%Displacements(Num) )
        ALLOCATE( Pattern%SendDesc(ipe)%BlockSizes(Num) )
        DO i=1, Num
          Pattern%SendDesc(ipe)%Displacements(i) = DisplacementsA(i+Off)
          Pattern%SendDesc(ipe)%BlockSizes(i)    = BlockSizesA(i+Off)
        ENDDO
#else
        CALL MPI_TYPE_INDEXED( Num, BlockSizesA(Off+1),DisplacementsA(Off+1),&
     &                         CPP_MPI_REAL8, Ptr, Ierror )
        Pattern%SendDesc( ipe ) = Ptr
#endif
        Off = Off + Num
      ENDDO

      DEALLOCATE( CountOut )
      DEALLOCATE( Count )
      DEALLOCATE( PeA )
      DEALLOCATE( GlobalA )
      DEALLOCATE( BlockSizesA )
      DEALLOCATE( DisplacementsA )
      DEALLOCATE( GlobalB )
      DEALLOCATE( BlockSizesB )
      DEALLOCATE( DisplacementsB )

      CPP_LEAVE_PROCEDURE( "PARPATTERNGHOSTTOGHOST" )
      RETURN
!EOC
      END SUBROUTINE ParPatternGhostToGhost
!-----------------------------------------------------------------------

!-----------------------------------------------------------------------
!BOP
! !IROUTINE:   ParPatternFree --- Free the communication pattern
!
! !INTERFACE:
      SUBROUTINE ParPatternFree( InComm, Pattern )
!
! !USES:
      IMPLICIT NONE
! !INPUT PARAMETERS:
      INTEGER,  INTENT( IN )                 :: InComm  ! # of PEs
! !INPUT/OUTPUT PARAMETERS:
      TYPE(ParPatternType), INTENT( INOUT )  :: Pattern ! Comm Pattern
!
! !DESCRIPTION:
!     This routine frees a communication pattern.
!
! !SYSTEM ROUTINES:
!     MPI_TYPE_FREE
!
! !BUGS:
!     The MPI_TYPE_FREE statement does not seem to work with FFC
!
! !REVISION HISTORY:
!   01.02.10   Sawyer     Creation
!
!EOP
!-----------------------------------------------------------------------
!BOC
! !LOCAL VARIABLES:
      INTEGER  ipe, GroupSize, Pointer, Ierror
      CPP_ENTER_PROCEDURE( "PARPATTERNFREE" )
!
! First request the needed ghost values from other processors.
!
#if defined( USE_ARENAS )
      DO ipe=1, Pattern%Size
        DEALLOCATE( Pattern%RecvDesc(ipe)%Displacements )
        DEALLOCATE( Pattern%RecvDesc(ipe)%BlockSizes )
        DEALLOCATE( Pattern%SendDesc(ipe)%Displacements )
        DEALLOCATE( Pattern%SendDesc(ipe)%BlockSizes )
      ENDDO
#else
      CALL MPI_COMM_SIZE( InComm, GroupSize, Ierror )
!
! Free all the MPI derived types
!
      DO ipe=1, Pattern%Size
        Pointer = Pattern%SendDesc(ipe)
        CALL MPI_TYPE_FREE( Pointer, Ierror )
        Pointer = Pattern%RecvDesc(ipe)
        CALL MPI_TYPE_FREE( Pointer, Ierror )
      ENDDO
#endif
      DEALLOCATE( Pattern%SendDesc )
      DEALLOCATE( Pattern%RecvDesc )
      CPP_LEAVE_PROCEDURE( "PARPATTERNFREE" )
      RETURN
!EOC
      END SUBROUTINE ParPatternFree
!-----------------------------------------------------------------------

!-----------------------------------------------------------------------
!BOP
! !IROUTINE: ParScatterReal --- Scatter slice to all PEs
!
! !INTERFACE:
      SUBROUTINE ParScatterReal ( InComm, Root, Slice, Decomp, Local )
! !USES:
      USE decompmodule, ONLY:  DecompType, Lists
      IMPLICIT NONE
! !INPUT PARAMETERS:
      INTEGER, INTENT( IN )          :: InComm       ! Communicator
      INTEGER, INTENT( IN )          :: Root         ! Root PE
      REAL(CPP_REAL8), INTENT( IN )  :: Slice(*)     ! Global Slice
      TYPE(DecompType), INTENT( IN ) :: Decomp       ! Decomp information
! !OUTPUT PARAMETERS:
      REAL(CPP_REAL8), INTENT( OUT ) :: Local(*)     ! Local Slice
! !DESCRIPTION:
!     Given a decomposition of the domain, dole out a slice
!     (one-dimensional array) to all the constituent PEs as described
!     by the decomposition Decomp.
!
!
! !SYSTEM ROUTINES:
!     MPI_ISEND, MPI_RECV, MPI_COMM_RANK
!
! !REVISION HISTORY:
!   97.04.14   Sawyer     Creation
!   97.04.16   Sawyer     Cleaned up for walk-through
!   97.05.01   Sawyer     Use Decomp%Comm for all local info
!   97.05.18   Sawyer     DecompType has moved to ParUtilitiesTypes
!   97.05.29   Sawyer     Changed 2-D arrays to 1-D
!   97.07.03   Sawyer     Reformulated documentation
!   97.07.22   Sawyer     DecompType has moved to DecompModule
!   97.12.01   Sawyer     Changed MPI_SSEND to MPI_ISEND
!   97.12.05   Sawyer     Added InComm and Root as arguments
!   97.12.05   Sawyer     Added logic to support intercommunicators
!   98.01.24   Sawyer     Removed dependence on MPI derived types TESTED
!   98.02.05   Sawyer     Removed the use of intercommunicators
!   98.03.30   Sawyer     Stats dimension corrected: Gsize*MPI_STATUS_SIZE
!   99.01.19   Sawyer     Dropped assumed-size arrays
!   00.07.07   Sawyer     Removed "1D" references
!   00.07.23   Sawyer     Implementation with shared memory arenas
!
!EOP
!-----------------------------------------------------------------------
!BOC
! !LOCAL VARIABLES:
      INTEGER Ierror, I, J, K, L, Iam, GroupSize, Reqs( Gsize )
#if !defined( USE_ARENAS )
      INTEGER Status( MPI_STATUS_SIZE ), Stats( Gsize*MPI_STATUS_SIZE )
      REAL(CPP_REAL8), ALLOCATABLE    :: SendBuf(:)
#endif
!
      CPP_ENTER_PROCEDURE( "PARSCATTERREAL" )
!
#if defined( USE_ARENAS )
!
! Pull the local process information out of the communicator
!
!!!      Iam = MOD( Comm, MAX_PES )
!!!      L   = Comm / MAX_PES
!!!      GroupSize = MOD( L, MAX_PES ) + 1
!!!      L   = L / MAX_PES
!!!      Color = MOD( L, MAX_PES )
!
! For now, Iam and GroupSize take on the global communicator values
!
      Iam = GID
      GroupSize = Gsize
      IF ( Iam .EQ. Root ) THEN
        L = 0
        DO I = 1, GroupSize
!
! Pick out the array sections to be sent.
! This is the inverse of the operation in ParGather
!
          DO J = 1, SIZE( Decomp%HEAD(I)%StartTags )
            DO K = Decomp%HEAD(I)%StartTags(J),Decomp%HEAD(I)%EndTags(J)
              L = L+1
              DataBuf(L,1,1) = Slice(K)
            ENDDO
          ENDDO
        ENDDO
      ENDIF
!
! Barrier: all PEs participate
!
      CALL mlp_barrier(gid,gsize)
!
! All receive from the root.
!
! The local array may be larger than that specified in the decomposition
!
      L = 0
      IF ( Iam .GT. 0 ) L = SUM( Decomp%NumEntries(1:Iam) )
      DO I=1, Decomp%NumEntries(Iam+1)
        Local(I) = DataBuf(L+I,1,1)
      ENDDO
!
! The following is needed to ensure that DataBuf can now be reused
!
      CALL mlp_barrier(gid,gsize)
#else
      CALL MPI_COMM_RANK( InComm, Iam, Ierror )
      CALL MPI_COMM_SIZE( InComm, GroupSize, Ierror )
      IF ( Iam .EQ. Root ) THEN
        ALLOCATE( SendBuf( SUM( Decomp%NumEntries ) ) )
        L = 0
        DO I = 1, GroupSize
!
! Pick out the array sections to be sent.
! This is the inverse of the operation in ParGather
!
          DO J = 1, SIZE( Decomp%HEAD(I)%StartTags )
            DO K = Decomp%HEAD(I)%StartTags(J),Decomp%HEAD(I)%EndTags(J)
              L = L+1
              SendBuf(L) = Slice(K)
            ENDDO
          ENDDO
!
! This is a non-blocking send. SendBuf cannot be immediately deallocated
!
! WARNING: F90-MPI inconsistency: make sure the indexing below always works
!
          CALL MPI_ISEND( SendBuf(L-Decomp%NumEntries(I)+1),             &
                          Decomp%NumEntries(I), CPP_MPI_REAL8,           &
                          I-1, 0, InComm, Reqs(I), Ierror )
        ENDDO
      ENDIF
!
! All receive from the root.
!
! The local array may be larger than that specified in the decomposition
!
      CALL MPI_RECV( Local, Decomp%NumEntries(Iam+1),                    &
                     CPP_MPI_REAL8,                                      &
                     Root, 0, InComm, Status, Ierror )
!
! Experience shows that we should wait for all the non-blocking
! PEs to check in, EVEN THOUGH THE MPI_RECV HAS COMPLETED !!
!
      IF ( Iam .EQ. Root ) THEN
        CALL MPI_WAITALL( GroupSize, Reqs, Stats, Ierror )
        DEALLOCATE( SendBuf )
      ENDIF
!
! The following may be needed on some platforms to avoid an MPI bug.
!
      CALL MPI_BARRIER( InComm, Ierror )
#endif
      CPP_LEAVE_PROCEDURE( "PARSCATTERREAL" )
      RETURN
!EOC
      END SUBROUTINE ParScatterReal
!-----------------------------------------------------------------------

!-----------------------------------------------------------------------
!BOP
! !IROUTINE: ParScatterReal4 --- Scatter slice to all PEs
!
! !INTERFACE:
      SUBROUTINE ParScatterReal4 ( InComm, Root, Slice, Decomp, Local )
! !USES:
      USE decompmodule, ONLY:  DecompType, Lists
      IMPLICIT NONE
! !INPUT PARAMETERS:
      INTEGER, INTENT( IN )          :: InComm       ! Communicator
      INTEGER, INTENT( IN )          :: Root         ! Root PE
      REAL(CPP_REAL4), INTENT( IN )  :: Slice(*)     ! Global Slice
      TYPE(DecompType), INTENT( IN ) :: Decomp       ! Decomp information
! !OUTPUT PARAMETERS:
      REAL(CPP_REAL4), INTENT( OUT ) :: Local(*)     ! Local Slice
! !DESCRIPTION:
!     Given a decomposition of the domain, dole out a slice
!     (one-dimensional array) to all the constituent PEs as described
!     by the decomposition Decomp.
!
!
! !SYSTEM ROUTINES:
!     MPI_ISEND, MPI_RECV, MPI_COMM_RANK
!
! !REVISION HISTORY:
!   97.04.14   Sawyer     Creation
!   97.04.16   Sawyer     Cleaned up for walk-through
!   97.05.01   Sawyer     Use Decomp%Comm for all local info
!   97.05.18   Sawyer     DecompType has moved to ParUtilitiesTypes
!   97.05.29   Sawyer     Changed 2-D arrays to 1-D
!   97.07.03   Sawyer     Reformulated documentation
!   97.07.22   Sawyer     DecompType has moved to DecompModule
!   97.12.01   Sawyer     Changed MPI_SSEND to MPI_ISEND
!   97.12.05   Sawyer     Added InComm and Root as arguments
!   97.12.05   Sawyer     Added logic to support intercommunicators
!   98.01.24   Sawyer     Removed dependence on MPI derived types TESTED
!   98.02.05   Sawyer     Removed the use of intercommunicators
!   98.03.30   Sawyer     Stats dimension corrected: Gsize*MPI_

! NOTE(extraction artifact): the lines below were a code-viewer footer, not
! part of the source. Original text (translated from Chinese): "Keyboard
! shortcuts - Copy code: Ctrl+C; Search code: Ctrl+F; Fullscreen: F11;
! Increase font size: Ctrl+=; Decrease font size: Ctrl+-; Show shortcuts: ?".
! Converted to comments so the file remains valid Fortran.