parutilitiesmodule.f90

!
! !DESCRIPTION:
!     This routine splits the PEs into groups.  This is currently only
!     supported in MPI mode.  Read the chapter on MPI_COMM_SPLIT
!     thoroughly.
!
! !SYSTEM ROUTINES:
!     MPI_COMM_SPLIT, MPI_COMM_SIZE, MPI_COMM_RANK
!
! !REVISION HISTORY:
!   97.03.20   Sawyer     Creation
!   97.04.16   Sawyer     Cleaned up for walk-through
!   97.07.03   Sawyer     Reformulated documentation
!   97.12.01   Sawyer     Xnodes and Ynodes are explicit arguments
!   97.12.23   Lucchesi   Added call to MPI_INTERCOMM_CREATE
!   98.01.06   Sawyer     Additions from RL for I/O Nodes
!   98.02.02   Sawyer     Added the Cartesian information
!   98.02.05   Sawyer     Removed the use of intercommunicators
!   98.04.16   Sawyer     Removed all use of MPI_CART (CommRow redefined)
!   99.01.10   Sawyer     CommRow now defined for all rows
!   00.07.09   Sawyer     Removed 2D computational mesh
!   00.08.08   Sawyer     Redefined as wrapper to mpi_comm_split
!
!EOP
!-----------------------------------------------------------------------
!BOC
! !LOCAL VARIABLES:
      INTEGER  Ierror

      CPP_ENTER_PROCEDURE( "PARSPLIT" )
#if !defined( USE_ARENAS )
!
!     Split the communicators
!
      CALL MPI_COMM_SPLIT( InComm, Color, InID, Comm, Ierror )
      IF ( Comm .ne. MPI_COMM_NULL ) THEN
        CALL MPI_COMM_RANK( Comm, MyID, Ierror )
        CALL MPI_COMM_SIZE( Comm, Nprocs, Ierror )
      ELSE
!
!     This PE does not participate: mark with impossible values
!
        MyID = -1
        Nprocs = -1
      ENDIF
#endif
      CPP_LEAVE_PROCEDURE( "PARSPLIT" )
      RETURN
!EOC
      END SUBROUTINE ParSplit
!-----------------------------------------------------------------------
!-----------------------------------------------------------------------
!BOP
! !IROUTINE:   ParFree --- Free a communicator
!
! !INTERFACE:
      SUBROUTINE ParFree( InComm )
!
! !USES:
      IMPLICIT NONE
! !INPUT PARAMETERS:
      INTEGER InComm
!
! !DESCRIPTION:
!     This routine frees a communicator created with ParSplit
!
! !REVISION HISTORY:
!   97.09.11   Sawyer     Creation, to complement ParSplit
!   00.07.24   Sawyer     Revamped ParMerge into a free communicator
!
! !LOCAL VARIABLES:
      INTEGER  Ierror
!
!EOP
!-----------------------------------------------------------------------
!BOC
      CPP_ENTER_PROCEDURE( "PARFREE" )
!
#if !defined( USE_ARENAS )
      CALL MPI_COMM_FREE( InComm, Ierror )
#endif
      CPP_LEAVE_PROCEDURE( "PARFREE" )
      RETURN
!EOC
      END SUBROUTINE ParFree
!-----------------------------------------------------------------------
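!
! Added usage sketch (not part of the original library):  a typical
! ParSplit/ParFree sequence.  The argument order is inferred from the
! body of ParSplit above (input communicator, color, caller ID, then the
! returned communicator, rank, and size); check the actual interface
! declaration before relying on it.  OrigComm, MyColor and MyOrigID are
! hypothetical caller variables.
!
!      INTEGER :: NewComm, MyID, Nprocs
!      CALL ParSplit( OrigComm, MyColor, MyOrigID, NewComm, MyID, Nprocs )
!      IF ( MyID /= -1 ) THEN
!        ! This PE is in the new group; work on NewComm here
!      ENDIF
!      CALL ParFree( NewComm )
!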
!-----------------------------------------------------------------------
!BOP
! !IROUTINE:   ParPatternGhost --- Create pattern for given ghosting
!
! !INTERFACE:
      SUBROUTINE ParPatternGhost( InComm, Ghost, Pattern )
!
! !USES:
      USE decompmodule, ONLY : DecompGlobalToLocal, DecompLocalToGlobal
      USE ghostmodule, ONLY : GhostType, GhostInfo
      IMPLICIT NONE
! !INPUT PARAMETERS:
      INTEGER,  INTENT( IN )               :: InComm  ! Communicator
      TYPE(GhostType),  INTENT( IN )       :: Ghost   ! Ghost region definition
! !OUTPUT PARAMETERS:
      TYPE(ParPatternType), INTENT( OUT )  :: Pattern ! Comm Pattern
!
! !DESCRIPTION:
!     This routine constructs a communication pattern from the ghost
!     region definition.  That is, the resulting communication pattern
!     can be used in ParBegin/EndTransfer with the ghosted arrays as
!     inputs.
!
! !SYSTEM ROUTINES:
!     MPI_TYPE_INDEXED
!
! !REVISION HISTORY:
!   01.02.10   Sawyer     Creation
!   01.06.02   Sawyer     Renamed ParPatternGhost
!
!EOP
!-----------------------------------------------------------------------
!BOC
! !LOCAL VARIABLES:
      INTEGER  i, j, ipe, pe, Iam, GroupSize, Num, Length, Ptr, Ierror
      INTEGER  Global, End, Local, GlobalSize, LocalSize, BorderSize
      INTEGER, ALLOCATABLE :: InVector(:), OutVector(:)
      INTEGER, ALLOCATABLE :: LenInVector(:), LenOutVector(:)

      CPP_ENTER_PROCEDURE( "PARPATTERNGHOST" )
!
! First request the needed ghost values from other processors.
!
#if defined( USE_ARENAS )
! Temporary solution until communicators are implemented
      Pattern%Comm = 0
      GroupSize = GSize
      Iam = GID
#else
      CALL MPI_COMM_DUP( InComm, Pattern%Comm, Ierror )
      CALL MPI_COMM_SIZE( InComm, GroupSize, Ierror )
      CALL MPI_COMM_RANK( InComm, Iam, Ierror )
#endif
      Pattern%Iam  = Iam
      Pattern%Size = GroupSize

      ALLOCATE( Pattern%SendDesc( GroupSize ) )
      ALLOCATE( Pattern%RecvDesc( GroupSize ) )
!
! Temporary variables
!
      ALLOCATE( LenInVector( GroupSize ) )
      ALLOCATE( LenOutVector( GroupSize ) )

      CALL GhostInfo( Ghost,GroupSize,GlobalSize,LocalSize,BorderSize )
      ALLOCATE( InVector( 2*BorderSize ) )
      ALLOCATE( OutVector( 2*LocalSize ) )
!
! A rather complicated loop to define the local ghost region.
! The concept is the following:  go through all the points in the
! border data structure.   It contains global indices of the points
! which have to be copied over from neighboring PEs.  These indices
! are collected into InVector for transmission to those PEs, in
! effect informing them of the local PE's requirements.
!
! A special case is supported:  if the ghost domain wraps around
! onto the domain of the local PE!  This is very tricky, because
! the index space in both Ghost%Border and Ghost%Local MUST be
! unique for DecompGlobalToLocal to work.   Solution:  ghost
! points are marked with the negative value of the needed domain
! value in both Ghost%Border and Ghost%Local.  These are "snapped
! over" to the true global index with the ABS function, so that
! they can be subsequently found in the true local domain.
!
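!
! Added illustration (hypothetical values):  suppose the local PE owns
! global points 1-10 and its ghost region wraps around so that it also
! needs its own point 3.  That point is then stored as -3 in both
! Ghost%Border and Ghost%Local; the ABS calls below recover 3 for the
! request sent to the owning PE, while the signed tag -3 is what
! DecompGlobalToLocal resolves in Ghost%Local, i.e., the position of
! the ghost copy rather than that of the original point.
!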
      j = 1
      DO ipe=1, GroupSize
        Num = SIZE(Ghost%Border%Head(ipe)%StartTags)
        Length = 0
        DO i = 1, Num
          Global = Ghost%Border%Head(ipe)%StartTags(i)
          IF ( Global /= 0 ) THEN
            Length = Length + 1
            End    = Ghost%Border%Head(ipe)%EndTags(i)
            InVector(j) = ABS(Global)
            InVector(j+1) = ABS(End)
            CALL DecompGlobalToLocal( Ghost%Local, Global, Local, Pe )
            OutVector(Length) = Local-1                ! Zero-based address
            OutVector(Length+Num) = End - Global+1     ! Chunk size
            j = j + 2
          ENDIF
        ENDDO
        LenInVector( ipe ) = 2*Length
!
! Set the receive buffer descriptor
!
#if defined(DEBUG_PARPATTERNGHOST)
        print *,"Iam",Iam,"Pe",Ipe-1,"Lens",OutVector(Num+1:Num+Length), &
             "Displacements", OutVector(1:Length)
#endif
#if defined( USE_ARENAS )
! This code is currently untested
         ALLOCATE( Pattern%RecvDesc(ipe)%Displacements(Length) )
         ALLOCATE( Pattern%RecvDesc(ipe)%BlockSizes(Length) )
         DO i=1, Length
           Pattern%RecvDesc(ipe)%Displacements(i) = OutVector(i)
           Pattern%RecvDesc(ipe)%BlockSizes(i)    = OutVector(Num+i)
         ENDDO
#else
        CALL MPI_TYPE_INDEXED( Length, OutVector(Num+1), OutVector,      &
                               CPP_MPI_REAL8, Ptr, Ierror )
        CALL MPI_TYPE_COMMIT( Ptr, Ierror )
        Pattern%RecvDesc( ipe ) = Ptr
#endif
      ENDDO
!
! Everybody exchanges the needed information
!
#if defined(DEBUG_PARPATTERNGHOST)
      print *, "iam", iam, "In", LenInVector,                            &
                InVector( 1:SUM(LenInVector) )
#endif
      CALL ParExchangeVectorInt( InComm, LenInVector, InVector,          &
                                     LenOutVector, OutVector )
#if defined(DEBUG_PARPATTERNGHOST)
      print *, "iam", iam, "Out", LenOutVector,                          &
                OutVector( 1:SUM(LenOutVector) )
#endif
!
! Now everyone has the segments which need to be sent to the
! immediate neighbors.  Save these in PatternType.
!
      j = 1
      DO ipe = 1, GroupSize
        Num = LenOutVector(ipe) / 2
        DO i = 1, Num
          CALL DecompGlobalToLocal( Ghost%Local,OutVector(j),Local,pe )
          InVector(i) = Local-1
          InVector(i+Num) = OutVector(j+1) - OutVector(j) + 1
          j = j + 2
        ENDDO
#if defined(DEBUG_PARPATTERNGHOST)
        print *, "Iam", Iam, "To", ipe-1, "InVector",                    &
              InVector(1:Num), "block size", InVector(Num+1:2*Num)
#endif
#if defined( USE_ARENAS )
         ALLOCATE( Pattern%SendDesc(ipe)%Displacements(Num) )
         ALLOCATE( Pattern%SendDesc(ipe)%BlockSizes(Num) )
         DO i=1, Num
           Pattern%SendDesc(ipe)%Displacements(i) = InVector(i)
           Pattern%SendDesc(ipe)%BlockSizes(i)    = InVector(Num+i)
         ENDDO
#else
        CALL MPI_TYPE_INDEXED( Num, InVector(Num+1), InVector,           &
                               CPP_MPI_REAL8, Ptr, Ierror )
        CALL MPI_TYPE_COMMIT( Ptr, Ierror )
        Pattern%SendDesc( ipe ) = Ptr
#endif
      ENDDO
!
! Clean up the locally allocated variables
!
      DEALLOCATE( OutVector )
      DEALLOCATE( InVector )
      DEALLOCATE( LenOutVector )
      DEALLOCATE( LenInVector )

      CPP_LEAVE_PROCEDURE( "PARPATTERNGHOST" )
      RETURN
!EOC
      END SUBROUTINE ParPatternGhost
!-----------------------------------------------------------------------
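!
! Added usage sketch (not part of the original library):  per the
! description above, the pattern built by ParPatternGhost drives the
! ParBegin/EndTransfer pair on the ghosted arrays.  The exact names and
! argument lists of the transfer routines are assumed here, and
! GhostedIn/GhostedOut are hypothetical ghosted arrays.
!
!      TYPE(ParPatternType) :: Pattern
!      CALL ParPatternGhost( InComm, Ghost, Pattern )
!      CALL ParBeginTransfer( Pattern, GhostedIn, GhostedOut )
!      ! ... overlap independent computation here ...
!      CALL ParEndTransfer( Pattern, GhostedIn, GhostedOut )
!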
!-----------------------------------------------------------------------
!BOP
! !IROUTINE:   ParPatternDecompToDecomp --- Create pattern between decomps
!
! !INTERFACE:
      SUBROUTINE ParPatternDecompToDecomp( InComm, DA, DB, Pattern )
!
! !USES:
      USE decompmodule, ONLY : DecompType, DecompGlobalToLocal, DecompInfo
      IMPLICIT NONE
! !INPUT PARAMETERS:
      INTEGER,  INTENT( IN )               :: InComm  ! Communicator
      TYPE(DecompType),  INTENT( IN )      :: DA      ! Source Decomp Desc
      TYPE(DecompType),  INTENT( IN )      :: DB      ! Target Decomp Desc
! !OUTPUT PARAMETERS:
      TYPE(ParPatternType), INTENT( OUT )  :: Pattern ! Comm Pattern
!
! !DESCRIPTION:
!     This routine constructs a communication pattern for a
!     transformation from one decomposition to another, i.e., a
!     so-called "transpose".  The resulting communication pattern
!     can be used in ParBegin/EndTransfer with the decomposed
!     arrays as inputs.
!
! !SYSTEM ROUTINES:
!
! !BUGS:
!     Under development
!
! !REVISION HISTORY:
!   01.05.29   Sawyer     Creation from RedistributeCreate
!   01.07.13   Sawyer     Rewritten to minimize DecompGlobalToLocal
!
!EOP
!-----------------------------------------------------------------------
!BOC
! !LOCAL VARIABLES:
      LOGICAL NewIpe
      INTEGER I, J, Tag, Local, Pe, LenB, JB, Ipe, Num, Inc, Off
      INTEGER Ptr                                ! Pointer type
      INTEGER GroupSize, Iam, Ierror
      INTEGER OldPe, TotalPtsA, NpesA, TotalPtsB, NpesB
      INTEGER, ALLOCATABLE :: Count(:)           ! # segments for each recv PE
      INTEGER, ALLOCATABLE :: CountOut(:)        ! # segments for each send PE
      INTEGER, ALLOCATABLE :: DisplacementsA(:)  ! Generic displacements
      INTEGER, ALLOCATABLE :: BlockSizesA(:)     ! Generic block sizes
      INTEGER, ALLOCATABLE :: LocalA(:)          ! Generic Local indices
      INTEGER, ALLOCATABLE :: DisplacementsB(:)  ! Displacements for B
      INTEGER, ALLOCATABLE :: BlockSizesB(:)     ! Block sizes for B
      INTEGER, ALLOCATABLE :: LocalB(:)          ! Local indices for B
      INTEGER, ALLOCATABLE :: PeB(:)             ! Processor element numbers

      CPP_ENTER_PROCEDURE( "PARPATTERNDECOMPTODECOMP" )

      CALL DecompInfo( DA, NpesA, TotalPtsA )
      CALL DecompInfo( DB, NpesB, TotalPtsB )

#if defined( USE_ARENAS )
! Communicator is assumed to be over all PEs for now
      GroupSize = Gsize
      Iam = gid
      Pattern%Comm = 0
#else
      CALL MPI_COMM_SIZE( InComm, GroupSize, Ierror )
      CALL MPI_COMM_RANK( InComm, Iam, Ierror )
      CALL MPI_COMM_DUP( InComm, Pattern%Comm, Ierror )
#endif
      Pattern%Size = GroupSize
      Pattern%Iam  = Iam
!
! Allocate the number of entries and list head arrays
!
      CPP_ASSERT_F90( NpesA .EQ. GroupSize )
      CPP_ASSERT_F90( NpesB .EQ. GroupSize )
!
! Allocate the patterns
!
      ALLOCATE( Pattern%SendDesc( GroupSize ) )
      ALLOCATE( Pattern%RecvDesc( GroupSize ) )
!
! Local allocations
!
      ALLOCATE( DisplacementsA( TotalPtsA ) )   ! Allocate for worst case
      ALLOCATE( BlockSizesA( TotalPtsA ) )      ! Allocate for worst case
      ALLOCATE( LocalA( TotalPtsA ) )           ! Allocate for worst case
      ALLOCATE( DisplacementsB( TotalPtsB ) )   ! Allocate for worst case
      ALLOCATE( BlockSizesB( TotalPtsB ) )      ! Allocate for worst case
      ALLOCATE( LocalB( TotalPtsA ) )           ! Allocate for worst case
      ALLOCATE( PeB( TotalPtsB ) )              ! Allocate for worst case

      ALLOCATE( Count( GroupSize ) )
      ALLOCATE( CountOut( GroupSize ) )

      JB        = 0
      Count     = 0
      LenB      = 0
