Changeset 4461


Timestamp:
Mar 12, 2020 4:51:59 PM (4 years ago)
Author:
raasch
Message:

extensions to allow usage of alternative communicators in exchange_horiz
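
The change adds an optional third argument to exchange_horiz that selects one of four predefined virtual PE grids (set up in init_pegrid.f90 below). A minimal usage sketch (the array s and the ghost-layer count nbgp are placeholders, not part of this changeset):

    USE exchange_horiz_mod,                                                    &
        ONLY:  exchange_horiz

!
!-- Default exchange, using the main communicator comm2d
    CALL exchange_horiz( s, nbgp )

!
!-- Exchange using configuration 2 (cyclic along x, non-cyclic along y),
!-- e.g. for a quantity with its own lateral boundary conditions
    CALL exchange_horiz( s, nbgp, alternative_communicator = 2 )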

Location:
palm/trunk/SOURCE
Files:
3 edited

  • palm/trunk/SOURCE/exchange_horiz_mod.f90

    r4457 r4461  
    2525! -----------------
    2626! $Id$
     27! optional communicator added to exchange_horiz
     28!
     29! 4457 2020-03-11 14:20:43Z raasch
    2730! routine has been modularized, file exchange_horiz_2d has been merged
    2831!
     
    4548! Description:
    4649! ------------
    47 !> Exchange of lateral boundary values (parallel computers) and cyclic
    48 !> lateral boundary conditions, respectively.
     50!> Exchange of ghost point layers for subdomains (in parallel mode) and setting
      51!> of cyclic lateral boundary conditions for the total domain.
    4952!------------------------------------------------------------------------------!
    5053 MODULE exchange_horiz_mod
     
    8487
    8588
    86  SUBROUTINE exchange_horiz( ar, nbgp_local)
     89!------------------------------------------------------------------------------!
     90! Description:
     91! ------------
     92!> Exchange of ghost point layers for subdomains (in parallel mode) and setting
     93!> of cyclic lateral boundary conditions for the total domain.
     94!> This routine is for REAL 3d-arrays.
     95!------------------------------------------------------------------------------!
     96 SUBROUTINE exchange_horiz( ar, nbgp_local, alternative_communicator)
    8797
    8898    USE control_parameters,                                                    &
     
    105115#endif
    106116
    107     INTEGER(iwp) ::  nbgp_local  !<
     117    INTEGER(iwp), OPTIONAL ::  alternative_communicator  !< alternative MPI communicator to be used
     118    INTEGER(iwp) ::  communicator  !< communicator that is used as argument in MPI calls
     119    INTEGER(iwp) ::  left_pe       !< id of left pe that is used as argument in MPI calls
     120    INTEGER(iwp) ::  nbgp_local    !< number of ghost point layers
     121    INTEGER(iwp) ::  north_pe      !< id of north pe that is used as argument in MPI calls
     122    INTEGER(iwp) ::  right_pe      !< id of right pe that is used as argument in MPI calls
     123    INTEGER(iwp) ::  south_pe      !< id of south pe that is used as argument in MPI calls
    108124   
    109125    REAL(wp), DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local,               &
    110                         nxl-nbgp_local:nxr+nbgp_local) ::  ar  !<
     126                        nxl-nbgp_local:nxr+nbgp_local) ::  ar !< 3d-array for which exchange is done
    111127                       
    112128
     
    136152#endif
    137153
     154!
     155!-- Set the communicator to be used
     156    IF ( PRESENT( alternative_communicator ) )  THEN
     157!
     158!--    Alternative communicator is to be used
     159       communicator = communicator_configurations(alternative_communicator)%mpi_communicator
     160       left_pe  = communicator_configurations(alternative_communicator)%pleft
     161       right_pe = communicator_configurations(alternative_communicator)%pright
     162       south_pe = communicator_configurations(alternative_communicator)%psouth
     163       north_pe = communicator_configurations(alternative_communicator)%pnorth
     164
     165    ELSE
     166!
     167!--    Main communicator is to be used
     168       communicator = comm2d
     169       left_pe  = pleft
     170       right_pe = pright
     171       south_pe = psouth
     172       north_pe = pnorth
     173
     174    ENDIF
     175
    138176#if defined( __parallel )
    139177
     
    154192!
    155193!--       Send left boundary, receive right one (synchronous)
    156           CALL MPI_SENDRECV(                                                   &
    157               ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
    158               ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
    159               comm2d, status, ierr )
     194          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), left_pe,  0,    &
     195                             ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), right_pe, 0,    &
     196                             communicator, status, ierr )
    160197!
    161198!--       Send right boundary, receive left one (synchronous)
    162           CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,       &
    163                              type_yz(grid_level), pright, 1,                   &
    164                              ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,         &
    165                              type_yz(grid_level), pleft,  1,                   &
    166                              comm2d, status, ierr )
     199          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,                           &
     200                             type_yz(grid_level), right_pe, 1,                                     &
     201                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,                             &
     202                             type_yz(grid_level), left_pe,  1,                                     &
     203                             communicator, status, ierr )
    167204
    168205       ELSE
     
    176213!
    177214!--          Send left boundary, receive right one (asynchronous)
    178              CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
    179                              pleft, req_count, comm2d, req(req_count+1), ierr )
    180              CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
    181                              pright, req_count, comm2d, req(req_count+2), ierr )
     215             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), left_pe,        &
     216                             req_count, communicator, req(req_count+1), ierr )
     217             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), right_pe,       &
     218                             req_count, communicator, req(req_count+2), ierr )
    182219!
    183220!--          Send right boundary, receive left one (asynchronous)
    184              CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,       &
    185                              type_yz(grid_level), pright, req_count+1, comm2d, &
    186                              req(req_count+3), ierr )
    187              CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,       &
    188                              type_yz(grid_level), pleft,  req_count+1, comm2d, &
    189                              req(req_count+4), ierr )
     221             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(grid_level),      &
     222                             right_pe, req_count+1, communicator, req(req_count+3), ierr )
     223             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_yz(grid_level),      &
     224                             left_pe,  req_count+1, communicator, req(req_count+4), ierr )
    190225
    191226             CALL MPI_WAITALL( 4, req, wait_stat, ierr )
     
    219254!
    220255!--       Send front boundary, receive rear one (synchronous)
    221           CALL MPI_SENDRECV(                                                   &
    222               ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
    223               ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
    224               comm2d, status, ierr )
     256          CALL MPI_SENDRECV( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), south_pe, 0,    &
     257                             ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), north_pe, 0,    &
     258                             communicator, status, ierr )
    225259!
    226260!--       Send rear boundary, receive front one (synchronous)
    227           CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,       &
    228                              type_xz(grid_level), pnorth, 1,                   &
    229                              ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,       &
    230                              type_xz(grid_level), psouth, 1,                   &
    231                              comm2d, status, ierr )
     261          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,                           &
     262                             type_xz(grid_level), north_pe, 1,                                     &
     263                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,                           &
     264                             type_xz(grid_level), south_pe, 1,                                     &
     265                             communicator, status, ierr )
    232266
    233267       ELSE
     
    242276!
    243277!--          Send front boundary, receive rear one (asynchronous)
    244              CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
    245                              psouth, req_count, comm2d, req(req_count+1), ierr )
    246              CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
    247                              pnorth, req_count, comm2d, req(req_count+2), ierr )
     278             CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), south_pe,       &
     279                             req_count, communicator, req(req_count+1), ierr )
     280             CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), north_pe,       &
     281                             req_count, communicator, req(req_count+2), ierr )
    248282!
    249283!--          Send rear boundary, receive front one (asynchronous)
    250              CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,       &
    251                              type_xz(grid_level), pnorth, req_count+1, comm2d, &
    252                              req(req_count+3), ierr )
    253              CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,       &
    254                              type_xz(grid_level), psouth, req_count+1, comm2d, &
    255                              req(req_count+4), ierr )
     284             CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(grid_level),      &
     285                             north_pe, req_count+1, communicator, req(req_count+3), ierr )
     286             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_xz(grid_level),      &
     287                             south_pe, req_count+1, communicator, req(req_count+4), ierr )
    256288
    257289             CALL MPI_WAITALL( 4, req, wait_stat, ierr )
  • palm/trunk/SOURCE/init_pegrid.f90

    r4444 r4461  
    2525! -----------------
    2626! $Id$
     27! communicator configurations for four virtual pe grids defined
     28!
     29! 4444 2020-03-05 15:59:50Z raasch
    2730! bugfix: cpp-directives for serial mode added
    2831!
     
    242245
    243246!
     247!-- Create four default MPI communicators for the 2d virtual PE grid. One of them will be used
     248!-- as the main communicator for this run, while others might be used for specific quantities like
      249!-- aerosols, chemical species, or passive scalars, if their horizontal boundary conditions shall
     250!-- be different from those of the other quantities (e.g. non-cyclic conditions for aerosols, and
     251!-- cyclic conditions for all others).
     252    DO  i = 1, 4
     253
     254       IF ( i == 1 )  cyclic = (/  .TRUE., .TRUE.  /)   ! cyclic along x and y
     255       IF ( i == 2 )  cyclic = (/  .TRUE., .FALSE. /)   ! cyclic along x
      256       IF ( i == 3 )  cyclic = (/ .FALSE., .TRUE.  /)   ! cyclic along y
     257       IF ( i == 4 )  cyclic = (/ .FALSE., .FALSE. /)   ! non-cyclic
     258
     259       CALL MPI_CART_CREATE( comm_palm, ndim, pdims, cyclic, reorder,                              &
     260                             communicator_configurations(i)%mpi_communicator, ierr )
     261
     262       CALL MPI_CART_SHIFT( communicator_configurations(i)%mpi_communicator, 0, 1,                 &
     263                            communicator_configurations(i)%pleft,                                  &
     264                            communicator_configurations(i)%pright, ierr )
     265
     266       CALL MPI_CART_SHIFT( communicator_configurations(i)%mpi_communicator, 1, 1,                 &
     267                            communicator_configurations(i)%psouth,                                 &
     268                            communicator_configurations(i)%pnorth, ierr )
     269
     270    ENDDO
     271
     272!
    244273!-- If necessary, set horizontal boundary conditions to non-cyclic
    245274    IF ( bc_lr /= 'cyclic' )  cyclic(1) = .FALSE.
     
    248277
    249278!
    250 !-- Create the virtual processor grid
    251     CALL MPI_CART_CREATE( comm_palm, ndim, pdims, cyclic, reorder, &
    252                           comm2d, ierr )
     279!-- Set the main communicator (virtual pe grid) for this run
     280    IF ( bc_lr == 'cyclic'  .AND.  bc_ns == 'cyclic' )  i = 1
     281    IF ( bc_lr == 'cyclic'  .AND.  bc_ns /= 'cyclic' )  i = 2
     282    IF ( bc_lr /= 'cyclic'  .AND.  bc_ns == 'cyclic' )  i = 3
     283    IF ( bc_lr /= 'cyclic'  .AND.  bc_ns /= 'cyclic' )  i = 4
     284
     285    comm2d = communicator_configurations(i)%mpi_communicator
     286    pleft  = communicator_configurations(i)%pleft
     287    pright = communicator_configurations(i)%pright
     288    psouth = communicator_configurations(i)%psouth
     289    pnorth = communicator_configurations(i)%pnorth
     290
     291!
     292!-- Set rank and coordinates of the main communicator
    253293    CALL MPI_COMM_RANK( comm2d, myid, ierr )
    254294    WRITE (myid_char,'(''_'',I6.6)')  myid
    255295
    256296    CALL MPI_CART_COORDS( comm2d, myid, ndim, pcoord, ierr )
    257     CALL MPI_CART_SHIFT( comm2d, 0, 1, pleft, pright, ierr )
    258     CALL MPI_CART_SHIFT( comm2d, 1, 1, psouth, pnorth, ierr )
     297
    259298!
    260299!-- In case of cyclic boundary conditions, a y-shift at the boundaries in
     
    316355       ENDIF
    317356    ENDIF
     357
    318358!
    319359!-- Vertical nesting: store four lists that identify partner ranks to exchange
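
The loop added above creates the four cartesian communicators once and stores each handle together with its neighbour ranks. The following standalone sketch (plain MPI, not PALM code; all names are illustrative) shows the same MPI_CART_CREATE / MPI_CART_SHIFT pattern and how missing neighbours at non-cyclic edges appear as MPI_PROC_NULL:

 PROGRAM cart_neighbours
!
!-- Standalone illustration of the communicator-configuration loop above:
!-- build the four 2d cartesian communicators and print each rank's
!-- neighbour ids (MPI_PROC_NULL marks a missing neighbour at a non-cyclic edge)
    USE MPI

    IMPLICIT NONE

    INTEGER ::  comm_cart     !< cartesian communicator of the current configuration
    INTEGER ::  i             !< configuration index (1: cyclic, ..., 4: non-cyclic)
    INTEGER ::  ierr, myid, nprocs
    INTEGER ::  pleft, pright, psouth, pnorth
    INTEGER ::  pdims(2) = 0  !< 0 lets MPI_DIMS_CREATE choose the 2d processor grid

    LOGICAL ::  cyclic(2)

    CALL MPI_INIT( ierr )
    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, nprocs, ierr )
    CALL MPI_DIMS_CREATE( nprocs, 2, pdims, ierr )

    DO  i = 1, 4

       IF ( i == 1 )  cyclic = (/  .TRUE., .TRUE.  /)   ! cyclic along x and y
       IF ( i == 2 )  cyclic = (/  .TRUE., .FALSE. /)   ! cyclic along x
       IF ( i == 3 )  cyclic = (/ .FALSE., .TRUE.  /)   ! cyclic along y
       IF ( i == 4 )  cyclic = (/ .FALSE., .FALSE. /)   ! non-cyclic

       CALL MPI_CART_CREATE( MPI_COMM_WORLD, 2, pdims, cyclic, .FALSE., comm_cart, ierr )
!
!--    Direction 0 gives the left/right neighbours, direction 1 the south/north ones
       CALL MPI_CART_SHIFT( comm_cart, 0, 1, pleft,  pright, ierr )
       CALL MPI_CART_SHIFT( comm_cart, 1, 1, psouth, pnorth, ierr )

       CALL MPI_COMM_RANK( comm_cart, myid, ierr )
       PRINT*, 'configuration', i, 'rank', myid, 'neighbours', pleft, pright, psouth, pnorth

       CALL MPI_COMM_FREE( comm_cart, ierr )

    ENDDO

    CALL MPI_FINALIZE( ierr )

 END PROGRAM cart_neighbours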
  • palm/trunk/SOURCE/modules.f90

    r4414 r4461  
    2525! -----------------
    2626! $Id$
     27! +virtual_pe_grid, communicator_configurations
     28!
     29! 4414 2020-02-19 20:16:04Z suehring
    2730! - nzb_diff_s_inner, nzb_diff_s_outer, nzb_inner,nzb_outer, nzb_s_inner,
    2831!   nzb_s_outer, nzb_u_inner, nzb_u_outer, nzb_v_inner, nzb_v_outer,
     
    12411244    INTEGER(iwp) ::  numprocs = 1                !< total number of appointed processor elements
    12421245    INTEGER(iwp) ::  numprocs_previous_run = -1  !< total number of appointed processor elements in previous run (job chain)
    1243     INTEGER(iwp) ::  pleft                       !< MPI-address of the processor left of the current one
    1244     INTEGER(iwp) ::  pnorth                      !< MPI-address of the processor north of the current one
    1245     INTEGER(iwp) ::  pright                      !< MPI-address of the processor right of the current one
    1246     INTEGER(iwp) ::  psouth                      !< MPI-address of the processor south of the current one
      1246    INTEGER(iwp) ::  pleft                       !< MPI id of left neighbour pe
      1247    INTEGER(iwp) ::  pnorth                      !< MPI id of north neighbour pe
      1248    INTEGER(iwp) ::  pright                      !< MPI id of right neighbour pe
      1249    INTEGER(iwp) ::  psouth                      !< MPI id of south neighbour pe
    12471250    INTEGER(iwp) ::  req_count = 0               !< MPI return variable - checks if Send-Receive operation is already finished
    12481251    INTEGER(iwp) ::  sendrecvcount_xy            !< number of subdomain gridpoints to be exchanged in direct transpositions (y --> x, or x --> y) or second (2d) transposition x --> y
     
    12641267
    12651268    LOGICAL ::  collective_wait = .FALSE.          !< switch to set an explicit MPI barrier in front of all collective MPI calls
     1269
     1270    TYPE virtual_pe_grid
     1271       INTEGER(iwp) ::  mpi_communicator         !< MPI communicator id
      1272       INTEGER(iwp) ::  pleft                    !< MPI id of left neighbour pe
      1273       INTEGER(iwp) ::  pright                   !< MPI id of right neighbour pe
      1274       INTEGER(iwp) ::  psouth                   !< MPI id of south neighbour pe
      1275       INTEGER(iwp) ::  pnorth                   !< MPI id of north neighbour pe
     1276    END TYPE virtual_pe_grid
     1277
     1278    TYPE(virtual_pe_grid) ::  communicator_configurations(4)  !< stores the four possible 2d virtual grids:
     1279                                                              !< cyclic, cyclic along x, cyclic along y, non-cyclic
    12661280
    12671281#if defined( __parallel )
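
For reference, a self-contained sketch (dummy values, not PALM code) of how the new derived type bundles a communicator handle with its four neighbour ids, mirroring the lookup that exchange_horiz performs when an alternative communicator index is passed:

 PROGRAM virtual_pe_grid_demo
!
!-- Simplified copy of the derived type above (default INTEGER instead of iwp)
    IMPLICIT NONE

    TYPE virtual_pe_grid
       INTEGER ::  mpi_communicator  !< MPI communicator id
       INTEGER ::  pleft             !< MPI id of left neighbour pe
       INTEGER ::  pright            !< MPI id of right neighbour pe
       INTEGER ::  psouth            !< MPI id of south neighbour pe
       INTEGER ::  pnorth            !< MPI id of north neighbour pe
    END TYPE virtual_pe_grid

    TYPE(virtual_pe_grid) ::  communicator_configurations(4)

    INTEGER ::  communicator, left_pe

!
!-- Dummy values standing in for what init_pegrid stores via MPI_CART_CREATE /
!-- MPI_CART_SHIFT (configuration 2: cyclic along x)
    communicator_configurations(2) = virtual_pe_grid( 21, 0, 1, 2, 3 )

!
!-- This mirrors the selection done in exchange_horiz for
!-- alternative_communicator = 2
    communicator = communicator_configurations(2)%mpi_communicator
    left_pe      = communicator_configurations(2)%pleft

    PRINT*, communicator, left_pe

 END PROGRAM virtual_pe_grid_demo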