Changeset 2599


Timestamp: Nov 1, 2017 1:18:45 PM (6 years ago)
Author: hellstea
Message: i/o grouping update for nested runs

Location: palm/trunk/SOURCE
Files: 8 edited

  • palm/trunk/SOURCE/Makefile

    r2576 r2599  
    446446# The following line is needed for palm_simple_install, don't remove it!
    447447#to_be_replaced_by_include
     448
     449#BOUNDS="-Rbc"  # Array bounds checking. Compromises performance seriously.
    448450
    449451.SUFFIXES:
  • palm/trunk/SOURCE/parin.f90

    r2575 r2599  
    2525! -----------------
    2626! $Id$
     27! The i/o grouping is updated to work correctly also in nested runs.
     28!
     29! 2575 2017-10-24 09:57:58Z maronga
    2730! Renamed phi -> latitude, added longitude
    2831!
     
    390393    INTEGER(iwp) ::  i      !<
    391394    INTEGER(iwp) ::  ioerr  !< error flag for open/read/write
    392 
     395    INTEGER(iwp) ::  myworldid       !<
     396    INTEGER(iwp) ::  numworldprocs   !<
    393397
    394398    NAMELIST /inipar/  aerosol_bulk, alpha_surface, approximation, bc_e_b,     &
     
    513517
    514518    CALL location_message( 'finished', .TRUE. )
    515 
    516519!
    517520!-- Calculate the number of groups into which parallel I/O is split.
     
    522525!-- system.
    523526!-- First, set the default:
     527    CALL MPI_COMM_RANK( MPI_COMM_WORLD, myworldid, ierr )
     528    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numworldprocs, ierr )
    524529    IF ( maximum_parallel_io_streams == -1  .OR.                               &
    525          maximum_parallel_io_streams > numprocs )  THEN
    526        maximum_parallel_io_streams = numprocs
     530         maximum_parallel_io_streams > numworldprocs )  THEN
     531       maximum_parallel_io_streams = numworldprocs
    527532    ENDIF
    528533!
     
    532537!-- These settings are repeated in init_pegrid for the communicator comm2d,
    533538!-- which is not available here
    534     io_blocks = numprocs / maximum_parallel_io_streams
    535     io_group  = MOD( myid+1, io_blocks )
    536 
     539    !io_blocks = numprocs / maximum_parallel_io_streams
     540    io_blocks = numworldprocs / maximum_parallel_io_streams
     541    !io_group  = MOD( myid+1, io_blocks )
     542    io_group  = MOD( myworldid+1, io_blocks )
     543   
    537544    CALL location_message( 'reading NAMELIST parameters from PARIN', .FALSE. )
    538545!
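
    The parin.f90 hunk above moves the i/o grouping calculation onto MPI_COMM_WORLD, so that all nested domains use one consistent grouping. The following is a minimal standalone Fortran sketch of that calculation, not part of the changeset; the example value assigned to maximum_parallel_io_streams and the PRINT line are illustrative only.

    PROGRAM io_grouping_sketch
    !
    !-- Sketch only: derive io_blocks and io_group from rank and size of
    !-- MPI_COMM_WORLD, as parin.f90 now does for nested runs
       USE MPI
       IMPLICIT NONE
       INTEGER ::  ierr, io_blocks, io_group, myworldid, numworldprocs
       INTEGER ::  maximum_parallel_io_streams

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, myworldid, ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numworldprocs, ierr )
    !
    !-- Assumed example value; -1 means "no limit" and is replaced by the
    !-- total number of processes below
       maximum_parallel_io_streams = 4
       IF ( maximum_parallel_io_streams == -1  .OR.                            &
            maximum_parallel_io_streams > numworldprocs )  THEN
          maximum_parallel_io_streams = numworldprocs
       ENDIF
    !
    !-- Processes sharing the same io_group value perform their i/o in turn
       io_blocks = numworldprocs / maximum_parallel_io_streams
       io_group  = MOD( myworldid+1, io_blocks )
       PRINT*, 'world rank ', myworldid, ' -> i/o group ', io_group
       CALL MPI_FINALIZE( ierr )
    END PROGRAM io_grouping_sketch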
  • palm/trunk/SOURCE/pmc_child_mod.f90

    r2101 r2599  
    2626! -----------------
    2727! $Id$
     28! Some cleanup and commenting improvements only.
     29!
     30! 2101 2017-01-05 16:42:31Z suehring
    2831!
    2932! 2000 2016-08-20 18:09:15Z knoop
     
    171174     CALL MPI_COMM_SIZE( me%model_comm, me%model_npes, istat )
    172175     CALL MPI_COMM_REMOTE_SIZE( me%inter_comm, me%inter_npes, istat )
    173 
    174 !
    175 !--  Intra-communicater is used for MPI_GET
     176!
     177!--  Intra-communicator is used for MPI_GET
    176178     CALL MPI_INTERCOMM_MERGE( me%inter_comm, .TRUE., me%intra_comm, istat )
    177179     CALL MPI_COMM_RANK( me%intra_comm, me%intra_rank, istat )
    178180
    179181     ALLOCATE( me%pes(me%inter_npes) )
    180 
    181 !
    182 !--  Allocate an array of type arraydef for all parent PEs to store information
    183 !--  of then transfer array
     182!
     183!--  Allocate an array of type arraydef for all parent processes to store
     184!--  information of then transfer array
    184185     DO  i = 1, me%inter_npes
    185186        ALLOCATE( me%pes(i)%array_list(pmc_max_array) )
     
    201202
    202203    INTEGER, INTENT(OUT) ::  istat  !<
    203 
    204204!
    205205!-- Local variables
     
    211211
    212212    istat = pmc_status_ok
    213 
    214213!
    215214!-- Check length of array names
     
    229228
    230229!
    231 !-- Broadcat to all child PEs
     230!-- Broadcast to all child processes
    232231!-- TODO: describe what is broadcast here and why it is done
    233232    CALL pmc_bcast( myname%couple_index, 0, comm=m_model_comm )
     
    236235    CALL pmc_bcast( myname%childdesc,    0, comm=m_model_comm )
    237236    CALL pmc_bcast( myname%nameonchild,  0, comm=m_model_comm )
    238 
    239 !
    240 !-- Broadcat to all parent PEs
     237!
     238!-- Broadcast to all parent processes
    241239!-- TODO: describe what is broadcast here and why it is done
    242240    IF ( m_model_rank == 0 )  THEN
     
    263261
    264262    LOGICAL, INTENT(IN), OPTIONAL ::  lastentry  !<
    265 
    266263!
    267264!-- Local variables
     
    290287    INTEGER :: i, ierr, i2, j, nr  !<
    291288    INTEGER :: indwin              !< MPI window object
    292     INTEGER :: indwin2  !          < MPI window object
     289    INTEGER :: indwin2             !< MPI window object
    293290
    294291    INTEGER(KIND=MPI_ADDRESS_KIND) :: win_size !< Size of MPI window 1 (in bytes)
     
    306303    CALL MPI_WIN_CREATE( dummy, win_size, iwp, MPI_INFO_NULL, me%intra_comm,    &
    307304                         indwin, ierr )
    308 
    309305!
    310306!-- Open window on parent side
    311307!-- TODO: why is the next MPI routine called twice??
    312308    CALL MPI_WIN_FENCE( 0, indwin, ierr )
    313 
    314309!
    315310!-- Close window on parent side and open on child side
     
    321316                     MPI_INTEGER, indwin, ierr )
    322317    ENDDO
    323 
    324318!
    325319!-- MPI_GET is non-blocking -> data in nrele is not available until MPI_FENCE is
    326320!-- called
    327321    CALL MPI_WIN_FENCE( 0, indwin, ierr )
    328 
    329322!
    330323!-- Allocate memory for index array
     
    344337    ALLOCATE( myind(2*winsize) )
    345338    winsize = 1
    346 
    347339!
    348340!-- Local buffer used in MPI_GET can but must not be inside the MPI Window.
    349 !-- Here, we use a dummy for the MPI window because the parent PEs do not access
    350 !-- the RMA window via MPI_GET or MPI_PUT
     341!-- Here, we use a dummy for the MPI window because the parent processes do
     342!-- not access the RMA window via MPI_GET or MPI_PUT
    351343    CALL MPI_WIN_CREATE( dummy, winsize, iwp, MPI_INFO_NULL, me%intra_comm,     &
    352344                         indwin2, ierr )
    353 
    354345!
    355346!-- MPI_GET is non-blocking -> data in nrele is not available until MPI_FENCE is
     
    377368       ENDIF
    378369    ENDDO
    379 
    380370!
    381371!-- Don't know why, but this barrier is necessary before we can free the windows
     
    406396!--  pmc_interface
    407397     CHARACTER(LEN=*), INTENT(OUT) ::  myname  !<
    408 
    409398!
    410399!-- Local variables
     
    414403
    415404    next_array_in_list = next_array_in_list + 1
    416 
    417 !
    418 !-- Array names are the same on all child PEs, so take first PE to get the name
     405!
     406!-- Array names are the same on all child PEs, so take first process to
     407!-- get the name   
    419408    ape => me%pes(1)
    420 
    421409!
    422410!-- Check if all arrays have been processed
     
    429417
    430418    myname = ar%name
    431 
    432419!
    433420!-- Return true if legal array
     
    545532    myindex = 0
    546533    bufsize = 8
    547 
    548534!
    549535!-- Parent to child direction.
    550536!-- First stride: compute size and set index
    551537    DO  i = 1, me%inter_npes
    552 
    553538       ape => me%pes(i)
    554539       tag = 200
    555 
    556540       DO  j = 1, ape%nr_arrays
    557 
    558541          ar => ape%array_list(j)
    559 
    560542!
    561543!--       Receive index from child
     
    564546                         MPI_STATUS_IGNORE, ierr )
    565547          ar%recvindex = myindex
    566 
    567548!
    568549!--       Determine max, because child buffer is allocated only once
     
    573554             bufsize = MAX( bufsize, ar%a_dim(1)*ar%a_dim(2) )
    574555          ENDIF
    575 
    576556       ENDDO
    577 
    578     ENDDO
    579 
     557    ENDDO
    580558!
    581559!-- Create RMA (one sided communication) data buffer.
     
    584562    CALL pmc_alloc_mem( base_array_pc, bufsize, base_ptr )
    585563    me%totalbuffersize = bufsize*wp  ! total buffer size in byte
    586 
    587564!
    588565!-- Second stride: set buffer pointer
    589566    DO  i = 1, me%inter_npes
    590 
    591567       ape => me%pes(i)
    592 
    593568       DO  j = 1, ape%nr_arrays
    594569          ar => ape%array_list(j)
    595570          ar%recvbuf = base_ptr
    596571       ENDDO
    597 
    598     ENDDO
    599 
     572    ENDDO
    600573!
    601574!-- Child to parent direction
     
    605578
    606579    DO  i = 1, me%inter_npes
    607 
    608580       ape => me%pes(i)
    609581       tag = 300
    610 
    611582       DO  j = 1, ape%nr_arrays
    612 
    613583          ar => ape%array_list(j)
    614584          IF( ar%nrdims == 2 )  THEN
     
    617587             arlen = ape%nrele*ar%a_dim(1)
    618588          ENDIF
    619 
    620589          tag    = tag + 1
    621590          rcount = rcount + 1
     
    629598             ar%sendindex = noindex
    630599          ENDIF
    631 
    632600!
    633601!--       Maximum of 1024 outstanding requests
    634 !--       TODO: explain where this maximum comes from (arbitrary?)
     602!--       TODO: explain where this maximum comes from (arbitrary?).
     603!--       Outstanding = pending?
    635604          IF ( rcount == 1024 )  THEN
    636605             CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr )
     
    651620
    652621    ENDDO
    653 
    654622!
    655623!-- Create RMA (one sided communication) window for data buffer child to parent
     
    670638    CALL MPI_WIN_FENCE( 0, me%win_parent_child, ierr )
    671639    CALL MPI_BARRIER( me%intra_comm, ierr )
    672 
    673640!
    674641!-- Second stride: set buffer pointer
    675642    DO  i = 1, me%inter_npes
    676 
    677643       ape => me%pes(i)
    678 
    679644       DO  j = 1, ape%nr_arrays
    680 
    681           ar => ape%array_list(j)
    682 
     645          ar => ape%array_list(j)         
    683646          IF ( ape%nrele > 0 )  THEN
    684647             ar%sendbuf = C_LOC( base_array_cp(ar%sendindex) )
    685 
    686648!
    687649!--          TODO: if this is an error to be really expected, replace the
     
    695657             ENDIF
    696658          ENDIF
    697 
    698659       ENDDO
    699 
    700660    ENDDO
    701661
     
    743703       waittime = t2 - t1
    744704    ENDIF
    745 
    746705!
    747706!-- Wait for buffer is filled.
     
    753712
    754713    DO  ip = 1, me%inter_npes
    755 
    756714       ape => me%pes(ip)
    757 
    758715       DO  j = 1, ape%nr_arrays
    759 
    760716          ar => ape%array_list(j)
    761 
    762717          IF ( ar%nrdims == 2 )  THEN
    763718             nr = ape%nrele
     
    765720             nr = ape%nrele * ar%a_dim(1)
    766721          ENDIF
    767 
    768722          buf_shape(1) = nr
    769723          CALL C_F_POINTER( ar%recvbuf, buf, buf_shape )
    770 
    771724!
    772725!--       MPI passive target RMA
     
    780733             CALL MPI_WIN_UNLOCK( ip-1, me%win_parent_child, ierr )
    781734          ENDIF
    782 
    783735          myindex = 1
    784736          IF ( ar%nrdims == 2 )  THEN
    785 
    786737             CALL C_F_POINTER( ar%data, data_2d, ar%a_dim(1:2) )
    787 
    788738             DO  ij = 1, ape%nrele
    789739                data_2d(ape%locind(ij)%j,ape%locind(ij)%i) = buf(myindex)
    790740                myindex = myindex + 1
    791741             ENDDO
    792 
    793742          ELSEIF ( ar%nrdims == 3 )  THEN
    794 
    795743             CALL C_F_POINTER( ar%data, data_3d, ar%a_dim(1:3) )
    796 
    797744             DO  ij = 1, ape%nrele
    798745                data_3d(:,ape%locind(ij)%j,ape%locind(ij)%i) =                  &
     
    800747                myindex = myindex+ar%a_dim(1)
    801748             ENDDO
    802 
    803           ENDIF
    804 
     749          ENDIF
    805750       ENDDO
    806 
    807751    ENDDO
    808752
     
    849793
    850794    DO  ip = 1, me%inter_npes
    851 
    852795       ape => me%pes(ip)
    853 
    854796       DO  j = 1, ape%nr_arrays
    855 
    856797          ar => aPE%array_list(j)
    857798          myindex = 1
    858 
    859799          IF ( ar%nrdims == 2 )  THEN
    860 
    861800             buf_shape(1) = ape%nrele
    862801             CALL C_F_POINTER( ar%sendbuf, buf,     buf_shape     )
    863802             CALL C_F_POINTER( ar%data,    data_2d, ar%a_dim(1:2) )
    864 
    865803             DO  ij = 1, ape%nrele
    866804                buf(myindex) = data_2d(ape%locind(ij)%j,ape%locind(ij)%i)
    867805                myindex = myindex + 1
    868806             ENDDO
    869 
    870807          ELSEIF ( ar%nrdims == 3 )  THEN
    871 
    872808             buf_shape(1) = ape%nrele*ar%a_dim(1)
    873809             CALL C_F_POINTER( ar%sendbuf, buf,     buf_shape     )
    874810             CALL C_F_POINTER( ar%data,    data_3d, ar%a_dim(1:3) )
    875 
    876811             DO  ij = 1, ape%nrele
    877812                buf(myindex:myindex+ar%a_dim(1)-1) =                            &
     
    879814                myindex = myindex + ar%a_dim(1)
    880815             ENDDO
    881 
    882           ENDIF
    883 
     816          ENDIF
    884817       ENDDO
    885 
    886     ENDDO
    887 
     818    ENDDO
    888819!
    889820!-- TODO: Fence might do it, test later
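
    pmc_child_mod.f90 repeatedly uses the MPI_WIN_CREATE / MPI_WIN_FENCE / MPI_GET pattern to read index information exposed on the parent side of the intra-communicator. Below is a minimal standalone sketch of that pattern, not part of the changeset; here rank 0 of MPI_COMM_WORLD plays the role of the parent and the exposed value 42 is arbitrary.

    PROGRAM rma_fence_sketch
    !
    !-- Sketch only: active-target RMA with fences, as used by the child
    !-- module to fetch data exposed by the parent side
       USE MPI
       IMPLICIT NONE
       INTEGER                        ::  ierr, myid, win, nrele
       INTEGER                        ::  exposed
       INTEGER(KIND=MPI_ADDRESS_KIND) ::  winsize, disp

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
    !
    !-- Rank 0 exposes one INTEGER, all other ranks expose an empty window
       exposed = 42
       winsize = 0
       IF ( myid == 0 )  winsize = STORAGE_SIZE( exposed ) / 8
       CALL MPI_WIN_CREATE( exposed, winsize, STORAGE_SIZE( exposed ) / 8,     &
                            MPI_INFO_NULL, MPI_COMM_WORLD, win, ierr )
    !
    !-- Open the access epoch, fetch the value from rank 0, close the epoch.
    !-- MPI_GET is non-blocking, so nrele is valid only after the second fence
       CALL MPI_WIN_FENCE( 0, win, ierr )
       disp = 0
       IF ( myid /= 0 )  THEN
          CALL MPI_GET( nrele, 1, MPI_INTEGER, 0, disp, 1, MPI_INTEGER, win,   &
                        ierr )
       ENDIF
       CALL MPI_WIN_FENCE( 0, win, ierr )
       IF ( myid /= 0 )  PRINT*, 'rank ', myid, ' fetched ', nrele

       CALL MPI_WIN_FREE( win, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM rma_fence_sketch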
  • palm/trunk/SOURCE/pmc_general_mod.f90

    r2101 r2599  
    2626! -----------------
    2727! $Id$
     28! Some cleanup and commenting improvements only.
     29!
     30! 2101 2017-01-05 16:42:31Z suehring
    2831!
    2932! 2000 2016-08-20 18:09:15Z knoop
     
    153156 CONTAINS
    154157
     158
     159   
    155160 SUBROUTINE pmc_g_setname( mychild, couple_index, aname )
    156161
     
    170175!-- Set name of array in arraydef structure
    171176    DO  i = 1, mychild%inter_npes
    172 
    173177       ape => mychild%pes(i)
    174178       ape%nr_arrays = ape%nr_arrays + 1
    175179       ape%array_list(ape%nr_arrays)%name        = aname
    176180       ape%array_list(ape%nr_arrays)%coupleindex = couple_index
    177 
    178181    ENDDO
    179182
     
    196199
    197200    n = SIZE(array,2)
    198 
    199201    DO  j = 1, n-1
    200202       DO  i = j+1, n
    201 
    202203          IF ( array(sort_ind,i) < array(sort_ind,j) )  THEN
    203204             tmp = array(:,i)
     
    205206             array(:,j) = tmp
    206207          ENDIF
    207 
    208208       ENDDO
    209209    ENDDO
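
    The loop in the pmc_general_mod.f90 hunk above (what appears to be pmc_sort, called elsewhere in this changeset as pmc_sort( index_list, 6 )) orders the columns of a 2D index array by the values in row sort_ind using a simple exchange sort. A minimal standalone sketch with made-up data, not part of the changeset:

    PROGRAM column_sort_sketch
    !
    !-- Sketch only: exchange sort of the columns of a 2D array by row sort_ind
       IMPLICIT NONE
       INTEGER, PARAMETER ::  sort_ind = 1
       INTEGER            ::  i, j, n
       INTEGER            ::  array(2,4) = RESHAPE( (/ 3, 30, 1, 10,           &
                                                       4, 40, 2, 20 /),        &
                                                    (/ 2, 4 /) )
       INTEGER            ::  tmp(2)

       n = SIZE( array, 2 )
       DO  j = 1, n-1
          DO  i = j+1, n
             IF ( array(sort_ind,i) < array(sort_ind,j) )  THEN
                tmp = array(:,i)
                array(:,i) = array(:,j)
                array(:,j) = tmp
             ENDIF
          ENDDO
       ENDDO

       PRINT*, 'sorted row 1: ', array(1,:)
       PRINT*, 'sorted row 2: ', array(2,:)
    END PROGRAM column_sort_sketch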
  • palm/trunk/SOURCE/pmc_handle_communicator_mod.f90

    r2516 r2599  
    2626! -----------------
    2727! $Id$
     28! Separate peer communicator peer_comm introduced for MPI_INTERCOMM_CREATE.
     29! Some cleanup and commenting improvements.
     30!
     31! 2516 2017-10-04 11:03:04Z suehring
    2832! Remove tabs
    2933!
     
    127131    PUBLIC  pmc_status_ok, pmc_status_error
    128132
    129     INTEGER, PARAMETER, PUBLIC ::  pmc_error_npes        = 1  !< illegal number of PEs
     133    INTEGER, PARAMETER, PUBLIC ::  pmc_error_npes        = 1  !< illegal number of processes
    130134    INTEGER, PARAMETER, PUBLIC ::  pmc_namelist_error    = 2  !< error(s) in nestpar namelist
    131135    INTEGER, PARAMETER, PUBLIC ::  pmc_no_namelist_found = 3  !< no couple layout namelist found
     
    144148    INTEGER, PUBLIC ::  m_model_rank          !<
    145149    INTEGER, PUBLIC ::  m_model_npes          !<
    146     INTEGER         ::  m_parent_remote_size  !< number of parent PEs
     150    INTEGER         ::  m_parent_remote_size  !< number of processes in the parent model
     151    INTEGER         ::  peer_comm             !< peer_communicator for inter communicators
    147152
    148153    INTEGER, DIMENSION(pmc_max_models), PUBLIC ::  m_to_child_comm    !< communicator to the child(ren)
     
    200205    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, m_world_npes, istat )
    201206!
    202 !-- Only PE 0 of root model reads
     207!-- Only process 0 of root model reads
    203208    IF ( m_world_rank == 0 )  THEN
    204209
     
    210215       THEN
    211216!
    212 !--       Calculate start PE of every model
     217!--       Determine the first process id of each model
    213218          start_pe(1) = 0
    214219          DO  i = 2, m_ncpl+1
     
    217222
    218223!
    219 !--       The number of cores provided with the run must be the same as the
    220 !--       total sum of cores required by all nest domains
     224!--       The sum of numbers of processes requested by all the domains
     225!--       must be equal to the total number of processes of the run
    221226          IF ( start_pe(m_ncpl+1) /= m_world_npes )  THEN
    222227             WRITE ( message_string, '(2A,I6,2A,I6,A)' )                        &
     
    231236    ENDIF
    232237!
    233 !-- Broadcast the read status. This synchronises all other PEs with PE 0 of
    234 !-- the root model. Without synchronisation, they would not behave in the
    235 !-- correct way (e.g. they would not return in case of a missing NAMELIST)
     238!-- Broadcast the read status. This synchronises all other processes with
     239!-- process 0 of the root model. Without synchronisation, they would not
     240!-- behave in the correct way (e.g. they would not return in case of a
     241!-- missing NAMELIST).
    236242    CALL MPI_BCAST( pmc_status, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
    237243
     
    254260    ENDIF
    255261
    256     CALL MPI_BCAST( m_ncpl,          1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat)
    257     CALL MPI_BCAST( start_pe, m_ncpl+1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat)
    258 
     262    CALL MPI_BCAST( m_ncpl,          1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
     263    CALL MPI_BCAST( start_pe, m_ncpl+1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
    259264!
    260265!-- Broadcast coupling layout
     
    277282    CALL MPI_BCAST( nesting_datatransfer_mode, LEN(nesting_datatransfer_mode),  &
    278283                    MPI_CHARACTER, 0, MPI_COMM_WORLD, istat )
    279 
    280284!
    281285!-- Assign global MPI processes to individual models by setting the couple id
     
    288292    ENDDO
    289293    m_my_cpl_rank = m_world_rank - start_pe(i)
    290 
    291294!
    292295!-- MPI_COMM_WORLD is the communicator for ALL models (MPI-1 approach).
     
    296299                         istat )
    297300!
    298 !-- Get size and rank of the model running on this PE
     301!-- Get size and rank of the model running on this process
    299302    CALL  MPI_COMM_RANK( comm, m_model_rank, istat )
    300303    CALL  MPI_COMM_SIZE( comm, m_model_npes, istat )
    301 
    302 !
    303 !-- Broadcast (from PE 0) the parent id and id of every model
     304!
     305!-- Broadcast (from process 0) the parent id and id of every model
    304306    DO  i = 1, m_ncpl
    305307       CALL MPI_BCAST( m_couplers(i)%parent_id, 1, MPI_INTEGER, 0,              &
     
    308310                       MPI_COMM_WORLD, istat )
    309311    ENDDO
    310 
    311312!
    312313!-- Save the current model communicator for pmc internal use
     
    314315
    315316!
    316 !-- Create intercommunicator between parent and children.
     317!-- Create intercommunicator between the parent and children.
    317318!-- MPI_INTERCOMM_CREATE creates an intercommunicator between 2 groups of
    318319!-- different colors.
    319 !-- The grouping was done above with MPI_COMM_SPLIT
     320!-- The grouping was done above with MPI_COMM_SPLIT.
     321!-- A duplicate of MPI_COMM_WORLD is created and used as peer communicator
     322!-- (peer_comm) for MPI_INTERCOMM_CREATE.
     323    CALL MPI_COMM_DUP( MPI_COMM_WORLD, peer_comm, ierr )
    320324    DO  i = 2, m_ncpl
    321 
    322325       IF ( m_couplers(i)%parent_id == m_my_cpl_id )  THEN
    323326!
    324 !--       Collect parent PEs.
    325 !--       Every model exept the root model has a parent model which acts as
    326 !--       parent model. Create an intercommunicator to connect current PE to
    327 !--       all children PEs
     327!--       Identify all children models of the current model and create
     328!--       inter-communicators to connect between the current model and its
     329!--       children models.
    328330          tag = 500 + i
    329           CALL MPI_INTERCOMM_CREATE( comm, 0, MPI_COMM_WORLD, start_pe(i),      &
     331          CALL MPI_INTERCOMM_CREATE( comm, 0, peer_comm, start_pe(i),           &
    330332                                     tag, m_to_child_comm(i), istat)
    331333          childcount = childcount + 1
    332334          activeparent(i) = 1
    333 
    334335       ELSEIF ( i == m_my_cpl_id)  THEN
    335336!
    336 !--       Collect children PEs.
    337 !--       Every model except the root model has a parent model.
    338 !--       Create an intercommunicator to connect current PE to all parent PEs
     337!--       Create an inter-communicator to connect between the current
     338!--       model and its parent model.   
    339339          tag = 500 + i
    340           CALL MPI_INTERCOMM_CREATE( comm, 0, MPI_COMM_WORLD,                   &
     340          CALL MPI_INTERCOMM_CREATE( comm, 0, peer_comm,                        &
    341341                                     start_pe(m_couplers(i)%parent_id),         &
    342342                                     tag, m_to_parent_comm, istat )
    343343       ENDIF
    344 
    345344    ENDDO
    346 
    347 !
    348 !-- If I am parent, count the number of children that I have
     345!
     346!-- If I am a parent, count the number of children I have.
    349347!-- Although this loop is symmetric on all processes, the "activeparent" flag
    350 !-- is true (==1) on the respective individual PE only.
     348!-- is true (==1) on the respective individual process only.
    351349    ALLOCATE( pmc_parent_for_child(childcount+1) )
    352350
     
    362360    IF ( m_my_cpl_id > 1 )  THEN
    363361       CALL MPI_COMM_REMOTE_SIZE( m_to_parent_comm, m_parent_remote_size,       &
    364                                   istat)
     362                                  istat )
    365363    ELSE
    366364!
     
    369367    ENDIF
    370368!
    371 !-- Set myid to non-tero value except for the root domain. This is a setting
     369!-- Set myid to non-zero value except for the root domain. This is a setting
    372370!-- for the message routine which is called at the end of pmci_init. That
    373371!-- routine outputs messages for myid = 0, only. However, myid has not been
    374 !-- assigened so far, so that all PEs of the root model would output a
    375 !-- message. To avoid this, set myid to some other value except for PE0 of the
    376 !-- root domain.
     372!-- assigened so far, so that all processes of the root model would output a
     373!-- message. To avoid this, set myid to some other value except for process 0
     374!-- of the root domain.
    377375    IF ( m_world_rank /= 0 )  myid = 1
    378376
     
    401399    INTEGER, INTENT(OUT), OPTIONAL ::  npe_total           !<
    402400
    403     INTEGER ::  requested_cpl_id  !<
    404 
    405     REAL(wp), INTENT(OUT), OPTIONAL ::  lower_left_x  !<
    406     REAL(wp), INTENT(OUT), OPTIONAL ::  lower_left_y  !<
     401    INTEGER ::  requested_cpl_id                           !<
     402
     403    REAL(wp), INTENT(OUT), OPTIONAL ::  lower_left_x       !<
     404    REAL(wp), INTENT(OUT), OPTIONAL ::  lower_left_y       !<
    407405
    408406!
     
    416414       requested_cpl_id = m_my_cpl_id
    417415    ENDIF
    418 
    419416!
    420417!-- Return the requested information
     
    459456
    460457 SUBROUTINE read_coupling_layout( nesting_datatransfer_mode, nesting_mode,      &
    461                                   pmc_status )
     458      pmc_status )
    462459
    463460    IMPLICIT NONE
     
    481478
    482479    pmc_status = pmc_status_ok
    483 
    484480!
    485481!-- Open the NAMELIST-file and read the nesting layout
     
    487483    READ ( 11, nestpar, IOSTAT=istat )
    488484!
    489 !-- Set filepointer to the beginning of the file. Otherwise PE0 will later
     485!-- Set filepointer to the beginning of the file. Otherwise process 0 will later
    490486!-- be unable to read the inipar-NAMELIST
    491487    REWIND ( 11 )
    492488
    493489    IF ( istat < 0 )  THEN
    494 
    495490!
    496491!--    No nestpar-NAMELIST found
    497492       pmc_status = pmc_no_namelist_found
    498 
    499493       RETURN
    500 
    501494    ELSEIF ( istat > 0 )  THEN
    502 
    503495!
    504496!--    Errors in reading nestpar-NAMELIST
    505497       pmc_status = pmc_namelist_error
    506498       RETURN
    507 
    508     ENDIF
    509 
     499    ENDIF
    510500!
    511501!-- Output location message
    512502    CALL location_message( 'initialize communicators for nesting', .FALSE. )
    513 
    514 !
    515 !-- Assign the layout to the internally used variable
     503!
     504!-- Assign the layout to the corresponding internally used variable m_couplers
    516505    m_couplers = domain_layouts
    517 
    518506!
    519507!-- Get the number of nested models given in the nestpar-NAMELIST
     
    521509!
    522510!--    When id=-1 is found for the first time, the list of domains is finished
    523         IF ( m_couplers(i)%id == -1  .OR.  i == pmc_max_models )  THEN
     511       IF ( m_couplers(i)%id == -1  .OR.  i == pmc_max_models )  THEN
    524512          IF ( m_couplers(i)%id == -1 )  THEN
    525513             m_ncpl = i - 1
     
    529517          ENDIF
    530518       ENDIF
    531 
    532519    ENDDO
    533 
    534520!
    535521!-- Make sure that all domains have equal lower left corner in case of vertical
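
    The key change in pmc_handle_communicator_mod.f90 is that MPI_INTERCOMM_CREATE now receives a duplicate of MPI_COMM_WORLD (peer_comm) as its peer communicator instead of MPI_COMM_WORLD itself. A minimal standalone sketch of that construction, not part of the changeset; the two-way split, the leader ranks and the tag value are illustrative and at least two processes are assumed.

    PROGRAM peer_comm_sketch
    !
    !-- Sketch only: build an inter-communicator between two halves of
    !-- MPI_COMM_WORLD using a duplicate of MPI_COMM_WORLD as peer communicator
       USE MPI
       IMPLICIT NONE
       INTEGER            ::  ierr, myid, npes, color, comm, peer_comm
       INTEGER            ::  inter_comm, remote_leader
       INTEGER, PARAMETER ::  tag = 500

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, npes, ierr )
    !
    !-- Split the world into a "parent" group (first half) and a "child" group
       color = MERGE( 1, 2, myid < npes/2 )
       CALL MPI_COMM_SPLIT( MPI_COMM_WORLD, color, myid, comm, ierr )
    !
    !-- Duplicate MPI_COMM_WORLD to serve as the peer communicator
       CALL MPI_COMM_DUP( MPI_COMM_WORLD, peer_comm, ierr )
    !
    !-- The remote leader is addressed by its rank in peer_comm: the first
    !-- child rank (npes/2) for the parent group, rank 0 for the child group
       remote_leader = MERGE( npes/2, 0, color == 1 )
       CALL MPI_INTERCOMM_CREATE( comm, 0, peer_comm, remote_leader, tag,      &
                                  inter_comm, ierr )

       CALL MPI_COMM_FREE( inter_comm, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM peer_comm_sketch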
  • palm/trunk/SOURCE/pmc_interface_mod.f90

    r2582 r2599  
    2626! -----------------
    2727! $Id$
     28! Some cleanup and commenting improvements only.
     29!
     30! 2582 2017-10-26 13:19:46Z hellstea
    2831! Resetting of e within buildings / topography in pmci_parent_datatrans removed
    2932! as unnecessary since e is not anterpolated, and as incorrect since it overran
  • palm/trunk/SOURCE/pmc_mpi_wrapper_mod.f90

    r2101 r2599  
    2626! -----------------
    2727! $Id$
     28! Some cleanup and commenting improvements only.
     29!
     30! 2101 2017-01-05 16:42:31Z suehring
    2831!
    2932! 2000 2016-08-20 18:09:15Z knoop
     
    142145    INTEGER, INTENT(OUT)              ::  ierr         !<
    143146
     147   
    144148    ierr = 0
    145149    CALL MPI_SEND( buf, n, MPI_INTEGER, parent_rank, tag, m_to_parent_comm,     &
    146                    ierr)
     150                   ierr )
    147151
    148152 END SUBROUTINE pmc_send_to_parent_integer
     
    160164    INTEGER, INTENT(OUT)               ::  ierr         !<
    161165
     166   
    162167    ierr = 0
    163168    CALL MPI_RECV( buf, n, MPI_INTEGER, parent_rank, tag, m_to_parent_comm,     &
     
    178183    INTEGER, INTENT(OUT)                :: ierr         !<
    179184
     185   
    180186    ierr = 0
    181187    CALL MPI_SEND( buf, n, MPI_INTEGER, parent_rank, tag, m_to_parent_comm,     &
     
    196202    INTEGER, INTENT(OUT)               ::  ierr         !<
    197203
     204   
    198205    ierr = 0
    199206    CALL MPI_SEND( buf, n, MPI_REAL, parent_rank, tag, m_to_parent_comm, ierr )
     
    213220    INTEGER, INTENT(OUT)                ::  ierr         !<
    214221
     222   
    215223    ierr = 0
    216224    CALL MPI_RECV( buf, n, MPI_REAL, parent_rank, tag, m_to_parent_comm,        &
     
    231239    INTEGER, INTENT(OUT)                 ::  ierr         !<
    232240
     241   
    233242    ierr = 0
    234243    CALL MPI_SEND( buf, n, MPI_REAL, parent_rank, tag, m_to_parent_comm, ierr )
     
    265274    INTEGER, INTENT(OUT)                   ::  ierr         !<
    266275
     276   
    267277    ierr = 0
    268278    CALL MPI_SEND( buf, n, MPI_REAL, parent_rank, tag, m_to_parent_comm, ierr )
     
    282292    INTEGER, INTENT(OUT)                    ::  ierr         !<
    283293
     294   
    284295    ierr = 0
    285296    CALL MPI_RECV( buf, n, MPI_REAL, parent_rank, tag, m_to_parent_comm,        &
     
    302313    INTEGER, INTENT(OUT)              ::  ierr         !<
    303314
     315   
    304316    ierr = 0
    305317    CALL MPI_SEND( buf, n, MPI_INTEGER, child_rank, tag,                        &
     
    322334    INTEGER, INTENT(OUT)                 ::  ierr         !<
    323335
     336   
    324337    ierr = 0
    325338    CALL MPI_RECV( buf, n, MPI_INTEGER, child_rank, tag,                        &
     
    342355    INTEGER, INTENT(OUT)                 ::  ierr         !<
    343356
     357   
    344358    ierr = 0
    345359    CALL MPI_RECV( buf, n, MPI_INTEGER, child_rank, tag,                        &
     
    362376    INTEGER, INTENT(OUT)               ::  ierr         !<
    363377
     378   
    364379    ierr = 0
    365380    CALL MPI_SEND( buf, n, MPI_REAL, child_rank, tag,                           &
     
    382397    INTEGER, INTENT(OUT)                  ::  ierr         !<
    383398
     399   
    384400    ierr = 0
    385401    CALL MPI_RECV( buf, n, MPI_REAL, child_rank, tag,                           &
     
    402418    INTEGER, INTENT(OUT)                 ::  ierr         !<
    403419
     420   
    404421    ierr = 0
    405422    CALL MPI_SEND( buf, n, MPI_REAL, child_rank, tag,                           &
     
    422439    INTEGER, INTENT(OUT)                  ::  ierr         !<
    423440
     441   
    424442    ierr = 0
    425443    CALL MPI_RECV( buf, n, MPI_REAL, child_rank, tag,                           &
     
    431449
    432450 SUBROUTINE pmc_send_to_child_real_r3( child_id, buf, n, child_rank, tag,       &
    433                                        ierr)
     451                                       ierr )
    434452
    435453    IMPLICIT NONE
     
    442460    INTEGER, INTENT(OUT)                   ::  ierr         !<
    443461
     462   
    444463    ierr = 0
    445464    CALL MPI_SEND( buf, n, MPI_REAL, child_rank, tag,                           &
     
    462481    INTEGER, INTENT(OUT)                    ::  ierr         !<
    463482
     483   
    464484    ierr = 0
    465485    CALL MPI_RECV( buf, n, MPI_REAL, child_rank, tag,                           &
     
    511531    INTEGER ::  myerr   !<
    512532
     533   
    513534    IF ( PRESENT( comm ) )  THEN
    514535       mycomm = comm
     
    540561
    541562!
    542 !-- PE 0 on parent broadcast to all child PEs
     563!-- Process 0 on parent broadcast to all child processes
    543564    IF ( PRESENT( child_id ) )  THEN
    544565
     
    581602    TYPE(C_PTR)                    ::  p_myind  !<
    582603
     604   
    583605    winsize = idim1 * C_SIZEOF( ierr )
    584606
     
    605627    TYPE(C_PTR)                    :: p_myind  !<
    606628
     629   
    607630    winsize = idim1 * wp
    608631
     
    623646    REAL(kind=wp) :: pmc_time  !<
    624647
     648   
    625649    pmc_time = MPI_WTIME()
    626650
  • palm/trunk/SOURCE/pmc_parent_mod.f90

    r2101 r2599  
    2626! -----------------
    2727! $Id$
     28! Some cleanup and commenting improvements only.
     29!
     30! 2101 2017-01-05 16:42:31Z suehring
    2831!
    2932! 2000 2016-08-20 18:09:15Z knoop
     
    192195
    193196!
    194 !--    Intra communicater is used for MPI_GET
     197!--    Intra communicator is used for MPI_GET
    195198       CALL MPI_INTERCOMM_MERGE( children(childid)%inter_comm, .FALSE.,         &
    196199                                 children(childid)%intra_comm, istat )
     
    232235
    233236     IF ( m_model_rank == 0 )  THEN
    234 
    235 !
    236 !--     Sort to ascending parent PE order
     237!
     238!--     Sort to ascending parent process order
    237239        CALL pmc_sort( index_list, 6 )
    238 
    239240        is = 1
    240241        DO  ip = 0, m_model_npes-1
    241 
    242 !
    243 !--        Split into parent PEs
     242!
     243!--        Split into parent processes
    244244           ie = is - 1
    245 
    246 !
    247 !--        There may be no entry for this PE
     245!
     246!--        There may be no entry for this process
    248247           IF ( is <= SIZE( index_list,2 )  .AND.  ie >= 0 )  THEN
    249 
    250248              DO WHILE ( index_list(6,ie+1 ) == ip )
    251249                 ie = ie + 1
    252250                 IF ( ie == SIZE( index_list,2 ) )  EXIT
    253251              ENDDO
    254 
    255252              ian = ie - is + 1
    256 
    257253           ELSE
    258254              is  = -1
     
    260256              ian =  0
    261257           ENDIF
    262 
    263 !
    264 !--        Send data to other parent PEs
     258!
     259!--        Send data to other parent processes
    265260           IF ( ip == 0 )  THEN
    266261              indchildren(childid)%nrpoints = ian
     
    279274           ENDIF
    280275           is = ie + 1
    281 
    282276        ENDDO
    283 
    284277     ELSE
    285 
    286278        CALL MPI_RECV( indchildren(childid)%nrpoints, 1, MPI_INTEGER, 0, 1000,  &
    287279                       m_model_comm, MPI_STATUS_IGNORE, istat )
    288280        ian = indchildren(childid)%nrpoints
    289 
    290281        IF ( ian > 0 )  THEN
    291282           ALLOCATE( indchildren(childid)%index_list_2d(6,ian) )
     
    294285                          MPI_STATUS_IGNORE, istat)
    295286        ENDIF
    296 
    297287     ENDIF
    298 
    299288     CALL set_pe_index_list( childid, children(childid),                        &
    300289                             indchildren(childid)%index_list_2d,                &
     
    328317
    329318    next_array_in_list = next_array_in_list + 1
    330 
    331 !
    332 !-- Array names are the same on all children PEs, so take first PE to get the name
     319!
     320!-- Array names are the same on all children processes, so take first
     321!-- process to get the name
    333322    ape => children(childid)%pes(1)
    334323
    335324    IF ( next_array_in_list > ape%nr_arrays )  THEN
    336 
    337325!
    338326!--    All arrays are done
     
    343331    ar => ape%array_list(next_array_in_list)
    344332    myname = ar%name
    345 
    346333!
    347334!-- Return true if legal array
     
    413400
    414401    array_adr = C_LOC(array)
    415 
    416402!
    417403!-- In PALM's pointer version, two indices have to be stored internally.
     
    469455    rcount  = 0
    470456    bufsize = 8
    471 
    472457!
    473458!-- First stride: compute size and set index
     
    493478          CALL MPI_ISEND( myindex, 1, MPI_INTEGER, i-1, tag,                    &
    494479                          children(childid)%inter_comm, req(rcount), ierr )
    495 
    496480!
    497481!--       Maximum of 1024 outstanding requests
    498 !--       TODO: what does this limit mean?
     482!--       TODO: what does this limit mean? Does outstanding mean pending?
    499483          IF ( rcount == 1024 )  THEN
    500484             CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr )
     
    505489          bufsize = bufsize + arlen
    506490          ar%sendsize = arlen
    507 
    508491       ENDDO
    509492
     
    513496
    514497    ENDDO
    515 
    516498!
    517499!-- Create RMA (One Sided Communication) window for data buffer parent to
     
    530512                         children(childid)%intra_comm,                          &
    531513                         children(childid)%win_parent_child, ierr )
    532 
    533514!
    534515!-- Open window to set data
    535516    CALL MPI_WIN_FENCE( 0, children(childid)%win_parent_child, ierr )
    536 
    537517!
    538518!-- Second stride: set buffer pointer
     
    555535       ENDDO
    556536    ENDDO
    557 
    558537!
    559538!-- Child to parent direction
    560539    bufsize = 8
    561 
    562540!
    563541!-- First stride: compute size and set index
    564542    DO  i = 1, children(childid)%inter_npes
    565 
    566543       ape => children(childid)%pes(i)
    567544       tag = 300
    568 
    569545       DO  j = 1, ape%nr_arrays
    570 
    571546          ar => ape%array_list(j)
    572 
    573547!
    574548!--       Receive index from child
     
    576550          CALL MPI_RECV( myindex, 1, MPI_INTEGER, i-1, tag,                     &
    577551                         children(childid)%inter_comm, MPI_STATUS_IGNORE, ierr )
    578 
    579552          IF ( ar%nrdims == 3 )  THEN
    580553             bufsize = MAX( bufsize, ape%nrele * ar%a_dim(4) )
     
    583556          ENDIF
    584557          ar%recvindex = myindex
    585 
    586558        ENDDO
    587 
    588     ENDDO
    589 
     559    ENDDO
    590560!
    591561!-- Create RMA (one sided communication) data buffer.
     
    596566
    597567    CALL MPI_BARRIER( children(childid)%intra_comm, ierr )
    598 
    599568!
    600569!-- Second stride: set buffer pointer
    601570    DO  i = 1, children(childid)%inter_npes
    602 
    603571       ape => children(childid)%pes(i)
    604 
    605572       DO  j = 1, ape%nr_arrays
    606573          ar => ape%array_list(j)
    607574          ar%recvbuf = base_ptr
    608575       ENDDO
    609 
    610576    ENDDO
    611577
     
    654620
    655621    DO  ip = 1, children(childid)%inter_npes
    656 
    657622       ape => children(childid)%pes(ip)
    658 
    659623       DO  j = 1, ape%nr_arrays
    660 
    661624          ar => ape%array_list(j)
    662625          myindex = 1
    663 
    664626          IF ( ar%nrdims == 2 )  THEN
    665 
    666627             buf_shape(1) = ape%nrele
    667628             CALL C_F_POINTER( ar%sendbuf, buf, buf_shape )
     
    671632                myindex = myindex + 1
    672633             ENDDO
    673 
    674634          ELSEIF ( ar%nrdims == 3 )  THEN
    675 
    676635             buf_shape(1) = ape%nrele*ar%a_dim(4)
    677636             CALL C_F_POINTER( ar%sendbuf, buf, buf_shape )
     
    682641                myindex = myindex + ar%a_dim(4)
    683642             ENDDO
    684 
    685643          ENDIF
    686 
    687644        ENDDO
    688 
    689     ENDDO
    690 
     645    ENDDO
    691646!
    692647!-- Buffer is filled
     
    727682
    728683    t1 = pmc_time()
    729 
    730684!
    731685!-- Wait for child to fill buffer
     
    733687    t2 = pmc_time() - t1
    734688    IF ( PRESENT( waittime ) )  waittime = t2
    735 
    736689!
    737690!-- TODO: check next statement
     
    741694
    742695    DO  ip = 1, children(childid)%inter_npes
    743 
    744696       ape => children(childid)%pes(ip)
    745 
    746697       DO  j = 1, ape%nr_arrays
    747 
    748698          ar => ape%array_list(j)
    749 
     699         
    750700          IF ( ar%recvindex < 0 )  CYCLE
    751701
     
    755705             nr = ape%nrele * ar%a_dim(4)
    756706          ENDIF
    757 
    758707          buf_shape(1) = nr
    759708          CALL C_F_POINTER( ar%recvbuf, buf, buf_shape )
    760 
    761709!
    762710!--       MPI passive target RMA
    763711          IF ( nr > 0 )  THEN
    764712             target_disp = ar%recvindex - 1
    765 
    766 !
    767 !--          Child PEs are located behind parent PEs
     713!
     714!--          Child processes are located behind parent process
    768715             target_pe = ip - 1 + m_model_npes
    769716             CALL MPI_WIN_LOCK( MPI_LOCK_SHARED, target_pe, 0,                  &
     
    774721                                  children(childid)%win_parent_child, ierr )
    775722          ENDIF
    776 
    777723          myindex = 1
    778724          IF ( ar%nrdims == 2 )  THEN
    779 
    780725             CALL C_F_POINTER( ar%data, data_2d, ar%a_dim(1:2) )
    781726             DO  ij = 1, ape%nrele
     
    783728                myindex = myindex + 1
    784729             ENDDO
    785 
    786730          ELSEIF ( ar%nrdims == 3 )  THEN
    787 
    788731             CALL C_F_POINTER( ar%data, data_3d, ar%a_dim(1:3))
    789732             DO  ij = 1, ape%nrele
     
    792735                myindex = myindex + ar%a_dim(4)
    793736             ENDDO
    794 
    795737          ENDIF
    796 
    797738       ENDDO
    798 
    799739    ENDDO
    800740
     
    832772
    833773!
    834 !-- Set array for child inter PE 0
     774!-- Set array for child inter process 0
    835775    IMPLICIT NONE
    836776
     
    849789
    850790    DO  i = 1, children(childid)%inter_npes
    851 
    852791       ape => children(childid)%pes(i)
    853792       ar  => ape%array_list(next_array_in_list)
     
    855794       ar%a_dim  = dims
    856795       ar%data   = array_adr
    857 
    858796       IF ( PRESENT( second_adr ) )  THEN
    859797          ar%po_data(1) = array_adr
     
    863801          ar%po_data(2) = C_NULL_PTR
    864802       ENDIF
    865 
    866803    ENDDO
    867804
     
    885822
    886823    DO  ip = 1, children(childid)%inter_npes
    887 
    888824       ape => children(childid)%pes(ip)
    889 
    890825       DO  j = 1, ape%nr_arrays
    891 
    892826          ar => ape%array_list(j)
    893827          IF ( iactive == 1  .OR.  iactive == 2 )  THEN
    894828             ar%data = ar%po_data(iactive)
    895829          ENDIF
    896 
    897830       ENDDO
    898 
    899831    ENDDO
    900832
     
    931863
    932864!
    933 !-- First, count entries for every remote child PE
     865!-- First, count entries for every remote child process
    934866    DO  i = 1, mychild%inter_npes
    935867       ape => mychild%pes(i)
    936868       ape%nrele = 0
    937869    ENDDO
    938 
    939870!
    940871!-- Loop over number of coarse grid cells
    941872    DO  j = 1, nrp
    942        rempe = index_list(5,j) + 1   ! PE number on remote PE
     873       rempe = index_list(5,j) + 1   ! process number on remote process
    943874       ape => mychild%pes(rempe)
    944        ape%nrele = ape%nrele + 1     ! Increment number of elements for this child PE
     875       ape%nrele = ape%nrele + 1     ! Increment number of elements for this child process
    945876    ENDDO
    946877
     
    951882
    952883    remind = 0
    953 
    954884!
    955885!-- Second, create lists
     
    963893       ape%locind(ind)%j = index_list(2,j)
    964894    ENDDO
    965 
    966 !
    967 !-- Prepare number of elements for children PEs
     895!
     896!-- Prepare number of elements for children processes
    968897    CALL pmc_alloc_mem( rldef, mychild%inter_npes*2 )
    969 
    970 !
    971 !-- Number of child PEs * size of INTEGER (i just arbitrary INTEGER)
     898!
     899!-- Number of child processes * size of INTEGER (i just arbitrary INTEGER)
    972900    winsize = mychild%inter_npes*c_sizeof(i)*2
    973901
    974902    CALL MPI_WIN_CREATE( rldef, winsize, iwp, MPI_INFO_NULL,                    &
    975903                         mychild%intra_comm, indwin, ierr )
    976 
    977904!
    978905!-- Open window to set data
    979906    CALL MPI_WIN_FENCE( 0, indwin, ierr )
    980907
    981     rldef(1) = 0            ! index on remote PE 0
    982     rldef(2) = remind(1)    ! number of elements on remote PE 0
    983 
     908    rldef(1) = 0            ! index on remote process 0
     909    rldef(2) = remind(1)    ! number of elements on remote process 0
    984910!
    985911!-- Reserve buffer for index array
    986912    DO  i = 2, mychild%inter_npes
    987913       i2          = (i-1) * 2 + 1
    988        rldef(i2)   = rldef(i2-2) + rldef(i2-1) * 2  ! index on remote PE
    989        rldef(i2+1) = remind(i)                      ! number of elements on remote PE
    990     ENDDO
    991 
     914       rldef(i2)   = rldef(i2-2) + rldef(i2-1) * 2  ! index on remote process
     915       rldef(i2+1) = remind(i)                      ! number of elements on remote process
     916    ENDDO
    992917!
    993918!-- Close window to allow child to access data
    994919    CALL MPI_WIN_FENCE( 0, indwin, ierr )
    995 
    996920!
    997921!-- Child has retrieved data
     
    1000924    i2 = 2 * mychild%inter_npes - 1
    1001925    winsize = ( rldef(i2) + rldef(i2+1) ) * 2
    1002 
    1003926!
    1004927!-- Make sure, MPI_ALLOC_MEM works
     
    1013936!-- Open window to set data
    1014937    CALL MPI_WIN_FENCE( 0, indwin2, ierr )
    1015 
    1016938!
    1017939!-- Create the 2D index list
    1018940    DO  j = 1, nrp
    1019        rempe = index_list(5,j) + 1    ! PE number on remote PE
     941       rempe = index_list(5,j) + 1    ! process number on remote process
    1020942       ape => mychild%pes(rempe)
    1021943       i2    = rempe * 2 - 1
     
    1025947       rldef(i2)      = rldef(i2)+2
    1026948    ENDDO
    1027 
    1028949!
    1029950!-- All data are set
    1030951    CALL MPI_WIN_FENCE( 0, indwin2, ierr )
    1031 
    1032952!
    1033953!-- Don't know why, but this barrier is necessary before windows can be freed
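
    Both pmc_parent_mod.f90 and pmc_child_mod.f90 fetch buffer contents with passive target RMA, i.e. MPI_WIN_LOCK / MPI_GET / MPI_WIN_UNLOCK on the remote window, so the target process takes no part in the transfer. A minimal standalone sketch of that access pattern, not part of the changeset; the exposed value and the use of only ranks 0 and 1 are illustrative, and at least two processes are assumed.

    PROGRAM passive_rma_sketch
    !
    !-- Sketch only: passive target RMA with a shared lock, as used to read
    !-- the parent/child data buffers
       USE MPI
       IMPLICIT NONE
       INTEGER                        ::  ierr, myid, win, fetched
       INTEGER                        ::  exposed
       INTEGER(KIND=MPI_ADDRESS_KIND) ::  winsize, target_disp

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )

       exposed = myid + 100
       winsize = STORAGE_SIZE( exposed ) / 8
       CALL MPI_WIN_CREATE( exposed, winsize, STORAGE_SIZE( exposed ) / 8,     &
                            MPI_INFO_NULL, MPI_COMM_WORLD, win, ierr )
       CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )
    !
    !-- Rank 1 reads the value exposed by rank 0 inside a lock/unlock epoch
       IF ( myid == 1 )  THEN
          target_disp = 0
          CALL MPI_WIN_LOCK( MPI_LOCK_SHARED, 0, 0, win, ierr )
          CALL MPI_GET( fetched, 1, MPI_INTEGER, 0, target_disp, 1,            &
                        MPI_INTEGER, win, ierr )
          CALL MPI_WIN_UNLOCK( 0, win, ierr )
          PRINT*, 'fetched value from rank 0: ', fetched
       ENDIF
    !
    !-- Make sure all accesses are finished before the window is freed
       CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )
       CALL MPI_WIN_FREE( win, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM passive_rma_sketch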