source: palm/trunk/SOURCE/pmc_handle_communicator.f90 @ 1764

Last change on this file since 1764 was 1764, checked in by raasch, 6 years ago

update of the nested domain system + some bugfixes

  • Property svn:keywords set to Id
File size: 16.1 KB
Line 
 MODULE PMC_handle_communicator

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2015 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! ------------------
! pmc_layout type: comm_cpl and comm_parent removed, character "name" moved at
! the beginning of the variable list,
! domain layout is read with new NAMELIST nestpar from standard file PARIN,
! MPI-datatype REAL8 replaced by REAL, kind=8 replaced by wp,
! variable domain_layouts instead of m_couplers introduced for this NAMELIST,
! general format changed to PALM style
!
! Former revisions:
! -----------------
! $Id: pmc_handle_communicator.f90 1764 2016-02-28 12:45:19Z raasch $
!
! 1762 2016-02-25 12:31:13Z hellstea
! Initial revision by K. Ketelsen
!
! Description:
! ------------
! Handle MPI communicator in PALM model coupler
!------------------------------------------------------------------------------!

#if defined( __parallel )
    USE kinds

#if defined( __lc )
    USE MPI
#else
    INCLUDE "mpif.h"
#endif

   USE pmc_general,                                                            &
       ONLY: pmc_status_ok, pmc_status_error, pmc_max_modell

   IMPLICIT NONE

!
!-- Layout of one model (nest domain) as given in the nestpar-NAMELIST
   TYPE pmc_layout

      CHARACTER(len=32) ::  name          ! name of the model

      INTEGER  ::  id                     ! coupler id of the model
      INTEGER  ::  parent_id              ! coupler id of the parent model
      INTEGER  ::  npe_x                  ! number of PEs in x-direction
      INTEGER  ::  npe_y                  ! number of PEs in y-direction

      REAL(wp) ::  lower_left_x           ! x-coordinate of the lower left corner
      REAL(wp) ::  lower_left_y           ! y-coordinate of the lower left corner

   END TYPE pmc_layout

   PUBLIC  pmc_status_ok, pmc_status_error

!
!-- Status codes returned by pmc_init_model / read_coupling_layout
   INTEGER, PARAMETER, PUBLIC ::  pmc_error_npes          = 1  ! illegal number of PEs
   INTEGER, PARAMETER, PUBLIC ::  pmc_namelist_error      = 2  ! error(s) in nestpar namelist
   INTEGER, PARAMETER, PUBLIC ::  pmc_no_namelist_found   = 3  ! no couple layout found (not a nested run)

   ! Coupler setup

   INTEGER                                    :: m_my_CPL_id  ! coupler id of this model
   INTEGER                                    :: m_Parent_id  ! coupler id of the parent of this model
   INTEGER                                    :: m_NrOfCpl    ! number of couplers given in the layout file
   TYPE(PMC_layout),DIMENSION(PMC_MAX_MODELL) :: m_couplers   ! layout information of all couplers

   ! MPI settings

   INTEGER,PUBLIC                    :: m_model_comm          ! communicator of this model
   INTEGER,PUBLIC                    :: m_to_server_comm      ! intercommunicator to the server
   INTEGER,DIMENSION(PMC_MAX_MODELL) :: m_to_client_comm      ! intercommunicator(s) to the client(s)
   INTEGER,PUBLIC                    :: m_world_rank          ! rank in MPI_COMM_WORLD
   INTEGER                           :: m_world_npes          ! size of MPI_COMM_WORLD
   INTEGER,PUBLIC                    :: m_model_rank          ! rank within this model's communicator
   INTEGER,PUBLIC                    :: m_model_npes          ! size of this model's communicator
   INTEGER                           :: m_server_remote_size  ! number of server PEs (-1 for the root model)

   PUBLIC m_to_client_comm

   ! List of client ids for which this PE is server (terminated by -1)

   INTEGER,DIMENSION(:),POINTER,PUBLIC :: PMC_Server_for_Client

   INTERFACE pmc_is_rootmodel
      MODULE PROCEDURE pmc_is_rootmodel
   END INTERFACE pmc_is_rootmodel

   INTERFACE PMC_get_local_model_info
      MODULE PROCEDURE PMC_get_local_model_info
   END INTERFACE PMC_get_local_model_info

   PUBLIC pmc_get_local_model_info, pmc_init_model, pmc_is_rootmodel

 CONTAINS

111   SUBROUTINE pmc_init_model( comm, nesting_mode, pmc_status )
112
113      USE control_parameters,                                                  &
114          ONLY:  message_string
115
116      USE pegrid,                                                              &
117          ONLY:  myid
118
119      IMPLICIT NONE
120
121      CHARACTER(LEN=7), INTENT(OUT) ::  nesting_mode
122
123      INTEGER, INTENT(OUT)                ::  comm
124      INTEGER, INTENT(OUT)                ::  pmc_status
125
126      INTEGER                             ::  i, ierr, istat
127      INTEGER,DIMENSION(pmc_max_modell+1) ::  start_pe
128      INTEGER                             ::  m_my_cpl_rank
129      INTEGER                             ::  tag, clientcount
130      INTEGER,DIMENSION(pmc_max_modell)   ::  activeserver  ! I am active server for this client ID
131
132      pmc_status   = pmc_status_ok
133      comm         = -1
134      m_my_cpl_id  = -1
135      clientcount  =  0
136      activeserver = -1
137      start_pe(:)  =  0
138
139      CALL  MPI_COMM_RANK( MPI_COMM_WORLD, m_world_rank, istat )
140      CALL  MPI_COMM_SIZE( MPI_COMM_WORLD, m_world_npes, istat )
141!
142!--   Only PE 0 of root model reads
143      IF ( m_world_rank == 0 )  THEN
144
145         CALL read_coupling_layout( nesting_mode, pmc_status )
146
147         IF ( pmc_status /= pmc_no_namelist_found  .AND.                       &
148              pmc_status /= pmc_namelist_error )                               &
149         THEN
150!
151!--         Calculate start PE of every model
152            start_pe(1) = 0
153            DO  i = 2, m_nrofcpl+1
154               start_pe(i) = start_pe(i-1) +                                   &
155                             m_couplers(i-1)%npe_x * m_couplers(i-1)%npe_y
156            ENDDO
157
158!
159!--         The number of cores provided with the run must be the same as the
160!--         total sum of cores required by all nest domains
161!--         TO_DO: can we use > instead of /= ?
162            IF ( start_pe(m_nrofcpl+1) /= m_world_npes )  THEN
163!--            TO_DO: this IF statement is redundant
164               IF ( m_world_rank == 0 )  THEN
165                  WRITE ( message_string, '(A,I6,A,I6,A)' )                    &
166                                  'nesting-setup requires more MPI procs (',   &
167                                  start_pe(m_nrofcpl+1), ') than provided (',  &
168                                  m_world_npes,')'
169                  CALL message( 'pmc_init_model', 'PA0229', 3, 2, 0, 6, 0 )
170               ENDIF
171            ENDIF
172
173         ENDIF
174
175      ENDIF
176!
177!--   Broadcast the read status. This synchronises all other PEs with PE 0 of
178!--   the root model. Without synchronisation, they would not behave in the
179!--   correct way (e.g. they would not return in case of a missing NAMELIST)
180      CALL MPI_BCAST( pmc_status, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
181
182      IF ( pmc_status == pmc_no_namelist_found )  THEN
183!
184!--      Not a nested run; return the MPI_WORLD communicator
185         comm = MPI_COMM_WORLD
186         RETURN
187
188      ELSEIF ( pmc_status == pmc_namelist_error )  THEN
189!
190!--      Only the root model gives the error message. Others are aborted by the
191!--      message-routine with MPI_ABORT. Must be done this way since myid and
192!--      comm2d have not yet been assigned at this point.
193         IF ( m_world_rank == 0 )  THEN
194            message_string = 'errors in \$nestpar'
195            CALL message( 'pmc_init_model', 'PA0223', 3, 2, 0, 6, 0 )
196         ENDIF
197
198      ENDIF
199
200      CALL MPI_BCAST( m_nrofcpl, 1,          MPI_INTEGER, 0, MPI_COMM_WORLD, istat)
201      CALL MPI_BCAST( start_pe, m_nrofcpl+1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat)
202
203!
204!--   Broadcast coupling layout
205      DO  i = 1, m_nrofcpl
206         CALL MPI_BCAST( m_couplers(i)%name, LEN( m_couplers(i)%name ), MPI_CHARACTER, 0, MPI_COMM_WORLD, istat )
207         CALL MPI_BCAST( m_couplers(i)%id,           1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
208         CALL MPI_BCAST( m_couplers(i)%Parent_id,    1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
209         CALL MPI_BCAST( m_couplers(i)%npe_x,        1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
210         CALL MPI_BCAST( m_couplers(i)%npe_y,        1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
211         CALL MPI_BCAST( m_couplers(i)%lower_left_x, 1, MPI_REAL,    0, MPI_COMM_WORLD, istat )
212         CALL MPI_BCAST( m_couplers(i)%lower_left_y, 1, MPI_REAL,    0, MPI_COMM_WORLD, istat )
213      ENDDO
214
215!
216!--   Assign global MPI processes to individual models by setting the couple id
217      DO  i = 1, m_nrofcpl
218         IF ( m_world_rank >= start_pe(i)  .AND.  m_world_rank < start_pe(i+1) ) &
219         THEN
220            m_my_cpl_id = i
221            EXIT
222         ENDIF
223      ENDDO
224      m_my_cpl_rank = m_world_rank - start_pe(i)
225
226!
227!--   MPI_COMM_WORLD is the communicator for ALL models (MPI-1 approach).
228!--   The communictors for the individual models as created by MPI_COMM_SPLIT.
229!--   The color of the model is represented by the coupler id
230      CALL MPI_COMM_SPLIT( MPI_COMM_WORLD, m_my_cpl_id, m_my_cpl_rank, comm,   &
231                           istat )
232      IF ( istat /= MPI_SUCCESS )  THEN
233!
234!--      TO_DO: replace by message-call
235!--      TO_DO: Can this really happen, or is this just for the debugging phase?
236         IF ( m_world_rank == 0 )  WRITE (0,*) 'PMC: Error in MPI_Comm_split '
237         CALL MPI_ABORT( MPI_COMM_WORLD, ierr, istat )
238      ENDIF
239
240!
241!--   Get size and rank of the model running on this PE
242      CALL  MPI_COMM_RANK( comm, m_model_rank, istat )
243      CALL  MPI_COMM_SIZE( comm, m_model_npes, istat )
244
245!
246!--   Broadcast (from PE 0) the parent id and id of every model
247      DO  i = 1, m_nrofcpl
248         CALL MPI_BCAST( m_couplers(i)%parent_id, 1, MPI_INTEGER, 0,           &
249                         MPI_COMM_WORLD, istat )
250         CALL MPI_BCAST( m_couplers(i)%id,        1, MPI_INTEGER, 0,           &
251                         MPI_COMM_WORLD, istat )
252      ENDDO
253
254!
255!--   TO_DO: describe what is happening here, and why
256      m_model_comm = comm
257
258!
259!--   Create intercommunicator between server and clients.
260!--   MPI_INTERCOMM_CREATE creates an intercommunicator between 2 groups of
261!--   different colors.
262!--   The grouping was done above with MPI_COMM_SPLIT
263      DO  i = 2, m_nrofcpl
264
265         IF ( m_couplers(i)%parent_id == m_my_cpl_id )  THEN
266!
267!--         Collect server PEs
268!--         TO_DO: explain in more details, what is done here
269            tag = 500 + i
270            CALL MPI_INTERCOMM_CREATE( comm, 0, MPI_COMM_WORLD, start_pe(i),   &
271                                       tag, m_to_client_comm(i), istat)
272            clientcount = clientcount + 1
273            activeserver(i) = 1
274
275         ELSEIF ( i == m_my_cpl_id)  THEN
276!
277!--         Collect client PEs
278!--         TO_DO: explain in more detail, what is happening here
279            tag = 500 + i
280            CALL MPI_INTERCOMM_CREATE( comm, 0, MPI_COMM_WORLD,                &
281                                       start_pe(m_couplers(i)%parent_id),      &
282                                       tag, m_to_server_comm, istat )
283         ENDIF
284
285         IF ( istat /= MPI_SUCCESS )  THEN
286!
287!--         TO_DO: replace by message-call
288!--         TO_DO: can this really happen, or is this just for debugging?
289            IF ( m_world_rank == 0 )  WRITE (0,*) 'PMC: Error in Coupler Setup '
290            CALL MPI_ABORT( MPI_COMM_WORLD, ierr, istat )
291         ENDIF
292
293      ENDDO
294
295!
296!--   If I am server, count the number of clients that I have
297!--   Although this loop is symmetric on all processes, the "activeserver" flag
298!--   is true (==1) on the respective individual PE only.
299      ALLOCATE( pmc_server_for_client(clientcount+1) )
300
301      clientcount = 0
302      DO  i = 2, m_nrofcpl
303         IF ( activeserver(i) == 1 )  THEN
304            clientcount = clientcount + 1
305            pmc_server_for_client(clientcount) = i
306         ENDIF
307      ENDDO
308!--   TO_DO: explain why this is done
309      pmc_server_for_client(clientcount+1) = -1
310
311!
312!--   Get the size of the server model
313!--   TO_DO: what does "size" mean here? Number of PEs?
314      IF ( m_my_cpl_id > 1 )  THEN
315         CALL MPI_COMM_REMOTE_SIZE( m_to_server_comm, m_server_remote_size,    &
316                                    istat)
317      ELSE
318!
319!--      The root model does not have a server
320         m_server_remote_size = -1             !
321      ENDIF
322!
323!--   Set myid to non-tero value except for the root domain. This is a setting
324!--   for the message routine which is called at the end of pmci_init. That
325!--   routine outputs messages for myid = 0, only. However, myid has not been
326!--   assigened so far, so that all PEs of the root model would output a
327!--   message. To avoid this, set myid to some other value except for PE0 of the
328!--   root domain.
329      IF ( m_world_rank /= 0 )  myid = 1
330
331   END SUBROUTINE PMC_init_model
332
333
334!
335!-- Make module private variables available to palm
336!-- TO_DO: why can't they be available from the beginning, i.e. why do they
337!--        first have to be declared as different private variables?
338   SUBROUTINE pmc_get_local_model_info( my_cpl_id, my_cpl_parent_id, cpl_name, &
339                                        npe_x, npe_y, lower_left_x,            &
340                                        lower_left_y )
341
342      USE kinds
343
344      IMPLICIT NONE
345
346      CHARACTER(LEN=*), INTENT(OUT), OPTIONAL ::  cpl_name
347      INTEGER, INTENT(OUT), OPTIONAL          ::  my_cpl_id
348      INTEGER, INTENT(OUT), OPTIONAL          ::  my_cpl_parent_id
349      INTEGER, INTENT(OUT), OPTIONAL          ::  npe_x
350      INTEGER, INTENT(OUT), OPTIONAL          ::  npe_y
351      REAL(wp), INTENT(OUT), OPTIONAL         ::  lower_left_x
352      REAL(wp), INTENT(OUT), OPTIONAL         ::  lower_left_y
353
354!--   TO_DO: is the PRESENT clause really required here?
355      IF ( PRESENT( my_cpl_id )           )  my_cpl_id        = m_my_cpl_id
356      IF ( PRESENT( my_cpl_parent_id )    )  my_cpl_parent_id = m_couplers(my_cpl_id)%parent_id
357      IF ( PRESENT( cpl_name )            )  cpl_name         = m_couplers(my_cpl_id)%name
358      IF ( PRESENT( npe_x )               )  npe_x            = m_couplers(my_cpl_id)%npe_x
359      IF ( PRESENT( npe_y )               )  npe_y            = m_couplers(my_cpl_id)%npe_y
360      IF ( PRESENT( lower_left_x )        )  lower_left_x     = m_couplers(my_cpl_id)%lower_left_x
361      IF ( PRESENT( lower_left_y )        )  lower_left_y     = m_couplers(my_cpl_id)%lower_left_y
362
363   END SUBROUTINE pmc_get_local_model_info
364
365
366
367   LOGICAL function pmc_is_rootmodel( )
368
369      IMPLICIT NONE
370
371      pmc_is_rootmodel = ( m_my_cpl_id == 1 )
372
373   END FUNCTION pmc_is_rootmodel
374
375
376
377
378!-- TO_DO: what does this comment mean?
379! Private SUBROUTINEs
380 SUBROUTINE read_coupling_layout( nesting_mode, pmc_status )
381
382    IMPLICIT NONE
383
384    CHARACTER(LEN=7) ::  nesting_mode
385
386    INTEGER, INTENT(INOUT) ::  pmc_status
387    INTEGER                ::  i, istat, iunit
388
389    TYPE(pmc_layout), DIMENSION(pmc_max_modell) ::  domain_layouts
390
391
392    NAMELIST /nestpar/  domain_layouts, nesting_mode
393
394!
395!-- Initialize some coupling variables
396    domain_layouts(1:pmc_max_modell)%id = -1
397    m_nrofcpl =   0
398    iunit     = 345
399
400    pmc_status = pmc_status_ok
401
402!
403!-- Open the NAMELIST-file and read the nesting layout
404    CALL check_open( 11 )
405    READ ( 11, nestpar, IOSTAT=istat )
406
407    IF ( istat < 0 )  THEN
408!
409!--    No nestpar-NAMELIST found
410       pmc_status = pmc_no_namelist_found
411!
412!--    Set filepointer to the beginning of the file. Otherwise PE0 will later
413!--    be unable to read the inipar-NAMELIST
414       REWIND ( 11 )
415       RETURN
416
417    ELSEIF ( istat > 0 )  THEN
418!
419!--    Errors in reading nestpar-NAMELIST
420       pmc_status = pmc_namelist_error
421       RETURN
422
423    ENDIF
424
425!
426!-- Output location message
427    CALL location_message( 'initialize communicators for nesting', .FALSE. )
428!
429!-- Assign the layout to the internally used variable
430    m_couplers = domain_layouts
431
432!
433!-- Get the number of nested models given in the nestpar-NAMELIST
434    DO  i = 1, pmc_max_modell
435
436       IF ( m_couplers(i)%id /= -1  .AND.  i <= pmc_max_modell )  THEN
437          WRITE ( 0, '(A,A,1X,4I7,1X,2F10.2)' )  'Set up Model  ',             &
438                              TRIM( m_couplers(i)%name ), m_couplers(i)%id,    &
439                              m_couplers(i)%Parent_id, m_couplers(i)%npe_x,    &
440                              m_couplers(i)%npe_y, m_couplers(i)%lower_left_x, &
441                              m_couplers(i)%lower_left_y
442       ELSE
443!
444!--       When id=-1 is found for the first time, the list of domains is
445!--       finished (or latest after pmc_max_modell entries
446          m_nrofcpl = i - 1
447          EXIT
448       ENDIF
449
450    ENDDO
451
452 END SUBROUTINE read_coupling_layout
453
454#endif
455 END MODULE pmc_handle_communicator
Note: See TracBrowser for help on using the repository browser.