 MODULE pmc_handle_communicator

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2015 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! ------------------
!
!
! Former revisions:
! -----------------
! $Id: pmc_handle_communicator.f90 1809 2016-04-05 20:13:28Z knoop $
!
! 1808 2016-04-05 19:44:00Z raasch
! MPI module used by default on all machines
!
! 1797 2016-03-21 16:50:28Z raasch
! introduction of different datatransfer modes,
! export of comm_world_nesting
!
! 1791 2016-03-11 10:41:25Z raasch
! m_nrofcpl renamed m_ncpl,
! pmc_get_local_model_info renamed pmc_get_model_info, some keywords also
! renamed and some added,
! debug write-statements commented out
!
! 1786 2016-03-08 05:49:27Z raasch
! Bugfix: nesting_mode is broadcast now
!
! 1779 2016-03-03 08:01:28Z raasch
! only the total number of PEs is given in the nestpar-NAMELIST,
! additional comments included
!
! 1764 2016-02-28 12:45:19Z raasch
! pmc_layout type: comm_cpl and comm_parent removed, character "name" moved to
! the beginning of the variable list,
! domain layout is read with new NAMELIST nestpar from standard file PARIN,
! MPI-datatype REAL8 replaced by REAL, kind=8 replaced by wp,
! variable domain_layouts instead of m_couplers introduced for this NAMELIST,
! general format changed to PALM style
!
! 1762 2016-02-25 12:31:13Z hellstea
! Initial revision by K. Ketelsen
!
! Description:
! ------------
! Handle MPI communicator in PALM model coupler
!------------------------------------------------------------------------------!

#if defined( __parallel )
    USE kinds

#if defined( __mpifh )
    INCLUDE "mpif.h"
#else
    USE MPI
#endif

    USE pmc_general,                                                            &
        ONLY: pmc_status_ok, pmc_status_error, pmc_max_modell

    IMPLICIT NONE

    TYPE pmc_layout

       CHARACTER(LEN=32) ::  name

       INTEGER  ::  id
       INTEGER  ::  parent_id
       INTEGER  ::  npe_total

       REAL(wp) ::  lower_left_x
       REAL(wp) ::  lower_left_y

    END TYPE pmc_layout

    PUBLIC  pmc_status_ok, pmc_status_error

    INTEGER, PARAMETER, PUBLIC ::  pmc_error_npes        = 1  ! illegal number of PEs
    INTEGER, PARAMETER, PUBLIC ::  pmc_namelist_error    = 2  ! error(s) in nestpar namelist
    INTEGER, PARAMETER, PUBLIC ::  pmc_no_namelist_found = 3  ! no couple layout file found

!
!-- Coupler setup
    INTEGER ::  m_world_comm  ! global nesting communicator
    INTEGER ::  m_my_cpl_id   ! coupler id of this model
    INTEGER ::  m_parent_id   ! coupler id of the parent of this model
    INTEGER ::  m_ncpl        ! number of couplers given in the layout file

    TYPE(pmc_layout), DIMENSION(pmc_max_modell) ::  m_couplers  ! information of all couplers
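!
!-- Illustration only (added comment, not part of the original code): the
!-- m_couplers entries are filled from the domain_layouts array of the
!-- &nestpar NAMELIST in the PARIN file. A hypothetical two-domain setup
!-- could look like
!--
!--    &nestpar  domain_layouts = 'coarse', 1, -1, 16,    0.0,    0.0,
!--                               'fine',   2,  1,  8, 1000.0, 1000.0,
!--              nesting_mode = 'two-way'  /
!--
!-- where each entry lists name, id, parent_id, npe_total, lower_left_x and
!-- lower_left_y in the component order of TYPE pmc_layout above; all values
!-- shown here are made-up examples.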
!
!-- MPI settings
    INTEGER, PUBLIC ::  m_model_comm      ! communicator of this model
    INTEGER, PUBLIC ::  m_to_server_comm  ! communicator to the server
    INTEGER, DIMENSION(pmc_max_modell) ::  m_to_client_comm  ! communicator to the client(s)
    INTEGER, PUBLIC ::  m_world_rank
    INTEGER         ::  m_world_npes
    INTEGER, PUBLIC ::  m_model_rank
    INTEGER, PUBLIC ::  m_model_npes
    INTEGER         ::  m_server_remote_size  ! number of server PEs

    PUBLIC  m_to_client_comm

!
!-- Indicates that this PE is server for the client with the respective number
    INTEGER, DIMENSION(:), POINTER, PUBLIC ::  pmc_server_for_client

    INTERFACE pmc_is_rootmodel
       MODULE PROCEDURE pmc_is_rootmodel
    END INTERFACE pmc_is_rootmodel

    INTERFACE pmc_get_model_info
       MODULE PROCEDURE pmc_get_model_info
    END INTERFACE pmc_get_model_info

    PUBLIC pmc_get_model_info, pmc_init_model, pmc_is_rootmodel

 CONTAINS

 SUBROUTINE pmc_init_model( comm, nesting_datatransfer_mode, nesting_mode,      &
                            pmc_status )

    USE control_parameters,                                                     &
        ONLY:  message_string

    USE pegrid,                                                                 &
        ONLY:  myid

    IMPLICIT NONE

    CHARACTER(LEN=7), INTENT(OUT) ::  nesting_mode
    CHARACTER(LEN=7), INTENT(OUT) ::  nesting_datatransfer_mode

    INTEGER, INTENT(OUT) ::  comm
    INTEGER, INTENT(OUT) ::  pmc_status

    INTEGER ::  i, ierr, istat
    INTEGER, DIMENSION(pmc_max_modell+1) ::  start_pe
    INTEGER ::  m_my_cpl_rank
    INTEGER ::  tag, clientcount

    INTEGER, DIMENSION(pmc_max_modell) ::  activeserver  ! I am active server for this client id

    pmc_status   = pmc_status_ok
    comm         = -1
    m_world_comm = MPI_COMM_WORLD
    m_my_cpl_id  = -1
    clientcount  = 0
    activeserver = -1
    start_pe(:)  = 0

    CALL MPI_COMM_RANK( MPI_COMM_WORLD, m_world_rank, istat )
    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, m_world_npes, istat )
!
!-- Only PE 0 of the root model reads the coupling layout
    IF ( m_world_rank == 0 )  THEN

       CALL read_coupling_layout( nesting_datatransfer_mode, nesting_mode,      &
                                  pmc_status )

       IF ( pmc_status /= pmc_no_namelist_found  .AND.                          &
            pmc_status /= pmc_namelist_error )                                  &
       THEN
!
!--       Calculate the start PE of every model
          start_pe(1) = 0
          DO  i = 2, m_ncpl+1
             start_pe(i) = start_pe(i-1) + m_couplers(i-1)%npe_total
          ENDDO

!
!--       The number of cores provided with the run must be the same as the
!--       total sum of cores required by all nest domains
          IF ( start_pe(m_ncpl+1) /= m_world_npes )  THEN
             WRITE ( message_string, '(A,I6,A,I6,A)' )                          &
                     'nesting-setup requires more MPI procs (',                 &
                     start_pe(m_ncpl+1), ') than provided (',                   &
                     m_world_npes, ')'
             CALL message( 'pmc_init_model', 'PA0229', 3, 2, 0, 6, 0 )
          ENDIF

       ENDIF

    ENDIF
!
!-- Broadcast the read status. This synchronises all other PEs with PE 0 of
!-- the root model. Without synchronisation, they would not behave in the
!-- correct way (e.g. they would not return in case of a missing NAMELIST)
    CALL MPI_BCAST( pmc_status, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )

    IF ( pmc_status == pmc_no_namelist_found )  THEN
!
!--    Not a nested run; return the MPI_WORLD communicator
       comm = MPI_COMM_WORLD
       RETURN

    ELSEIF ( pmc_status == pmc_namelist_error )  THEN
!
!--    Only the root model gives the error message. Others are aborted by the
!--    message-routine with MPI_ABORT. Must be done this way since myid and
!--    comm2d have not yet been assigned at this point.
       IF ( m_world_rank == 0 )  THEN
          message_string = 'errors in \$nestpar'
          CALL message( 'pmc_init_model', 'PA0223', 3, 2, 0, 6, 0 )
       ENDIF

    ENDIF

    CALL MPI_BCAST( m_ncpl,          1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
    CALL MPI_BCAST( start_pe, m_ncpl+1, MPI_INTEGER, 0, MPI_COMM_WORLD, istat )
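!
!-- Worked example (added comment, values are hypothetical): for a root model
!-- with npe_total = 16 and a single nest with npe_total = 8, the cumulative
!-- sum computed above (on PE 0 of the root model) yields
!-- start_pe = (/ 0, 16, 24 /), i.e. world ranks 0-15 belong to model 1,
!-- ranks 16-23 to model 2, and start_pe(m_ncpl+1) = 24 must match
!-- m_world_npes.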
!
!-- Broadcast the coupling layout
    DO  i = 1, m_ncpl
       CALL MPI_BCAST( m_couplers(i)%name, LEN( m_couplers(i)%name ),           &
                       MPI_CHARACTER, 0, MPI_COMM_WORLD, istat )
       CALL MPI_BCAST( m_couplers(i)%id,           1, MPI_INTEGER, 0,           &
                       MPI_COMM_WORLD, istat )
       CALL MPI_BCAST( m_couplers(i)%parent_id,    1, MPI_INTEGER, 0,           &
                       MPI_COMM_WORLD, istat )
       CALL MPI_BCAST( m_couplers(i)%npe_total,    1, MPI_INTEGER, 0,           &
                       MPI_COMM_WORLD, istat )
       CALL MPI_BCAST( m_couplers(i)%lower_left_x, 1, MPI_REAL,    0,           &
                       MPI_COMM_WORLD, istat )
       CALL MPI_BCAST( m_couplers(i)%lower_left_y, 1, MPI_REAL,    0,           &
                       MPI_COMM_WORLD, istat )
!
!--    TO_DO: the next two calls can and should probably be moved outside this loop
       CALL MPI_BCAST( nesting_mode, LEN( nesting_mode ), MPI_CHARACTER, 0,     &
                       MPI_COMM_WORLD, istat )
       CALL MPI_BCAST( nesting_datatransfer_mode,                               &
                       LEN( nesting_datatransfer_mode ), MPI_CHARACTER, 0,      &
                       MPI_COMM_WORLD, istat )
    ENDDO

!
!-- Assign global MPI processes to the individual models by setting the couple id
    DO  i = 1, m_ncpl
       IF ( m_world_rank >= start_pe(i)  .AND.  m_world_rank < start_pe(i+1) )  &
       THEN
          m_my_cpl_id = i
          EXIT
       ENDIF
    ENDDO
    m_my_cpl_rank = m_world_rank - start_pe(i)

!
!-- MPI_COMM_WORLD is the communicator for ALL models (MPI-1 approach).
!-- The communicators for the individual models are created by MPI_COMM_SPLIT.
!-- The color of a model is represented by its coupler id
    CALL MPI_COMM_SPLIT( MPI_COMM_WORLD, m_my_cpl_id, m_my_cpl_rank, comm,      &
                         istat )

!
!-- Get size and rank of the model running on this PE
    CALL MPI_COMM_RANK( comm, m_model_rank, istat )
    CALL MPI_COMM_SIZE( comm, m_model_npes, istat )

!
!-- Broadcast (from PE 0) the parent id and id of every model
    DO  i = 1, m_ncpl
       CALL MPI_BCAST( m_couplers(i)%parent_id, 1, MPI_INTEGER, 0,              &
                       MPI_COMM_WORLD, istat )
       CALL MPI_BCAST( m_couplers(i)%id,        1, MPI_INTEGER, 0,              &
                       MPI_COMM_WORLD, istat )
    ENDDO

!
!-- Save the current model communicator for pmc-internal use
    m_model_comm = comm

!
!-- Create intercommunicators between server and clients.
!-- MPI_INTERCOMM_CREATE creates an intercommunicator between 2 groups of
!-- different colors.
!-- The grouping was done above with MPI_COMM_SPLIT
    DO  i = 2, m_ncpl

       IF ( m_couplers(i)%parent_id == m_my_cpl_id )  THEN
!
!--       Collect server PEs.
!--       The current model acts as server for client model i. Create an
!--       intercommunicator that connects the current PE to all PEs of that
!--       client model
          tag = 500 + i
          CALL MPI_INTERCOMM_CREATE( comm, 0, MPI_COMM_WORLD, start_pe(i),      &
                                     tag, m_to_client_comm(i), istat )
          clientcount = clientcount + 1
          activeserver(i) = 1

       ELSEIF ( i == m_my_cpl_id )  THEN
!
!--       Collect client PEs.
!--       Every model except the root model has a parent model which acts as
!--       its server model. Create an intercommunicator that connects the
!--       current PE to all PEs of the server model
          tag = 500 + i
          CALL MPI_INTERCOMM_CREATE( comm, 0, MPI_COMM_WORLD,                   &
                                     start_pe(m_couplers(i)%parent_id),         &
                                     tag, m_to_server_comm, istat )
       ENDIF

    ENDDO

!
!-- If I am server, count the number of clients that I have.
!-- Although this loop is symmetric on all processes, the "activeserver" flag
!-- is true (==1) on the respective individual PE only.
    ALLOCATE( pmc_server_for_client(clientcount+1) )

    clientcount = 0
    DO  i = 2, m_ncpl
       IF ( activeserver(i) == 1 )  THEN
          clientcount = clientcount + 1
          pmc_server_for_client(clientcount) = i
       ENDIF
    ENDDO

!
!-- Get the size of the server model
    IF ( m_my_cpl_id > 1 )  THEN
       CALL MPI_COMM_REMOTE_SIZE( m_to_server_comm, m_server_remote_size,       &
                                  istat )
    ELSE
!
!--    The root model does not have a server
       m_server_remote_size = -1
    ENDIF
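!
!-- Communicator overview (added comment for illustration): for a hypothetical
!-- layout with a root model (id 1) and two clients (ids 2 and 3, both with
!-- parent_id = 1), the calls above provide
!--    comm / m_model_comm      intracommunicator of the model on this PE
!--    m_to_client_comm(2:3)    intercommunicators held by the root (server)
!--    m_to_server_comm         intercommunicator held by each client model
!-- and pmc_server_for_client lists the ids of the clients served by this PE.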
!
!-- Set myid to a non-zero value except for the root domain. This is a setting
!-- for the message routine which is called at the end of pmci_init. That
!-- routine outputs messages for myid = 0, only. However, myid has not been
!-- assigned so far, so that all PEs of the root model would output a
!-- message. To avoid this, set myid to some other value except for PE0 of the
!-- root domain.
    IF ( m_world_rank /= 0 )  myid = 1

 END SUBROUTINE pmc_init_model


!
!-- Provide module-private variables of the pmc for PALM
 SUBROUTINE pmc_get_model_info( comm_world_nesting, cpl_id, cpl_name,           &
                                cpl_parent_id, lower_left_x, lower_left_y,      &
                                ncpl, npe_total, request_for_cpl_id )

    USE kinds

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(OUT), OPTIONAL ::  cpl_name

    INTEGER, INTENT(IN),  OPTIONAL ::  request_for_cpl_id

    INTEGER, INTENT(OUT), OPTIONAL ::  comm_world_nesting
    INTEGER, INTENT(OUT), OPTIONAL ::  cpl_id
    INTEGER, INTENT(OUT), OPTIONAL ::  cpl_parent_id
    INTEGER, INTENT(OUT), OPTIONAL ::  ncpl
    INTEGER, INTENT(OUT), OPTIONAL ::  npe_total

    INTEGER ::  requested_cpl_id

    REAL(wp), INTENT(OUT), OPTIONAL ::  lower_left_x
    REAL(wp), INTENT(OUT), OPTIONAL ::  lower_left_y

!
!-- Set the requested coupler id
    IF ( PRESENT( request_for_cpl_id ) )  THEN
       requested_cpl_id = request_for_cpl_id
!
!--    Check for allowed range of values
       IF ( requested_cpl_id < 1  .OR.  requested_cpl_id > m_ncpl )  RETURN
    ELSE
       requested_cpl_id = m_my_cpl_id
    ENDIF

!
!-- Return the requested information
    IF ( PRESENT( comm_world_nesting ) )  THEN
       comm_world_nesting = m_world_comm
    ENDIF
    IF ( PRESENT( cpl_id ) )  THEN
       cpl_id = requested_cpl_id
    ENDIF
    IF ( PRESENT( cpl_parent_id ) )  THEN
       cpl_parent_id = m_couplers(requested_cpl_id)%parent_id
    ENDIF
    IF ( PRESENT( cpl_name ) )  THEN
       cpl_name = m_couplers(requested_cpl_id)%name
    ENDIF
    IF ( PRESENT( ncpl ) )  THEN
       ncpl = m_ncpl
    ENDIF
    IF ( PRESENT( npe_total ) )  THEN
       npe_total = m_couplers(requested_cpl_id)%npe_total
    ENDIF
    IF ( PRESENT( lower_left_x ) )  THEN
       lower_left_x = m_couplers(requested_cpl_id)%lower_left_x
    ENDIF
    IF ( PRESENT( lower_left_y ) )  THEN
       lower_left_y = m_couplers(requested_cpl_id)%lower_left_y
    ENDIF

 END SUBROUTINE pmc_get_model_info


 LOGICAL FUNCTION pmc_is_rootmodel( )

    IMPLICIT NONE

    pmc_is_rootmodel = ( m_my_cpl_id == 1 )

 END FUNCTION pmc_is_rootmodel


 SUBROUTINE read_coupling_layout( nesting_datatransfer_mode, nesting_mode,      &
                                  pmc_status )

    IMPLICIT NONE

    CHARACTER(LEN=7), INTENT(INOUT) ::  nesting_mode
    CHARACTER(LEN=7), INTENT(INOUT) ::  nesting_datatransfer_mode

    INTEGER, INTENT(INOUT) ::  pmc_status
    INTEGER                ::  i, istat

    TYPE(pmc_layout), DIMENSION(pmc_max_modell) ::  domain_layouts

    NAMELIST /nestpar/  domain_layouts, nesting_datatransfer_mode, nesting_mode

!
!-- Initialize some coupling variables
    domain_layouts(1:pmc_max_modell)%id = -1
    m_ncpl     = 0
    pmc_status = pmc_status_ok

!
!-- Open the NAMELIST file and read the nesting layout
    CALL check_open( 11 )
    READ ( 11, nestpar, IOSTAT=istat )

    IF ( istat < 0 )  THEN
!
!--    No nestpar-NAMELIST found
       pmc_status = pmc_no_namelist_found
!
!--    Set the file pointer back to the beginning of the file. Otherwise PE0
!--    will later be unable to read the inipar-NAMELIST
       REWIND ( 11 )
       RETURN

    ELSEIF ( istat > 0 )  THEN
!
!--    Errors in reading the nestpar-NAMELIST
       pmc_status = pmc_namelist_error
       RETURN

    ENDIF

!
!-- Output location message
    CALL location_message( 'initialize communicators for nesting', .FALSE. )
!
!-- Assign the layout to the internally used variable
    m_couplers = domain_layouts

!
!-- Get the number of nested models given in the nestpar-NAMELIST
    DO  i = 1, pmc_max_modell
!
!--    When id=-1 is found for the first time, the list of domains is finished
       IF ( m_couplers(i)%id == -1  .OR.  i == pmc_max_modell )  THEN
          IF ( m_couplers(i)%id == -1 )  THEN
             m_ncpl = i - 1
             EXIT
          ELSE
             m_ncpl = pmc_max_modell
          ENDIF
       ENDIF

    ENDDO

 END SUBROUTINE read_coupling_layout

#endif
 END MODULE pmc_handle_communicator
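!
!-- Usage sketch (added comment, not part of the original code): the nesting
!-- interface (pmci_init) is expected to use this module roughly as follows;
!-- the local variable names in this sketch (my_comm, status, my_id, llx, lly)
!-- are placeholders only:
!--
!--    CALL pmc_init_model( my_comm, nesting_datatransfer_mode, nesting_mode,  &
!--                         status )
!--    IF ( .NOT. pmc_is_rootmodel() )  THEN
!--       CALL pmc_get_model_info( cpl_id = my_id, lower_left_x = llx,         &
!--                                lower_left_y = lly )
!--    ENDIF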