Changeset 1900 for palm/trunk/SOURCE
- Timestamp: May 4, 2016 3:27:53 PM
- Location: palm/trunk/SOURCE
- Files: 5 edited, 1 moved
palm/trunk/SOURCE/Makefile
r1873 → r1900

… (lines 20–24, revision log)
  # Current revisions:
  # ------------------
- #
+ # pmc_general renamed pmc_general_mod
  #
  # Former revisions:

… (lines 303–309, SOURCES list)
  	mod_particle_attributes.f90 netcdf_interface_mod.f90 nudging_mod.f90 \
  	package_parin.f90 palm.f90 parin.f90 plant_canopy_model_mod.f90 pmc_interface_mod.f90 \
- 	pmc_client_mod.f90 pmc_general.f90 pmc_handle_communicator_mod.f90 pmc_mpi_wrapper_mod.f90 \
- 	pmc_server_mod.f90 \
- 	poisfft_mod.f90 poismg.f90 \
+ 	pmc_client_mod.f90 pmc_general_mod.f90 pmc_handle_communicator_mod.f90 \
+ 	pmc_mpi_wrapper_mod.f90 pmc_server_mod.f90 poisfft_mod.f90 poismg.f90 \
  	poismg_fast_mod.f90 pres.f90 print_1d.f90 production_e.f90 \
  	prognostic_equations.f90 progress_bar_mod.f90 radiation_model_mod.f90 \

… (lines 486–496, dependency rules)
  	radiation_model_mod.o microphysics_mod.o
  plant_canopy_model_mod.o: modules.o mod_kinds.o
- pmc_interface_mod.o: modules.o mod_kinds.o pmc_client_mod.o pmc_general.o\
+ pmc_interface_mod.o: modules.o mod_kinds.o pmc_client_mod.o pmc_general_mod.o\
  	pmc_handle_communicator_mod.o pmc_mpi_wrapper_mod.o pmc_server_mod.o
- pmc_client_mod.o: mod_kinds.o pmc_general.o pmc_handle_communicator_mod.o\
+ pmc_client_mod.o: mod_kinds.o pmc_general_mod.o pmc_handle_communicator_mod.o\
  	pmc_mpi_wrapper_mod.o
- pmc_general.o: mod_kinds.o
+ pmc_general_mod.o: mod_kinds.o
- pmc_handle_communicator_mod.o: modules.o mod_kinds.o pmc_general.o
+ pmc_handle_communicator_mod.o: modules.o mod_kinds.o pmc_general_mod.o
  pmc_mpi_wrapper_mod.o: pmc_handle_communicator_mod.o
- pmc_server_mod.o: pmc_general.o pmc_handle_communicator_mod.o pmc_mpi_wrapper_mod.o
+ pmc_server_mod.o: pmc_general_mod.o pmc_handle_communicator_mod.o pmc_mpi_wrapper_mod.o
  poisfft_mod.o: modules.o cpulog_mod.o fft_xy_mod.o mod_kinds.o tridia_solver_mod.o
  poismg.o: modules.o cpulog_mod.o mod_kinds.o
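Only the source-file and object names change in the Makefile; the Fortran module itself keeps its original name pmc_general (see the file below), so USE statements in dependent code are unaffected. A minimal usage sketch, not part of the changeset, assuming the PALM source tree and a build with the __parallel preprocessor flag (the program name is illustrative):

    ! Hypothetical check program - the module is still accessed as
    ! pmc_general, only its source file is now pmc_general_mod.f90.
    PROGRAM rename_check
       USE pmc_general,  ONLY:  pmc_max_array, pmc_max_models
       IMPLICIT NONE
       PRINT*, 'up to ', pmc_max_array, ' coupled arrays and ',               &
               pmc_max_models, ' models'
    END PROGRAM rename_check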
palm/trunk/SOURCE/pmc_general_mod.f90
r1899 → r1900  (file renamed to pmc_general_mod.f90; the module keeps the name pmc_general)

… (lines 20–24, revision log)
- !
+ ! re-formatted to match PALM style, file renamed again

… (lines 56–142, declaration part)
The declaration part is re-written in PALM style: upper-case Fortran keywords
(USE, INTRINSIC :: ISO_C_BINDING, IMPLICIT NONE), lower-case identifiers with
"!<" doxygen comments, and one constant renamed:

- INTEGER,parameter,PUBLIC :: PMC_MAX_MODELL  = 64
+ INTEGER, PARAMETER, PUBLIC :: pmc_max_models   = 64  !<

The public constants now read

+ INTEGER, PARAMETER, PUBLIC :: da_desclen       =  8  !<
+ INTEGER, PARAMETER, PUBLIC :: da_namelen       = 16  !<
+ INTEGER, PARAMETER, PUBLIC :: pmc_da_name_err  = 10  !<
+ INTEGER, PARAMETER, PUBLIC :: pmc_max_array    = 32  !< max # of arrays which can be coupled
+ INTEGER, PARAMETER, PUBLIC :: pmc_max_models   = 64  !<
+ INTEGER, PARAMETER, PUBLIC :: pmc_status_ok    =  0  !<
+ INTEGER, PARAMETER, PUBLIC :: pmc_status_error = -1  !<

and the derived types ArrayDef, PeDef, ClientDef and DA_NameDef become
arraydef, pedef, clientdef and da_namedef (members unchanged, now documented
with "!<" comments).  The interfaces and the public list become

+ INTERFACE pmc_g_setname
+    MODULE PROCEDURE pmc_g_setname
+ END INTERFACE pmc_g_setname
+
+ INTERFACE pmc_sort
+    MODULE PROCEDURE sort_2d_i
+ END INTERFACE pmc_sort
+
+ PUBLIC pmc_g_setname, pmc_sort

… (lines 144–204, module procedures)
The subroutines PMC_G_SetName and sort_2d_i are re-formatted accordingly
(lower-case pmc_g_setname / sort_2d_i, DO/ENDDO and IF/ENDIF blocks, trailing
RETURN statements removed); their logic is unchanged: pmc_g_setname appends an
array name and couple index to the array list of every remote PE of the given
client, and sort_2d_i sorts the columns of a 2-d integer array by the values
in row sort_ind.

- end MODULE pmc_general
+ END MODULE pmc_general
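For reference, a self-contained sketch (not part of the changeset) of what the re-formatted pmc_sort / sort_2d_i does: it reorders the columns of a 2-d integer array so that the values in the chosen row are ascending. The demo routine mirrors the exchange-sort logic of the module procedure but is an independent copy:

    ! Stand-alone demo of the column sort used by pmc_sort / sort_2d_i.
    PROGRAM sort_demo
       IMPLICIT NONE
       INTEGER :: a(2,4)
       a = RESHAPE( (/ 3, 30, 1, 10, 4, 40, 2, 20 /), (/ 2, 4 /) )
       CALL sort_2d_i_demo( a, 1 )        ! sort columns by row 1
       PRINT*, a(1,:)                     ! expected: 1 2 3 4
       PRINT*, a(2,:)                     ! expected: 10 20 30 40
    CONTAINS
       SUBROUTINE sort_2d_i_demo( array, sort_ind )
          INTEGER, DIMENSION(:,:), INTENT(INOUT) :: array
          INTEGER, INTENT(IN)                    :: sort_ind
          INTEGER :: i, j
          INTEGER :: tmp(SIZE( array, 1 ))
          DO  j = 1, SIZE( array, 2 ) - 1
             DO  i = j+1, SIZE( array, 2 )
                IF ( array(sort_ind,i) < array(sort_ind,j) )  THEN
                   tmp = array(:,i)       ! swap whole columns i and j
                   array(:,i) = array(:,j)
                   array(:,j) = tmp
                ENDIF
             ENDDO
          ENDDO
       END SUBROUTINE sort_2d_i_demo
    END PROGRAM sort_demo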
palm/trunk/SOURCE/pmc_handle_communicator_mod.f90
r1883 → r1900

… (lines 20–24, revision log)
- !
+ ! re-formatting to match PALM style

… (lines 32–36, former revisions)
  ! 1850 2016-04-08 13:29:27Z maronga
  ! Module renamed
- !
  !
  ! 1808 2016-04-05 19:44:00Z raasch

… (lines 79–139, declaration part)
- USE pmc_general,                                                            &
-     ONLY: pmc_status_ok, pmc_status_error, pmc_max_modell
+ USE pmc_general,                                                            &
+     ONLY: pmc_status_ok, pmc_status_error, pmc_max_models

The type pmc_layout and the public error constants (pmc_error_npes,
pmc_namelist_error, pmc_no_namelist_found) are unchanged in content but get
"!<" doxygen comments; the "Coupler Setup" / "MPI settings" comment headers
are dropped.  The module variables use lower-case names and the renamed
constant, e.g.

- TYPE(PMC_layout),DIMENSION(PMC_MAX_MODELL) :: m_couplers            !Information of all couplers
+ TYPE(pmc_layout), DIMENSION(pmc_max_models) :: m_couplers           !< information of all couplers

- INTEGER,DIMENSION(PMC_MAX_MODELL)           :: m_to_client_comm     !Communicator to the client(s)
- PUBLIC m_to_client_comm
- INTEGER,DIMENSION(:),POINTER,PUBLIC         :: PMC_Server_for_Client
+ INTEGER, DIMENSION(pmc_max_models), PUBLIC  :: m_to_client_comm      !< communicator to the client(s)
+ INTEGER, DIMENSION(:), POINTER, PUBLIC      :: pmc_server_for_client !<

… (lines 143–353, SUBROUTINE pmc_init_model)
The body of pmc_init_model is re-indented, its local declarations are
re-ordered (one variable per line, "!<" comments), activeserver and start_pe
are dimensioned with pmc_max_models, and the long MPI_BCAST calls that
distribute the coupling layout are wrapped onto continuation lines.  The
algorithm itself is unchanged: PE 0 of the root model reads the coupling
layout, the read status and the layout are broadcast to all PEs,
MPI_COMM_SPLIT creates one communicator per model (colour = coupler id), and
MPI_INTERCOMM_CREATE connects every client model with its server (parent)
model.

… (lines 355–428, pmc_get_model_info / pmc_is_rootmodel)
Both routines are re-formatted only ("!<" comments, PALM indentation); the
OPTIONAL-argument handling of pmc_get_model_info and the root-model test
( m_my_cpl_id == 1 ) are unchanged.

… (lines 436–497, read_coupling_layout)
- TYPE(pmc_layout), DIMENSION(pmc_max_modell) :: domain_layouts
+ TYPE(pmc_layout), DIMENSION(pmc_max_models) :: domain_layouts
…
- domain_layouts(1:pmc_max_modell)%id = -1
+ domain_layouts(1:pmc_max_models)%id = -1
…
- DO  i = 1, pmc_max_modell
+ DO  i = 1, pmc_max_models
- IF ( m_couplers(i)%id == -1  .OR.  i == pmc_max_modell )  THEN
+ IF ( m_couplers(i)%id == -1  .OR.  i == pmc_max_models )  THEN
- m_ncpl = pmc_max_modell
+ m_ncpl = pmc_max_models
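Once pmc_init_model has set up the communicators, model code can query the nesting layout through the OPTIONAL arguments of pmc_get_model_info. A usage sketch, not part of the changeset; the subroutine name is hypothetical and a nested run built with __parallel is assumed:

    ! Hypothetical reporting routine using the public query interface.
    SUBROUTINE report_nesting_info
       USE pmc_handle_communicator,                                           &
           ONLY:  pmc_get_model_info, pmc_is_rootmodel
       IMPLICIT NONE
       CHARACTER(LEN=32) :: my_name
       INTEGER           :: my_id, parent_id, ncpl
       ! Only the requested OPTIONAL quantities are returned
       CALL pmc_get_model_info( cpl_id = my_id, cpl_parent_id = parent_id,    &
                                cpl_name = my_name, ncpl = ncpl )
       IF ( pmc_is_rootmodel() )  PRINT*, 'root model, couplers in total: ', ncpl
       PRINT*, 'model ', TRIM( my_name ), ' id ', my_id, ' parent ', parent_id
    END SUBROUTINE report_nesting_info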
palm/trunk/SOURCE/pmc_interface_mod.f90
r1895 → r1900

… (lines 150–154)
  USE pmc_general,                                                            &
-     ONLY:  da_namelen, pmc_max_modell, pmc_status_ok
+     ONLY:  da_namelen

  USE pmc_handle_communicator,                                                &
palm/trunk/SOURCE/pmc_mpi_wrapper_mod.f90
r1851 → r1900

… (lines 20–24, revision log)
- !
+ ! re-formatted to match PALM style

… (lines 52–67, module header)
- use, intrinsic :: iso_c_binding
+ USE, INTRINSIC ::  ISO_C_BINDING
…
- USE kinds
- USE PMC_handle_communicator, ONLY: m_to_server_comm, m_to_client_comm, m_model_comm, m_model_rank
- IMPLICIT none
+ USE kinds
+ USE pmc_handle_communicator,                                                &
+     ONLY: m_model_comm, m_model_rank, m_to_server_comm, m_to_client_comm
+
+ IMPLICIT NONE

… (lines 69–119, interfaces)
All generic interfaces and module procedures are renamed to lower case:
pmc_send_to_server, pmc_recv_from_server, pmc_send_to_client,
pmc_recv_from_client, pmc_bcast, pmc_inter_bcast, pmc_alloc_mem and pmc_time;
the PUBLIC statement lists them alphabetically.

… (lines 120–620, module procedures)
The individual send/receive wrappers (integer, 2-d integer and rank-1/2/3
REAL(wp) versions for server and client side), pmc_bcast_integer,
pmc_bcast_character, pmc_inter_bcast_integer_1, the MPI_ALLOC_MEM based
pmc_alloc_mem_integer_1 / pmc_alloc_mem_real_1 and the MPI_WTIME wrapper
pmc_time are re-formatted only: upper-case MPI and C-interoperability calls
(MPI_SEND, MPI_RECV, MPI_BCAST, MPI_ALLOC_MEM, C_F_POINTER), "!<" comments on
the declarations, IF/ENDIF blocks, and trailing RETURN statements removed.
A representative example:

- SUBROUTINE PMC_Send_to_Server_INTEGER (buf, n, Server_rank, tag, ierr)
-    IMPLICIT none
-    INTEGER, DIMENSION(:), INTENT(IN) :: buf
-    INTEGER, INTENT(IN)               :: n
-    INTEGER, INTENT(IN)               :: Server_rank
-    INTEGER, INTENT(IN)               :: tag
-    INTEGER, INTENT(OUT)              :: ierr
-
-    ierr = 0
-    CALL MPI_Send (buf, n, MPI_INTEGER, Server_rank, tag, m_to_server_comm, ierr)
-
-    return
- END SUBROUTINE PMC_Send_to_Server_INTEGER
+ SUBROUTINE pmc_send_to_server_integer( buf, n, server_rank, tag, ierr )
+
+    IMPLICIT NONE
+
+    INTEGER, DIMENSION(:), INTENT(IN) :: buf          !<
+    INTEGER, INTENT(IN)               :: n            !<
+    INTEGER, INTENT(IN)               :: server_rank  !<
+    INTEGER, INTENT(IN)               :: tag          !<
+    INTEGER, INTENT(OUT)              :: ierr         !<
+
+    ierr = 0
+    CALL MPI_SEND( buf, n, MPI_INTEGER, server_rank, tag, m_to_server_comm,  &
+                   ierr )
+
+ END SUBROUTINE pmc_send_to_server_integer
palm/trunk/SOURCE/pmc_server_mod.f90
r1851 r1900 1 MODULE pmc_server1 MODULE pmc_server 2 2 3 3 !--------------------------------------------------------------------------------! … … 20 20 ! Current revisions: 21 21 ! ------------------ 22 ! 22 ! re-formatted to match PALM style 23 23 ! 24 24 ! Former revisions: … … 69 69 70 70 #if defined( __parallel ) 71 use, intrinsic :: iso_c_binding71 USE, INTRINSIC :: ISO_C_BINDING 72 72 73 73 #if defined( __mpifh ) … … 76 76 USE MPI 77 77 #endif 78 USE kinds 79 USE PMC_general, ONLY: ClientDef, PMC_MAX_MODELL,PMC_sort, DA_NameDef, DA_Desclen, DA_Namelen, & 80 PMC_G_SetName, PeDef, ArrayDef, PMC_MAX_ARRAY 81 USE PMC_handle_communicator, ONLY: m_model_comm,m_model_rank,m_model_npes, m_to_client_comm, & 82 PMC_Server_for_Client, m_world_rank 83 USE PMC_MPI_wrapper, ONLY: PMC_Send_to_Client, PMC_Recv_from_Client, PMC_Bcast, PMC_Inter_Bcast, & 84 PMC_Alloc_mem, PMC_Time 85 86 IMPLICIT none 78 USE kinds 79 USE pmc_general, & 80 ONLY: arraydef, clientdef, da_namedef, da_namelen, pedef, & 81 pmc_g_setname, pmc_max_array, pmc_max_models, pmc_sort 82 83 USE pmc_handle_communicator, & 84 ONLY: m_model_comm,m_model_rank,m_model_npes, m_to_client_comm, & 85 m_world_rank, pmc_server_for_client 86 87 USE pmc_mpi_wrapper, & 88 ONLY: pmc_alloc_mem, pmc_bcast, pmc_time 89 90 IMPLICIT NONE 91 87 92 PRIVATE 88 93 SAVE 89 94 90 TYPE ClientIndexDef 91 INTEGER :: NrPoints 92 INTEGER,DIMENSION(:,:),allocatable :: index_list_2d 93 END TYPE ClientIndexDef 94 95 TYPE(ClientDef),DIMENSION(PMC_MAX_MODELL) :: Clients 96 TYPE(ClientIndexDef),DIMENSION(PMC_MAX_MODELL) :: indClients 97 98 INTEGER :: next_array_in_list = 0 99 100 PUBLIC PMC_Server_for_Client 101 102 INTERFACE PMC_ServerInit 103 MODULE procedure PMC_ServerInit 104 END INTERFACE PMC_ServerInit 105 106 INTERFACE PMC_S_Set_2D_index_list 107 MODULE procedure PMC_S_Set_2D_index_list 108 END INTERFACE PMC_S_Set_2D_index_list 109 110 INTERFACE PMC_S_clear_next_array_list 111 MODULE procedure PMC_S_clear_next_array_list 112 END INTERFACE PMC_S_clear_next_array_list 113 114 INTERFACE PMC_S_GetNextArray 115 MODULE procedure PMC_S_GetNextArray 116 END INTERFACE PMC_S_GetNextArray 117 118 INTERFACE PMC_S_Set_DataArray 119 MODULE procedure PMC_S_Set_DataArray_2d 120 MODULE procedure PMC_S_Set_DataArray_3d 121 END INTERFACE PMC_S_Set_DataArray 122 123 INTERFACE PMC_S_setInd_and_AllocMem 124 MODULE procedure PMC_S_setInd_and_AllocMem 125 END INTERFACE PMC_S_setInd_and_AllocMem 126 127 INTERFACE PMC_S_FillBuffer 128 MODULE procedure PMC_S_FillBuffer 129 END INTERFACE PMC_S_FillBuffer 130 131 INTERFACE PMC_S_GetData_from_Buffer 132 MODULE procedure PMC_S_GetData_from_Buffer 133 END INTERFACE PMC_S_GetData_from_Buffer 134 135 INTERFACE PMC_S_Set_Active_data_array 136 MODULE procedure PMC_S_Set_Active_data_array 137 END INTERFACE PMC_S_Set_Active_data_array 138 139 ! PUBLIC section 140 141 PUBLIC PMC_ServerInit, PMC_S_Set_2D_index_list, PMC_S_GetNextArray, PMC_S_Set_DataArray 142 PUBLIC PMC_S_setInd_and_AllocMem, PMC_S_FillBuffer, PMC_S_GetData_from_Buffer, PMC_S_Set_Active_data_array 143 PUBLIC PMC_S_clear_next_array_list 144 145 CONTAINS 146 147 SUBROUTINE PMC_ServerInit 148 IMPLICIT none 149 INTEGER :: i 150 INTEGER :: j 151 INTEGER :: ClientId 152 INTEGER :: istat 153 154 do i=1,size(PMC_Server_for_Client)-1 155 ! 
if(m_model_comm == 0) write(0,*) 'PMC_Server: Initialize client Id',PMC_Server_for_Client(i) 156 157 ClientId = PMC_Server_for_Client(i) 158 159 Clients(ClientId)%model_comm = m_model_comm 160 Clients(ClientId)%inter_comm = m_to_client_comm(ClientId) 161 162 ! Get rank and size 163 CALL MPI_Comm_rank (Clients(ClientId)%model_comm, Clients(ClientId)%model_rank, istat); 164 CALL MPI_Comm_size (Clients(ClientId)%model_comm, Clients(ClientId)%model_npes, istat); 165 CALL MPI_Comm_remote_size (Clients(ClientId)%inter_comm, Clients(ClientId)%inter_npes, istat); 166 167 ! Intra communicater is used for MPI_Get 168 CALL MPI_Intercomm_merge (Clients(ClientId)%inter_comm, .false., Clients(ClientId)%intra_comm, istat); 169 CALL MPI_Comm_rank (Clients(ClientId)%intra_comm, Clients(ClientId)%intra_rank, istat); 170 171 ! write(9,*) 'ClientId ',i,ClientId,m_world_rank, Clients(ClientId)%inter_npes 172 173 ALLOCATE (Clients(ClientId)%PEs(Clients(ClientId)%inter_npes)) 174 ! 175 !-- Allocate for all client PEs an array of TYPE ArrayDef to store information of transfer array 176 do j=1,Clients(ClientId)%inter_npes 177 Allocate(Clients(ClientId)%PEs(j)%array_list(PMC_MAX_ARRAY)) 178 end do 179 180 CALL Get_DA_names_from_client (ClientId) 181 end do 182 183 return 184 END SUBROUTINE PMC_ServerInit 185 186 SUBROUTINE PMC_S_Set_2D_index_list (ClientId, index_list) 187 IMPLICIT none 188 INTEGER,INTENT(IN) :: ClientId 189 INTEGER,DIMENSION(:,:),INTENT(INOUT) :: index_list !Index list will be modified in sort, therefore INOUT 190 191 !-- Local variables 192 INTEGER :: ip,is,ie,ian,ic,n 193 INTEGER :: istat 194 195 if(m_model_rank == 0) then 196 CALL PMC_sort (index_list, 6) ! Sort to ascending Server PE 197 is = 1 198 199 do ip=0,m_model_npes-1 200 201 ! Split into Server PEs 202 ie = is-1 !there may be no entry for this PE 203 if(is <= size(index_list,2) .and. ie >= 0) then 204 do while ( index_list(6,ie+1) == ip) 205 ie = ie+1 206 if( ie == size(index_list,2)) EXIT 207 end do 208 209 ian = ie-is+1 210 else 211 is = -1 212 ie = -2 213 ian = 0 214 end if 215 216 ! Send data to other server PEs 217 218 if(ip == 0) then 219 indClients(ClientId)%NrPoints = ian 220 if(ian > 0) then 221 ALLOCATE (indClients(ClientId)%index_list_2d(6,ian)) 222 indClients(ClientId)%index_list_2d(:,1:ian) = index_list(:,is:ie) 223 end if 224 else 225 CALL MPI_Send (ian, 1, MPI_INTEGER, ip, 1000, m_model_comm, istat) 226 if(ian > 0) then 227 CALL MPI_Send (index_list(1,is), 6*ian, MPI_INTEGER, ip, 1001, & 228 m_model_comm, istat) 229 end if 230 end if 231 is = ie+1 232 end do 233 else 234 CALL MPI_Recv (indClients(ClientId)%NrPoints, 1, MPI_INTEGER, 0, 1000, m_model_comm, & 235 MPI_STATUS_IGNORE, istat) 236 ian = indClients(ClientId)%NrPoints 237 if(ian > 0) then 238 ALLOCATE(indClients(ClientId)%index_list_2d(6,ian)) 239 CALL MPI_RECV (indClients(ClientId)%index_list_2d, 6*ian, MPI_INTEGER, 0, 1001, & 240 m_model_comm, MPI_STATUS_IGNORE, istat) 241 end if 242 end if 243 244 CALL Set_PE_index_list (ClientId,Clients(ClientId),indClients(ClientId)%index_list_2d,indClients(ClientId)%NrPoints) 245 246 return 247 END SUBROUTINE PMC_S_Set_2D_index_list 248 249 SUBROUTINE PMC_S_clear_next_array_list 250 IMPLICIT none 251 252 next_array_in_list = 0 253 254 return 255 END SUBROUTINE PMC_S_clear_next_array_list 256 257 ! 
List handling is still required to get minimal interaction with pmc_interface 258 logical function PMC_S_GetNextArray (ClientId, myName) 259 INTEGER(iwp),INTENT(IN) :: ClientId 260 CHARACTER(len=*),INTENT(OUT) :: myName 261 262 !-- local variables 263 TYPE(PeDef),POINTER :: aPE 264 TYPE(ArrayDef),POINTER :: ar 265 266 next_array_in_list = next_array_in_list+1 267 268 !-- Array Names are the same on all client PE, so take first PE to get the name 269 aPE => Clients(ClientId)%PEs(1) 270 271 if(next_array_in_list > aPE%Nr_arrays) then 272 PMC_S_GetNextArray = .false. ! all arrays done 273 return 274 end if 275 276 ar => aPE%array_list(next_array_in_list) 277 myName = ar%name 278 279 PMC_S_GetNextArray = .true. ! Return true if legal array 280 return 281 END function PMC_S_GetNextArray 282 283 SUBROUTINE PMC_S_Set_DataArray_2d (ClientId, array, array_2 ) 284 285 IMPLICIT none 286 287 INTEGER,INTENT(IN) :: ClientId 288 289 REAL(wp), INTENT(IN), DIMENSION(:,:), POINTER :: array 290 REAL(wp), INTENT(IN), DIMENSION(:,:), POINTER, OPTIONAL :: array_2 291 292 INTEGER :: NrDims 293 INTEGER,DIMENSION (4) :: dims 294 TYPE(c_ptr) :: array_adr 295 TYPE(c_ptr) :: second_adr 296 297 dims = 1 298 299 NrDims = 2 300 dims(1) = size(array,1) 301 dims(2) = size(array,2) 302 array_adr = c_loc(array) 303 304 IF ( PRESENT( array_2 ) ) THEN 305 second_adr = c_loc(array_2) 306 CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr, second_adr = second_adr) 307 ELSE 308 CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr) 95 TYPE clientindexdef 96 INTEGER :: nrpoints !< 97 INTEGER, DIMENSION(:,:), ALLOCATABLE :: index_list_2d !< 98 END TYPE clientindexdef 99 100 TYPE(clientdef), DIMENSION(pmc_max_models) :: clients !< 101 TYPE(clientindexdef), DIMENSION(pmc_max_models) :: indclients !< 102 103 INTEGER :: next_array_in_list = 0 !< 104 105 106 PUBLIC pmc_server_for_client 107 108 109 INTERFACE pmc_serverinit 110 MODULE PROCEDURE pmc_serverinit 111 END INTERFACE pmc_serverinit 112 113 INTERFACE pmc_s_set_2d_index_list 114 MODULE PROCEDURE pmc_s_set_2d_index_list 115 END INTERFACE pmc_s_set_2d_index_list 116 117 INTERFACE pmc_s_clear_next_array_list 118 MODULE PROCEDURE pmc_s_clear_next_array_list 119 END INTERFACE pmc_s_clear_next_array_list 120 121 INTERFACE pmc_s_getnextarray 122 MODULE PROCEDURE pmc_s_getnextarray 123 END INTERFACE pmc_s_getnextarray 124 125 INTERFACE pmc_s_set_dataarray 126 MODULE PROCEDURE pmc_s_set_dataarray_2d 127 MODULE PROCEDURE pmc_s_set_dataarray_3d 128 END INTERFACE pmc_s_set_dataarray 129 130 INTERFACE pmc_s_setind_and_allocmem 131 MODULE PROCEDURE pmc_s_setind_and_allocmem 132 END INTERFACE pmc_s_setind_and_allocmem 133 134 INTERFACE pmc_s_fillbuffer 135 MODULE PROCEDURE pmc_s_fillbuffer 136 END INTERFACE pmc_s_fillbuffer 137 138 INTERFACE pmc_s_getdata_from_buffer 139 MODULE PROCEDURE pmc_s_getdata_from_buffer 140 END INTERFACE pmc_s_getdata_from_buffer 141 142 INTERFACE pmc_s_set_active_data_array 143 MODULE PROCEDURE pmc_s_set_active_data_array 144 END INTERFACE pmc_s_set_active_data_array 145 146 PUBLIC pmc_serverinit, pmc_s_clear_next_array_list, pmc_s_fillbuffer, & 147 pmc_s_getdata_from_buffer, pmc_s_getnextarray, & 148 pmc_s_setind_and_allocmem, pmc_s_set_active_data_array, & 149 pmc_s_set_dataarray, pmc_s_set_2d_index_list 150 151 CONTAINS 152 153 154 SUBROUTINE pmc_serverinit 155 156 IMPLICIT NONE 157 158 INTEGER :: clientid !< 159 INTEGER :: i !< 160 INTEGER :: j !< 161 INTEGER :: istat !< 162 163 164 DO i = 1, SIZE( pmc_server_for_client )-1 165 166 clientid = 
pmc_server_for_client( i ) 167 168 clients(clientid)%model_comm = m_model_comm 169 clients(clientid)%inter_comm = m_to_client_comm(clientid) 170 ! 171 !-- Get rank and size 172 CALL MPI_COMM_RANK( clients(clientid)%model_comm, & 173 clients(clientid)%model_rank, istat ) 174 CALL MPI_COMM_SIZE( clients(clientid)%model_comm, & 175 clients(clientid)%model_npes, istat ) 176 CALL MPI_COMM_REMOTE_SIZE( clients(clientid)%inter_comm, & 177 clients(clientid)%inter_npes, istat ) 178 ! 179 !-- Intra communicater is used for MPI_GET 180 CALL MPI_INTERCOMM_MERGE( clients(clientid)%inter_comm, .FALSE., & 181 clients(clientid)%intra_comm, istat ) 182 CALL MPI_COMM_RANK( clients(clientid)%intra_comm, & 183 clients(clientid)%intra_rank, istat ) 184 185 ALLOCATE( clients(clientid)%pes(clients(clientid)%inter_npes)) 186 ! 187 !-- Allocate array of TYPE arraydef for all client PEs to store information 188 !-- of the transfer array 189 DO j = 1, clients(clientid)%inter_npes 190 ALLOCATE( clients(clientid)%pes(j)%array_list(pmc_max_array) ) 191 ENDDO 192 193 CALL get_da_names_from_client (clientid) 194 195 ENDDO 196 197 END SUBROUTINE pmc_serverinit 198 199 200 201 SUBROUTINE pmc_s_set_2d_index_list( clientid, index_list ) 202 203 IMPLICIT NONE 204 205 INTEGER, INTENT(IN) :: clientid !< 206 INTEGER, DIMENSION(:,:), INTENT(INOUT) :: index_list !< 207 208 INTEGER :: ian !< 209 INTEGER :: ic !< 210 INTEGER :: ie !< 211 INTEGER :: ip !< 212 INTEGER :: is !< 213 INTEGER :: istat !< 214 INTEGER :: n !< 215 216 217 IF ( m_model_rank == 0 ) THEN 218 ! 219 !-- Sort to ascending server PE 220 CALL pmc_sort( index_list, 6 ) 221 222 is = 1 223 DO ip = 0, m_model_npes-1 224 ! 225 !-- Split into server PEs 226 ie = is - 1 227 ! 228 !-- There may be no entry for this PE 229 IF ( is <= SIZE( index_list,2 ) .AND. ie >= 0 ) THEN 230 231 DO WHILE ( index_list(6,ie+1 ) == ip ) 232 ie = ie + 1 233 IF ( ie == SIZE( index_list,2 ) ) EXIT 234 ENDDO 235 236 ian = ie - is + 1 237 238 ELSE 239 is = -1 240 ie = -2 241 ian = 0 242 ENDIF 243 ! 
244 !-- Send data to other server PEs 245 IF ( ip == 0 ) THEN 246 indclients(clientid)%nrpoints = ian 247 IF ( ian > 0) THEN 248 ALLOCATE( indclients(clientid)%index_list_2d(6,ian) ) 249 indclients(clientid)%index_list_2d(:,1:ian) = & 250 index_list(:,is:ie) 251 ENDIF 252 ELSE 253 CALL MPI_SEND( ian, 1, MPI_INTEGER, ip, 1000, m_model_comm, & 254 istat ) 255 IF ( ian > 0) THEN 256 CALL MPI_SEND( index_list(1,is), 6*ian, MPI_INTEGER, ip, & 257 1001, m_model_comm, istat ) 258 ENDIF 259 ENDIF 260 is = ie + 1 261 262 ENDDO 263 264 ELSE 265 266 CALL MPI_RECV( indclients(clientid)%nrpoints, 1, MPI_INTEGER, 0, 1000, & 267 m_model_comm, MPI_STATUS_IGNORE, istat ) 268 ian = indclients(clientid)%nrpoints 269 270 IF ( ian > 0 ) THEN 271 ALLOCATE( indclients(clientid)%index_list_2d(6,ian) ) 272 CALL MPI_RECV( indclients(clientid)%index_list_2d, 6*ian, & 273 MPI_INTEGER, 0, 1001, m_model_comm, & 274 MPI_STATUS_IGNORE, istat) 309 275 ENDIF 310 276 311 return 312 END SUBROUTINE PMC_S_Set_DataArray_2d 313 314 SUBROUTINE PMC_S_Set_DataArray_3d (ClientId, array, nz_cl, nz, array_2 ) 315 316 IMPLICIT none 317 318 INTEGER,INTENT(IN) :: ClientId 319 320 REAL(wp), INTENT(IN), DIMENSION(:,:,:), POINTER :: array 321 REAL(wp), INTENT(IN), DIMENSION(:,:,:), POINTER, OPTIONAL :: array_2 322 INTEGER,INTENT(IN) :: nz_cl 323 INTEGER,INTENT(IN) :: nz 324 325 INTEGER :: NrDims 326 INTEGER,DIMENSION (4) :: dims 327 TYPE(c_ptr) :: array_adr 328 TYPE(c_ptr) :: second_adr 329 330 dims = 1 331 332 dims = 0 333 NrDims = 3 334 dims(1) = size(array,1) 335 dims(2) = size(array,2) 336 dims(3) = size(array,3) 337 dims(4) = nz_cl+dims(1)-nz ! works for first dimension 1:nz and 0:nz+1 338 339 array_adr = c_loc(array) 340 341 ! 342 !-- In PALM's pointer version, two indices have to be stored internally. 343 !-- The active address of the data array is set in swap_timelevel 344 IF ( PRESENT( array_2 ) ) THEN 345 second_adr = c_loc(array_2) 346 CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr, second_adr = second_adr) 347 ELSE 348 CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr) 349 ENDIF 350 351 return 352 END SUBROUTINE PMC_S_Set_DataArray_3d 353 354 SUBROUTINE PMC_S_setInd_and_AllocMem (ClientId) 355 356 USE control_parameters, & 357 ONLY: message_string 358 359 IMPLICIT none 360 361 ! 362 !-- Naming convention for appendices: _sc -> server to client transfer 363 !-- _cs -> client to server transfer 364 !-- Send -> Server to client transfer 365 !-- Recv -> client to server transfer 366 INTEGER,INTENT(IN) :: ClientId 367 368 INTEGER :: i, istat, ierr, j 369 INTEGER :: arlen, myIndex, tag 370 INTEGER :: rCount ! count MPI requests 371 INTEGER(idp) :: bufsize ! Size of MPI data Window 372 TYPE(PeDef),POINTER :: aPE 373 TYPE(ArrayDef),POINTER :: ar 374 CHARACTER(len=DA_Namelen) :: myName 375 INTEGER,DIMENSION(1024) :: req 376 Type(c_ptr) :: base_ptr 377 REAL(wp),DIMENSION(:),POINTER,SAVE :: base_array_sc !Base array for server to client transfer 378 REAL(wp),DIMENSION(:),POINTER,SAVE :: base_array_cs !Base array for client to server transfer 379 INTEGER(KIND=MPI_ADDRESS_KIND) :: WinSize 380 381 ! 382 !-- Server to client direction 383 myIndex = 1 384 rCount = 0 385 bufsize = 8 386 387 ! 388 !-- First stride: Compute size and set index 389 do i=1,Clients(ClientId)%inter_npes 390 aPE => Clients(ClientId)%PEs(i) 391 tag = 200 392 do j=1,aPE%Nr_arrays 393 ar => aPE%array_list(j) 394 if(ar%NrDims == 2) then 395 arlen = aPE%NrEle ! 2D 396 else if(ar%NrDims == 3) then 397 arlen = aPE%NrEle * ar%A_dim(4); ! 
3D 398 else 399 arlen = -1 400 end if 401 ar%SendIndex = myIndex 402 403 tag = tag+1 404 rCount = rCount+1 405 CALL MPI_Isend (myIndex, 1, MPI_INTEGER, i-1, tag, Clients(ClientId)%inter_comm, req(rCount),ierr) 406 407 if(rCount == 1024) then ! Maximum of 1024 outstanding requests 408 CALL MPI_Waitall (rCount, req, MPI_STATUSES_IGNORE, ierr) 409 rCount = 0; 410 end if 411 412 myIndex = myIndex+arlen 413 bufsize = bufsize+arlen 414 ar%SendSize = arlen 415 416 end do 417 if(rCount > 0) then ! Wait for all send completed 418 CALL MPI_Waitall (rCount, req, MPI_STATUSES_IGNORE, ierr) 419 end if 420 end do 421 422 ! 423 !-- Create RMA (One Sided Communication) window for data buffer server to 424 !-- client transfer. 425 !-- The buffer of MPI_Get (counter part of transfer) can be PE-local, i.e. 426 !-- it can but must not be part of the MPI RMA window. 427 !-- Only one RMA window is required to prepare the data 428 !-- for server -> client transfer on the server side and 429 !-- for client -> server transfer on the client side 430 CALL PMC_Alloc_mem (base_array_sc, bufsize) 431 Clients(ClientId)%TotalBufferSize = bufsize*wp !Total buffer size in Byte 432 433 WinSize = bufsize*wp 434 CALL MPI_Win_create (base_array_sc, WinSize, wp, MPI_INFO_NULL, & 435 Clients(ClientId)%intra_comm, Clients(ClientId)%win_server_client, ierr) 436 CALL MPI_Win_fence (0, Clients(ClientId)%win_server_client, ierr); ! Open Window to set data 437 ! 438 !-- Second stride: Set Buffer pointer 439 do i=1,Clients(ClientId)%inter_npes 440 aPE => Clients(ClientId)%PEs(i) 441 do j=1,aPE%Nr_arrays 442 ar => aPE%array_list(j) 443 ar%SendBuf = c_loc(base_array_sc(ar%SendIndex)) 444 if(ar%SendIndex+ar%SendSize > bufsize) then 445 write(0,'(a,i4,4i7,1x,a)') 'Server Buffer too small ',i, & 446 ar%SendIndex,ar%SendSize,ar%SendIndex+ar%SendSize,bufsize,trim(ar%name) 447 CALL MPI_Abort (MPI_COMM_WORLD, istat, ierr) 448 end if 449 end do 450 end do 451 452 !-- Client to server direction 453 454 bufsize = 8 455 456 !-- First stride, Compute size and set index 457 458 do i=1,Clients(ClientId)%inter_npes 459 aPE => Clients(ClientId)%PEs(i) 460 tag = 300 461 462 do j=1,aPE%Nr_arrays 463 ar => aPE%array_list(j) 464 465 ! Receive Index from client 466 tag = tag+1 467 CALL MPI_Recv (myIndex, 1, MPI_INTEGER, i-1, tag, Clients(ClientId)%inter_comm, MPI_STATUS_IGNORE, ierr) 468 469 if(ar%NrDims == 3) then 470 bufsize = max(bufsize,aPE%NrEle * ar%A_dim(4)) ! 3D 471 else 472 bufsize = max(bufsize,aPE%NrEle) ! 2D 473 end if 474 ar%RecvIndex = myIndex 475 end do 476 477 end do 478 479 !-- Create RMA (One Sided Communication) data buffer 480 !-- The buffer for MPI_Get can be PE local, i.e. 
it can but must not be part of the MPI RMA window 481 482 CALL PMC_Alloc_mem (base_array_cs, bufsize, base_ptr) 483 Clients(ClientId)%TotalBufferSize = bufsize*wp !Total buffer size in Byte 484 485 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) 486 487 !-- Second stride, Set Buffer pointer 488 489 do i=1,Clients(ClientId)%inter_npes 490 aPE => Clients(ClientId)%PEs(i) 491 492 do j=1,aPE%Nr_arrays 493 ar => aPE%array_list(j) 494 ar%RecvBuf = base_ptr 495 end do 496 end do 497 498 return 499 END SUBROUTINE PMC_S_setInd_and_AllocMem 500 501 SUBROUTINE PMC_S_FillBuffer (ClientId, WaitTime) 502 IMPLICIT none 503 INTEGER,INTENT(IN) :: ClientId 504 REAL(wp), INTENT(OUT), OPTIONAL :: WaitTime 505 506 INTEGER :: ip,ij,istat,ierr,j 507 INTEGER :: myIndex 508 REAL(wp) :: t1,t2 509 TYPE(PeDef),POINTER :: aPE 510 TYPE(ArrayDef),POINTER :: ar 511 CHARACTER(len=DA_Namelen) :: myName 512 INTEGER,DIMENSION(1) :: buf_shape 513 REAL(wp), POINTER, DIMENSION(:) :: buf 514 REAL(wp), POINTER, DIMENSION(:,:) :: data_2d 515 REAL(wp), POINTER, DIMENSION(:,:,:) :: data_3d 516 517 !-- Synchronization of the model is done in pmci_client_synchronize and pmci_server_synchronize 518 !-- Therefor the RMA window cann be filled without sychronization at this point and the barrier 519 !-- is not necessary 520 !-- Please note that WaitTime has to be set in PMC_S_FillBuffer AND PMC_C_GetBuffer 521 522 if(present(WaitTime)) then 523 t1 = PMC_Time() 524 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) 525 t2 = PMC_Time() 526 WaitTime = t2-t1 527 end if 528 529 do ip=1,Clients(ClientId)%inter_npes 530 aPE => Clients(ClientId)%PEs(ip) 531 do j=1,aPE%Nr_arrays 532 ar => aPE%array_list(j) 533 myIndex=1 534 if(ar%NrDims == 2) then 535 buf_shape(1) = aPE%NrEle 536 CALL c_f_pointer(ar%SendBuf, buf, buf_shape) 537 CALL c_f_pointer(ar%data, data_2d, ar%A_dim(1:2)) 538 do ij=1,aPE%NrEle 539 buf(myIndex) = data_2d(aPE%locInd(ij)%j,aPE%locInd(ij)%i) 540 myIndex = myIndex+1 541 end do 542 else if(ar%NrDims == 3) then 543 buf_shape(1) = aPE%NrEle*ar%A_dim(4) 544 CALL c_f_pointer(ar%SendBuf, buf, buf_shape) 545 CALL c_f_pointer(ar%data, data_3d, ar%A_dim(1:3)) 546 do ij=1,aPE%NrEle 547 buf(myIndex:myIndex+ar%A_dim(4)-1) = data_3d(1:ar%A_dim(4),aPE%locInd(ij)%j,aPE%locInd(ij)%i) 548 myIndex = myIndex+ar%A_dim(4) 549 end do 550 end if 551 end do 552 end do 553 554 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! buffer is filled 555 556 return 557 END SUBROUTINE PMC_S_FillBuffer 558 559 SUBROUTINE PMC_S_GetData_from_Buffer (ClientId, WaitTime) 560 561 IMPLICIT none 562 563 INTEGER,INTENT(IN) :: ClientId 564 REAL(wp), INTENT(OUT), OPTIONAL :: WaitTime 565 566 !-- local variables 567 INTEGER :: ip,ij,istat,ierr,j 568 INTEGER :: myIndex 569 INTEGER :: nr 570 REAL(wp) :: t1,t2 571 TYPE(PeDef),POINTER :: aPE 572 TYPE(ArrayDef),POINTER :: ar 573 CHARACTER(len=DA_Namelen) :: myName 574 INTEGER,DIMENSION(1) :: buf_shape 575 REAL(wp), POINTER, DIMENSION(:) :: buf 576 REAL(wp), POINTER, DIMENSION(:,:) :: data_2d 577 REAL(wp), POINTER, DIMENSION(:,:,:) :: data_3d 578 INTEGER :: target_pe 579 INTEGER(kind=MPI_ADDRESS_KIND) :: target_disp 580 581 t1 = PMC_Time() 582 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! Wait for client to fill buffer 583 t2 = PMC_Time()-t1 584 if(present(WaitTime)) WaitTime = t2 585 586 ! CALL MPI_Win_fence (0, Clients(ClientId)%win_server_client, ierr) ! Fence might do it, test later 587 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! 
Wait for buffer is filled 588 589 do ip=1,Clients(ClientId)%inter_npes 590 aPE => Clients(ClientId)%PEs(ip) 591 do j=1,aPE%Nr_arrays 592 ar => aPE%array_list(j) 593 594 if(ar%RecvIndex < 0) CYCLE 595 596 if(ar%NrDims == 2) then 597 nr = aPE%NrEle 598 else if(ar%NrDims == 3) then 599 nr = aPE%NrEle*ar%A_dim(4) 600 end if 601 602 buf_shape(1) = nr 603 CALL c_f_pointer(ar%RecvBuf, buf, buf_shape) 604 ! 605 !-- MPI passive target RMA 606 607 if(nr > 0) then 608 target_disp = ar%RecvIndex-1 609 target_pe = ip-1+m_model_npes ! client PEs are located behind server PEs 610 CALL MPI_Win_lock (MPI_LOCK_SHARED , target_pe, 0, Clients(ClientId)%win_server_client, ierr) 611 CALL MPI_Get (buf, nr, MPI_REAL, target_pe, target_disp, nr, MPI_REAL, Clients(ClientId)%win_server_client, ierr) 612 CALL MPI_Win_unlock (target_pe, Clients(ClientId)%win_server_client, ierr) 613 end if 614 615 myIndex=1 616 if(ar%NrDims == 2) then 617 CALL c_f_pointer(ar%data, data_2d, ar%A_dim(1:2)) 618 do ij=1,aPE%NrEle 619 data_2d(aPE%locInd(ij)%j,aPE%locInd(ij)%i) = buf(myIndex) 620 myIndex = myIndex+1 621 end do 622 else if(ar%NrDims == 3) then 623 CALL c_f_pointer(ar%data, data_3d, ar%A_dim(1:3)) 624 do ij=1,aPE%NrEle 625 data_3d(1:ar%A_dim(4),aPE%locInd(ij)%j,aPE%locInd(ij)%i) = buf(myIndex:myIndex+ar%A_dim(4)-1) 626 myIndex = myIndex+ar%A_dim(4) 627 end do 628 end if 629 end do 630 end do 631 632 END SUBROUTINE PMC_S_GetData_from_Buffer 633 634 ! Private SUBROUTINEs 635 636 SUBROUTINE Get_DA_names_from_client (ClientId) 637 IMPLICIT none 638 INTEGER,INTENT(IN) :: ClientId 639 !-- local variables 640 type(DA_NameDef) :: myName 641 642 ! Get Data Array Description and Name from Client 643 644 do 645 CALL PMC_Bcast ( myName%couple_index, 0, comm=m_to_client_comm(ClientId)) 646 if(myName%couple_index == -1) EXIT 647 CALL PMC_Bcast ( myName%ServerDesc, 0, comm=m_to_client_comm(ClientId)) 648 CALL PMC_Bcast ( myName%NameOnServer, 0, comm=m_to_client_comm(ClientId)) 649 CALL PMC_Bcast ( myName%ClientDesc, 0, comm=m_to_client_comm(ClientId)) 650 CALL PMC_Bcast ( myName%NameOnClient, 0, comm=m_to_client_comm(ClientId)) 651 652 CALL PMC_G_SetName (clients(ClientID), myName%couple_index, myName%NameOnServer ) 653 end do 654 655 return 656 END SUBROUTINE Get_DA_names_from_client 657 658 SUBROUTINE PMC_S_SetArray (ClientId, NrDims, dims, array_adr, second_adr) 659 IMPLICIT none 660 661 INTEGER,INTENT(IN) :: ClientId 662 INTEGER,INTENT(IN) :: NrDims 663 INTEGER,INTENT(IN),DIMENSION(:) :: dims 664 TYPE(c_ptr),INTENT(IN) :: array_adr 665 TYPE(c_ptr),INTENT(IN),OPTIONAL :: second_adr 666 667 INTEGER :: i 668 TYPE(PeDef),POINTER :: aPE 669 TYPE(ArrayDef),POINTER :: ar 670 CHARACTER(len=DA_Namelen) :: myName 671 672 ! 
Set Array for Client interPE 0 673 674 do i=1,Clients(ClientId)%inter_npes 675 aPE => Clients(ClientId)%PEs(i) 676 ar => aPE%array_list(next_array_in_list) 677 ar%NrDims = NrDims 678 ar%A_dim = dims 679 ar%data = array_adr 680 if(present(second_adr)) then 681 ar%po_data(1) = array_adr 682 ar%po_data(2) = second_adr 683 else 684 ar%po_data(1) = C_NULL_PTR 685 ar%po_data(2) = C_NULL_PTR 686 end if 687 end do 688 689 return 690 END SUBROUTINE PMC_S_SetArray 691 692 693 SUBROUTINE PMC_S_Set_Active_data_array (ClientId,iactive) 694 IMPLICIT none 695 696 INTEGER,INTENT(IN) :: ClientId 697 INTEGER,INTENT(IN) :: iactive 698 699 !-- local variables 700 INTEGER :: i, ip, j 701 TYPE(PeDef),POINTER :: aPE 702 TYPE(ArrayDef),POINTER :: ar 703 CHARACTER(len=DA_Namelen) :: myName 704 705 do ip=1,Clients(ClientId)%inter_npes 706 aPE => Clients(ClientId)%PEs(ip) 707 do j=1,aPE%Nr_arrays 708 ar => aPE%array_list(j) 709 if(iactive == 1 .OR. iactive == 2) then 710 ar%data = ar%po_data(iactive) 711 end if 712 end do 713 end do 714 715 return 716 END SUBROUTINE PMC_S_Set_Active_data_array 717 718 719 SUBROUTINE Set_PE_index_list (ClientId, myClient,index_list,NrP) 720 IMPLICIT none 721 722 INTEGER,INTENT(IN) :: ClientId 723 TYPE(ClientDef),INTENT(INOUT) :: myClient 724 INTEGER,INTENT(IN),DIMENSION(:,:) :: index_list 725 INTEGER,INTENT(IN) :: NrP 726 727 !-- local variables 728 INTEGER :: i,j,ind,ierr,i2 729 TYPE(PeDef),POINTER :: aPE 730 INTEGER :: RemPE 731 INTEGER,DIMENSION(myClient%inter_npes) :: RemInd 732 INTEGER,DIMENSION(:),POINTER :: RemIndw 733 INTEGER,DIMENSION(:),POINTER :: RLdef 734 INTEGER(KIND=MPI_ADDRESS_KIND) :: WinSize 735 INTEGER :: indWin,indWin2 736 737 ! First, count entries for every remote client PE 738 739 do i=1,myClient%inter_npes 740 aPE => myClient%PEs(i) 741 aPE%NrEle = 0 742 end do 743 744 do j=1,NrP ! loop over number of cells coarse grid 745 RemPE = index_list(5,j)+1 ! Pe number remote PE 746 aPE => myClient%PEs(RemPE) 747 aPE% NrEle = aPE% NrEle+1 ! Increment Number of elements for this client Pe 748 end do 749 750 do i=1,myClient%inter_npes 751 aPE => myClient%PEs(i) 752 ALLOCATE(aPE%locInd(aPE%NrEle)) 753 end do 754 755 RemInd = 0 756 757 ! Second, Create lists 758 759 do j=1,NrP ! loop over number of cells coarse grid 760 RemPE = index_list(5,j)+1 ! Pe number remote PE 761 aPE => myClient%PEs(RemPE) 762 RemInd(RemPE) = RemInd(RemPE)+1 763 ind = RemInd(RemPE) 764 aPE%locInd(ind)%i = index_list(1,j) 765 aPE%locInd(ind)%j = index_list(2,j) 766 end do 767 768 ! Prepare Number of Elements for Client PEs 769 CALL PMC_Alloc_mem (RLdef, myClient%inter_npes*2) 770 WinSize = myClient%inter_npes*c_sizeof(i)*2 ! Number of Client PEs * size of INTEGER (i just arbitrary INTEGER) 771 772 CALL MPI_Win_create (RLdef, WinSize, iwp, MPI_INFO_NULL, myClient%intra_comm, indWin, ierr); 773 CALL MPI_Win_fence (0, indWin, ierr); ! Open Window to set data 774 775 RLdef(1) = 0 ! Index on Remote PE 0 776 RLdef(2) = RemInd(1) ! Number of Elements on Rem PE 0 777 778 do i=2,myClient%inter_npes ! Reserve Buffer for index array 779 i2 = (i-1)*2+1 780 RLdef(i2) = RLdef(i2-2) + RLdef(i2-1)*2 ! Index on Remote PE 781 RLdef(i2+1) = RemInd(i) ! Number of Elements on Remote PE 782 end do 783 784 CALL MPI_Win_fence (0, indWin, ierr); ! Close Window to allow client to access data 785 CALL MPI_Win_fence (0, indWin, ierr); ! Client has retrieved data 786 787 i2 = 2*myClient%inter_npes-1 788 WinSize = (RLdef(i2)+RLdef(i2+1))*2 789 WinSize = max(WinSize,1) ! 
Make sure, MPI_Alloc_mem works 790 791 CALL PMC_Alloc_mem (RemIndw, int(WinSize)) 792 793 CALL MPI_Barrier (m_model_comm, ierr) 794 CALL MPI_Win_create (RemIndw, WinSize*c_sizeof(i), iwp, MPI_INFO_NULL, myClient%intra_comm, indWin2, ierr); 795 796 CALL MPI_Win_fence (0, indWin2, ierr); ! Open Window to set data 797 do j=1,NrP ! this loop creates the 2D index list 798 RemPE = index_list(5,j)+1 ! Pe number remote PE 799 aPE => myClient%PEs(RemPE) 800 i2 = RemPE*2-1 801 ind = RLdef(i2)+1 802 RemIndw(ind) = index_list(3,j) 803 RemIndw(ind+1) = index_list(4,j) 804 RLdef(i2) = RLdef(i2)+2 805 end do 806 CALL MPI_Win_fence (0, indWin2, ierr); !all data set 807 808 CALL MPI_Barrier(myClient%intra_comm, ierr) ! Dont know why, but this barrier is necessary before we can free the windows 809 810 CALL MPI_Win_free(indWin, ierr) 811 CALL MPI_Win_free(indWin2, ierr) 812 813 ! Sollte funktionieren, Problem mit MPI implementation 814 ! https://www.lrz.de/services/software/parallel/mpi/onesided 815 ! CALL MPI_Free_mem (RemIndw, ierr) 816 817 return 818 END SUBROUTINE Set_PE_index_list 277 ENDIF 278 279 CALL set_pe_index_list( clientid, clients(clientid), & 280 indclients(clientid)%index_list_2d, & 281 indclients(clientid)%nrpoints ) 282 283 END SUBROUTINE pmc_s_set_2d_index_list 284 285 286 287 SUBROUTINE pmc_s_clear_next_array_list 288 289 IMPLICIT NONE 290 291 next_array_in_list = 0 292 293 END SUBROUTINE pmc_s_clear_next_array_list 294 295 296 297 LOGICAL FUNCTION pmc_s_getnextarray( clientid, myname ) 298 ! 299 !-- List handling is still required to get minimal interaction with 300 !-- pmc_interface 301 !-- TODO: what does "still" mean? Is there a chance to change this! 302 CHARACTER(LEN=*), INTENT(OUT) :: myname !< 303 INTEGER(iwp), INTENT(IN) :: clientid !< 304 305 TYPE(arraydef), POINTER :: ar 306 TYPE(pedef), POINTER :: ape 307 308 next_array_in_list = next_array_in_list + 1 309 ! 310 !-- Array names are the same on all client PEs, so take first PE to get the name 311 ape => clients(clientid)%pes(1) 312 313 IF ( next_array_in_list > ape%nr_arrays ) THEN 314 ! 315 !-- All arrays are done 316 pmc_s_getnextarray = .FALSE. 317 RETURN 318 ENDIF 319 320 ar => ape%array_list(next_array_in_list) 321 myname = ar%name 322 ! 323 !-- Return true if legal array 324 !-- TODO: what does this comment mean? Can there be non-legal arrays?? 325 pmc_s_getnextarray = .TRUE. 
326 327 END FUNCTION pmc_s_getnextarray 328 329 330 331 SUBROUTINE pmc_s_set_dataarray_2d( clientid, array, array_2 ) 332 333 IMPLICIT NONE 334 335 INTEGER,INTENT(IN) :: clientid !< 336 337 REAL(wp), INTENT(IN), DIMENSION(:,:), POINTER :: array !< 338 REAL(wp), INTENT(IN), DIMENSION(:,:), POINTER, OPTIONAL :: array_2 !< 339 340 INTEGER :: nrdims !< 341 INTEGER, DIMENSION(4) :: dims !< 342 TYPE(C_PTR) :: array_adr !< 343 TYPE(C_PTR) :: second_adr !< 344 345 346 dims = 1 347 nrdims = 2 348 dims(1) = SIZE( array,1 ) 349 dims(2) = SIZE( array,2 ) 350 array_adr = C_LOC( array ) 351 352 IF ( PRESENT( array_2 ) ) THEN 353 second_adr = C_LOC(array_2) 354 CALL pmc_s_setarray( clientid, nrdims, dims, array_adr, & 355 second_adr = second_adr) 356 ELSE 357 CALL pmc_s_setarray( clientid, nrdims, dims, array_adr ) 358 ENDIF 359 360 END SUBROUTINE pmc_s_set_dataarray_2d 361 362 363 364 SUBROUTINE pmc_s_set_dataarray_3d( clientid, array, nz_cl, nz, array_2 ) 365 366 IMPLICIT NONE 367 368 INTEGER, INTENT(IN) :: clientid !< 369 INTEGER, INTENT(IN) :: nz !< 370 INTEGER, INTENT(IN) :: nz_cl !< 371 372 REAL(wp), INTENT(IN), DIMENSION(:,:,:), POINTER :: array !< 373 REAL(wp), INTENT(IN), DIMENSION(:,:,:), POINTER, OPTIONAL :: array_2 !< 374 375 INTEGER :: nrdims !< 376 INTEGER, DIMENSION(4) :: dims !< 377 TYPE(C_PTR) :: array_adr !< 378 TYPE(C_PTR) :: second_adr !< 379 380 !-- TODO: the next assignment seems to be obsolete. Please check! 381 dims = 1 382 dims = 0 383 nrdims = 3 384 dims(1) = SIZE( array,1 ) 385 dims(2) = SIZE( array,2 ) 386 dims(3) = SIZE( array,3 ) 387 dims(4) = nz_cl+dims(1)-nz ! works for first dimension 1:nz and 0:nz+1 388 389 array_adr = C_LOC(array) 390 391 ! 392 !-- In PALM's pointer version, two indices have to be stored internally. 393 !-- The active address of the data array is set in swap_timelevel. 394 IF ( PRESENT( array_2 ) ) THEN 395 second_adr = C_LOC( array_2 ) 396 CALL pmc_s_setarray( clientid, nrdims, dims, array_adr, & 397 second_adr = second_adr) 398 ELSE 399 CALL pmc_s_setarray( clientid, nrdims, dims, array_adr ) 400 ENDIF 401 402 END SUBROUTINE pmc_s_set_dataarray_3d 403 404 405 406 SUBROUTINE pmc_s_setind_and_allocmem( clientid ) 407 408 USE control_parameters, & 409 ONLY: message_string 410 411 IMPLICIT NONE 412 413 ! 414 !-- Naming convention for appendices: _sc -> server to client transfer 415 !-- _cs -> client to server transfer 416 !-- send -> server to client transfer 417 !-- recv -> client to server transfer 418 INTEGER, INTENT(IN) :: clientid !< 419 420 INTEGER :: arlen !< 421 INTEGER :: i !< 422 INTEGER :: ierr !< 423 INTEGER :: istat !< 424 INTEGER :: j !< 425 INTEGER :: myindex !< 426 INTEGER :: rcount !< count MPI requests 427 INTEGER :: tag !< 428 429 INTEGER(idp) :: bufsize !< size of MPI data window 430 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize !< 431 432 INTEGER, DIMENSION(1024) :: req !< 433 434 TYPE(C_PTR) :: base_ptr !< 435 TYPE(pedef), POINTER :: ape !< 436 TYPE(arraydef), POINTER :: ar !< 437 438 REAL(wp),DIMENSION(:), POINTER, SAVE :: base_array_sc !< base array for server to client transfer 439 REAL(wp),DIMENSION(:), POINTER, SAVE :: base_array_cs !< base array for client to server transfer 440 441 ! 442 !-- Server to client direction 443 myindex = 1 444 rcount = 0 445 bufsize = 8 446 447 ! 
448 !-- First stride: compute size and set index 449 DO i = 1, clients(clientid)%inter_npes 450 451 ape => clients(clientid)%pes(i) 452 tag = 200 453 454 DO j = 1, ape%nr_arrays 455 456 ar => ape%array_list(j) 457 IF ( ar%nrdims == 2 ) THEN 458 arlen = ape%nrele 459 ELSEIF ( ar%nrdims == 3 ) THEN 460 arlen = ape%nrele * ar%a_dim(4) 461 ELSE 462 arlen = -1 463 ENDIF 464 ar%sendindex = myindex 465 466 tag = tag + 1 467 rcount = rcount + 1 468 CALL MPI_ISEND( myindex, 1, MPI_INTEGER, i-1, tag, & 469 clients(clientid)%inter_comm, req(rcount), ierr ) 470 ! 471 !-- Maximum of 1024 outstanding requests 472 !-- TODO: what does this limit means? 473 IF ( rcount == 1024 ) THEN 474 CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr ) 475 rcount = 0 476 ENDIF 477 478 myindex = myindex + arlen 479 bufsize = bufsize + arlen 480 ar%sendsize = arlen 481 482 ENDDO 483 484 IF ( rcount > 0 ) THEN 485 CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr ) 486 ENDIF 487 488 ENDDO 489 490 ! 491 !-- Create RMA (One Sided Communication) window for data buffer server to 492 !-- client transfer. 493 !-- The buffer of MPI_GET (counterpart of transfer) can be PE-local, i.e. 494 !-- it can but must not be part of the MPI RMA window. Only one RMA window is 495 !-- required to prepare the data for 496 !-- server -> client transfer on the server side 497 !-- and for 498 !-- client -> server transfer on the client side 499 CALL pmc_alloc_mem( base_array_sc, bufsize ) 500 clients(clientid)%totalbuffersize = bufsize * wp 501 502 winsize = bufsize * wp 503 CALL MPI_WIN_CREATE( base_array_sc, winsize, wp, MPI_INFO_NULL, & 504 clients(clientid)%intra_comm, & 505 clients(clientid)%win_server_client, ierr ) 506 ! 507 !-- Open window to set data 508 CALL MPI_WIN_FENCE( 0, clients(clientid)%win_server_client, ierr ) 509 ! 510 !-- Second stride: set buffer pointer 511 DO i = 1, clients(clientid)%inter_npes 512 513 ape => clients(clientid)%pes(i) 514 515 DO j = 1, ape%nr_arrays 516 517 ar => ape%array_list(j) 518 ar%sendbuf = C_LOC( base_array_sc(ar%sendindex) ) 519 520 !-- TODO: replace this by standard PALM error message using the message routine 521 IF ( ar%sendindex + ar%sendsize > bufsize ) THEN 522 write(0,'(a,i4,4i7,1x,a)') 'Server Buffer too small ',i, & 523 ar%sendindex,ar%sendsize,ar%sendindex+ar%sendsize,bufsize,trim(ar%name) 524 CALL MPI_Abort (MPI_COMM_WORLD, istat, ierr) 525 ENDIF 526 ENDDO 527 ENDDO 528 529 ! 530 !-- Client to server direction 531 bufsize = 8 532 ! 533 !-- First stride: compute size and set index 534 DO i = 1, clients(clientid)%inter_npes 535 536 ape => clients(clientid)%pes(i) 537 tag = 300 538 539 DO j = 1, ape%nr_arrays 540 541 ar => ape%array_list(j) 542 ! 543 !-- Receive index from client 544 tag = tag + 1 545 CALL MPI_RECV( myindex, 1, MPI_INTEGER, i-1, tag, & 546 clients(clientid)%inter_comm, MPI_STATUS_IGNORE, ierr ) 547 548 IF ( ar%nrdims == 3 ) THEN 549 bufsize = MAX( bufsize, ape%nrele * ar%a_dim(4) ) 550 ELSE 551 bufsize = MAX( bufsize, ape%nrele ) 552 ENDIF 553 ar%recvindex = myindex 554 555 ENDDO 556 557 ENDDO 558 559 ! 560 !-- Create RMA (one sided communication) data buffer. 561 !-- The buffer for MPI_GET can be PE local, i.e. it can but must not be part of 562 !-- the MPI RMA window 563 CALL pmc_alloc_mem( base_array_cs, bufsize, base_ptr ) 564 clients(clientid)%totalbuffersize = bufsize * wp 565 566 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 567 ! 
568 !-- Second stride: set buffer pointer 569 DO i = 1, clients(clientid)%inter_npes 570 571 ape => clients(clientid)%pes(i) 572 573 DO j = 1, ape%nr_arrays 574 ar => ape%array_list(j) 575 ar%recvbuf = base_ptr 576 ENDDO 577 578 ENDDO 579 580 END SUBROUTINE pmc_s_setind_and_allocmem 581 582 583 584 SUBROUTINE pmc_s_fillbuffer( clientid, waittime ) 585 586 IMPLICIT NONE 587 588 INTEGER, INTENT(IN) :: clientid !< 589 590 REAL(wp), INTENT(OUT), OPTIONAL :: waittime !< 591 592 INTEGER :: ierr !< 593 INTEGER :: ij !< 594 INTEGER :: ip !< 595 INTEGER :: istat !< 596 INTEGER :: j !< 597 INTEGER :: myindex !< 598 599 INTEGER, DIMENSION(1) :: buf_shape 600 601 REAL(wp) :: t1 !< 602 REAL(wp) :: t2 !< 603 REAL(wp), POINTER, DIMENSION(:) :: buf !< 604 REAL(wp), POINTER, DIMENSION(:,:) :: data_2d !< 605 REAL(wp), POINTER, DIMENSION(:,:,:) :: data_3d !< 606 607 TYPE(pedef), POINTER :: ape !< 608 TYPE(arraydef), POINTER :: ar !< 609 610 ! 611 !-- Synchronization of the model is done in pmci_client_synchronize and 612 !-- pmci_server_synchronize. Therefor the RMA window can be filled without 613 !-- sychronization at this point and a barrier is not necessary. 614 !-- Please note that waittime has to be set in pmc_s_fillbuffer AND 615 !-- pmc_c_getbuffer 616 IF ( PRESENT( waittime) ) THEN 617 t1 = pmc_time() 618 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 619 t2 = pmc_time() 620 waittime = t2- t1 621 ENDIF 622 623 DO ip = 1, clients(clientid)%inter_npes 624 625 ape => clients(clientid)%pes(ip) 626 627 DO j = 1, ape%nr_arrays 628 629 ar => ape%array_list(j) 630 myindex = 1 631 632 IF ( ar%nrdims == 2 ) THEN 633 634 buf_shape(1) = ape%nrele 635 CALL C_F_POINTER( ar%sendbuf, buf, buf_shape ) 636 CALL C_F_POINTER( ar%data, data_2d, ar%a_dim(1:2) ) 637 DO ij = 1, ape%nrele 638 buf(myindex) = data_2d(ape%locind(ij)%j,ape%locind(ij)%i) 639 myindex = myindex + 1 640 ENDDO 641 642 ELSEIF ( ar%nrdims == 3 ) THEN 643 644 buf_shape(1) = ape%nrele*ar%a_dim(4) 645 CALL C_F_POINTER( ar%sendbuf, buf, buf_shape ) 646 CALL C_F_POINTER( ar%data, data_3d, ar%a_dim(1:3) ) 647 DO ij = 1, ape%nrele 648 buf(myindex:myindex+ar%a_dim(4)-1) = & 649 data_3d(1:ar%a_dim(4),ape%locind(ij)%j,ape%locind(ij)%i) 650 myindex = myindex + ar%a_dim(4) 651 ENDDO 652 653 ENDIF 654 655 ENDDO 656 657 ENDDO 658 ! 659 !-- Buffer is filled 660 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 661 662 END SUBROUTINE pmc_s_fillbuffer 663 664 665 666 SUBROUTINE pmc_s_getdata_from_buffer( clientid, waittime ) 667 668 IMPLICIT NONE 669 670 INTEGER, INTENT(IN) :: clientid !< 671 REAL(wp), INTENT(OUT), OPTIONAL :: waittime !< 672 673 INTEGER :: ierr !< 674 INTEGER :: ij !< 675 INTEGER :: ip !< 676 INTEGER :: istat !< 677 INTEGER :: j !< 678 INTEGER :: myindex !< 679 INTEGER :: nr !< 680 INTEGER :: target_pe !< 681 INTEGER(kind=MPI_ADDRESS_KIND) :: target_disp !< 682 683 INTEGER, DIMENSION(1) :: buf_shape !< 684 685 REAL(wp) :: t1 !< 686 REAL(wp) :: t2 !< 687 REAL(wp), POINTER, DIMENSION(:) :: buf !< 688 REAL(wp), POINTER, DIMENSION(:,:) :: data_2d !< 689 REAL(wp), POINTER, DIMENSION(:,:,:) :: data_3d !< 690 691 TYPE(pedef), POINTER :: ape !< 692 TYPE(arraydef), POINTER :: ar !< 693 694 695 t1 = pmc_time() 696 ! 697 !-- Wait for client to fill buffer 698 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 699 t2 = pmc_time() - t1 700 IF ( PRESENT( waittime ) ) waittime = t2 701 ! 
702 !-- TODO: check next statement 703 !-- Fence might do it, test later 704 !-- CALL MPI_WIN_FENCE( 0, clients(clientid)%win_server_client, ierr) 705 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 706 707 DO ip = 1, clients(clientid)%inter_npes 708 709 ape => clients(clientid)%pes(ip) 710 711 DO j = 1, ape%nr_arrays 712 713 ar => ape%array_list(j) 714 715 IF ( ar%recvindex < 0 ) CYCLE 716 717 IF ( ar%nrdims == 2 ) THEN 718 nr = ape%nrele 719 ELSEIF ( ar%nrdims == 3 ) THEN 720 nr = ape%nrele * ar%a_dim(4) 721 ENDIF 722 723 buf_shape(1) = nr 724 CALL C_F_POINTER( ar%recvbuf, buf, buf_shape ) 725 726 ! 727 !-- MPI passive target RMA 728 IF ( nr > 0 ) THEN 729 target_disp = ar%recvindex - 1 730 ! 731 !-- Client PEs are located behind server PEs 732 target_pe = ip - 1 + m_model_npes 733 CALL MPI_WIN_LOCK( MPI_LOCK_SHARED, target_pe, 0, & 734 clients(clientid)%win_server_client, ierr ) 735 CALL MPI_GET( buf, nr, MPI_REAL, target_pe, target_disp, nr, & 736 MPI_REAL, clients(clientid)%win_server_client, ierr ) 737 CALL MPI_WIN_UNLOCK( target_pe, & 738 clients(clientid)%win_server_client, ierr ) 739 ENDIF 740 741 myindex = 1 742 IF ( ar%nrdims == 2 ) THEN 743 744 CALL C_F_POINTER( ar%data, data_2d, ar%a_dim(1:2) ) 745 DO ij = 1, ape%nrele 746 data_2d(ape%locind(ij)%j,ape%locind(ij)%i) = buf(myindex) 747 myindex = myindex + 1 748 ENDDO 749 750 ELSEIF ( ar%nrdims == 3 ) THEN 751 752 CALL C_F_POINTER( ar%data, data_3d, ar%a_dim(1:3)) 753 DO ij = 1, ape%nrele 754 data_3d(1:ar%a_dim(4),ape%locind(ij)%j,ape%locind(ij)%i) = & 755 buf(myindex:myindex+ar%a_dim(4)-1) 756 myindex = myindex + ar%a_dim(4) 757 ENDDO 758 759 ENDIF 760 761 ENDDO 762 763 ENDDO 764 765 END SUBROUTINE pmc_s_getdata_from_buffer 766 767 768 769 SUBROUTINE get_da_names_from_client( clientid ) 770 ! 771 !-- Get data array description and name from client 772 IMPLICIT NONE 773 774 INTEGER, INTENT(IN) :: clientid !< 775 776 TYPE(da_namedef) :: myname !< 777 778 DO 779 CALL pmc_bcast( myname%couple_index, 0, comm=m_to_client_comm(clientid) ) 780 IF ( myname%couple_index == -1 ) EXIT 781 CALL pmc_bcast( myname%serverdesc, 0, comm=m_to_client_comm(clientid) ) 782 CALL pmc_bcast( myname%nameonserver, 0, comm=m_to_client_comm(clientid) ) 783 CALL pmc_bcast( myname%clientdesc, 0, comm=m_to_client_comm(clientid) ) 784 CALL pmc_bcast( myname%nameonclient, 0, comm=m_to_client_comm(clientid) ) 785 786 CALL pmc_g_setname( clients(clientid), myname%couple_index, & 787 myname%nameonserver ) 788 ENDDO 789 790 END SUBROUTINE get_da_names_from_client 791 792 793 794 SUBROUTINE pmc_s_setarray(clientid, nrdims, dims, array_adr, second_adr ) 795 ! 
796 !-- Set array for client interPE 0 797 IMPLICIT NONE 798 799 INTEGER, INTENT(IN) :: clientid !< 800 INTEGER, INTENT(IN) :: nrdims !< 801 INTEGER, INTENT(IN), DIMENSION(:) :: dims !< 802 803 TYPE(C_PTR), INTENT(IN) :: array_adr !< 804 TYPE(C_PTR), INTENT(IN), OPTIONAL :: second_adr !< 805 806 INTEGER :: i !< local counter 807 808 TYPE(pedef), POINTER :: ape !< 809 TYPE(arraydef), POINTER :: ar !< 810 811 812 DO i = 1, clients(clientid)%inter_npes 813 814 ape => clients(clientid)%pes(i) 815 ar => ape%array_list(next_array_in_list) 816 ar%nrdims = nrdims 817 ar%a_dim = dims 818 ar%data = array_adr 819 820 IF ( PRESENT( second_adr ) ) THEN 821 ar%po_data(1) = array_adr 822 ar%po_data(2) = second_adr 823 ELSE 824 ar%po_data(1) = C_NULL_PTR 825 ar%po_data(2) = C_NULL_PTR 826 ENDIF 827 828 ENDDO 829 830 END SUBROUTINE pmc_s_setarray 831 832 833 834 SUBROUTINE pmc_s_set_active_data_array( clientid, iactive ) 835 836 IMPLICIT NONE 837 838 INTEGER, INTENT(IN) :: clientid !< 839 INTEGER, INTENT(IN) :: iactive !< 840 841 INTEGER :: i !< 842 INTEGER :: ip !< 843 INTEGER :: j !< 844 845 TYPE(pedef), POINTER :: ape !< 846 TYPE(arraydef), POINTER :: ar !< 847 848 DO ip = 1, clients(clientid)%inter_npes 849 850 ape => clients(clientid)%pes(ip) 851 852 DO j = 1, ape%nr_arrays 853 854 ar => ape%array_list(j) 855 IF ( iactive == 1 .OR. iactive == 2 ) THEN 856 ar%data = ar%po_data(iactive) 857 ENDIF 858 859 ENDDO 860 861 ENDDO 862 863 END SUBROUTINE pmc_s_set_active_data_array 864 865 866 867 SUBROUTINE set_pe_index_list( clientid, myclient, index_list, nrp ) 868 869 IMPLICIT NONE 870 871 INTEGER, INTENT(IN) :: clientid !< 872 INTEGER, INTENT(IN), DIMENSION(:,:) :: index_list !< 873 INTEGER, INTENT(IN) :: nrp !< 874 875 TYPE(clientdef), INTENT(INOUT) :: myclient !< 876 877 INTEGER :: i !< 878 INTEGER :: ierr !< 879 INTEGER :: ind !< 880 INTEGER :: indwin !< 881 INTEGER :: indwin2 !< 882 INTEGER :: i2 !< 883 INTEGER :: j !< 884 INTEGER :: rempe !< 885 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize !< 886 887 INTEGER, DIMENSION(myclient%inter_npes) :: remind !< 888 889 INTEGER, DIMENSION(:), POINTER :: remindw !< 890 INTEGER, DIMENSION(:), POINTER :: rldef !< 891 892 TYPE(pedef), POINTER :: ape !< 893 894 ! 895 !-- First, count entries for every remote client PE 896 DO i = 1, myclient%inter_npes 897 ape => myclient%pes(i) 898 ape%nrele = 0 899 ENDDO 900 ! 901 !-- Loop over number of coarse grid cells 902 DO j = 1, nrp 903 rempe = index_list(5,j) + 1 ! PE number on remote PE 904 ape => myclient%pes(rempe) 905 ape%nrele = ape%nrele + 1 ! Increment number of elements for this client PE 906 ENDDO 907 908 DO i = 1, myclient%inter_npes 909 ape => myclient%pes(i) 910 ALLOCATE( ape%locind(ape%nrele) ) 911 ENDDO 912 913 remind = 0 914 915 ! 916 !-- Second, create lists 917 !-- Loop over number of coarse grid cells 918 DO j = 1, nrp 919 rempe = index_list(5,j) + 1 920 ape => myclient%pes(rempe) 921 remind(rempe) = remind(rempe)+1 922 ind = remind(rempe) 923 ape%locind(ind)%i = index_list(1,j) 924 ape%locind(ind)%j = index_list(2,j) 925 ENDDO 926 ! 927 !-- Prepare number of elements for client PEs 928 CALL pmc_alloc_mem( rldef, myclient%inter_npes*2 ) 929 ! 930 !-- Number of client PEs * size of INTEGER (i just arbitrary INTEGER) 931 winsize = myclient%inter_npes*c_sizeof(i)*2 932 933 CALL MPI_WIN_CREATE( rldef, winsize, iwp, MPI_INFO_NULL, & 934 myclient%intra_comm, indwin, ierr ) 935 ! 936 !-- Open window to set data 937 CALL MPI_WIN_FENCE( 0, indwin, ierr ) 938 939 rldef(1) = 0 ! 
index on remote PE 0 940 rldef(2) = remind(1) ! number of elements on remote PE 0 941 ! 942 !-- Reserve buffer for index array 943 DO i = 2, myclient%inter_npes 944 i2 = (i-1) * 2 + 1 945 rldef(i2) = rldef(i2-2) + rldef(i2-1) * 2 ! index on remote PE 946 rldef(i2+1) = remind(i) ! number of elements on remote PE 947 ENDDO 948 ! 949 !-- Close window to allow client to access data 950 CALL MPI_WIN_FENCE( 0, indwin, ierr ) 951 ! 952 !-- Client has retrieved data 953 CALL MPI_WIN_FENCE( 0, indwin, ierr ) 954 955 i2 = 2 * myclient%inter_npes - 1 956 winsize = ( rldef(i2) + rldef(i2+1) ) * 2 957 ! 958 !-- Make sure, MPI_ALLOC_MEM works 959 winsize = MAX( winsize, 1 ) 960 961 CALL pmc_alloc_mem( remindw, INT( winsize ) ) 962 963 CALL MPI_BARRIER( m_model_comm, ierr ) 964 CALL MPI_WIN_CREATE( remindw, winsize*c_sizeof(i), iwp, MPI_INFO_NULL, & 965 myclient%intra_comm, indwin2, ierr ) 966 ! 967 !-- Open window to set data 968 CALL MPI_WIN_FENCE( 0, indwin2, ierr ) 969 ! 970 !-- Create the 2D index list 971 DO j = 1, nrp 972 rempe = index_list(5,j) + 1 ! PE number on remote PE 973 ape => myclient%pes(rempe) 974 i2 = rempe * 2 - 1 975 ind = rldef(i2) + 1 976 remindw(ind) = index_list(3,j) 977 remindw(ind+1) = index_list(4,j) 978 rldef(i2) = rldef(i2)+2 979 ENDDO 980 ! 981 !-- All data areset 982 CALL MPI_WIN_FENCE( 0, indwin2, ierr ) 983 ! 984 !-- Don't know why, but this barrier is necessary before windows can be freed 985 !-- TODO: find out why this is required 986 CALL MPI_BARRIER( myclient%intra_comm, ierr ) 987 988 CALL MPI_WIN_FREE( indwin, ierr ) 989 CALL MPI_WIN_FREE( indwin2, ierr ) 990 991 !-- TODO: check if the following idea needs to be done 992 !-- Sollte funktionieren, Problem mit MPI implementation 993 !-- https://www.lrz.de/services/software/parallel/mpi/onesided 994 !-- CALL MPI_Free_mem (remindw, ierr) 995 996 END SUBROUTINE set_pe_index_list 819 997 820 998 #endif 821 END MODULE pmc_server999 END MODULE pmc_server
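Background sketches (standalone toy code, not part of changeset 1900 or of the PALM sources; all names below are invented for illustration only):

The server/client data exchange implemented in pmc_server_mod rests on MPI one-sided communication: the server prepares a send buffer, exposes it in the RMA window win_server_client, and later pulls the clients' buffers with a passive-target MPI_WIN_LOCK / MPI_GET / MPI_WIN_UNLOCK epoch, as seen in pmc_s_getdata_from_buffer. The following minimal Fortran program shows just that sequence between two PEs; it assumes an MPI library that provides the Fortran mpi module.

PROGRAM rma_get_sketch

   USE MPI

   IMPLICIT NONE

   INTEGER, PARAMETER :: dp = KIND( 1.0d0 )  !< double precision kind
   INTEGER, PARAMETER :: nr = 4              !< number of elements to transfer

   INTEGER :: disp_unit  !< displacement unit of the window (bytes per element)
   INTEGER :: ierr       !<
   INTEGER :: npes       !<
   INTEGER :: rank       !<
   INTEGER :: win        !< RMA window handle

   INTEGER(KIND=MPI_ADDRESS_KIND) :: target_disp  !< offset inside target window
   INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize      !< window size in bytes

   REAL(dp), DIMENSION(nr) :: base  !< window memory, exposed by every PE
   REAL(dp), DIMENSION(nr) :: buf   !< local buffer filled by MPI_GET


   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, rank, ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, npes, ierr )

   IF ( npes < 2 )  THEN
      PRINT*, 'please start at least 2 MPI processes'
      CALL MPI_ABORT( MPI_COMM_WORLD, 1, ierr )
   ENDIF
!
!-- Every PE exposes its base array in one window. For full portability of
!-- passive-target RMA the window memory can be allocated with MPI_ALLOC_MEM,
!-- which is what pmc_alloc_mem provides in the changeset.
   base      = rank * 100.0_dp + (/ 1.0_dp, 2.0_dp, 3.0_dp, 4.0_dp /)
   disp_unit = STORAGE_SIZE( base(1) ) / 8
   winsize   = nr * disp_unit
   CALL MPI_WIN_CREATE( base, winsize, disp_unit, MPI_INFO_NULL,               &
                        MPI_COMM_WORLD, win, ierr )
!
!-- Make sure the target has filled its window memory before the origin reads
   CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )

   IF ( rank == 0 )  THEN
!
!--   Passive-target epoch: lock the target, read nr elements starting at
!--   displacement 0, unlock. The target PE (rank 1) does not take part.
      target_disp = 0
      CALL MPI_WIN_LOCK( MPI_LOCK_SHARED, 1, 0, win, ierr )
      CALL MPI_GET( buf, nr, MPI_DOUBLE_PRECISION, 1, target_disp, nr,         &
                    MPI_DOUBLE_PRECISION, win, ierr )
      CALL MPI_WIN_UNLOCK( 1, win, ierr )
      PRINT*, 'rank 0 received from rank 1: ', buf
   ENDIF

   CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )
   CALL MPI_WIN_FREE( win, ierr )
   CALL MPI_FINALIZE( ierr )

END PROGRAM rma_get_sketch

The second ingredient is the memory allocation wrapped by pmc_alloc_mem in pmc_mpi_wrapper_mod: MPI_ALLOC_MEM returns a C pointer, which C_F_POINTER then maps onto an ordinary Fortran array pointer. A reduced sketch of that pattern, assuming an MPI-3 library whose mpi module offers the TYPE(C_PTR) overload of MPI_ALLOC_MEM (the same interface the changeset itself relies on):

MODULE pmc_alloc_sketch

   USE, INTRINSIC ::  ISO_C_BINDING

   USE MPI

   IMPLICIT NONE

   INTEGER, PARAMETER :: dp = KIND( 1.0d0 )  !< double precision kind

 CONTAINS

    SUBROUTINE alloc_rma_real_1d( array, idim1 )
!
!--    Allocate a 1d REAL array with MPI_ALLOC_MEM and associate the returned
!--    C pointer with a Fortran POINTER via C_F_POINTER

       IMPLICIT NONE

       INTEGER, INTENT(IN)                            :: idim1  !< number of elements
       REAL(dp), DIMENSION(:), POINTER, INTENT(INOUT) :: array  !< resulting array pointer

       INTEGER                        :: ierr     !<
       INTEGER, DIMENSION(1)          :: ashape   !< shape passed to C_F_POINTER
       INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize  !< requested size in bytes

       TYPE(C_PTR) :: p  !< C pointer returned by MPI_ALLOC_MEM

!
!--    Size in bytes = number of elements * bytes per REAL(dp) element
       winsize = INT( idim1, MPI_ADDRESS_KIND ) * ( STORAGE_SIZE( 1.0_dp ) / 8 )

       CALL MPI_ALLOC_MEM( winsize, MPI_INFO_NULL, p, ierr )
!
!--    Map the allocated memory onto the Fortran array pointer
       ashape(1) = idim1
       CALL C_F_POINTER( p, array, ashape )

    END SUBROUTINE alloc_rma_real_1d

END MODULE pmc_alloc_sketch

In the changeset these two ingredients appear combined: pmc_s_setind_and_allocmem allocates the base arrays through pmc_alloc_mem and exposes them in win_server_client with MPI_WIN_CREATE, and pmc_s_getdata_from_buffer later pulls the client data through that window with the lock/get/unlock sequence shown above.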